| Column | Type | Range / values |
|---|---|---|
| blob_id | string | length 40 |
| directory_id | string | length 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 |
| detected_licenses | sequence | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 |
| revision_id | string | length 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | sequence | length 1 |
| author_id | string | length 1 to 132 |
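Each row pairs one source file (`content`) with its repository and commit provenance. A minimal sketch of reading a few rows with the Hugging Face `datasets` library, assuming streaming access; the card does not name the dataset, so `"org/dataset-name"` below is a hypothetical placeholder:

```python
from datasets import load_dataset

# Hypothetical identifier; substitute the real dataset name for this card.
ds = load_dataset("org/dataset-name", split="train", streaming=True)

# Each record is a dict keyed by the columns in the schema above.
for row in ds.take(2):
    print(row["repo_name"], row["path"], row["license_type"])
    print(row["content"][:200])  # first 200 characters of the file
```

The rows below are sample records, one per source file.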
eeaa45b85f2ad11bc1462bbc6fe5dd16a502735b | 2c0993aac7ad3848d343ffb4f838a5bda8f740f3 | /funcation_Python/harshad.py | 019eb9bed9d0b4ecbe37f410ebf3b95bc61b6fa5 | [] | no_license | Praveen-Kumar-Bairagi/Python_Logical_Quetions | 6e20e04bf737f8f4592be76694b54470d5d79e7a | 4e0f3262cf183f56e5a57157f0593a454923317f | refs/heads/master | 2023-08-22T21:40:36.343811 | 2021-10-24T05:21:43 | 2021-10-24T05:21:43 | 420,594,339 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 271 | py | def is_harshad_num(num):
    num2 = num
    add = 0
    # sum the digits of num
    while 0 < num:
        a = num % 10
        add += a
        num = num // 10
    # a Harshad number is divisible by the sum of its digits
    if num2 % add == 0:
        return True
    else:
        return False
num=int(input("enter the num"))
print(is_harshad_num(num))
| [
"[email protected]"
] | |
a4badd11e840906a2c0e96b040d2ad0c1d23965d | e631f155b30122d813678fd8dd98004085b9579e | /setup.py | 0d6bb32b8676adacd2c8026933b7605a6ce3bab9 | [
"MIT"
] | permissive | SidSachdev/pycrunchbase | 4efd716c58b2bdbee379c5f4d9fd30c310d43502 | f0a9b945bc5d3f7b3827820bd28a75265c28f756 | refs/heads/master | 2020-12-14T18:53:51.512180 | 2015-09-28T01:17:19 | 2015-09-28T01:17:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,791 | py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from __future__ import absolute_import, print_function
import io
import os
import re
from glob import glob
from os.path import basename
from os.path import dirname
from os.path import join
from os.path import relpath
from os.path import splitext
from setuptools import find_packages
from setuptools import setup
def read(*names, **kwargs):
    return io.open(
        join(dirname(__file__), *names),
        encoding=kwargs.get("encoding", "utf8")
    ).read()


setup(
    name="pycrunchbase",
    version="0.3.5",
    license="MIT",
    description="Python bindings to CrunchBase",
    long_description="{0}\n{1}".format(read("README.rst"), re.sub(":obj:`~?(.*?)`", r"``\1``", read("CHANGELOG.rst"))),
    author="Ng Zhi An",
    author_email="[email protected]",
    url="https://github.com/ngzhian/pycrunchbase",
    packages=find_packages("src"),
    package_dir={"": "src"},
    py_modules=[splitext(basename(path))[0] for path in glob("src/*.py")],
    include_package_data=True,
    zip_safe=False,
    classifiers=[
        # complete classifier list: http://pypi.python.org/pypi?%3Aaction=list_classifiers
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Unix",
        "Operating System :: POSIX",
        "Operating System :: Microsoft :: Windows",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.4",
        "Programming Language :: Python :: Implementation :: CPython",
        "Topic :: Utilities",
    ],
    keywords=[
        "crunchbase"
    ],
    install_requires=[
        "requests==2.5.1", "six==1.9.0"
    ],
)
| [
"[email protected]"
] | |
9a552b56252d358c502b6dbb7ae63745a4fca22c | 2452bdfb25628e55190c46694e156bf4b1459cf0 | /prejudge/views.py | 1dcd4f1dae49f5c5ed8bf029726d85ea47c4ae9a | [] | no_license | bobjiangps/bug_prejudge | 0da3fbeab9dae1620330d16852d3e8792def56d5 | aeadd84476de0cf10a0341d694888f768e6c3706 | refs/heads/master | 2023-03-01T14:43:44.904473 | 2023-02-13T10:09:59 | 2023-02-13T10:09:59 | 195,923,513 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 746 | py | from prejudge_process import PrejudgeProcess
from rest_framework.views import APIView
from rest_framework.response import Response
class PrejudgeRound(APIView):
    def get(self, request, round_id):
        p = PrejudgeProcess(round_id=round_id)
        result = p.run()
        return Response(result)


class PrejudgeScript(APIView):
    def get(self, request, round_id, script_id):
        p = PrejudgeProcess(round_id=round_id, script_id=script_id)
        result = p.run()
        return Response(result)


class PrejudgeCase(APIView):
    def get(self, request, round_id, script_id, case_id):
        p = PrejudgeProcess(round_id=round_id, script_id=script_id, case_id=case_id)
        result = p.run()
        return Response(result)
| [
"[email protected]"
] | |
eac58c5c03b7ab9050d3193746a20cb1e83c733e | d2e78c7c8c94cd2ef16a0b77eb9852312ead4ee7 | /elementpath/xpath2/xpath2_functions.py | ad67a79b39019425263b4d8e465394d1f68c8e11 | [
"MIT"
] | permissive | evhayes/elementpath | fa72cfba0c761b0ddb7100d36101b5c117e00fee | a74ce89c04622d8ae98ab739886c3e46f87b024e | refs/heads/master | 2023-08-04T05:16:09.136086 | 2021-09-16T14:00:22 | 2021-09-16T14:01:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 55,019 | py | #
# Copyright (c), 2018-2021, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
"""
XPath 2.0 implementation - part 2 (functions)
"""
import math
import datetime
import time
import re
import locale
import os.path
import unicodedata
from copy import copy
from decimal import Decimal, DecimalException
from string import ascii_letters
from urllib.parse import urlsplit, quote as urllib_quote
from ..helpers import is_idrefs, is_xml_codepoint
from ..datatypes import QNAME_PATTERN, DateTime10, DateTime, Date10, Date, \
Float10, DoubleProxy, Time, Duration, DayTimeDuration, YearMonthDuration, \
UntypedAtomic, AnyURI, QName, NCName, Id, ArithmeticProxy, NumericProxy
from ..namespaces import XML_NAMESPACE, get_namespace, split_expanded_name, XML_ID, XML_LANG
from ..xpath_context import XPathContext, XPathSchemaContext
from ..xpath_nodes import AttributeNode, NamespaceNode, is_element_node, \
is_document_node, is_xpath_node, node_name, node_nilled, node_base_uri, \
node_document_uri, node_kind, etree_deep_equal
from ..regex import RegexError, translate_pattern
from .xpath2_parser import XPath2Parser
method = XPath2Parser.method
function = XPath2Parser.function
def is_local_url_scheme(scheme):
return scheme in ('', 'file') or len(scheme) == 1 and scheme in ascii_letters
###
# Sequence types (allowed only for type checking in treat-as/instance-of statements)
function('empty-sequence', nargs=0, label='sequence type')
@method(function('item', nargs=0, label='sequence type'))
def evaluate_item_sequence_type(self, context=None):
if context is None:
raise self.missing_context()
return context.root if context.item is None else context.item
###
# Function for QNames
@method(function('prefix-from-QName', nargs=1,
sequence_types=('xs:QName?', 'xs:NCName?')))
def evaluate_prefix_from_qname_function(self, context=None):
qname = self.get_argument(context)
if qname is None:
return
elif not isinstance(qname, QName):
raise self.error('XPTY0004', 'argument has an invalid type %r' % type(qname))
return qname.prefix or []
@method(function('local-name-from-QName', nargs=1,
sequence_types=('xs:QName?', 'xs:NCName?')))
def evaluate_local_name_from_qname_function(self, context=None):
qname = self.get_argument(context)
if qname is None:
return
elif not isinstance(qname, QName):
raise self.error('XPTY0004', 'argument has an invalid type %r' % type(qname))
return NCName(qname.local_name)
@method(function('namespace-uri-from-QName', nargs=1,
sequence_types=('xs:QName?', 'xs:anyURI?')))
def evaluate_uri_from_qname_function(self, context=None):
qname = self.get_argument(context)
if qname is None:
return
elif not isinstance(qname, QName):
raise self.error('XPTY0004', 'argument has an invalid type %r' % type(qname))
return AnyURI(qname.uri or '')
@method(function('namespace-uri-for-prefix', nargs=2,
sequence_types=('xs:string?', 'element()', 'xs:anyURI?')))
def evaluate_namespace_uri_for_prefix_function(self, context=None):
if context is None:
raise self.missing_context()
prefix = self.get_argument(context=copy(context))
if prefix is None:
prefix = ''
if not isinstance(prefix, str):
raise self.error('FORG0006', '1st argument has an invalid type %r' % type(prefix))
elem = self.get_argument(context, index=1)
if not is_element_node(elem):
raise self.error('FORG0006', '2nd argument %r is not an element node' % elem)
ns_uris = {get_namespace(e.tag) for e in elem.iter()}
for p, uri in self.parser.namespaces.items():
if uri in ns_uris:
if p == prefix:
if not prefix or uri:
return AnyURI(uri)
else:
msg = 'Prefix %r is associated to no namespace'
raise self.error('XPST0081', msg % prefix)
@method(function('in-scope-prefixes', nargs=1, sequence_types=('element()', 'xs:string*')))
def select_in_scope_prefixes_function(self, context=None):
if context is None:
raise self.missing_context()
elem = self.get_argument(context)
if not is_element_node(elem):
raise self.error('XPTY0004', 'argument %r is not an element node' % elem)
if isinstance(context, XPathSchemaContext):
# For schema context returns prefixes of static namespaces
yield from self.parser.namespaces
elif hasattr(elem, 'nsmap'):
# For lxml returns Element's prefixes
yield 'xml'
yield from filter(lambda x: x and x != 'xml', elem.nsmap)
else:
# For ElementTree returns module registered prefixes
prefixes = {'xml'} # 'xml' prefix is always registered
prefixes.update(x for x in self.parser.namespaces if x)
if context.namespaces:
prefixes.update(x for x in context.namespaces if x)
yield from prefixes
@method(function('resolve-QName', nargs=2,
sequence_types=('xs:string?', 'element()', 'xs:QName?')))
def evaluate_resolve_qname_function(self, context=None):
qname = self.get_argument(context=copy(context))
if qname is None:
return
elif not isinstance(qname, str):
raise self.error('FORG0006', '1st argument has an invalid type %r' % type(qname))
if context is None:
raise self.missing_context()
elem = self.get_argument(context, index=1)
if not is_element_node(elem):
raise self.error('FORG0006', '2nd argument %r is not an element node' % elem)
qname = qname.strip()
match = QNAME_PATTERN.match(qname)
if match is None:
raise self.error('FOCA0002', '1st argument must be an xs:QName')
prefix = match.groupdict()['prefix'] or ''
if prefix == 'xml':
return QName(XML_NAMESPACE, qname)
try:
nsmap = {**self.parser.namespaces, **elem.nsmap}
except AttributeError:
nsmap = self.parser.namespaces
for pfx, uri in nsmap.items():
if pfx == prefix:
if pfx:
return QName(uri, '{}:{}'.format(pfx, match.groupdict()['local']))
else:
return QName(uri, match.groupdict()['local'])
if prefix or '' in self.parser.namespaces:
raise self.error('FONS0004', 'no namespace found for prefix %r' % prefix)
return QName('', qname)
###
# Accessor functions
@method(function('node-name', nargs=1, sequence_types=('node()?', 'xs:QName?')))
def evaluate_node_name_function(self, context=None):
arg = self.get_argument(context)
if arg is None:
return
elif not is_xpath_node(arg):
raise self.error('XPTY0004', 'an XPath node required')
name = node_name(arg)
if name is None:
return
elif name.startswith('{'):
# name is a QName in extended format
namespace, local_name = split_expanded_name(name)
for pfx, uri in self.parser.namespaces.items():
if uri == namespace:
return QName(uri, '{}:{}'.format(pfx, local_name))
raise self.error('FONS0004', 'no prefix found for namespace {}'.format(namespace))
else:
# name is a local name
return QName(self.parser.namespaces.get('', ''), name)
@method(function('nilled', nargs=1, sequence_types=('node()?', 'xs:boolean?')))
def evaluate_nilled_function(self, context=None):
arg = self.get_argument(context)
if arg is None:
return
elif not is_xpath_node(arg):
raise self.error('XPTY0004', 'an XPath node required')
return node_nilled(arg)
@method(function('data', nargs=1, sequence_types=('item()*', 'xs:anyAtomicType*')))
def select_data_function(self, context=None):
for item in self[0].select(context):
value = self.data_value(item)
if value is None:
raise self.error('FOTY0012', "argument node does not have a typed value")
else:
yield value
@method(function('base-uri', nargs=(0, 1), sequence_types=('node()?', 'xs:anyURI?')))
def evaluate_base_uri_function(self, context=None):
item = self.get_argument(context, default_to_context=True)
if context is None:
raise self.missing_context("context item is undefined")
elif item is None:
return
elif not is_xpath_node(item):
raise self.wrong_context_type("context item is not a node")
else:
uri = node_base_uri(item)
if uri is not None:
return AnyURI(uri)
@method(function('document-uri', nargs=1, sequence_types=('node()?', 'xs:anyURI?')))
def evaluate_document_uri_function(self, context=None):
if context is None:
raise self.missing_context()
arg = self.get_argument(context)
if arg is None or not is_document_node(arg):
return
uri = node_document_uri(arg)
if uri is not None:
return AnyURI(uri)
elif is_document_node(context.root):
try:
for uri, doc in context.documents.items():
if doc is context.root:
return AnyURI(uri)
except AttributeError:
pass
###
# Number functions
@method(function('round-half-to-even', nargs=(1, 2),
sequence_types=('numeric?', 'xs:integer', 'numeric?')))
def evaluate_round_half_to_even_function(self, context=None):
item = self.get_argument(context)
if item is None:
return
elif isinstance(item, float) and (math.isnan(item) or math.isinf(item)):
return item
elif not isinstance(item, (float, int, Decimal)):
code = 'XPTY0004' if isinstance(item, str) else 'FORG0006'
raise self.error(code, "invalid argument type {!r}".format(type(item)))
precision = 0 if len(self) < 2 else self[1].evaluate(context)
try:
if isinstance(item, int):
return round(item, precision)
elif isinstance(item, Decimal):
return round(item, precision)
elif isinstance(item, Float10):
return Float10(round(item, precision))
return float(round(Decimal.from_float(item), precision))
except TypeError as err:
raise self.error('XPTY0004', err)
except (DecimalException, OverflowError):
if isinstance(item, Decimal):
return Decimal.from_float(round(float(item), precision))
return round(item, precision)
@method(function('abs', nargs=1, sequence_types=('numeric?', 'numeric?')))
def evaluate_abs_function(self, context=None):
item = self.get_argument(context)
if item is None:
return
elif isinstance(item, float) and math.isnan(item):
return item
elif is_xpath_node(item):
value = self.string_value(item)
try:
return abs(Decimal(value))
except DecimalException:
raise self.error('FOCA0002', "invalid string value {!r} for {!r}".format(value, item))
elif isinstance(item, bool) or not isinstance(item, (float, int, Decimal)):
raise self.error('XPTY0004', "invalid argument type {!r}".format(type(item)))
else:
return abs(item)
###
# Aggregate functions
@method(function('avg', nargs=1, sequence_types=('xs:anyAtomicType*', 'xs:anyAtomicType')))
def evaluate_avg_function(self, context=None):
values = []
for item in self[0].select_data_values(context):
if isinstance(item, UntypedAtomic):
values.append(self.cast_to_double(item.value))
elif isinstance(item, (AnyURI, bool)):
raise self.error('FORG0006', 'non numeric value {!r} in the sequence'.format(item))
else:
values.append(item)
if not values:
return
elif isinstance(values[0], Duration):
value = values[0]
try:
for item in values[1:]:
value = value + item
return value / len(values)
except TypeError as err:
raise self.error('FORG0006', err)
elif all(isinstance(x, int) for x in values):
result = sum(values) / Decimal(len(values))
return int(result) if result % 1 == 0 else result
elif all(isinstance(x, (int, Decimal)) for x in values):
return sum(values) / Decimal(len(values))
elif all(not isinstance(x, DoubleProxy) for x in values):
try:
return sum(Float10(x) if isinstance(x, Decimal) else x for x in values) / len(values)
except TypeError as err:
raise self.error('FORG0006', err)
else:
try:
return sum(float(x) if isinstance(x, Decimal) else x for x in values) / len(values)
except TypeError as err:
raise self.error('FORG0006', err)
@method(function('max', nargs=(1, 2),
sequence_types=('xs:anyAtomicType*', 'xs:string', 'xs:anyAtomicType?')))
@method(function('min', nargs=(1, 2),
sequence_types=('xs:anyAtomicType*', 'xs:string', 'xs:anyAtomicType?')))
def evaluate_max_min_functions(self, context=None):
def max_or_min():
if not values:
return values
elif any(isinstance(x, str) for x in values):
if any(isinstance(x, ArithmeticProxy) for x in values):
raise self.error('FORG0006', "cannot compare strings with numeric data")
elif all(isinstance(x, (Decimal, int)) for x in values):
return aggregate_func(values)
elif any(isinstance(x, float) and math.isnan(x) for x in values):
return float_class('NaN')
elif all(isinstance(x, (int, float, Decimal)) for x in values):
return float_class(aggregate_func(values))
return aggregate_func(values)
values = []
float_class = None
aggregate_func = max if self.symbol == 'max' else min
for item in self[0].select_data_values(context):
if isinstance(item, UntypedAtomic):
values.append(self.cast_to_double(item))
float_class = float
elif isinstance(item, float):
values.append(item)
if float_class is None:
float_class = type(item)
elif float_class is Float10 and not isinstance(item, Float10):
float_class = float
elif isinstance(item, AnyURI):
values.append(item.value)
elif isinstance(item, (DayTimeDuration, YearMonthDuration)):
values.append(item)
elif isinstance(item, (Duration, QName)):
raise self.error('FORG0006', "xs:{} is not an ordered type".format(type(item).name))
else:
values.append(item)
try:
if len(self) > 1:
with self.use_locale(collation=self.get_argument(context, 1)):
return max_or_min()
return max_or_min()
except TypeError as err:
raise self.error('FORG0006', err)
###
# General functions for sequences
@method(function('empty', nargs=1, sequence_types=('item()*', 'xs:boolean')))
@method(function('exists', nargs=1, sequence_types=('item()*', 'xs:boolean')))
def evaluate_empty_and_exists_functions(self, context=None):
return next(iter(self.select(context)))
@method('empty')
def select_empty_function(self, context=None):
try:
next(iter(self[0].select(context)))
except StopIteration:
yield True
else:
yield False
@method('exists')
def select_exists_function(self, context=None):
try:
next(iter(self[0].select(context)))
except StopIteration:
yield False
else:
yield True
@method(function('distinct-values', nargs=(1, 2),
sequence_types=('xs:anyAtomicType*', 'xs:string', 'xs:anyAtomicType*')))
def select_distinct_values_function(self, context=None):
def distinct_values():
nan = False
results = []
for item in self[0].select(context):
value = self.data_value(item)
if context is not None:
context.item = value
if isinstance(value, (float, Decimal)):
if math.isnan(value):
if not nan:
yield value
nan = True
elif all(not math.isclose(value, x, rel_tol=1E-7, abs_tol=0)
for x in results if isinstance(x, (int, Decimal, float))):
yield value
results.append(value)
elif value not in results:
yield value
results.append(value)
if len(self) > 1:
with self.use_locale(collation=self.get_argument(context, 1)):
yield from distinct_values()
else:
yield from distinct_values()
@method(function('insert-before', nargs=3,
sequence_types=('item()*', 'xs:integer', 'item()*', 'item()*')))
def select_insert_before_function(self, context=None):
try:
insert_at_pos = max(0, self[1].value - 1)
except TypeError:
raise self.error('XPTY0004', '2nd argument must be an xs:integer') from None
inserted = False
for pos, result in enumerate(self[0].select(context)):
if not inserted and pos == insert_at_pos:
yield from self[2].select(context)
inserted = True
yield result
if not inserted:
yield from self[2].select(context)
@method(function('index-of', nargs=(2, 3), sequence_types=(
'xs:anyAtomicType*', 'xs:anyAtomicType', 'xs:string', 'xs:integer*')))
def select_index_of_function(self, context=None):
value = self[1].get_atomized_operand(copy(context))
if value is None:
raise self.error('XPTY0004', "2nd argument cannot be an empty sequence")
if len(self) < 3:
for pos, result in enumerate(self[0].select(context), start=1):
if self.data_value(result) == value:
yield pos
else:
with self.use_locale(collation=self.get_argument(context, 2)):
for pos, result in enumerate(self[0].select(context), start=1):
if self.data_value(result) == value:
yield pos
@method(function('remove', nargs=2, sequence_types=('item()*', 'xs:integer', 'item()*')))
def select_remove_function(self, context=None):
position = self[1].evaluate(context)
if not isinstance(position, int):
raise self.error('XPTY0004', 'an xs:integer required')
for pos, result in enumerate(self[0].select(context), start=1):
if pos != position:
yield result
@method(function('reverse', nargs=1, sequence_types=('item()*', 'item()*')))
def select_reverse_function(self, context=None):
yield from reversed([x for x in self[0].select(context)])
@method(function('subsequence', nargs=(2, 3),
sequence_types=('item()*', 'xs:double', 'xs:double', 'item()*')))
def select_subsequence_function(self, context=None):
starting_loc = self.get_argument(context, 1, cls=NumericProxy)
if not math.isnan(starting_loc) and not math.isinf(starting_loc):
starting_loc = round(starting_loc)
if len(self) == 2:
for pos, result in enumerate(self[0].select(context), start=1):
if starting_loc <= pos:
yield result
else:
length = self.get_argument(context, 2, cls=NumericProxy)
if not math.isnan(length) and not math.isinf(length):
length = round(length)
for pos, result in enumerate(self[0].select(context), start=1):
if starting_loc <= pos < starting_loc + length:
yield result
@method(function('unordered', nargs=1, sequence_types=('item()*', 'item()*')))
def select_unordered_function(self, context=None):
yield from sorted([x for x in self[0].select(context)], key=lambda x: self.string_value(x))
###
# Cardinality functions for sequences
@method(function('zero-or-one', nargs=1, sequence_types=('item()*', 'item()?')))
def select_zero_or_one_function(self, context=None):
results = iter(self[0].select(context))
try:
item = next(results)
except StopIteration:
return
try:
next(results)
except StopIteration:
yield item
else:
raise self.error('FORG0003')
@method(function('one-or-more', nargs=1, sequence_types=('item()*', 'item()+')))
def select_one_or_more_function(self, context=None):
results = iter(self[0].select(context))
try:
item = next(results)
except StopIteration:
raise self.error('FORG0004') from None
else:
yield item
while True:
try:
yield next(results)
except StopIteration:
break
@method(function('exactly-one', nargs=1, sequence_types=('item()*', 'item()')))
def select_exactly_one_function(self, context=None):
results = iter(self[0].select(context))
try:
item = next(results)
except StopIteration:
raise self.error('FORG0005') from None
else:
try:
next(results)
except StopIteration:
yield item
else:
raise self.error('FORG0005')
###
# Comparing sequences
@method(function('deep-equal', nargs=(2, 3),
sequence_types=('item()*', 'item()*', 'xs:string', 'xs:boolean')))
def evaluate_deep_equal_function(self, context=None):
def deep_equal():
while True:
value1 = next(seq1, None)
value2 = next(seq2, None)
if (value1 is None) ^ (value2 is None):
return False
elif value1 is None:
return True
elif (is_xpath_node(value1)) ^ (is_xpath_node(value2)):
return False
elif not is_xpath_node(value1):
try:
if isinstance(value1, bool):
if not isinstance(value2, bool) or value1 is not value2:
return False
elif isinstance(value2, bool):
return False
elif isinstance(value1, UntypedAtomic):
if not isinstance(value2, UntypedAtomic) or value1 != value2:
return False
elif isinstance(value2, UntypedAtomic):
return False
elif isinstance(value1, float):
if math.isnan(value1):
if not math.isnan(value2):
return False
elif isinstance(value2, Decimal):
if value1 != float(value2):
return False
elif value1 != value2:
return False
elif isinstance(value2, float):
if math.isnan(value2):
return False
elif isinstance(value1, Decimal):
if value2 != float(value1):
return False
elif value1 != value2:
return False
elif value1 != value2:
return False
except TypeError:
return False
elif node_kind(value1) != node_kind(value2):
return False
elif is_element_node(value1):
if not etree_deep_equal(value1, value2):
return False
elif value1.value != value2.value:
return False
elif isinstance(value1, AttributeNode):
if value1.name != value2.name:
return False
elif isinstance(value1, NamespaceNode):
if value1.prefix != value2.prefix:
return False
seq1 = iter(self[0].select(copy(context)))
seq2 = iter(self[1].select(copy(context)))
if len(self) > 2:
with self.use_locale(collation=self.get_argument(context, 2)):
return deep_equal()
else:
return deep_equal()
###
# Regex
@method(function('matches', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string', 'xs:string', 'xs:boolean')))
def evaluate_matches_function(self, context=None):
input_string = self.get_argument(context, default='', cls=str)
pattern = self.get_argument(context, 1, required=True, cls=str)
flags = 0
if len(self) > 2:
for c in self.get_argument(context, 2, required=True, cls=str):
if c in 'smix':
flags |= getattr(re, c.upper())
else:
raise self.error('FORX0001', "Invalid regular expression flag %r" % c)
try:
python_pattern = translate_pattern(pattern, flags, self.parser.xsd_version)
return re.search(python_pattern, input_string, flags=flags) is not None
except (re.error, RegexError) as err:
msg = "Invalid regular expression: {}"
raise self.error('FORX0002', msg.format(str(err))) from None
except OverflowError as err:
raise self.error('FORX0002', err) from None
REPLACEMENT_PATTERN = re.compile(r'^([^\\$]|[\\]{2}|\\\$|\$\d+)*$')
@method(function('replace', nargs=(3, 4), sequence_types=(
'xs:string?', 'xs:string', 'xs:string', 'xs:string', 'xs:string')))
def evaluate_replace_function(self, context=None):
input_string = self.get_argument(context, default='', cls=str)
pattern = self.get_argument(context, 1, required=True, cls=str)
replacement = self.get_argument(context, 2, required=True, cls=str)
flags = 0
if len(self) > 3:
for c in self.get_argument(context, 3, required=True, cls=str):
if c in 'smix':
flags |= getattr(re, c.upper())
else:
raise self.error('FORX0001', "Invalid regular expression flag %r" % c)
try:
python_pattern = translate_pattern(pattern, flags, self.parser.xsd_version)
pattern = re.compile(python_pattern, flags=flags)
except (re.error, RegexError):
raise self.error('FORX0002', "Invalid regular expression %r" % pattern)
else:
if pattern.search(''):
msg = "Regular expression %r matches zero-length string"
raise self.error('FORX0003', msg % pattern.pattern)
elif REPLACEMENT_PATTERN.search(replacement) is None:
raise self.error('FORX0004', "Invalid replacement string %r" % replacement)
else:
for g in range(pattern.groups, -1, -1):
if '$%d' % g in replacement:
replacement = re.sub(r'(?<!\\)\$%d' % g, r'\\g<%d>' % g, replacement)
return pattern.sub(replacement, input_string).replace('\\$', '$')
@method(function('tokenize', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string', 'xs:string', 'xs:string*')))
def select_tokenize_function(self, context=None):
input_string = self.get_argument(context, cls=str)
pattern = self.get_argument(context, 1, required=True, cls=str)
flags = 0
if len(self) > 2:
for c in self.get_argument(context, 2, required=True, cls=str):
if c in 'smix':
flags |= getattr(re, c.upper())
else:
raise self.error('FORX0001', "Invalid regular expression flag %r" % c)
try:
python_pattern = translate_pattern(pattern, flags, self.parser.xsd_version)
pattern = re.compile(python_pattern, flags=flags)
except (re.error, RegexError):
raise self.error('FORX0002', "Invalid regular expression %r" % pattern) from None
else:
if pattern.search(''):
msg = "Regular expression %r matches zero-length string"
raise self.error('FORX0003', msg % pattern.pattern)
if input_string:
for value in pattern.split(input_string):
if value is not None and pattern.search(value) is None:
yield value
###
# Functions on anyURI
@method(function('resolve-uri', nargs=(1, 2),
sequence_types=('xs:string?', 'xs:string', 'xs:anyURI?')))
def evaluate_resolve_uri_function(self, context=None):
relative = self.get_argument(context, cls=str)
if len(self) == 1:
if self.parser.base_uri is None:
raise self.error('FONS0005')
elif relative is None:
return
elif not AnyURI.is_valid(relative):
raise self.error('FORG0002', '{!r} is not a valid URI'.format(relative))
else:
return self.get_absolute_uri(relative, as_string=False)
base_uri = self.get_argument(context, index=1, required=True, cls=str)
if not AnyURI.is_valid(base_uri):
raise self.error('FORG0002', '{!r} is not a valid URI'.format(base_uri))
elif relative is None:
return
elif not AnyURI.is_valid(relative):
raise self.error('FORG0002', '{!r} is not a valid URI'.format(relative))
else:
return self.get_absolute_uri(relative, base_uri, as_string=False)
###
# String functions
@method(function('codepoints-to-string', nargs=1,
sequence_types=('xs:integer*', 'xs:string')))
def evaluate_codepoints_to_string_function(self, context=None):
result = []
for value in self[0].select(context):
if not isinstance(value, int):
msg = "invalid type {} for codepoint {}".format(type(value), value)
if isinstance(value, str):
raise self.error('XPTY0004', msg)
raise self.error('FORG0006', msg)
elif is_xml_codepoint(value):
result.append(chr(value))
else:
msg = "{} is not a valid XML 1.0 codepoint".format(value)
raise self.error('FOCH0001', msg)
return ''.join(result)
@method(function('string-to-codepoints', nargs=1,
sequence_types=('xs:string?', 'xs:integer*')))
def evaluate_string_to_codepoints_function(self, context=None):
try:
return [ord(c) for c in self[0].evaluate(context)] or None
except TypeError:
raise self.error('XPTY0004', 'an xs:string required') from None
@method(function('compare', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string?', 'xs:string', 'xs:integer?')))
def evaluate_compare_function(self, context=None):
comp1 = self.get_argument(context, 0, cls=str, promote=(AnyURI, UntypedAtomic))
comp2 = self.get_argument(context, 1, cls=str, promote=(AnyURI, UntypedAtomic))
if comp1 is None or comp2 is None:
return
if len(self) < 3:
value = locale.strcoll(comp1, comp2)
else:
with self.use_locale(collation=self.get_argument(context, 2)):
value = locale.strcoll(comp1, comp2)
return 0 if not value else 1 if value > 0 else -1
@method(function('contains', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string?', 'xs:string', 'xs:boolean')))
def evaluate_contains_function(self, context=None):
arg1 = self.get_argument(context, default='', cls=str)
arg2 = self.get_argument(context, index=1, default='', cls=str)
if len(self) < 3:
return arg2 in arg1
else:
with self.use_locale(collation=self.get_argument(context, 2)):
return arg2 in arg1
@method(function('codepoint-equal', nargs=2,
sequence_types=('xs:string?', 'xs:string?', 'xs:boolean?')))
def evaluate_codepoint_equal_function(self, context=None):
comp1 = self.get_argument(context, 0, cls=str)
comp2 = self.get_argument(context, 1, cls=str)
if comp1 is None or comp2 is None:
return
elif len(comp1) != len(comp2):
return False
else:
return all(ord(c1) == ord(c2) for c1, c2 in zip(comp1, comp2))
@method(function('string-join', nargs=2,
sequence_types=('xs:string*', 'xs:string', 'xs:string')))
def evaluate_string_join_function(self, context=None):
items = [self.string_value(s) for s in self[0].select(context)]
return self.get_argument(context, 1, required=True, cls=str).join(items)
@method(function('normalize-unicode', nargs=(1, 2),
sequence_types=('xs:string?', 'xs:string', 'xs:string')))
def evaluate_normalize_unicode_function(self, context=None):
arg = self.get_argument(context, default='', cls=str)
if len(self) > 1:
normalization_form = self.get_argument(context, 1, cls=str)
if normalization_form is None:
raise self.error('XPTY0004', "2nd argument can't be an empty sequence")
else:
normalization_form = normalization_form.strip().upper()
else:
normalization_form = 'NFC'
if normalization_form == 'FULLY-NORMALIZED':
msg = "%r normalization form not supported" % normalization_form
raise self.error('FOCH0003', msg)
if not arg:
return ''
elif not normalization_form:
return arg
try:
return unicodedata.normalize(normalization_form, arg)
except ValueError:
msg = "unsupported normalization form %r" % normalization_form
raise self.error('FOCH0003', msg) from None
@method(function('upper-case', nargs=1, sequence_types=('xs:string?', 'xs:string')))
def evaluate_upper_case_function(self, context=None):
return self.get_argument(context, default='', cls=str).upper()
@method(function('lower-case', nargs=1, sequence_types=('xs:string?', 'xs:string')))
def evaluate_lower_case_function(self, context=None):
return self.get_argument(context, default='', cls=str).lower()
@method(function('encode-for-uri', nargs=1, sequence_types=('xs:string?', 'xs:string')))
def evaluate_encode_for_uri_function(self, context=None):
uri_part = self.get_argument(context, cls=str)
return '' if uri_part is None else urllib_quote(uri_part, safe='~')
@method(function('iri-to-uri', nargs=1, sequence_types=('xs:string?', 'xs:string')))
def evaluate_iri_to_uri_function(self, context=None):
iri = self.get_argument(context, cls=str, promote=AnyURI)
return '' if iri is None else urllib_quote(iri, safe='-_.!~*\'()#;/?:@&=+$,[]%')
@method(function('escape-html-uri', nargs=1, sequence_types=('xs:string?', 'xs:string')))
def evaluate_escape_html_uri_function(self, context=None):
uri = self.get_argument(context, cls=str)
if uri is None:
return ''
return urllib_quote(uri, safe=''.join(chr(cp) for cp in range(32, 127)))
@method(function('starts-with', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string?', 'xs:string', 'xs:boolean')))
def evaluate_starts_with_function(self, context=None):
arg1 = self.get_argument(context, default='', cls=str)
arg2 = self.get_argument(context, index=1, default='', cls=str)
if len(self) < 3:
return arg1.startswith(arg2)
else:
with self.use_locale(collation=self.get_argument(context, 2)):
return arg1.startswith(arg2)
@method(function('ends-with', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string?', 'xs:string', 'xs:boolean')))
def evaluate_ends_with_function(self, context=None):
arg1 = self.get_argument(context, default='', cls=str)
arg2 = self.get_argument(context, index=1, default='', cls=str)
if len(self) < 3:
return arg1.endswith(arg2)
else:
with self.use_locale(collation=self.get_argument(context, 2)):
return arg1.endswith(arg2)
@method(function('substring-before', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string?', 'xs:string', 'xs:string')))
@method(function('substring-after', nargs=(2, 3),
sequence_types=('xs:string?', 'xs:string?', 'xs:string', 'xs:string')))
def evaluate_substring_functions(self, context=None):
arg1 = self.get_argument(context, default='', cls=str)
arg2 = self.get_argument(context, index=1, default='', cls=str)
if len(self) < 3:
index = arg1.find(arg2)
else:
with self.use_locale(collation=self.get_argument(context, 2)):
index = arg1.find(arg2)
if index < 0:
return ''
if self.symbol == 'substring-before':
return arg1[:index]
else:
return arg1[index + len(arg2):]
###
# Functions on durations, dates and times
@method(function('years-from-duration', nargs=1,
sequence_types=('xs:duration?', 'xs:integer?')))
def evaluate_years_from_duration_function(self, context=None):
item = self.get_argument(context, cls=Duration)
if item is None:
return
else:
return item.months // 12 if item.months >= 0 else -(abs(item.months) // 12)
@method(function('months-from-duration', nargs=1,
sequence_types=('xs:duration?', 'xs:integer?')))
def evaluate_months_from_duration_function(self, context=None):
item = self.get_argument(context, cls=Duration)
if item is None:
return
else:
return item.months % 12 if item.months >= 0 else -(abs(item.months) % 12)
@method(function('days-from-duration', nargs=1,
sequence_types=('xs:duration?', 'xs:integer?')))
def evaluate_days_from_duration_function(self, context=None):
item = self.get_argument(context, cls=Duration)
if item is None:
return
else:
return item.seconds // 86400 if item.seconds >= 0 else -(abs(item.seconds) // 86400)
@method(function('hours-from-duration', nargs=1,
sequence_types=('xs:duration?', 'xs:integer?')))
def evaluate_hours_from_duration_function(self, context=None):
item = self.get_argument(context, cls=Duration)
if item is None:
return
else:
return item.seconds // 3600 % 24 if item.seconds >= 0 else -(abs(item.seconds) // 3600 % 24)
@method(function('minutes-from-duration', nargs=1,
sequence_types=('xs:duration?', 'xs:integer?')))
def evaluate_minutes_from_duration_function(self, context=None):
item = self.get_argument(context, cls=Duration)
if item is None:
return
else:
return item.seconds // 60 % 60 if item.seconds >= 0 else -(abs(item.seconds) // 60 % 60)
@method(function('seconds-from-duration', nargs=1,
sequence_types=('xs:duration?', 'xs:integer?')))
def evaluate_seconds_from_duration_function(self, context=None):
item = self.get_argument(context, cls=Duration)
if item is None:
return
else:
return item.seconds % 60 if item.seconds >= 0 else -(abs(item.seconds) % 60)
@method(function('year-from-dateTime', nargs=1, sequence_types=('xs:dateTime?', 'xs:integer?')))
@method(function('month-from-dateTime', nargs=1, sequence_types=('xs:dateTime?', 'xs:integer?')))
@method(function('day-from-dateTime', nargs=1, sequence_types=('xs:dateTime?', 'xs:integer?')))
@method(function('hours-from-dateTime', nargs=1, sequence_types=('xs:dateTime?', 'xs:integer?')))
@method(function('minutes-from-dateTime', nargs=1, sequence_types=('xs:dateTime?', 'xs:integer?')))
@method(function('seconds-from-dateTime', nargs=1, sequence_types=('xs:dateTime?', 'xs:decimal?')))
def evaluate_from_datetime_functions(self, context=None):
cls = DateTime if self.parser.xsd_version == '1.1' else DateTime10
item = self.get_argument(context, cls=cls)
if item is None:
return
elif self.symbol.startswith('year'):
return item.year
elif self.symbol.startswith('month'):
return item.month
elif self.symbol.startswith('day'):
return item.day
elif self.symbol.startswith('hour'):
return item.hour
elif self.symbol.startswith('minute'):
return item.minute
elif item.microsecond:
return Decimal('{}.{}'.format(item.second, item.microsecond))
else:
return item.second
@method(function('timezone-from-dateTime', nargs=1,
sequence_types=('xs:dateTime?', 'xs:dayTimeDuration?')))
def evaluate_timezone_from_datetime_function(self, context=None):
cls = DateTime if self.parser.xsd_version == '1.1' else DateTime10
item = self.get_argument(context, cls=cls)
if item is None or item.tzinfo is None:
return
return DayTimeDuration(seconds=item.tzinfo.offset.total_seconds())
@method(function('year-from-date', nargs=1, sequence_types=('xs:date?', 'xs:integer?')))
@method(function('month-from-date', nargs=1, sequence_types=('xs:date?', 'xs:integer?')))
@method(function('day-from-date', nargs=1, sequence_types=('xs:date?', 'xs:integer?')))
@method(function('timezone-from-date', nargs=1,
sequence_types=('xs:date?', 'xs:dayTimeDuration?')))
def evaluate_from_date_functions(self, context=None):
cls = Date if self.parser.xsd_version == '1.1' else Date10
item = self.get_argument(context, cls=cls)
if item is None:
return
elif self.symbol.startswith('year'):
return item.year
elif self.symbol.startswith('month'):
return item.month
elif self.symbol.startswith('day'):
return item.day
elif item.tzinfo is None:
return
return DayTimeDuration(seconds=item.tzinfo.offset.total_seconds())
@method(function('hours-from-time', nargs=1, sequence_types=('xs:time?', 'xs:integer?')))
def evaluate_hours_from_time_function(self, context=None):
item = self.get_argument(context, cls=Time)
return None if item is None else item.hour
@method(function('minutes-from-time', nargs=1, sequence_types=('xs:time?', 'xs:integer?')))
def evaluate_minutes_from_time_function(self, context=None):
item = self.get_argument(context, cls=Time)
return None if item is None else item.minute
@method(function('seconds-from-time', nargs=1, sequence_types=('xs:time?', 'xs:decimal?')))
def evaluate_seconds_from_time_function(self, context=None):
item = self.get_argument(context, cls=Time)
return None if item is None else item.second + item.microsecond / Decimal('1000000.0')
@method(function('timezone-from-time', nargs=1,
sequence_types=('xs:time?', 'xs:dayTimeDuration?')))
def evaluate_timezone_from_time_function(self, context=None):
item = self.get_argument(context, cls=Time)
if item is None or item.tzinfo is None:
return
return DayTimeDuration(seconds=item.tzinfo.offset.total_seconds())
###
# Timezone adjustment functions
@method(function('adjust-dateTime-to-timezone', nargs=(1, 2),
sequence_types=('xs:dateTime?', 'xs:dayTimeDuration?', 'xs:dateTime?')))
def evaluate_adjust_datetime_to_timezone_function(self, context=None):
cls = DateTime if self.parser.xsd_version == '1.1' else DateTime10
return self.adjust_datetime(context, cls)
@method(function('adjust-date-to-timezone', nargs=(1, 2),
sequence_types=('xs:date?', 'xs:dayTimeDuration?', 'xs:date?')))
def evaluate_adjust_date_to_timezone_function(self, context=None):
cls = Date if self.parser.xsd_version == '1.1' else Date10
return self.adjust_datetime(context, cls)
@method(function('adjust-time-to-timezone', nargs=(1, 2),
sequence_types=('xs:time?', 'xs:dayTimeDuration?', 'xs:time?')))
def evaluate_adjust_time_to_timezone_function(self, context=None):
return self.adjust_datetime(context, Time)
###
# Static context functions
@method(function('default-collation', nargs=0, sequence_types=('xs:string',)))
def evaluate_default_collation_function(self, context=None):
return self.parser.default_collation
@method(function('static-base-uri', nargs=0, sequence_types=('xs:anyURI?',)))
def evaluate_static_base_uri_function(self, context=None):
if self.parser.base_uri is not None:
return AnyURI(self.parser.base_uri)
###
# Dynamic context functions
@method(function('current-dateTime', nargs=0, sequence_types=('xs:dateTime',)))
def evaluate_current_datetime_function(self, context=None):
dt = datetime.datetime.now() if context is None else context.current_dt
if self.parser.xsd_version == '1.1':
return DateTime(dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second, dt.microsecond, dt.tzinfo)
return DateTime10(dt.year, dt.month, dt.day, dt.hour, dt.minute,
dt.second, dt.microsecond, dt.tzinfo)
@method(function('current-date', nargs=0, sequence_types=('xs:date',)))
def evaluate_current_date_function(self, context=None):
dt = datetime.datetime.now() if context is None else context.current_dt
if self.parser.xsd_version == '1.1':
return Date(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
return Date10(dt.year, dt.month, dt.day, tzinfo=dt.tzinfo)
@method(function('current-time', nargs=0, sequence_types=('xs:time',)))
def evaluate_current_time_function(self, context=None):
dt = datetime.datetime.now() if context is None else context.current_dt
return Time(dt.hour, dt.minute, dt.second, dt.microsecond, dt.tzinfo)
@method(function('implicit-timezone', nargs=0, sequence_types=('xs:dayTimeDuration',)))
def evaluate_implicit_timezone_function(self, context=None):
if context is not None and context.timezone is not None:
return DayTimeDuration.fromtimedelta(context.timezone.offset)
else:
return DayTimeDuration.fromtimedelta(datetime.timedelta(seconds=time.timezone))
###
# The root function (Ref: https://www.w3.org/TR/2010/REC-xpath-functions-20101214/#func-root)
@method(function('root', nargs=(0, 1), sequence_types=('node()?', 'node()?')))
def evaluate_root_function(self, context=None):
if context is None:
raise self.missing_context()
elif isinstance(context, XPathSchemaContext):
return
elif not self:
if context.item is None or is_xpath_node(context.item):
return context.root
else:
raise self.error('XPTY0004')
else:
item = self.get_argument(context)
if item is None:
return
elif not is_xpath_node(item):
raise self.error('XPTY0004')
elif any(item is x for x in context.iter()):
return context.root
try:
for uri, doc in context.documents.items():
doc_context = XPathContext(root=doc)
if any(item is x for x in doc_context.iter()):
return doc
except AttributeError:
pass
@method(function('lang', nargs=(1, 2),
sequence_types=('xs:string?', 'node()', 'xs:boolean')))
def evaluate_lang_function(self, context=None):
if len(self) > 1:
item = self.get_argument(context, index=1, default_to_context=True)
elif context is None:
raise self.missing_context()
else:
item = context.item
if not is_element_node(item):
raise self.error('XPTY0004')
try:
lang = item.attrib[XML_LANG].strip()
except KeyError:
if len(self) > 1:
return False
for elem in context.iter_ancestors():
try:
if XML_LANG in elem.attrib:
lang = elem.attrib[XML_LANG]
break
except AttributeError:
pass # is a document node
else:
return False
test_lang = self.get_argument(context, cls=str)
if test_lang is None:
return
test_lang = test_lang.strip().lower()
lang = lang.strip().lower()
return lang == test_lang or lang.startswith(test_lang) and lang[len(test_lang)] == '-'
###
# Functions that generate sequences
@method(function('element-with-id', nargs=(1, 2),
sequence_types=('xs:string*', 'node()', 'element()*')))
@method(function('id', nargs=(1, 2),
sequence_types=('xs:string*', 'node()', 'element()*')))
def select_id_function(self, context=None):
idrefs = {x for item in self[0].select(copy(context))
for x in self.string_value(item).split()}
node = self.get_argument(context, index=1, default_to_context=True)
if isinstance(context, XPathSchemaContext):
return
if not is_xpath_node(node):
raise self.error('XPTY0004')
elif not is_element_node(node) and not is_document_node(node):
return
# TODO: PSVI bindings with also xsi:type evaluation
for elem in node.iter():
if Id.is_valid(elem.text) and elem.text in idrefs:
if self.parser.schema is not None:
path = context.get_path(elem)
xsd_element = self.parser.schema.find(path, self.parser.namespaces)
if xsd_element is None or not xsd_element.type.is_key():
continue
idrefs.remove(elem.text)
if self.symbol == 'id':
yield elem
else:
parent = context.get_parent(elem)
if parent is not None:
yield parent
continue # pragma: no cover
for attr in map(lambda x: AttributeNode(*x), elem.attrib.items()):
if attr.value in idrefs:
if self.parser.schema is not None:
path = context.get_path(elem)
xsd_element = self.parser.schema.find(path, self.parser.namespaces)
if xsd_element is None:
continue
xsd_attribute = xsd_element.attrib.get(attr.name)
if xsd_attribute is None or not xsd_attribute.type.is_key():
continue # pragma: no cover
idrefs.remove(attr.value)
yield elem
break
@method(function('idref', nargs=(1, 2), sequence_types=('xs:string*', 'node()', 'node()*')))
def select_idref_function(self, context=None):
# TODO: PSVI bindings with also xsi:type evaluation
ids = [x for x in self[0].select(context=copy(context))]
node = self.get_argument(context, index=1, default_to_context=True)
if isinstance(context, XPathSchemaContext):
return
elif context is None or node is not context.item:
pass
elif context.item is None:
node = context.root
if not is_xpath_node(node):
raise self.error('XPTY0004')
elif not is_element_node(node) and not is_document_node(node):
return
for elem in node.iter():
if is_idrefs(elem.text) and any(v in elem.text.split() for x in ids for v in x.split()):
yield elem
continue
for attr in map(lambda x: AttributeNode(*x), elem.attrib.items()): # pragma: no cover
if attr.name != XML_ID and any(v in attr.value.split() for x in ids for v in x.split()):
yield elem
break
@method(function('doc', nargs=1, sequence_types=('xs:string?', 'document-node()?')))
@method(function('doc-available', nargs=1, sequence_types=('xs:string?', 'xs:boolean')))
def evaluate_doc_functions(self, context=None):
uri = self.get_argument(context)
if uri is None:
return None if self.symbol == 'doc' else False
elif context is None:
raise self.missing_context()
elif isinstance(uri, str):
pass
elif isinstance(uri, UntypedAtomic):
raise self.error('FODC0002')
else:
raise self.error('XPTY0004')
uri = self.get_absolute_uri(uri.strip())
if not isinstance(context, XPathSchemaContext):
try:
doc = context.documents[uri]
except (KeyError, TypeError):
if self.symbol == 'doc':
url_parts = urlsplit(uri)
if is_local_url_scheme(url_parts.scheme) \
and os.path.isdir(url_parts.path.lstrip(':')):
raise self.error('FODC0005', 'document URI is a directory')
raise self.error('FODC0002')
return False
else:
if doc is None:
raise self.error('FODC0002')
try:
sequence_type = self.parser.document_types[uri]
except (KeyError, TypeError):
sequence_type = 'document-node()'
if not self.parser.match_sequence_type(doc, sequence_type):
msg = "Type does not match sequence type {!r}"
raise self.wrong_sequence_type(msg.format(sequence_type))
return doc if self.symbol == 'doc' else True
@method(function('collection', nargs=(0, 1), sequence_types=('xs:string?', 'node()*')))
def evaluate_collection_function(self, context=None):
uri = self.get_argument(context)
if context is None:
raise self.missing_context()
elif isinstance(context, XPathSchemaContext):
return
elif not self or uri is None:
if context.default_collection is None:
raise self.error('FODC0002', 'no default collection has been defined')
collection = context.default_collection
sequence_type = self.parser.default_collection_type
else:
uri = self.get_absolute_uri(uri)
try:
collection = context.collections[uri]
except (KeyError, TypeError):
url_parts = urlsplit(uri)
if is_local_url_scheme(url_parts.scheme) and \
not url_parts.path.startswith(':') and url_parts.path.endswith('/'):
raise self.error('FODC0003', 'collection URI is a directory')
raise self.error('FODC0002', '{!r} collection not found'.format(uri)) from None
try:
sequence_type = self.parser.collection_types[uri]
except (KeyError, TypeError):
return collection
if not self.parser.match_sequence_type(collection, sequence_type):
msg = "Type does not match sequence type {!r}"
raise self.wrong_sequence_type(msg.format(sequence_type))
return collection
###
# The error function
#
# https://www.w3.org/TR/2010/REC-xpath-functions-20101214/#func-error
# https://www.w3.org/TR/xpath-functions/#func-error
#
@method(function('error', nargs=(0, 3),
sequence_types=('xs:QName?', 'xs:string', 'item()*', 'none')))
def evaluate_error_function(self, context=None):
if not self:
raise self.error('FOER0000')
elif len(self) == 1:
error = self.get_argument(context, cls=QName)
raise self.error(error or 'FOER0000')
else:
error = self.get_argument(context, cls=QName)
description = self.get_argument(context, index=1, cls=str)
raise self.error(error or 'FOER0000', description)
###
# The trace function
#
# https://www.w3.org/TR/2010/REC-xpath-functions-20101214/#func-trace
#
@method(function('trace', nargs=2, sequence_types=('item()*', 'xs:string', 'item()*')))
def select_trace_function(self, context=None):
label = self.get_argument(context, index=1, cls=str)
for value in self[0].select(context):
'{} {}'.format(label, str(value).strip()) # TODO: trace dataset
yield value
# XPath 2.0 definitions continue into module xpath2_constructors
| [
"[email protected]"
] | |
81c0be1f662045795975963953f6ca78e7b13dc9 | e173098f9ecd39bef112432a8bb7ed7fb1209fe9 | /wfm_client/migrations/0017_auto_20160914_1241.py | b48fd76cef7c8e48e380eb28be2f3b3e3694d74f | [] | no_license | isaiahiyede/inventory | 51b639257c14e257ababae047d83caa93b809893 | cedecc5b6d22d977b4bdac00e5faf775da7382ab | refs/heads/master | 2021-08-29T08:41:02.062763 | 2017-12-13T15:46:57 | 2017-12-13T15:46:57 | 114,137,491 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-14 12:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):

    dependencies = [
        ('wfm_client', '0016_item_item_edited_by'),
    ]

    operations = [
        migrations.AlterField(
            model_name='item',
            name='item_category',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='item_desc',
            field=models.CharField(blank=True, max_length=150, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='item_name',
            field=models.CharField(blank=True, max_length=50, null=True),
        ),
        migrations.AlterField(
            model_name='item',
            name='item_num',
            field=models.CharField(blank=True, max_length=100, null=True),
        ),
    ]
| [
"[email protected]"
] | |
d7148b6c3654a2afeb0b18046d3d86b65ce7fde1 | edd75bcf8c450dfce6b26b92b4fc012b399bd319 | /Exe41_dividas_com_juros.py | 83923f6847e4b9e94b7bd00a054b371381f67bc8 | [
"MIT"
] | permissive | lucaslk122/Exercicios-python-estrutura-de-repeticao | ac130cf9f6e78aff50e15e41aa1badfba55d6c8b | 1f203918e9bb8415128bb69f515240057b118a14 | refs/heads/main | 2022-12-20T17:08:43.105838 | 2020-10-20T14:08:15 | 2020-10-20T14:08:15 | 304,305,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,904 | py | print("""Quantidade de Parcelas % de Juros sobre o valor inicial da dívida
    1                                    0
    3                                   10
    6                                   15
    9                                   20
   12                                   25""")

divida = float(input("Digite o valor da sua divida: "))
quantidade_parcelas = int(input("Digite a quantidade de parcelas: "))

if quantidade_parcelas == 1:
    print(F"Valor da divida: R${divida}")
    print("Valor do juros: 0")
    print("Quantidade de parcelas: 1")
    print(f"Valor da parcela; R${divida}")
elif quantidade_parcelas == 3:
    print(F"Valor da divida: R${divida}")
    juros = round((divida * 0.1), 2)
    parcelas = round(((divida + juros) / quantidade_parcelas), 2)
    print(f"Valor do juros: R${juros}")
    print("Numero de parcelas: 3")
    print(f"Valor da parcela; R${parcelas}")
elif quantidade_parcelas == 6:
    print(F"Valor da divida: R${divida}")
    juros = round((divida * 0.15), 2)
    parcelas = round(((divida + juros) / quantidade_parcelas), 2)
    print(f"Valor do juros: R${juros}")
    print(f"Numero de parcelas: {quantidade_parcelas}")
    print(f"Valor da parcela; R${parcelas}")
elif quantidade_parcelas == 9:
    print(F"Valor da divida: R${divida}")
    juros = round((divida * 0.2), 2)
    parcelas = round(((divida + juros) / quantidade_parcelas), 2)
    print(f"Valor do juros: R${juros}")
    print(f"Numero de parcelas: {quantidade_parcelas}")
    print(f"Valor da parcela; R${parcelas}")
elif quantidade_parcelas == 12:
    print(F"Valor da divida: R${divida}")
    juros = round((divida * 0.25), 2)
    parcelas = round(((divida + juros) / quantidade_parcelas), 2)
    print(f"Valor do juros: R${juros}")
    print(f"Numero de parcelas: {quantidade_parcelas}")
    print(f"Valor da parcela; R${parcelas}")
else:
    print("Opção invalida, reinicie o programa") | [
"[email protected]"
] | |
f1876448cae5a208714bf6e18d72d3522170ef33 | 9576d5a3676b09f3b892083988cfbe6985a9ef4a | /resender.py | 465639185d7a50c7a097a50fe6b58cd6e0360243 | [] | no_license | ual-cci/music_gen_interaction_RTML | da4d2a8c754423d223ca342bac577967e291ad71 | 39419d5bd53ff685a2a9efcf4f373a624c8b28f9 | refs/heads/master | 2021-09-04T09:41:55.976149 | 2021-08-16T19:58:54 | 2021-08-16T19:58:54 | 215,372,452 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,150 | py |
"""
def resender_function(arr):
pass
"""
from autoencoder.autoencoders_handler import process_using_VAE, to_latent_vector_using_VAE
def resender_function(arr):
# Send one frame only - the last generated one
#send_one_frame(arr)
send_all_gen_frames(arr)
#encode_spectrogram_VAE(arr)
#arr = process_spectrogram_VAE(arr)
# return spectrogram ...
return arr
def process_spectrogram_VAE(arr):
print("processing")
processed = process_using_VAE(arr)
print("HAX OUTPUT === ", processed.shape)
return processed
def encode_spectrogram_VAE(arr):
print("encoding")
latents = to_latent_vector_using_VAE(arr)
print("HAX OUTPUT === ", latents.shape)
sequence_length = 40
to_send = latents #[sequence_length:]
for count, one_frame in enumerate(to_send):
print("HAX OUTPUT === ", one_frame.shape)
if count % 4 == 0:
osc_handler.send_arr(one_frame)
def send_one_frame(arr):
last_spectum = arr[-1]
low_frequencies = last_spectum[0:512]
#print("HAX OUTPUT === ", low_frequencies.shape)
global osc_handler
osc_handler.send_arr(low_frequencies)
def send_all_gen_frames(arr):
# settings/server sequence_length by default on 40
sequence_length = 40
global osc_handler
to_send = arr[sequence_length:]
for count, one_frame in enumerate(to_send):
low_frequencies = one_frame[0:512]
#print("HAX OUTPUT === ", low_frequencies.shape)
if count % 4 == 0:
osc_handler.send_arr(low_frequencies)
# https://github.com/kivy/oscpy
from oscpy.client import OSCClient
class OSCSender(object):
"""
    Sends OSC messages from the GUI.
"""
    def send_arr(self, arr):
signal_latent = arr
signal_latent = [float(v) for v in signal_latent]
print("Sending message=", [0, 0, len(signal_latent)])
self.osc.send_message(b'/send_gan_i', [0, 0] + signal_latent)
def __init__(self):
#address = "127.0.0.1"
#port = 8000
address = '0.0.0.0'
port = 8000
self.osc = OSCClient(address, port)
osc_handler = OSCSender()
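# Minimal usage sketch (assumes `arr` is a 2-D spectrogram, newest frame last,
# with at least 512 frequency bins per frame; the input below is hypothetical):
#
#   import numpy as np
#   fake_spectrogram = np.abs(np.random.randn(48, 1024)).astype('float32')
#   resender_function(fake_spectrogram)  # sends every 4th frame after the first 40 over OSC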
| [
"[email protected]"
] | |
65124adc85975262069df0af5fa469ff0f938db3 | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nncorpul.py | f663c3de78ca7c9ee1c5fec007032320a6f1f46a | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 426 | py | ii = [('LeakWTI3.py', 1), ('AdamWEP.py', 1), ('ClarGE2.py', 1), ('LyttELD.py', 1), ('CoolWHM.py', 1), ('CrokTPS.py', 1), ('ClarGE.py', 7), ('AinsWRR.py', 1), ('MedwTAI.py', 1), ('GodwWLN.py', 1), ('MedwTAI2.py', 1), ('HogaGMM.py', 1), ('CoolWHM3.py', 3), ('DequTKM.py', 1), ('FitzRNS.py', 2), ('FerrSDO.py', 1), ('RoscTTI.py', 1), ('ClarGE3.py', 7), ('DibdTRL.py', 1), ('HogaGMM2.py', 1), ('BeckWRE.py', 1), ('ClarGE4.py', 6)] | [
"[email protected]"
] | |
7bb2e73006c6ee160b2255fc289034c470d89208 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03126/s820418794.py | c6ce4db220fdbf39b8cd22445a417e1ceea7b485 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 172 | py | n, m = map(int, input().split())
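# foods[j] counts how many of the n people like food j;
# the printed answer is the number of foods liked by all n people.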
foods = [0] * (m + 1)
for i in range(n):
k, *a = map(int, input().split())
for j in a:
foods[j] += 1
print(foods.count(n)) | [
"[email protected]"
] | |
f9f75ef1594181d3bfa18a88bdb0291d5d66b770 | 2c565dadbf0f02fe0f08c2b2111bf71140a018dc | /convert.py | 47f7d78c53a21d89a8e24ea0245ae57015705dd7 | [] | no_license | nottrobin/discoursifier | e30396df2c13e9a9481149ac9f2f2cd6e820fddb | b803900b676ea56e9bced58044662c35f0616a42 | refs/heads/master | 2020-04-22T09:52:55.556816 | 2019-02-12T09:05:08 | 2019-02-12T09:05:08 | 144,270,341 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,682 | py | #! /usr/bin/env python3
from glob import glob
import markdown
import re
import json
filepaths = glob("**/*.md", recursive=True)
def convert_notifications(content):
"""
Convert old-style notifications:
!!! Note "title":
this is some note contents
Into new style:
[note="title"]
this is some note contents
[/note]
"""
notification_match = (
"!!! (Note|Warning|Positive|Negative|Important|Tip|Information)"
'(?: "([^"]*)")?:?(.*\n(?: .+\n)*)'
)
for match in re.finditer(notification_match, content):
matched_text = match.group(0)
note_type = match.group(1).lower()
title = match.group(2)
body = match.group(3).strip()
if note_type in ["warning", "important"]:
note_type = "caution"
if note_type == "tip":
note_type = "note"
if note_type and body:
body = re.sub("^ ", "", body).replace("\n ", "\n")
options = ""
if note_type != "note":
options = f"={note_type}"
if title:
options = f'{options} title="{title}"'
replacement = f"[note{options}]\n{body}\n[/note]\n"
content = content.replace(matched_text, replacement)
return content
def convert_metadata(content):
"""
Convert Markdown metadata
(See https://python-markdown.github.io/extensions/meta_data/)
"Title" will be added as a <h1>, if there isn't one already
"TODO" will be preserved in `<!-- -->` HTML comments
anything else will be ignored
"""
parser = markdown.Markdown(extensions=["markdown.extensions.meta"])
parser.convert(content)
title = parser.Meta.get("title", [None])[0]
todo = "\n- ".join(parser.Meta.get("todo", []))
content = re.sub("^( *\w.*\n)*", "", content).lstrip()
title_match = re.match("^# ([^\n]+)(.*)$", content, re.DOTALL)
if title_match:
# Prefer the <h1> value to the metadata
title = title_match.groups()[0]
content = title_match.groups()[1].strip()
if todo:
content = f"<!--\nTodo:\n- {todo}\n-->\n\n" + content
return title, content
title_map = {}
# Convert markdown
for path in filepaths:
with open(path) as file_handle:
content = file_handle.read()
content = convert_notifications(content)
title, content = convert_metadata(content)
title_map[path] = title
with open(path, "w") as file_handle:
file_handle.write(content)
# Write title mapping to file
with open("title-map.json", "w") as title_map_file:
json.dump(title_map, title_map_file)
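# Usage sketch: run from the root of the markdown tree, e.g. `python3 convert.py`;
# the glob() above resolves "**/*.md" relative to the current working directory.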
| [
"[email protected]"
] | |
491148774ac1fa2b690aa1334fcef76f3d45bf60 | ac69799f105ec928ecfd5e1aa67062b3e19dead3 | /sdk/python/tests/compiler/testdata/tekton_pipeline_conf.py | 481b38418982617f055d134e0b11f7f8a6f2541a | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | kubeflow/kfp-tekton | 5326373a4056b2b3ad78fc9199cae91c9a084282 | 136e7a93528b1b5845dcab058d46272b15af6c54 | refs/heads/master | 2023-08-05T07:25:29.697741 | 2023-08-03T16:04:20 | 2023-08-03T16:04:20 | 217,148,415 | 149 | 111 | Apache-2.0 | 2023-09-11T18:21:37 | 2019-10-23T20:33:01 | TypeScript | UTF-8 | Python | false | false | 2,470 | py | # Copyright 2021 kubeflow.org
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from kfp import dsl, components
import kfp_tekton
import json
from kubernetes.client import V1SecurityContext
from kubernetes.client.models import V1Volume, V1PersistentVolumeClaimVolumeSource, \
V1PersistentVolumeClaimSpec, V1ResourceRequirements
def echo_op():
return components.load_component_from_text("""
name: echo
description: echo
implementation:
container:
image: busybox
command:
- sh
- -c
args:
- echo
- Got scheduled
""")()
@dsl.pipeline(
name='echo',
description='echo pipeline'
)
def echo_pipeline():
echo = echo_op()
workspace_json = {'new-ws': {"readOnly": True}}
echo.add_pod_annotation('workspaces', json.dumps(workspace_json))
pipeline_conf = kfp_tekton.compiler.pipeline_utils.TektonPipelineConf()
pipeline_conf.add_pipeline_label('test', 'label')
pipeline_conf.add_pipeline_label('test2', 'label2')
pipeline_conf.add_pipeline_annotation('test', 'annotation')
pipeline_conf.set_security_context(V1SecurityContext(run_as_user=0))
pipeline_conf.set_automount_service_account_token(False)
pipeline_conf.add_pipeline_env('WATSON_CRED', 'ABCD1234')
pipeline_conf.add_pipeline_workspace(workspace_name="new-ws", volume=V1Volume(
name='data',
persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
claim_name='data-volume')
), path_prefix='artifact_data/')
pipeline_conf.add_pipeline_workspace(workspace_name="new-ws-template",
volume_claim_template_spec=V1PersistentVolumeClaimSpec(
access_modes=["ReadWriteOnce"],
resources=V1ResourceRequirements(requests={"storage": "30Gi"})
))
pipeline_conf.set_generate_component_spec_annotations(False)
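# The conf above customizes the compiled Tekton output: pipeline labels and
# annotations, pod security context, service-account token mounting, an env
# var, and workspaces backed by a PVC and by a volumeClaimTemplate.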
if __name__ == "__main__":
from kfp_tekton.compiler import TektonCompiler
TektonCompiler().compile(echo_pipeline, 'echo_pipeline.yaml', tekton_pipeline_conf=pipeline_conf)
| [
"[email protected]"
] | |
aae3482d533ac325cf980331aa0c2e91802bc44c | 625daac7e73b98935f9fe93e647eb809b48b712e | /Arcade/The_Core/isSmooth.py | 3866fb0c2934491668610ab80dcd586c71aad324 | [] | no_license | aleksaa01/codefights-codesignal | 19b2d70779cc60f62511b6f88ae5d049451eac82 | a57a5589ab2c9d9580ef44900ea986c826b23051 | refs/heads/master | 2022-03-15T04:46:40.356440 | 2019-12-08T15:41:37 | 2019-12-08T15:41:37 | 112,034,380 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | def isSmooth(arr):
first = arr[0]
last = arr[-1]
if len(arr) % 2 == 0:
middle = arr[len(arr) // 2 - 1] + arr[len(arr) // 2]
else:
middle = arr[len(arr) // 2]
return first == middle == last
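# e.g. isSmooth([7, 2, 2, 5, 10, 7]) -> True   (7 == 2 + 5 == 7)
#      isSmooth([-5, -5, 10])        -> False  (last element differs)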
"""
We define the middle of the array arr as follows:
if arr contains an odd number of elements, its middle is the element whose index number is the same when counting
from the beginning of the array and from its end;
if arr contains an even number of elements, its middle is the sum of the two elements whose index numbers when
counting from the beginning and from the end of the array differ by one.
An array is called smooth if its first and its last elements are equal to one another and to the middle.
Given an array arr, determine if it is smooth or not.
Example
For arr = [7, 2, 2, 5, 10, 7], the output should be
isSmooth(arr) = true.
The first and the last elements of arr are equal to 7, and its middle also equals 2 + 5 = 7.
Thus, the array is smooth and the output is true.
For arr = [-5, -5, 10], the output should be
isSmooth(arr) = false.
The first and middle elements are equal to -5, but the last element equals 10.
Thus, arr is not smooth and the output is false.
""" | [
"[email protected]"
] | |
4a2196a9ecc0a0210ca916a9f75a99c30dd18bba | c268dcf432f3b7171be6eb307aafbe1bd173285a | /reddit2telegram/channels/~inactive/comedynecrophilia/app.py | 32bb06036f62836443d6cbaa929a8408c8d36c71 | [
"MIT"
] | permissive | Fillll/reddit2telegram | a7162da2cc08c81bcc8078ea4160d4ee07461fee | 5d8ee3097e716734d55a72f5a16ce3d7467e2ed7 | refs/heads/master | 2023-08-09T10:34:16.163262 | 2023-07-30T18:36:19 | 2023-07-30T18:36:19 | 67,726,018 | 258 | 205 | MIT | 2023-09-07T02:36:36 | 2016-09-08T17:39:46 | Python | UTF-8 | Python | false | false | 155 | py | #encoding:utf-8
subreddit = 'comedynecrophilia'
t_channel = '@comedynecrophilia'
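# Channel config consumed by the reddit2telegram runner; send_post() below is
# the standard per-channel hook and forwards each submission unchanged.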
def send_post(submission, r2t):
return r2t.send_simple(submission)
| [
"[email protected]"
] | |
7a69ab2b1e9995247ed2756773d1d9f7a656de28 | fcb4a07f27494497ef03e157de7ab50fa4e9375f | /core/admin.py | e76cfa38f98048dcac69da644072a4683ef1269b | [] | no_license | 21toffy/IMA | 2ff452025fad908270d2aab0bafa3ee4c26c7710 | 03770d49578817e1466cedc8e09df1840f5349b0 | refs/heads/master | 2023-03-02T02:15:58.778086 | 2021-02-14T16:08:01 | 2021-02-14T16:08:01 | 298,578,125 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 154 | py | from django.contrib import admin
from .models import HomeView, ViewCount
admin.site.register(HomeView)
admin.site.register(ViewCount) | [
"[email protected]"
] | |
7a6a61c44bb4e6e43038823fc2ef01793c1f76ee | a6d77d00163b80dfb6d34ee254f9ba049290e43e | /fabfile.py | c58c9178b11c15c41c67b72a520ac88bcb192822 | [
"Apache-2.0"
] | permissive | tswicegood/armstrong.core.arm_layout | 6f1cf6c9dc2c9a4030348fa88972eb8a57682705 | 70850a3068660b51a93816e83ecee73637c781c0 | refs/heads/master | 2021-01-16T21:04:18.735887 | 2011-07-20T20:33:59 | 2011-07-20T20:33:59 | 2,080,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | true | false | 365 | py | from armstrong.dev.tasks import *
settings = {
'DEBUG': True,
'INSTALLED_APPS': (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'armstrong.core.arm_layout',
'lettuce.django',
),
}
tested_apps = ("arm_layout", )
| [
"[email protected]"
] | |
aa4ae4645bd75fa85f73bee597894e82e3e0ac43 | 209c876b1e248fd67bd156a137d961a6610f93c7 | /python/paddle/fluid/tests/unittests/test_basic_gru_unit_op.py | ae3e6dc4f77d2653909e2ea5e62135ab3859e314 | [
"Apache-2.0"
] | permissive | Qengineering/Paddle | 36e0dba37d29146ebef4fba869490ecedbf4294e | 591456c69b76ee96d04b7d15dca6bb8080301f21 | refs/heads/develop | 2023-01-24T12:40:04.551345 | 2022-10-06T10:30:56 | 2022-10-06T10:30:56 | 544,837,444 | 0 | 0 | Apache-2.0 | 2022-10-03T10:12:54 | 2022-10-03T10:12:54 | null | UTF-8 | Python | false | false | 4,838 | py | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import paddle.fluid as fluid
import paddle.fluid.layers as layers
import paddle.fluid.core as core
from paddle.fluid.contrib.layers import BasicGRUUnit
from paddle.fluid.executor import Executor
from paddle.fluid import framework
import numpy as np
np.random.seed(123)
SIGMOID_THRESHOLD_MIN = -40.0
SIGMOID_THRESHOLD_MAX = 13.0
EXP_MAX_INPUT = 40.0
def sigmoid(x):
y = np.copy(x)
y[x < SIGMOID_THRESHOLD_MIN] = SIGMOID_THRESHOLD_MIN
y[x > SIGMOID_THRESHOLD_MAX] = SIGMOID_THRESHOLD_MAX
return 1. / (1. + np.exp(-y))
def tanh(x):
y = -2. * x
y[y > EXP_MAX_INPUT] = EXP_MAX_INPUT
return (2. / (1. + np.exp(y))) - 1.
def step(step_in, pre_hidden, gate_w, gate_b, candidate_w, candidate_b):
concat_1 = np.concatenate([step_in, pre_hidden], 1)
gate_input = np.matmul(concat_1, gate_w)
gate_input += gate_b
gate_input = sigmoid(gate_input)
r, u = np.split(gate_input, indices_or_sections=2, axis=1)
r_hidden = r * pre_hidden
candidate = np.matmul(np.concatenate([step_in, r_hidden], 1), candidate_w)
candidate += candidate_b
c = tanh(candidate)
new_hidden = u * pre_hidden + (1 - u) * c
return new_hidden
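# The reference step above implements the standard GRU update:
#   r, u = sigmoid([x, h_prev] @ W_gate + b_gate)    (reset / update gates)
#   c    = tanh([x, r * h_prev] @ W_cand + b_cand)   (candidate state)
#   h    = u * h_prev + (1 - u) * c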
class TestBasicGRUUnit(unittest.TestCase):
def setUp(self):
self.hidden_size = 5
self.batch_size = 5
def test_run(self):
x = layers.data(name='x', shape=[-1, self.hidden_size], dtype='float32')
pre_hidden = layers.data(name="pre_hidden",
shape=[-1, self.hidden_size],
dtype='float32')
gru_unit = BasicGRUUnit("gru_unit", self.hidden_size)
new_hidden = gru_unit(x, pre_hidden)
        new_hidden.persistable = True
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
else:
place = core.CPUPlace()
exe = Executor(place)
exe.run(framework.default_startup_program())
param_list = fluid.default_main_program().block(0).all_parameters()
# process weight and bias
gate_w_name = "gru_unit/BasicGRUUnit_0.w_0"
gate_b_name = "gru_unit/BasicGRUUnit_0.b_0"
candidate_w_name = "gru_unit/BasicGRUUnit_0.w_1"
candidate_b_name = "gru_unit/BasicGRUUnit_0.b_1"
gate_w = np.array(
fluid.global_scope().find_var(gate_w_name).get_tensor())
gate_w = np.random.uniform(-0.1, 0.1,
size=gate_w.shape).astype('float32')
fluid.global_scope().find_var(gate_w_name).get_tensor().set(
gate_w, place)
gate_b = np.array(
fluid.global_scope().find_var(gate_b_name).get_tensor())
gate_b = np.random.uniform(-0.1, 0.1,
size=gate_b.shape).astype('float32')
fluid.global_scope().find_var(gate_b_name).get_tensor().set(
gate_b, place)
candidate_w = np.array(
fluid.global_scope().find_var(candidate_w_name).get_tensor())
candidate_w = np.random.uniform(
-0.1, 0.1, size=candidate_w.shape).astype('float32')
fluid.global_scope().find_var(candidate_w_name).get_tensor().set(
candidate_w, place)
candidate_b = np.array(
fluid.global_scope().find_var(candidate_b_name).get_tensor())
candidate_b = np.random.uniform(
-0.1, 0.1, size=candidate_b.shape).astype('float32')
fluid.global_scope().find_var(candidate_b_name).get_tensor().set(
candidate_b, place)
step_input_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
pre_hidden_np = np.random.uniform(
-0.1, 0.1, (self.batch_size, self.hidden_size)).astype('float32')
out = exe.run(feed={
'x': step_input_np,
'pre_hidden': pre_hidden_np
},
fetch_list=[new_hidden])
api_out = out[0]
np_out = step(step_input_np, pre_hidden_np, gate_w, gate_b, candidate_w,
candidate_b)
np.testing.assert_allclose(api_out, np_out, rtol=0.0001, atol=0)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
ff9ea866ff4b14d685a908c860c705eb0ba43a96 | 0a7223017a2e2f83fa15f5ffbe2355c92ab1e62d | /landscapes_assignment/settings.py | 399c2d01e8c170cc8c50b4e0f117d1558955e1cc | [] | no_license | mazurbeam/landscapes | 7d033dad59e3cbceae1c2ca563de726f5286ae48 | 94085cf5cc8f7f40a93c4d4bb5ab652a87bfa448 | refs/heads/master | 2021-01-01T15:26:38.899601 | 2017-07-18T15:43:59 | 2017-07-18T15:43:59 | 97,616,642 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,162 | py | """
Django settings for landscapes_assignment project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'vr1@i&#b6n+5^sc_=z_6@6l3s_lk&szc5h=()e5evp6ks2q32e'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'apps.landscapes',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'landscapes_assignment.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'landscapes_assignment.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"[email protected]"
] | |
b87776d03301cf9cd4a5dbd6cedc5763d2a60525 | 1375f57f96c4021f8b362ad7fb693210be32eac9 | /kubernetes/test/test_v1_subject_access_review.py | 813eff18c4f80272dcd42bfa8903f85d0e4fa9b2 | [
"Apache-2.0"
] | permissive | dawidfieluba/client-python | 92d637354e2f2842f4c2408ed44d9d71d5572606 | 53e882c920d34fab84c76b9e38eecfed0d265da1 | refs/heads/master | 2021-12-23T20:13:26.751954 | 2017-10-06T22:29:14 | 2017-10-06T22:29:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 909 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.7.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_subject_access_review import V1SubjectAccessReview
class TestV1SubjectAccessReview(unittest.TestCase):
""" V1SubjectAccessReview unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SubjectAccessReview(self):
"""
Test V1SubjectAccessReview
"""
model = kubernetes.client.models.v1_subject_access_review.V1SubjectAccessReview()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
af32c19a05a7ba77388de6a5ca7225230bb65d65 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /pdMwiMpYkJkn8WY83_6.py | eb07438f90569faa3aa2f1d20414768ac52e5228 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 424 | py | """
Write a function that **recursively** determines if a string is a palindrome.
### Examples
is_palindrome("abcba") ➞ True
is_palindrome("b") ➞ True
is_palindrome("") ➞ True
is_palindrome("ad") ➞ False
### Notes
An empty string counts as a palindrome.
"""
def is_palindrome(word):
if len(word) < 2: return True
return word[0] == word[-1] and is_palindrome(word[1:-1])
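# Trace: is_palindrome("abcba") -> 'a' == 'a' and is_palindrome("bcb")
#        -> 'b' == 'b' and is_palindrome("c") -> True (base case: len < 2)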
| [
"[email protected]"
] | |
c1225ac0b8b80664c5c7e7d70b5f53f12f8fd153 | 0b3c5260cd5c33a1beccc5710a5d0fd097a5ea15 | /anchore_engine/db/db_archivedocument.py | b13024780094cf487f4e5bfefd708f78d7b91e2c | [
"Apache-2.0"
] | permissive | omerlh/anchore-engine | fb2d7cb3d8bd259f6c973b450fbaa2c2e00497f0 | 669a0327f8baaee3f5c7c64b482909fe38830d80 | refs/heads/master | 2021-09-02T12:48:51.661648 | 2018-01-02T19:26:47 | 2018-01-02T19:26:47 | 116,236,136 | 1 | 0 | null | 2018-01-04T08:41:39 | 2018-01-04T08:41:39 | null | UTF-8 | Python | false | false | 4,670 | py | import time
from anchore_engine import db
from anchore_engine.db import ArchiveDocument
# specific DB interface helpers for the ArchiveDocument table
def add(userId, bucket, archiveId, documentName, inobj, session=None):
if not session:
session = db.Session
our_result = session.query(ArchiveDocument).filter_by(userId=userId, bucket=bucket,archiveId=archiveId,documentName=documentName).first()
if not our_result:
        new_document = ArchiveDocument(userId=userId, bucket=bucket, archiveId=archiveId, documentName=documentName)
        new_document.update(inobj)
        session.add(new_document)
else:
dbobj = {}
dbobj.update(inobj)
our_result.update(dbobj)
dbobj.clear()
return(True)
def get_all(session=None):
if not session:
session = db.Session
ret = []
our_results = session.query(ArchiveDocument)
for result in our_results:
obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
ret.append(obj)
return(ret)
def get(userId, bucket, archiveId, session=None):
#session = db.Session()
ret = {}
result = session.query(ArchiveDocument).filter_by(userId=userId, bucket=bucket, archiveId=archiveId).first()
if result:
obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
ret.update(obj)
return(ret)
def get_byname(userId, documentName, session=None):
if not session:
session = db.Session
ret = {}
result = session.query(ArchiveDocument).filter_by(userId=userId, documentName=documentName).first()
if result:
obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
ret = obj
return(ret)
def exists(userId, bucket, archiveId, session=None):
if not session:
session = db.Session
ret = {}
result = session.query(ArchiveDocument.userId, ArchiveDocument.bucket, ArchiveDocument.archiveId).filter_by(userId=userId, bucket=bucket, archiveId=archiveId).first()
if result:
for i in range(0, len(result.keys())):
k = result.keys()[i]
ret[k] = result[i]
#obj = dict((key,value) for key, value in vars(result).iteritems() if not key.startswith('_'))
#ret = obj
return(ret)
def list_all(session=None, **dbfilter):
if not session:
session = db.Session
ret = []
results = session.query(ArchiveDocument.bucket, ArchiveDocument.archiveId, ArchiveDocument.userId, ArchiveDocument.record_state_key, ArchiveDocument.record_state_val, ArchiveDocument.created_at, ArchiveDocument.last_updated).filter_by(**dbfilter)
for result in results:
obj = {}
for i in range(0,len(result.keys())):
k = result.keys()[i]
obj[k] = result[i]
if obj:
ret.append(obj)
return(ret)
def list_all_byuserId(userId, session=None, **dbfilter):
if not session:
session = db.Session
ret = []
dbfilter['userId'] = userId
results = session.query(ArchiveDocument.bucket, ArchiveDocument.archiveId, ArchiveDocument.userId, ArchiveDocument.record_state_key, ArchiveDocument.record_state_val, ArchiveDocument.created_at, ArchiveDocument.last_updated).filter_by(**dbfilter)
for result in results:
obj = {}
for i in range(0,len(result.keys())):
k = result.keys()[i]
obj[k] = result[i]
if obj:
ret.append(obj)
return(ret)
def update(userId, bucket, archiveId, documentName, inobj, session=None):
return(add(userId, bucket, archiveId, documentName, inobj, session=session))
def delete_byfilter(userId, remove=True, session=None, **dbfilter):
if not session:
session = db.Session
ret = False
results = session.query(ArchiveDocument).filter_by(**dbfilter)
if results:
for result in results:
if remove:
session.delete(result)
else:
result.update({"record_state_key": "to_delete", "record_state_val": str(time.time())})
ret = True
return(ret)
def delete(userId, bucket, archiveId, remove=True, session=None):
if not session:
session = db.Session
result = session.query(ArchiveDocument).filter_by(userId=userId, bucket=bucket, archiveId=archiveId).first()
if result:
if remove:
session.delete(result)
else:
result.update({"record_state_key": "to_delete", "record_state_val": str(time.time())})
return(True)
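# Usage sketch (assumes a live SQLAlchemy session; `session_scope` is shown as
# a hypothetical context-manager helper, not part of this module, and the
# payload columns are an assumption):
#
#   with session_scope() as dbsession:
#       add(userId, bucket, archiveId, name, {"jsondata": "..."}, session=dbsession)
#       doc = get(userId, bucket, archiveId, session=dbsession)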
| [
"[email protected]"
] | |
079c862affe8e445280aaa0c46eb37b192e9b4c3 | ffe59803cd35129ea317a53b2b4a754bfb9a200d | /longest_common_prefix.py | 6591f14b796eff634370383a695dd8b356ff4109 | [] | no_license | codyowl/leetcode | 78a7a96c54d6592c34987620793eed3dcf1fe1fd | 706924944c6b8d94a7247de13ffb9b1d715496fd | refs/heads/master | 2020-06-16T10:19:09.477053 | 2019-08-30T13:37:14 | 2019-08-30T13:37:14 | 195,537,374 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,353 | py | class Solution(object):
def longestCommonPrefix(self, strs):
"""
:type strs: List[str]
:rtype: str
"""
prefix_first = []
        if len(strs) == 1:
            # a single string is its own longest common prefix
            if len(strs[0]) > 0:
                return strs[0]
            else:
                return ""
else:
none_finder = [data for data in strs if len(data)>0]
if len(none_finder) != len(strs):
return ""
else:
for data in strs:
prefix_first.append(data[0])
                #check whether at least the first letter is the same or not
if not len(set(prefix_first)) == 1:
return ""
else:
min_length = min([len(d) for d in strs])
# truncating the list based on minimum length of words
trun_list = [data[0:min_length+1] for data in strs]
prefix_list = []
# to get all the letters from words
for i in range(min_length):
prefix_list.append([data[i] for data in trun_list])
final_str = ""
for data in prefix_list:
if len(set(data)) == 1:
final_str += data[0]
else:
break
return final_str
s = Solution()
# tweaked after seeing this input on test case
# print s.longestCommonPrefix(["", ""])
# # tweaked after seeing this input on test case
# print s.longestCommonPrefix([""])
# # tweaked after seeing this input on test case
# print s.longestCommonPrefix(["", "b"])
print s.longestCommonPrefix(["abab","aba",""]) | [
"[email protected]"
] | |
9f9485d4d8b84ea56512f623d103aa2a35a1a322 | 006341ca12525aa0979d6101600e78c4bd9532ab | /CMS/Zope-3.2.1/Dependencies/ZODB-Zope-3.2.1/ZODB/FileStorage/fspack.py | 24eaa776b4a3b4f3496260e9aeae91ce4db9d7d0 | [
"ZPL-2.1",
"Python-2.0",
"ICU",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference",
"ZPL-2.0"
] | permissive | germanfriday/code-examples-sandbox | d0f29e20a3eed1f8430d06441ac2d33bac5e4253 | 4c538584703754c956ca66392fdcecf0a0ca2314 | refs/heads/main | 2023-05-30T22:21:57.918503 | 2021-06-15T15:06:47 | 2021-06-15T15:06:47 | 377,200,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,676 | py | ##############################################################################
#
# Copyright (c) 2003 Zope Corporation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""FileStorage helper to perform pack.
A storage contains an ordered set of object revisions. When a storage
is packed, object revisions that are not reachable as of the pack time
are deleted. The notion of reachability is complicated by
backpointers -- object revisions that point to earlier revisions of
the same object.
An object revision is reachable at a certain time if it is reachable
from the revision of the root at that time or if it is reachable from
a backpointer after that time.
"""
import os
from logging import error, warn  # assumption: stand-ins for the log helpers the original imports elsewhere
from ZODB.POSException import UndoError
from ZODB.serialize import referencesf
from ZODB.utils import p64, u64, z64
from ZODB.fsIndex import fsIndex
from ZODB.FileStorage.format \
import FileStorageFormatter, CorruptedDataError, DataHeader, \
TRANS_HDR_LEN
class DataCopier(FileStorageFormatter):
"""Mixin class for copying transactions into a storage.
The restore() and pack() methods share a need to copy data records
and update pointers to data in earlier transaction records. This
class provides the shared logic.
The mixin extends the FileStorageFormatter with a copy() method.
It also requires that the concrete class provides the following
attributes:
_file -- file with earlier destination data
_tfile -- destination file for copied data
_pos -- file pos of destination transaction
_tindex -- maps oid to data record file pos
_tvindex -- maps version name to data record file pos
_tindex and _tvindex are updated by copy().
The copy() method does not do any locking.
"""
def _txn_find(self, tid, stop_at_pack):
# _pos always points just past the last transaction
pos = self._pos
while pos > 4:
self._file.seek(pos - 8)
pos = pos - u64(self._file.read(8)) - 8
self._file.seek(pos)
h = self._file.read(TRANS_HDR_LEN)
_tid = h[:8]
if _tid == tid:
return pos
if stop_at_pack:
if h[16] == 'p':
break
raise UndoError(None, "Invalid transaction id")
def _data_find(self, tpos, oid, data):
# Return backpointer for oid. Must call with the lock held.
# This is a file offset to oid's data record if found, else 0.
# The data records in the transaction at tpos are searched for oid.
# If a data record for oid isn't found, returns 0.
# Else if oid's data record contains a backpointer, that
# backpointer is returned.
# Else oid's data record contains the data, and the file offset of
# oid's data record is returned. This data record should contain
# a pickle identical to the 'data' argument.
# Unclear: If the length of the stored data doesn't match len(data),
# an exception is raised. If the lengths match but the data isn't
# the same, 0 is returned. Why the discrepancy?
h = self._read_txn_header(tpos)
tend = tpos + h.tlen
pos = self._file.tell()
while pos < tend:
h = self._read_data_header(pos)
if h.oid == oid:
# Make sure this looks like the right data record
if h.plen == 0:
# This is also a backpointer. Gotta trust it.
return pos
if h.plen != len(data):
# The expected data doesn't match what's in the
# backpointer. Something is wrong.
error("Mismatch between data and backpointer at %d", pos)
return 0
_data = self._file.read(h.plen)
if data != _data:
return 0
return pos
pos += h.recordlen()
return 0
def _restore_pnv(self, oid, prev, version, bp):
# Find a valid pnv (previous non-version) pointer for this version.
# If there is no previous record, there can't be a pnv.
if not prev:
return None
pnv = None
h = self._read_data_header(prev, oid)
# If the previous record is for a version, it must have
# a valid pnv.
if h.version:
return h.pnv
elif bp:
# Unclear: Not sure the following is always true:
# The previous record is not for this version, yet we
# have a backpointer to it. The current record must
# be an undo of an abort or commit, so the backpointer
# must be to a version record with a pnv.
h2 = self._read_data_header(bp, oid)
if h2.version:
return h2.pnv
else:
warn("restore could not find previous non-version data "
"at %d or %d", prev, bp)
return None
def _resolve_backpointer(self, prev_txn, oid, data):
prev_pos = 0
if prev_txn is not None:
prev_txn_pos = self._txn_find(prev_txn, 0)
if prev_txn_pos:
prev_pos = self._data_find(prev_txn_pos, oid, data)
return prev_pos
def copy(self, oid, serial, data, version, prev_txn,
txnpos, datapos):
prev_pos = self._resolve_backpointer(prev_txn, oid, data)
old = self._index.get(oid, 0)
# Calculate the pos the record will have in the storage.
here = datapos
# And update the temp file index
self._tindex[oid] = here
if prev_pos:
# If there is a valid prev_pos, don't write data.
data = None
if data is None:
dlen = 0
else:
dlen = len(data)
# Write the recovery data record
h = DataHeader(oid, serial, old, txnpos, len(version), dlen)
if version:
h.version = version
pnv = self._restore_pnv(oid, old, version, prev_pos)
if pnv is not None:
h.pnv = pnv
else:
h.pnv = old
# Link to the last record for this version
h.vprev = self._tvindex.get(version, 0)
if not h.vprev:
h.vprev = self._vindex.get(version, 0)
self._tvindex[version] = here
self._tfile.write(h.asString())
# Write the data or a backpointer
if data is None:
if prev_pos:
self._tfile.write(p64(prev_pos))
else:
# Write a zero backpointer, which indicates an
# un-creation transaction.
self._tfile.write(z64)
else:
self._tfile.write(data)
class GC(FileStorageFormatter):
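    """Garbage-collection helper for pack: scans the storage up to the pack
    time and records which object revisions remain reachable (see the module
    docstring for the reachability rules)."""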
def __init__(self, file, eof, packtime):
self._file = file
self._name = file.name
self.eof = eof
self.packtime = packtime
# packpos: position of first txn header after pack time
self.packpos = None
self.oid2curpos = fsIndex() # maps oid to current data record position
self.oid2verpos = fsIndex() # maps oid to current version data
# The set of reachable revisions of each object.
#
        # This set is managed using two data structures. The first is
# an fsIndex mapping oids to one data record pos. Since only
# a few objects will have more than one revision, we use this
# efficient data structure to handle the common case. The
# second is a dictionary mapping objects to lists of
        # positions; it is used to handle the small number of objects
# for which we must keep multiple revisions.
self.reachable = fsIndex()
self.reach_ex = {}
# keep ltid for consistency checks during initial scan
self.ltid = z64
def isReachable(self, oid, pos):
"""Return 1 if revision of `oid` at `pos` is reachable."""
rpos = self.reachable.get(oid)
if rpos is None:
return 0
if rpos == pos:
return 1
return pos in self.reach_ex.get(oid, [])
def findReachable(self):
self.buildPackIndex()
self.findReachableAtPacktime([z64])
self.findReachableFromFuture()
# These mappings are no longer needed and may consume a lot
# of space.
del self.oid2verpos
del self.oid2curpos
def buildPackIndex(self):
pos = 4L
# We make the initial assumption that the database has been
# packed before and set unpacked to True only after seeing the
# first record with a status == " ". If we get to the packtime
# and unpacked is still False, we need to watch for a redundant
# pack.
unpacked = False
while pos < self.eof:
th = self._read_txn_header(pos)
if th.tid > self.packtime:
break
self.checkTxn(th, pos)
if th.status != "p":
unpacked = True
tpos = pos
end = pos + th.tlen
pos += th.headerlen()
while pos < end:
dh = self._read_data_header(pos)
self.checkData(th, tpos, dh, pos)
if dh.version:
self.oid2verpos[dh.oid] = pos
else:
self.oid2curpos[dh.oid] = pos
pos += dh.recordlen()
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
self.packpos = pos
if unpacked:
return
# check for a redundant pack. If the first record following
# the newly computed packpos has status 'p', then it was
        # packed earlier and the current pack is redundant.
try:
th = self._read_txn_header(pos)
except CorruptedDataError, err:
if err.buf != "":
raise
if th.status == 'p':
# Delayed import to cope with circular imports.
# TODO: put exceptions in a separate module.
from ZODB.FileStorage.FileStorage import RedundantPackWarning
raise RedundantPackWarning(
"The database has already been packed to a later time"
" or no changes have been made since the last pack")
def findReachableAtPacktime(self, roots):
"""Mark all objects reachable from the oids in roots as reachable."""
todo = list(roots)
while todo:
oid = todo.pop()
if self.reachable.has_key(oid):
continue
L = []
pos = self.oid2curpos.get(oid)
if pos is not None:
L.append(pos)
todo.extend(self.findrefs(pos))
pos = self.oid2verpos.get(oid)
if pos is not None:
L.append(pos)
todo.extend(self.findrefs(pos))
if not L:
continue
pos = L.pop()
self.reachable[oid] = pos
if L:
self.reach_ex[oid] = L
def findReachableFromFuture(self):
# In this pass, the roots are positions of object revisions.
# We add a pos to extra_roots when there is a backpointer to a
# revision that was not current at the packtime. The
# non-current revision could refer to objects that were
# otherwise unreachable at the packtime.
extra_roots = []
pos = self.packpos
while pos < self.eof:
th = self._read_txn_header(pos)
self.checkTxn(th, pos)
tpos = pos
end = pos + th.tlen
pos += th.headerlen()
while pos < end:
dh = self._read_data_header(pos)
self.checkData(th, tpos, dh, pos)
if dh.back and dh.back < self.packpos:
if self.reachable.has_key(dh.oid):
L = self.reach_ex.setdefault(dh.oid, [])
if dh.back not in L:
L.append(dh.back)
extra_roots.append(dh.back)
else:
self.reachable[dh.oid] = dh.back
if dh.version and dh.pnv:
if self.reachable.has_key(dh.oid):
L = self.reach_ex.setdefault(dh.oid, [])
if dh.pnv not in L:
L.append(dh.pnv)
extra_roots.append(dh.pnv)
else:
self.reachable[dh.oid] = dh.back
pos += dh.recordlen()
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
for pos in extra_roots:
refs = self.findrefs(pos)
self.findReachableAtPacktime(refs)
def findrefs(self, pos):
"""Return a list of oids referenced as of packtime."""
dh = self._read_data_header(pos)
# Chase backpointers until we get to the record with the refs
while dh.back:
dh = self._read_data_header(dh.back)
if dh.plen:
return referencesf(self._file.read(dh.plen))
else:
return []
class PackCopier(DataCopier):
# PackCopier has to cope with _file and _tfile being the
# same file. The copy() implementation is written assuming
# that they are different, so that using one object doesn't
# mess up the file pointer for the other object.
# PackCopier overrides _resolve_backpointer() and _restore_pnv()
# to guarantee that they keep the file pointer for _tfile in
# the right place.
def __init__(self, f, index, vindex, tindex, tvindex):
self._file = f
self._tfile = f
self._index = index
self._vindex = vindex
self._tindex = tindex
self._tvindex = tvindex
self._pos = None
def setTxnPos(self, pos):
self._pos = pos
def _resolve_backpointer(self, prev_txn, oid, data):
pos = self._tfile.tell()
try:
return DataCopier._resolve_backpointer(self, prev_txn, oid, data)
finally:
self._tfile.seek(pos)
def _restore_pnv(self, oid, prev, version, bp):
pos = self._tfile.tell()
try:
return DataCopier._restore_pnv(self, oid, prev, version, bp)
finally:
self._tfile.seek(pos)
class FileStoragePacker(FileStorageFormatter):
# path is the storage file path.
# stop is the pack time, as a TimeStamp.
# la and lr are the acquire() and release() methods of the storage's lock.
# cla and clr similarly, for the storage's commit lock.
# current_size is the storage's _pos. All valid data at the start
# lives before that offset (there may be a checkpoint transaction in
# progress after it).
def __init__(self, path, stop, la, lr, cla, clr, current_size):
self._name = path
# We open our own handle on the storage so that much of pack can
# proceed in parallel. It's important to close this file at every
# return point, else on Windows the caller won't be able to rename
# or remove the storage file.
self._file = open(path, "rb")
self._path = path
self._stop = stop
self.locked = 0
self.file_end = current_size
self.gc = GC(self._file, self.file_end, self._stop)
# The packer needs to acquire the parent's commit lock
# during the copying stage, so the two sets of lock acquire
# and release methods are passed to the constructor.
self._lock_acquire = la
self._lock_release = lr
self._commit_lock_acquire = cla
self._commit_lock_release = clr
# The packer will use several indexes.
# index: oid -> pos
# vindex: version -> pos
# tindex: oid -> pos, for current txn
# tvindex: version -> pos, for current txn
# oid2tid: not used by the packer
self.index = fsIndex()
self.vindex = {}
self.tindex = {}
self.tvindex = {}
self.oid2tid = {}
self.toid2tid = {}
self.toid2tid_delete = {}
# Index for non-version data. This is a temporary structure
# to reduce I/O during packing
self.nvindex = fsIndex()
def pack(self):
# Pack copies all data reachable at the pack time or later.
#
# Copying occurs in two phases. In the first phase, txns
# before the pack time are copied if the contain any reachable
# data. In the second phase, all txns after the pack time
# are copied.
#
# Txn and data records contain pointers to previous records.
# Because these pointers are stored as file offsets, they
# must be updated when we copy data.
# TODO: Should add sanity checking to pack.
self.gc.findReachable()
# Setup the destination file and copy the metadata.
# TODO: rename from _tfile to something clearer.
self._tfile = open(self._name + ".pack", "w+b")
self._file.seek(0)
self._tfile.write(self._file.read(self._metadata_size))
self._copier = PackCopier(self._tfile, self.index, self.vindex,
self.tindex, self.tvindex)
ipos, opos = self.copyToPacktime()
assert ipos == self.gc.packpos
if ipos == opos:
# pack didn't free any data. there's no point in continuing.
self._tfile.close()
self._file.close()
os.remove(self._name + ".pack")
return None
self._commit_lock_acquire()
self.locked = 1
self._lock_acquire()
try:
# Re-open the file in unbuffered mode.
# The main thread may write new transactions to the file,
# which creates the possibility that we will read a status
# 'c' transaction into the pack thread's stdio buffer even
# though we're acquiring the commit lock. Transactions
# can still be in progress throughout much of packing, and
# are written to the same physical file but via a distinct
# Python file object. The code used to leave off the
# trailing 0 argument, and then on every platform except
# native Windows it was observed that we could read stale
# data from the tail end of the file.
self._file.close() # else self.gc keeps the original alive & open
self._file = open(self._path, "rb", 0)
self._file.seek(0, 2)
self.file_end = self._file.tell()
finally:
self._lock_release()
if ipos < self.file_end:
self.copyRest(ipos)
# OK, we've copied everything. Now we need to wrap things up.
pos = self._tfile.tell()
self._tfile.flush()
self._tfile.close()
self._file.close()
return pos
def copyToPacktime(self):
offset = 0L # the amount of space freed by packing
pos = self._metadata_size
new_pos = pos
while pos < self.gc.packpos:
th = self._read_txn_header(pos)
new_tpos, pos = self.copyDataRecords(pos, th)
if new_tpos:
new_pos = self._tfile.tell() + 8
tlen = new_pos - new_tpos - 8
# Update the transaction length
self._tfile.seek(new_tpos + 8)
self._tfile.write(p64(tlen))
self._tfile.seek(new_pos - 8)
self._tfile.write(p64(tlen))
tlen = self._read_num(pos)
if tlen != th.tlen:
self.fail(pos, "redundant transaction length does not "
"match initial transaction length: %d != %d",
tlen, th.tlen)
pos += 8
return pos, new_pos
def fetchBackpointer(self, oid, back):
"""Return data and refs backpointer `back` to object `oid.
If `back` is 0 or ultimately resolves to 0, return None
and None. In this case, the transaction undoes the object
creation.
"""
if back == 0:
return None
data, tid = self._loadBackTxn(oid, back, 0)
return data
def copyDataRecords(self, pos, th):
"""Copy any current data records between pos and tend.
Returns position of txn header in output file and position
of next record in the input file.
If any data records are copied, also write txn header (th).
"""
copy = 0
new_tpos = 0L
tend = pos + th.tlen
pos += th.headerlen()
while pos < tend:
h = self._read_data_header(pos)
if not self.gc.isReachable(h.oid, pos):
pos += h.recordlen()
continue
pos += h.recordlen()
# If we are going to copy any data, we need to copy
# the transaction header. Note that we will need to
# patch up the transaction length when we are done.
if not copy:
th.status = "p"
s = th.asString()
new_tpos = self._tfile.tell()
self._tfile.write(s)
new_pos = new_tpos + len(s)
copy = 1
if h.plen:
data = self._file.read(h.plen)
else:
# If a current record has a backpointer, fetch
# refs and data from the backpointer. We need
# to write the data in the new record.
data = self.fetchBackpointer(h.oid, h.back)
self.writePackedDataRecord(h, data, new_tpos)
new_pos = self._tfile.tell()
return new_tpos, pos
def writePackedDataRecord(self, h, data, new_tpos):
# Update the header to reflect current information, then write
# it to the output file.
if data is None:
data = ""
h.prev = 0
h.back = 0
h.plen = len(data)
h.tloc = new_tpos
pos = self._tfile.tell()
if h.version:
h.pnv = self.index.get(h.oid, 0)
h.vprev = self.vindex.get(h.version, 0)
self.vindex[h.version] = pos
self.index[h.oid] = pos
if h.version:
self.vindex[h.version] = pos
self._tfile.write(h.asString())
self._tfile.write(data)
if not data:
# Packed records never have backpointers (?).
# If there is no data, write a z64 backpointer.
# This is a George Bailey event.
self._tfile.write(z64)
def copyRest(self, ipos):
# After the pack time, all data records are copied.
# Copy one txn at a time, using copy() for data.
# Release the commit lock every 20 copies
self._lock_counter = 0
try:
while 1:
ipos = self.copyOne(ipos)
except CorruptedDataError, err:
# The last call to copyOne() will raise
# CorruptedDataError, because it will attempt to read past
# the end of the file. Double-check that the exception
# occurred for this reason.
self._file.seek(0, 2)
endpos = self._file.tell()
if endpos != err.pos:
raise
def copyOne(self, ipos):
# The call below will raise CorruptedDataError at EOF.
th = self._read_txn_header(ipos)
self._lock_counter += 1
if self._lock_counter % 20 == 0:
self._commit_lock_release()
pos = self._tfile.tell()
self._copier.setTxnPos(pos)
self._tfile.write(th.asString())
tend = ipos + th.tlen
ipos += th.headerlen()
while ipos < tend:
h = self._read_data_header(ipos)
ipos += h.recordlen()
prev_txn = None
if h.plen:
data = self._file.read(h.plen)
else:
data = self.fetchBackpointer(h.oid, h.back)
if h.back:
prev_txn = self.getTxnFromData(h.oid, h.back)
self._copier.copy(h.oid, h.tid, data, h.version,
prev_txn, pos, self._tfile.tell())
tlen = self._tfile.tell() - pos
assert tlen == th.tlen
self._tfile.write(p64(tlen))
ipos += 8
self.index.update(self.tindex)
self.tindex.clear()
self.vindex.update(self.tvindex)
self.tvindex.clear()
if self._lock_counter % 20 == 0:
self._commit_lock_acquire()
return ipos
| [
"[email protected]"
] | |
ccb13a5e3d845bf09eec3fb40555e403ece082c2 | 677d142be25f5904b4ab418ce5ffa1387fe0f695 | /app/config.py | a0f945c9dbd2036423157c7b61e20d7f0375030d | [
"MIT"
] | permissive | mfannick/pitch | ee2ded6a70d6f26e5fa34f635ca5821766006b20 | af321e36d2ad23bc2129b4d6cc41c7779bdea063 | refs/heads/master | 2022-10-12T16:07:54.439498 | 2019-09-30T07:53:03 | 2019-09-30T07:53:03 | 210,202,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,367 | py | import os
class Config:
'''
General configuration parent class
'''
    SECRET_KEY = 'Fannick'
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://annick:escofavi@localhost/pitch'
    UPLOADED_PHOTOS_DEST = 'app/static/photos'
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
# email configurations
MAIL_SERVER = 'smtp.googlemail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
@staticmethod
def init_app(app):
pass
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
    SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://annick:escofavi@localhost/watchlist_test'
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://annick:escofavi@localhost/pitch'
DEBUG = True
config_options = {
    'development': DevConfig,
    'production': ProdConfig,
    'test': TestConfig
}
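# Typical Flask wiring (sketch): app.config.from_object(config_options['development'])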
| [
"[email protected]"
] | |
1d6d23ca4b07fa9ea47cddf4a29db00d629a4c56 | 7c081ac61722f11de1758c9701662f85c1bb802c | /pysigep/webservices/webservice_base.py | 270eeb115b16b5f8f51c39007bedbef3ee9fb66f | [
"MIT"
] | permissive | trocafone/pysigep | 8a3ad14febce45bc54aea721b481c87bfcd92f50 | a899fb85e9195ac8686313e20c8bec7c03bde198 | refs/heads/develop | 2021-01-16T22:07:06.093299 | 2016-07-05T13:57:20 | 2016-07-05T13:57:20 | 62,578,970 | 1 | 0 | null | 2016-07-05T13:57:22 | 2016-07-04T17:40:17 | Python | UTF-8 | Python | false | false | 2,803 | py | # -*- coding: utf-8 -*-
# #############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) 2016 Michell Stuttgart
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
###############################################################################
import xml.etree.cElementTree as Et
import requests
from pysigep import sigep_exceptions
class WebserviceBase(object):
def __init__(self, url):
self._url = url
@property
def url(self):
return self._url
def request(self, obj_param, ssl_verify=False):
try:
resposta = requests.post(self.url, data=obj_param.get_data(),
headers={'Content-type': 'text/xml'},
verify=ssl_verify)
if not resposta.ok:
msg = self.parse_error(resposta.text.encode('utf8'))
raise sigep_exceptions.ErroValidacaoXML(msg)
            # Build the response object dynamically for each parameter class
response = obj_param.response_class_ref()
response.status_code = resposta.status_code
response.encoding = resposta.encoding
response.xml = resposta.text.encode('utf8')
response.body_request = resposta.request.body
return response
except requests.ConnectionError as exc:
raise sigep_exceptions.ErroConexaoComServidor(exc.message)
except requests.Timeout as exc:
raise sigep_exceptions.ErroConexaoTimeOut(exc.message)
except requests.exceptions.RequestException as exc:
raise sigep_exceptions.ErroRequisicao(exc.message)
def parse_error(self, xml):
return Et.fromstring(xml).findtext('.//faultstring')
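# Usage sketch (hypothetical parameter object; concrete ones must expose
# get_data() and response_class_ref(), and `url` should point at a SIGEP
# webservice endpoint):
#
#   ws = WebserviceBase(url)
#   response = ws.request(obj_param)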
| [
"[email protected]"
] | |
84143160260f1dd336ed44bdb166a188128d844e | adcbefa6cba639ec8c8eb74766b7f6cd5301d041 | /coffeehouse_nlpfr/translate/ibm_model.py | 5ea12030cfdd157ed39846bc43e3a1cde15a84a8 | [] | no_license | intellivoid/CoffeeHouse-NLPFR | b39ae1eaeb8936c5c5634f39e0a30d1feece6705 | 8ad1b988ddba086478c320f638d10d0c0cacca4c | refs/heads/master | 2022-11-28T02:13:40.670494 | 2020-06-07T04:02:00 | 2020-06-07T04:02:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,039 | py | # -*- coding: utf-8 -*-
# Natural Language Toolkit: IBM Model Core
#
# Copyright (C) 2001-2019 NLTK Project
# Author: Tah Wei Hoon <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Common methods and classes for all IBM models. See ``IBMModel1``,
``IBMModel2``, ``IBMModel3``, ``IBMModel4``, and ``IBMModel5``
for specific implementations.
The IBM models are a series of generative models that learn lexical
translation probabilities, p(target language word|source language word),
given a sentence-aligned parallel corpus.
The models increase in sophistication from model 1 to 5. Typically, the
output of lower models is used to seed the higher models. All models
use the Expectation-Maximization (EM) algorithm to learn various
probability tables.
Words in a sentence are one-indexed. The first word of a sentence has
position 1, not 0. Index 0 is reserved in the source sentence for the
NULL token. The concept of position does not apply to NULL, but it is
indexed at 0 by convention.
Each target word is aligned to exactly one source word or the NULL
token.
References:
Philipp Koehn. 2010. Statistical Machine Translation.
Cambridge University Press, New York.
Peter E Brown, Stephen A. Della Pietra, Vincent J. Della Pietra, and
Robert L. Mercer. 1993. The Mathematics of Statistical Machine
Translation: Parameter Estimation. Computational Linguistics, 19 (2),
263-311.
"""
from bisect import insort_left
from collections import defaultdict
from copy import deepcopy
from math import ceil
def longest_target_sentence_length(sentence_aligned_corpus):
"""
:param sentence_aligned_corpus: Parallel corpus under consideration
:type sentence_aligned_corpus: list(AlignedSent)
:return: Number of words in the longest target language sentence
of ``sentence_aligned_corpus``
"""
max_m = 0
for aligned_sentence in sentence_aligned_corpus:
m = len(aligned_sentence.words)
max_m = max(m, max_m)
return max_m
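# Example: for a corpus whose target sentences have lengths 2 and 1, this
# returns 2; only ``aligned_sentence.words`` (the target side) is measured.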
class IBMModel(object):
"""
Abstract base class for all IBM models
"""
# Avoid division by zero and precision errors by imposing a minimum
# value for probabilities. Note that this approach is theoretically
# incorrect, since it may create probabilities that sum to more
# than 1. In practice, the contribution of probabilities with MIN_PROB
# is tiny enough that the value of MIN_PROB can be treated as zero.
MIN_PROB = 1.0e-12 # GIZA++ is more liberal and uses 1.0e-7
def __init__(self, sentence_aligned_corpus):
self.init_vocab(sentence_aligned_corpus)
self.reset_probabilities()
def reset_probabilities(self):
self.translation_table = defaultdict(
lambda: defaultdict(lambda: IBMModel.MIN_PROB)
)
"""
dict[str][str]: float. Probability(target word | source word).
Values accessed as ``translation_table[target_word][source_word]``.
"""
self.alignment_table = defaultdict(
lambda: defaultdict(
lambda: defaultdict(lambda: defaultdict(lambda: IBMModel.MIN_PROB))
)
)
"""
dict[int][int][int][int]: float. Probability(i | j,l,m).
Values accessed as ``alignment_table[i][j][l][m]``.
Used in model 2 and hill climbing in models 3 and above
"""
self.fertility_table = defaultdict(lambda: defaultdict(lambda: self.MIN_PROB))
"""
dict[int][str]: float. Probability(fertility | source word).
Values accessed as ``fertility_table[fertility][source_word]``.
Used in model 3 and higher.
"""
self.p1 = 0.5
"""
Probability that a generated word requires another target word
that is aligned to NULL.
Used in model 3 and higher.
"""
def set_uniform_probabilities(self, sentence_aligned_corpus):
"""
Initialize probability tables to a uniform distribution
Derived classes should implement this accordingly.
"""
pass
def init_vocab(self, sentence_aligned_corpus):
src_vocab = set()
trg_vocab = set()
for aligned_sentence in sentence_aligned_corpus:
trg_vocab.update(aligned_sentence.words)
src_vocab.update(aligned_sentence.mots)
# Add the NULL token
src_vocab.add(None)
self.src_vocab = src_vocab
"""
set(str): All source language words used in training
"""
self.trg_vocab = trg_vocab
"""
set(str): All target language words used in training
"""
def sample(self, sentence_pair):
"""
Sample the most probable alignments from the entire alignment
space
First, determine the best alignment according to IBM Model 2.
With this initial alignment, use hill climbing to determine the
best alignment according to a higher IBM Model. Add this
alignment and its neighbors to the sample set. Repeat this
process with other initial alignments obtained by pegging an
alignment point.
Hill climbing may be stuck in a local maxima, hence the pegging
and trying out of different alignments.
:param sentence_pair: Source and target language sentence pair
to generate a sample of alignments from
:type sentence_pair: AlignedSent
:return: A set of best alignments represented by their ``AlignmentInfo``
and the best alignment of the set for convenience
:rtype: set(AlignmentInfo), AlignmentInfo
"""
sampled_alignments = set()
l = len(sentence_pair.mots)
m = len(sentence_pair.words)
# Start from the best model 2 alignment
initial_alignment = self.best_model2_alignment(sentence_pair)
potential_alignment = self.hillclimb(initial_alignment)
sampled_alignments.update(self.neighboring(potential_alignment))
best_alignment = potential_alignment
# Start from other model 2 alignments,
# with the constraint that j is aligned (pegged) to i
for j in range(1, m + 1):
for i in range(0, l + 1):
initial_alignment = self.best_model2_alignment(sentence_pair, j, i)
potential_alignment = self.hillclimb(initial_alignment, j)
neighbors = self.neighboring(potential_alignment, j)
sampled_alignments.update(neighbors)
if potential_alignment.score > best_alignment.score:
best_alignment = potential_alignment
return sampled_alignments, best_alignment
def best_model2_alignment(self, sentence_pair, j_pegged=None, i_pegged=0):
"""
Finds the best alignment according to IBM Model 2
Used as a starting point for hill climbing in Models 3 and
above, because it is easier to compute than the best alignments
in higher models
:param sentence_pair: Source and target language sentence pair
to be word-aligned
:type sentence_pair: AlignedSent
:param j_pegged: If specified, the alignment point of j_pegged
will be fixed to i_pegged
:type j_pegged: int
:param i_pegged: Alignment point to j_pegged
:type i_pegged: int
"""
src_sentence = [None] + sentence_pair.mots
trg_sentence = ["UNUSED"] + sentence_pair.words # 1-indexed
l = len(src_sentence) - 1 # exclude NULL
m = len(trg_sentence) - 1
alignment = [0] * (m + 1) # init all alignments to NULL
cepts = [[] for i in range((l + 1))] # init all cepts to empty list
for j in range(1, m + 1):
if j == j_pegged:
# use the pegged alignment instead of searching for best one
best_i = i_pegged
else:
best_i = 0
max_alignment_prob = IBMModel.MIN_PROB
t = trg_sentence[j]
for i in range(0, l + 1):
s = src_sentence[i]
alignment_prob = (
self.translation_table[t][s] * self.alignment_table[i][j][l][m]
)
if alignment_prob >= max_alignment_prob:
max_alignment_prob = alignment_prob
best_i = i
alignment[j] = best_i
cepts[best_i].append(j)
return AlignmentInfo(
tuple(alignment), tuple(src_sentence), tuple(trg_sentence), cepts
)
def hillclimb(self, alignment_info, j_pegged=None):
"""
Starting from the alignment in ``alignment_info``, look at
neighboring alignments iteratively for the best one
There is no guarantee that the best alignment in the alignment
space will be found, because the algorithm might be stuck in a
local maximum.
:param j_pegged: If specified, the search will be constrained to
alignments where ``j_pegged`` remains unchanged
:type j_pegged: int
:return: The best alignment found from hill climbing
:rtype: AlignmentInfo
"""
alignment = alignment_info # alias with shorter name
max_probability = self.prob_t_a_given_s(alignment)
while True:
old_alignment = alignment
for neighbor_alignment in self.neighboring(alignment, j_pegged):
neighbor_probability = self.prob_t_a_given_s(neighbor_alignment)
if neighbor_probability > max_probability:
alignment = neighbor_alignment
max_probability = neighbor_probability
if alignment == old_alignment:
# Until there are no better alignments
break
alignment.score = max_probability
return alignment
def neighboring(self, alignment_info, j_pegged=None):
"""
Determine the neighbors of ``alignment_info``, obtained by
moving or swapping one alignment point
:param j_pegged: If specified, neighbors that have a different
alignment point from j_pegged will not be considered
:type j_pegged: int
:return: A set neighboring alignments represented by their
``AlignmentInfo``
:rtype: set(AlignmentInfo)
"""
neighbors = set()
l = len(alignment_info.src_sentence) - 1 # exclude NULL
m = len(alignment_info.trg_sentence) - 1
original_alignment = alignment_info.alignment
original_cepts = alignment_info.cepts
for j in range(1, m + 1):
if j != j_pegged:
# Add alignments that differ by one alignment point
for i in range(0, l + 1):
new_alignment = list(original_alignment)
new_cepts = deepcopy(original_cepts)
old_i = original_alignment[j]
# update alignment
new_alignment[j] = i
# update cepts
insort_left(new_cepts[i], j)
new_cepts[old_i].remove(j)
new_alignment_info = AlignmentInfo(
tuple(new_alignment),
alignment_info.src_sentence,
alignment_info.trg_sentence,
new_cepts,
)
neighbors.add(new_alignment_info)
for j in range(1, m + 1):
if j != j_pegged:
# Add alignments that have two alignment points swapped
for other_j in range(1, m + 1):
if other_j != j_pegged and other_j != j:
new_alignment = list(original_alignment)
new_cepts = deepcopy(original_cepts)
other_i = original_alignment[other_j]
i = original_alignment[j]
# update alignments
new_alignment[j] = other_i
new_alignment[other_j] = i
# update cepts
new_cepts[other_i].remove(other_j)
insort_left(new_cepts[other_i], j)
new_cepts[i].remove(j)
insort_left(new_cepts[i], other_j)
new_alignment_info = AlignmentInfo(
tuple(new_alignment),
alignment_info.src_sentence,
alignment_info.trg_sentence,
new_cepts,
)
neighbors.add(new_alignment_info)
return neighbors
def maximize_lexical_translation_probabilities(self, counts):
for t, src_words in counts.t_given_s.items():
for s in src_words:
estimate = counts.t_given_s[t][s] / counts.any_t_given_s[s]
self.translation_table[t][s] = max(estimate, IBMModel.MIN_PROB)
def maximize_fertility_probabilities(self, counts):
for phi, src_words in counts.fertility.items():
for s in src_words:
estimate = counts.fertility[phi][s] / counts.fertility_for_any_phi[s]
self.fertility_table[phi][s] = max(estimate, IBMModel.MIN_PROB)
def maximize_null_generation_probabilities(self, counts):
p1_estimate = counts.p1 / (counts.p1 + counts.p0)
p1_estimate = max(p1_estimate, IBMModel.MIN_PROB)
# Clip p1 if it is too large, because p0 = 1 - p1 should not be
# smaller than MIN_PROB
self.p1 = min(p1_estimate, 1 - IBMModel.MIN_PROB)
def prob_of_alignments(self, alignments):
probability = 0
for alignment_info in alignments:
probability += self.prob_t_a_given_s(alignment_info)
return probability
def prob_t_a_given_s(self, alignment_info):
"""
Probability of target sentence and an alignment given the
source sentence
All required information is assumed to be in ``alignment_info``
and self.
Derived classes should override this method
"""
return 0.0
class AlignmentInfo(object):
"""
Helper data object for training IBM Models 3 and up
Read-only. For a source sentence and its counterpart in the target
language, this class holds information about the sentence pair's
alignment, cepts, and fertility.
Warning: Alignments are one-indexed here, in contrast to
coffeehouse_nlpfr.translate.Alignment and AlignedSent, which are zero-indexed
This class is not meant to be used outside of IBM models.
"""
def __init__(self, alignment, src_sentence, trg_sentence, cepts):
if not isinstance(alignment, tuple):
raise TypeError(
"The alignment must be a tuple because it is used "
"to uniquely identify AlignmentInfo objects."
)
self.alignment = alignment
"""
tuple(int): Alignment function. ``alignment[j]`` is the position
in the source sentence that is aligned to the position j in the
target sentence.
"""
self.src_sentence = src_sentence
"""
tuple(str): Source sentence referred to by this object.
Should include NULL token (None) in index 0.
"""
self.trg_sentence = trg_sentence
"""
tuple(str): Target sentence referred to by this object.
Should have a dummy element in index 0 so that the first word
starts from index 1.
"""
self.cepts = cepts
"""
list(list(int)): The positions of the target words, in
ascending order, aligned to a source word position. For example,
cepts[4] = (2, 3, 7) means that words in positions 2, 3 and 7
of the target sentence are aligned to the word in position 4 of
the source sentence
"""
self.score = None
"""
float: Optional. Probability of alignment, as defined by the
IBM model that assesses this alignment
"""
def fertility_of_i(self, i):
"""
Fertility of word in position ``i`` of the source sentence
"""
return len(self.cepts[i])
def is_head_word(self, j):
"""
:return: Whether the word in position ``j`` of the target
sentence is a head word
"""
i = self.alignment[j]
return self.cepts[i][0] == j
def center_of_cept(self, i):
"""
:return: The ceiling of the average positions of the words in
the tablet of cept ``i``, or 0 if ``i`` is None
"""
if i is None:
return 0
average_position = sum(self.cepts[i]) / len(self.cepts[i])
return int(ceil(average_position))
def previous_cept(self, j):
"""
:return: The previous cept of ``j``, or None if ``j`` belongs to
the first cept
"""
i = self.alignment[j]
if i == 0:
raise ValueError(
"Words aligned to NULL cannot have a previous "
"cept because NULL has no position"
)
previous_cept = i - 1
while previous_cept > 0 and self.fertility_of_i(previous_cept) == 0:
previous_cept -= 1
if previous_cept <= 0:
previous_cept = None
return previous_cept
def previous_in_tablet(self, j):
"""
:return: The position of the previous word that is in the same
tablet as ``j``, or None if ``j`` is the first word of the
tablet
"""
i = self.alignment[j]
tablet_position = self.cepts[i].index(j)
if tablet_position == 0:
return None
return self.cepts[i][tablet_position - 1]
def zero_indexed_alignment(self):
"""
:return: Zero-indexed alignment, suitable for use in external
``coffeehouse_nlpfr.translate`` modules like ``coffeehouse_nlpfr.translate.Alignment``
:rtype: list(tuple)
"""
zero_indexed_alignment = []
for j in range(1, len(self.trg_sentence)):
i = self.alignment[j] - 1
if i < 0:
i = None # alignment to NULL token
zero_indexed_alignment.append((j - 1, i))
return zero_indexed_alignment
def __eq__(self, other):
return self.alignment == other.alignment
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self.alignment)
class Counts(object):
"""
Data object to store counts of various parameters during training
"""
def __init__(self):
self.t_given_s = defaultdict(lambda: defaultdict(lambda: 0.0))
self.any_t_given_s = defaultdict(lambda: 0.0)
self.p0 = 0.0
self.p1 = 0.0
self.fertility = defaultdict(lambda: defaultdict(lambda: 0.0))
self.fertility_for_any_phi = defaultdict(lambda: 0.0)
def update_lexical_translation(self, count, alignment_info, j):
i = alignment_info.alignment[j]
t = alignment_info.trg_sentence[j]
s = alignment_info.src_sentence[i]
self.t_given_s[t][s] += count
self.any_t_given_s[s] += count
def update_null_generation(self, count, alignment_info):
m = len(alignment_info.trg_sentence) - 1
fertility_of_null = alignment_info.fertility_of_i(0)
self.p1 += fertility_of_null * count
self.p0 += (m - 2 * fertility_of_null) * count
def update_fertility(self, count, alignment_info):
for i in range(0, len(alignment_info.src_sentence)):
s = alignment_info.src_sentence[i]
phi = alignment_info.fertility_of_i(i)
self.fertility[phi][s] += count
self.fertility_for_any_phi[s] += count
| [
"[email protected]"
] | |
eb2c401bccb6319a30e48d31688eae4f130ad2be | e0c378f27462cb00b656473cbe0b172886741818 | /src/flash/image/embedding/__init__.py | 30f7a15003c7c0e8b58925b300084fe61302bb93 | [
"Apache-2.0"
] | permissive | Lightning-Universe/lightning-flash | d0c955d7fdf962175750d154b3a369a483b8d188 | fc6c97a43d65b49561c896bf05bc1c75536d0dc0 | refs/heads/master | 2023-08-17T12:03:52.563905 | 2023-08-14T12:35:10 | 2023-08-14T12:35:10 | 333,857,397 | 58 | 12 | Apache-2.0 | 2023-09-11T14:43:06 | 2021-01-28T18:47:16 | Python | UTF-8 | Python | false | false | 68 | py | from flash.image.embedding.model import ImageEmbedder # noqa: F401
| [
"[email protected]"
] | |
716098a8f7469e8ffcbdd834a9aae73b196fa55b | 5efc1623d9e06d9b0caa104630d2b5d7610fb19d | /learn/deep_reinforcement_learning_course/deep_q_doom.py | d44d9b1f72a46b6b1336e6e4f9410ed328773ead | [] | no_license | techyajay/phd | 20fd01535b5147b7ef86aa19f6683fa01dca4404 | a1348bb6645a67a1f09aef7155c0db1720291bb6 | refs/heads/master | 2020-04-13T19:37:35.060245 | 2018-12-18T13:44:30 | 2018-12-18T13:44:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | """Deep Q learning for the game Doom.
See: https://medium.freecodecamp.org/an-introduction-to-deep-q-learning-lets-play-doom-54d02d8017d8
"""
import pathlib
import typing
import skimage.transform
import vizdoom
from absl import app
from absl import flags
from absl import logging
from labm8 import bazelutil
FLAGS = flags.FLAGS
flags.DEFINE_string(
'doom_config',
str(bazelutil.DataPath(
'phd/learn/deep_reinforcement_learning_course/data/doom_config.cfg')),
'Path to Doom config file.')
flags.DEFINE_string(
'doom_scenario',
str(bazelutil.DataPath(
'phd/learn/deep_reinforcement_learning_course/data/doom_scenario.wad')),
'Path to Doom scenario file.')
def CreateEnvironment(
config_path: typing.Optional[pathlib.Path] = None,
scenario_path: typing.Optional[pathlib.Path] = None
) -> typing.Tuple[None, typing.List[typing.List[int]]]:
"""Create the Doom game environment.
Returns:
A tuple of the environment and action space.
"""
config_path = config_path or FLAGS.doom_config
scenario_path = scenario_path or FLAGS.doom_scenario
game = vizdoom.DoomGame()
game.load_config(config_path)
game.set_doom_scenario_path(scenario_path)
game.init()
left = [1, 0, 0]
right = [0, 1, 0]
shoot = [0, 0, 1]
possible_actions = [left, right, shoot]
return game, possible_actions
def PreprocessFrame(frame):
# Crop the screen (remove the roof because it contains no information).
cropped_frame = frame[30:-10, 30:-30]
# Normalize Pixel Values.
normalized_frame = cropped_frame / 255.0
# Resize.
preprocessed_frame = skimage.transform.resize(normalized_frame, [84, 84])
return preprocessed_frame
def main(argv: typing.List[str]):
"""Main entry point."""
if len(argv) > 1:
raise app.UsageError("Unknown arguments: '{}'.".format(' '.join(argv[1:])))
logging.info('Done.')
if __name__ == '__main__':
app.run(main)
| [
"[email protected]"
] | |
24bef4da51b7e473f483cd9fe30280a5be5bb0ea | d6851011cf9669036078a848a55f3dab7528bbd1 | /tests/mock_commands/ghdl | 806201636fad5ce30b2824afa73edbd89f47b119 | [
"BSD-2-Clause"
] | permissive | msfschaffner/edalize | 442743f6c2f41da034482b69c5276efd1a6a40ad | d6757f36c6fc804f1876c907d0df55485937c0f2 | refs/heads/master | 2022-11-30T15:52:48.724111 | 2020-04-13T16:33:20 | 2020-04-16T10:35:00 | 256,582,248 | 1 | 0 | BSD-2-Clause | 2020-04-17T18:38:44 | 2020-04-17T18:38:43 | null | UTF-8 | Python | false | false | 441 | #!/usr/bin/env python
import os
import sys
cmd_file = 'analyze.cmd' if sys.argv[1] == '-i' else 'elab-run.cmd'
with open(cmd_file, 'a') as f:
f.write(' '.join(sys.argv[1:]) + '\n')
if sys.argv[1] == '-i':
for arg in sys.argv:
if arg.startswith('--std'):
std = arg.split('=')[1]
output_file = 'work-obj'+std+'.cf'
with open(output_file, 'a'):
os.utime(output_file, None)
| [
"[email protected]"
] | ||
8232a25f494ea58fe5f4743fe20ae90eea3baca4 | 087f7ec4bb11bca64f29eac49df1104d885067b4 | /midiscenemanager/midiio.py | cf0f27661d1cb8cb11b1990db844f4a85846ac42 | [
"MIT"
] | permissive | SpotlightKid/midiscenemanager | 38dd7e5299fc97fae3de6e8ecfde934c26974da0 | a4f9268ba73f575d5d3313eaf256eb9cebdcbdd0 | refs/heads/master | 2023-06-01T15:21:16.520511 | 2023-05-21T14:44:33 | 2023-05-21T14:44:33 | 111,223,358 | 2 | 0 | null | 2017-11-26T15:26:59 | 2017-11-18T17:06:18 | Python | UTF-8 | Python | false | false | 6,597 | py | # -*- coding: utf-8 -*-
#
# midiio.py
#
"""Wrapper clas for rtmidi.MidiOut to facilitate sending common MIDI events."""
import binascii
import rtmidi
from rtmidi.midiconstants import *
from rtmidi.midiutil import open_midioutput
from .sequencer import SequencerThread
def parse_sysex_string(s):
return binascii.unhexlify(s.replace(' ', ''))
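# e.g. parse_sysex_string('F0 7E 7F 09 01 F7') -> b'\xf0\x7e\x7f\x09\x01\xf7'
# (the GM System On message); spaces are ignored, each hex pair is one byte.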
class MidiOutWrapper:
def __init__(self, midi, name, ch=1):
self.channel = ch
self.midi = midi
self.name = name
@property
def midi(self):
return self._midi
@midi.setter
def midi(self, obj):
if hasattr(self, '_midi'):
with self._midi.lock:
self._midi.midiout = obj
else:
self._midi = SequencerThread(obj)
self._midi.start()
def _cleanup(self):
self.midi.stop()
self.midi.midiout.close_port()
def send_channel_message(self, status, data1=None, data2=None, ch=None, delay=0):
"""Send a MIDI channel mode message."""
msg = [(status & 0xf0) | ((ch if ch else self.channel) - 1 & 0xF)]
if data1 is not None:
msg.append(data1 & 0x7F)
if data2 is not None:
msg.append(data2 & 0x7F)
self.midi.add(msg, delta=delay)
    def send_system_common_message(self, status=0xF7, data1=None, data2=None, delay=0):
msg = [status & 0xF7]
if msg[0] in (0xF1, 0xF2, 0xF3):
msg.append(data1 & 0x7F)
if msg[0] == 0xF2:
msg.append(data2 & 0x7F)
self.midi.add(msg, delta=delay)
def send_system_realtime_message(self, status=0xF8):
        self.midi.add([status & 0xFF], delta=1)
    def send_system_exclusive(self, value="", delay=0):
msg = parse_sysex_string(value)
if (msg and msg.startswith(b'\xF0') and msg.endswith(b'\xF7') and
all((val < 128 for val in msg[1:-1]))):
self.midi.add(msg, delta=delay)
else:
raise ValueError("Invalid sysex string: %s", msg)
def send_note_off(self, note=60, velocity=0, ch=None, delay=0):
"""Send a 'Note Off' message."""
self.send_channel_message(NOTE_OFF, note, velocity, ch=ch, delay=delay)
def send_note_on(self, note=60, velocity=127, ch=None, delay=0):
"""Send a 'Note On' message."""
self.send_channel_message(NOTE_ON, note, velocity, ch=ch, delay=delay)
def send_poly_pressure(self, note=60, value=0, ch=None, delay=0):
"""Send a 'Polyphonic Pressure' (Aftertouch) message."""
self.send_channel_message(POLY_PRESSURE, note, value, ch=ch, delay=delay)
def send_control_change(self, cc=0, value=0, ch=None, delay=0):
"""Send a 'Control Change' message."""
self.send_channel_message(CONTROL_CHANGE, cc, value, ch=ch, delay=delay)
def send_program_change(self, program=0, ch=None, delay=0):
"""Send a 'Program Change' message."""
self.send_channel_message(PROGRAM_CHANGE, program, ch=ch, delay=delay)
def send_channel_pressure(self, value=0, ch=None, delay=0):
"""Send a 'Polyphonic Pressure' (Aftertouch) message."""
self.send_channel_message(CHANNEL_PRESSURE, value, ch=ch, delay=delay)
def send_pitch_bend(self, value=8192, ch=None, delay=0):
"""Send a 'Program Change' message."""
self.send_channel_message(PITCH_BEND, value & 0x7f, (value >> 7) & 0x7f, ch=ch,
delay=delay)
def send_bank_select(self, bank=None, msb=None, lsb=None, ch=None, delay=0):
"""Send 'Bank Select' MSB and/or LSB 'Control Change' messages."""
if bank is not None:
            msb = (bank >> 7) & 0x7F
lsb = bank & 0x7F
if msb is not None:
self.send_control_change(BANK_SELECT_MSB, msb, ch=ch, delay=delay)
if lsb is not None:
self.send_control_change(BANK_SELECT_LSB, lsb, ch=ch, delay=delay)
def send_modulation(self, value=0, ch=None, delay=0):
"""Send a 'Modulation' (CC #1) 'Control Change' message."""
self.send_control_change(MODULATION, value, ch=ch, delay=delay)
def send_breath_controller(self, value=0, ch=None, delay=0):
"""Send a 'Breath Controller' (CC #3) 'Control Change' message."""
self.send_control_change(BREATH_CONTROLLER, value, ch=ch, delay=delay)
def send_foot_controller(self, value=0, ch=None, delay=0):
"""Send a 'Foot Controller' (CC #4) 'Control Change' message."""
self.send_control_change(FOOT_CONTROLLER, value, ch=ch, delay=delay)
def send_channel_volume(self, value=127, ch=None, delay=0):
"""Send a 'Volume' (CC #7) 'Control Change' message."""
self.send_control_change(CHANNEL_VOLUME, value, ch=ch, delay=delay)
def send_balance(self, value=63, ch=None, delay=0):
"""Send a 'Balance' (CC #8) 'Control Change' message."""
self.send_control_change(BALANCE, value, ch=ch, delay=delay)
def send_pan(self, value=63, ch=None, delay=0):
"""Send a 'Pan' (CC #10) 'Control Change' message."""
self.send_control_change(PAN, value, ch=ch, delay=delay)
def send_expression(self, value=127, ch=None, delay=0):
"""Send a 'Expression' (CC #11) 'Control Change' message."""
self.send_control_change(EXPRESSION_CONTROLLER, value, ch=ch, delay=delay)
def send_all_sound_off(self, ch=None, delay=0):
"""Send a 'All Sound Off' (CC #120) 'Control Change' message."""
self.send_control_change(ALL_SOUND_OFF, 0, ch=ch, delay=delay)
def send_reset_all_controllers(self, ch=None, delay=0):
"""Send a 'All Sound Off' (CC #121) 'Control Change' message."""
self.send_control_change(RESET_ALL_CONTROLLERS, 0, ch=ch, delay=delay)
def send_local_control(self, value=1, ch=None, delay=0):
"""Send a 'Local Control On/Off' (CC #122) 'Control Change' message."""
        self.send_control_change(122, 127 if value else 0, ch=ch, delay=delay)  # CC #122
def send_all_notes_off(self, ch=None, delay=0):
"""Send a 'All Notes Off' (CC #123) 'Control Change' message."""
self.send_control_change(ALL_NOTES_OFF, 0, ch=ch, delay=delay)
# add more convenience methods for other common MIDI events here...
def get_midiout(port, api="UNSPECIFIED"):
api = getattr(rtmidi, 'API_' + api)
midiout, name = open_midioutput(port, api=api, interactive=False, use_virtual=False)
return MidiOutWrapper(midiout, name)
def get_midiout_ports(api="UNSPECIFIED"):
mo = rtmidi.MidiOut(rtapi=getattr(rtmidi, 'API_' + api))
return sorted(mo.get_ports())
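# Usage sketch -- the port name below is an assumption, pick one returned by
# get_midiout_ports():
#
#   mout = get_midiout('MIDI Out 1')
#   mout.send_program_change(12, ch=1)
#   mout.send_note_on(60, 100)
#   mout.send_note_off(60, delay=480)  # 'delay' is handed to the sequencer as a delta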
| [
"[email protected]"
] | |
edaf6548d496e07a077b970fcdf68d7076a424c7 | 716d9e678c884fd9e9f07bbf57c7a0ec684f8255 | /foodboxes/app_items/migrations/0001_initial.py | 8934997852b104c017f9a76a5e7ef50064a6e946 | [] | no_license | arifgafizov/foodboxes_v.2.0 | e6716ba3ab3c0dd77bac212b90db8d710f46d495 | 1093a520e391fd409ba18bab341d6ffbec1104c7 | refs/heads/master | 2023-02-17T21:20:32.491143 | 2021-01-21T19:57:26 | 2021-01-21T19:57:26 | 330,394,733 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 767 | py | # Generated by Django 3.1.5 on 2021-01-10 16:33
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('description', models.TextField()),
('image', models.ImageField(default=None, null=True, upload_to='items_images')),
('weight', models.IntegerField()),
('price', models.DecimalField(decimal_places=2, max_digits=13)),
],
),
]
| [
"[email protected]"
] | |
0006a8010b47859f57692841a577d972fa0ffe63 | fcd02cbf7fae38f0b0d6a95deedc49d5993927da | /models/backbone/__init__.py | 5d8d9e12d55ba6f87789786da9b2fb6e2c00b36a | [
"Apache-2.0"
] | permissive | Highlightbeast/DBNet.pytorch | 7d5082c66516319fb8a02855c1a03be6c122a143 | d95a7dbd37b031f2cf1ca33c63f5658d29803242 | refs/heads/master | 2022-09-21T02:51:33.104828 | 2020-06-05T07:28:01 | 2020-06-05T07:28:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 786 | py | # -*- coding: utf-8 -*-
# @Time : 2019/8/23 21:54
# @Author : zhoujun
__all__ = ['build_backbone']
from .resnet import *
from .resnest import *
from .shufflenetv2 import *
from .MobilenetV3 import MobileNetV3
support_backbone = ['resnet18', 'deformable_resnet18', 'deformable_resnet50',
                    'resnet50', 'resnet34', 'resnet152',
'resnest50', 'resnest101', 'resnest200', 'resnest269',
'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0',
'MobileNetV3']
def build_backbone(backbone_name, **kwargs):
assert backbone_name in support_backbone, f'all support backbone is {support_backbone}'
backbone = eval(backbone_name)(**kwargs)
return backbone
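# Example -- kwargs are forwarded verbatim to the chosen backbone constructor
# ('pretrained' is an assumption about the resnet builders' signature):
#   model = build_backbone('resnet18', pretrained=True)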
| [
"[email protected]"
] | |
abcd8f62143d26c371224047ec2538617405b71b | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/pg_1627+006/sdB_pg_1627+006_lc.py | 3d8cb550156a2ce741e67bc7f9c22da1c2ea971c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[247.399583,0.530394], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_pg_1627+006/sdB_pg_1627+006_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
d06d2e0a911f5010f8561348d3ab54923e923e31 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_059/ch86_2020_06_22_20_42_01_067760.py | 2a606c4d7a824d5d1b2a4313ad94567cbeff57d6 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 941 | py |
with open('criptografado.txt', 'r') as arquivo:
    conteudo = arquivo.read()

# Swap the pairs a<->e, s<->z and b<->r in a single pass; replacing one
# letter at a time over the whole text would undo swaps made earlier.
tabela = str.maketrans('aeszbr', 'eazsrb')

with open('criptografado.txt', 'w') as arquivo2:
    arquivo2.write(conteudo.translate(tabela))
| [
"[email protected]"
] | |
461ccf1fcf588f87ddd2359e410e5de3eddd855e | ad113ffed76e72ed0a881a7a6d6a74ea9021e5bf | /tests_compare.py | 09f259dd46dda07db612be2449e8522ef61f31b4 | [] | no_license | biletboh/bitexchange | 03c0bfc04e2f103928c173f014a75b6ceea0def9 | 8d541d6bb82f5e3ff4c71cb65b609503ba6b9417 | refs/heads/master | 2021-01-22T10:46:46.717291 | 2017-05-30T14:43:37 | 2017-05-30T14:43:37 | 92,656,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,388 | py | import time
import os
import configparser
from bitfinex.client import TradeClient
from exmoclient import ExmoTradeClient
from compare import compare_exchange
# Set up configuration
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
conf = configparser.ConfigParser()
conf.read(os.path.join(BASE_DIR, 'settings.ini'))
# Get API keys
bitfinex_api = conf.get('bitexchange', 'BITFINEX_API_KEY')
bitfinex_secret = conf.get('bitexchange', 'BITFINEX_API_SECRET')
exmo_api = conf.get('bitexchange', 'EXMO_API_KEY')
exmo_secret = conf.get('bitexchange', 'EXMO_API_SECRET')
# Set up bitfinex Trade Client
tradeclient = TradeClient(bitfinex_api, bitfinex_secret)
# Set up Exmo Trade Client
tradeclient2 = ExmoTradeClient(exmo_api, exmo_secret)
# Simple Tests
print("Run compare algorithm simple tests")
# second exchange is cheaper than first
bitfinex_data = [2300, 2310]
exmo_data = [2000, 2010]
print('Test 1:')
compare_exchange(tradeclient, tradeclient2, bitfinex_data, exmo_data)
# first exchange is cheaper than second
bitfinex_data = [2000, 2010] # data is in a format [bid, ask]
exmo_data = [2300, 2310]
print('Test 2:')
compare_exchange(tradeclient, tradeclient2, bitfinex_data, exmo_data)
# an exchange difference is below 1.5%
bitfinex_data = [2000, 2010]
exmo_data = [2020, 2030]
print('Test 3:')
compare_exchange(tradeclient, tradeclient2, bitfinex_data, exmo_data)
| [
"[email protected]"
] | |
8db79c9a273b1cf103453d785a36dac40873619a | 928c53ea78be51eaf05e63f149fb291ec48be73e | /Linked_List_Cycle_II.py | 38b7ef80851bbce59fda03879bef014eaff77462 | [] | no_license | saurabhchris1/Algorithm-Pratice-Questions-LeetCode | 35021d8fc082ecac65d7970d9f83f9be904fb333 | ea4a7d6a78d86c8619f91a75594de8eea264bcca | refs/heads/master | 2022-12-10T16:50:50.678365 | 2022-12-04T10:12:18 | 2022-12-04T10:12:18 | 219,918,074 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,226 | py | # Given a linked list, return the node where the cycle begins.
# If there is no cycle, return null.
#
# There is a cycle in a linked list if there is some node in
# the list that can be reached again by continuously following
# the next pointer. Internally, pos is used to denote the index
# of the node that tail's next pointer is connected to. Note that pos is not passed as a parameter.
# Notice that you should not modify the linked list.
#
# Input: head = [3,2,0,-4], pos = 1
# Output: tail connects to node index 1
# Explanation: There is a cycle in the linked list, where tail connects to the second node.
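# Definition for singly-linked list (normally supplied by LeetCode's runner):
class ListNode(object):
    def __init__(self, x):
        self.val = x
        self.next = None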
class Solution:
def detectCycle(self, head):
if not head:
return None
intersection = self.findIntersection(head)
if not intersection:
return None
ptr1 = head
ptr2 = intersection
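        # Why this works: with a = distance from head to the cycle start,
        # b = distance from the cycle start to the meeting point and C the
        # cycle length, the fast pointer travelled 2(a + b) = a + b + kC,
        # so a = kC - b. Hence stepping both pointers one node at a time
        # makes them meet exactly at the cycle's start.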
while ptr1 != ptr2:
ptr1 = ptr1.next
ptr2 = ptr2.next
return ptr1
def findIntersection(self, head):
slow = head
fast = head
while fast and fast.next:
slow = slow.next
fast = fast.next.next
if slow == fast:
return fast
return None | [
"[email protected]"
] | |
5ca5396e7e9e6058085893cdf33e810e187ad30d | 82b946da326148a3c1c1f687f96c0da165bb2c15 | /sdk/python/pulumi_azure_native/storagecache/v20200301/storage_target.py | 3694ea282ed82b03a402dd9a6a13f5cfb2ccad63 | [
"Apache-2.0",
"BSD-3-Clause"
] | permissive | morrell/pulumi-azure-native | 3916e978382366607f3df0a669f24cb16293ff5e | cd3ba4b9cb08c5e1df7674c1c71695b80e443f08 | refs/heads/master | 2023-06-20T19:37:05.414924 | 2021-07-19T20:57:53 | 2021-07-19T20:57:53 | 387,815,163 | 0 | 0 | Apache-2.0 | 2021-07-20T14:18:29 | 2021-07-20T14:18:28 | null | UTF-8 | Python | false | false | 17,683 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['StorageTargetArgs', 'StorageTarget']
@pulumi.input_type
class StorageTargetArgs:
def __init__(__self__, *,
cache_name: pulumi.Input[str],
resource_group_name: pulumi.Input[str],
target_type: pulumi.Input[Union[str, 'StorageTargetType']],
clfs: Optional[pulumi.Input['ClfsTargetArgs']] = None,
junctions: Optional[pulumi.Input[Sequence[pulumi.Input['NamespaceJunctionArgs']]]] = None,
nfs3: Optional[pulumi.Input['Nfs3TargetArgs']] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningStateType']]] = None,
storage_target_name: Optional[pulumi.Input[str]] = None,
unknown: Optional[pulumi.Input['UnknownTargetArgs']] = None):
"""
The set of arguments for constructing a StorageTarget resource.
:param pulumi.Input[str] cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.
:param pulumi.Input[str] resource_group_name: Target resource group.
:param pulumi.Input[Union[str, 'StorageTargetType']] target_type: Type of the Storage Target.
:param pulumi.Input['ClfsTargetArgs'] clfs: Properties when targetType is clfs.
:param pulumi.Input[Sequence[pulumi.Input['NamespaceJunctionArgs']]] junctions: List of Cache namespace junctions to target for namespace associations.
:param pulumi.Input['Nfs3TargetArgs'] nfs3: Properties when targetType is nfs3.
:param pulumi.Input[Union[str, 'ProvisioningStateType']] provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property
:param pulumi.Input[str] storage_target_name: Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.
:param pulumi.Input['UnknownTargetArgs'] unknown: Properties when targetType is unknown.
"""
pulumi.set(__self__, "cache_name", cache_name)
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "target_type", target_type)
if clfs is not None:
pulumi.set(__self__, "clfs", clfs)
if junctions is not None:
pulumi.set(__self__, "junctions", junctions)
if nfs3 is not None:
pulumi.set(__self__, "nfs3", nfs3)
if provisioning_state is not None:
pulumi.set(__self__, "provisioning_state", provisioning_state)
if storage_target_name is not None:
pulumi.set(__self__, "storage_target_name", storage_target_name)
if unknown is not None:
pulumi.set(__self__, "unknown", unknown)
@property
@pulumi.getter(name="cacheName")
def cache_name(self) -> pulumi.Input[str]:
"""
Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.
"""
return pulumi.get(self, "cache_name")
@cache_name.setter
def cache_name(self, value: pulumi.Input[str]):
pulumi.set(self, "cache_name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Target resource group.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="targetType")
def target_type(self) -> pulumi.Input[Union[str, 'StorageTargetType']]:
"""
Type of the Storage Target.
"""
return pulumi.get(self, "target_type")
@target_type.setter
def target_type(self, value: pulumi.Input[Union[str, 'StorageTargetType']]):
pulumi.set(self, "target_type", value)
@property
@pulumi.getter
def clfs(self) -> Optional[pulumi.Input['ClfsTargetArgs']]:
"""
Properties when targetType is clfs.
"""
return pulumi.get(self, "clfs")
@clfs.setter
def clfs(self, value: Optional[pulumi.Input['ClfsTargetArgs']]):
pulumi.set(self, "clfs", value)
@property
@pulumi.getter
def junctions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['NamespaceJunctionArgs']]]]:
"""
List of Cache namespace junctions to target for namespace associations.
"""
return pulumi.get(self, "junctions")
@junctions.setter
def junctions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['NamespaceJunctionArgs']]]]):
pulumi.set(self, "junctions", value)
@property
@pulumi.getter
def nfs3(self) -> Optional[pulumi.Input['Nfs3TargetArgs']]:
"""
Properties when targetType is nfs3.
"""
return pulumi.get(self, "nfs3")
@nfs3.setter
def nfs3(self, value: Optional[pulumi.Input['Nfs3TargetArgs']]):
pulumi.set(self, "nfs3", value)
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> Optional[pulumi.Input[Union[str, 'ProvisioningStateType']]]:
"""
ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property
"""
return pulumi.get(self, "provisioning_state")
@provisioning_state.setter
def provisioning_state(self, value: Optional[pulumi.Input[Union[str, 'ProvisioningStateType']]]):
pulumi.set(self, "provisioning_state", value)
@property
@pulumi.getter(name="storageTargetName")
def storage_target_name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.
"""
return pulumi.get(self, "storage_target_name")
@storage_target_name.setter
def storage_target_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_target_name", value)
@property
@pulumi.getter
def unknown(self) -> Optional[pulumi.Input['UnknownTargetArgs']]:
"""
Properties when targetType is unknown.
"""
return pulumi.get(self, "unknown")
@unknown.setter
def unknown(self, value: Optional[pulumi.Input['UnknownTargetArgs']]):
pulumi.set(self, "unknown", value)
class StorageTarget(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cache_name: Optional[pulumi.Input[str]] = None,
clfs: Optional[pulumi.Input[pulumi.InputType['ClfsTargetArgs']]] = None,
junctions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NamespaceJunctionArgs']]]]] = None,
nfs3: Optional[pulumi.Input[pulumi.InputType['Nfs3TargetArgs']]] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningStateType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_target_name: Optional[pulumi.Input[str]] = None,
target_type: Optional[pulumi.Input[Union[str, 'StorageTargetType']]] = None,
unknown: Optional[pulumi.Input[pulumi.InputType['UnknownTargetArgs']]] = None,
__props__=None):
"""
Type of the Storage Target.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] cache_name: Name of Cache. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.
:param pulumi.Input[pulumi.InputType['ClfsTargetArgs']] clfs: Properties when targetType is clfs.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NamespaceJunctionArgs']]]] junctions: List of Cache namespace junctions to target for namespace associations.
:param pulumi.Input[pulumi.InputType['Nfs3TargetArgs']] nfs3: Properties when targetType is nfs3.
:param pulumi.Input[Union[str, 'ProvisioningStateType']] provisioning_state: ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property
:param pulumi.Input[str] resource_group_name: Target resource group.
:param pulumi.Input[str] storage_target_name: Name of the Storage Target. Length of name must be not greater than 80 and chars must be in list of [-0-9a-zA-Z_] char class.
:param pulumi.Input[Union[str, 'StorageTargetType']] target_type: Type of the Storage Target.
:param pulumi.Input[pulumi.InputType['UnknownTargetArgs']] unknown: Properties when targetType is unknown.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StorageTargetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Type of the Storage Target.
:param str resource_name: The name of the resource.
:param StorageTargetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StorageTargetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
cache_name: Optional[pulumi.Input[str]] = None,
clfs: Optional[pulumi.Input[pulumi.InputType['ClfsTargetArgs']]] = None,
junctions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['NamespaceJunctionArgs']]]]] = None,
nfs3: Optional[pulumi.Input[pulumi.InputType['Nfs3TargetArgs']]] = None,
provisioning_state: Optional[pulumi.Input[Union[str, 'ProvisioningStateType']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
storage_target_name: Optional[pulumi.Input[str]] = None,
target_type: Optional[pulumi.Input[Union[str, 'StorageTargetType']]] = None,
unknown: Optional[pulumi.Input[pulumi.InputType['UnknownTargetArgs']]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StorageTargetArgs.__new__(StorageTargetArgs)
if cache_name is None and not opts.urn:
raise TypeError("Missing required property 'cache_name'")
__props__.__dict__["cache_name"] = cache_name
__props__.__dict__["clfs"] = clfs
__props__.__dict__["junctions"] = junctions
__props__.__dict__["nfs3"] = nfs3
__props__.__dict__["provisioning_state"] = provisioning_state
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["storage_target_name"] = storage_target_name
if target_type is None and not opts.urn:
raise TypeError("Missing required property 'target_type'")
__props__.__dict__["target_type"] = target_type
__props__.__dict__["unknown"] = unknown
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:storagecache/v20200301:StorageTarget"), pulumi.Alias(type_="azure-native:storagecache:StorageTarget"), pulumi.Alias(type_="azure-nextgen:storagecache:StorageTarget"), pulumi.Alias(type_="azure-native:storagecache/v20190801preview:StorageTarget"), pulumi.Alias(type_="azure-nextgen:storagecache/v20190801preview:StorageTarget"), pulumi.Alias(type_="azure-native:storagecache/v20191101:StorageTarget"), pulumi.Alias(type_="azure-nextgen:storagecache/v20191101:StorageTarget"), pulumi.Alias(type_="azure-native:storagecache/v20201001:StorageTarget"), pulumi.Alias(type_="azure-nextgen:storagecache/v20201001:StorageTarget"), pulumi.Alias(type_="azure-native:storagecache/v20210301:StorageTarget"), pulumi.Alias(type_="azure-nextgen:storagecache/v20210301:StorageTarget")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(StorageTarget, __self__).__init__(
'azure-native:storagecache/v20200301:StorageTarget',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StorageTarget':
"""
Get an existing StorageTarget resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StorageTargetArgs.__new__(StorageTargetArgs)
__props__.__dict__["clfs"] = None
__props__.__dict__["junctions"] = None
__props__.__dict__["location"] = None
__props__.__dict__["name"] = None
__props__.__dict__["nfs3"] = None
__props__.__dict__["provisioning_state"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["target_type"] = None
__props__.__dict__["type"] = None
__props__.__dict__["unknown"] = None
return StorageTarget(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def clfs(self) -> pulumi.Output[Optional['outputs.ClfsTargetResponse']]:
"""
Properties when targetType is clfs.
"""
return pulumi.get(self, "clfs")
@property
@pulumi.getter
def junctions(self) -> pulumi.Output[Optional[Sequence['outputs.NamespaceJunctionResponse']]]:
"""
List of Cache namespace junctions to target for namespace associations.
"""
return pulumi.get(self, "junctions")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Region name string.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the Storage Target.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def nfs3(self) -> pulumi.Output[Optional['outputs.Nfs3TargetResponse']]:
"""
Properties when targetType is nfs3.
"""
return pulumi.get(self, "nfs3")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> pulumi.Output[Optional[str]]:
"""
ARM provisioning state, see https://github.com/Azure/azure-resource-manager-rpc/blob/master/v1.0/Addendum.md#provisioningstate-property
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
The system meta data relating to this resource.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter(name="targetType")
def target_type(self) -> pulumi.Output[str]:
"""
Type of the Storage Target.
"""
return pulumi.get(self, "target_type")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Type of the Storage Target; Microsoft.StorageCache/Cache/StorageTarget
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def unknown(self) -> pulumi.Output[Optional['outputs.UnknownTargetResponse']]:
"""
Properties when targetType is unknown.
"""
return pulumi.get(self, "unknown")
| [
"[email protected]"
] | |
724d42efc164f460aadf422bde792465cd5a9eb8 | d4129d743b958e6ed71af445c0dd7baa7f2ad6e4 | /teambeat/admin.py | 3be24de4fad0c0b5aac5759d5cef320a24ead2c2 | [] | no_license | astromitts/team-beat | f2077bdeaa457bb8cd11094f14a75bdf170a9b0e | a49608890e4fe2b238cbec9c0e3d9629aae51c55 | refs/heads/main | 2023-08-10T16:11:14.231042 | 2020-12-09T14:20:04 | 2020-12-09T14:20:04 | 319,043,973 | 0 | 0 | null | 2021-09-22T19:42:46 | 2020-12-06T13:48:36 | Python | UTF-8 | Python | false | false | 439 | py | from django.contrib import admin
from teambeat.models import (
Organization,
OrganizationInvitation,
OrganizationUser,
Team,
TeamAdmin,
TeamMember,
TeamMemberStatus
)
admin.site.register(Organization)
admin.site.register(OrganizationUser)
admin.site.register(OrganizationInvitation)
admin.site.register(Team)
admin.site.register(TeamAdmin)
admin.site.register(TeamMember)
admin.site.register(TeamMemberStatus)
| [
"[email protected]"
] | |
f4f16e0aed94288f25b1a7cbcc39162959543704 | d554b1aa8b70fddf81da8988b4aaa43788fede88 | /5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/223/users/4194/codes/1644_2711.py | 77e78d3042d279f2c36b818186c04f26f7708ed1 | [] | no_license | JosephLevinthal/Research-projects | a3bc3ca3b09faad16f5cce5949a2279cf14742ba | 60d5fd6eb864a5181f4321e7a992812f3c2139f9 | refs/heads/master | 2022-07-31T06:43:02.686109 | 2020-05-23T00:24:26 | 2020-05-23T00:24:26 | 266,199,309 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | valor = float(input("Qual o seu valor disponivel?"))
tickets = int(input("Quantos tickets deseja comprar?"))
vt = float(input("Quanto custa um ticket?"))
passes = int(input("Quantos passes de onibus deseja comprar?"))
vp = float(input("Valor dos passes"))
if(valor >= tickets*vt + passes*vp):
print("SUFICIENTE")
else:
print("INSUFICIENTE") | [
"[email protected]"
] | |
e74457c0c58813616881e9d64c3a8320e2e88c3e | d8b1362113e4f3302ab1d04d5f57c1b4c8c44b6a | /leetcode_py2/Medium 73. Set Matrix Zeroes.py | 18cf03f8dbfd6347ba627f0749d6d0ea7e565484 | [] | no_license | mohki890/Danny-Huang | 06d12b2e1ac110684cbf114db46079cda5a380eb | 3eb27b793b5a819f3fb5e166b02e04d593f1bf37 | refs/heads/master | 2020-05-16T00:22:11.602739 | 2019-03-29T15:00:14 | 2019-03-29T15:00:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | # -*- coding: utf-8 -*-
# !/usr/bin/env python
"""
This module is provided by
Authors: hxk11111
Date: 2019/1/5
File: Medium 73. Set Matrix Zeroes.py
"""
'''
# Given an m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.
Example 1:
Input:
[
[1,1,1],
[1,0,1],
[1,1,1]
]
Output:
[
[1,0,1],
[0,0,0],
[1,0,1]
]
Example 2:
Input:
[
[0,1,2,0],
[3,4,5,2],
[1,3,1,5]
]
Output:
[
[0,0,0,0],
[0,4,5,0],
[0,3,1,0]
]
Follow up:
A straight forward solution using O(mn) space is probably a bad idea.
A simple improvement uses O(m + n) space, but still not the best solution.
Could you devise a constant space solution?
'''
class Solution(object):
def setZeroes(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
row = len(matrix)
col = len(matrix[0])
for r in range(row):
for c in range(col):
if matrix[r][c] == 0:
for i in range(row):
if matrix[i][c] != 0:
matrix[i][c] = "."
for j in range(col):
if matrix[r][j] != 0:
matrix[r][j] = "."
for r in range(row):
for c in range(col):
if matrix[r][c] == ".":
matrix[r][c] = 0
if __name__ == '__main__':
s = Solution()
l = [
[0, 1, 2, 0],
[3, 4, 5, 2],
[1, 3, 1, 5]
]
s.setZeroes(l)
print l
| [
"[email protected]"
] | |
a395b580244b142aed88453bc740a4d78ac26421 | 074e2815a0c3dbb03cae346560c27e409a4444e4 | /drivers/ssd1351/ssd1351_16bit.py | 9d2c75c185149003b96bc9cac270e8e172672775 | [
"MIT"
] | permissive | maysrp/micropython-nano-gui | 9127e1cbb024810ac2920c8227a448d6cf4678b1 | 5dbcc65828106cfb15c544bb86cc7380a9a83c47 | refs/heads/master | 2023-01-19T18:06:52.286059 | 2020-11-29T10:26:11 | 2020-11-29T10:26:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,266 | py | # SSD1351_16bit.py MicroPython driver for Adafruit color OLED displays.
# Adafruit 1.5" 128*128 OLED display: https://www.adafruit.com/product/1431
# Adafruit 1.27" 128*96 display https://www.adafruit.com/product/1673
# For wiring details see drivers/ADAFRUIT.md in this repo.
# This driver is based on the Adafruit C++ library for Arduino
# https://github.com/adafruit/Adafruit-SSD1351-library.git
# The MIT License (MIT)
# Copyright (c) 2019 Peter Hinch
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import framebuf
import utime
import gc
import micropython
from uctypes import addressof
import sys
# https://github.com/peterhinch/micropython-nano-gui/issues/2
# The ESP32 does not work reliably in SPI mode 1,1. Waveforms look correct.
# Keep 0,0 on STM as testing was done in that mode.
_bs = 0 if sys.platform == 'esp32' else 1 # SPI bus state
# Initialisation commands in cmd_init:
# 0xfd, 0x12, 0xfd, 0xb1, # Unlock command mode
# 0xae, # display off (sleep mode)
# 0xb3, 0xf1, # clock div
# 0xca, 0x7f, # mux ratio
# 0xa0, 0x74, # setremap 0x74
# 0x15, 0, 0x7f, # setcolumn
# 0x75, 0, 0x7f, # setrow
# 0xa1, 0, # set display start line
# 0xa2, 0, # displayoffset
# 0xb5, 0, # setgpio
# 0xab, 1, # functionselect: serial interface, internal Vdd regulator
# 0xb1, 0x32, # Precharge
# 0xbe, 0x05, # vcommh
# 0xa6, # normaldisplay
# 0xc1, 0xc8, 0x80, 0xc8, # contrast abc
# 0xc7, 0x0f, # Master contrast
# 0xb4, 0xa0, 0xb5, 0x55, # set vsl (see datasheet re ext circuit)
# 0xb6, 1, # Precharge 2
# 0xaf, # Display on
# SPI baudrate: Pyboard can produce 10.5MHz or 21MHz. Datasheet gives max of 20MHz.
# Attempt to use 21MHz failed but might work on a PCB or with very short leads.
class SSD1351(framebuf.FrameBuffer):
# Convert r, g, b in range 0-255 to a 16 bit colour value RGB565
# acceptable to hardware: rrrrrggggggbbbbb
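    # Worked examples, computed from the expression below:
    #   rgb(255, 0, 0) == 0x1f00, rgb(0, 255, 0) == 0xe007, rgb(0, 0, 255) == 0x00f8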
@staticmethod
def rgb(r, g, b):
return ((r & 0xf8) << 5) | ((g & 0x1c) << 11) | (b & 0xf8) | ((g & 0xe0) >> 5)
def __init__(self, spi, pincs, pindc, pinrs, height=128, width=128):
if height not in (96, 128):
raise ValueError('Unsupported height {}'.format(height))
self.spi = spi
self.rate = 11000000 # See baudrate note above.
self.pincs = pincs
self.pindc = pindc # 1 = data 0 = cmd
self.height = height # Required by Writer class
self.width = width
# Save color mode for use by writer_gui (blit)
self.mode = framebuf.RGB565
gc.collect()
self.buffer = bytearray(self.height * self.width * 2)
super().__init__(self.buffer, self.width, self.height, self.mode)
self.mvb = memoryview(self.buffer)
pinrs(0) # Pulse the reset line
utime.sleep_ms(1)
pinrs(1)
utime.sleep_ms(1)
# See above comment to explain this allocation-saving gibberish.
self._write(b'\xfd\x12\xfd\xb1\xae\xb3\xf1\xca\x7f\xa0\x74'\
b'\x15\x00\x7f\x75\x00\x7f\xa1\x00\xa2\x00\xb5\x00\xab\x01'\
b'\xb1\x32\xbe\x05\xa6\xc1\xc8\x80\xc8\xc7\x0f'\
b'\xb4\xa0\xb5\x55\xb6\x01\xaf', 0)
self.show()
gc.collect()
def _write(self, mv, dc):
self.spi.init(baudrate=self.rate, polarity=_bs, phase=_bs)
self.pincs(1)
self.pindc(dc)
self.pincs(0)
self.spi.write(bytes(mv))
self.pincs(1)
# Write lines from the framebuf out of order to match the mapping of the
# SSD1351 RAM to the OLED device.
def show(self):
mvb = self.mvb
bw = self.width * 2 # Width in bytes
self._write(b'\x5c', 0) # Enable data write
if self.height == 128:
for l in range(128):
l0 = (95 - l) % 128 # 95 94 .. 1 0 127 126 .. 96
start = l0 * self.width * 2
self._write(mvb[start : start + bw], 1) # Send a line
else:
for l in range(128):
if l < 64:
start = (63 -l) * self.width * 2 # 63 62 .. 1 0
elif l < 96:
start = 0
else:
start = (191 - l) * self.width * 2 # 127 126 .. 95
self._write(mvb[start : start + bw], 1) # Send a line
| [
"[email protected]"
] | |
6c459b34f8f0ab75e9f04a8db8d170bea67c1736 | 39e647e9ec8524a1cee90ef15f37a3d3bbf8ac43 | /poet/trunk/pythonLibs/Imaging-1.1.7/Scripts/pilconvert.py | 71b1fe3830cf45b22540f16176b2ca141291253c | [
"LicenseRef-scancode-secret-labs-2011"
] | permissive | AgileAdaptiveTools/POETTools | 85158f043e73b430c1d19a172b75e028a15c2018 | 60244865dd850a3e7346f9c6c3daf74ea1b02448 | refs/heads/master | 2021-01-18T14:46:08.025574 | 2013-01-28T19:18:11 | 2013-01-28T19:18:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,366 | py | #! /usr/local/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
import site
import getopt, string, sys
from PIL import Image
def usage():
print "PIL Convert 0.5/1998-12-30 -- convert image files"
print "Usage: pilconvert [option] infile outfile"
print
print "Options:"
print
print " -c <format> convert to format (default is given by extension)"
print
print " -g convert to greyscale"
print " -p convert to palette image (using standard palette)"
print " -r convert to rgb"
print
print " -o optimize output (trade speed for size)"
print " -q <value> set compression quality (0-100, JPEG only)"
print
print " -f list supported file formats"
sys.exit(1)
if len(sys.argv) == 1:
usage()
try:
opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error, v:
print v
sys.exit(1)
format = None
convert = None
options = { }
for o, a in opt:
if o == "-f":
Image.init()
id = Image.ID[:]
id.sort()
print "Supported formats (* indicates output format):"
for i in id:
if Image.SAVE.has_key(i):
print i+"*",
else:
print i,
sys.exit(1)
elif o == "-c":
format = a
if o == "-g":
convert = "L"
elif o == "-p":
convert = "P"
elif o == "-r":
convert = "RGB"
elif o == "-o":
options["optimize"] = 1
elif o == "-q":
options["quality"] = string.atoi(a)
if len(argv) != 2:
usage()
try:
im = Image.open(argv[0])
if convert and im.mode != convert:
im.draft(convert, im.size)
im = im.convert(convert)
if format:
apply(im.save, (argv[1], format), options)
else:
apply(im.save, (argv[1],), options)
except:
print "cannot convert image",
print "(%s:%s)" % (sys.exc_type, sys.exc_value)
| [
"[email protected]"
] | |
22ee1291f7c8806b76361ad8a451f24e1c1d6079 | e573161a9d4fc74ef4debdd9cfd8956bdd1d0416 | /src/products/models/order.py | 4c07332ebf2b2204eeff6db64f51f087e3f38f77 | [] | no_license | tanjibpa/rx-verify | a11c471afc628524bf95103711102258e6e04c19 | 3947fd2f9a640b422014d1857b9377e42d8961a5 | refs/heads/main | 2023-02-23T08:16:43.910998 | 2021-01-23T07:07:43 | 2021-01-23T07:07:43 | 331,103,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 526 | py | from django.db import models
from base.models import BaseModel
from .raw_material import RawMaterial
from .product import Product
class Order(BaseModel):
raw_materials = models.ManyToManyField(RawMaterial)
product = models.ForeignKey(Product, on_delete=models.CASCADE)
quantity = models.IntegerField(default=1)
approved = models.BooleanField(default=False)
class Meta:
db_table = "orders"
verbose_name_plural = "Orders"
verbose_name = "Order"
ordering = ["-updated_at"]
| [
"[email protected]"
] | |
35418b217181a0478eb1d238b23420bc0c421bbf | 5154dbf4eee6ea6499957bd1e6b6860abcb3d85a | /Face-Recognition/recognize_faces_image.py | b53c526e33bc593d8f67f4668ff14dcfb3734ed7 | [] | no_license | sayands/opencv-implementations | 876c345a6842335d70b2d9b27e746da5a6fd938f | c8f0c7b9dca5e6d874b863bd70e4ec3898f6e7d5 | refs/heads/master | 2020-03-19T11:51:54.513186 | 2018-12-22T05:48:48 | 2018-12-22T05:48:48 | 136,481,409 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,537 | py | # import the necessary packages
import face_recognition
import argparse
import pickle
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-e", "--encodings", required = True, help ="Path to serialized db of facial encodings")
ap.add_argument("-i", "--image", required = True, help = "path to input image")
ap.add_argument("-d", "--detection-method", type = str, default = "hog", help ="face detection model to use either hog or cnn")
args = vars(ap.parse_args())
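# Example invocation (illustrative; the file names below are placeholders):
#   python recognize_faces_image.py --encodings encodings.pickle \
#       --image examples/example_01.png --detection-method hog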
# load the known faces and embeddings
print("[INFO] loading encodings...")
data = pickle.loads(open(args["encodings"], "rb").read())
# load the input image and convert it from BGR to RGB
image = cv2.imread(args["image"])
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# detect the (x, y) coordinates of the bounding boxes corresponding
# to each face in the input image, then compute the facial
# embeddings for each face
print("[INFO] recognizing faces...")
boxes = face_recognition.face_locations(rgb, model = args["detection_method"])
encodings = face_recognition.face_encodings(rgb, boxes)
# initialize the list of names for each face detected
names = []
# loop over the facial embeddings
for encoding in encodings:
# attempt to match each face in the input image to our known encodings
matches = face_recognition.compare_faces(data["encodings"], encoding)
name = "Unknown"
# check to see if we have found a match
if True in matches:
        # find the indexes of all matched faces, then initialize a
        # dictionary to count the total number of times each face was
        # matched
matchedIdxs = [i for (i,b) in enumerate(matches) if b]
counts = {}
        # loop over the matched indexes and maintain a count for each
        # recognized face
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
# determine the recognized face with the largest number of votes
name = max(counts, key = counts.get)
# update the list of names
names.append(name)
# loop over the recognized faces
for ((top, right, bottom, left), name) in zip(boxes, names):
# draw the predicted face name on the image
    cv2.rectangle(image, (left, top), (right, bottom), (0, 255, 0), 2)
y = top - 15 if top - 15 > 15 else top + 15
cv2.putText(image, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 2)
# show the output image
cv2.imshow("Image", image)
cv2.waitKey(0) | [
"[email protected]"
] | |
e1c8620a560ca4069bcc80749ec00da1a6b6bace | 87277cddfc489dd7d3837ffccda2f11bb4ad43cc | /py/Task198.py | 5b0fbec9bf247b0e9b9e05389fafae6b23ba0644 | [] | no_license | rain-zhao/leetcode | 22c01d1803af7dd66164a204e6dc718e6bab6f0e | 8d47147f1c78896d7021aede767b5c659cd47035 | refs/heads/master | 2022-05-29T10:54:14.709070 | 2022-05-14T09:38:05 | 2022-05-14T09:38:05 | 242,631,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,417 | py | class Solution:
def rob(self, nums: [int]) -> int:
if not nums:
return 0
if len(nums) == 1:
return nums[0]
# init
dp = [[0, 0] for _ in range(len(nums))]
dp[0] = [0, nums[0]]
for i, num in enumerate(nums[1:], 1):
dp[i][0] = max(dp[i-1][0], dp[i-1][1])
dp[i][1] = dp[i-1][0]+num
return max(dp[-1][0], dp[-1][1])
def rob2(self, nums: [int]) -> int:
if not nums:
return 0
if len(nums) == 1:
return nums[0]
# init
dp = [0] * len(nums)
dp[0] = nums[0]
dp[1] = max(nums[0], nums[1])
for i, num in enumerate(nums[2:], 2):
dp[i] = max(dp[i-2]+num, dp[i-1])
return dp[-1]
def rob3(self, nums: [int]) -> int:
if not nums:
return 0
if len(nums) == 1:
return nums[0]
# init
do = nums[0]
nodo = 0
# loop
for num in nums[1:]:
do, nodo = nodo + num, max(do, nodo)
return max(do, nodo)
def rob4(self, nums: [int]) -> int:
if not nums:
return 0
if len(nums) == 1:
return nums[0]
# init
fst = nums[0]
sed = max(nums[0], nums[1])
# loop
for num in nums[2:]:
fst, sed = sed, max(fst + num, sed)
return sed
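# Quick self-check (added for illustration; not part of the original
# solution). All four variants should agree; for [2, 7, 9, 3, 1] the best
# plan robs 2 + 9 + 1 = 12.
if __name__ == '__main__':
    s = Solution()
    nums = [2, 7, 9, 3, 1]
    assert s.rob(nums) == s.rob2(nums) == s.rob3(nums) == s.rob4(nums) == 12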
| [
"[email protected]"
] | |
328ae953d1241d177cd632306190037e5ea3a1da | 1078c61f2c6d9fe220117d4c0fbbb09f1a67f84c | /paws/lib/python2.7/site-packages/euca2ools-3.4.1_2_g6b3f62f2-py2.7.egg/EGG-INFO/scripts/euare-servercertlistbypath | 5273dd6177c560a319e8eaa8198e7b0d60df4f5c | [
"MIT"
] | permissive | cirobessa/receitas-aws | c21cc5aa95f3e8befb95e49028bf3ffab666015c | b4f496050f951c6ae0c5fa12e132c39315deb493 | refs/heads/master | 2021-05-18T06:50:34.798771 | 2020-03-31T02:59:47 | 2020-03-31T02:59:47 | 251,164,945 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 235 | #!/media/ciro/LOCALDRV/A_DESENVOLVIMENTO/AWS/receitas/paws/bin/python -tt
import euca2ools.commands.iam.listservercertificates
if __name__ == '__main__':
euca2ools.commands.iam.listservercertificates.ListServerCertificates.run()
| [
"[email protected]"
] | ||
581477f217c0de64acb027fb57b07564e4b2d1eb | 0df898bf192b6ad388af160ecbf6609445c34f96 | /middleware/backend/app/alembic/versions/20201021_001556_.py | ad0a9945ac1de56f57047d383df661ef635fc9e4 | [] | no_license | sasano8/magnet | a5247e6eb0a7153d6bbca54296f61194925ab3dc | 65191c877f41c632d29133ebe4132a0bd459f752 | refs/heads/master | 2023-01-07T10:11:38.599085 | 2020-11-13T02:42:41 | 2020-11-13T02:42:41 | 298,334,432 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 666 | py | """empty message
Revision ID: 20201021_001556
Revises: 20201021_001530
Create Date: 2020-10-21 00:15:57.520730
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '20201021_001556'
down_revision = '20201021_001530'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint(None, 'trade_job', ['name'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'trade_job', type_='unique')
# ### end Alembic commands ###
| [
"[email protected]"
] | |
e207e8460215041552640cba1dd67c59d79db97c | 956fd28ea7a7ec83b62cd85691c512e735e60b3a | /bin/azure/mgmt/eventgrid/operations/__init__.py | e7dcca6122851fa842b7b3ecff4c908c0301d34f | [
"MIT"
] | permissive | zdmc23/bash-lambda-layer | 5517a27809d33801c65504c11f867d0d511b2e1c | e762df0189cfb894dab2d96bae1655b8857d5efb | refs/heads/master | 2021-01-05T02:32:20.765963 | 2020-02-16T09:41:47 | 2020-02-16T09:41:47 | 240,846,840 | 0 | 0 | MIT | 2020-02-16T06:59:55 | 2020-02-16T06:59:54 | null | UTF-8 | Python | false | false | 1,000 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .domains_operations import DomainsOperations
from .domain_topics_operations import DomainTopicsOperations
from .event_subscriptions_operations import EventSubscriptionsOperations
from .operations import Operations
from .topics_operations import TopicsOperations
from .topic_types_operations import TopicTypesOperations
__all__ = [
'DomainsOperations',
'DomainTopicsOperations',
'EventSubscriptionsOperations',
'Operations',
'TopicsOperations',
'TopicTypesOperations',
]
| [
"[email protected]"
] | |
0e3e892d28c69731125eab1f400dfb5cdf382315 | e0b5a869c687fea3c9dda138734d25b3c5e68b88 | /9. Decorators/9. 2 Exercises/Problem 1- Logged.py | 22f6e5741de525740b650eedb601357d66b590f8 | [] | no_license | asen-krasimirov/Python-OOP-Course | b74de5f83fb3e287cb206d48c3db79d15657c902 | c6df3830168d8b8d780d4fb4ccfe67d1bb350f7e | refs/heads/main | 2023-02-01T04:09:33.796334 | 2020-12-15T14:56:59 | 2020-12-15T14:56:59 | 309,389,119 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 443 | py |
def logged(func):
def wrapper(*args, **kwargs):
func_name = func.__name__
        # show positional args plus keyword values in the log line, but call
        # the wrapped function with its arguments intact
        parameters = args + tuple(kwargs.values())
        result = func(*args, **kwargs)
information = f"you called {func_name}{parameters}\nit returned {result}"
return information
return wrapper
@logged
def func(*args):
return 3 + len(args)
@logged
def sum_func(a, b):
return a + b
print(func(4, 4, 4))
print(sum_func(1, 4))
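# Expected output (illustrative, given the kwargs-aware wrapper above):
#   you called func(4, 4, 4)
#   it returned 6
#   you called sum_func(1, 4)
#   it returned 5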
| [
"[email protected]"
] | |
edacf9cfce5b069972303074f24fded09f89fe81 | 19c2f173d3d5384710b9271853fc3e1e60a054e2 | /env/bin/pip | 28e2a8a511905191e3cd014074c7318b9333df4b | [] | no_license | rahonur/hellosisters | 8cd1332ccb7a347c00383bb643bd14a27f781b9f | 94da959d4be276e280ca5e1049a1c566523c9d60 | refs/heads/main | 2023-01-24T02:57:31.518942 | 2020-11-30T22:53:59 | 2020-11-30T22:53:59 | 314,942,537 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 265 | #!/home/vagrant/src/hello-sisters-master/env/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"[email protected]"
] | ||
e2fe714bb3d00b6ae53efd2431b00c1a3ed70786 | b56ca08eb67163d3ccb02ff0775f59a2d971d910 | /backend/moderation/migrations/0009_merge_20191003_0725.py | f5847052ee467c5c2157709054ed2f2005b4a4ae | [] | no_license | globax89/dating-work | f23d07f98dcb5efad62a1c91cdb04b1a8ef021f7 | bb3d09c4e2f48ecd3d73e664ab8e3982fc97b534 | refs/heads/master | 2022-12-11T22:45:19.360096 | 2019-10-16T07:01:40 | 2019-10-16T07:01:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 279 | py | # Generated by Django 2.2.4 on 2019-10-03 07:25
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('moderation', '0008_auto_20190930_2116'),
('moderation', '0008_auto_20190930_0958'),
]
operations = [
]
| [
"[email protected]"
] | |
915597ed60c80192a3ea7c652ca9ce6cd0a4d85d | 9fcc0fc4e8fe9fd2618ad9506594811685cbe065 | /lt_cmu.py | 73895d53c24cfe4949402d2e2e037a74250b57ba | [] | no_license | cheezebone/Timit_Phoneme_Recognition | 36a1617a02449184c1cc6f5f4b91c8f30bf1b20f | b7fc8318d160828d03371fee3424ca494387a102 | refs/heads/master | 2023-01-19T02:17:11.012204 | 2020-11-23T12:33:06 | 2020-11-23T12:33:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 466 | py | from persephone import corpus
from persephone import corpus_reader
from persephone import rnn_ctc
lt_corpus = corpus.Corpus("fbank_and_pitch", "phonemes", "lt")
lt_corpus = corpus_reader.CorpusReader(lt_corpus, batch_size=32)
model = rnn_ctc.Model("exp_cmu/", lt_corpus, num_layers=3, hidden_size=256)
# model.train(max_epochs=50)
model.eval(restore_model_path="exp_cmu/model/model_best.ckpt")
# model.transcribe(restore_model_path="exp_cmu/model/model_best.ckpt")
| [
"[email protected]"
] | |
dbffe0154bc07e3e8416502de5c23ab330d083b1 | 7b60c00eb1a45fb8fb58aefaf786a5c29700ed7e | /payment_gateways/migrations/0010_auto_20180828_1359.py | 8d6ce36b12732ee6fe2536c64631d11c09c2feff | [] | no_license | kshutashvili/mfo | 521b126146b7582ca6b56bc6bb605f4aee79dfc2 | 663662dd58ee0faab667d5e9bb463301342cb21a | refs/heads/master | 2022-12-15T20:00:44.759395 | 2019-06-23T17:06:13 | 2019-06-23T17:06:13 | 203,863,751 | 0 | 0 | null | 2022-11-22T02:24:17 | 2019-08-22T19:56:43 | Python | UTF-8 | Python | false | false | 992 | py | # Generated by Django 2.0.2 on 2018-08-28 10:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('payment_gateways', '0009_auto_20180828_1259'),
]
operations = [
migrations.AlterModelOptions(
name='city24payment',
options={'verbose_name': "Транзакция Банк 'Фамільний'", 'verbose_name_plural': "Транзакции Банк 'Фамільний'"},
),
migrations.AlterField(
model_name='city24payment',
name='order_id',
field=models.BigIntegerField(verbose_name="Уникальный идентификатор транзакции Банк 'Фамільний'"),
),
migrations.AlterField(
model_name='city24payment',
name='service_id',
field=models.IntegerField(verbose_name="Номер EF в системе Банк 'Фамільний'"),
),
]
| [
"[email protected]"
] | |
04f7bc385f1b8ac6d86d0e075bc5edae8a51d202 | bf99b1b14e9ca1ad40645a7423f23ef32f4a62e6 | /AtCoder/abc/158a_2.py | 048e2b8a5c7f0c3a977f8d7b8756d1db030f8749 | [] | no_license | y-oksaku/Competitive-Programming | 3f9c1953956d1d1dfbf46d5a87b56550ff3ab3db | a3ff52f538329bed034d3008e051f30442aaadae | refs/heads/master | 2021-06-11T16:14:12.635947 | 2021-05-04T08:18:35 | 2021-05-04T08:18:35 | 188,639,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 62 | py | S = set(list(input()))
print('Yes' if len(S) == 2 else 'No')
| [
"[email protected]"
] | |
840164d5a14a0caf4c19930e425304128403178c | 41ede4fd3bfba1bff0166bca7aee80dcf21434c6 | /suvari/gtk2chain/reverses2/libgsf/actions.py | a7de56c2638edd3d8d8dcf8cb916ee27fd666874 | [] | no_license | pisilinux/playground | a7db4b42559a21cc72fd4c8649e0231ab6a3eb3c | e4e12fff8a847ba210befc8db7e2af8556c3adf7 | refs/heads/master | 2022-08-12T23:03:27.609506 | 2022-08-11T18:28:19 | 2022-08-11T18:28:19 | 8,429,459 | 16 | 22 | null | 2022-08-11T18:28:20 | 2013-02-26T09:37:11 | Python | UTF-8 | Python | false | false | 678 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Licensed under the GNU General Public License, version 3.
# See the file http://www.gnu.org/licenses/gpl.txt
from pisi.actionsapi import get
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
def setup():
autotools.configure("--disable-static \
--enable-introspection \
--with-pic")
pisitools.dosed("libtool"," -shared ", " -Wl,--as-needed -shared ")
def build():
autotools.make()
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "BUGS", "COPY*", "HACKING", "NEWS", "README", "TODO")
| [
"[email protected]"
] | |
253e032a7578b0693751dc6c07b7ec26f2937f27 | 3fe96227e910eb3ef13b80ded266da924d32cb86 | /pump/main/models.py | 26fe322b093b7cff7fe553bf16ab51637c5d9dc2 | [] | no_license | akingon/pump | b974054c54d12b8efa644a161e3ab9acb9b82601 | 6ce07a9b3faf7e1ed97062f854f57624a896e8a7 | refs/heads/master | 2021-01-18T06:32:25.974631 | 2015-05-01T14:27:22 | 2015-05-01T14:27:22 | 31,909,188 | 0 | 0 | null | 2015-03-09T16:18:46 | 2015-03-09T16:18:46 | null | UTF-8 | Python | false | false | 3,054 | py | from django.db import models
from django.template import Context
from django.template.loader import get_template
from .criteria import Houghton, ABC, PassFail
class Response(models.Model):
added = models.DateTimeField(auto_now_add=True)
# Houghton
q1 = models.TextField(blank=True, default="")
q2 = models.TextField(blank=True, default="")
q3 = models.TextField(blank=True, default="")
q4 = models.TextField(blank=True, default="")
q5 = models.TextField(blank=True, default="")
q6 = models.TextField(blank=True, default="")
# ABC
q7 = models.TextField(blank=True, default="")
q8 = models.TextField(blank=True, default="")
q9 = models.TextField(blank=True, default="")
q10 = models.TextField(blank=True, default="")
q11 = models.TextField(blank=True, default="")
q12 = models.TextField(blank=True, default="")
q13 = models.TextField(blank=True, default="")
q14 = models.TextField(blank=True, default="")
q15 = models.TextField(blank=True, default="")
q16 = models.TextField(blank=True, default="")
q17 = models.TextField(blank=True, default="")
q18 = models.TextField(blank=True, default="")
q19 = models.TextField(blank=True, default="")
q20 = models.TextField(blank=True, default="")
q21 = models.TextField(blank=True, default="")
q22 = models.TextField(blank=True, default="")
# pick up
q23 = models.TextField(blank=True, default="")
# look behind
q24 = models.TextField(blank=True, default="")
def __unicode__(self):
return "Response %s" % self.added
def results(self):
s = Scorer(self)
return s.results()
def email_text(self):
""" body of email version """
t = get_template("main/response_email.txt")
c = Context(dict(object=self))
return t.render(c)
class Scorer(object):
def __init__(self, r):
h_values = [r.q1, r.q2, r.q3, r.q4, r.q5, r.q6]
h_values = [int(v) for v in h_values if v != ""]
self.h = Houghton(h_values)
a_values = [
r.q7, r.q8, r.q9, r.q10, r.q11, r.q12,
r.q13, r.q14, r.q15, r.q16, r.q17, r.q18,
r.q19, r.q20, r.q21, r.q22,
]
a_values = [int(v) for v in a_values if v != ""]
self.abc = ABC(a_values)
self.pick_up = PassFail(int(r.q23 or '1'))
self.look_behind = PassFail(int(r.q24 or '1'))
def number_passed(self):
return (
self.h.pass_fail() +
self.abc.pass_fail() +
self.pick_up.pass_fail() +
self.look_behind.pass_fail())
def percentage_likelihood(self):
percents = ['95.6', '93.9', '92.1', '81.4', '59.3']
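        # percents[i] is the percentage likelihood reported when
        # number_passed() == i; the four scored components are Houghton,
        # ABC, pick up and look behind.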
return percents[self.number_passed()]
def results(self):
return dict(
houghton=self.h,
abc=self.abc,
pick_up=self.pick_up,
look_behind=self.look_behind,
number_passed=self.number_passed(),
percentage_likelihood=self.percentage_likelihood(),
)
| [
"[email protected]"
] | |
7879ca81348c624f31155cb54edc381e6820a388 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02602/s085786581.py | 99dcc1901822553bf70ab6c5531a4412172e6f36 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 147 | py | n, k = map(int, input().split())
A = list(map(int, input().split()))
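# Comparing the products of two consecutive length-k windows reduces to
# comparing the single elements that differ, because the other k-1 factors
# are shared (this assumes the values are positive, as in the original task):
#   prod(A[i-k+1..i]) > prod(A[i-k..i-1])  <=>  A[i] > A[i-k]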
for i in range(k, n):
if A[i-k] < A[i]: print('Yes')
else: print('No') | [
"[email protected]"
] | |
b4e1ebbc795815ff08d8939927b0ced162c3d4cc | ef46adcd3bed245807f169db788f4bb183b87f51 | /sklearn/preprocessing/label.py | 6afaae035444606946018cb807257351a880070b | [] | no_license | auroua/aurora_detection | b35cddee8c77dafc70ee3869e51df8cc9a742d2d | ad4f940dccf9280a6efc9e2528285ec76042d6e6 | refs/heads/master | 2021-01-10T13:20:56.933107 | 2016-01-04T14:23:48 | 2016-01-04T14:23:48 | 44,159,924 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 26,702 | py | # Authors: Alexandre Gramfort <[email protected]>
# Mathieu Blondel <[email protected]>
# Olivier Grisel <[email protected]>
# Andreas Mueller <[email protected]>
# Joel Nothman <[email protected]>
# Hamzeh Alsalhi <[email protected]>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
        y : numpy array or sparse matrix of shape (n_samples,) or
            (n_samples, n_classes)
            Target values. The 2-d matrix should only contain 0 and 1, and
            represents multilabel classification. Sparse matrix can be CSR,
            CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
greatest value. Typically, this allows to use the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
raise ValueError("classes {0} missmatch with the labels {1}"
"found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| [
"[email protected]"
] | |
7d5a16b79347dc1a7ab5601542779245f50ac4dd | d4b344780e893a19d44aed51ebfe514c91e920c2 | /aliyun-python-sdk-openanalytics-open/aliyunsdkopenanalytics_open/request/v20180619/UntagResourcesRequest.py | 953b86ca9d42e26feb6484c0520c3745231814fc | [
"Apache-2.0"
] | permissive | WenONGs/aliyun-openapi-python-sdk | 6d164160eac7a8020e3b8d1960d170e08d2c8f23 | b6de95a32030b421665c0833c9d64d92fcaf81c8 | refs/heads/master | 2023-04-28T06:36:51.740098 | 2021-05-17T09:37:00 | 2021-05-17T09:37:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkopenanalytics_open.endpoint import endpoint_data
class UntagResourcesRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'openanalytics-open', '2018-06-19', 'UntagResources','openanalytics-cap')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_All(self):
return self.get_query_params().get('All')
def set_All(self,All):
self.add_query_param('All',All)
def get_ResourceIds(self):
return self.get_query_params().get('ResourceId')
def set_ResourceIds(self, ResourceIds):
for depth1 in range(len(ResourceIds)):
if ResourceIds[depth1] is not None:
self.add_query_param('ResourceId.' + str(depth1 + 1) , ResourceIds[depth1])
def get_ResourceType(self):
return self.get_query_params().get('ResourceType')
def set_ResourceType(self,ResourceType):
self.add_query_param('ResourceType',ResourceType)
def get_TagKeys(self):
return self.get_query_params().get('TagKey')
def set_TagKeys(self, TagKeys):
for depth1 in range(len(TagKeys)):
if TagKeys[depth1] is not None:
self.add_query_param('TagKey.' + str(depth1 + 1) , TagKeys[depth1]) | [
"[email protected]"
] | |
b6542980555fe27fde1765fa72bc2a691b8a0f44 | a59568148b79abaeaa7911f37b3734f72f86d758 | /django/website/datamap/migrations/0008_auto__del_field_field_formattedname.py | 1988a35be7e9c351a07b60ce5e43de4feb8fd049 | [
"MIT"
] | permissive | rasulov3645/opendatacomparison | 55bd0820bd21f2b5c3f53055d89f8c3b80d5985f | 2dcfefbd931fa4c7a0981a7f6bd966259aa4f16a | refs/heads/master | 2021-06-23T06:20:54.775422 | 2015-05-06T15:11:31 | 2015-05-06T15:11:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,674 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'Field.formattedname'
db.delete_column(u'datamap_field', 'formattedname')
def backwards(self, orm):
# Adding field 'Field.formattedname'
db.add_column(u'datamap_field', 'formattedname',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datamap.concept': {
'Meta': {'object_name': 'Concept'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'datamap.datamap': {
'Meta': {'object_name': 'Datamap'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'dataset': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'datamaps'", 'to': u"orm['package.Package']"}),
'format': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['package.Format']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
u'datamap.field': {
'Meta': {'unique_together': "((u'datamap', u'fieldname'),)", 'object_name': 'Field'},
'concept': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['datamap.Concept']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'datamap': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'fields'", 'to': u"orm['datamap.Datamap']"}),
'datatype': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'datatype_notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fieldname': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapsto': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mapsto_rel_+'", 'null': 'True', 'to': u"orm['datamap.Field']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'standardname': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'datamap.translatedfield': {
'Meta': {'unique_together': "((u'field', u'language'),)", 'object_name': 'TranslatedField'},
'allowable_values': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'field': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'translations'", 'to': u"orm['datamap.Field']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'default': "u'en_US'", 'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'package.category': {
'Meta': {'ordering': "['title']", 'object_name': 'Category'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'50'"}),
'title_plural': ('django.db.models.fields.CharField', [], {'max_length': "'50'", 'blank': 'True'})
},
u'package.format': {
'Meta': {'object_name': 'Format'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"})
},
u'package.package': {
'Meta': {'object_name': 'Package'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'packages'", 'to': u"orm['package.Category']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'creator'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'documentation': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'documentation_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'formats': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['package.Format']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'modifier'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'machine_readable': ('django.db.models.fields.BooleanField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'nesting': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'nesting_depth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publisher': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'datasets'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['publisher.Publisher']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': "'100'"}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'usage': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
u'publisher.publisher': {
'Meta': {'object_name': 'Publisher'},
'administrative_cat': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'administrative_level': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '2', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
}
}
complete_apps = ['datamap'] | [
"[email protected]"
] | |
c9de3cec05d3f3046559f4d8e7e7f95a8b23248e | 78cc4a9de8815bb042b25f70cbea5da2058553e8 | /src/fetch/fetchclass/grap_userinfo.py | bc27d9475706abb115c0787ee0e0262e5e6afa7e | [] | no_license | simple2source/fetch_crwal | 504c6aae18cc6c9520c3a5b6cb5e76fb65500d82 | 390998556f72a2053574e6ad5c58cbf0b850c8c0 | refs/heads/master | 2021-01-19T02:47:07.951209 | 2016-06-26T03:57:38 | 2016-06-26T03:57:38 | 52,797,780 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,705 | py | # coding: utf-8
"""
script to send email daily about the grap user info in MySQL grapuser_info table
"""
import MySQLdb
import common
from prettytable import PrettyTable
import datetime, time
import libaccount
sql_config = common.sql_config
def update_num():
try:
sql = """select grap_source, user_name from grapuser_info where account_type = '购买账号'"""
db = MySQLdb.connect(**sql_config)
cursor = db.cursor()
cursor.execute(sql)
data = cursor.fetchall()
for i in data:
a = libaccount.Manage(i[0])
ck_str = a.redis_ck_get(i[1])
a.num_update(i[0], i[1], ck_str)
time.sleep(3)
except Exception as e:
print Exception, e
def grap_info():
db = MySQLdb.connect(**sql_config)
cursor = db.cursor()
sql = """ select grap_source, user_name, account_mark, buy_num, pub_num, expire_time
from grapuser_info """ # where account_type = '购买账号' """
cursor.execute(sql)
data = cursor.fetchall()
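    # Column glosses: 来源=source, 用户名=username, 地区=region,
    # 购买余额=purchase balance, 发布余额=publish balance, 过期时间=expiry time.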
x = PrettyTable(['来源', '用户名', '地区', '购买余额', '发布余额', '过期时间'])
for i in data:
ll = list(i)
ll2 = ll[:5] + [str(ll[5])]
x.add_row(ll2)
db.close()
return x.get_html_string(sortby=u"过期时间").encode('utf8')
def eformat(html):
msg_style = """<style type="text/css">
.body{
font-family: Monaco, Menlo, Consolas, "Courier New", "Lucida Sans Unicode", "Lucida Sans", "Lucida Console", monospace;
font-size: 14px;
line-height: 20px;
}
.table{ border-collapse:collapse; border:solid 1px gray; padding:6px}
.table td{border:solid 1px gray; padding:6px}
.color-ok {color: green;}
.color-warning {color: coral;}
.color-error {color: red;}
.bg-ok {background-color: lavender;}
.bg-warning {background-color: yellow;}
.bg-error {background-color: deeppink;}
</style>"""
msg_head = """<html><head><meta charset="utf-8"></head>""" + msg_style + "<body>"
msg = msg_head + """<h2>简历下载账号信息</h2>"""
    msg2 = html  # use the table passed in by the caller instead of querying the DB again
day_list = [str(datetime.datetime.now().date() + datetime.timedelta(days=x)) for x in xrange(-30, 60)]
day_list2 = ['<td>' + x + '</td>' for x in day_list]
for i in day_list2:
msg2 = msg2.replace(i, i.replace('<td>', '<td style="color:red; text-align:right">'))
msg = msg + msg2 + "</body></html>"
msg = msg.replace('<table>', '<table class="table">').replace('<td>', '<td style="text-align:right">').replace('<th>', "<th class='table'>")
# print msg
return msg
if __name__ == '__main__':
    data = grap_info()
    msg = eformat(data)
    common.sendEmail('main', '简历渠道账号信息', msg, msg_type=1, des='op') | [
"[email protected]"
] | |
e8f98f81fddd7ab8606cf4a92a637ea8342d3535 | 36c46ac6e19ea611e5ebe5d851c4ea14a3613bf5 | /beets/util/__init__.py | 67a878342bf2ad83e7b2d31fa5431f33bd156803 | [
"MIT"
] | permissive | navinpai/beets | e0fd569f255f3dc642dd431c6e283ae6d095ab42 | 94569a774e8075337f96a071ce77e1df692add98 | refs/heads/master | 2021-01-18T10:42:33.321553 | 2011-08-29T01:25:38 | 2011-08-29T01:25:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,849 | py | # This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Miscellaneous utility functions."""
import os
import sys
import re
import shutil
from collections import defaultdict
MAX_FILENAME_LENGTH = 200
def normpath(path):
"""Provide the canonical form of the path suitable for storing in
the database.
"""
return os.path.normpath(os.path.abspath(os.path.expanduser(path)))
def ancestry(path, pathmod=None):
"""Return a list consisting of path's parent directory, its
grandparent, and so on. For instance:
>>> ancestry('/a/b/c')
['/', '/a', '/a/b']
"""
pathmod = pathmod or os.path
out = []
last_path = None
while path:
path = pathmod.dirname(path)
if path == last_path:
break
last_path = path
if path: # don't yield ''
out.insert(0, path)
return out
def sorted_walk(path):
"""Like os.walk, but yields things in sorted, breadth-first
order.
"""
# Make sure the path isn't a Unicode string.
path = bytestring_path(path)
# Get all the directories and files at this level.
dirs = []
files = []
for base in os.listdir(path):
cur = os.path.join(path, base)
if os.path.isdir(syspath(cur)):
dirs.append(base)
else:
files.append(base)
# Sort lists and yield the current level.
dirs.sort()
files.sort()
yield (path, dirs, files)
# Recurse into directories.
for base in dirs:
cur = os.path.join(path, base)
        # Equivalent to `yield from sorted_walk(cur)` (not available in Python 2):
for res in sorted_walk(cur):
yield res
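# A minimal usage sketch (the path is hypothetical, for illustration only):
#
#     for dirpath, dirs, files in sorted_walk('/music/library'):
#         print dirpath, files   # dirs and files arrive pre-sorted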
def mkdirall(path):
"""Make all the enclosing directories of path (like mkdir -p on the
parent).
"""
for ancestor in ancestry(path):
if not os.path.isdir(syspath(ancestor)):
os.mkdir(syspath(ancestor))
def prune_dirs(path, root, clutter=('.DS_Store', 'Thumbs.db')):
"""If path is an empty directory, then remove it. Recursively
remove path's ancestry up to root (which is never removed) where
there are empty directories. If path is not contained in root, then
nothing is removed. Filenames in clutter are ignored when
determining emptiness.
"""
path = normpath(path)
root = normpath(root)
ancestors = ancestry(path)
if root in ancestors:
# Only remove directories below the root.
ancestors = ancestors[ancestors.index(root)+1:]
# Traverse upward from path.
ancestors.append(path)
ancestors.reverse()
for directory in ancestors:
directory = syspath(directory)
if all(fn in clutter for fn in os.listdir(directory)):
# Directory contains only clutter (or nothing).
try:
shutil.rmtree(directory)
except OSError:
break
else:
break
def components(path, pathmod=None):
"""Return a list of the path components in path. For instance:
>>> components('/a/b/c')
['a', 'b', 'c']
"""
pathmod = pathmod or os.path
comps = []
ances = ancestry(path, pathmod)
for anc in ances:
comp = pathmod.basename(anc)
if comp:
comps.append(comp)
else: # root
comps.append(anc)
last = pathmod.basename(path)
if last:
comps.append(last)
return comps
def bytestring_path(path):
"""Given a path, which is either a str or a unicode, returns a str
path (ensuring that we never deal with Unicode pathnames).
"""
# Pass through bytestrings.
if isinstance(path, str):
return path
# Try to encode with default encodings, but fall back to UTF8.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
return path.encode(encoding)
except UnicodeError:
return path.encode('utf8')
def syspath(path, pathmod=None):
"""Convert a path for use by the operating system. In particular,
paths on Windows must receive a magic prefix and must be converted
to unicode before they are sent to the OS.
"""
pathmod = pathmod or os.path
windows = pathmod.__name__ == 'ntpath'
# Don't do anything if we're not on windows
if not windows:
return path
if not isinstance(path, unicode):
# Try to decode with default encodings, but fall back to UTF8.
encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
try:
path = path.decode(encoding, 'replace')
except UnicodeError:
path = path.decode('utf8', 'replace')
# Add the magic prefix if it isn't already there
if not path.startswith(u'\\\\?\\'):
path = u'\\\\?\\' + path
return path
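# For illustration: on Windows, syspath('C:\\music\\a.mp3') returns the
# unicode long-path form u'\\\\?\\C:\\music\\a.mp3' (i.e. \\?\C:\music\a.mp3);
# on all other platforms the path is returned unchanged.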
def samefile(p1, p2):
"""Safer equality for paths."""
return shutil._samefile(syspath(p1), syspath(p2))
def soft_remove(path):
"""Remove the file if it exists."""
path = syspath(path)
if os.path.exists(path):
os.remove(path)
def _assert_not_exists(path, pathmod=None):
"""Raises an OSError if the path exists."""
pathmod = pathmod or os.path
if pathmod.exists(path):
raise OSError('file exists: %s' % path)
def copy(path, dest, replace=False, pathmod=None):
"""Copy a plain file. Permissions are not copied. If dest already
exists, raises an OSError unless replace is True. Paths are
translated to system paths before the syscall.
"""
path = syspath(path)
dest = syspath(dest)
_assert_not_exists(dest, pathmod)
return shutil.copyfile(path, dest)
def move(path, dest, replace=False, pathmod=None):
"""Rename a file. dest may not be a directory. If dest already
exists, raises an OSError unless replace is True. Paths are
translated to system paths.
"""
path = syspath(path)
dest = syspath(dest)
_assert_not_exists(dest, pathmod)
return shutil.move(path, dest)
# Note: POSIX actually supports \ and : -- I just think they're
# a pain. And ? has caused problems for some.
CHAR_REPLACE = [
(re.compile(r'[\\/\?]|^\.'), '_'),
(re.compile(r':'), '-'),
]
CHAR_REPLACE_WINDOWS = re.compile(r'["\*<>\|]|^\.|\.$| +$'), '_'
def sanitize_path(path, pathmod=None):
"""Takes a path and makes sure that it is legal. Returns a new path.
Only works with fragments; won't work reliably on Windows when a
path begins with a drive letter. Path separators (including altsep!)
should already be cleaned from the path components.
"""
pathmod = pathmod or os.path
windows = pathmod.__name__ == 'ntpath'
comps = components(path, pathmod)
if not comps:
return ''
for i, comp in enumerate(comps):
# Replace special characters.
for regex, repl in CHAR_REPLACE:
comp = regex.sub(repl, comp)
if windows:
regex, repl = CHAR_REPLACE_WINDOWS
comp = regex.sub(repl, comp)
# Truncate each component.
comp = comp[:MAX_FILENAME_LENGTH]
comps[i] = comp
return pathmod.join(*comps)
def sanitize_for_path(value, pathmod, key=None):
"""Sanitize the value for inclusion in a path: replace separators
with _, etc. Doesn't guarantee that the whole path will be valid;
you should still call sanitize_path on the complete path.
"""
if isinstance(value, basestring):
for sep in (pathmod.sep, pathmod.altsep):
if sep:
value = value.replace(sep, u'_')
elif key in ('track', 'tracktotal', 'disc', 'disctotal'):
# pad with zeros
value = u'%02i' % value
elif key == 'bitrate':
# Bitrate gets formatted as kbps.
value = u'%ikbps' % (value / 1000)
else:
value = unicode(value)
return value
def str2bool(value):
"""Returns a boolean reflecting a human-entered string."""
if value.lower() in ('yes', '1', 'true', 't', 'y'):
return True
else:
return False
def levenshtein(s1, s2):
"""A nice DP edit distance implementation from Wikibooks:
http://en.wikibooks.org/wiki/Algorithm_implementation/Strings/
Levenshtein_distance#Python
"""
if len(s1) < len(s2):
return levenshtein(s2, s1)
if not s1:
return len(s2)
previous_row = xrange(len(s2) + 1)
for i, c1 in enumerate(s1):
current_row = [i + 1]
for j, c2 in enumerate(s2):
insertions = previous_row[j + 1] + 1
deletions = current_row[j] + 1
substitutions = previous_row[j] + (c1 != c2)
current_row.append(min(insertions, deletions, substitutions))
previous_row = current_row
return previous_row[-1]
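# For example, the classic edit-distance test case:
#
#     >>> levenshtein('kitten', 'sitting')
#     3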
def plurality(objs):
"""Given a sequence of comparable objects, returns the object that
is most common in the set and the frequency of that object.
"""
# Calculate frequencies.
freqs = defaultdict(int)
for obj in objs:
freqs[obj] += 1
# Find object with maximum frequency.
max_freq = 0
res = None
for obj, freq in freqs.items():
if freq > max_freq:
max_freq = freq
res = obj
return res, max_freq
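# For example:
#
#     >>> plurality(['a', 'b', 'b', 'c'])
#     ('b', 2)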
| [
"[email protected]"
] | |
63f1be347c7875fec36366c86232173a42630430 | 3a7412502b89b917f23cda9a3318d2dc4d02185b | /panoptes/analysis/panels/events/admin.py | c24c66fb0d3a697e3b34747002864d68c5a7f646 | [
"BSD-2-Clause"
] | permissive | cilcoberlin/panoptes | 5f0b19d872993bc5c7f51a44c9ccc596fe0a8ab5 | 67d451ea4ffc58c23b5f347bfa5609fa7f853b45 | refs/heads/master | 2021-01-21T00:17:42.038637 | 2012-07-10T03:20:47 | 2012-07-10T03:20:47 | 1,660,305 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py |
from django.contrib import admin
from panoptes.analysis.panels.events.models import LocationCalendar
class LocationCalendarAdmin(admin.ModelAdmin):
list_display = ('name', 'location', 'calendar_id')
ordering = ('location', 'order')
admin.site.register(LocationCalendar, LocationCalendarAdmin)
| [
"[email protected]"
] | |
de1964489e7a06b573dd7b0b5646fc231e174d46 | 146cd740649b87032cbbfb97cde6ae486f76230b | /venv/lib/python3.6/site-packages/PIL/BdfFontFile.py | fc85c8a287d218f13805660da1f2d7bc33c3793c | [] | no_license | shellyhuang18/plank-filter-master | 8b7024c46334062496f05d31eefc618ebae50b4e | 8993a5b00f45841c3385fe997857bfdd10b71a84 | refs/heads/master | 2020-03-30T18:14:45.017957 | 2018-12-27T20:51:25 | 2018-12-27T20:51:25 | 151,490,556 | 0 | 1 | null | 2018-12-19T22:42:26 | 2018-10-03T22:50:58 | Python | UTF-8 | Python | false | false | 3,119 | py | #
# The Python Imaging Library
# $Id$
#
# bitmap distribution font (bdf) file parser
#
# history:
# 1996-05-16 fl created (as bdf2pil)
# 1997-08-25 fl converted to FontFile driver
# 2001-05-25 fl removed bogus __init__ call
# 2002-11-20 fl robustification (from Kevin Cazabon, Dmitry Vasiliev)
# 2003-04-22 fl more robustification (from Graham Dumpleton)
#
# Copyright (c) 1997-2003 by Secret Labs AB.
# Copyright (c) 1997-2003 by Fredrik Lundh.
#
# See the README file for information on usage and redistribution.
#
from __future__ import print_function
from . import Image, FontFile
# --------------------------------------------------------------------
# parse X Bitmap Distribution Format (BDF)
# --------------------------------------------------------------------
bdf_slant = {
"R": "Roman",
"I": "Italic",
"O": "Oblique",
"RI": "Reverse Italic",
"RO": "Reverse Oblique",
"OT": "Other"
}
bdf_spacing = {
"P": "Proportional",
"M": "Monospaced",
"C": "Cell"
}
def bdf_char(f):
# skip to STARTCHAR
while True:
s = f.readline()
if not s:
return None
if s[:9] == b"STARTCHAR":
break
id = s[9:].strip().decode('ascii')
# load symbol properties
props = {}
while True:
s = f.readline()
if not s or s[:6] == b"BITMAP":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
# load bitmap
bitmap = []
while True:
s = f.readline()
if not s or s[:7] == b"ENDCHAR":
break
bitmap.append(s[:-1])
bitmap = b"".join(bitmap)
[x, y, l, d] = [int(p) for p in props["BBX"].split()]
[dx, dy] = [int(p) for p in props["DWIDTH"].split()]
bbox = (dx, dy), (l, -d-y, x+l, -d), (0, 0, x, y)
try:
im = Image.frombytes("1", (x, y), bitmap, "hex", "1")
except ValueError:
# deal with zero-width characters
im = Image.new("1", (x, y))
return id, int(props["ENCODING"]), bbox, im
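# For reference, a glyph record in a BDF file looks roughly like this
# (an abridged, hypothetical example):
#
#   STARTCHAR quotesingle
#   ENCODING 39
#   DWIDTH 5 0
#   BBX 1 4 2 4
#   BITMAP
#   80
#   80
#   80
#   80
#   ENDCHAR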
##
# Font file plugin for the X11 BDF format.
class BdfFontFile(FontFile.FontFile):
def __init__(self, fp):
FontFile.FontFile.__init__(self)
s = fp.readline()
if s[:13] != b"STARTFONT 2.1":
raise SyntaxError("not a valid BDF file")
props = {}
comments = []
while True:
s = fp.readline()
if not s or s[:13] == b"ENDPROPERTIES":
break
i = s.find(b" ")
props[s[:i].decode('ascii')] = s[i+1:-1].decode('ascii')
if s[:i] in [b"COMMENT", b"COPYRIGHT"]:
if s.find(b"LogicalFontDescription") < 0:
comments.append(s[i+1:-1].decode('ascii'))
while True:
c = bdf_char(fp)
if not c:
break
id, ch, (xy, dst, src), im = c
if 0 <= ch < len(self.glyph):
self.glyph[ch] = xy, dst, src, im
| [
"[email protected]"
] | |
b2e647ca7e61dc983ece0837c25e3743abde5e29 | e755453c853ae400d94f562ad215b59166b63782 | /tests/splay_tests/test_contains.py | 90f77cc4f6c2ffda415a14fbc9008bc8390adea1 | [
"MIT"
] | permissive | lycantropos/dendroid | 0cb3e276dd9c476b82b0b7a17c25c2e05616a993 | fd11c74a395eb791caf803c848805569869080f6 | refs/heads/master | 2023-04-07T11:07:55.550796 | 2023-03-27T00:46:03 | 2023-03-27T00:46:03 | 215,369,321 | 0 | 1 | MIT | 2020-09-24T05:02:02 | 2019-10-15T18:29:36 | Python | UTF-8 | Python | false | false | 891 | py | from typing import Tuple
from hypothesis import given
from dendroid.hints import Value
from tests.utils import (BaseSet,
are_keys_equal,
implication,
to_height,
to_max_binary_tree_height,
set_value_to_key)
from . import strategies
@given(strategies.non_empty_sets_with_values)
def test_properties(set_with_value: Tuple[BaseSet, Value]) -> None:
set_, value = set_with_value
assert implication(value in set_,
are_keys_equal(set_value_to_key(set_, value),
set_.tree.root.key))
@given(strategies.non_empty_sets)
def test_accessing_in_order(set_: BaseSet) -> None:
for value in set_:
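        # A membership test on a splay tree is not read-only: each lookup
        # splays the accessed key to the root, so touching every value in
        # sorted order degrades the tree to its maximal, list-like height
        # (which is what the assertion below checks).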
value in set_
tree = set_.tree
assert to_height(tree) == to_max_binary_tree_height(tree)
| [
"[email protected]"
] | |
e4ff0e7628f0b153ac84f54932d7be59746127b4 | be0c6e2071945edcb47ee4f3fadc1f4629a2c6aa | /grandapp/migrations/0105_auto_20210224_1527.py | d80a750747203ef42ed7326139cdc3f30142b816 | [] | no_license | QuackenbushLab/grand | 9719a395e6a30951c3ffdef1eccdb5e422da737c | f23031d1f240550d25c2842b4af0aae08c653bae | refs/heads/master | 2023-08-10T09:58:58.381264 | 2023-07-25T18:23:26 | 2023-07-25T18:23:26 | 201,113,575 | 5 | 2 | null | 2022-06-24T19:11:29 | 2019-08-07T19:18:58 | JavaScript | UTF-8 | Python | false | false | 1,050 | py | # Generated by Django 3.0.2 on 2021-02-24 15:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grandapp', '0104_auto_20210224_0043'),
]
operations = [
migrations.AddField(
model_name='ggbmd1sample',
name='link',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='ggbmd1sample',
name='size',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='ggbmd2sample',
name='link',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
migrations.AddField(
model_name='ggbmd2sample',
name='size',
field=models.CharField(default='', max_length=200),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
6ed5f68e9e400070cd30140a79a7a876e9457eda | 8188bb529e538926fda5e90c62e1eaba38cff4f8 | /apps/__init__.py | 26c8f87ee9cbfb2d2b8c6406523ae10f59ee3280 | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | wangyibin/bioway | 68fbc59d65c0f94dcf2d4d56785522a62797fadd | a534f35fc6f96fe1b3a6ca78853a5aa076337328 | refs/heads/master | 2020-03-30T15:14:35.049692 | 2019-01-25T06:03:42 | 2019-01-25T06:03:42 | 151,354,414 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 74 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
from .base import must_open
| [
"[email protected]"
] | |
0c87e8d62731907e5730a66a9a585858c48baab9 | 45c86c7e7c6e84dcae3eba5544d93db6bee19905 | /venv/lib/python2.7/site-packages/pip/_vendor/cachecontrol/caches/file_cache.py | 50d18037e88dceca1d054d694fcec1680aab9b8b | [] | no_license | 5eanpoint/moweb | 60b5ed74183b019e04e4fa243d3c1930c4cb4a64 | bbf0d96b651230c231115a3eace7b950a908b57e | refs/heads/master | 2016-09-14T05:29:27.601814 | 2016-05-24T06:40:05 | 2016-05-24T06:40:10 | 59,546,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,529 | py | import hashlib
import os
from pip._vendor.lockfile import LockFile
from pip._vendor.lockfile.mkdirlockfile import MkdirLockFile
from ..cache import BaseCache
from ..controller import CacheController
def _secure_open_write(filename, fmode):
# We only want to write to this file, so open it in write only mode
flags = os.O_WRONLY
# os.O_CREAT | os.O_EXCL will fail if the file already exists, so we only
# will open *new* files.
# We specify this because we want to ensure that the mode we pass is the
# mode of the file.
flags |= os.O_CREAT | os.O_EXCL
# Do not follow symlinks to prevent someone from making a symlink that
# we follow and insecurely open a cache file.
if hasattr(os, "O_NOFOLLOW"):
flags |= os.O_NOFOLLOW
# On Windows we'll mark this file as binary
if hasattr(os, "O_BINARY"):
flags |= os.O_BINARY
# Before we open our file, we want to delete any existing file that is
# there
try:
os.remove(filename)
except (IOError, OSError):
# The file must not exist already, so we can just skip ahead to opening
pass
# Open our file, the use of os.O_CREAT | os.O_EXCL will ensure that if a
# race condition happens between the os.remove and this line, that an
# error will be raised. Because we utilize a lockfile this should only
# happen if someone is attempting to attack us.
fd = os.open(filename, flags, fmode)
try:
return os.fdopen(fd, "wb")
except:
# An error occurred wrapping our FD in a file object
os.close(fd)
raise
class FileCache(BaseCache):
def __init__(self, directory, forever=False, filemode=0o0600,
dirmode=0o0700, use_dir_lock=None, lock_class=None):
if use_dir_lock is not None and lock_class is not None:
raise ValueError("Cannot use use_dir_lock and lock_class together")
if use_dir_lock:
lock_class = MkdirLockFile
if lock_class is None:
lock_class = LockFile
self.directory = directory
self.forever = forever
self.filemode = filemode
self.dirmode = dirmode
self.lock_class = lock_class
@staticmethod
def encode(x):
return hashlib.sha224(x.encode()).hexdigest()
def _fn(self, name):
# NOTE: This method should not change as some may depend on it.
# See: https://github.com/ionrock/cachecontrol/issues/63
hashed = self.encode(name)
parts = list(hashed[:5]) + [hashed]
return os.path.join(self.directory, *parts)
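    # For illustration: a key whose hash starts with "abcde" is stored at
    # <directory>/a/b/c/d/e/abcde...; fanning files out over subdirectories
    # keeps any single directory from growing too large.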
def get(self, key):
name = self._fn(key)
if not os.path.exists(name):
return None
with open(name, 'rb') as fh:
return fh.read()
def set(self, key, value):
name = self._fn(key)
# Make sure the directory exists
try:
os.makedirs(os.path.dirname(name), self.dirmode)
except (IOError, OSError):
pass
with self.lock_class(name) as lock:
# Write our actual file
with _secure_open_write(lock.path, self.filemode) as fh:
fh.write(value)
def delete(self, key):
name = self._fn(key)
if not self.forever:
os.remove(name)
def url_to_file_path(url, filecache):
"""Return the file cache path based on the URL.
This does not ensure the file exists!
"""
key = CacheController.cache_url(url)
return filecache._fn(key)
| [
"[email protected]"
] | |
3440ae0cb78e579db0a6945b28742609be05790a | 0d77846403606b8300a53e05cd2103d5470b6a6a | /tensorflow/python/profiler/profiler_client.py | dc542e2c726ecdbd9c79293f77ca662075532b7a | [
"Apache-2.0"
] | permissive | alubanana/tensorflow | e7cb694073773be4c46607e7af4fb8ed9c74b812 | 454f89ab3baacbac567d6bcceef4c743f23ce58b | refs/heads/master | 2021-01-04T15:44:16.441471 | 2020-02-14T22:50:45 | 2020-02-14T22:56:47 | 240,614,446 | 1 | 0 | Apache-2.0 | 2020-02-14T23:12:02 | 2020-02-14T23:12:01 | null | UTF-8 | Python | false | false | 2,762 | py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Profiler client APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tfe
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.framework import c_api_util
from tensorflow.python.framework import errors
def trace(service_addr,
logdir,
duration_ms,
worker_list='',
num_tracing_attempts=3):
"""Sends grpc requests to profiler server to perform on-demand profiling.
  This method blocks the calling thread until it receives the tracing result.
Args:
service_addr: Address of profiler service e.g. localhost:6009.
logdir: Path of TensorBoard log directory e.g. /tmp/tb_log.
duration_ms: Duration of tracing or monitoring in ms.
worker_list: Optional. The list of workers that we are about to profile in
the current session (TPU only).
num_tracing_attempts: Optional. Automatically retry N times when no trace
event is collected (default 3).
Raises:
UnavailableError: If no trace event is collected.
"""
if not pywrap_tfe.TFE_ProfilerClientStartTracing(
service_addr, logdir, worker_list, True, duration_ms,
num_tracing_attempts):
raise errors.UnavailableError(None, None, 'No trace event is collected.')
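# A usage sketch (address, log directory and duration are hypothetical, and a
# profiler server must already be listening at the target address):
#
#   trace('localhost:6009', '/tmp/tb_log', duration_ms=2000)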
def monitor(service_addr, duration_ms, level=1):
"""Sends grpc requests to profiler server to perform on-demand monitoring.
  This method blocks the calling thread until it receives the monitoring result.
Args:
service_addr: Address of profiler service e.g. localhost:6009.
duration_ms: Duration of monitoring in ms.
level: Choose a monitoring level between 1 and 2 to monitor your
job. Level 2 is more verbose than level 1 and shows more metrics.
Returns:
A string of monitoring output.
"""
with c_api_util.tf_buffer() as buffer_:
pywrap_tfe.TFE_ProfilerClientMonitor(service_addr, duration_ms, level, True,
buffer_)
return pywrap_tf_session.TF_GetBuffer(buffer_)
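# A usage sketch (hypothetical address and duration):
#
#   print(monitor('localhost:6009', 1000, level=1))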
| [
"[email protected]"
] | |
d22f1d29c71d26afd2458a666800317fbbbe8db4 | 91e9d429f8e34fd48d5e34a898b32bc090660fe1 | /core/migrations/0008_article_featured_mob_img.py | ab29fbd735ece0954ef2f0c6a20182b6c8281707 | [] | no_license | sherrywilly/blog-graphene | b286d4ba9a5a3499aefc684399630bcbd348ba8f | b05430896dff6b8944b2e6f5f30b6cfd875dc820 | refs/heads/main | 2023-07-14T22:20:29.973765 | 2021-08-07T06:35:32 | 2021-08-07T06:35:32 | 392,514,698 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 446 | py | # Generated by Django 3.2.6 on 2021-08-03 09:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20210803_0850'),
]
operations = [
migrations.AddField(
model_name='article',
name='featured_mob_img',
field=models.ImageField(blank=True, editable=False, null=True, upload_to='feature_images/'),
),
]
| [
"[email protected]"
] | |
fde06b2ff44e982bb2583bb88563f02a0ba24b88 | 7cd9b9f41fdbf52de6e4393c43e3ff4e7466b417 | /pokemon/urls.py | 68b22f96b1bbe9442e8b4515f49e8579b6d1466c | [] | no_license | BaeJuneHyuck/demo-pusan-univ-201907-django | 36f5be596c6850acc211358276c66df127e05633 | 4ef9e64a42a977e0b435fb2f83842433fac53bf3 | refs/heads/master | 2022-01-22T21:35:57.419007 | 2019-07-23T06:51:14 | 2019-07-23T06:51:14 | 198,371,100 | 1 | 0 | null | 2019-07-23T06:56:52 | 2019-07-23T06:56:51 | null | UTF-8 | Python | false | false | 242 | py | from django.urls import path
from pokemon.views import index, pokemon_new, pokemon_edit
urlpatterns = [
path('', index),
path('new/', pokemon_new),
path('<int:pk>/', pokemon_edit),
# re_path(r'(?P<pk>\d+)', pokemon_edit),
]
| [
"[email protected]"
] | |
b4150d116c9be477b3d501231d89ebeae46b0aa9 | e44c1ac44a3cc912fbeaa0152b9294a03fd893ea | /test/test_boolean_syntax.py | 5239ddb35aae0c52b65012e4a9d113be293c672b | [
"BSD-2-Clause"
] | permissive | umd-lhcb/pyTuplingUtils | ca03db1975f7f283caab1436ac1c5d85fad75d2a | 85f3ca90f01389f834af6de1044364843210c4c5 | refs/heads/master | 2023-03-10T00:12:40.922444 | 2023-03-03T23:31:09 | 2023-03-03T23:31:09 | 215,201,702 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,192 | py | #!/usr/bin/env python3
#
# Author: Yipeng Sun
# License: BSD 2-clause
# Last Change: Thu Jun 18, 2020 at 02:47 AM +0800
import unittest
from context import pyTuplingUtils as ptu
parser = ptu.boolean.syntax.boolean_parser.parse
class ArithmeticTest(unittest.TestCase):
def test_var(self):
self.assertEqual(
parser('a').pretty(),
"var\ta\n"
)
self.assertEqual(
parser('-a').pretty(),
"neg\n"
" var\ta\n"
)
self.assertEqual(
parser('a1').pretty(),
"var\ta1\n"
)
self.assertEqual(
parser('a_1b').pretty(),
"var\ta_1b\n"
)
def test_num(self):
self.assertEqual(
parser('1').pretty(),
"num\t1\n"
)
self.assertEqual(
parser('+1').pretty(),
"num\t+1\n"
)
def test_negative_num(self):
self.assertEqual(
parser('-1.6').pretty(),
"num\t-1.6\n"
)
self.assertEqual(
parser('+1').pretty(),
"num\t+1\n"
)
def test_add(self):
self.assertEqual(
parser('-1 +2.3').pretty(),
"add\n"
" num\t-1\n"
" num\t2.3\n"
)
def test_add_sub(self):
self.assertEqual(
parser('-1 +2.3 - 10').pretty(),
"sub\n"
" add\n"
" num\t-1\n"
" num\t2.3\n"
" num\t10\n"
)
def test_add_mul(self):
self.assertEqual(
parser('-1 +2.3 * 10').pretty(),
"add\n"
" num\t-1\n"
" mul\n"
" num\t2.3\n"
" num\t10\n"
)
def test_add_mul_par(self):
self.assertEqual(
parser('-(1 +2.3) * 10').pretty(),
"mul\n"
" neg\n"
" add\n"
" num\t1\n"
" num\t2.3\n"
" num\t10\n"
)
class BooleanTest(unittest.TestCase):
def test_comp(self):
self.assertEqual(
parser('!(-a_2 +2.3) ').pretty(),
"comp\n"
" add\n"
" neg\n"
" var\ta_2\n"
" num\t2.3\n"
)
def test_eq(self):
self.assertEqual(
parser('a == b').pretty(),
"eq\n"
" var\ta\n"
" var\tb\n"
)
self.assertEqual(
parser('a == 1').pretty(),
"eq\n"
" var\ta\n"
" num\t1\n"
)
self.assertEqual(
parser('a <= -1+x').pretty(),
"lte\n"
" var\ta\n"
" add\n"
" num\t-1\n"
" var\tx\n"
)
def test_bool(self):
self.assertEqual(
parser('a & 1').pretty(),
"andop\n"
" var\ta\n"
" num\t1\n"
)
self.assertEqual(
parser('True | False').pretty(),
"orop\n"
" bool\tTrue\n"
" bool\tFalse\n"
)
self.assertEqual(
parser('True | False & True').pretty(),
"orop\n"
" bool\tTrue\n"
" andop\n"
" bool\tFalse\n"
" bool\tTrue\n"
)
self.assertEqual(
parser('(True | False) & !True | false').pretty(),
"orop\n"
" andop\n"
" orop\n"
" bool\tTrue\n"
" bool\tFalse\n"
" comp\n"
" bool\tTrue\n"
" bool\tfalse\n"
)
def test_comb(self):
self.assertEqual(
parser('a >= !(-1+x)*3').pretty(),
"gte\n"
" var\ta\n"
" comp\n"
" mul\n"
" add\n"
" num\t-1\n"
" var\tx\n"
" num\t3\n"
)
self.assertEqual(
parser('a >= !(-1+x)*3 | x<8 & y != -(z+3)').pretty(),
"orop\n"
" gte\n"
" var\ta\n"
" comp\n"
" mul\n"
" add\n"
" num\t-1\n"
" var\tx\n"
" num\t3\n"
" andop\n"
" lt\n"
" var\tx\n"
" num\t8\n"
" neq\n"
" var\ty\n"
" neg\n"
" add\n"
" var\tz\n"
" num\t3\n"
)
self.assertEqual(
parser('a >= !(-1+x)*3 | x<8 & y != -(z+3)').pretty(),
parser('a >= !(-1+x)*3 | (x<8 & y != -(z+3))').pretty()
)
class FunctionCallTest(unittest.TestCase):
def test_func_call_zero_arg(self):
self.assertEqual(
parser('(some_func0())').pretty(),
"func_call\tsome_func0\n"
)
def test_func_call_one_arg(self):
self.assertEqual(
parser('some_func1(arg1)').pretty(),
"func_call\n"
" some_func1\n"
" arglist\n"
" var\targ1\n"
)
def test_func_call_two_args(self):
self.assertEqual(
parser('some_func2(arg1, arg2)').pretty(),
"func_call\n"
" some_func2\n"
" arglist\n"
" var\targ1\n"
" var\targ2\n"
)
def test_func_call_arithmetic(self):
self.assertEqual(
parser('arith_func((arg1+2)*val3, arg2)').pretty(),
"func_call\n"
" arith_func\n"
" arglist\n"
" mul\n"
" add\n"
" var\targ1\n"
" num\t2\n"
" var\tval3\n"
" var\targ2\n"
)
def test_func_call_nested(self):
self.assertEqual(
parser('arith_func(inner(arg1+2)*val3, arg2)').pretty(),
"func_call\n"
" arith_func\n"
" arglist\n"
" mul\n"
" func_call\n"
" inner\n"
" arglist\n"
" add\n"
" var\targ1\n"
" num\t2\n"
" var\tval3\n"
" var\targ2\n"
)
def test_func_call_nested_boolean_op(self):
self.assertEqual(
parser('arith_func(inner(arg1+2)*val3, arg2) > stuff(a)').pretty(),
"gt\n"
" func_call\n"
" arith_func\n"
" arglist\n"
" mul\n"
" func_call\n"
" inner\n"
" arglist\n"
" add\n"
" var\targ1\n"
" num\t2\n"
" var\tval3\n"
" var\targ2\n"
" func_call\n"
" stuff\n"
" arglist\n"
" var\ta\n"
)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
338d1428bc7508a2da49be3bd292cdb85916ced9 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Google/Drive/Files/Update.py | 5ed78c1781be712f6acaa6a31eb70220ec8e1590 | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,745 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# Update
# Updates the metadata or content of an existing file.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class Update(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the Update Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
Choreography.__init__(self, temboo_session, '/Library/Google/Drive/Files/Update')
def new_input_set(self):
return UpdateInputSet()
def _make_result_set(self, result, path):
return UpdateResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return UpdateChoreographyExecution(session, exec_id, path)
class UpdateInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the Update
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_RequestBody(self, value):
"""
Set the value of the RequestBody input for this Choreo. ((conditional, json) A JSON representation of fields in a file resource. File metadata information (such as the title) can be updated using this input. See documentation for formatting examples.)
"""
InputSet._set_input(self, 'RequestBody', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((optional, string) A valid access token retrieved during the OAuth2 process. This is required unless you provide the ClientID, ClientSecret, and RefreshToken to generate a new access token.)
"""
InputSet._set_input(self, 'AccessToken', value)
def set_ClientID(self, value):
"""
Set the value of the ClientID input for this Choreo. ((conditional, string) The Client ID provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientID', value)
def set_ClientSecret(self, value):
"""
Set the value of the ClientSecret input for this Choreo. ((conditional, string) The Client Secret provided by Google. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'ClientSecret', value)
def set_ContentType(self, value):
"""
Set the value of the ContentType input for this Choreo. ((conditional, string) The Content-Type of the file that is being updated (i.e. image/jpeg). Required if modifying the file content.)
"""
InputSet._set_input(self, 'ContentType', value)
def set_Convert(self, value):
"""
Set the value of the Convert input for this Choreo. ((optional, boolean) Whether to convert this file to the corresponding Google Docs format. (Default: false).)
"""
InputSet._set_input(self, 'Convert', value)
def set_Fields(self, value):
"""
Set the value of the Fields input for this Choreo. ((optional, string) Selector specifying which fields to include in a partial response.)
"""
InputSet._set_input(self, 'Fields', value)
def set_FileContent(self, value):
"""
Set the value of the FileContent input for this Choreo. ((conditional, string) The new Base64 encoded contents of the file that is being updated.)
"""
InputSet._set_input(self, 'FileContent', value)
def set_FileID(self, value):
"""
Set the value of the FileID input for this Choreo. ((required, string) The id of the file to update.)
"""
InputSet._set_input(self, 'FileID', value)
def set_OCR(self, value):
"""
Set the value of the OCR input for this Choreo. ((optional, boolean) Whether to attempt OCR on .jpg, .png, .gif, or .pdf uploads. (Default: false))
"""
InputSet._set_input(self, 'OCR', value)
def set_OcrLanguage(self, value):
"""
Set the value of the OcrLanguage input for this Choreo. ((optional, string) If ocr is true, hints at the language to use. Valid values are ISO 639-1 codes.)
"""
InputSet._set_input(self, 'OcrLanguage', value)
def set_Pinned(self, value):
"""
Set the value of the Pinned input for this Choreo. ((optional, boolean) Whether to pin the new revision. (Default: false).)
"""
InputSet._set_input(self, 'Pinned', value)
def set_RefreshToken(self, value):
"""
Set the value of the RefreshToken input for this Choreo. ((conditional, string) An OAuth refresh token used to generate a new access token when the original token is expired. Required unless providing a valid AccessToken.)
"""
InputSet._set_input(self, 'RefreshToken', value)
def set_SetModifiedDate(self, value):
"""
Set the value of the SetModifiedDate input for this Choreo. ((optional, boolean) Whether to set the modified date with the supplied modified date.)
"""
InputSet._set_input(self, 'SetModifiedDate', value)
def set_SourceLanguage(self, value):
"""
Set the value of the SourceLanguage input for this Choreo. ((optional, string) The language of the original file to be translated.)
"""
InputSet._set_input(self, 'SourceLanguage', value)
def set_TargetLanguage(self, value):
"""
Set the value of the TargetLanguage input for this Choreo. ((optional, string) Target language to translate the file to. If no sourceLanguage is provided, the API will attempt to detect the language.)
"""
InputSet._set_input(self, 'TargetLanguage', value)
def set_TimedTextLanguage(self, value):
"""
Set the value of the TimedTextLanguage input for this Choreo. ((optional, string) The language of the timed text.)
"""
InputSet._set_input(self, 'TimedTextLanguage', value)
def set_TimedTextTrackName(self, value):
"""
Set the value of the TimedTextTrackName input for this Choreo. ((optional, string) The timed text track name.)
"""
InputSet._set_input(self, 'TimedTextTrackName', value)
def set_UpdateViewedDate(self, value):
"""
Set the value of the UpdateViewedDate input for this Choreo. ((optional, boolean) Whether to update the view date after successfully updating the file.)
"""
InputSet._set_input(self, 'UpdateViewedDate', value)
class UpdateResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the Update Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Google.)
"""
return self._output.get('Response', None)
def get_NewAccessToken(self):
"""
Retrieve the value for the "NewAccessToken" output from this Choreo execution. ((string) Contains a new AccessToken when the RefreshToken is provided.)
"""
return self._output.get('NewAccessToken', None)
class UpdateChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateResultSet(response, path)
| [
"[email protected]"
] | |
af3a982bb1e38db919790e04e56f80327509c4af | f3a55a42086d2bae3fdffa892318bf0f518b3549 | /tests/test_gp_interp.py | 265798b58161fcdcccd8cd4077f4898b5a13ac82 | [
"BSD-2-Clause"
] | permissive | LBJ-Wade/Piff_PSF | 0aa5d904dd416a81292b4d929ee29f19173b18f6 | 475939e250eaec12781596af90963daa3de7dbdb | refs/heads/main | 2023-04-03T08:47:15.933418 | 2020-12-10T15:18:45 | 2020-12-10T15:18:45 | 352,978,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 17,624 | py | # Copyright (c) 2016 by Mike Jarvis and the other collaborators on GitHub at
# https://github.com/rmjarvis/Piff All rights reserved.
#
# Piff is free software: Redistribution and use in source and binary forms
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
from __future__ import print_function
import galsim
import treegp
import numpy as np
import piff
import os
import copy
import fitsio
from scipy.linalg import cholesky, cho_solve
from sklearn.model_selection import train_test_split
from piff_test_helper import get_script_name, timer
kolmogorov = galsim.Kolmogorov(half_light_radius=1., flux=1.)
def get_correlation_length_matrix(correlation_length, g1, g2):
"""
    Produce a correlation matrix to introduce anisotropy in the kernel.
    Uses the same parametrization as shape measurement in weak lensing
    because it is mathematically equivalent (an anisotropic kernel
    has an elliptical shape).
    :param correlation_length: Correlation length of the kernel.
:param g1, g2: Shear applied to isotropic kernel.
"""
if abs(g1)>1 or abs(g2)>1:
raise ValueError('abs value of g1 and g2 must be lower than one')
e = np.sqrt(g1**2 + g2**2)
q = (1-e) / (1+e)
    m = galsim.Shear(g1=g1, g2=g2).getMatrix() * correlation_length
L = m.dot(m) * q
return L
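# Sanity check on the parametrization above: for g1 = g2 = 0 the shear matrix
# is the identity and q = 1, so L reduces to correlation_length**2 times the
# identity, i.e. the isotropic kernel is recovered.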
def make_single_star(u, v, size, g1, g2, size_err, g1_err, g2_err):
"""Make a Star instance filled with a Kolmogorov profile
:param u, v: Star coordinate.
:param size: Star size.
:param g1, g2: Shear applied to profile.
:param size_err: Size error.
:param g1_err, g2_err: Shear error.
"""
star = piff.Star.makeTarget(x=None, y=None, u=u, v=v,
properties={}, wcs=None, scale=0.26,
stamp_size=24, image=None,
pointing=None, flux=1.)
kolmogorov.drawImage(star.image, method='auto')
fit = piff.StarFit(np.array([size, g1, g2]),
params_var=np.array([size_err**2, g1_err**2, g2_err**2]),
flux=1.)
final_star = piff.Star(star.data, fit)
return final_star
def return_gp_predict(y, X1, X2, kernel, factor):
"""Compute interpolation with gaussian process for a given kernel.
:param y: The dependent responses. (n_samples, n_targets)
:param X1: The independent covariates. (n_samples, 2)
:param X2: The independent covariates at which to interpolate. (n_samples, 2)
:param kernel: sklearn.gaussian_process kernel.
:param factor: Cholesky decomposition of sklearn.gaussian_process kernel.
"""
HT = kernel.__call__(X2, Y=X1)
alpha = cho_solve(factor, y, overwrite_b=False)
y_predict = np.dot(HT,alpha.reshape((len(alpha),1))).T[0]
return y_predict
def make_gaussian_random_fields(kernel, nstars, noise_level=1e-3,
xlim=-10, ylim=10, seed=30352010,
test_size=0.20, vmax=8, plot=False):
"""
Make psf params as gaussian random fields.
:param kernel: sklearn kernel to used for generating
the data.
:param nstars: number of stars to generate.
:param noise_level: quantity of noise to add to the data.
:param xlim: x limit of the field.
:param ylim: y limit of the field.
:param seed: seed of the generator.
:param test_size: size ratio of the test sample.
:param plot: set to true to have plot of the field.
:param vmax=8 max value for the color map.
"""
np.random.seed(seed)
# generate star coordinate
if nstars<1500:
nstars_interp = nstars
else:
nstars_interp = 1500
u_interp = np.random.uniform(-xlim, xlim, nstars_interp)
v_interp = np.random.uniform(-ylim, ylim, nstars_interp)
coord_interp = np.array([u_interp, v_interp]).T
# generate covariance matrix
kernel = treegp.eval_kernel(kernel)
cov_interp = kernel.__call__(coord_interp)
# generate gaussian random fields
size_interp = np.random.multivariate_normal([0]*nstars_interp, cov_interp)
g1_interp = np.random.multivariate_normal([0]*nstars_interp, cov_interp)
g2_interp = np.random.multivariate_normal([0]*nstars_interp, cov_interp)
if nstars<1500:
size = size_interp
g1 = g1_interp
g2 = g2_interp
u = u_interp
v = v_interp
coord = coord_interp
else:
# Interp on stars position using a gp interp with truth kernel.
# Trick to have more stars faster as a gaussian random field.
u = np.random.uniform(-xlim, xlim, nstars)
v = np.random.uniform(-ylim, ylim, nstars)
coord = np.array([u,v]).T
K = kernel.__call__(coord_interp) + np.eye(nstars_interp)*1e-10
factor = (cholesky(K, overwrite_a=True, lower=False), False)
size = return_gp_predict(size_interp, coord_interp, coord, kernel, factor)
g1 = return_gp_predict(g1_interp, coord_interp, coord, kernel, factor)
g2 = return_gp_predict(g2_interp, coord_interp, coord, kernel, factor)
# add noise on psfs parameters
size += np.random.normal(scale=noise_level, size=nstars)
g1 += np.random.normal(scale=noise_level, size=nstars)
g2 += np.random.normal(scale=noise_level, size=nstars)
size_err = np.ones(nstars)*noise_level
g1_err = np.ones(nstars)*noise_level
g2_err = np.ones(nstars)*noise_level
# create stars
stars = []
for i in range(nstars):
star = make_single_star(u[i], v[i],
size[i], g1[i], g2[i],
size_err[i], g1_err[i], g2_err[i])
stars.append(star)
if plot:
import matplotlib.pyplot as plt
plt.figure()
plt.scatter(u, v, c=size, vmin=-vmax, vmax=vmax, cmap=plt.cm.seismic)
plt.figure()
plt.scatter(u, v, c=g1, vmin=-vmax, vmax=vmax, cmap=plt.cm.seismic)
plt.figure()
plt.scatter(u, v, c=g2, vmin=-vmax, vmax=vmax, cmap=plt.cm.seismic)
# split training / validation
stars_training, stars_validation = train_test_split(stars, test_size=test_size, random_state=42)
return stars_training, stars_validation
def check_gp(stars_training, stars_validation, kernel, optimizer,
min_sep=None, max_sep=None, nbins=20, l0=3000., rows=None,
plotting=False, atol=4e-2, rtol=1e-3, test_star_fit=False):
""" Solve for global PSF model, test it, and optionally display it.
"""
interp = piff.GPInterp(kernel=kernel, optimizer=optimizer,
normalize=True, white_noise=0., l0=l0,
n_neighbors=4, average_fits=None, rows=rows,
nbins=nbins, min_sep=min_sep, max_sep=max_sep,
logger=None)
interp.initialize(stars_training)
interp.solve(stars=stars_training, logger=None)
if not test_star_fit:
stars_test = interp.interpolateList(stars_validation)
else:
stars_v = copy.deepcopy(stars_validation)
for s in stars_v:
s.fit = None
stars_test = interp.interpolateList(stars_v)
xtest = np.array([interp.getProperties(star) for star in stars_validation])
y_validation = np.array([star.fit.params for star in stars_validation])
y_err = np.sqrt(np.array([star.fit.params_var for star in stars_validation]))
y_test = np.array([star.fit.params for star in stars_test])
np.testing.assert_allclose(y_test, y_validation, atol=atol)
if optimizer != 'none':
truth_hyperparameters = np.exp(interp._init_theta)
fitted_hyperparameters = np.exp(
np.array([gp._optimizer._kernel.theta for gp in interp.gps]))
np.testing.assert_allclose(np.mean(fitted_hyperparameters, axis=0),
np.mean(truth_hyperparameters, axis=0),
rtol=rtol)
# Invalid kernel (can't use an instantiated kernel object for the kernel here)
with np.testing.assert_raises(TypeError):
piff.GPInterp(kernel=interp.gps[0].kernel, optimizer=optimizer)
# Invalid optimizer
with np.testing.assert_raises(ValueError):
piff.GPInterp(kernel=kernel, optimizer='invalid')
# Invalid number of kernels. (Can't tell until initialize)
if isinstance(kernel, str):
interp2 = piff.GPInterp(kernel=[kernel] * 4, optimizer=optimizer)
with np.testing.assert_raises(ValueError):
interp2.initialize(stars_training)
# Check I/O.
file_name = os.path.join('output', 'test_gp.fits')
with fitsio.FITS(file_name,'rw',clobber=True) as fout:
interp.write(fout, extname='gp')
with fitsio.FITS(file_name,'r') as fin:
interp2 = piff.Interp.read(fin, extname='gp')
stars_test = interp2.interpolateList(stars_validation)
y_test = np.array([star.fit.params for star in stars_test])
np.testing.assert_allclose(y_test, y_validation, atol=atol)
if plotting:
import matplotlib.pyplot as plt
title = ["size", "$g_1$", "$g_2$"]
for j in range(3):
plt.figure()
plt.title('%s validation'%(title[j]), fontsize=18)
plt.scatter(xtest[:,0], xtest[:,1], c=y_validation[:,j], vmin=-4e-2, vmax=4e-2,
cmap=plt.cm.seismic)
plt.colorbar()
plt.figure()
plt.title('%s test (gp interp)'%(title[j]), fontsize=18)
plt.scatter(xtest[:,0], xtest[:,1], c=y_test[:,j], vmin=-4e-2, vmax=4e-2,
cmap=plt.cm.seismic)
plt.colorbar()
if optimizer in ['isotropic', 'anisotropic']:
if optimizer == 'isotropic':
for gp in interp.gps:
plt.figure()
plt.scatter(gp._optimizer._2pcf_dist, gp._optimizer._2pcf)
plt.plot(gp._optimizer._2pcf_dist, gp._optimizer._2pcf_fit)
plt.plot(gp._optimizer._2pcf_dist,
np.ones_like(gp._optimizer._2pcf_dist)*4e-4,'b--')
plt.ylim(0,7e-4)
else:
for gp in interp.gps:
EXT = [np.min(gp._optimizer._2pcf_dist[:,0]),
np.max(gp._optimizer._2pcf_dist[:,0]),
np.min(gp._optimizer._2pcf_dist[:,1]),
np.max(gp._optimizer._2pcf_dist[:,1])]
CM = plt.cm.seismic
MAX = np.max(gp._optimizer._2pcf)
N = int(np.sqrt(len(gp._optimizer._2pcf)))
plt.figure(figsize=(10,5) ,frameon=False)
plt.subplots_adjust(wspace=0.5,left=0.07,right=0.95, bottom=0.15,top=0.85)
plt.subplot(1,2,1)
plt.imshow(gp._optimizer._2pcf.reshape(N,N), extent=EXT,
interpolation='nearest', origin='lower',
vmin=-MAX, vmax=MAX, cmap=CM)
cbar = plt.colorbar()
cbar.formatter.set_powerlimits((0, 0))
cbar.update_ticks()
cbar.set_label('$\\xi$',fontsize=20)
plt.xlabel('$\\theta_X$',fontsize=20)
plt.ylabel('$\\theta_Y$',fontsize=20)
plt.title('Measured 2-PCF',fontsize=16)
plt.subplot(1,2,2)
plt.imshow(gp._optimizer._2pcf_fit.reshape(N,N), extent=EXT,
interpolation='nearest',
origin='lower',vmin=-MAX,vmax=MAX, cmap=CM)
cbar = plt.colorbar()
cbar.formatter.set_powerlimits((0, 0))
cbar.update_ticks()
cbar.set_label('$\\xi\'$',fontsize=20)
plt.xlabel('$\\theta_X$',fontsize=20)
plt.ylabel('$\\theta_Y$',fontsize=20)
plt.show()
@timer
def test_gp_interp_isotropic():
if __name__ == "__main__":
atol = 4e-2
rtol = 3e-1
nstars = [1600, 1600, 4000, 4000]
else:
atol = 4e-1
rtol = 5e-1
nstars = [160, 160, 400, 400]
noise_level = 1e-3
LIM = [10, 10, 20, 20]
kernels = [["4e-4 * RBF(4.)", "4e-4 * RBF(4.)", "4e-4 * RBF(4.)"],
"4e-4 * RBF(4.)",
"4e-4 * RBF(4.)",
"4e-4 * VonKarman(20.)"]
optimizer = ['none',
'likelihood',
'isotropic',
'isotropic']
rows = [[0,1,2], None, None, None]
test_star_fit = [True, False, False, False]
for i in range(len(kernels)):
if i!=0:
K = kernels[i]
else:
K = kernels[i][0]
stars_training, stars_validation = make_gaussian_random_fields(
K, nstars[i], xlim=-LIM[i], ylim=LIM[i],
seed=30352010, vmax=4e-2, noise_level=noise_level)
check_gp(stars_training, stars_validation, kernels[i],
optimizer[i], rows=rows[i],
atol=atol, rtol=rtol, test_star_fit=test_star_fit[i],
plotting=False)
@timer
def test_gp_interp_anisotropic():
if __name__ == "__main__":
atol = 4e-2
rtol = 3e-1
nstars = [1600, 4000, 1600, 4000]
else:
atol = 4e-1
rtol = 5e-1
nstars = [160, 500, 160, 500]
noise_level = 1e-4
L1 = get_correlation_length_matrix(4., 0.3, 0.3)
invL1 = np.linalg.inv(L1)
L2 = get_correlation_length_matrix(20., 0.3, 0.3)
invL2 = np.linalg.inv(L2)
kernels = ["4e-4 * AnisotropicRBF(invLam={0!r})".format(invL1),
"4e-4 * AnisotropicRBF(invLam={0!r})".format(invL1),
"4e-4 * AnisotropicVonKarman(invLam={0!r})".format(invL2),
"4e-4 * AnisotropicVonKarman(invLam={0!r})".format(invL2)]
optimizer = ['none',
'anisotropic',
'none',
'anisotropic']
for i in range(len(kernels)):
stars_training, stars_validation = make_gaussian_random_fields(
kernels[i], nstars[i], xlim=-20, ylim=20,
seed=30352010, vmax=4e-2,
noise_level=noise_level)
check_gp(stars_training, stars_validation, kernels[i],
optimizer[i], min_sep=0., max_sep=5., nbins=11,
l0=20., atol=atol, rtol=rtol, plotting=False)
@timer
def test_yaml():
if __name__ == '__main__':
logger = piff.config.setup_logger(verbose=2)
else:
logger = piff.config.setup_logger(log_file='output/test_gp.log')
# Take DES test image, and test doing a psf run with GP interpolator
# Use config parser:
psf_file = os.path.join('output','gp_psf.fits')
config = {
'input' : {
# These can be regular strings
'image_file_name' : 'input/DECam_00241238_01.fits.fz',
# Or any GalSim str value type. e.g. FormattedStr
'cat_file_name' : {
'type': 'FormattedStr',
'format': '%s/DECam_%08d_%02d_psfcat_tb_maxmag_17.0_magcut_3.0_findstars.fits',
'items': [
'input', # dir
241238, # expnum
1 # chipnum
]
},
# What hdu is everything in?
'image_hdu' : 1,
'badpix_hdu' : 2,
'weight_hdu' : 3,
'cat_hdu' : 2,
# What columns in the catalog have things we need?
'x_col' : 'XWIN_IMAGE',
'y_col' : 'YWIN_IMAGE',
'ra' : 'TELRA',
'dec' : 'TELDEC',
'gain' : 'GAINA',
'sky_col' : 'BACKGROUND',
# How large should the postage stamp cutouts of the stars be?
'stamp_size' : 21,
},
'psf' : {
'model' : { 'type' : 'GSObjectModel',
'fastfit' : True,
'gsobj' : 'galsim.Gaussian(sigma=1.0)' },
'interp' : { 'type' : 'GPInterp',
'keys' : ['u', 'v'],
'optimizer' : 'none',
'kernel' : 'RBF(200.0)'}
},
'output' : { 'file_name' : psf_file },
}
piff.piffify(config, logger)
psf = piff.read(psf_file)
assert type(psf.model) == piff.GSObjectModel
assert type(psf.interp) == piff.GPInterp
print('nstars = ',len(psf.stars))
target = psf.stars[17]
test_star = psf.interp.interpolate(target)
np.testing.assert_almost_equal(test_star.fit.params, target.fit.params, decimal=3)
# This should also work if the target doesn't have a fit yet.
print('interpolate ',piff.Star(target.data,None))
test_star = psf.interp.interpolate(piff.Star(target.data,None))
np.testing.assert_almost_equal(test_star.fit.params, target.fit.params, decimal=3)
if __name__ == "__main__":
test_gp_interp_isotropic()
test_gp_interp_anisotropic()
test_yaml()
| [
"[email protected]"
] | |
9c59f9dbd1305703fbe6cfa40102879fac180355 | de6fb3a55196b6bd36a4fda0e08ad658679fb7a1 | /vt_manager/src/python/agent/xen/provisioning/configurators/mediacat/MediacatVMConfigurator.py | 1c32a6c5a7a87130868b322f6583514d2cd725a1 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | dana-i2cat/felix | 4a87af639e4c7db686bfa03f1ae4ce62711615e3 | 059ed2b3308bda2af5e1942dc9967e6573dd6a53 | refs/heads/master | 2021-01-02T23:12:43.840754 | 2016-02-04T10:04:24 | 2016-02-04T10:04:24 | 17,132,912 | 4 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,703 | py | import shutil
import os
import jinja2
import subprocess
from xen.provisioning.hdmanagers.LVMHdManager import LVMHdManager
from xen.provisioning.HdManager import HdManager
from settings.settingsLoader import OXA_XEN_SERVER_KERNEL,OXA_XEN_SERVER_INITRD,OXA_DEBIAN_INTERFACES_FILE_LOCATION,OXA_DEBIAN_UDEV_FILE_LOCATION
class MediacatVMConfigurator:
''' Private methods '''
@staticmethod
def __createParavirtualizationVM(vm):
swap = 0
if len(vm.xen_configuration.users.user) == 1 and vm.xen_configuration.users.user[0].name == "root":
passwd = str(vm.xen_configuration.users.user[0].password)
if vm.xen_configuration.memory_mb < 1024:
swap = vm.xen_configuration.memory_mb*2
else:
swap = 1024
p = subprocess.Popen(['/usr/bin/xen-create-image','--hostname=' + vm.name,'--size=' + str(vm.xen_configuration.hd_size_gb) + 'Gb','--swap=' + str(swap) + 'Mb','--memory=' + str(vm.xen_configuration.memory_mb) + 'Mb','--arch=amd64','--password=' + passwd,'--output=' + LVMHdManager.getConfigFileDir(vm), '--role=udev'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
p.wait()
@staticmethod
def __createHVMFileHdConfigFile(vm,env):
template_name = "mediacatHVMFileHd.pt"
template = env.get_template(template_name)
#Set vars&render
output = template.render(
kernelImg=OXA_XEN_SERVER_KERNEL,
initrdImg=OXA_XEN_SERVER_INITRD,
vm=vm)
#write file
cfile = open(HdManager.getConfigFilePath(vm),'w')
cfile.write(output)
cfile.close()
#Public methods
@staticmethod
def getIdentifier():
return MediacatVMConfigurator.__name__
@staticmethod
def configureVmDisk(vm,path):
return
@staticmethod
def createVmConfigurationFile(vm):
#get env
template_dirs = []
template_dirs.append(os.path.join(os.path.dirname(__file__), 'templates/'))
env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs))
if vm.xen_configuration.hd_setup_type == "logical-volume-image" and vm.xen_configuration.virtualization_setup_type == "paravirtualization":
MediacatVMConfigurator.__createParavirtualizationVM(vm)
elif vm.xen_configuration.hd_setup_type == "logical-volume-image" and vm.xen_configuration.virtualization_setup_type == "hardware-assisted-virtualization":
MediacatVMConfigurator.__createHVMFileHdConfigFile(vm,env)
else:
raise Exception("type of file or type of virtualization not supported for the creation of xen vm configuration file")
| [
"[email protected]"
] | |
f5bd5d0daea26ef86acd064dbec79ff7205d9815 | 10b5a703c0166b55331513d2a9aead687032e804 | /leetcode1578.py | 7f4f21ed382eed47d291365b4de17dc2a0e596ab | [] | no_license | jack456054/leetcode | f2d623d5683098b2038322ee3eef81dc020f6fb1 | 24f0075909f8620513f6f21c9ad3dc299dee8967 | refs/heads/master | 2022-11-12T18:01:52.832206 | 2022-11-10T07:24:39 | 2022-11-10T07:24:39 | 125,793,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,450 | py | class Solution:
# def minCost(self, colors: str, neededTime: List[int]) -> int:
# if len(colors) == 1:
# return 0
# current_color: str = None
# time_list: List[int] = []
# results: int = 0
# for index, color in enumerate(colors):
# if current_color == color:
# heappush(time_list, neededTime[index])
# else:
# tl_len = len(time_list)
# if tl_len > 1:
# results += sum(nsmallest(tl_len - 1, time_list))
# current_color = color
# time_list = [neededTime[index]]
# tl_len = len(time_list)
# if tl_len > 1:
# results += sum(nsmallest(tl_len - 1, time_list))
# return results
def minCost(self, colors: str, neededTime: List[int]) -> int:
if len(colors) == 1:
return 0
current_color: str = colors[0]
current_largest: int = neededTime[0]
results: int = neededTime[0]
for index, color in enumerate(colors[1:]):
results += neededTime[index + 1]
if current_color == color:
current_largest = max(current_largest, neededTime[index + 1])
else:
results -= current_largest
current_color = color
current_largest = neededTime[index + 1]
results -= current_largest
return results
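# Worked example: colors = "abaac", neededTime = [1, 2, 3, 4, 5] -> 3.
# The only run of equal colors is the "aa" at indices 2-3 (costs 3 and 4);
# keeping the most expensive balloon of each run and popping the rest drops
# the cost-3 balloon, so the minimum total time is 3.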
| [
"[email protected]"
] | |
e2a991b355677ee0a574dce0a2c19d1b6cac5bc7 | 2256a61b57eed52ce5b3dd19e54108545e3fa1a1 | /sandbox/ipython/mydemo.py | fd31887d834e800fcd42827f23ebcf608aea26b6 | [
"Apache-2.0"
] | permissive | hubitor/progs | 18877b7dbd455f1192c96ebe1905f67329d1c749 | e4537da6da47d380a1a1a04e8068866013c7b7b6 | refs/heads/master | 2020-04-28T18:49:23.446076 | 2019-02-18T07:59:28 | 2019-02-18T07:59:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 128 | py | #! /usr/bin/env python
# -*- coding: latin-1 -*-
from IPython.lib.demo import Demo
mydemo = Demo('myscript.py')
#mydemo()
| [
"[email protected]"
] | |
1f87942cf0798f90d596f602e6524b16e1aed34a | df4b577668b830fcb41be675d691a72b952e892b | /releasenotes/source/conf.py | 9550aff663dfded18ef4e450b36cecf4ea41502c | [
"Apache-2.0"
] | permissive | 4383/tobiko | 37ef7bfb3b51918825c4c412136467fb32850494 | f8e6916db890021fa17ddbfc5e6007a25093c8cb | refs/heads/master | 2020-10-01T00:03:27.723538 | 2019-12-11T16:01:53 | 2019-12-11T16:04:14 | 227,405,383 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,970 | py | # Copyright 2019 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TOBIKO_DIR = os.path.abspath(os.path.join(BASE_DIR, "..", ".."))
sys.path.insert(0, TOBIKO_DIR)
# -- Project information -----------------------------------------------------
project = 'Tobiko Release Notes'
copyright = "2019, Red Hat"
author = "Tobiko's Team"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# Version info
from tobiko import version
release = version.release
# The short X.Y version.
version = version.version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'reno.sphinxext',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"canonical_url": "https://docs.openstack.org/tobiko/latest/",
"logo_only": False,
"display_version": True,
"prev_next_buttons_location": "top",
"style_external_links": True,
# Toc options
"collapse_navigation": True,
"sticky_navigation": True,
"navigation_depth": 4,
"includehidden": True,
"titles_only": False,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'TobikoReleaseNotesdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TobikoReleaseNotes.tex', u'Tobiko Release Notes Documentation',
u'Tobiko developers', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tobikoreleasenotes', u'Tobiko Release Notes Documentation',
[u'Tobiko developers'], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TobikoReleaseNotes', u'Tobiko Release Notes Documentation',
u'Tobiko developers', 'TobikoReleaseNotes', 'One line description of project.',
'Miscellaneous'),
]
| [
"[email protected]"
] | |
cfbde29af5c7d47388410751eaebd45c382e38a4 | ff3f4b3117847f70fe68741288f28576fe2cc5e3 | /baekjoon/1712_손익분기점.py | 7ad98c5c1bd7ff74104ee79698dc45bd51b98083 | [] | no_license | manuck/myAlgo | 087bbe99672c40762759e9202fe371c394736fb1 | c673687d23a2d5cc06b6a6d5fb1bc0cb2e2b7bd9 | refs/heads/master | 2021-07-01T00:13:27.590848 | 2020-12-17T18:10:16 | 2020-12-17T18:10:16 | 203,954,477 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,280 | py | import sys
sys.stdin = open('1712_input.txt')
'''
Problem
World Electronics manufactures and sells laptops. Regardless of how many laptops are sold, a fixed cost of A million won (rent, property tax, insurance, salaries, etc.) is incurred every year, and producing a single laptop takes B million won of variable cost (materials, labour, etc.).
For example, suppose A = 1,000 and B = 70. Then producing one laptop costs 1,070 in total, and producing ten costs 1,700 in total.
Suppose the laptop price is set to C million won. As production grows, at some point total revenue (sales) exceeds total cost (fixed + variable). The first point where total revenue exceeds total cost and a profit appears is called the break-even point.
Given A, B and C, write a program that finds the break-even point.
Input
The first line contains A, B and C separated by spaces. A, B and C are natural numbers no greater than 2.1 billion.
'''
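# Worked example (the price C = 170 is assumed for illustration): each unit
# sold nets C - B = 100, so the first profitable unit is A // (C - B) + 1,
# e.g. a, b, c = 1000, 70, 170  ->  1000 // 100 + 1 == 11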
a, b, c = map(int, input().split())
answer = 0
if b < c:
answer = a // (c-b)+1
else:
answer = -1
print(answer) | [
"[email protected]"
] | |
b5528fe3267e31e75996bc3fffe002a6cea4fa77 | 4de98697b5ad1ab7a7ca52ea39b1cc863ef2022f | /python/pacman_game/pacman.py | 43a332e5fd07d20c227adfa040de1f0adbb3c9e5 | [] | no_license | yask123/other-codes | bfb60b5fdca58df8dd46ce6dc6cf5782882bfd2a | adc73c7e3969d103b1154d2ec29a19348bca17ae | refs/heads/master | 2021-01-21T00:56:47.253189 | 2014-11-30T14:50:47 | 2014-11-30T14:50:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,316 | py | from random import randint
import os
#FUNCTION USED FROM : https://gist.github.com/jasonrdsouza/1901709
def getchar():
if os.name == 'nt':
ch = raw_input('Enter your turn (w,a,s,d) : ')
else:
import tty, termios, sys
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class bcolors:
PACMAN = '\033[1;32m'
WALL = '\033[0;36m'
GHOST = '\033[1;31m'
COINS = '\033[1;37m'
EMPTY = '\033[1;37m'
NEWEMP = '\033[0;30m'
def rem_dup(seq):
seen = set()
seen_add = seen.add
return [ x for x in seq if not (x in seen or seen_add(x))]
class Board:
def safe_play (self):
for i in [-1,self.__x]:
for j in range(-1,self.__y+2):
self.set_wall(i,j)
for j in [-1,self.__y]:
for i in range(-1,self.__x+2):
self.set_wall(i,j)
def map_with_old_wall(self,ghost=0):
for i in range(0,self.__x):
for j in range(0,self.__y):
if self.__pac.get_xy != [i,j] and self.check_wall(i,j)==False:
rand=randint(0,30)
if rand%9==0:
self.set_coin(i,j)
if rand==5 and ghost==1:
self.add_ghost(i,j)
def __init__(self, anything=''):
self.__score=0
self.__ghosts=[]
self.__coins=[]
self.__walls=[]
self.__change_map=0
self.__random=0
if(anything=='random'):
os.system("clear")
x = int(raw_input('\n Dimenstions of the board is needed before procceding.\n x (>5) = '))
y = int(raw_input(' y (>5) = '))
self.__change_map = int(raw_input('Do you want map to change after every completion (1->Yes, 0->No) : '))
self.__x = int(x) - 2
self.__y = int(y) - 2
self.__pac=Pacman(randint(0,x-3),randint(0,y-3))
self.__random=1
self.__random_board()
elif(anything=='level'):
os.system("clear")
self.__x = 15 - 2
self.__y = 35 - 2
self.__pac=Pacman(10,20)
self.__level_board()
self.__change_map=0
self.safe_play()
def __level_board(self):
for j in range(1,6):
self.set_wall(j,3)
for j in range(6,11):
self.set_wall(j,8)
for j in range(3,9):
self.set_wall(1,j)
for j in range(3,9):
self.set_wall(6,j)
for j in range(3,9):
self.set_wall(11,j)
self.set_wall(6,11)
for j in range(1,12):
self.set_wall(j,14)
for j in range(2,6):
self.set_wall(-j+6,j+14)
for j in range(2,6):
self.set_wall(j+6,j+14)
self.set_wall(6,20)
for j in range(1,12):
self.set_wall(j,22)
for j in range(1,12):
self.set_wall(j,31)
for j in range(1,5):
self.set_wall(j,26)
for j in range(1,7):
self.set_wall(6,j+23)
for j in range(8,12):
self.set_wall(j,26)
self.map_with_old_wall()
self.add_ghost(5,8)
def __random_board(self):
for i in range(0,self.__x):
for j in range(0,self.__y):
if self.__pac.get_xy != [i,j]:
rand=randint(0,20)
if rand==1:
self.set_wall(i,j)
elif rand==2:
self.set_coin(i,j)
elif ((rand==3)and(randint(0,5)==0)):
self.add_ghost(i,j)
elif rand==4:
self.set_coin(i,j)
elif rand==5 or rand==6:
self.set_wall(i,j)
def add_ghost(self,x,y):
if(self.check_wall(x,y)==False):
self.__ghosts+=[Ghost(x,y)]
def add_coin(self,x,y):
if(self.check_wall(x,y)==False):
self.__coins+=[[x,y]]
self.set_coin(x,y)
def set_wall(self,x,y):
self.__walls+=[[x,y]]
def set_coin(self,x,y):
if [x,y] not in self.__walls and [x,y] != self.__pac.get_xy():
self.__coins+=[[x,y]]
def set_pac(self,x,y):
self.__pac.set_position(x,y)
def check_wall(self,x,y):
if [x,y] in self.__walls:
return True;
else:
return False;
def check_ghost(self,x,y):
temp=[]
for i in range(0,len(self.__ghosts)):
temp+=[self.__ghosts[i].get_xy()]
if [x,y] in temp:
return True;
else:
return False;
def check_coin(self,x,y):
if [x,y] in self.__coins:
return True;
else:
return False;
def display_char(self,x,y):
temp=[]
h=len(self.__ghosts)
for i in range(0,h):
temp+=[self.__ghosts[i].get_xy()]
if [x,y] in temp:
print bcolors.GHOST+"G",
elif [x,y] == self.__pac.get_xy():
print bcolors.PACMAN+"P",
elif [x,y] in self.__walls:
print bcolors.WALL+"X",
elif [x,y] in self.__coins:
print bcolors.COINS+"C",
else:
print bcolors.NEWEMP+".",
def display(self):
print ""
for i in range(-1,self.__x+1):
for j in range(-1,self.__y+1):
self.display_char(i,j)
print ""
print bcolors.EMPTY+""
	def __checkGhost(self):
over=0
loc=self.__pac.get_xy()
for i in range(0,len(self.__ghosts)):
if(self.__ghosts[i].get_xy()==loc):
over=1
break
if over==1:
return False
#Don't over
else:
return True
#Over the game
def collectCoin(self,out):
self.__coins.remove([out[0],out[1]])
def player_turn(self):
play=1
over=0
os.system("clear")
self.display()
while(play):
out=self.__pac.move_packman_input('Enter your turn sir (w,a,s,d) : ')
os.system("clear")
if self.check_wall(out[0],out[1]) == False:
self.__pac.set_position(out[0],out[1])
if self.check_coin(out[0],out[1]) == True:
self.__score+=10
self.collectCoin(out)
for i in range(0,len(self.__ghosts)):
possible_moves=self.__ghosts[i].move_ghost_future_preference(self.__pac)
for j in range(0,len(possible_moves)):
if self.check_wall(possible_moves[j][0],possible_moves[j][1]) == False and self.check_ghost(possible_moves[j][0],possible_moves[j][1]) == False:
self.__ghosts[i].set_position(possible_moves[j][0],possible_moves[j][1])
break
if possible_moves[j]==self.__pac.get_xy():
over=1
if (over==1):
play=0
if (out[2]=='q' or out[2]=='Q'):
play=0
print "Thank you for playing PACMAN ! :)"
else:
self.display()
print "Score : "+self.get_score()+" Coins left : "+str(len(self.__coins))
if over==1:
print "Game Over !"
if len(self.__coins)==0 and self.__random==1:
os.system("clear")
self.__ghosts=[]
self.__coins=[]
if(self.__change_map==1):
self.__walls=[]
self.__random_board()
else:
self.map_with_old_wall(1)
self.safe_play()
self.display()
print "Score : "+self.get_score()+" Coins left : "+str(len(self.__coins))
if self.__random==0 and len(self.__coins)==0:
self.add_ghost(5,8)
self.map_with_old_wall(0)
def get_score(self):
return str(self.__score)
class Person:
def __init__(self,x=0,y=0):
self.__x=x
self.__y=y
def set_position(self,x,y):
self.__x=x
self.__y=y
def get_x(self):
return self.__x
def get_y(self):
return self.__y
def get_xy(self):
return [self.__x,self.__y]
class Pacman(Person):
def move_packman_input(self,msg=''):
x_n=self.get_x()
y_n=self.get_y()
inp=getchar()
#inp=raw_input(msg)
if(inp=="a" or inp=="A"):
x_n = self.get_x()
y_n = self.get_y()-1
if(inp=="d" or inp=="D"):
x_n = self.get_x()
y_n = self.get_y()+1
if(inp=="s" or inp=="S"):
x_n = self.get_x()+1
y_n = self.get_y()
if(inp=="w" or inp=="W"):
x_n = self.get_x()-1
y_n = self.get_y()
return [x_n,y_n,inp]
class Ghost(Person):
def move_ghost_future_preference(self,Packman):
jaadu = []
x = self.get_x()
y = self.get_y()
if x - Packman.get_x() > 0 :
if y - Packman.get_y() > 0 :
jaadu = [[x-1,y],[x,y-1],[x+1,y],[x,y+1],[x,y]]
elif y - Packman.get_y() < 0 :
jaadu = [[x-1,y],[x,y+1],[x+1,y],[x,y-1],[x,y]]
else :
jaadu = [[x-1,y],[x+1,y],[x,y]]
elif x - Packman.get_x() < 0 :
if y - Packman.get_y() > 0 :
jaadu = [[x+1,y],[x,y-1],[x-1,y],[x,y+1],[x,y]]
elif y - Packman.get_y() < 0 :
jaadu = [[x+1,y],[x,y+1],[x,y-1],[x-1,y],[x,y]]
else:
jaadu = [[x+1,y],[x-1,y],[x,y]]
else :
if y - Packman.get_y() > 0 :
jaadu = [[x,y-1],[x,y+1],[x,y]]
elif y - Packman.get_y() < 0 :
jaadu = [[x,y+1],[x,y-1],[x,y]]
else:
jaadu = [[x,y]]
return jaadu
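# Preference sketch: a ghost at (5, 5) chasing Pacman at (2, 8) tries, in order,
# up (4, 5), right (5, 6), down (6, 5), left (5, 4), and finally staying put.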
os.system("clear")
print "Please select a option :\n \n1. Play level wise\n2. Play on a random map\n3. Quit\n\n"
inp=input('Enter your choice : ')
inp=int(inp)
if inp==1:
t=Board('level')
t.player_turn()
elif inp==2:
t=Board('random')
t.player_turn()
elif inp==3:
print "Thank you for using the program.. :)"
else:
print "Wrong input !! \nQuitting program...."
#os.system("clear")
#t.display() | [
"[email protected]"
] | |
fe6f9e517fa57fa574ad57811ee18bab68c95b8d | 1243d11e36e61542693fb9a4b187f328aa2eb473 | /account/models.py | 6d5ebea9fe2dd56a1a41c1d14b7e00f2ff50f127 | [] | no_license | LatorreDev/Django-socialNetwork | 344fa7d4fdee2a136695cdf9be20feaa8a8e8094 | b15f7a3c13e118d3ce1144784357b44f81c426ac | refs/heads/master | 2022-12-08T23:02:43.221239 | 2020-09-18T01:05:03 | 2020-09-18T01:05:03 | 296,475,039 | 0 | 0 | null | 2020-09-18T01:05:04 | 2020-09-18T00:44:37 | null | UTF-8 | Python | false | false | 476 | py | from django.db import models
from django.conf import settings
# Create your models here.
class Profile(models.Model):
user = models.OneToOneField(settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
date_of_birth = models.DateField(blank=True, null=True)
photo = models.ImageField(upload_to='users/%Y/%m/%d',
blank=True)
def __str__(self):
return f'Profile for user {self.user.username}' | [
"[email protected]"
] | |
f82607f74d1a032f97f36e8f68b0a32b783431f7 | 09e57dd1374713f06b70d7b37a580130d9bbab0d | /data/p4VQE/R1/benchmark/startQiskit_QC115.py | aac0e298834b98a16d87fa3890ff00e155618dd8 | [
"BSD-3-Clause"
] | permissive | UCLA-SEAL/QDiff | ad53650034897abb5941e74539e3aee8edb600ab | d968cbc47fe926b7f88b4adf10490f1edd6f8819 | refs/heads/main | 2023-08-05T04:52:24.961998 | 2021-09-19T02:56:16 | 2021-09-19T02:56:16 | 405,159,939 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,817 | py | # qubit number=3
# total number=20
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def bitwise_xor(s: str, t: str) -> str:
length = len(s)
res = []
for i in range(length):
res.append(str(int(s[i]) ^ int(t[i])))
return ''.join(res[::-1])
def bitwise_dot(s: str, t: str) -> str:
length = len(s)
res = 0
for i in range(length):
res += int(s[i]) * int(t[i])
return str(res % 2)
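# Examples: bitwise_dot("011", "010") -> "1" (one overlapping 1, taken mod 2);
# bitwise_xor("10", "00") -> "01", since the result is reversed before joining.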
def build_oracle(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the oracle O_f
# NOTE: use multi_control_toffoli_gate ('noancilla' mode)
# https://qiskit.org/documentation/_modules/qiskit/aqua/circuits/gates/multi_control_toffoli_gate.html
# https://quantumcomputing.stackexchange.com/questions/3943/how-do-you-implement-the-toffoli-gate-using-only-single-qubit-and-cnot-gates
# https://quantumcomputing.stackexchange.com/questions/2177/how-can-i-implement-an-n-bit-toffoli-gate
controls = QuantumRegister(n, "ofc")
target = QuantumRegister(1, "oft")
oracle = QuantumCircuit(controls, target, name="Of")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
oracle.mct(controls, target[0], None, mode='noancilla')
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
# oracle.draw('mpl', filename=(kernel + '-oracle.png'))
return oracle
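# Illustrative use (assumed f): for n = 2 and f(x) = "1" only when x == "11",
# the loop adds a single multi-controlled X, i.e. a plain Toffoli on the target:
#   demo_oracle = build_oracle(2, lambda rep: "1" if rep == "11" else "0")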
def build_circuit(n: int, f: Callable[[str], str]) -> QuantumCircuit:
# implement the Bernstein-Vazirani circuit
zero = np.binary_repr(0, n)
b = f(zero)
# initial n + 1 bits
input_qubit = QuantumRegister(n+1, "qc")
classicals = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classicals)
# inverse last one (can be omitted if using O_f^\pm)
prog.x(input_qubit[n])
# circuit begin
prog.h(input_qubit[1]) # number=1
prog.cx(input_qubit[0],input_qubit[2]) # number=11
prog.cx(input_qubit[0],input_qubit[2]) # number=17
prog.x(input_qubit[2]) # number=18
prog.cx(input_qubit[0],input_qubit[2]) # number=19
prog.cx(input_qubit[0],input_qubit[2]) # number=13
prog.h(input_qubit[1]) # number=7
prog.cz(input_qubit[2],input_qubit[1]) # number=8
prog.h(input_qubit[1]) # number=9
prog.cx(input_qubit[2],input_qubit[1]) # number=4
prog.y(input_qubit[1]) # number=14
prog.cx(input_qubit[2],input_qubit[1]) # number=10
prog.z(input_qubit[2]) # number=3
prog.y(input_qubit[2]) # number=5
# apply H to get superposition
for i in range(n):
prog.h(input_qubit[i])
prog.h(input_qubit[n])
prog.barrier()
# apply oracle O_f
oracle = build_oracle(n, f)
prog.append(
oracle.to_gate(),
[input_qubit[i] for i in range(n)] + [input_qubit[n]])
# apply H back (QFT on Z_2^n)
for i in range(n):
prog.h(input_qubit[i])
prog.barrier()
# measure
return prog
def get_statevector(prog: QuantumCircuit) -> Any:
state_backend = Aer.get_backend('statevector_simulator')
statevec = execute(prog, state_backend).result()
quantum_state = statevec.get_statevector()
qubits = round(log2(len(quantum_state)))
quantum_state = {
"|" + np.binary_repr(i, qubits) + ">": quantum_state[i]
for i in range(2 ** qubits)
}
return quantum_state
def evaluate(backend_str: str, prog: QuantumCircuit, shots: int, b: str) -> Any:
# Q: which backend should we use?
# get state vector
quantum_state = get_statevector(prog)
# get simulate results
# provider = IBMQ.load_account()
# backend = provider.get_backend(backend_str)
# qobj = compile(prog, backend, shots)
# job = backend.run(qobj)
# job.result()
backend = Aer.get_backend(backend_str)
# transpile/schedule -> assemble -> backend.run
results = execute(prog, backend, shots=shots).result()
counts = results.get_counts()
a = Counter(counts).most_common(1)[0][0][::-1]
return {
"measurements": counts,
# "state": statevec,
"quantum_state": quantum_state,
"a": a,
"b": b
}
def bernstein_test_1(rep: str):
"""011 . x + 1"""
a = "011"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_2(rep: str):
"""000 . x + 0"""
a = "000"
b = "0"
return bitwise_xor(bitwise_dot(a, rep), b)
def bernstein_test_3(rep: str):
"""111 . x + 1"""
a = "111"
b = "1"
return bitwise_xor(bitwise_dot(a, rep), b)
if __name__ == "__main__":
n = 2
a = "11"
b = "1"
f = lambda rep: \
bitwise_xor(bitwise_dot(a, rep), b)
prog = build_circuit(n, f)
sample_shot =4000
writefile = open("../data/startQiskit_QC115.csv", "w")
# prog.draw('mpl', filename=(kernel + '.png'))
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = provider.get_backend("ibmq_5_yorktown")
circuit1 = transpile(prog, FakeYorktown())
circuit1.h(qubit=2)
circuit1.x(qubit=3)
circuit1.measure_all()
info = execute(circuit1,backend=backend, shots=sample_shot).result().get_counts()
print(info, file=writefile)
print("results end", file=writefile)
print(circuit1.depth(), file=writefile)
print(circuit1, file=writefile)
writefile.close()
| [
"[email protected]"
] | |
0f85e629918d9c10d47d0f02fe53bdb4da9e6d75 | 328ebfdbcef076ce0e930715f9bd786d7498185b | /lang/python/learn-python-programming-masterclass/section-3/strings.py | 91b26393876205c803ac842e2289ac95f64a1bc6 | [] | no_license | pnowak2/learnjs | b618e6f9563b3e86be0b1a21d647698e289daec0 | f8842b4e9e5d2eae6fb4e0d663b6699d74c90e9c | refs/heads/master | 2023-08-30T05:04:00.227920 | 2023-08-18T10:58:24 | 2023-08-18T10:58:24 | 41,912,571 | 3 | 0 | null | 2023-03-31T06:58:40 | 2015-09-04T11:36:13 | Python | UTF-8 | Python | false | false | 467 | py | print("Today is a good day")
print('Python is fun')
print("Python's string are easy to use")
print('We can include "quotes" in string')
print("Hello" + " world")
greeting = "Hello, "
# name = input("Please \n enter your name ")
name = "Tim"
# if we want a space, we can add that too
print(greeting + name)
print(greeting + ' ' + name)
age = 24
print(age)
print(type(greeting))
print(type(age))
age_in_words = "2 years"
print(type(age_in_words))
print(f'hello {age}')
| [
"[email protected]"
] | |
41861453f09818f9a2f62d37ff76df50af693824 | c5a1c95e9d8ce937f71caf8340cf11fe98e64f56 | /day15/problem2/[노태윤]소수 찾기.py | 8b5fff7359c78175d22f73d43ea64b2a1a1acb6b | [] | no_license | Boot-Camp-Coding-Test/Programmers | 963e5ceeaa331d99fbc7465f7b129bd68e96eae3 | 83a4b62ba2268a47859a6ce88ae1819bc96dcd85 | refs/heads/main | 2023-05-23T08:21:57.398594 | 2021-06-12T16:39:21 | 2021-06-12T16:39:21 | 366,589,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | from itertools import permutations
def isprime(N) :
    if N <= 1 :
        return False  # 0 and 1 are not prime, so return False
    for i in range(2,N) :
        if N % i == 0 :
            return False  # N is divisible by a smaller number, so it is not prime
    return True  # otherwise N is prime
def solution(numbers):
    answer = 0
    total_permutations_list = []
    for i in range(1,len(numbers)+1) :  # build permutations of every length from 1 to len(numbers)
        permutations_list = list(permutations(numbers,i))
        total_permutations_list.extend(list(map(lambda x : int("".join(x)),permutations_list)))  # e.g. [('1','7'), ('1','3')] becomes [17, 13] and is appended to the overall list
    set_total_permutations_list = set(total_permutations_list)  # duplicates are possible, so deduplicate with a set
    for i in set_total_permutations_list :
        if isprime(i) == True :  # if it is prime, add 1 to answer
            answer+=1
    return answer
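# Official examples: solution("17") == 3 (7, 17 and 71 are prime) and
# solution("011") == 2 (11 and 101 are prime).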
| [
"[email protected]"
] | |
a52be8922affd67547e135f801c5f101f05b49af | 60b2738284ae25231da1b1d91e3b61008c548673 | /ImageGenerator/generate_image_with_index.py | 0c4caf197fe57011c8892e239f78338ba67557a7 | [] | no_license | BhallaLab/Scripts | f54eb06693ae0f9de3b41a8ed2adda1da930aa24 | a5d3a2be92b269590316403b3c6194db020b261a | refs/heads/master | 2021-06-02T21:43:13.374239 | 2019-08-06T05:53:03 | 2019-08-06T05:53:03 | 37,115,228 | 8 | 7 | null | null | null | null | UTF-8 | Python | false | false | 1,025 | py | #!/usr/bin/env python
"""generate_image_with_index.py:
Generates transparent image with numbers written on them. Can be used to
caliberate projectors for frame per seconds.
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2015, Dilawar Singh and NCBS Bangalore"
__credits__ = ["NCBS Bangalore"]
__license__ = "GNU GPL"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "[email protected]"
__status__ = "Development"
import matplotlib.pyplot as plt
import os
import sys
fontsize = 100
dpi = 96
height = 768.0 / dpi
width = 1024.0 / dpi
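# figsize is specified in inches, so pixels / dpi recovers a 1024x768 px canvas
# (1024 / 96 = ~10.67 in wide, 768 / 96 = 8 in tall at 96 dpi).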
if not os.path.exists('_images'):
os.makedirs('_images')
for i in range(1000):
    print("Creating image for %s" % i)
    fig = plt.figure( figsize=(width, height) )
    plt.axis('off')
    plt.plot()
    plt.figtext(0.4, 0.4, '%s' % i, fontsize=fontsize)
    plt.savefig('_images/%s.png' % i, dpi=dpi, transparent=True)
    plt.close(fig)  # free the figure; keeping 1000 figures open exhausts memory
print("[INFO] All images are saved to _images")
| [
"[email protected]"
] | |
2d583075790cb83484032d009a3e8cfe584193d0 | 70e970ce9ec131449b0888388f65f0bb55f098cd | /SignalMC/python/pythia8/AMSB_gluinoToChargino_M-1300GeV_M-900GeV_CTau-10cm_TuneCP5_13TeV_pythia8_cff.py | adb04714d2ed3a9d54092513fa5a34287cf407ba | [] | no_license | OSU-CMS/DisappTrks | 53b790cc05cc8fe3a9f7fbd097284c5663e1421d | 1d1c076863a9f8dbd3f0c077d5821a8333fc5196 | refs/heads/master | 2023-09-03T15:10:16.269126 | 2023-05-25T18:37:40 | 2023-05-25T18:37:40 | 13,272,469 | 5 | 12 | null | 2023-09-13T12:15:49 | 2013-10-02T13:58:51 | Python | UTF-8 | Python | false | false | 8,096 | py | COM_ENERGY = 13000.
MGLU = 1300 # GeV
MCHI = 900 # GeV
CTAU = 100 # mm
CROSS_SECTION = 0.0522 # pb
SLHA_TABLE="""
# ISAJET SUSY parameters in SUSY Les Houches Accord 2 format
# Created by ISALHA 2.0 Last revision: C. Balazs 21 Apr 2009
Block SPINFO # Program information
1 ISASUGRA from ISAJET # Spectrum Calculator
2 7.80 29-OCT-2009 12:50:36 # Version number
Block MODSEL # Model selection
1 3 # Minimal anomaly mediated (AMSB) model
Block SMINPUTS # Standard Model inputs
1 1.27836258E+02 # alpha_em^(-1)
2 1.16570000E-05 # G_Fermi
3 1.17200002E-01 # alpha_s(M_Z)
4 9.11699982E+01 # m_{Z}(pole)
5 4.19999981E+00 # m_{b}(m_{b})
6 1.73070007E+02 # m_{top}(pole)
7 1.77699995E+00 # m_{tau}(pole)
Block MINPAR # SUSY breaking input parameters
1 1.50000000E+03 # m_0
2 3.20160000E+05 # m_{3/2}
3 5.00000000E+00 # tan(beta)
4 1.00000000E+00 # sign(mu)
Block EXTPAR # Non-universal SUSY breaking parameters
0 9.63624875E+15 # Input scale
Block MASS # Scalar and gaugino mass spectrum
# PDG code mass particle
24 8.04229965E+01 # W^+
25 1.17885536E+02 # h^0
35 5.14209375E+03 # H^0
36 5.10833789E+03 # A^0
37 5.12604248E+03 # H^+
1000001 5.84499561E+03 # dnl
1000002 5.84445264E+03 # upl
1000003 5.84499561E+03 # stl
1000004 5.84445264E+03 # chl
1000005 5.11084131E+03 # b1
1000006 4.26797754E+03 # t1
1000011 8.44497009E+02 # el-
1000012 7.82294617E+02 # nuel
1000013 8.44497009E+02 # mul-
1000014 7.82294617E+02 # numl
1000015 4.59390961E+02 # tau1
1000016 7.43124634E+02 # nutl
1000021 %.9g # glss
1000022 8.99857849E+02 # z1ss
1000023 2.96498828E+03 # z2ss
1000024 9.00032288E+02 # w1ss
1000025 -4.94443994E+03 # z3ss
1000035 4.94548633E+03 # z4ss
1000037 4.95200684E+03 # w2ss
2000001 5.94409229E+03 # dnr
2000002 5.88074072E+03 # upr
2000003 5.94409229E+03 # str
2000004 5.88074072E+03 # chr
2000005 5.89824365E+03 # b2
2000006 5.15734326E+03 # t2
2000011 4.41901886E+02 # er-
2000013 4.41901886E+02 # mur-
2000015 7.75092834E+02 # tau2
Block ALPHA # Effective Higgs mixing parameter
-1.97571859E-01 # alpha
Block STOPMIX # stop mixing matrix
1 1 6.91948459E-02 # O_{11}
1 2 -9.97603178E-01 # O_{12}
2 1 9.97603178E-01 # O_{21}
2 2 6.91948459E-02 # O_{22}
Block SBOTMIX # sbottom mixing matrix
1 1 9.99987841E-01 # O_{11}
1 2 4.92899446E-03 # O_{12}
2 1 -4.92899446E-03 # O_{21}
2 2 9.99987841E-01 # O_{22}
Block STAUMIX # stau mixing matrix
1 1 9.16852951E-02 # O_{11}
1 2 9.95788038E-01 # O_{12}
2 1 -9.95788038E-01 # O_{21}
2 2 9.16852951E-02 # O_{22}
Block NMIX # neutralino mixing matrix
1 1 -7.91596598E-04 #
1 2 9.99869168E-01 #
1 3 -1.56042408E-02 #
1 4 4.20085900E-03 #
2 1 9.99881387E-01 #
2 2 1.02774356E-03 #
2 3 1.28675103E-02 #
2 4 -8.40762258E-03 #
3 1 -3.16098332E-03 #
3 2 8.06056987E-03 #
3 3 7.07025349E-01 #
3 4 7.07135558E-01 #
4 1 1.50564853E-02 #
4 2 -1.39906351E-02 #
4 3 -7.06899285E-01 #
4 4 7.07015812E-01 #
Block UMIX # chargino U mixing matrix
1 1 -9.99734461E-01 # U_{11}
1 2 2.30428278E-02 # U_{12}
2 1 -2.30428278E-02 # U_{21}
2 2 -9.99734461E-01 # U_{22}
Block VMIX # chargino V mixing matrix
1 1 -9.99961317E-01 # V_{11}
1 2 8.79876781E-03 # V_{12}
2 1 -8.79876781E-03 # V_{21}
2 2 -9.99961317E-01 # V_{22}
Block GAUGE Q= 4.47923682E+03 #
1 3.57524991E-01 # g`
2 6.52378619E-01 # g_2
3 1.21928000E+00 # g_3
Block YU Q= 4.47923682E+03 #
3 3 8.32892656E-01 # y_t
Block YD Q= 4.47923682E+03 #
3 3 6.45801947E-02 # y_b
Block YE Q= 4.47923682E+03 #
3 3 5.14558963E-02 # y_tau
Block HMIX Q= 4.47923682E+03 # Higgs mixing parameters
1 4.95111182E+03 # mu(Q)
2 5.00000000E+00 # tan(beta)(M_GUT)
3 2.51892105E+02 # Higgs vev at Q
4 2.60951160E+07 # m_A^2(Q)
Block MSOFT Q= 4.47923682E+03 # DRbar SUSY breaking parameters
1 3.00553760E+03 # M_1(Q)
2 8.59459534E+02 # M_2(Q)
3 -5.73397852E+03 # M_3(Q)
31 7.99010315E+02 # MeL(Q)
32 7.99010315E+02 # MmuL(Q)
33 7.61961365E+02 # MtauL(Q)
34 5.51579651E+02 # MeR(Q)
35 5.51579651E+02 # MmuR(Q)
36 3.78081726E+02 # MtauR(Q)
41 5.55658252E+03 # MqL1(Q)
42 5.55658252E+03 # MqL2(Q)
43 4.88496289E+03 # MqL3(Q)
44 5.59192773E+03 # MuR(Q)
45 5.59192773E+03 # McR(Q)
46 4.10720898E+03 # MtR(Q)
47 5.65382471E+03 # MdR(Q)
48 5.65382471E+03 # MsR(Q)
49 5.68008496E+03 # MbR(Q)
Block AU Q= 4.47923682E+03 #
1 1 4.93593066E+03 # A_u
2 2 4.93593066E+03 # A_c
3 3 4.93593066E+03 # A_t
Block AD Q= 4.47923682E+03 #
1 1 1.17858047E+04 # A_d
2 2 1.17858047E+04 # A_s
3 3 1.17858047E+04 # A_b
Block AE Q= 4.47923682E+03 #
1 1 3.34377515E+03 # A_e
2 2 3.34377515E+03 # A_mu
3 3 3.34377515E+03 # A_tau
#
#
#
# =================
# |The decay table|
# =================
#
# PDG Width
DECAY 1000021 5.50675438E+00 # gluino decay
# BR NDA ID1 ID2 ID3
2.50000000E-01 3 1 -1 1000022
2.50000000E-01 3 2 -2 1000022
2.50000000E-01 3 1 -2 1000024
2.50000000E-01 3 -1 2 -1000024
#
# PDG Width
DECAY 1000024 %.9g # chargino decay
#
""" % (MGLU, (1.97326979e-13 / CTAU))
import FWCore.ParameterSet.Config as cms
from Configuration.Generator.Pythia8CommonSettings_cfi import *
from Configuration.Generator.MCTunes2017.PythiaCP5Settings_cfi import *
generator = cms.EDFilter("Pythia8GeneratorFilter",
pythiaPylistVerbosity = cms.untracked.int32(0),
filterEfficiency = cms.untracked.double(-1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
SLHATableForPythia8 = cms.string('%s' % SLHA_TABLE),
comEnergy = cms.double(COM_ENERGY),
crossSection = cms.untracked.double(CROSS_SECTION),
maxEventsToPrint = cms.untracked.int32(0),
PythiaParameters = cms.PSet(
pythia8CommonSettingsBlock,
pythia8CP5SettingsBlock,
processParameters = cms.vstring(
'SUSY:all = off',
'SUSY:gg2gluinogluino = on',
'SUSY:qqbar2gluinogluino = on',
'1000024:isResonance = false',
'1000024:oneChannel = 1 1.0 100 1000022 211',
'1000024:tau0 = %.1f' % CTAU,
'ParticleDecays:tau0Max = %.1f' % (CTAU * 10),
),
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CP5Settings',
'processParameters')
),
# The following parameters are required by Exotica_HSCP_SIM_cfi:
slhaFile = cms.untracked.string(''), # value not used
processFile = cms.untracked.string('SimG4Core/CustomPhysics/data/RhadronProcessList.txt'),
useregge = cms.bool(False),
hscpFlavor = cms.untracked.string('stau'),
massPoint = cms.untracked.int32(MCHI), # value not used
particleFile = cms.untracked.string('Configuration/GenProduction/python/ThirteenTeV/DisappTrksAMSBCascade/test/geant4_AMSB_chargino_%sGeV_ctau%scm.slha' % (MCHI, CTAU/10))
)
ProductionFilterSequence = cms.Sequence(generator)
| [
"[email protected]"
] | |
18746d107ac80f317f7718f9df54959cb3ba6e77 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/o4y.py | 89cc0accfba79c045c1482dbd947f1898b0456fa | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'o4Y':
printFunction(data[1:])
else:
print 'ERROR'
return
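# Example (assumed input format): a file line such as
#   o4Y " hello world "
# prints "hello world"; any other leading token prints ERROR and stops.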
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
758ddeec7b172f7f1236be16322795bec7ad6325 | e7263026bd4f34bae664c37e57a299ce83c7f111 | /03-Spider/scrapy-spider/xiciIPSpider/xiciIPSpider/items.py | 06d00e44d6c499c8beabdb1eace42bc540d723ca | [] | no_license | Aries000004/grocery | 34d0ad0648c6dff5c36f4a68abf9eeac59da214d | 27492f4ac7ef66d544f853dd6686920bcb9dc663 | refs/heads/master | 2020-03-24T22:50:37.703257 | 2018-07-26T11:43:54 | 2018-07-26T11:43:54 | 143,105,389 | 1 | 0 | null | 2018-08-01T04:51:12 | 2018-08-01T04:51:12 | null | UTF-8 | Python | false | false | 292 | py | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class XiciipspiderItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
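    # For a xici proxy-IP spider one might declare, for instance (assumed names):
    #   ip = scrapy.Field()
    #   port = scrapy.Field()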
pass
| [
"[email protected]"
] | |
0bb427439c4a8a56d195361c85fb14868b964517 | f508e4a751d3dfadd268794fd6092b3781a74d4c | /docs/conf.py | 090e77a6ac8479a9a127eddbd3cf278fac9292ef | [
"MIT"
] | permissive | mildewey/higlass | c2e274e0bdafccbfe5e994b8992248ae806e82fd | 8cc135017500216cb24b98c3c82d85ca861081b1 | refs/heads/master | 2020-04-02T19:26:31.670874 | 2018-10-19T16:05:30 | 2018-10-19T16:05:30 | 154,734,076 | 0 | 0 | null | 2018-10-25T20:34:59 | 2018-10-25T20:34:59 | null | UTF-8 | Python | false | false | 5,306 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# higlass documentation build configuration file, created by
# sphinx-quickstart on Mon Jul 3 16:40:45 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.viewcode', 'sphinx.ext.imgmath', 'sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HiGlass'
copyright = '2017,2018 HiGlass Authors'
author = 'HiGlass Authors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'v1.0'
# The full version, including alpha/beta/rc tags.
release = 'v1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'higlass_theme'
html_theme_path= ["."]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
"sidebar_collapse": False
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# This is required for the alabaster theme
# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html', # needs 'show_related': True theme option to display
'searchbox.html',
'donate.html',
]
}
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'higlassdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'higlass.tex', 'HiGlass Documentation',
'Peter Kerpedjiev', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'higlass', 'HiGlass Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'higlass', 'HiGlass Documentation',
author, 'HiGlass', 'A visual explorer for large genomic data.',
'Miscellaneous'),
]
| [
"[email protected]"
] | |
2285b70ca377c6f1d7352094d1f626929e63df89 | c21546695e35a3f7c60e684de04bcbe88b2b985a | /0191_Number_of_1_Bits.py | c55f7da2b0e4f58f2b0605fc72f4ee8b7757bcc1 | [] | no_license | piecesofreg09/study_leetcode | 4a05ddee44c72a6d0c50bca7cb0b70abd33b0b85 | fc69721dbe003fcc2f7795a6b38c41d877905205 | refs/heads/master | 2023-01-14T05:02:24.779136 | 2020-11-18T17:23:30 | 2020-11-18T17:23:30 | 288,774,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | '''
n & n - 1 will remove the rightmost 1 to be 0
'''
class Solution:
def hammingWeight(self, n: int) -> int:
count = 0
while n != 0:
count += 1
n = n & n - 1
return count
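# Example: n = 0b1011 -> 0b1010 -> 0b1000 -> 0b0000, so the answer is 3;
# each n & (n - 1) clears exactly the lowest set bit.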
| [
"[email protected]"
] | |
186aad5955392fda1d645d2081f56cfc70054898 | 9b9fa48ec458fec2b451b3be54bf2be23188b11e | /labs/functions/character_permutations.py | 08fb258bbbda4e629cb991a9d908bb5c6e327b6c | [] | no_license | Nikoletazl/Advanced-Python | 1bc55ce42693ff0a5bcf082f9f7867e07b771007 | f1e31fbd423b31e2b24db151df8b73c7eaf35ab5 | refs/heads/main | 2023-08-21T20:46:43.572803 | 2021-10-22T09:47:52 | 2021-10-22T09:47:52 | 415,001,443 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 337 | py | def permute(index, values):
if index == len(values):
print("".join(values))
return
for i in range(index, len(values)):
values[i], values[index] = values[index], values[i]
permute(index + 1, values)
values[i], values[index] = values[index], values[i]
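# Example: input "abc" prints all 3! = 6 orderings (abc, acb, bac, bca, cba, cab);
# the second swap backtracks so the list is restored before the next branch.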
permute(0, list(input()))
| [
"[email protected]"
] | |
cc4a17a7174b8409f4875d4c7fce223c8ee00a2d | cd486d096d2c92751557f4a97a4ba81a9e6efebd | /18/addons/context.venom/playFromHere.py | 887d841b3746e04a0b3b31161b57f8631de3a007 | [] | no_license | bopopescu/firestick-loader-kodi-data | 2f8cb72b9da67854b64aa76f720bdad6d4112926 | e4d7931d8f62c94f586786cd8580108b68d3aa40 | refs/heads/master | 2022-04-28T11:14:10.452251 | 2020-05-01T03:12:13 | 2020-05-01T03:12:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,874 | py | import sys, xbmc, json
try:
from urlparse import parse_qsl
from urllib import quote_plus
except ImportError:
from urllib.parse import parse_qsl, quote_plus
xbmc.log('__name__= %s' % __name__, 2)
xbmc.log('__package__= %s' % __package__, 2)
# sys.path = []
# if __name__ == '__main__' and __package__ is None:
# from os import sys, path
# test = sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
# xbmc.log('test= %s' % test, 2)
if __name__ == '__main__':
item = sys.listitem
message = item.getLabel()
path = item.getPath()
xbmc.log('path = %s' % path, 2)
plugin = 'plugin://plugin.video.venom/'
args = path.split(plugin, 1)
xbmc.log('args = %s' % args, 2)
params = dict(parse_qsl(args[1].replace('?', '')))
xbmc.log('playlist = %s' % len(xbmc.PlayList(xbmc.PLAYLIST_VIDEO)), 2)
if 'meta' in params:
meta = json.loads(params['meta'])
year = meta.get('year', '')
imdb = meta.get('imdb', '')
tmdb = meta.get('tmdb', '')
tvdb = meta.get('tvdb', '')
season = meta.get('season', '')
episode = meta.get('episode', '')
tvshowtitle = meta.get('tvshowtitle', '')
else:
year = params.get('year', '')
imdb = params.get('imdb', '')
tmdb = params.get('tmdb', '')
tvdb = params.get('tvdb', '')
season = params.get('season', '')
episode = params.get('episode', '')
tvshowtitle = params.get('tvshowtitle', '')
# items = seasons.Seasons().tvdb_list(item['tvshowtitle'], item['year'], item['imdb'], item['tmdb'], item['tvdb'], control.apiLanguage()['tvdb'], '-1') # fetch new meta (uncached)
# for item in items:
# path = '%s?action=episodes&tvshowtitle=%s&year=%s&imdb=%s&tmdb=%s&tvdb=%s&season=%s&episode=%s' % (
# plugin, tvshowtitle, year, imdb, tmdb, tvdb, season, episode)
# path = 'PlayMedia(%s?action=playAll)' % plugin
path = 'RunPlugin(%s?action=playAll)' % plugin
xbmc.executebuiltin(path)
| [
"[email protected]"
] | |
c31e659110b66300d3a5f2982f28690d73a5c462 | 4ed3db861ae2fe727c7be604d42d540a00923320 | /samsung_multiroom/service/player.py | 68cf53039187d62a94f10fb1ba54dd7a7eac1581 | [
"MIT"
] | permissive | kusma/samsung_multiroom | 7cac147283a52bf491d7f50a6569c64de53eb4a5 | 09ca86d27b87a4aa0c97ec2accbd4ec67dd0cc61 | refs/heads/master | 2020-12-04T07:46:19.688568 | 2019-04-20T16:29:44 | 2019-04-20T16:29:44 | 231,683,383 | 0 | 0 | MIT | 2020-01-03T23:47:29 | 2020-01-03T23:47:28 | null | UTF-8 | Python | false | false | 6,225 | py | """Player allows playback control depending on selected source."""
import abc
# repeat mode constants
REPEAT_ONE = 'one'
REPEAT_ALL = 'all'
REPEAT_OFF = 'off'
class Player(metaclass=abc.ABCMeta):
"""Player interface to control playback functions."""
@abc.abstractmethod
def play(self, playlist):
"""
Enqueue and play a playlist.
Player may choose to not play the playlist if it's not compatible with this player. For instance you can't
play DLNA source tracks using TuneIn player. If player is unable to play the playlist it must return False.
:param playlist: Iterable returning player combatible objects
:returns: True if playlist was accepted, False otherwise
"""
raise NotImplementedError()
@abc.abstractmethod
def jump(self, time):
"""
Advance current playback to specific time.
:param time: Time from the beginning of the track in seconds
"""
raise NotImplementedError()
@abc.abstractmethod
def resume(self):
"""Play/resume current track."""
raise NotImplementedError()
@abc.abstractmethod
def stop(self):
"""Stop current track and reset position to the beginning."""
raise NotImplementedError()
@abc.abstractmethod
def pause(self):
"""Pause current track and retain position."""
raise NotImplementedError()
@abc.abstractmethod
def next(self):
"""Play next track in the queue."""
raise NotImplementedError()
@abc.abstractmethod
def previous(self):
"""Play previous track in the queue."""
raise NotImplementedError()
@abc.abstractmethod
def repeat(self, mode):
"""
Set playback repeat mode.
:param mode: one of REPEAT_* constants
"""
raise NotImplementedError()
@abc.abstractmethod
def shuffle(self, enabled):
"""
Enable/disable playback shuffle mode.
:param enabled: True to enable, False to disable
"""
raise NotImplementedError()
@abc.abstractmethod
def get_repeat(self):
"""
Get playback repeat mode.
:returns: one of REPEAT_* constants
"""
raise NotImplementedError()
@abc.abstractmethod
def get_shuffle(self):
"""
Get playback shuffle mode.
:returns: boolean, True if enabled, False otherwise
"""
raise NotImplementedError()
@abc.abstractmethod
def get_current_track(self):
"""
Get current track info.
:returns: Track instance, or None if unavailable
"""
raise NotImplementedError()
@abc.abstractmethod
def is_active(self, function, submode=None):
"""
Check if this player is active based on current function/submode.
:returns: Boolean True if function/submode is supported
"""
raise NotImplementedError()
def __getattribute__(self, name):
"""
Magic is_[function]_supported method.
Function can be any Player method. In order to mark method as unsupported, use @unsupported decorator.
Example:
MyPlayer(Player):
@unsupported
def play(self, playlist):
return False
player = MyPlayer()
player.is_play_supported() # returns False
"""
try:
return super().__getattribute__(name)
except AttributeError:
function_name = get_is_supported_function_name(name)
if not function_name:
raise
if not hasattr(self, function_name):
raise
function = getattr(self, function_name)
if not hasattr(function, '__is_supported__'):
return lambda: True
return lambda: bool(function.__is_supported__)
class Track:
"""Defines a media track on the playlist."""
def __init__(self, title, artist, album, duration, position, thumbnail_url, metadata=None):
self._title = title
self._artist = artist
self._album = album
self._duration = duration
self._position = position
self._thumbnail_url = thumbnail_url
self._metadata = metadata or {}
@property
def title(self):
"""
:returns: Title of the current track
"""
return self._title
@property
def artist(self):
"""
:returns: Artist of the current track
"""
return self._artist
@property
def album(self):
"""
:returns: Album title of the current track
"""
return self._album
@property
def duration(self):
"""
:returns: Duration in seconds
"""
return self._duration
@property
def position(self):
"""
:returns: Current playback position in seconds
"""
return self._position
@property
def thumbnail_url(self):
"""
:returns: URL of the track thumbnail
"""
return self._thumbnail_url
def __getattr__(self, name):
"""
:returns: Metadata item value
"""
if name in self._metadata:
return self._metadata[name]
return None
def init_track_kwargs(object_type):
"""
:returns: kwargs dict fro Track initialisation
"""
return {
'title': None,
'artist': None,
'album': None,
'duration': None,
'position': None,
'thumbnail_url': None,
'metadata': {
'object_id': None,
'object_type': object_type,
}
}
def unsupported(function):
"""Decorator to mark player function as unsupported."""
function.__is_supported__ = False
return function
def get_is_supported_function_name(name):
"""
:param name: function name
:returns: Function name from is_[function_name]_supported structure, None otherwise
"""
import re
pattern = re.compile(r'^is_(\w+)_supported$')
matches = pattern.findall(name)
if not matches:
return None
return matches[0]
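# Example: get_is_supported_function_name('is_play_supported') returns 'play';
# Player.__getattribute__ relies on this to synthesise is_<name>_supported helpers.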
| [
"[email protected]"
] | |
4071d21eb5cd3463dc714a85424e09a1fedfa660 | cd3e195e3eff75a01d93fe6a3df082bc839507db | /Prime_range.py | 57398abf3955c4143a4b1ae5cd175f678247cfe2 | [] | no_license | swathichinnaiyan/Sandya | 493002be8b221cad7af63bc0ee5833ef678171d2 | 34b77340f556054dd39c2a5de4ed933943ada319 | refs/heads/master | 2020-06-09T00:38:02.884184 | 2019-05-28T12:44:48 | 2019-05-28T12:44:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 219 | py | n,k=map(int,input().split())
c=0
for i in range(n,k+1):
    if i < 2:  # 0 and 1 are not prime
        continue
    f = 0  # reset the flag for every candidate
    for j in range(2,i):
        if i % j == 0:  # a proper divisor exists, so i is composite
            f = 1
            break
    if f == 0:
        c = c+1
print(c)
| [
"[email protected]"
] | |
d4ea202031d8effa052f5564c89a304be2b0d059 | 1e826a1c4194aaba4e84c3dfeb7976f1ed3f2e78 | /news/news_project/comments/migrations/0002_auto_20200801_1158.py | 7570e07062b98136f31ef9e35be2eca3ba793561 | [] | no_license | choirulihwan/django | 95e62c8601dc34ddc7a3b816296683437fbc57f8 | 5cee04f4443f463088a5309b81aee6cb688f15ac | refs/heads/master | 2022-12-13T10:07:12.276471 | 2022-01-09T08:57:36 | 2022-01-09T08:57:36 | 237,208,107 | 0 | 0 | null | 2022-11-22T09:48:06 | 2020-01-30T12:26:17 | HTML | UTF-8 | Python | false | false | 988 | py | # Generated by Django 3.0.8 on 2020-08-01 04:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('articles', '0005_auto_20200621_1449'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('comments', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='comment',
name='username',
),
migrations.AddField(
model_name='comment',
name='article',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='articles.Article'),
),
migrations.AddField(
model_name='comment',
name='user_comment',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='commentator', to=settings.AUTH_USER_MODEL),
),
]
| [
"[email protected]"
] | |
c6fefc0d9cd14428c2fecf957fb3004e2681e8c1 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Quantization/trend_Lag1Trend/cycle_12/ar_12/test_artificial_32_Quantization_Lag1Trend_12_12_20.py | eb6deb2d6bbd89999096af776def00111e27d043 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 273 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "Lag1Trend", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 12); | [
"[email protected]"
] | |
4f58e11e6d3006d75c64a4af4da9ea4792b0bd65 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_detain.py | 3dfe8ec171cbb2f81c8c7242a587dcbea4b45df8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py |
# class header
class _DETAIN():
def __init__(self,):
self.name = "DETAIN"
self.definitions = [u'to force someone officially to stay in a place: ', u'to delay someone for a short length of time: ', u'to keep someone in prison for as long as the courts feel is necessary']
self.parents = []
self.childen = []
self.properties = []
self.jsondata = {}
self.specie = 'verbs'
def run(self, obj1 = [], obj2 = []):
return self.jsondata
| [
"[email protected]"
] | |
fd8ed3a75dca376e8d0523b9c1913aeb43585369 | d305e9667f18127e4a1d4d65e5370cf60df30102 | /scripts/update_onnx_weight.py | eaff46f61b3bafc13431d6b903dee7bce0b5030e | [
"Apache-2.0",
"MIT",
"Libpng",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.1-only",
"AGPL-3.0-only",
"MPL-2.0-no-copyleft-exception",
"IJG",
"Zlib",
"MPL-1.1",
"BSD-3-Clause",
"BSD-3-Clause-Open-MPI",
"MPL-1.0",
"GPL-2.0-only",
"MPL-2.0",
"BSL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Unlicense",
"LicenseRef-scancode-public-domain",
"BSD-2-Clause"
] | permissive | imyzx2017/mindspore_pcl | d8e5bd1f80458538d07ef0a8fc447b552bd87420 | f548c9dae106879d1a83377dd06b10d96427fd2d | refs/heads/master | 2023-01-13T22:28:42.064535 | 2020-11-18T11:15:41 | 2020-11-18T11:15:41 | 313,906,414 | 6 | 1 | Apache-2.0 | 2020-11-18T11:25:08 | 2020-11-18T10:57:26 | null | UTF-8 | Python | false | false | 2,557 | py | #!/usr/bin/env python3
# coding=UTF-8
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Function:
Use checkpoint file and onnx file as inputs, create a new onnx with Initializer's value from checkpoint file
Usage:
python update_onnx_weight.py onnx_file checkpoint_file [output_file]
"""
import sys
from onnx import onnx_pb
from mindspore.train.serialization import load_checkpoint
def update_onnx_initializer(onnx_file, ckpt_file, output_file):
"Update onnx initializer."
with open(onnx_file, 'rb') as f:
data = f.read()
model = onnx_pb.ModelProto()
model.ParseFromString(data)
initializer = model.graph.initializer
param_dict = load_checkpoint(ckpt_file)
for i, _ in enumerate(initializer):
item = initializer[i]
if not item.name in param_dict:
print(f"Warning: Can not find '{item.name}' in checkpoint parameters dictionary")
continue
weight = param_dict[item.name].data.asnumpy()
bin_data = weight.tobytes()
if len(item.raw_data) != len(bin_data):
print(f"Warning: Size of weight from checkpoint is different from original size, ignore it")
continue
item.raw_data = bin_data
pb_msg = model.SerializeToString()
with open(output_file, 'wb') as f:
f.write(pb_msg)
print(f'Graph name: {model.graph.name}')
print(f'Initializer length: {len(initializer)}')
print(f'Checkpoint dict length: {len(param_dict)}')
print(f'The new weights have been written to file {output_file} successfully')
def main():
if len(sys.argv) < 3:
print(f'Usage: {sys.argv[0]} onnx_file checkpoint_file [output_file]')
sys.exit(1)
onnx_file = sys.argv[1]
ckpt_file = sys.argv[2]
output_file = f'new_{onnx_file}' if len(sys.argv) == 3 else sys.argv[3]
update_onnx_initializer(onnx_file, ckpt_file, output_file)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
869cbcd717d4522fce402db11a0f1460e1bfc621 | 5ff8cefa68d52d2427bb3d35320cd8bd0d072968 | /Python/StringExample1.py | 7a0151c415f069ae83b964413dd1a6c11c307e85 | [] | no_license | gsudarshan1990/PythonSampleProjects | a65a111454f8dc551f1cd29901cead0798ad6dc3 | 3c1a5174c5f966b0eed2828221add76ec0d019d5 | refs/heads/master | 2020-05-09T16:02:37.743568 | 2019-07-14T06:22:55 | 2019-07-14T06:22:55 | 181,255,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,550 | py | string1='banana'
print(string1[1])
print(string1[0])
index=0
while index<len(string1):
print(string1[index])
index=index+1
for char in string1:
print(char)
s='Monty Python'
print(s[0:5])
print(s[6:len(s)])
fruit='banana'
print(fruit[:3])
print(fruit[3:])
#count of 'a' in banana
count=0
fruit='banana'
for letter in fruit:
if letter == 'a':
count=count+1
print(count)
def count_letter(fruit,letter):
count=0
for index in fruit:
if index == letter:
count=count+1
print(count)
count_letter('banana','a')
print('a' in 'banana')
print('seed' in 'banana')
word='banana'
if word == 'banana':
print('both the words are same')
word1='apple'
word2='orange'
if word1<word:
print('Apple come Before Banana')
if word2>word:
print('Oragne comes after banana')
stuff='Hello World'
print(type(stuff))
print(dir(stuff))
string1='Good Morning'
print(dir(string1))
list1=[1,2,3]
print(dir(list1))
list1.append(4)
print(list1)
print(help(stuff.capitalize()))
string2='banana'
string3=string2.upper()
print(string3)
index_of_a=string2.find('a')
print(index_of_a)
print(string2.find('na'))
print(string2.find('na',3))
line=' Here we go '
print(line.strip())
print(line.rstrip())
line='Have a nice day'
print(line.startswith('Have'))
print(line.startswith('h'))
data='From [email protected] Sat Jan 5 09:14:16 2008'
initial_position=data.find('@')
space=data.find(' ',initial_position)
print(initial_position)
print(space)
print(data[initial_position+1:space])
| [
"[email protected]"
] | |
a821b7476c8f38c2679e241169a4e01ca9220af4 | 779c469b548d42dc679bf34da6041c813a7ce9cc | /sphinx_template/my_package/viz.py | 3026a4ac69e8fb1c3f86813e32afdc92271c6376 | [
"MIT"
] | permissive | millerbest/zero_to_docs | b2e68af564db8f47441d44ded18c3a8a3b0c21f2 | 3f5c72ca76c457fefaba9b2a182e11cc89e5bf6d | refs/heads/master | 2021-08-14T14:35:37.415612 | 2017-11-16T01:08:21 | 2017-11-16T01:26:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 355 | py | import numpy as np
import matplotlib.pyplot as plt


def plot_random_dots(N, scale=10, ax=None, cmap=None):
    """Plot N random dots with random sizes and colors."""
    if ax is None:
        fig, ax = plt.subplots()
    if cmap is None:
        cmap = plt.cm.viridis
    dots = np.random.randn(2, N)
    # Marker sizes must be non-negative and one value per point (shape (N,)),
    # so scale the absolute value of a single coordinate row.
    sizes = np.abs(dots[0]) * scale
    # A colormap only takes effect when per-point values are passed via `c`.
    ax.scatter(*dots, s=sizes, c=sizes, cmap=cmap)
    return ax
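
# Usage sketch (assumes a matplotlib-enabled session; the values are
# illustrative, not part of the package API):
#
#     from my_package.viz import plot_random_dots
#
#     ax = plot_random_dots(100, scale=30)
#     ax.set_title("100 random dots")
#     ax.figure.savefig("dots.png")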
| [
"[email protected]"
] | |
39a8852e875c0852577e8c9e9103df9b6f18d343 | 6deafbf6257a5c30f084c3678712235c2c31a686 | /Toolz/sqlmap/waf/asm.py | 17244efb49a3344e7790047f270fd98805bfc3e7 | [
"Unlicense",
"LicenseRef-scancode-generic-cla",
"GPL-1.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-proprietary-license",
"GPL-2.0-only",
"LicenseRef-scancode-commercial-license",
"LicenseRef-scancode-other-permissive"
] | permissive | thezakman/CTF-Heaven | 53fcb4a72afa821ad05d8cc3b309fb388f958163 | 4b52a2178922f1502ab00fa8fc156d35e1dc653f | refs/heads/master | 2023-04-05T18:20:54.680378 | 2023-03-21T13:47:45 | 2023-03-21T13:47:45 | 167,290,879 | 182 | 24 | Unlicense | 2022-11-29T21:41:30 | 2019-01-24T02:44:24 | Python | UTF-8 | Python | false | false | 671 | py | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
from lib.core.settings import WAF_ATTACK_VECTORS
__product__ = "Application Security Manager (F5 Networks)"
def detect(get_page):
retval = False
for vector in WAF_ATTACK_VECTORS:
page, headers, code = get_page(get=vector)
retval = "The requested URL was rejected. Please consult with your administrator." in (page or "")
retval |= all(_ in (page or "") for _ in ("security.f5aas.com", "Please enable JavaScript to view the page content"))
if retval:
break
return retval
| [
"[email protected]"
] |