| column | dtype | range / distinct values |
|---|---|---|
| blob_id | string | length 40..40 |
| directory_id | string | length 40..40 |
| path | string | length 3..616 |
| content_id | string | length 40..40 |
| detected_licenses | list | length 0..112 |
| license_type | string | 2 classes |
| repo_name | string | length 5..115 |
| snapshot_id | string | length 40..40 |
| revision_id | string | length 40..40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 .. 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 .. 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 .. 2023-09-06 01:08:06 |
| github_id | int64 | 4.92k .. 681M, nullable (⌀) |
| star_events_count | int64 | 0 .. 209k |
| fork_events_count | int64 | 0 .. 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us] | 2012-06-04 01:52:49 .. 2023-09-14 21:59:50, nullable (⌀) |
| gha_created_at | timestamp[us] | 2008-05-22 07:58:19 .. 2023-08-21 12:35:19, nullable (⌀) |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 .. 10.2M |
| extension | string | 188 classes |
| content | string | length 3 .. 10.2M |
| authors | list | length 1..1 |
| author_id | string | length 1..132 |
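Rows with this schema are easiest to inspect programmatically. A minimal sketch using the Hugging Face `datasets` library; the repository id `org/dataset-name` is a hypothetical placeholder, since the dataset's actual id is not shown here:

```python
# Stream records matching the schema above; "org/dataset-name" is a
# hypothetical placeholder for the real dataset repository id.
from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)
for row in ds:
    print(row["repo_name"], row["path"], row["length_bytes"], row["license_type"])
    break  # look at the first record only
```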
**Example 1: `/tpc/participate.py`** (repo `MintYiqingchen/transaction-database`, branch `refs/heads/master`)
- blob_id: 6528c8298b100f2a4c7f9b7942fa43168ff41697 · directory_id: 39632b273847febed4e7c4f70f804b3c1bb05eb8 · content_id: f8bedf6c14c11299e48f3da31b0ba1413266e8fe
- snapshot_id: 6172e067efb21119dd7e8542010e379f41b656d2 · revision_id: ffa73384693a5604cfc9c51a206570107beffebc
- detected_licenses: [] · license_type: no_license · gha_license_id: null · gha_language: null
- visit_date: 2022-04-09T17:33:21.897370 · revision_date: 2020-03-31T23:05:00 · committer_date: 2020-03-31T23:05:00 · gha_event_created_at: null · gha_created_at: null
- github_id: 237,696,675 · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 7,631 · extension: py
- content:

```python
from xmlrpc.server import SimpleXMLRPCServer
from xmlrpc.client import ServerProxy, Error
from threading import Thread, Lock
import os
import sys
import argparse
import time
import socket
import psycopg2 as pg2
import psycopg2.pool
import asyncio
import traceback
from collections import defaultdict
parser = argparse.ArgumentParser()
parser.add_argument('--host', default="127.0.0.1")
parser.add_argument('--user', default="postgres")
parser.add_argument('--passwd', default="1z2x3c4v5b")
parser.add_argument('--database', default="tippers")
parser.add_argument('--port', default="5432")
parser.add_argument('--rpcport', default=15000, type=int)
parser.add_argument('--coordinator_uri', default="http://127.0.0.1:25000")
parser.add_argument('--thread_num', type=int, default=32)
parser.add_argument('--timeout', type=int, default=30)
args = parser.parse_args()
def statusItem():
return {'xid':None, 'status':'Init', 'task': None}
class Participate(object):
_rpc_methods_ = ['tpc_prepare', 'tpc_commit', 'tpc_abort', 'execute', 'wait_message']
def __init__(self, address, db_pool):
self.db_pool = db_pool
self._ip = address[0]
self._port = address[1]
self._serv = SimpleXMLRPCServer(address, allow_none=True)
for name in self._rpc_methods_:
self._serv.register_function(getattr(self, name))
self._status = defaultdict(statusItem) # txn_id --> ["Init", "Abort", "Prepare"]
self._locks = {} # txn_id --> threading.Lock
self._bigLock = Lock()
self._loop = asyncio.get_event_loop()
def recover_prepared_txn(self):
conn = self.db_pool.getconn()
uri = 'http://'+self._ip + ':'+str(self._port)
xids = conn.tpc_recover()
for xid in xids:
self._locks[xid.gtrid] = Lock()
self._status[xid.gtrid]['xid'] = xid
self._status[xid.gtrid]['status'] = 'Prepare'
key = list(self._status.keys())
print('After participate recover, txn_ids', key)
with ServerProxy(args.coordinator_uri, allow_none=True) as proxy:
for txn_id in key:
try:
res = proxy.recovery_message(txn_id, uri)
print('{} ask for txn_id {} op {}'.format(uri, txn_id, res['op']))
except ConnectionError as v:
print("Connection ERROR ", v)
continue
if res['op'] == 'COMMIT':
conn.tpc_commit(self._status[txn_id]['xid'])
del self._status[txn_id]
del self._locks[txn_id]
elif res['op'] == 'ABORT':
conn.tpc_rollback(self._status[txn_id]['xid'])
del self._status[txn_id]
del self._locks[txn_id]
self.db_pool.putconn(conn)
def wait_message(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0, 'isWait': 0}
return {'errCode': 0, 'isWait': 1}
def tpc_prepare(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0, 'vote': 0}
with self._locks[txn_id]:
            task = self._status[txn_id]['task']
            if task is not None:  # transactions restored by recovery have no timeout task
                task.cancel()
if self._status[txn_id]['status'] == "Abort": # abort
return {'errCode': 0, 'vote': 0}
if self._status[txn_id]['status'] == "Prepare":
return {'errCode': 0, 'vote': 1}
conn = self.db_pool.getconn(txn_id)
conn.tpc_prepare()
self._status[txn_id]['status'] = 'Prepare'
return {'errCode': 0, 'vote': 1}
def tpc_abort(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0}
with self._locks[txn_id]:
if self._status[txn_id]['status'] == 'Prepare':
conn = self.db_pool.getconn(txn_id)
conn.tpc_rollback()
self.db_pool.putconn(conn, key = txn_id)
del self._status[txn_id]
del self._locks[txn_id]
return {'errCode': 0}
def tpc_commit(self, txn_id):
if txn_id not in self._locks:
return {'errCode': 0}
with self._locks[txn_id]:
if self._status[txn_id]['status'] == 'Prepare':
conn = self.db_pool.getconn(txn_id)
conn.tpc_commit()
self.db_pool.putconn(conn, key = txn_id)
del self._status[txn_id]
del self._locks[txn_id]
return {'errCode': 0}
def execute(self, txn_id, sql):
while True:
try:
conn = self.db_pool.getconn(txn_id)
break
except Exception as e:
print('Execute Error ', e)
time.sleep(25)
with self._bigLock:
if txn_id not in self._locks:
self._locks[txn_id] = Lock()
with self._locks[txn_id]:
if txn_id not in self._status:
xid = conn.xid(0, txn_id, 'pj2')
                task = self._loop.call_later(args.timeout, self.change_to_abort, txn_id)
self._status[txn_id] = {'xid': xid, 'status': 'Init', 'task': task}
conn.tpc_begin(xid)
elif self._status[txn_id]['status'] != "Init":
return {'errCode': 1, 'errString': "Participate status is "+self._status[txn_id]['status']}
try:
with conn.cursor() as curs:
curs.execute(sql)
except pg2.DatabaseError:
traceback.print_exc()
self._status[txn_id]['status'] = "Abort"
conn.tpc_rollback()
self.db_pool.putconn(conn, key=txn_id)
return {'errCode': 0}
def serve_forever(self):
self._serv.serve_forever()
def participate_register(self):
with ServerProxy(args.coordinator_uri, allow_none=True) as proxy:
uri = 'http://'+self._ip + ':'+str(self._port)
a = proxy.participate_register(uri)
def change_to_abort(self, txn_id):
if txn_id not in self._locks:
return
with self._locks[txn_id]:
if self._status[txn_id]['status'] != "Init":
return
conn = self.db_pool.getconn(txn_id)
conn.tpc_rollback()
self.db_pool.putconn(conn, key=txn_id)
self._status[txn_id]['status'] = 'Abort'
def timeout_loop(self):
try:
self._loop.run_forever()
except Exception:
self._loop.close()
if __name__ == '__main__':
global IP
try:
pgpool = psycopg2.pool.ThreadedConnectionPool(args.thread_num, 100,\
host = args.host, user=args.user, password=args.passwd, database=args.database, port=args.port)
    except Exception:
raise Exception("unable to connect to database")
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
IP = s.getsockname()[0]
s.close()
print(IP)
serv = Participate((IP, args.rpcport), pgpool)
for _ in range(1, args.thread_num):
t = Thread(target=serv.serve_forever)
t.daemon = True
t.start()
t = Thread(target=serv.timeout_loop)
t.daemon = True
t.start()
serv.recover_prepared_txn()
serv.participate_register()
serv.serve_forever()
```

- authors: ["[email protected]"] · author_id: (empty)
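The participant above drives PostgreSQL two-phase commit through psycopg2's `tpc_*` API. A minimal standalone sketch of that lifecycle, assuming a reachable database with `max_prepared_transactions > 0`; the connection parameters and the `accounts` table are illustrative:

```python
# Sketch of the two-phase-commit calls used by Participate above.
import psycopg2

conn = psycopg2.connect(host="127.0.0.1", user="postgres",
                        password="secret", dbname="tippers")
xid = conn.xid(0, "txn-42", "pj2")   # (format_id, gtrid, bqual), as in execute()
conn.tpc_begin(xid)
with conn.cursor() as cur:
    cur.execute("UPDATE accounts SET balance = balance - 10 WHERE id = 1")
conn.tpc_prepare()    # phase 1: the transaction survives a crash from here on
conn.tpc_commit()     # phase 2 (or conn.tpc_rollback())

# After a restart, pending transactions can be finished, as in
# recover_prepared_txn() above:
# for pending in conn.tpc_recover():
#     conn.tpc_commit(pending)
```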
**Example 2: `/fnsservice/test4.py`** (repo `kmakeev/fnsservice`, branch `refs/heads/master`)
- blob_id: 6fc75f5b8dadbafbb0a244d38751341b91a114ee · directory_id: a2ef47ae30bbf44175aa2f3398f9d3f7358870a6 · content_id: 03b8c2c22a835406770415aea4cbb1513c9c0300
- snapshot_id: a1342f045441f0ee6683cfbbe1358d11bb3cf60e · revision_id: 66d677ce6cc298d7404fb4db5ffe1162a5f493a3
- detected_licenses: [] · license_type: no_license · gha_license_id: null · gha_language: JavaScript
- visit_date: 2021-06-17T18:09:15.894765 · revision_date: 2021-02-10T07:39:35 · committer_date: 2021-02-10T07:39:35 · gha_event_created_at: 2021-02-10T07:22:23 · gha_created_at: 2020-02-26T09:11:34
- github_id: 243,219,506 · star_events_count: 2 · fork_events_count: 1
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 221 · extension: py
- content:

```python
# -*- coding: utf-8 -*-
from bokeh.io import show, output_file
from fnsservice.fns.views import DublicatedGraph
g = DublicatedGraph(width=800, height=800, N=1500)
p = g.get_graph()
output_file("graph.html")
show(p)
```

- authors: ["[email protected]"] · author_id: (empty)
**Example 3: `/zqxt_tml/manage.py`** (repo `lannyMa/learn-py`, branch `refs/heads/master`)
- blob_id: 7c1086255d4dbfefae1b7f45a3fa382b54892810 · directory_id: 00ede1aac54593f9b0e41822b29a3781b47a1142 · content_id: ad1867356c893da759e2f70cee5984e20cce4bc8
- snapshot_id: b8c619cc9b6ac494eeceb61dbeaa92cafed91844 · revision_id: 380d6f4392af6fff725a22b2843a5a7e04d5002d
- detected_licenses: [] · license_type: no_license · gha_license_id: null · gha_language: null
- visit_date: 2021-06-23T06:50:09.455717 · revision_date: 2017-09-06T00:15:43 · committer_date: 2017-09-06T00:15:43 · gha_event_created_at: null · gha_created_at: null
- github_id: 87,917,766 · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 806 · extension: py
- content:

```python
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zqxt_tml.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
```

- authors: ["[email protected]"] · author_id: (empty)
**Example 4: `/django_confit/loaders.py`** (repo `zebuline/django-confit`, branch `refs/heads/master`)
- blob_id: 3f778c20399ecda6ea8109817fb7a9c9d796ad82 · directory_id: 0190835675f591b159463e50e7d814bbe5303d8d · content_id: dc9bfac0338e7af071529d7e3461273f47944a10
- snapshot_id: 56b46dafbe0aa0b067e80086dfbc953bdc44250f · revision_id: ad9fcd458d8d6409066acd110e385c24f2f0fe37
- detected_licenses: [] · license_type: no_license · gha_license_id: null · gha_language: Python
- visit_date: 2021-01-18T08:44:11.729820 · revision_date: 2014-06-30T13:42:41 · committer_date: 2014-06-30T13:42:41 · gha_event_created_at: 2015-03-19T13:52:25 · gha_created_at: 2015-03-19T13:52:24
- github_id: 32,524,209 · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 3,370 · extension: py
- content:

```python
# -*- coding: utf-8 -*-
"""Utilities to load configuration from various sources:
* from :attr:`os.environ` or similar dictionary:
:func:`settings_from_string_mapping`;
* from Python module: :func:`settings_from_module`;
* from JSON or YAML file: :func:`settings_from_file`.
"""
import json
import six
import yaml
def load_mapping(input, prefix=''):
"""Convert mapping of {key: string} to {key: complex type}.
This function makes it possible (and easy) to load complex types from
single-level key-value stores, such as environment variables or INI files.
Of course, both flat and nested mappings are supported:
>>> flat_mapping = {'DEBUG': 'True', 'SECRET_KEY': 'not a secret'}
>>> output = load_mapping(flat_mapping)
>>> output == flat_mapping
True
>>> nested_mapping = {'DATABASES': {'USER': 'me', 'HOST': 'localhost'}}
>>> output = load_mapping(nested_mapping)
>>> output == nested_mapping
True
Values can be complex types (sequences, mappings) using JSON or YAML.
Keys using ".json" or ".yaml" suffix are automatically decoded:
>>> nested_mapping = {
... 'DATABASES.yaml': 'ENGINE: sqlite3',
... }
>>> output = load_mapping(nested_mapping)
>>> output['DATABASES'] == {'ENGINE': 'sqlite3'}
True
You can use optional ``prefix`` argument to load only a subset of mapping:
>>> mapping = {'YES_ONE': '1', 'NO_TWO': '2'}
>>> load_mapping(mapping, prefix='YES_')
{'ONE': '1'}
"""
output = {}
for key, value in six.iteritems(input):
if key.startswith(prefix):
key = key[len(prefix):]
if key.endswith('.json'):
output[key[:-5]] = json.loads(value)
elif key.endswith('.yaml'):
output[key[:-5]] = yaml.load(value)
else:
output[key] = value
return output
def load_file(file_obj):
"""Return mapping from file object, using ``name`` attr to guess format.
Supported file formats are JSON and YAML. The lowercase extension is used
to guess the file type.
>>> from six.moves import StringIO
>>> file_obj = StringIO('SOME_LIST: [a, b, c]')
>>> file_obj.name = 'something.yaml'
>>> load_file(file_obj) == {
... 'SOME_LIST': ['a', 'b', 'c'],
... }
True
"""
file_name = file_obj.name
if file_name.endswith('.yaml'):
return yaml.load(file_obj)
elif file_name.endswith('.json'):
return json.load(file_obj)
else:
raise ValueError(
'Cannot guess format of configuration file "{name}". '
'Expected one of these extensions: "{extensions}".'.format(
name=file_name,
extensions='", "'.join('.yaml', '.json')))
def load_module(module_path):
"""Return module's globals as a dict.
>>> settings = load_module('django.conf.global_settings')
>>> settings['DATABASES']
{}
It does not load "protected" and "private" attributes (those with
underscores).
>>> '__name__' in settings
False
"""
module = __import__(module_path, fromlist='*', level=0)
is_uppercase = lambda x: x.upper() == x
is_special = lambda x: x.startswith('_')
return dict([(key, value) for key, value in module.__dict__.items()
if is_uppercase(key) and not is_special(key)])
```

- authors: ["[email protected]"] · author_id: (empty)
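As the docstrings note, `load_mapping` is aimed at flat key-value stores such as `os.environ`. A short sketch of that use; the `MYAPP_`-prefixed variables are invented for illustration, and the import path is inferred from the file's location in the repo:

```python
import os
from django_confit.loaders import load_mapping  # path inferred from the repo layout

# Hypothetical environment variables; the .json suffix triggers decoding.
os.environ["MYAPP_DEBUG"] = "True"
os.environ["MYAPP_ALLOWED_HOSTS.json"] = '["localhost", "example.com"]'

settings = load_mapping(os.environ, prefix="MYAPP_")
assert settings["DEBUG"] == "True"
assert settings["ALLOWED_HOSTS"] == ["localhost", "example.com"]
```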
**Example 5: `/opsgenie_swagger/models/nagios_xiv2_integration.py`** (repo `bm-lab/opsgenie-python-sdk`, branch `refs/heads/master`)
- blob_id: a64656d6558ad1672d290e857e1e2ec75ad77f90 · directory_id: c104dbd09a853725cb4f4b17df7c5dd59d47e04e · content_id: fcd09c3b7ef19f34e72808fef562054ece011832
- snapshot_id: 5a64e2c24f1b9168ecadf482ba8084ba27a659fc · revision_id: 244c4c40ddcc25e70df5ba4425ab8d7c8da59c18
- detected_licenses: ["Apache-2.0"] · license_type: permissive · gha_license_id: null · gha_language: null
- visit_date: 2021-10-09T03:18:48.101672 · revision_date: 2018-12-15T01:03:36 · committer_date: 2018-12-20T15:13:31 · gha_event_created_at: null · gha_created_at: null
- github_id: null · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 17,946 · extension: py
- content:

```python
# coding: utf-8
"""
OpsGenie REST API
OpsGenie OpenAPI Specification # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from opsgenie_swagger.models.alert_filter import AlertFilter # noqa: F401,E501
from opsgenie_swagger.models.integration import Integration # noqa: F401,E501
from opsgenie_swagger.models.marid_callback import MaridCallback # noqa: F401,E501
from opsgenie_swagger.models.recipient import Recipient # noqa: F401,E501
from opsgenie_swagger.models.team_meta import TeamMeta # noqa: F401,E501
from opsgenie_swagger.models.token_based_incoming_feature import TokenBasedIncomingFeature # noqa: F401,E501
class NagiosXIV2Integration(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'suppress_notifications': 'bool',
'ignore_teams_from_payload': 'bool',
'ignore_recipients_from_payload': 'bool',
'recipients': 'list[Recipient]',
'is_advanced': 'bool',
'feature_type': 'str',
'allow_configuration_access': 'bool',
'allow_write_access': 'bool',
'alert_filter': 'AlertFilter',
'alert_actions': 'list[str]',
'callback_type': 'str',
'send_alert_actions': 'bool',
'bidirectional_callback_type': 'str'
}
attribute_map = {
'suppress_notifications': 'suppressNotifications',
'ignore_teams_from_payload': 'ignoreTeamsFromPayload',
'ignore_recipients_from_payload': 'ignoreRecipientsFromPayload',
'recipients': 'recipients',
'is_advanced': 'isAdvanced',
'feature_type': 'feature-type',
'allow_configuration_access': 'allowConfigurationAccess',
'allow_write_access': 'allowWriteAccess',
'alert_filter': 'alertFilter',
'alert_actions': 'alertActions',
'callback_type': 'callback-type',
'send_alert_actions': 'sendAlertActions',
'bidirectional_callback_type': 'bidirectional-callback-type'
}
def __init__(self, suppress_notifications=None, ignore_teams_from_payload=None, ignore_recipients_from_payload=None, recipients=None, is_advanced=None, feature_type=None, allow_configuration_access=None, allow_write_access=None, alert_filter=None, alert_actions=None, callback_type=None, send_alert_actions=None, bidirectional_callback_type=None): # noqa: E501
"""NagiosXIV2Integration - a model defined in Swagger""" # noqa: E501
self._suppress_notifications = None
self._ignore_teams_from_payload = None
self._ignore_recipients_from_payload = None
self._recipients = None
self._is_advanced = None
self._feature_type = None
self._allow_configuration_access = None
self._allow_write_access = None
self._alert_filter = None
self._alert_actions = None
self._callback_type = None
self._send_alert_actions = None
self._bidirectional_callback_type = None
self.discriminator = None
if suppress_notifications is not None:
self.suppress_notifications = suppress_notifications
if ignore_teams_from_payload is not None:
self.ignore_teams_from_payload = ignore_teams_from_payload
if ignore_recipients_from_payload is not None:
self.ignore_recipients_from_payload = ignore_recipients_from_payload
if recipients is not None:
self.recipients = recipients
if is_advanced is not None:
self.is_advanced = is_advanced
if feature_type is not None:
self.feature_type = feature_type
if allow_configuration_access is not None:
self.allow_configuration_access = allow_configuration_access
if allow_write_access is not None:
self.allow_write_access = allow_write_access
if alert_filter is not None:
self.alert_filter = alert_filter
if alert_actions is not None:
self.alert_actions = alert_actions
if callback_type is not None:
self.callback_type = callback_type
if send_alert_actions is not None:
self.send_alert_actions = send_alert_actions
if bidirectional_callback_type is not None:
self.bidirectional_callback_type = bidirectional_callback_type
@property
def suppress_notifications(self):
"""Gets the suppress_notifications of this NagiosXIV2Integration. # noqa: E501
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:return: The suppress_notifications of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._suppress_notifications
@suppress_notifications.setter
def suppress_notifications(self, suppress_notifications):
"""Sets the suppress_notifications of this NagiosXIV2Integration.
If enabled, notifications that come from alerts will be suppressed. Defaults to false # noqa: E501
:param suppress_notifications: The suppress_notifications of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._suppress_notifications = suppress_notifications
@property
def ignore_teams_from_payload(self):
"""Gets the ignore_teams_from_payload of this NagiosXIV2Integration. # noqa: E501
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_teams_from_payload of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._ignore_teams_from_payload
@ignore_teams_from_payload.setter
def ignore_teams_from_payload(self, ignore_teams_from_payload):
"""Sets the ignore_teams_from_payload of this NagiosXIV2Integration.
If enabled, the integration will ignore teams sent in request payloads. Defaults to false # noqa: E501
:param ignore_teams_from_payload: The ignore_teams_from_payload of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._ignore_teams_from_payload = ignore_teams_from_payload
@property
def ignore_recipients_from_payload(self):
"""Gets the ignore_recipients_from_payload of this NagiosXIV2Integration. # noqa: E501
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:return: The ignore_recipients_from_payload of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._ignore_recipients_from_payload
@ignore_recipients_from_payload.setter
def ignore_recipients_from_payload(self, ignore_recipients_from_payload):
"""Sets the ignore_recipients_from_payload of this NagiosXIV2Integration.
If enabled, the integration will ignore recipients sent in request payloads. Defaults to false # noqa: E501
:param ignore_recipients_from_payload: The ignore_recipients_from_payload of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._ignore_recipients_from_payload = ignore_recipients_from_payload
@property
def recipients(self):
"""Gets the recipients of this NagiosXIV2Integration. # noqa: E501
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:return: The recipients of this NagiosXIV2Integration. # noqa: E501
:rtype: list[Recipient]
"""
return self._recipients
@recipients.setter
def recipients(self, recipients):
"""Sets the recipients of this NagiosXIV2Integration.
Optional user, schedule, teams or escalation names to calculate which users will receive the notifications of the alert. Recipients which are exceeding the limit are ignored # noqa: E501
:param recipients: The recipients of this NagiosXIV2Integration. # noqa: E501
:type: list[Recipient]
"""
self._recipients = recipients
@property
def is_advanced(self):
"""Gets the is_advanced of this NagiosXIV2Integration. # noqa: E501
:return: The is_advanced of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._is_advanced
@is_advanced.setter
def is_advanced(self, is_advanced):
"""Sets the is_advanced of this NagiosXIV2Integration.
:param is_advanced: The is_advanced of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._is_advanced = is_advanced
@property
def feature_type(self):
"""Gets the feature_type of this NagiosXIV2Integration. # noqa: E501
:return: The feature_type of this NagiosXIV2Integration. # noqa: E501
:rtype: str
"""
return self._feature_type
@feature_type.setter
def feature_type(self, feature_type):
"""Sets the feature_type of this NagiosXIV2Integration.
:param feature_type: The feature_type of this NagiosXIV2Integration. # noqa: E501
:type: str
"""
allowed_values = ["email-based", "token-based"] # noqa: E501
if feature_type not in allowed_values:
raise ValueError(
"Invalid value for `feature_type` ({0}), must be one of {1}" # noqa: E501
.format(feature_type, allowed_values)
)
self._feature_type = feature_type
@property
def allow_configuration_access(self):
"""Gets the allow_configuration_access of this NagiosXIV2Integration. # noqa: E501
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:return: The allow_configuration_access of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._allow_configuration_access
@allow_configuration_access.setter
def allow_configuration_access(self, allow_configuration_access):
"""Sets the allow_configuration_access of this NagiosXIV2Integration.
This parameter is for allowing or restricting the configuration access. If configuration access is restricted, the integration will be limited to Alert API requests and sending heartbeats. Defaults to false # noqa: E501
:param allow_configuration_access: The allow_configuration_access of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._allow_configuration_access = allow_configuration_access
@property
def allow_write_access(self):
"""Gets the allow_write_access of this NagiosXIV2Integration. # noqa: E501
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:return: The allow_write_access of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._allow_write_access
@allow_write_access.setter
def allow_write_access(self, allow_write_access):
"""Sets the allow_write_access of this NagiosXIV2Integration.
This parameter is for configuring the read-only access of integration. If the integration is limited to read-only access, the integration will not be authorized to perform any create, update or delete action within any domain. Defaults to true # noqa: E501
:param allow_write_access: The allow_write_access of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._allow_write_access = allow_write_access
@property
def alert_filter(self):
"""Gets the alert_filter of this NagiosXIV2Integration. # noqa: E501
:return: The alert_filter of this NagiosXIV2Integration. # noqa: E501
:rtype: AlertFilter
"""
return self._alert_filter
@alert_filter.setter
def alert_filter(self, alert_filter):
"""Sets the alert_filter of this NagiosXIV2Integration.
:param alert_filter: The alert_filter of this NagiosXIV2Integration. # noqa: E501
:type: AlertFilter
"""
self._alert_filter = alert_filter
@property
def alert_actions(self):
"""Gets the alert_actions of this NagiosXIV2Integration. # noqa: E501
:return: The alert_actions of this NagiosXIV2Integration. # noqa: E501
:rtype: list[str]
"""
return self._alert_actions
@alert_actions.setter
def alert_actions(self, alert_actions):
"""Sets the alert_actions of this NagiosXIV2Integration.
:param alert_actions: The alert_actions of this NagiosXIV2Integration. # noqa: E501
:type: list[str]
"""
self._alert_actions = alert_actions
@property
def callback_type(self):
"""Gets the callback_type of this NagiosXIV2Integration. # noqa: E501
:return: The callback_type of this NagiosXIV2Integration. # noqa: E501
:rtype: str
"""
return self._callback_type
@callback_type.setter
def callback_type(self, callback_type):
"""Sets the callback_type of this NagiosXIV2Integration.
:param callback_type: The callback_type of this NagiosXIV2Integration. # noqa: E501
:type: str
"""
allowed_values = ["bidirectional-callback", "webhook-callback", "campfire-callback", "flowdock-callback", "flowdock-v2-callback", "planio-callback"] # noqa: E501
if callback_type not in allowed_values:
raise ValueError(
"Invalid value for `callback_type` ({0}), must be one of {1}" # noqa: E501
.format(callback_type, allowed_values)
)
self._callback_type = callback_type
@property
def send_alert_actions(self):
"""Gets the send_alert_actions of this NagiosXIV2Integration. # noqa: E501
:return: The send_alert_actions of this NagiosXIV2Integration. # noqa: E501
:rtype: bool
"""
return self._send_alert_actions
@send_alert_actions.setter
def send_alert_actions(self, send_alert_actions):
"""Sets the send_alert_actions of this NagiosXIV2Integration.
:param send_alert_actions: The send_alert_actions of this NagiosXIV2Integration. # noqa: E501
:type: bool
"""
self._send_alert_actions = send_alert_actions
@property
def bidirectional_callback_type(self):
"""Gets the bidirectional_callback_type of this NagiosXIV2Integration. # noqa: E501
:return: The bidirectional_callback_type of this NagiosXIV2Integration. # noqa: E501
:rtype: str
"""
return self._bidirectional_callback_type
@bidirectional_callback_type.setter
def bidirectional_callback_type(self, bidirectional_callback_type):
"""Sets the bidirectional_callback_type of this NagiosXIV2Integration.
:param bidirectional_callback_type: The bidirectional_callback_type of this NagiosXIV2Integration. # noqa: E501
:type: str
"""
allowed_values = ["datadog-callback", "circonus-callback", "connect-wise-callback", "desk-callback", "es-watcher-callback", "freshdesk-callback", "hipchat-callback", "marid-callback", "logic-monitor-callback", "mattermost-callback", "slack-callback", "solarwinds-web-helpdesk-callback", "stackdriver-callback", "status-io-callback"] # noqa: E501
if bidirectional_callback_type not in allowed_values:
raise ValueError(
"Invalid value for `bidirectional_callback_type` ({0}), must be one of {1}" # noqa: E501
.format(bidirectional_callback_type, allowed_values)
)
self._bidirectional_callback_type = bidirectional_callback_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, NagiosXIV2Integration):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
```

- authors: ["[email protected]"] · author_id: (empty)
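Like the other swagger-generated models in this SDK, the class is used by constructing it with keyword arguments and serializing via `to_dict`; the setters validate enum-constrained fields against their `allowed_values` lists. A small usage sketch, with values taken from those lists:

```python
# Usage sketch for the generated model above.
from opsgenie_swagger.models.nagios_xiv2_integration import NagiosXIV2Integration

integ = NagiosXIV2Integration(
    suppress_notifications=False,
    feature_type="token-based",             # must be in allowed_values
    callback_type="bidirectional-callback",
)
print(integ.to_dict())
integ.feature_type = "email-based"          # the setter re-validates
# integ.feature_type = "bogus"              # would raise ValueError
```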
**Example 6: `/xai/brain/wordbase/otherforms/_bleaching.py`** (repo `cash2one/xai`, branch `refs/heads/master`)
- blob_id: 059cb11799e13b9f2b0d70c4c4df93b82a2fad6f · directory_id: 9743d5fd24822f79c156ad112229e25adb9ed6f6 · content_id: 2f016290a8dda6c9ca2d17f70eb94990392f8f9d
- snapshot_id: de7adad1758f50dd6786bf0111e71a903f039b64 · revision_id: e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
- detected_licenses: ["MIT"] · license_type: permissive · gha_license_id: null · gha_language: null
- visit_date: 2021-01-19T12:33:54.964379 · revision_date: 2017-01-28T02:00:50 · committer_date: 2017-01-28T02:00:50 · gha_event_created_at: null · gha_created_at: null
- github_id: null · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 226 · extension: py
- content:

```python
# class header
class _BLEACHING():
    def __init__(self):
        self.name = "BLEACHING"
        self.definitions = ['bleach']
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['bleach']
```

- authors: ["[email protected]"] · author_id: (empty)
**Example 7: `/cauliflowervest/server/handlers/passphrase_handler.py`** (repo `maximermilov/cauliflowervest`, branch `refs/heads/master`)
- blob_id: bf7232fea12b557764b27e0935161115e397e3f1 · directory_id: 6062d8f2aaa850fa941a51cc366a16e6727f1c9b · content_id: de14d853f2d792ae209b11a2cc873f8d6cf55ba5
- snapshot_id: bf9bfeb2d93c01f52c0b4d2c65979efb139ce449 · revision_id: 11a9ac74b86b0902211b6714c8db498b65f7bbd9
- detected_licenses: ["Apache-2.0", "LicenseRef-scancode-unknown-license-reference"] · license_type: permissive · gha_license_id: null · gha_language: Python
- visit_date: 2020-04-04T20:59:32.679075 · revision_date: 2018-12-03T20:31:46 · committer_date: 2018-12-03T20:31:46 · gha_event_created_at: 2016-02-05T16:55:24 · gha_created_at: 2016-02-05T16:55:23
- github_id: 51,160,285 · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 10,411 · extension: py
- content:

```python
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base class for passphrase upload/retrieval handlers."""
import base64
import cgi
import httplib
import logging
import StringIO
from google.appengine.api import app_identity
from google.appengine.api import datastore_errors
from google.appengine.ext import db
from cauliflowervest import settings as base_settings
from cauliflowervest.server import permissions
from cauliflowervest.server import service_factory
from cauliflowervest.server import settings
from cauliflowervest.server import util
from cauliflowervest.server.handlers import base_handler
from cauliflowervest.server.models import base
from cauliflowervest.server.models import errors
class InvalidArgumentError(errors.Error):
"""One of argument has invalid value or missing."""
error_code = httplib.BAD_REQUEST
def SendRetrievalEmail(
permission_type, entity, user, template='retrieval_email.txt',
skip_emails=None):
"""Sends a retrieval notification email.
Args:
permission_type: string, one of permission.TYPE_* variables.
entity: base.BasePassphrase instance of retrieved object.
user: base.User object of the user that retrieved the secret.
template: str message template.
skip_emails: list filter emails from recipients.
"""
data = {
'entity': entity,
'helpdesk_email': settings.HELPDESK_EMAIL,
'helpdesk_name': settings.HELPDESK_NAME,
'retrieved_by': user.user.email(),
'user': user,
'server_hostname': app_identity.get_default_version_hostname(),
}
body = util.RenderTemplate(template, data)
user_email = user.user.email()
try:
base_handler.VerifyPermissions(
permissions.SILENT_RETRIEVE, user, permission_type)
return
except errors.AccessDeniedError:
pass
try:
# If the user has access to "silently" retrieve keys without the owner
# being notified, email only SILENT_AUDIT_ADDRESSES.
base_handler.VerifyPermissions(
permissions.SILENT_RETRIEVE_WITH_AUDIT_EMAIL, user, permission_type)
to = [user_email] + settings.SILENT_AUDIT_ADDRESSES
except errors.AccessDeniedError:
# Otherwise email the owner and RETRIEVE_AUDIT_ADDRESSES.
to = [user_email] + settings.RETRIEVE_AUDIT_ADDRESSES
if entity.owners:
to.extend(entity.owners)
if skip_emails:
to = [email for email in to if email not in skip_emails]
subject_var = '%s_RETRIEVAL_EMAIL_SUBJECT' % entity.ESCROW_TYPE_NAME.upper()
subject = getattr(
settings, subject_var, 'Escrow secret retrieval notification.')
util.SendEmail(to, subject, body)
class PassphraseHandler(base_handler.BaseHandler):
"""Class which handles passphrase upload/retrieval."""
JSON_SECRET_NAME = 'passphrase'
PERMISSION_TYPE = 'base'
TARGET_ID_REGEX = None
SECRET_REGEX = None
QRCODE_DURING_PASSPHRASE_RETRIEVAL = True
def get(self, target_id):
"""Handles GET requests."""
if not self.IsValidTargetId(target_id):
raise errors.AccessError('target_id is malformed')
self.RetrieveSecret(target_id)
def put(self, target_id=None):
"""Handles PUT requests."""
if not target_id:
target_id = self.request.get('volume_uuid')
email = self._VerifyEscrowPermission()
self.VerifyXsrfToken(base_settings.SET_PASSPHRASE_ACTION)
if not self.IsValidTargetId(target_id):
raise errors.AccessError('target_id is malformed')
secret = self.GetSecretFromBody()
if not target_id or not secret:
self.AUDIT_LOG_MODEL.Log(message='Unknown PUT', request=self.request)
self.error(httplib.BAD_REQUEST)
return
if not self.IsValidSecret(secret):
raise errors.AccessError('secret is malformed')
owner = self.SanitizeEntityValue('owner', self.request.get('owner'))
if email:
owner = owner or email
self.PutNewSecret(owner, target_id, secret, self.request)
def _CreateNewSecretEntity(self, *args):
raise NotImplementedError()
def _VerifyEscrowPermission(self):
"""Returns user object or None."""
user = self.VerifyPermissions(permissions.ESCROW)
return user.email
def GetSecretFromBody(self):
"""Returns the uploaded secret from a PUT or POST request."""
secret = self.request.body
if not secret:
return None
# Work around a client/server bug which causes a stray '=' to be added
# to the request body when a form-encoded content type is sent.
if (self.request.content_type ==
'application/x-www-form-urlencoded' and secret[-1] == '='):
return secret[:-1]
else:
return secret
def IsValidSecret(self, unused_secret):
"""Returns true if secret str is a well formatted."""
return True
def IsValidTargetId(self, target_id):
"""Returns true if target_id str is a well formatted."""
if self.TARGET_ID_REGEX is None:
return True
return self.TARGET_ID_REGEX.match(target_id) is not None
def PutNewSecret(self, owner, target_id, secret, metadata):
"""Puts a new DuplicityKeyPair entity to Datastore.
Args:
owner: str, email address of the key pair's owner.
target_id: str, target id associated with this passphrase.
secret: str, secret data to escrow.
metadata: dict, dict of str metadata with keys matching
model's property names.
"""
if not target_id:
raise errors.AccessError('target_id is required')
entity = self._CreateNewSecretEntity(owner, target_id, secret)
for prop_name in entity.properties():
value = metadata.get(prop_name)
if value:
setattr(entity, prop_name, self.SanitizeEntityValue(prop_name, value))
inventory = service_factory.GetInventoryService()
inventory.FillInventoryServicePropertiesDuringEscrow(
entity, self.request)
for k, v in inventory.GetMetadataUpdates(entity).items():
setattr(entity, k, v)
try:
entity.put()
except errors.DuplicateEntity:
logging.info('Same data already in datastore.')
else:
self.AUDIT_LOG_MODEL.Log(
entity=entity, message='PUT', request=self.request)
self.response.out.write('Secret successfully escrowed!')
def CheckRetrieveAuthorizationAndNotifyOwner(self, entity):
"""Checks whether the user is authorised to retrieve the secret.
Args:
entity: base.BasePassPhrase instance of retrieved object.
Raises:
errors.AccessDeniedError: user lacks any retrieval permissions.
errors.AccessError: user lacks a specific retrieval permission.
"""
user = base.GetCurrentUser()
try:
self.VerifyPermissions(permissions.RETRIEVE, user=user)
except errors.AccessDeniedError:
try:
self.VerifyPermissions(permissions.RETRIEVE_CREATED_BY, user=user)
if str(entity.created_by) not in str(user.user.email()):
raise
except errors.AccessDeniedError:
self.VerifyPermissions(permissions.RETRIEVE_OWN, user=user)
if user.email not in entity.owners:
raise
if user.email not in entity.owners:
SendRetrievalEmail(self.PERMISSION_TYPE, entity, user)
def RetrieveSecret(self, target_id):
"""Handles a GET request to retrieve a secret.
Args:
target_id: str, Target ID to fetch the secret for.
Raises:
base.AccessError: given target_id is malformed.
base.NotFoundError: no secret was found for the given target_id.
"""
self.VerifyXsrfToken(base_settings.GET_PASSPHRASE_ACTION)
if self.request.get('id'):
try:
entity = self.SECRET_MODEL.get(db.Key(self.request.get('id')))
except datastore_errors.BadKeyError:
raise errors.AccessError('target_id is malformed')
else:
entity = self.SECRET_MODEL.GetLatestForTarget(
target_id, tag=self.request.get('tag', 'default'))
if not entity:
raise errors.NotFoundError(
'Passphrase not found: target_id %s' % target_id)
self.CheckRetrieveAuthorizationAndNotifyOwner(entity=entity)
self.AUDIT_LOG_MODEL.Log(message='GET', entity=entity, request=self.request)
escrow_secret = str(entity.secret).strip()
escrow_barcode_svg = None
qr_img_url = None
if self.QRCODE_DURING_PASSPHRASE_RETRIEVAL:
if len(escrow_secret) <= 100:
qr_img_url = (
'https://chart.googleapis.com/chart?chs=245x245&cht=qr&chl='
+ cgi.escape(escrow_secret))
recovery_str = self._PassphraseTypeName(entity)
params = {
'volume_type': self.SECRET_MODEL.ESCROW_TYPE_NAME,
'volume_uuid': entity.target_id,
'qr_img_url': qr_img_url,
'escrow_secret': escrow_secret,
'checksum': entity.checksum,
'recovery_str': recovery_str,
}
params[self.JSON_SECRET_NAME] = escrow_secret
if entity.active:
entity.UpdateMutableProperty('force_rekeying', True)
self.response.out.write(util.ToSafeJson(params))
def _PassphraseTypeName(self, entity):
return '%s key' % entity.ESCROW_TYPE_NAME
def SanitizeEntityValue(self, unused_prop_name, value):
if value is not None:
return cgi.escape(value)
def VerifyPermissions(
self, required_permission, user=None, permission_type=None):
"""Verifies a valid user is logged in.
Args:
required_permission: permission string from permissions.*.
user: optional, base.User entity; default current user.
permission_type: optional, string, one of permission.TYPE_* variables. if
omitted, self.PERMISSION_TYPE is used.
Returns:
base.User object of the current user.
Raises:
errors.AccessDeniedError: there was a permissions issue.
"""
permission_type = permission_type or self.PERMISSION_TYPE
if user is None:
user = base.GetCurrentUser()
base_handler.VerifyPermissions(required_permission, user, permission_type)
return user
```

- authors: ["[email protected]"] · author_id: (empty)
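`SendRetrievalEmail` above encodes a three-tier notification policy: holders of `permissions.SILENT_RETRIEVE` send no mail at all, holders of `SILENT_RETRIEVE_WITH_AUDIT_EMAIL` notify only the silent audit list, and everyone else notifies the retrieve audit list plus the entity's owners. A condensed sketch of that decision; `has_permission` is a stand-in for the raise-on-denial `VerifyPermissions` call, and the addresses are invented:

```python
# Condensed model of the recipient selection in SendRetrievalEmail; the
# has_permission predicate and settings values are illustrative stand-ins.
def choose_recipients(user_email, owners, has_permission,
                      silent_audit, retrieve_audit, skip_emails=()):
    if has_permission("SILENT_RETRIEVE"):
        return []                                   # no notification at all
    if has_permission("SILENT_RETRIEVE_WITH_AUDIT_EMAIL"):
        to = [user_email] + list(silent_audit)
    else:
        to = [user_email] + list(retrieve_audit) + list(owners)
    return [email for email in to if email not in skip_emails]

print(choose_recipients("[email protected]", ["[email protected]"],
                        lambda perm: False, [], ["[email protected]"]))
# ['[email protected]', '[email protected]', '[email protected]']
```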
**Example 8: `/src/demo/calculator/core/enums/calc_operations.py`** (repo `TrendingTechnology/hspylib`, branch `refs/heads/master`)
- blob_id: e8a46ea11ce16518bff9bf364ce3f3ddbecfb4c9 · directory_id: 153da69b35f032f5b83a06f17008ba41a1b336b4 · content_id: be7a79dd2dc2c43523ceea7e9ca10aca5c105aa2
- snapshot_id: 6400cadf9dfe6ab5733712dcfeccf8022d61c589 · revision_id: c79a2c17e89fe21d00ccd9c1646a03407cd61839
- detected_licenses: ["MIT"] · license_type: permissive · gha_license_id: null · gha_language: null
- visit_date: 2023-06-20T15:47:35.962661 · revision_date: 2021-07-19T22:12:18 · committer_date: 2021-07-19T23:45:41 · gha_event_created_at: null · gha_created_at: null
- github_id: null · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 621 · extension: py
- content:

```python
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
TODO Purpose of the file
@project: HSPyLib
hspylib.demo.calculator.core.enum
@file: calc_operations.py
@created: Tue, 4 May 2021
@author: <B>H</B>ugo <B>S</B>aporetti <B>J</B>unior"
@site: https://github.com/yorevs/hspylib
@license: MIT - Please refer to <https://opensource.org/licenses/MIT>
Copyright 2021, HSPyLib team
"""
from hspylib.core.enums.enumeration import Enumeration
class CalcOperations(Enumeration):
NO_OP = None
DIVISION = '/'
MULTIPLICATION = 'x'
SUBTRACTION = '-'
SUM = '+'
PERCENT = '%'
```

- authors: ["[email protected]"] · author_id: (empty)
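Assuming hspylib's `Enumeration` base behaves like the standard library's `enum.Enum` (member and value access, lookup by value), the class maps operator symbols to members. An equivalent stdlib sketch:

```python
# Stdlib stand-in; assumes hspylib's Enumeration is Enum-like, which is
# not verified here.
from enum import Enum

class CalcOperations(Enum):
    NO_OP = None
    DIVISION = '/'
    MULTIPLICATION = 'x'
    SUBTRACTION = '-'
    SUM = '+'
    PERCENT = '%'

op = CalcOperations('x')           # reverse lookup by symbol
print(op.name, repr(op.value))     # MULTIPLICATION 'x'
```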
**Example 9: `/Email/Reverse Email Address Lookup/buster/.eggs/twint-2.1.20-py3.9.egg/twint/storage/db.py`** (repo `NateWeiler/Resources`, branch `refs/heads/master`)
- blob_id: 2bc8b8d2909fc34cf28f01487adfe67227bc5f8f · directory_id: 55c250525bd7198ac905b1f2f86d16a44f73e03a · content_id: f15eb7a49b4a932756426874e2e973dd6935f7c1
- snapshot_id: 213d18ba86f7cc9d845741b8571b9e2c2c6be916 · revision_id: bd4a8a82a3e83a381c97d19e5df42cbababfc66c
- detected_licenses: [] · license_type: no_license · gha_license_id: null · gha_language: null
- visit_date: 2023-09-03T17:50:31.937137 · revision_date: 2023-08-28T23:50:57 · committer_date: 2023-08-28T23:50:57 · gha_event_created_at: 2022-09-08T15:20:18 · gha_created_at: 2020-05-27T16:18:17
- github_id: 267,368,545 · star_events_count: 2 · fork_events_count: 1
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 130 · extension: py
- content:

```text
version https://git-lfs.github.com/spec/v1
oid sha256:1bfee100772a62a8b82dc9142325b46126ad5a38e7090ebdd4c72c1a70a5cc92
size 10419
```

- authors: ["[email protected]"] · author_id: (empty)
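The content here is not the source file itself but a Git LFS pointer: a three-line stanza recording the spec version, the SHA-256 of the real blob, and its size (10,419 bytes, versus the 130-byte pointer counted in `length_bytes`). A small sketch parsing such a pointer:

```python
# Parse a Git LFS pointer stanza into a dict of its key/value fields.
def parse_lfs_pointer(text):
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1bfee100772a62a8b82dc9142325b46126ad5a38e7090ebdd4c72c1a70a5cc92
size 10419"""
info = parse_lfs_pointer(pointer)
assert info["size"] == "10419" and info["oid"].startswith("sha256:")
```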
**Example 10: `/.history/Test002/其他流程控制工具_20201204100832.py`** (repo `Allison001/developer_test`, branch `refs/heads/master`)
- blob_id: 4a589351fa2393c773bb74d5a6069d589b1e9813 · directory_id: e262e64415335060868e9f7f73ab8701e3be2f7b · content_id: 731c162d1e5a7392659bf40e9605c24fcfa3c253
- snapshot_id: 6e211f1e2bd4287ee26fd2b33baf1c6a8d80fc63 · revision_id: b8e04b4b248b0c10a35e93128a5323165990052c
- detected_licenses: [] · license_type: no_license · gha_license_id: null · gha_language: null
- visit_date: 2023-06-18T08:46:40.202383 · revision_date: 2021-07-23T03:31:54 · committer_date: 2021-07-23T03:31:54 · gha_event_created_at: null · gha_created_at: null
- github_id: 322,807,303 · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 1,180 · extension: py
- content:

```python
# if statements
# x = int(input("Enter a number: "))
# if x < 0:
#     x = 0
#     print("Negative changed to zero")
# elif x == 0:
#     print("Zero")
# elif x == 1:
#     print("Single")
# else:
#     print("More")

# for statements
# words = ['cat', 'window', 'defenestrate']
# for i in words:
#     print(i, len(i))
# for i in range(5):
#     print(i)
# for i in range(5, 10):
#     print(i)
# for i in range(0, 10, 2):
#     print(i)
# for i in range(-10, -100, -10):
#     print(i)
# a = ['Mary', 'had', 'a', 'little', 'lamb']
# for i in range(len(a)):
#     print(i, a[i])
# print(range(10))
# for i in range(10):
#     print(i)
# a = sum(range(0, 11))
# print(a)
# b = list(range(0, 11))
# print(b)
# for i in range(0, 11):
#     print(i)
# for n in range(2, 10):
#     for x in range(2, n):
#         if n % x == 0:
#             print(n, 'equals', x, '*', n // x)
#             break
#     else:
#         print(n, " is a prime number")
# for i in range(2, 10):
#     if i % 2 == 0:
#         print("even number", i)
#         continue
#     print("odd number", i)

# pass statement

# Defining functions
def sum(n):  # prints the Fibonacci series below n; the name shadows the builtin sum
    a, b = 0, 1
    while a < n:
        print(a, end=" ")
        a, b = b, a + b
```

- authors: ["[email protected]"] · author_id: (empty)
**Example 11: `/msgraph-cli-extensions/v1_0/notes_v1_0/azext_notes_v1_0/vendored_sdks/notes/_notes.py`** (repo `ezkemboi/msgraph-cli`, branch `refs/heads/main`)
- blob_id: 7f71265e9ff133f979a97088418c785819c87174 · directory_id: f8da830331428a8e1bbeadf23345f79f1750bd98 · content_id: 6d2851668537aa7fd655b335cdb0054322639a03
- snapshot_id: e023e1b7589461a738e42cbad691d9a0216b0779 · revision_id: 2ceeb27acabf7cfa219c8a20238d8c7411b9e782
- detected_licenses: ["MIT"] · license_type: permissive · gha_license_id: null · gha_language: null
- visit_date: 2023-02-12T13:45:03.402672 · revision_date: 2021-01-07T11:33:54 · committer_date: 2021-01-07T11:33:54 · gha_event_created_at: null · gha_created_at: null
- github_id: null · star_events_count: 0 · fork_events_count: 0
- src_encoding: UTF-8 · language: Python · is_vendor: false · is_generated: false · length_bytes: 55,016 · extension: py
- content:
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
from azure.mgmt.core import ARMPipelineClient
from msrest import Deserializer, Serializer
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Optional
from azure.core.credentials import TokenCredential
from ._configuration import NotesConfiguration
from .operations import GroupOperations
from .operations import GroupOnenoteOperations
from .operations import GroupOnenoteNotebookOperations
from .operations import GroupOnenoteNotebookSectionGroupOperations
from .operations import GroupOnenoteNotebookSectionGroupSectionOperations
from .operations import GroupOnenoteNotebookSectionGroupSectionPageOperations
from .operations import GroupOnenoteNotebookSectionOperations
from .operations import GroupOnenoteNotebookSectionPageOperations
from .operations import GroupOnenoteNotebookSectionParentSectionGroupOperations
from .operations import GroupOnenotePageOperations
from .operations import GroupOnenotePageParentNotebookOperations
from .operations import GroupOnenotePageParentNotebookSectionGroupOperations
from .operations import GroupOnenotePageParentNotebookSectionGroupSectionOperations
from .operations import GroupOnenotePageParentNotebookSectionOperations
from .operations import GroupOnenotePageParentNotebookSectionParentSectionGroupOperations
from .operations import GroupOnenotePageParentSectionOperations
from .operations import GroupOnenotePageParentSectionParentNotebookOperations
from .operations import GroupOnenotePageParentSectionParentNotebookSectionGroupOperations
from .operations import GroupOnenotePageParentSectionParentSectionGroupOperations
from .operations import GroupOnenotePageParentSectionParentSectionGroupParentNotebookOperations
from .operations import GroupOnenoteSectionGroupOperations
from .operations import GroupOnenoteSectionGroupParentNotebookOperations
from .operations import GroupOnenoteSectionGroupParentNotebookSectionOperations
from .operations import GroupOnenoteSectionGroupParentNotebookSectionPageOperations
from .operations import GroupOnenoteSectionGroupSectionOperations
from .operations import GroupOnenoteSectionGroupSectionPageOperations
from .operations import GroupOnenoteSectionGroupSectionPageParentNotebookOperations
from .operations import GroupOnenoteSectionGroupSectionParentNotebookOperations
from .operations import GroupOnenoteSectionOperations
from .operations import GroupOnenoteSectionPageOperations
from .operations import GroupOnenoteSectionPageParentNotebookOperations
from .operations import GroupOnenoteSectionPageParentNotebookSectionGroupOperations
from .operations import GroupOnenoteSectionParentNotebookOperations
from .operations import GroupOnenoteSectionParentNotebookSectionGroupOperations
from .operations import GroupOnenoteSectionParentSectionGroupOperations
from .operations import GroupOnenoteSectionParentSectionGroupParentNotebookOperations
from .operations import SiteOperations
from .operations import SiteOnenoteOperations
from .operations import SiteOnenoteNotebookOperations
from .operations import SiteOnenoteNotebookSectionGroupOperations
from .operations import SiteOnenoteNotebookSectionGroupSectionOperations
from .operations import SiteOnenoteNotebookSectionGroupSectionPageOperations
from .operations import SiteOnenoteNotebookSectionOperations
from .operations import SiteOnenoteNotebookSectionPageOperations
from .operations import SiteOnenoteNotebookSectionParentSectionGroupOperations
from .operations import SiteOnenotePageOperations
from .operations import SiteOnenotePageParentNotebookOperations
from .operations import SiteOnenotePageParentNotebookSectionGroupOperations
from .operations import SiteOnenotePageParentNotebookSectionGroupSectionOperations
from .operations import SiteOnenotePageParentNotebookSectionOperations
from .operations import SiteOnenotePageParentNotebookSectionParentSectionGroupOperations
from .operations import SiteOnenotePageParentSectionOperations
from .operations import SiteOnenotePageParentSectionParentNotebookOperations
from .operations import SiteOnenotePageParentSectionParentNotebookSectionGroupOperations
from .operations import SiteOnenotePageParentSectionParentSectionGroupOperations
from .operations import SiteOnenotePageParentSectionParentSectionGroupParentNotebookOperations
from .operations import SiteOnenoteSectionGroupOperations
from .operations import SiteOnenoteSectionGroupParentNotebookOperations
from .operations import SiteOnenoteSectionGroupParentNotebookSectionOperations
from .operations import SiteOnenoteSectionGroupParentNotebookSectionPageOperations
from .operations import SiteOnenoteSectionGroupSectionOperations
from .operations import SiteOnenoteSectionGroupSectionPageOperations
from .operations import SiteOnenoteSectionGroupSectionPageParentNotebookOperations
from .operations import SiteOnenoteSectionGroupSectionParentNotebookOperations
from .operations import SiteOnenoteSectionOperations
from .operations import SiteOnenoteSectionPageOperations
from .operations import SiteOnenoteSectionPageParentNotebookOperations
from .operations import SiteOnenoteSectionPageParentNotebookSectionGroupOperations
from .operations import SiteOnenoteSectionParentNotebookOperations
from .operations import SiteOnenoteSectionParentNotebookSectionGroupOperations
from .operations import SiteOnenoteSectionParentSectionGroupOperations
from .operations import SiteOnenoteSectionParentSectionGroupParentNotebookOperations
from .operations import UserOperations
from .operations import UserOnenoteOperations
from .operations import UserOnenoteNotebookOperations
from .operations import UserOnenoteNotebookSectionGroupOperations
from .operations import UserOnenoteNotebookSectionGroupSectionOperations
from .operations import UserOnenoteNotebookSectionGroupSectionPageOperations
from .operations import UserOnenoteNotebookSectionOperations
from .operations import UserOnenoteNotebookSectionPageOperations
from .operations import UserOnenoteNotebookSectionParentSectionGroupOperations
from .operations import UserOnenotePageOperations
from .operations import UserOnenotePageParentNotebookOperations
from .operations import UserOnenotePageParentNotebookSectionGroupOperations
from .operations import UserOnenotePageParentNotebookSectionGroupSectionOperations
from .operations import UserOnenotePageParentNotebookSectionOperations
from .operations import UserOnenotePageParentNotebookSectionParentSectionGroupOperations
from .operations import UserOnenotePageParentSectionOperations
from .operations import UserOnenotePageParentSectionParentNotebookOperations
from .operations import UserOnenotePageParentSectionParentNotebookSectionGroupOperations
from .operations import UserOnenotePageParentSectionParentSectionGroupOperations
from .operations import UserOnenotePageParentSectionParentSectionGroupParentNotebookOperations
from .operations import UserOnenoteSectionGroupOperations
from .operations import UserOnenoteSectionGroupParentNotebookOperations
from .operations import UserOnenoteSectionGroupParentNotebookSectionOperations
from .operations import UserOnenoteSectionGroupParentNotebookSectionPageOperations
from .operations import UserOnenoteSectionGroupSectionOperations
from .operations import UserOnenoteSectionGroupSectionPageOperations
from .operations import UserOnenoteSectionGroupSectionPageParentNotebookOperations
from .operations import UserOnenoteSectionGroupSectionParentNotebookOperations
from .operations import UserOnenoteSectionOperations
from .operations import UserOnenoteSectionPageOperations
from .operations import UserOnenoteSectionPageParentNotebookOperations
from .operations import UserOnenoteSectionPageParentNotebookSectionGroupOperations
from .operations import UserOnenoteSectionParentNotebookOperations
from .operations import UserOnenoteSectionParentNotebookSectionGroupOperations
from .operations import UserOnenoteSectionParentSectionGroupOperations
from .operations import UserOnenoteSectionParentSectionGroupParentNotebookOperations
from . import models
class Notes(object):
"""Notes.
:ivar group: GroupOperations operations
:vartype group: notes.operations.GroupOperations
:ivar group_onenote: GroupOnenoteOperations operations
:vartype group_onenote: notes.operations.GroupOnenoteOperations
:ivar group_onenote_notebook: GroupOnenoteNotebookOperations operations
:vartype group_onenote_notebook: notes.operations.GroupOnenoteNotebookOperations
:ivar group_onenote_notebook_section_group: GroupOnenoteNotebookSectionGroupOperations operations
:vartype group_onenote_notebook_section_group: notes.operations.GroupOnenoteNotebookSectionGroupOperations
:ivar group_onenote_notebook_section_group_section: GroupOnenoteNotebookSectionGroupSectionOperations operations
:vartype group_onenote_notebook_section_group_section: notes.operations.GroupOnenoteNotebookSectionGroupSectionOperations
:ivar group_onenote_notebook_section_group_section_page: GroupOnenoteNotebookSectionGroupSectionPageOperations operations
:vartype group_onenote_notebook_section_group_section_page: notes.operations.GroupOnenoteNotebookSectionGroupSectionPageOperations
:ivar group_onenote_notebook_section: GroupOnenoteNotebookSectionOperations operations
:vartype group_onenote_notebook_section: notes.operations.GroupOnenoteNotebookSectionOperations
:ivar group_onenote_notebook_section_page: GroupOnenoteNotebookSectionPageOperations operations
:vartype group_onenote_notebook_section_page: notes.operations.GroupOnenoteNotebookSectionPageOperations
:ivar group_onenote_notebook_section_parent_section_group: GroupOnenoteNotebookSectionParentSectionGroupOperations operations
:vartype group_onenote_notebook_section_parent_section_group: notes.operations.GroupOnenoteNotebookSectionParentSectionGroupOperations
:ivar group_onenote_page: GroupOnenotePageOperations operations
:vartype group_onenote_page: notes.operations.GroupOnenotePageOperations
:ivar group_onenote_page_parent_notebook: GroupOnenotePageParentNotebookOperations operations
:vartype group_onenote_page_parent_notebook: notes.operations.GroupOnenotePageParentNotebookOperations
:ivar group_onenote_page_parent_notebook_section_group: GroupOnenotePageParentNotebookSectionGroupOperations operations
:vartype group_onenote_page_parent_notebook_section_group: notes.operations.GroupOnenotePageParentNotebookSectionGroupOperations
:ivar group_onenote_page_parent_notebook_section_group_section: GroupOnenotePageParentNotebookSectionGroupSectionOperations operations
:vartype group_onenote_page_parent_notebook_section_group_section: notes.operations.GroupOnenotePageParentNotebookSectionGroupSectionOperations
:ivar group_onenote_page_parent_notebook_section: GroupOnenotePageParentNotebookSectionOperations operations
:vartype group_onenote_page_parent_notebook_section: notes.operations.GroupOnenotePageParentNotebookSectionOperations
:ivar group_onenote_page_parent_notebook_section_parent_section_group: GroupOnenotePageParentNotebookSectionParentSectionGroupOperations operations
:vartype group_onenote_page_parent_notebook_section_parent_section_group: notes.operations.GroupOnenotePageParentNotebookSectionParentSectionGroupOperations
:ivar group_onenote_page_parent_section: GroupOnenotePageParentSectionOperations operations
:vartype group_onenote_page_parent_section: notes.operations.GroupOnenotePageParentSectionOperations
:ivar group_onenote_page_parent_section_parent_notebook: GroupOnenotePageParentSectionParentNotebookOperations operations
:vartype group_onenote_page_parent_section_parent_notebook: notes.operations.GroupOnenotePageParentSectionParentNotebookOperations
:ivar group_onenote_page_parent_section_parent_notebook_section_group: GroupOnenotePageParentSectionParentNotebookSectionGroupOperations operations
:vartype group_onenote_page_parent_section_parent_notebook_section_group: notes.operations.GroupOnenotePageParentSectionParentNotebookSectionGroupOperations
:ivar group_onenote_page_parent_section_parent_section_group: GroupOnenotePageParentSectionParentSectionGroupOperations operations
:vartype group_onenote_page_parent_section_parent_section_group: notes.operations.GroupOnenotePageParentSectionParentSectionGroupOperations
:ivar group_onenote_page_parent_section_parent_section_group_parent_notebook: GroupOnenotePageParentSectionParentSectionGroupParentNotebookOperations operations
:vartype group_onenote_page_parent_section_parent_section_group_parent_notebook: notes.operations.GroupOnenotePageParentSectionParentSectionGroupParentNotebookOperations
:ivar group_onenote_section_group: GroupOnenoteSectionGroupOperations operations
:vartype group_onenote_section_group: notes.operations.GroupOnenoteSectionGroupOperations
:ivar group_onenote_section_group_parent_notebook: GroupOnenoteSectionGroupParentNotebookOperations operations
:vartype group_onenote_section_group_parent_notebook: notes.operations.GroupOnenoteSectionGroupParentNotebookOperations
:ivar group_onenote_section_group_parent_notebook_section: GroupOnenoteSectionGroupParentNotebookSectionOperations operations
:vartype group_onenote_section_group_parent_notebook_section: notes.operations.GroupOnenoteSectionGroupParentNotebookSectionOperations
:ivar group_onenote_section_group_parent_notebook_section_page: GroupOnenoteSectionGroupParentNotebookSectionPageOperations operations
:vartype group_onenote_section_group_parent_notebook_section_page: notes.operations.GroupOnenoteSectionGroupParentNotebookSectionPageOperations
:ivar group_onenote_section_group_section: GroupOnenoteSectionGroupSectionOperations operations
:vartype group_onenote_section_group_section: notes.operations.GroupOnenoteSectionGroupSectionOperations
:ivar group_onenote_section_group_section_page: GroupOnenoteSectionGroupSectionPageOperations operations
:vartype group_onenote_section_group_section_page: notes.operations.GroupOnenoteSectionGroupSectionPageOperations
:ivar group_onenote_section_group_section_page_parent_notebook: GroupOnenoteSectionGroupSectionPageParentNotebookOperations operations
:vartype group_onenote_section_group_section_page_parent_notebook: notes.operations.GroupOnenoteSectionGroupSectionPageParentNotebookOperations
:ivar group_onenote_section_group_section_parent_notebook: GroupOnenoteSectionGroupSectionParentNotebookOperations operations
:vartype group_onenote_section_group_section_parent_notebook: notes.operations.GroupOnenoteSectionGroupSectionParentNotebookOperations
:ivar group_onenote_section: GroupOnenoteSectionOperations operations
:vartype group_onenote_section: notes.operations.GroupOnenoteSectionOperations
:ivar group_onenote_section_page: GroupOnenoteSectionPageOperations operations
:vartype group_onenote_section_page: notes.operations.GroupOnenoteSectionPageOperations
:ivar group_onenote_section_page_parent_notebook: GroupOnenoteSectionPageParentNotebookOperations operations
:vartype group_onenote_section_page_parent_notebook: notes.operations.GroupOnenoteSectionPageParentNotebookOperations
:ivar group_onenote_section_page_parent_notebook_section_group: GroupOnenoteSectionPageParentNotebookSectionGroupOperations operations
:vartype group_onenote_section_page_parent_notebook_section_group: notes.operations.GroupOnenoteSectionPageParentNotebookSectionGroupOperations
:ivar group_onenote_section_parent_notebook: GroupOnenoteSectionParentNotebookOperations operations
:vartype group_onenote_section_parent_notebook: notes.operations.GroupOnenoteSectionParentNotebookOperations
:ivar group_onenote_section_parent_notebook_section_group: GroupOnenoteSectionParentNotebookSectionGroupOperations operations
:vartype group_onenote_section_parent_notebook_section_group: notes.operations.GroupOnenoteSectionParentNotebookSectionGroupOperations
:ivar group_onenote_section_parent_section_group: GroupOnenoteSectionParentSectionGroupOperations operations
:vartype group_onenote_section_parent_section_group: notes.operations.GroupOnenoteSectionParentSectionGroupOperations
:ivar group_onenote_section_parent_section_group_parent_notebook: GroupOnenoteSectionParentSectionGroupParentNotebookOperations operations
:vartype group_onenote_section_parent_section_group_parent_notebook: notes.operations.GroupOnenoteSectionParentSectionGroupParentNotebookOperations
:ivar site: SiteOperations operations
:vartype site: notes.operations.SiteOperations
:ivar site_onenote: SiteOnenoteOperations operations
:vartype site_onenote: notes.operations.SiteOnenoteOperations
:ivar site_onenote_notebook: SiteOnenoteNotebookOperations operations
:vartype site_onenote_notebook: notes.operations.SiteOnenoteNotebookOperations
:ivar site_onenote_notebook_section_group: SiteOnenoteNotebookSectionGroupOperations operations
:vartype site_onenote_notebook_section_group: notes.operations.SiteOnenoteNotebookSectionGroupOperations
:ivar site_onenote_notebook_section_group_section: SiteOnenoteNotebookSectionGroupSectionOperations operations
:vartype site_onenote_notebook_section_group_section: notes.operations.SiteOnenoteNotebookSectionGroupSectionOperations
:ivar site_onenote_notebook_section_group_section_page: SiteOnenoteNotebookSectionGroupSectionPageOperations operations
:vartype site_onenote_notebook_section_group_section_page: notes.operations.SiteOnenoteNotebookSectionGroupSectionPageOperations
:ivar site_onenote_notebook_section: SiteOnenoteNotebookSectionOperations operations
:vartype site_onenote_notebook_section: notes.operations.SiteOnenoteNotebookSectionOperations
:ivar site_onenote_notebook_section_page: SiteOnenoteNotebookSectionPageOperations operations
:vartype site_onenote_notebook_section_page: notes.operations.SiteOnenoteNotebookSectionPageOperations
:ivar site_onenote_notebook_section_parent_section_group: SiteOnenoteNotebookSectionParentSectionGroupOperations operations
:vartype site_onenote_notebook_section_parent_section_group: notes.operations.SiteOnenoteNotebookSectionParentSectionGroupOperations
:ivar site_onenote_page: SiteOnenotePageOperations operations
:vartype site_onenote_page: notes.operations.SiteOnenotePageOperations
:ivar site_onenote_page_parent_notebook: SiteOnenotePageParentNotebookOperations operations
:vartype site_onenote_page_parent_notebook: notes.operations.SiteOnenotePageParentNotebookOperations
:ivar site_onenote_page_parent_notebook_section_group: SiteOnenotePageParentNotebookSectionGroupOperations operations
:vartype site_onenote_page_parent_notebook_section_group: notes.operations.SiteOnenotePageParentNotebookSectionGroupOperations
:ivar site_onenote_page_parent_notebook_section_group_section: SiteOnenotePageParentNotebookSectionGroupSectionOperations operations
:vartype site_onenote_page_parent_notebook_section_group_section: notes.operations.SiteOnenotePageParentNotebookSectionGroupSectionOperations
:ivar site_onenote_page_parent_notebook_section: SiteOnenotePageParentNotebookSectionOperations operations
:vartype site_onenote_page_parent_notebook_section: notes.operations.SiteOnenotePageParentNotebookSectionOperations
:ivar site_onenote_page_parent_notebook_section_parent_section_group: SiteOnenotePageParentNotebookSectionParentSectionGroupOperations operations
:vartype site_onenote_page_parent_notebook_section_parent_section_group: notes.operations.SiteOnenotePageParentNotebookSectionParentSectionGroupOperations
:ivar site_onenote_page_parent_section: SiteOnenotePageParentSectionOperations operations
:vartype site_onenote_page_parent_section: notes.operations.SiteOnenotePageParentSectionOperations
:ivar site_onenote_page_parent_section_parent_notebook: SiteOnenotePageParentSectionParentNotebookOperations operations
:vartype site_onenote_page_parent_section_parent_notebook: notes.operations.SiteOnenotePageParentSectionParentNotebookOperations
:ivar site_onenote_page_parent_section_parent_notebook_section_group: SiteOnenotePageParentSectionParentNotebookSectionGroupOperations operations
:vartype site_onenote_page_parent_section_parent_notebook_section_group: notes.operations.SiteOnenotePageParentSectionParentNotebookSectionGroupOperations
:ivar site_onenote_page_parent_section_parent_section_group: SiteOnenotePageParentSectionParentSectionGroupOperations operations
:vartype site_onenote_page_parent_section_parent_section_group: notes.operations.SiteOnenotePageParentSectionParentSectionGroupOperations
:ivar site_onenote_page_parent_section_parent_section_group_parent_notebook: SiteOnenotePageParentSectionParentSectionGroupParentNotebookOperations operations
:vartype site_onenote_page_parent_section_parent_section_group_parent_notebook: notes.operations.SiteOnenotePageParentSectionParentSectionGroupParentNotebookOperations
:ivar site_onenote_section_group: SiteOnenoteSectionGroupOperations operations
:vartype site_onenote_section_group: notes.operations.SiteOnenoteSectionGroupOperations
:ivar site_onenote_section_group_parent_notebook: SiteOnenoteSectionGroupParentNotebookOperations operations
:vartype site_onenote_section_group_parent_notebook: notes.operations.SiteOnenoteSectionGroupParentNotebookOperations
:ivar site_onenote_section_group_parent_notebook_section: SiteOnenoteSectionGroupParentNotebookSectionOperations operations
:vartype site_onenote_section_group_parent_notebook_section: notes.operations.SiteOnenoteSectionGroupParentNotebookSectionOperations
:ivar site_onenote_section_group_parent_notebook_section_page: SiteOnenoteSectionGroupParentNotebookSectionPageOperations operations
:vartype site_onenote_section_group_parent_notebook_section_page: notes.operations.SiteOnenoteSectionGroupParentNotebookSectionPageOperations
:ivar site_onenote_section_group_section: SiteOnenoteSectionGroupSectionOperations operations
:vartype site_onenote_section_group_section: notes.operations.SiteOnenoteSectionGroupSectionOperations
:ivar site_onenote_section_group_section_page: SiteOnenoteSectionGroupSectionPageOperations operations
:vartype site_onenote_section_group_section_page: notes.operations.SiteOnenoteSectionGroupSectionPageOperations
:ivar site_onenote_section_group_section_page_parent_notebook: SiteOnenoteSectionGroupSectionPageParentNotebookOperations operations
:vartype site_onenote_section_group_section_page_parent_notebook: notes.operations.SiteOnenoteSectionGroupSectionPageParentNotebookOperations
:ivar site_onenote_section_group_section_parent_notebook: SiteOnenoteSectionGroupSectionParentNotebookOperations operations
:vartype site_onenote_section_group_section_parent_notebook: notes.operations.SiteOnenoteSectionGroupSectionParentNotebookOperations
:ivar site_onenote_section: SiteOnenoteSectionOperations operations
:vartype site_onenote_section: notes.operations.SiteOnenoteSectionOperations
:ivar site_onenote_section_page: SiteOnenoteSectionPageOperations operations
:vartype site_onenote_section_page: notes.operations.SiteOnenoteSectionPageOperations
:ivar site_onenote_section_page_parent_notebook: SiteOnenoteSectionPageParentNotebookOperations operations
:vartype site_onenote_section_page_parent_notebook: notes.operations.SiteOnenoteSectionPageParentNotebookOperations
:ivar site_onenote_section_page_parent_notebook_section_group: SiteOnenoteSectionPageParentNotebookSectionGroupOperations operations
:vartype site_onenote_section_page_parent_notebook_section_group: notes.operations.SiteOnenoteSectionPageParentNotebookSectionGroupOperations
:ivar site_onenote_section_parent_notebook: SiteOnenoteSectionParentNotebookOperations operations
:vartype site_onenote_section_parent_notebook: notes.operations.SiteOnenoteSectionParentNotebookOperations
:ivar site_onenote_section_parent_notebook_section_group: SiteOnenoteSectionParentNotebookSectionGroupOperations operations
:vartype site_onenote_section_parent_notebook_section_group: notes.operations.SiteOnenoteSectionParentNotebookSectionGroupOperations
:ivar site_onenote_section_parent_section_group: SiteOnenoteSectionParentSectionGroupOperations operations
:vartype site_onenote_section_parent_section_group: notes.operations.SiteOnenoteSectionParentSectionGroupOperations
:ivar site_onenote_section_parent_section_group_parent_notebook: SiteOnenoteSectionParentSectionGroupParentNotebookOperations operations
:vartype site_onenote_section_parent_section_group_parent_notebook: notes.operations.SiteOnenoteSectionParentSectionGroupParentNotebookOperations
:ivar user: UserOperations operations
:vartype user: notes.operations.UserOperations
:ivar user_onenote: UserOnenoteOperations operations
:vartype user_onenote: notes.operations.UserOnenoteOperations
:ivar user_onenote_notebook: UserOnenoteNotebookOperations operations
:vartype user_onenote_notebook: notes.operations.UserOnenoteNotebookOperations
:ivar user_onenote_notebook_section_group: UserOnenoteNotebookSectionGroupOperations operations
:vartype user_onenote_notebook_section_group: notes.operations.UserOnenoteNotebookSectionGroupOperations
:ivar user_onenote_notebook_section_group_section: UserOnenoteNotebookSectionGroupSectionOperations operations
:vartype user_onenote_notebook_section_group_section: notes.operations.UserOnenoteNotebookSectionGroupSectionOperations
:ivar user_onenote_notebook_section_group_section_page: UserOnenoteNotebookSectionGroupSectionPageOperations operations
:vartype user_onenote_notebook_section_group_section_page: notes.operations.UserOnenoteNotebookSectionGroupSectionPageOperations
:ivar user_onenote_notebook_section: UserOnenoteNotebookSectionOperations operations
:vartype user_onenote_notebook_section: notes.operations.UserOnenoteNotebookSectionOperations
:ivar user_onenote_notebook_section_page: UserOnenoteNotebookSectionPageOperations operations
:vartype user_onenote_notebook_section_page: notes.operations.UserOnenoteNotebookSectionPageOperations
:ivar user_onenote_notebook_section_parent_section_group: UserOnenoteNotebookSectionParentSectionGroupOperations operations
:vartype user_onenote_notebook_section_parent_section_group: notes.operations.UserOnenoteNotebookSectionParentSectionGroupOperations
:ivar user_onenote_page: UserOnenotePageOperations operations
:vartype user_onenote_page: notes.operations.UserOnenotePageOperations
:ivar user_onenote_page_parent_notebook: UserOnenotePageParentNotebookOperations operations
:vartype user_onenote_page_parent_notebook: notes.operations.UserOnenotePageParentNotebookOperations
:ivar user_onenote_page_parent_notebook_section_group: UserOnenotePageParentNotebookSectionGroupOperations operations
:vartype user_onenote_page_parent_notebook_section_group: notes.operations.UserOnenotePageParentNotebookSectionGroupOperations
:ivar user_onenote_page_parent_notebook_section_group_section: UserOnenotePageParentNotebookSectionGroupSectionOperations operations
:vartype user_onenote_page_parent_notebook_section_group_section: notes.operations.UserOnenotePageParentNotebookSectionGroupSectionOperations
:ivar user_onenote_page_parent_notebook_section: UserOnenotePageParentNotebookSectionOperations operations
:vartype user_onenote_page_parent_notebook_section: notes.operations.UserOnenotePageParentNotebookSectionOperations
:ivar user_onenote_page_parent_notebook_section_parent_section_group: UserOnenotePageParentNotebookSectionParentSectionGroupOperations operations
:vartype user_onenote_page_parent_notebook_section_parent_section_group: notes.operations.UserOnenotePageParentNotebookSectionParentSectionGroupOperations
:ivar user_onenote_page_parent_section: UserOnenotePageParentSectionOperations operations
:vartype user_onenote_page_parent_section: notes.operations.UserOnenotePageParentSectionOperations
:ivar user_onenote_page_parent_section_parent_notebook: UserOnenotePageParentSectionParentNotebookOperations operations
:vartype user_onenote_page_parent_section_parent_notebook: notes.operations.UserOnenotePageParentSectionParentNotebookOperations
:ivar user_onenote_page_parent_section_parent_notebook_section_group: UserOnenotePageParentSectionParentNotebookSectionGroupOperations operations
:vartype user_onenote_page_parent_section_parent_notebook_section_group: notes.operations.UserOnenotePageParentSectionParentNotebookSectionGroupOperations
:ivar user_onenote_page_parent_section_parent_section_group: UserOnenotePageParentSectionParentSectionGroupOperations operations
:vartype user_onenote_page_parent_section_parent_section_group: notes.operations.UserOnenotePageParentSectionParentSectionGroupOperations
:ivar user_onenote_page_parent_section_parent_section_group_parent_notebook: UserOnenotePageParentSectionParentSectionGroupParentNotebookOperations operations
:vartype user_onenote_page_parent_section_parent_section_group_parent_notebook: notes.operations.UserOnenotePageParentSectionParentSectionGroupParentNotebookOperations
:ivar user_onenote_section_group: UserOnenoteSectionGroupOperations operations
:vartype user_onenote_section_group: notes.operations.UserOnenoteSectionGroupOperations
:ivar user_onenote_section_group_parent_notebook: UserOnenoteSectionGroupParentNotebookOperations operations
:vartype user_onenote_section_group_parent_notebook: notes.operations.UserOnenoteSectionGroupParentNotebookOperations
:ivar user_onenote_section_group_parent_notebook_section: UserOnenoteSectionGroupParentNotebookSectionOperations operations
:vartype user_onenote_section_group_parent_notebook_section: notes.operations.UserOnenoteSectionGroupParentNotebookSectionOperations
:ivar user_onenote_section_group_parent_notebook_section_page: UserOnenoteSectionGroupParentNotebookSectionPageOperations operations
:vartype user_onenote_section_group_parent_notebook_section_page: notes.operations.UserOnenoteSectionGroupParentNotebookSectionPageOperations
:ivar user_onenote_section_group_section: UserOnenoteSectionGroupSectionOperations operations
:vartype user_onenote_section_group_section: notes.operations.UserOnenoteSectionGroupSectionOperations
:ivar user_onenote_section_group_section_page: UserOnenoteSectionGroupSectionPageOperations operations
:vartype user_onenote_section_group_section_page: notes.operations.UserOnenoteSectionGroupSectionPageOperations
:ivar user_onenote_section_group_section_page_parent_notebook: UserOnenoteSectionGroupSectionPageParentNotebookOperations operations
:vartype user_onenote_section_group_section_page_parent_notebook: notes.operations.UserOnenoteSectionGroupSectionPageParentNotebookOperations
:ivar user_onenote_section_group_section_parent_notebook: UserOnenoteSectionGroupSectionParentNotebookOperations operations
:vartype user_onenote_section_group_section_parent_notebook: notes.operations.UserOnenoteSectionGroupSectionParentNotebookOperations
:ivar user_onenote_section: UserOnenoteSectionOperations operations
:vartype user_onenote_section: notes.operations.UserOnenoteSectionOperations
:ivar user_onenote_section_page: UserOnenoteSectionPageOperations operations
:vartype user_onenote_section_page: notes.operations.UserOnenoteSectionPageOperations
:ivar user_onenote_section_page_parent_notebook: UserOnenoteSectionPageParentNotebookOperations operations
:vartype user_onenote_section_page_parent_notebook: notes.operations.UserOnenoteSectionPageParentNotebookOperations
:ivar user_onenote_section_page_parent_notebook_section_group: UserOnenoteSectionPageParentNotebookSectionGroupOperations operations
:vartype user_onenote_section_page_parent_notebook_section_group: notes.operations.UserOnenoteSectionPageParentNotebookSectionGroupOperations
:ivar user_onenote_section_parent_notebook: UserOnenoteSectionParentNotebookOperations operations
:vartype user_onenote_section_parent_notebook: notes.operations.UserOnenoteSectionParentNotebookOperations
:ivar user_onenote_section_parent_notebook_section_group: UserOnenoteSectionParentNotebookSectionGroupOperations operations
:vartype user_onenote_section_parent_notebook_section_group: notes.operations.UserOnenoteSectionParentNotebookSectionGroupOperations
:ivar user_onenote_section_parent_section_group: UserOnenoteSectionParentSectionGroupOperations operations
:vartype user_onenote_section_parent_section_group: notes.operations.UserOnenoteSectionParentSectionGroupOperations
:ivar user_onenote_section_parent_section_group_parent_notebook: UserOnenoteSectionParentSectionGroupParentNotebookOperations operations
:vartype user_onenote_section_parent_section_group_parent_notebook: notes.operations.UserOnenoteSectionParentSectionGroupParentNotebookOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param top: Show only the first n items.
:type top: int
:param skip: Skip the first n items.
:type skip: int
:param search: Search items by search phrases.
:type search: str
:param filter: Filter items by property values.
:type filter: str
:param count: Include count of items.
:type count: bool
:param str base_url: Service URL
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
"""
def __init__(
self,
credential, # type: "TokenCredential"
top=None, # type: Optional[int]
skip=None, # type: Optional[int]
search=None, # type: Optional[str]
filter=None, # type: Optional[str]
count=None, # type: Optional[bool]
base_url=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> None
if not base_url:
base_url = 'https://graph.microsoft.com/v1.0'
self._config = NotesConfiguration(credential, top, skip, search, filter, count, **kwargs)
self._client = ARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.group = GroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote = GroupOnenoteOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook = GroupOnenoteNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook_section_group = GroupOnenoteNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook_section_group_section = GroupOnenoteNotebookSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook_section_group_section_page = GroupOnenoteNotebookSectionGroupSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook_section = GroupOnenoteNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook_section_page = GroupOnenoteNotebookSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_notebook_section_parent_section_group = GroupOnenoteNotebookSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page = GroupOnenotePageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_notebook = GroupOnenotePageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_notebook_section_group = GroupOnenotePageParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_notebook_section_group_section = GroupOnenotePageParentNotebookSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_notebook_section = GroupOnenotePageParentNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_notebook_section_parent_section_group = GroupOnenotePageParentNotebookSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_section = GroupOnenotePageParentSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_section_parent_notebook = GroupOnenotePageParentSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_section_parent_notebook_section_group = GroupOnenotePageParentSectionParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_section_parent_section_group = GroupOnenotePageParentSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_page_parent_section_parent_section_group_parent_notebook = GroupOnenotePageParentSectionParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group = GroupOnenoteSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_parent_notebook = GroupOnenoteSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_parent_notebook_section = GroupOnenoteSectionGroupParentNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_parent_notebook_section_page = GroupOnenoteSectionGroupParentNotebookSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_section = GroupOnenoteSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_section_page = GroupOnenoteSectionGroupSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_section_page_parent_notebook = GroupOnenoteSectionGroupSectionPageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_group_section_parent_notebook = GroupOnenoteSectionGroupSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section = GroupOnenoteSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_page = GroupOnenoteSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_page_parent_notebook = GroupOnenoteSectionPageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_page_parent_notebook_section_group = GroupOnenoteSectionPageParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_parent_notebook = GroupOnenoteSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_parent_notebook_section_group = GroupOnenoteSectionParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_parent_section_group = GroupOnenoteSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.group_onenote_section_parent_section_group_parent_notebook = GroupOnenoteSectionParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site = SiteOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote = SiteOnenoteOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook = SiteOnenoteNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook_section_group = SiteOnenoteNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook_section_group_section = SiteOnenoteNotebookSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook_section_group_section_page = SiteOnenoteNotebookSectionGroupSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook_section = SiteOnenoteNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook_section_page = SiteOnenoteNotebookSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_notebook_section_parent_section_group = SiteOnenoteNotebookSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page = SiteOnenotePageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_notebook = SiteOnenotePageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_notebook_section_group = SiteOnenotePageParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_notebook_section_group_section = SiteOnenotePageParentNotebookSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_notebook_section = SiteOnenotePageParentNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_notebook_section_parent_section_group = SiteOnenotePageParentNotebookSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_section = SiteOnenotePageParentSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_section_parent_notebook = SiteOnenotePageParentSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_section_parent_notebook_section_group = SiteOnenotePageParentSectionParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_section_parent_section_group = SiteOnenotePageParentSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_page_parent_section_parent_section_group_parent_notebook = SiteOnenotePageParentSectionParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group = SiteOnenoteSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_parent_notebook = SiteOnenoteSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_parent_notebook_section = SiteOnenoteSectionGroupParentNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_parent_notebook_section_page = SiteOnenoteSectionGroupParentNotebookSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_section = SiteOnenoteSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_section_page = SiteOnenoteSectionGroupSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_section_page_parent_notebook = SiteOnenoteSectionGroupSectionPageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_group_section_parent_notebook = SiteOnenoteSectionGroupSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section = SiteOnenoteSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_page = SiteOnenoteSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_page_parent_notebook = SiteOnenoteSectionPageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_page_parent_notebook_section_group = SiteOnenoteSectionPageParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_parent_notebook = SiteOnenoteSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_parent_notebook_section_group = SiteOnenoteSectionParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_parent_section_group = SiteOnenoteSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.site_onenote_section_parent_section_group_parent_notebook = SiteOnenoteSectionParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user = UserOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote = UserOnenoteOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook = UserOnenoteNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook_section_group = UserOnenoteNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook_section_group_section = UserOnenoteNotebookSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook_section_group_section_page = UserOnenoteNotebookSectionGroupSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook_section = UserOnenoteNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook_section_page = UserOnenoteNotebookSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_notebook_section_parent_section_group = UserOnenoteNotebookSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page = UserOnenotePageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_notebook = UserOnenotePageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_notebook_section_group = UserOnenotePageParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_notebook_section_group_section = UserOnenotePageParentNotebookSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_notebook_section = UserOnenotePageParentNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_notebook_section_parent_section_group = UserOnenotePageParentNotebookSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_section = UserOnenotePageParentSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_section_parent_notebook = UserOnenotePageParentSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_section_parent_notebook_section_group = UserOnenotePageParentSectionParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_section_parent_section_group = UserOnenotePageParentSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_page_parent_section_parent_section_group_parent_notebook = UserOnenotePageParentSectionParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group = UserOnenoteSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_parent_notebook = UserOnenoteSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_parent_notebook_section = UserOnenoteSectionGroupParentNotebookSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_parent_notebook_section_page = UserOnenoteSectionGroupParentNotebookSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_section = UserOnenoteSectionGroupSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_section_page = UserOnenoteSectionGroupSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_section_page_parent_notebook = UserOnenoteSectionGroupSectionPageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_group_section_parent_notebook = UserOnenoteSectionGroupSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section = UserOnenoteSectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_page = UserOnenoteSectionPageOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_page_parent_notebook = UserOnenoteSectionPageParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_page_parent_notebook_section_group = UserOnenoteSectionPageParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_parent_notebook = UserOnenoteSectionParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_parent_notebook_section_group = UserOnenoteSectionParentNotebookSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_parent_section_group = UserOnenoteSectionParentSectionGroupOperations(
self._client, self._config, self._serialize, self._deserialize)
self.user_onenote_section_parent_section_group_parent_notebook = UserOnenoteSectionParentSectionGroupParentNotebookOperations(
self._client, self._config, self._serialize, self._deserialize)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> Notes
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
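# Usage sketch (added): only the constructor signature and the context-manager
# protocol are taken from the class above; the credential class is an
# assumption from the separate azure-identity package, and no operation-group
# method is exercised here.
# from azure.identity import InteractiveBrowserCredential
# credential = InteractiveBrowserCredential()
# with Notes(credential, top=10) as client:
#     notebook_ops = client.user_onenote_notebook  # one of the operation groups above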
|
[
"[email protected]"
] | |
431a3d4d65b0f75ede18b9904310843857734faa
|
a4b81839fe6d7726eb6c9d2c9fd0d9a70cf3ef3f
|
/Code/LuanqibazaoExercise/20191123/01.py
|
d20c16f2cd24a38ecd39cdb9d6b9118d2fd98e65
|
[] |
no_license
|
Python87-com/PythonExercise
|
3f84e0f194254d0f0a8b106348b214ccdeebf842
|
9bef64c6d3b143236bf06131e5f7a5aabcf6980b
|
refs/heads/master
| 2021-01-31T15:56:27.743718 | 2020-01-02T12:10:43 | 2020-01-02T12:10:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 363 |
py
|
"""
属性私有化
"""
class Student:
def __init__(self, name, sex):
self.name = name
self.__sex = sex
def play(self):
print(self.name, "性别是", self.__sex)
# 创建对象
s1 = Student("Python87", "女")
s1.play() # Python87 性别是 女
s1.__sex = "男"
s1.play() # Python87 性别是 女
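# Added demonstration (sketch): the assignment above does not touch the
# private field, because name mangling only happens inside the class body.
# s1.__sex = "male" creates a brand-new public attribute literally named
# __sex, while the constructor's self.__sex was stored as _Student__sex.
print(s1.__sex)          # male  -- the new public attribute
print(s1._Student__sex)  # female -- the untouched private attribute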
|
[
"[email protected]"
] | |
a35eab73e09286089b343c8b91fc0a7562c32b20
|
f8666599b83d34c861651861cc7db5b3c434fc87
|
/plotly/validators/scatter3d/marker/colorbar/tickformatstop/_enabled.py
|
7738271280f1775097e8652e9c73659f9a88a3a9
|
[
"MIT"
] |
permissive
|
mode/plotly.py
|
8b66806e88c9f1820d478bab726f0bea81884432
|
c5a9ac386a40df2816e6c13264dadf14299401e4
|
refs/heads/master
| 2022-08-26T00:07:35.376636 | 2018-09-26T19:08:54 | 2018-09-26T19:19:31 | 60,372,968 | 1 | 1 |
MIT
| 2019-11-13T23:03:22 | 2016-06-03T19:34:55 |
Python
|
UTF-8
|
Python
| false | false | 514 |
py
|
import _plotly_utils.basevalidators
class EnabledValidator(_plotly_utils.basevalidators.BooleanValidator):
def __init__(
self,
plotly_name='enabled',
parent_name='scatter3d.marker.colorbar.tickformatstop',
**kwargs
):
super(EnabledValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop('edit_type', 'calc'),
role=kwargs.pop('role', 'info'),
**kwargs
)
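# Usage sketch (assumption: validators expose plotly's standard
# validate_coerce interface and are normally instantiated by generated
# code rather than by hand):
# v = EnabledValidator()
# v.validate_coerce(True)  # -> True; invalid input raises ValueError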
|
[
"[email protected]"
] | |
bdf3194356fbcc1281fbaebb3743619a8406c1c7
|
c0da86779f7037d9fa50499c470f8dd91fb11093
|
/DjangoProject/报障系统/StandYourGround/StandYourGround/wsgi.py
|
490cdd0c653e5384cf689bd9b80cceb3a8ba7787
|
[] |
no_license
|
SuoSuo-Rocky/HaiYan_left_Pro
|
9c96148a9fe9edc191b2aa1ba1a4caf55184d2e1
|
670aedee8b403141c5e81615dea89d28dfcd9ebb
|
refs/heads/master
| 2023-01-12T07:12:53.602217 | 2019-07-07T06:20:25 | 2019-07-07T06:20:25 | 195,602,247 | 0 | 0 | null | 2023-01-05T20:39:51 | 2019-07-07T02:39:33 |
JavaScript
|
UTF-8
|
Python
| false | false | 407 |
py
|
"""
WSGI config for StandYourGround project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'StandYourGround.settings')
application = get_wsgi_application()
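# Deployment sketch (assumed command, not part of this file): a WSGI server
# such as gunicorn is pointed at the callable above, e.g.
#   gunicorn StandYourGround.wsgi:application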
|
[
"[email protected]"
] | |
ef1fb7e03ab2eaaddb3e5df941f24e1d597911b9
|
0ddb274058b7cf4dc4152b4167195ed322914795
|
/dxfwrite/algebra/base.py
|
bdf9002723cfebd13914c55d8162a76c3cd6c901
|
[
"MIT"
] |
permissive
|
allartburns/dxfwrite
|
b920843426ebd8cac9c66b84d306655494208d90
|
2679407a4efc797a616ac26898312f0ba1a24041
|
refs/heads/master
| 2020-05-29T22:43:01.545611 | 2015-04-11T00:32:43 | 2015-04-11T00:32:43 | 33,616,670 | 2 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,232 |
py
|
#!/usr/bin/env python
#coding:utf-8
# Purpose:
# Created: 27.03.2010
__author__ = "mozman <[email protected]>"
import math
HALF_PI = math.pi / 2.
THREE_PI_HALF = 1.5 * math.pi
DOUBLE_PI = math.pi * 2.
def rotate_2d(point, angle):
""" rotate point around origin point about angle """
x = point[0] * math.cos(angle) - point[1] * math.sin(angle)
y = point[1] * math.cos(angle) + point[0] * math.sin(angle)
return (x, y)
def equals_almost(v1, v2, places=7):
"""compare two float values
places: significant decimal places
"""
return round(v1, places) == round(v2, places)
def normalize_angle(angle):
""" return an angle between 0 and 2*pi """
angle = math.fmod(angle, DOUBLE_PI)
if angle < 0:
angle += DOUBLE_PI
return angle
def is_vertical_angle(angle, places=7):
    """ returns True for pi/2 and 3*pi/2 """
angle = normalize_angle(angle)
return (equals_almost(angle, HALF_PI, places) or
equals_almost(angle, THREE_PI_HALF, places))
def get_angle(p1, p2):
"""calc angle between the line p1-p2 and the x-axis
input: points as tuples
result: angle in radians
"""
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.atan2(dy, dx)
def right_of_line(point, p1, p2):
    """ True if point is right of the line p1 -> p2
    """
    return not left_of_line(point, p1, p2)
def left_of_line(point, p1, p2):
    """ True if point is left of the line p1 -> p2
    """
    # check if p1 and p2 are on the same vertical line
    if p1[0] == p2[0]:
        # compute on which side of the line the point should be
        should_be_left = p1[1] < p2[1]
if should_be_left:
return point[0] < p1[0]
else:
return point[0] > p1[0]
    else:
        # get the pitch of the line
        pitch = (p2[1] - p1[1]) / (p2[0] - p1[0])
        # get the y-value at the point's x-position
        y = pitch * (point[0] - p1[0]) + p1[1]
        # compute if the point should be above or below the line
        should_be_above = p1[0] < p2[0]
        if should_be_above:
            return point[1] > y
        else:
            return point[1] < y
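# Added self-check sketch for the helpers above (example values chosen here,
# not part of the original module):
if __name__ == '__main__':
    x, y = rotate_2d((1., 0.), HALF_PI)  # quarter turn of the unit x-vector
    assert equals_almost(x, 0.) and equals_almost(y, 1.)
    assert equals_almost(get_angle((0., 0.), (1., 1.)), math.pi / 4.)
    assert left_of_line((0., 1.), (0., 0.), (1., 0.))  # point above a left-to-right line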
|
[
"[email protected]"
] | |
0fe5f2b0b0f4f47c58605046ac2673428da3f800
|
121f82d04c299a6f6fcd3b469794ae076b74a3f6
|
/virtual/bin/django-admin
|
670898c608a26bc12d5674e334f0520b7d3d80ee
|
[
"MIT"
] |
permissive
|
monicaoyugi/User-profile
|
ba68b3f6a5a631e4235f70b351b86eb8e9061d45
|
941180648a72c3761c8df8a56c9498934808d735
|
refs/heads/master
| 2023-08-13T18:04:18.997913 | 2020-05-22T13:08:07 | 2020-05-22T13:08:07 | 266,112,023 | 0 | 0 |
MIT
| 2021-09-22T19:03:36 | 2020-05-22T13:02:44 |
Python
|
UTF-8
|
Python
| false | false | 321 |
#!/home/moringaschool/Documents/Core/Week1/pingram/virtual/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from django.core.management import execute_from_command_line
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(execute_from_command_line())
|
[
"[email protected]"
] | ||
e67646d83b215b610ac774793c91227ab79589ca
|
2bcf18252fa9144ece3e824834ac0e117ad0bdf3
|
/Kraken/tags/0.2/release_the_kraken.py.example
|
d4a845fa0a3f49dc07d99e0751d2aa7fb8f4dee9
|
[
"Beerware"
] |
permissive
|
chadwhitacre/public
|
32f65ba8e35d38c69ed4d0edd333283a239c5e1d
|
0c67fd7ec8bce1d8c56c7ff3506f31a99362b502
|
refs/heads/master
| 2021-05-10T14:32:03.016683 | 2010-05-13T18:24:20 | 2010-05-13T18:24:20 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 792 |
example
|
#!/usr/local/bin/python
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE":
# <[email protected]> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. --Chad Whitacre
# ----------------------------------------------------------------------------
###### change this value ######
LAIR = '/path/to/Kraken'
# This is here so that we work properly when called from cron. It can't be in
# conf/kraken.conf because we need it to get there! Catch-22! If you know the
# right way to do this please let me know! !!!
###############################
from Kraken import Kraken
k = Kraken(LAIR)
k.release()
|
[
"[email protected]"
] | |
7635aee8611720059e84cdb29ab3d3f1adff70a0
|
3afe7348e830a0c5139fb7cf393736e18b59ab4a
|
/src/clusterfuzz/_internal/platforms/linux/lkl/kernel_utils.py
|
7a30a1d4229158f8a6c2d24ce8a1809175c497ae
|
[
"Apache-2.0"
] |
permissive
|
google/clusterfuzz
|
00845899e081dbbb89b70a75ce0b7eba3da73b02
|
6501a839b27a264500244f32bace8bee4d5cb9a2
|
refs/heads/master
| 2023-09-03T17:34:17.821599 | 2023-09-01T16:11:51 | 2023-09-01T16:11:51 | 168,060,021 | 5,420 | 639 |
Apache-2.0
| 2023-09-13T16:40:54 | 2019-01-29T00:19:40 |
Python
|
UTF-8
|
Python
| false | false | 2,768 |
py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Linux Kernel Library kernel utils functions."""
import os
from clusterfuzz._internal.base import utils
from clusterfuzz._internal.platforms.android import symbols_downloader
from clusterfuzz._internal.system import environment
from . import constants
def _should_download_symbols():
"""Return True if we should continue to download symbols."""
# For local testing, we do not have access to the cloud storage bucket with
# the symbols. In this case, just bail out.
return not environment.get_value('LOCAL_DEVELOPMENT')
def get_kernel_prefix_and_full_hash(build_id):
"""Download repo.prop and return the full hash and prefix."""
android_kernel_repo_data = _get_repo_prop_data(build_id,
constants.LKL_BUILD_TARGET)
if android_kernel_repo_data:
for line in android_kernel_repo_data.splitlines():
if line.startswith(constants.LKL_REPO_KERNEL_PREFIX):
# line is of form: prefix u'hash'
return (constants.LKL_REPO_KERNEL_PREFIX, line.split(' ',
1)[1].strip('u\''))
return None, None
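# Illustration (hypothetical values) of the repo.prop line format parsed
# above -- "<prefix> u'<full kernel hash>'":
#   line = "kernel/private u'0123abcd'"
#   line.split(' ', 1)[1].strip('u\'')  ->  '0123abcd'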
def _get_repo_prop_data(build_id, fuzz_target):
"""Downloads repo.prop and returuns the data based on build_id and target."""
symbols_directory = os.path.join(
environment.get_value('SYMBOLS_DIR'), fuzz_target)
repro_filename = symbols_downloader.get_repo_prop_archive_filename(
build_id, fuzz_target)
# Grab repo.prop, it is not on the device nor in the build_dir.
_download_kernel_repo_prop_if_needed(symbols_directory, build_id, fuzz_target)
local_repo_path = utils.find_binary_path(symbols_directory, repro_filename)
if local_repo_path and os.path.exists(local_repo_path):
return utils.read_data_from_file(local_repo_path, eval_data=False).decode()
return None
def _download_kernel_repo_prop_if_needed(symbols_directory, build_id,
fuzz_target):
"""Downloads the repo.prop for an LKL fuzzer"""
if not _should_download_symbols():
return
symbols_downloader.download_repo_prop_if_needed(
symbols_directory, build_id, fuzz_target, [fuzz_target], 'lkl_fuzzer')
|
[
"[email protected]"
] | |
4e25e31332ac40a4a1cd89fc7e30e4ff9596fc27
|
3b6b8223598de9ec75d827945a613f75c3c6f132
|
/03-Spider/6_my_ex_se_aio/lagou_se.py
|
bf6610509f18c30b22e6c8a365df3ec2082f4d61
|
[] |
no_license
|
Flavio58it/grocery
|
32380a145d59fb5604c44cd1d7cfe50fedb1e0dd
|
23e35d4b67208542344985b5e4865b7d5d314be4
|
refs/heads/master
| 2022-10-01T17:26:26.409840 | 2018-11-20T11:05:31 | 2018-11-20T11:05:31 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,150 |
py
|
"""
使用自动化测试工具 selenium 和 BeautifulSoup 抓取 拉钩网的职位信息
"""
import time
import pymongo
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
def get_html(url, keywords):
"""
获取 页面 返回获取的页面列表
:param url: 目标网站 这是是 拉钩网
:param keywords: 搜索的关键字
:return: 获取的页面列表
"""
# 存放 获取的页面的容器, 最后返回此容器
page_html_list = []
chome_options = Options()
chome_options.add_argument('--headless')
chome_options.add_argument('--disable-gpu')
chromeDriver = 'D:/00venv/soft/chromedriver_win32/chromedriver.exe'
# 后台运行
browser = webdriver.Chrome(chromeDriver, chrome_options=chome_options)
# 不是后台运行
# browser = webdriver.Chrome(chromeDriver)
# 后台运行 使用 phantomjs 下载:http://phantomjs.org/download.html
# chromeDriver = r"D:\00venv\soft\phantomjs-2.1.1-windows\bin\phantomjs.exe"
# browser = webdriver.PhantomJS(chromeDriver)
browser.get(url) # 获取页面首页
time.sleep(3)
# 首页 弹框 需要选择城市 这里选择的是成都
try:
browser.find_element_by_xpath('//*[@id="changeCityBox"]/ul/li[7]/a').click()
time.sleep(2)
except:
try:
browser.find_element_by_xpath('//*[@id="filterCollapse"]/div[1]/div[2]/li/div[1]/a[1]').click()
except:
pass
pass
# 全国
# all_in_china = browser.find_element_by_xpath('//*[@id="filterCollapse"]/div[1]/div[2]/li/div[1]/a[1]')
# 切换到 全国进行查找
# all_in_china.click()
# time.sleep(2)
# 其他城市 a[1] - a[13] 更多需要切换页面 暂时就这么多
# 可以通过循环来 获取 这里暂时不写
# city = browser.find_element_by_xpath('//*[@id="filterCollapse"]/div[1]/div[2]/li/div[2]/div/a[4]')
# 进入页面后 搜索的 元素框是不变的, 所有可以放在外面, 只需要在循环中添加关键字就行
search = browser.find_element_by_xpath('//*[@id="search_input"]')
for keyword in keywords:
# 将关键字写入到搜索框中
search.send_keys(keyword)
# 点击搜索
browser.find_element_by_xpath('//*[@id="search_button"]').click()
# 点击事件后 休眠 2 秒 等待页面全部加载出来
time.sleep(2)
# 第一次获取失败后 尝试的 次数, 这里设置的是三次,三次还获取不到,进入下一页
retry_time = 0
# 默认是第一页, 换下一页从 2 开始
page_num = 2
# 设置标志为, 循环终止条件
flag = True
while flag:
# 下一页
try:
next_page = browser.find_element_by_xpath('//*[@id="s_position_list"]/div[2]/div/span[%s]' % str(page_num))
next_page.click()
time.sleep(2)
# 获取页面
page_html = browser.page_source
# 页面添加到列表中
page_html_list.append(page_html)
# 一次获取成功 页码加 1
page_num += 1
# 判断下一页的 下一页 因为最后有 next 这个按钮, 判断 next 后还有没有元素 来终止循环
try:
browser.find_element_by_xpath('//*[@id="s_position_list"]/div[2]/div/span[%s]' % str(page_num + 1))
except:
flag = False
except:
retry_time += 1
print('第 %s 页,第 %s 尝试抓取!' % (page_num, retry_time))
if retry_time > 3:
print('结束获取页面')
page_num += 1
# 关闭浏览器
browser.quit()
return page_html_list
def main():
# 本地
# mongo = pymongo.MongoClient('mongodb://127.0.0.1:27017')
# 阿里云
mongo = pymongo.MongoClient('mongodb://39.104.171.126:10004')
db = mongo.spider
url = 'https://www.lagou.com/'
keywords = ['python']
# keywords = ['python', '爬虫', '大数据']
page_html_list = get_html(url, keywords) # 获取所有的网页信息
for page_html in page_html_list:
page = BeautifulSoup(page_html, 'lxml') # 初始化 bs 对象
company_list = page.find_all('div', {'class', 'list_item_top'}) # 获取每页的公司列表
for company in company_list: # 遍历 获取需要的信息
company_name = company.find("", {'class': "company_name"}).find('a').get_text()
job = company.find('h3').get_text()
salary = company.find('span', {'class': 'money'}).get_text()
# 插入数据库
db.lagou.insert({'公司:': company_name, '职位:': job, '工资:': salary})
print('获取拉钩网数据完毕!')
if __name__ == '__main__':
main()
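# Added read-back sketch (assumes the same MongoDB instance main() writes to):
# import pymongo
# mongo = pymongo.MongoClient('mongodb://39.104.171.126:10004')
# for doc in mongo.spider.lagou.find().limit(5):
#     print(doc)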
|
[
"[email protected]"
] | |
3110e4e698a996e1e82b66366b19e9b17f240a2c
|
9da8d60ba0c37a8f5d1f4a7ea8f33f7996b9f1bf
|
/39.No_Idea!.py
|
831a88d5f728e99974a73893bab037b24c3ef517
|
[] |
no_license
|
save6/HackerRank
|
b7e764200e813453fe6037512652f1c1df1fdff3
|
da7038b586399e599fdd9e96f7c3b599d928f6a7
|
refs/heads/master
| 2023-08-25T13:36:05.632435 | 2021-10-27T22:19:43 | 2021-10-27T22:19:43 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 376 |
py
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
if __name__ == '__main__':
    _ = input()  # the two sizes are not needed
    answer = list(input().split())
    list_A = set(input().split())  # sets give O(1) membership tests
    list_B = set(input().split())
    happiness = 0
    for a in answer:
        if a in list_A:
            happiness += 1
        if a in list_B:
            happiness -= 1
    print(happiness)
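# Worked example (made-up input): answer = ['1', '5', '3'], list_A = {'1', '3'},
# list_B = {'5', '7'} -> +1 ('1' in A) - 1 ('5' in B) + 1 ('3' in A) = 1.
# Each membership test is O(1), so the whole pass is O(n).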
|
[
"[email protected]"
] | |
ed2fbd6f401e303eb8cc4479a60c8bd94a1b8e22
|
377cbbe140fd0faf1eb53ba3794de816ac307cde
|
/src/experiment/TrainModelExperiment.py
|
c8b657d6d4c6fbbf74afb4ddf14ee5a51f53cb48
|
[
"MIT"
] |
permissive
|
dhruvtapasvi/implementation
|
fcbd7ab8e7b1368a0f07ee41dc5f0b6d6708c206
|
964980f431517f4548a87172a05107cdf700fb84
|
refs/heads/master
| 2021-09-16T01:47:50.601661 | 2018-05-17T19:22:44 | 2018-05-17T19:22:44 | 114,498,055 | 1 | 0 |
MIT
| 2018-05-05T02:17:35 | 2017-12-16T23:59:13 |
Python
|
UTF-8
|
Python
| false | false | 966 |
py
|
from experiment.Experiment import Experiment
from model.VariationalAutoencoder import VariationalAutoencoder
from dataset.loader.DatasetLoader import DatasetLoader
class TrainModelExperiment(Experiment):
def __init__(self, variationalAutoencoder: VariationalAutoencoder, datasetLoader: DatasetLoader, epochs, batchSize):
self.__variationalAutoencoder = variationalAutoencoder
self.__datasetLoader = datasetLoader
self.__epochs = epochs
self.__batchSize = batchSize
def run(self):
"""
Train the model specified in the constructor with the parameters specified there too
Side effect: the model is trained
:return: The model training history
"""
(xTrain, _), (xValidation, _), _ = self.__datasetLoader.loadData()
modelTrainingHistory = self.__variationalAutoencoder.train(xTrain, xValidation, self.__epochs, self.__batchSize)
return modelTrainingHistory.history
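# Wiring sketch (the concrete VariationalAutoencoder and DatasetLoader
# implementations are project-specific assumptions):
# experiment = TrainModelExperiment(vae, loader, epochs=50, batchSize=128)
# history = experiment.run()  # per-epoch metrics from the Keras-style History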
|
[
"[email protected]"
] | |
4cdf1fd5f6441560066c488889e7395009341605
|
c364fca8ae4c896dee2c8b0dc545f4d73c8c8314
|
/unsupervised_learning/0x01-clustering/8-main_2.py
|
bca71c105dad636e7f26d8135412abb0afb57834
|
[
"MIT"
] |
permissive
|
ledbagholberton/holbertonschool-machine_learning
|
7672509d2dc1775bd6708430d244e8f4dd4cb169
|
eaf23423ec0f412f103f5931d6610fdd67bcc5be
|
refs/heads/master
| 2020-12-22T01:12:32.824436 | 2020-10-11T12:36:48 | 2020-10-11T12:36:48 | 236,623,497 | 1 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 679 |
py
|
#!/usr/bin/env python3
import numpy as np
EM = __import__('8-EM').expectation_maximization
if __name__ == '__main__':
np.random.seed(11)
a = np.random.multivariate_normal([30, 40], [[75, 5], [5, 75]], size=10000)
b = np.random.multivariate_normal([5, 25], [[16, 10], [10, 16]], size=750)
c = np.random.multivariate_normal([60, 30], [[16, 0], [0, 16]], size=750)
d = np.random.multivariate_normal([20, 70], [[35, 10], [10, 35]], size=1000)
X = np.concatenate((a, b, c, d), axis=0)
np.random.shuffle(X)
k = 4
pi, m, S, g, l = EM(X, k, tol=0., verbose=True)
print(pi)
print(m)
print(S)
print(g)
print(l)
|
[
"[email protected]"
] | |
baba214cbfbc300cbbf3cfac8cea9aa9c1149d96
|
8f205d31e8e5555d69e0a7db086a3c93de6d2806
|
/task_scripts/merge_overlaps.py
|
7bb49e8383e4cfd29478c39d245d5a50face9f88
|
[
"MIT"
] |
permissive
|
torms3/Synaptor
|
94e0f04478118399db91d79a8a8b478858fd4138
|
5de74aa61b3d04e88e6bc4c336d543f89d64b9a4
|
refs/heads/master
| 2021-05-21T19:08:43.625841 | 2020-06-19T23:10:47 | 2020-06-19T23:10:47 | 252,764,824 | 0 | 0 |
NOASSERTION
| 2020-04-03T15:03:17 | 2020-04-03T15:03:16 | null |
UTF-8
|
Python
| false | false | 384 |
py
|
"""
Merge Edges Wrapper Script
Merges overlap matrices together
Writes the segments of max overlap
"""
import synaptor as s
import argparse
parser = argparse.ArgumentParser()
# Inputs & Outputs
parser.add_argument("storagestr")
parser.add_argument("--timing_tag", default=None)
args = parser.parse_args()
print(vars(args))
s.proc.tasks_w_io.merge_overlaps_task(**vars(args))
|
[
"[email protected]"
] | |
ccfd5f583065345312c7dcfd1cf83f32899952c9
|
4388363ba45b95910c25bae3d9c02ad78f4a75d6
|
/python/anaconda/pkgs/anaconda-project-0.6.0-py27_0/lib/python2.7/site-packages/anaconda_project/internal/conda_api.py
|
dea5e87e6955e8f321d11c6259f9edacb6dce114
|
[] |
no_license
|
locolucco209/MongoScraper
|
d494e02531f4f165b1e821633dc9661c579337b5
|
74476c9f00ee43338af696da7e9cd02b273f9005
|
refs/heads/master
| 2022-11-25T19:09:27.248747 | 2018-07-10T03:54:06 | 2018-07-10T03:54:06 | 137,553,786 | 3 | 1 | null | 2022-11-16T04:32:26 | 2018-06-16T04:49:22 | null |
UTF-8
|
Python
| false | false | 23,301 |
py
|
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------------
# Copyright © 2016, Continuum Analytics, Inc. All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, print_function, division, unicode_literals
import collections
import errno
import subprocess
import json
import os
import platform
import re
import shutil
import sys
import tempfile
from anaconda_project.internal import logged_subprocess
from anaconda_project.internal.directory_contains import subdirectory_relative_to_directory
from anaconda_project.internal.py2_compat import is_string
class CondaError(Exception):
"""General Conda error."""
def __init__(self, message, json=None):
super(CondaError, self).__init__(message)
self.json = json
class CondaEnvExistsError(CondaError):
"""Conda environment already exists."""
pass
# this function exists so we can monkeypatch it in tests
def _get_conda_command(extra_args):
# just use whatever conda is on the path
cmd_list = ['conda']
cmd_list.extend(extra_args)
return cmd_list
# This is obviously ridiculous; we'll work to
# find a better way (at least in newer versions
# of conda).
def _platform_hacked_conda_code(platform, bits):
return """import conda
try:
# this is conda 4.2 and 4.3
# fix whether default channels have msys
import conda.base.constants
from conda.base.constants import DEFAULT_CHANNELS_UNIX, DEFAULT_CHANNELS_WIN
if "{platform}" == 'win':
corrected_channels = DEFAULT_CHANNELS_WIN
else:
corrected_channels = DEFAULT_CHANNELS_UNIX
setattr(conda.base.constants, 'DEFAULT_CHANNELS', corrected_channels)
from conda.base.context import Context
class KapselHackedContext(Context):
@property
def subdir(self):
return "{platform}-{bits}"
@property
def bits(self):
return {bits}
setattr(conda.base.context.context, "__class__", KapselHackedContext)
except ImportError:
# this is conda 4.1
import conda.config
setattr(conda.config, "platform", "{platform}")
setattr(conda.config, "bits", "{bits}")
setattr(conda.config, "subdir", "{platform}-{bits}")
# fix up the default urls
msys_url = 'https://repo.continuum.io/pkgs/msys2'
if "{platform}" == "win":
if msys_url not in conda.config.defaults_:
conda.config.defaults_.append(msys_url)
else:
if msys_url in conda.config.defaults_:
conda.config.defaults_.remove(msys_url)
import conda.cli
import sys
sys.argv[0] = "conda"
sys.exit(conda.cli.main())
""".format(platform=platform,
bits=bits).strip() + "\n"
def _get_platform_hacked_conda_command(extra_args, platform):
"""Get conda command and a string representing it in error messages."""
if platform == current_platform() or platform is None:
cmd_list = _get_conda_command(extra_args)
return (cmd_list, " ".join(cmd_list))
else:
(platform_name, bits) = platform.split("-")
conda_code = _platform_hacked_conda_code(platform_name, bits)
# this has to run with the python from the root env,
# so the conda modules will be found.
root_prefix = _get_root_prefix()
root_python = None
for location in (('bin', 'python'), ('python.exe', ), ('Scripts', 'python.exe'),
('Library', 'bin', 'python.exe')):
candidate = os.path.join(root_prefix, *location)
if os.path.isfile(candidate):
root_python = candidate
break
assert root_python is not None
cmd_list = [root_python, '-c', conda_code]
cmd_list.extend(extra_args)
return (cmd_list, " ".join(["conda"] + cmd_list[3:]))
def _call_conda(extra_args, json_mode=False, platform=None):
assert len(extra_args) > 0 # we deref extra_args[0] below
(cmd_list, command_in_errors) = _get_platform_hacked_conda_command(extra_args, platform=platform)
try:
p = logged_subprocess.Popen(cmd_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise CondaError("failed to run: %r: %r" % (command_in_errors, repr(e)))
(out, err) = p.communicate()
errstr = err.decode().strip()
if p.returncode != 0:
parsed = None
message = errstr
if json_mode:
try:
parsed = json.loads(out.decode())
if parsed is not None and isinstance(parsed, dict):
# some versions of conda do 'error' and others
# both 'error' and 'message' and they appear to
# be the same.
for field in ('message', 'error'):
if field in parsed:
message = parsed[field]
break
except Exception:
pass
raise CondaError('%s: %s' % (command_in_errors, message), json=parsed)
elif errstr != '':
for line in errstr.split("\n"):
print("%s %s: %s" % ("conda", extra_args[0], line), file=sys.stderr)
return out
def _call_and_parse_json(extra_args, platform=None):
out = _call_conda(extra_args, json_mode=True, platform=platform)
try:
return json.loads(out.decode())
except ValueError as e:
raise CondaError('Invalid JSON from conda: %s' % str(e))
def info(platform=None):
"""Return a dictionary with configuration information.
No guarantee is made about which keys exist. Therefore this function
should only be used for testing and debugging.
"""
return _call_and_parse_json(['info', '--json'], platform=platform)
def resolve_env_to_prefix(name_or_prefix):
"""Convert an env name or path into a canonical prefix path.
Returns:
Absolute path of prefix or None if it isn't found.
"""
if os.path.isabs(name_or_prefix):
return name_or_prefix
json = info()
root_prefix = json.get('root_prefix', None)
if name_or_prefix == 'root':
return root_prefix
envs = json.get('envs', [])
for prefix in envs:
if os.path.basename(prefix) == name_or_prefix:
return prefix
return None
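# Illustrative behavior (a sketch derived from the code above, paths assumed):
#   resolve_env_to_prefix('/opt/conda/envs/foo') -> '/opt/conda/envs/foo'  (absolute paths pass through)
#   resolve_env_to_prefix('root')                -> info()['root_prefix']
#   resolve_env_to_prefix('foo')                 -> the env whose basename is 'foo', else None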
_cached_root_prefix = None
def _get_root_prefix():
global _cached_root_prefix
if _cached_root_prefix is None:
_cached_root_prefix = resolve_env_to_prefix('root')
return _cached_root_prefix
def create(prefix, pkgs=None, channels=()):
"""Create an environment either by name or path with a specified set of packages."""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to install into new environment')
if os.path.exists(prefix):
raise CondaEnvExistsError('Conda environment [%s] already exists' % prefix)
cmd_list = ['create', '--yes', '--quiet', '--prefix', prefix]
for channel in channels:
cmd_list.extend(['--channel', channel])
cmd_list.extend(pkgs)
return _call_conda(cmd_list)
def install(prefix, pkgs=None, channels=()):
"""Install packages into an environment either by name or path with a specified set of packages."""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to install into existing environment, not %r',
pkgs)
cmd_list = ['install', '--yes', '--quiet']
cmd_list.extend(['--prefix', prefix])
for channel in channels:
cmd_list.extend(['--channel', channel])
cmd_list.extend(pkgs)
return _call_conda(cmd_list)
def remove(prefix, pkgs=None):
"""Remove packages from an environment either by name or path."""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to remove from existing environment')
cmd_list = ['remove', '--yes', '--quiet']
cmd_list.extend(['--prefix', prefix])
cmd_list.extend(pkgs)
return _call_conda(cmd_list)
def _parse_dist(dist):
# the "dist" is the basename of a package inside
# conda-meta, like "numpy-1.10.4-py34_1"
pieces = dist.rsplit('-', 2)
if len(pieces) == 3:
return tuple(pieces)
else:
return None
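# For example (derived from the rsplit above):
#   _parse_dist("numpy-1.10.4-py34_1") -> ("numpy", "1.10.4", "py34_1")
#   _parse_dist("numpy-1.10.4")        -> None  (fewer than three pieces)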
def installed(prefix):
"""Get a dict of package names to (name, version, build) tuples."""
meta_dir = os.path.join(prefix, 'conda-meta')
try:
full_names = set(fn[:-5] for fn in os.listdir(meta_dir) if fn.endswith('.json'))
except OSError as e:
if e.errno == errno.ENOENT:
full_names = set()
else:
raise CondaError(str(e))
result = dict()
for full_name in full_names:
pieces = _parse_dist(full_name)
if pieces is not None:
result[pieces[0]] = pieces
return result
def resolve_dependencies(pkgs, channels=(), platform=None):
"""Resolve packages into a full transitive list of (name, version, build) tuples."""
if not pkgs or not isinstance(pkgs, (list, tuple)):
raise TypeError('must specify a list of one or more packages to install into existing environment, not %r',
pkgs)
# even with --dry-run, conda wants to create the prefix,
# so we ensure it's somewhere out of the way.
prefix = tempfile.mkdtemp(prefix="anaconda_project_resolve_")
# conda 4.1 (and possibly other versions) will complain
# if the directory already exists. An evil attacker
# on a multiuser system could replace this with a file
# after we remove it, and then conda's mkdir would fail.
os.rmdir(prefix)
cmd_list = ['create', '--yes', '--quiet', '--json', '--dry-run', '--prefix', prefix]
for channel in channels:
cmd_list.extend(['--channel', channel])
cmd_list.extend(pkgs)
try:
parsed = _call_and_parse_json(cmd_list, platform=platform)
finally:
try:
if os.path.isdir(prefix):
shutil.rmtree(prefix)
except Exception:
pass
results = []
actions = parsed.get('actions', [])
# old conda gives us one dict, newer a list of dicts
if isinstance(actions, dict):
actions = [actions]
for action in actions:
if isinstance(action, dict):
links = action.get('LINK', [])
for link in links:
found = None
# 4.1 conda gives us a string like
# 'python-3.6.0-0 2' and 4.3 gives us a
# dict with the fields already decomposed.
if isinstance(link, dict):
name = link.get('name', None)
version = link.get('version', None)
build_string = link.get('build_string', None)
if name is not None and \
version is not None and \
build_string is not None:
found = (name, version, build_string)
elif is_string(link):
# we have a string like 'python-3.6.0-0 2'
pieces = link.split()
if len(pieces) > 0:
# 'found' can be None if we didn't understand the string
found = _parse_dist(pieces[0])
if found is not None:
results.append(found)
if len(results) == 0:
raise CondaError("Could not understand JSON from Conda, could be a problem with this Conda version.",
json=parsed)
return results
def _contains_conda_meta(path):
conda_meta = os.path.join(path, "conda-meta")
return os.path.isdir(conda_meta)
def _is_conda_bindir_unix(path):
if path.endswith("/"):
path = path[:-1]
if not path.endswith("/bin"):
return False
possible_prefix = os.path.dirname(path)
return _contains_conda_meta(possible_prefix)
def _path_endswith_windows(path, suffix):
if path.endswith("\\") or path.endswith("/"):
path = path[:-1]
replaced = suffix.replace("\\", "/")
return path.endswith("\\" + suffix) or \
path.endswith("/" + suffix) or \
path.endswith("\\" + replaced) or \
path.endswith("/" + replaced)
def _is_conda_bindir_windows(path):
# on Windows there are three conda binary locations:
# - the prefix itself (contains python.exe)
# - prefix\Library\bin
# - prefix\Scripts
if path.endswith("\\") or path.endswith("/"):
path = path[:-1]
if _contains_conda_meta(path):
return True
elif _path_endswith_windows(path, "Library\\bin"):
possible_prefix = os.path.dirname(os.path.dirname(path))
return _contains_conda_meta(possible_prefix)
elif _path_endswith_windows(path, "Scripts"):
possible_prefix = os.path.dirname(path)
return _contains_conda_meta(possible_prefix)
else:
return False
def _windows_bindirs(prefix):
# activate.bat in conda-env does it in this order, [ prefix, Scripts, Library\bin ]
dirs = [prefix]
for item in ("Scripts", "Library\\bin"):
dirs.append(os.path.join(prefix, item))
return dirs
def _unix_bindirs(prefix):
return [os.path.join(prefix, "bin")]
def _set_conda_env_in_path(path, prefix, bindirs_func, is_bindir_func):
elements = path.split(os.pathsep)
new_elements = []
if prefix is not None:
new_elements = bindirs_func(prefix)
for element in elements:
if element != "" and not is_bindir_func(element):
new_elements.append(element)
return os.pathsep.join(new_elements)
def _set_conda_env_in_path_unix(path, prefix):
return _set_conda_env_in_path(path, prefix, _unix_bindirs, _is_conda_bindir_unix)
def _set_conda_env_in_path_windows(path, prefix):
return _set_conda_env_in_path(path, prefix, _windows_bindirs, _is_conda_bindir_windows)
def set_conda_env_in_path(path, prefix):
"""Remove any existing conda envs in the given path string, then add the given one.
Args:
path (str): value of the PATH environment variable
prefix (str): the environment prefix, or None to remove all conda bindirs
Returns:
the new PATH value
"""
if platform.system() == 'Windows':
return _set_conda_env_in_path_windows(path, prefix)
else:
return _set_conda_env_in_path_unix(path, prefix)
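# A sketch of the intended effect on Unix (hypothetical paths, and assuming
# /envs/old contains a conda-meta directory so it is recognized as a conda env):
#   set_conda_env_in_path("/envs/old/bin:/usr/bin", "/envs/new")
#     -> "/envs/new/bin:/usr/bin"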
ParsedSpec = collections.namedtuple('ParsedSpec', ['name', 'conda_constraint', 'pip_constraint', 'exact_version',
'exact_build_string'])
# this is copied from conda
_spec_pat = re.compile(r'''
(?P<name>[^=<>!\s]+) # package name
\s* # ignore spaces
(
(?P<cc>=[^=<>!]+(=[^=<>!]+)?) # conda constraint
|
(?P<pc>[=<>!]{1,2}.+) # new (pip-style) constraint(s)
)?
$ # end-of-line
''', re.VERBOSE)
_conda_constraint_pat = re.compile('=(?P<version>[^=<>!]+)(?P<build>=[^=<>!]+)?', re.VERBOSE)
def parse_spec(spec):
"""Parse a package name and version spec as conda would.
Returns:
``ParsedSpec`` or None on failure
"""
if not is_string(spec):
raise TypeError("Expected a string not %r" % spec)
m = _spec_pat.match(spec)
if m is None:
return None
name = m.group('name').lower()
pip_constraint = m.group('pc')
if pip_constraint is not None:
pip_constraint = pip_constraint.replace(' ', '')
conda_constraint = m.group('cc')
exact_version = None
exact_build_string = None
if conda_constraint is not None:
m = _conda_constraint_pat.match(conda_constraint)
assert m is not None
exact_version = m.group('version')
for special in ('|', '*', ','):
if special in exact_version:
exact_version = None
break
if exact_version is not None:
exact_build_string = m.group('build')
if exact_build_string is not None:
assert exact_build_string[0] == '='
exact_build_string = exact_build_string[1:]
return ParsedSpec(name=name,
conda_constraint=conda_constraint,
pip_constraint=pip_constraint,
exact_version=exact_version,
exact_build_string=exact_build_string)
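# Illustrative parses (derived from the regexes above):
#   parse_spec("numpy=1.10=py34_1") -> name="numpy", conda_constraint="=1.10=py34_1",
#                                      exact_version="1.10", exact_build_string="py34_1"
#   parse_spec("numpy>=1.10")       -> name="numpy", pip_constraint=">=1.10"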
# these are in order of preference. On pre-4.1.4 Windows,
# CONDA_PREFIX and CONDA_ENV_PATH aren't set, so we get to
# CONDA_DEFAULT_ENV.
_all_prefix_variables = ('CONDA_PREFIX', 'CONDA_ENV_PATH', 'CONDA_DEFAULT_ENV')
def conda_prefix_variable():
# conda 4.1.4 and higher sets CONDA_PREFIX to the full prefix,
# and CONDA_DEFAULT_ENV to the env name only, cross-platform.
# Pre-4.1.4, on Windows, activate.bat never sets
# CONDA_ENV_PATH but sets CONDA_DEFAULT_ENV to the full
# path to the environment.
# Pre-4.1.4, on Unix, activate script sets CONDA_ENV_PATH
# to the full path, and sets CONDA_DEFAULT_ENV to either
# just the env name or the full path.
# if we're in a conda environment, then use CONDA_PREFIX if it
# was set by conda, otherwise use CONDA_ENV_PATH if set,
# otherwise use CONDA_DEFAULT_ENV if set.
for name in _all_prefix_variables:
if name in os.environ:
return name
# if we aren't in a conda environment, just hope we have a
# newer conda...
return 'CONDA_PREFIX'
def environ_get_prefix(environ):
for name in _all_prefix_variables:
if name in environ:
return environ.get(name)
return None
def environ_delete_prefix_variables(environ):
for name in _all_prefix_variables:
if name in environ:
del environ[name]
_envs_dirs = None
_root_dir = None
def environ_set_prefix(environ, prefix, varname=conda_prefix_variable()):
prefix = os.path.normpath(prefix)
environ[varname] = prefix
if varname != 'CONDA_DEFAULT_ENV':
# This case matters on both Unix and Windows
# with conda >= 4.1.4 since requirement.env_var
# is CONDA_PREFIX, and matters on Unix only pre-4.1.4
# when requirement.env_var is CONDA_ENV_PATH.
global _envs_dirs
global _root_dir
if _envs_dirs is None:
i = info()
_envs_dirs = [os.path.normpath(d) for d in i.get('envs_dirs', [])]
_root_dir = os.path.normpath(i.get('root_prefix'))
if prefix == _root_dir:
name = 'root'
else:
for d in _envs_dirs:
name = subdirectory_relative_to_directory(prefix, d)
if name != prefix:
break
environ['CONDA_DEFAULT_ENV'] = name
# This isn't everything (e.g. it leaves out arm, power); it's sort of "all
# that people typically publish for".
default_platforms = ('linux-64', 'osx-64', 'win-64')
assert tuple(sorted(default_platforms)) == default_platforms
# osx-32 isn't in here since it isn't much used
default_platforms_plus_32_bit = ('linux-32', 'linux-64', 'osx-64', 'win-32', 'win-64')
assert tuple(sorted(default_platforms_plus_32_bit)) == default_platforms_plus_32_bit
_non_x86_linux_machines = {'armv6l', 'armv7l', 'ppc64le'}
# this list will get outdated, unfortunately.
_known_platforms = tuple(sorted(list(default_platforms_plus_32_bit) + ['osx-32'] + [("linux-%s" % m)
for m in _non_x86_linux_machines]))
known_platform_names = ('linux', 'osx', 'win')
assert tuple(sorted(known_platform_names)) == known_platform_names
unix_platform_names = ('linux', 'osx')
assert tuple(sorted(unix_platform_names)) == unix_platform_names
_known_platform_groups = dict()
# Fill in the 'linux', 'osx', 'win' groups
for name in known_platform_names:
result = []
for p in default_platforms_plus_32_bit:
if p.startswith(name):
result.append(p)
_known_platform_groups[name] = tuple(result)
assert tuple(sorted(_known_platform_groups[name])) == _known_platform_groups[name]
# fill in the 'unix' group
def _known_unix_platforms():
result = []
for unix_name in unix_platform_names:
for p in default_platforms_plus_32_bit:
if p.startswith(unix_name):
result.append(p)
return tuple(result)
_known_platform_groups['unix'] = _known_unix_platforms()
assert tuple(sorted(_known_platform_groups['unix'])) == _known_platform_groups['unix']
# fill in the 'all' group
_known_platform_groups['all'] = default_platforms_plus_32_bit
# this isn't just _known_platform_groups.keys() because we want to be
# in order from most to least general
_known_platform_groups_keys = ('all', 'unix') + known_platform_names
assert set(_known_platform_groups_keys) == set(_known_platform_groups.keys())
def current_platform():
m = platform.machine()
if m in _non_x86_linux_machines:
return 'linux-%s' % m
else:
_platform_map = {'linux2': 'linux', 'linux': 'linux', 'darwin': 'osx', 'win32': 'win', }
p = _platform_map.get(sys.platform, 'unknown')
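        # tuple.__itemsize__ is the per-element pointer size in bytes
        # (8 on 64-bit CPython, 4 on 32-bit), so 8 * itemsize gives the
        # platform bit width, e.g. 'linux-64'.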
return '%s-%d' % (p, (8 * tuple.__itemsize__))
_default_platforms_with_current = tuple(sorted(list(set(default_platforms + (current_platform(), )))))
def default_platforms_with_current():
return _default_platforms_with_current
def parse_platform(platform):
"""Split platform into OS name and architecture."""
assert '-' in platform
# platforms can have multiple hyphens e.g. linux-cos5-64 Our
# goal here is to separate the general name from the
# bit-width.
pieces = platform.rsplit("-", 1)
return (pieces[0], pieces[1])
def validate_platform_list(platforms):
"""Split platform list into known, unknown, and invalid platforms.
Also, sort the list into canonical order.
We return a tuple, the second list in the tuple
is a subset of the first, and indicates platforms
we don't know about. These may create a warning.
The third list is not in the other two and indicates
unusably-invalid platform names.
Returns:
        Tuple of (known, unknown, invalid) platform name lists.
"""
result = set()
unknown = set()
invalid = set()
for p in platforms:
if '-' not in p:
invalid.add(p)
else:
result.add(p)
if p not in _known_platforms:
unknown.add(p)
# unknown platforms aren't necessarily an error, we just
# don't do anything smart with them.
return (sort_platform_list(result), sort_platform_list(unknown), sort_platform_list(invalid))
def sort_platform_list(platforms):
"""Sort platform list (including "grouping" names) from more to less general."""
remaining = set(platforms)
result = []
for known in (_known_platform_groups_keys + _known_platforms):
if known in remaining:
result.append(known)
remaining.remove(known)
result = result + sorted(list(remaining))
return result
|
[
"[email protected]"
] | |
cd37b9253d5a541d3945acb4e29a7a6cc456b84b
|
f9a5e7233875989f994438ce267907d8210d60a1
|
/test/pump_sensor/metalearning/knn_ranking/RMSE/k=3/extra_trees/sensor_prediction_extraTrees_AUCPRC.py
|
c51bf8495532222f767c46f192a351e7ebb6c9e6
|
[] |
no_license
|
renoslyssiotis/When-are-Machine-learning-models-required-and-when-is-Statistics-enough
|
da8d53d44a69f4620954a32af3aacca45e1ed641
|
6af1670a74345f509c86b7bdb4aa0761c5b058ff
|
refs/heads/master
| 2022-08-29T20:21:57.553737 | 2020-05-26T18:03:46 | 2020-05-26T18:03:46 | 256,439,921 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,204 |
py
|
import sys, os, pickle
from pathlib import PurePath
current_dir = os.path.realpath(__file__)
p = PurePath(current_dir)
sys.path.append(str(p.parents[7])+'/metalearners/knn_ranking_method/RMSE')
from KNN_ranking_k_3_RMSE import KNN_ranking
#Load the selected meta-dataset after performing zero-variance threshold
with open(str(p.parents[7])+'/analysis/feature_selection/extra_trees/extra_trees_X_AUCPRC_202.pickle', 'rb') as handle:
metadataset_feature_selected = pickle.load(handle)
#=====================META-FEATURE EXTRACTION==================================
with open(str(p.parents[5])+'/actual/sensor_metafeatures_202.pickle', 'rb') as handle:
meta_features = pickle.load(handle)
#nested_results is a nested dictionary with all the AUC-PRC performances for each dataset and all models
with open(str(p.parents[6])+'/nested_results_prc.pickle', 'rb') as handle:
nested_results_prc = pickle.load(handle)
"""
Remove the meta-features which are not in the meta-dataset
(i.e. the features which have not been selected in the feature selection process)
"""
metafeatures_to_be_removed = []
for metafeature in meta_features.keys():
    if metafeature not in metadataset_feature_selected.columns:
        metafeatures_to_be_removed.append(metafeature)
for key in metafeatures_to_be_removed:
    meta_features.pop(key)
#========================META-LEARNING: RANKING================================
#KNN Ranking Method
top1, top2, top3 = KNN_ranking(metadataset_feature_selected, meta_features, nested_results_prc)
print("==========================================")
print(" AUC-PRC ")
print("==========================================")
print("Top 1 predicted model: " + top1)
print("Top 2 predicted model: " + top2)
print("Top 3 predicted model: " + top3)
#Actual results
with open(str(p.parents[5])+'/actual/sensor_top_3_prc.pickle', 'rb') as handle:
actual_results = pickle.load(handle)
print("==========================================")
print("Top 1 ACTUAL model: " + actual_results[0])
print("Top 2 ACTUAL model: " + actual_results[1])
print("Top 3 ACTUAL model: " + actual_results[2])
|
[
"[email protected]"
] | |
3df71c5457a9604d5a8d822930171a8250ec8280
|
66b1f3c3e57f53e1404d6e17c4acc850173a531d
|
/Python/Django/Calculator/calc_app/calc_app/settings.py
|
015d84b659a6d1c67ad8a34c0caeac9554a8f063
|
[] |
no_license
|
bMedarski/SoftUni
|
ca4d6891b3bbe7b03aad5960d2f4af5479fd8bbd
|
62cd9cb84b0826e3381c991882a4cdc27d94f8ab
|
refs/heads/master
| 2021-06-08T17:32:39.282975 | 2020-02-04T11:57:08 | 2020-02-04T11:57:08 | 67,947,148 | 6 | 3 | null | 2021-05-06T20:35:42 | 2016-09-11T18:31:02 |
Python
|
UTF-8
|
Python
| false | false | 3,094 |
py
|
"""
Django settings for calc_app project.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '34!cf75&$en$qwof)oy(^620hq4m)_j2s77u%h!7y*snuac7b#'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'calc_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'calc_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
|
[
"[email protected]"
] | |
06a1b2a6764dcc999c4dc021994100e23d2a2b93
|
52b5773617a1b972a905de4d692540d26ff74926
|
/.history/duplicate_20200619182445.py
|
9dae68d8081d41f90f083abc2f6f88532d71fbe0
|
[] |
no_license
|
MaryanneNjeri/pythonModules
|
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
|
f4e56b1e4dda2349267af634a46f6b9df6686020
|
refs/heads/master
| 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 690 |
py
|
def duplicate(s):
    # brute force approach:
    # store every substring and check how many times it occurs.
    # dictionary key is the substring, value is how many times it occurred
    words = {}
    new = []
    # two pointers enumerate every substring s[left:right]
    for left in range(len(s)):
        for right in range(left + 1, len(s) + 1):
            words[s[left:right]] = 0
    for sub in words:
        words[sub] = s.count(sub)
    for sub in words:
        if words[sub] >= 2:
            new.append(sub)
    return new

print(duplicate("geeksforgeeks"))
|
[
"[email protected]"
] | |
4981bd2b9338bf1e9c7f1e12ac1b0cbbb93b7c11
|
7beff965d7b0e6155d6d52b27d71c557421d5ada
|
/abc160/e/a.py
|
d39046088669528af74eef2618f8b38a62258db5
|
[] |
no_license
|
uk-ar/competitive_programming
|
82a53a1007798843ac006b9c7d313826e6cb45c3
|
d2523cf303f47644cada3b03e9eed2349bdbe394
|
refs/heads/master
| 2023-03-28T13:20:07.728861 | 2021-03-30T20:25:55 | 2021-03-30T20:25:55 | 249,638,234 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 327 |
py
|
#!/usr/bin/env pypy3
import sys
sys.setrecursionlimit(15000)
x,y,a,b,c = map(int,input().split())
p = list(sorted(map(int,input().split()),reverse=True))[:x] # x
q = list(sorted(map(int,input().split()),reverse=True))[:y] # y
r = list(sorted(map(int,input().split()),reverse=True))
print(sum(sorted(p+q+r,reverse=True)[:x+y]))
|
[
"[email protected]"
] | |
4976f0269c542b42bd84c779b123bc15f165d539
|
92e3a6424326bf0b83e4823c3abc2c9d1190cf5e
|
/scripts/icehouse/opt/stack/tempest/tempest/api/compute/volumes/test_volumes_get.py
|
4f77fa7dca18d39522c591d9e676dac128abe8d6
|
[
"Apache-2.0"
] |
permissive
|
AnthonyEzeigbo/OpenStackInAction
|
d6c21cf972ce2b1f58a93a29973534ded965d1ea
|
ff28cc4ee3c1a8d3bbe477d9d6104d2c6e71bf2e
|
refs/heads/master
| 2023-07-28T05:38:06.120723 | 2020-07-25T15:19:21 | 2020-07-25T15:19:21 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,143 |
py
|
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from testtools import matchers
from tempest.api.compute import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class VolumesGetTestJSON(base.BaseV2ComputeTest):
@classmethod
def resource_setup(cls):
super(VolumesGetTestJSON, cls).resource_setup()
cls.client = cls.volumes_extensions_client
if not CONF.service_available.cinder:
skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
raise cls.skipException(skip_msg)
@test.attr(type='smoke')
def test_volume_create_get_delete(self):
# CREATE, GET, DELETE Volume
volume = None
v_name = data_utils.rand_name('Volume-%s-') % self._interface
metadata = {'Type': 'work'}
# Create volume
resp, volume = self.client.create_volume(size=1,
display_name=v_name,
metadata=metadata)
self.addCleanup(self.delete_volume, volume['id'])
self.assertEqual(200, resp.status)
self.assertIn('id', volume)
self.assertIn('displayName', volume)
self.assertEqual(volume['displayName'], v_name,
"The created volume name is not equal "
"to the requested name")
self.assertTrue(volume['id'] is not None,
"Field volume id is empty or not found.")
# Wait for Volume status to become ACTIVE
self.client.wait_for_volume_status(volume['id'], 'available')
# GET Volume
resp, fetched_volume = self.client.get_volume(volume['id'])
self.assertEqual(200, resp.status)
# Verification of details of fetched Volume
self.assertEqual(v_name,
fetched_volume['displayName'],
'The fetched Volume is different '
'from the created Volume')
self.assertEqual(volume['id'],
fetched_volume['id'],
'The fetched Volume is different '
'from the created Volume')
self.assertThat(fetched_volume['metadata'].items(),
matchers.ContainsAll(metadata.items()),
'The fetched Volume metadata misses data '
'from the created Volume')
class VolumesGetTestXML(VolumesGetTestJSON):
_interface = "xml"
|
[
"[email protected]"
] | |
7715d5679012483083ebf58aa1308f7a584f95d5
|
fbfe37305712d49c84d87b2bb3ef88d3cf68cf69
|
/apps/post/migrations/0001_initial.py
|
1409752baa7f263d6454d8f513746baf8672b72a
|
[] |
no_license
|
xal9wiii4ik/social_network
|
50ba7788bbd4b53c77f69f61c5790c3a4a13b6e4
|
23d6c72bf4ed0e182570d0e55e973f22701850cd
|
refs/heads/master
| 2023-03-30T22:03:28.379419 | 2021-04-08T21:55:56 | 2021-04-08T21:55:56 | 325,549,609 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,758 |
py
|
# Generated by Django 3.1.5 on 2021-01-27 13:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Subject',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('subject', models.CharField(max_length=99, unique=True, verbose_name='Тема')),
],
options={
'verbose_name': 'Тема',
'verbose_name_plural': 'Темы',
},
),
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=30, verbose_name='Заглавие')),
('body', models.TextField(max_length=1024, verbose_name='Тело поста')),
('published_date', models.DateTimeField(auto_now_add=True, verbose_name='Дата публикации')),
('owner', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='post_owner', to=settings.AUTH_USER_MODEL)),
('subject', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='post_subject', to='post.subject')),
],
options={
'verbose_name': 'Пост',
'verbose_name_plural': 'Посты',
},
),
]
|
[
"[email protected]"
] | |
ef6aef324a83544b3a9424e5c5c1b975b4f19d4d
|
0e04214a06ef5b220f9179bd7b7a0792ea17145b
|
/genqr.py
|
2f79d2caf4616f5d43d85dd3432ef354d949bf00
|
[
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-gutenberg-2020"
] |
permissive
|
tebeka/pythonwise
|
cbeb63d4f4b2ec8d1d0df392f721c1557d9b00f4
|
e3e1cdca77224c0ff2cf1fba69d20c997224decc
|
refs/heads/master
| 2022-11-19T03:59:17.736977 | 2022-11-08T07:22:20 | 2022-11-08T07:22:20 | 45,970,082 | 23 | 8 |
BSD-3-Clause
| 2021-04-21T04:54:40 | 2015-11-11T08:46:50 |
Python
|
UTF-8
|
Python
| false | false | 1,580 |
py
|
#!/usr/bin/env python
'''Generate QR code using Google Charts API'''
import sys
# Python 3/2 compatibility
if sys.version_info[:2] < (3, 0):
from urllib import urlopen, urlencode
import httplib
stdout = sys.stdout
else:
from urllib.request import urlopen
from urllib.parse import urlencode
import http.client as httplib
stdout = sys.stdout.buffer
def gen_qr(data, size):
charts_url = 'https://chart.googleapis.com/chart'
params = [
('cht', 'qr'),
('chs', size),
('chl', data),
]
query = urlencode(params)
url = '%s?%s' % (charts_url, query)
fo = urlopen(url)
if fo.code != httplib.OK:
raise ValueError('bad reply from Google %d' % fo.code)
return fo.read()
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser(
description='Generate QR using Google Charts (PNG)')
parser.add_argument('data', help='data to encode')
    parser.add_argument('--out', '-o', help='output file name ("-" for stdout)',
default='-')
parser.add_argument('--size', '-s', help='image size (200x200)',
default='200x200')
args = parser.parse_args()
try:
img_data = gen_qr(args.data, args.size)
out = stdout if args.out == '-' else open(args.out, 'wb')
out.write(img_data)
except ValueError as err:
raise SystemExit('error: {}'.format(err))
except IOError as err:
raise SystemExit(
'error: cannot open {} for writing - {}'.format(args.out, err))
|
[
"[email protected]"
] | |
afccf24a53afcb8a7a26b451ffabd6f8218d208a
|
07b37ca45d38edea112895049acf76d96ff07eff
|
/3.Processing&UnderstadingText/stemming.py
|
7493e6263eb23c0111bcfc4069bbdb0ababcca3f
|
[] |
no_license
|
KRBhavaniSankar/NLTK
|
e335944de346be72a01c92221b0bf58d85475fb9
|
4b228338566996fbccee72cb6afaa199a6496787
|
refs/heads/master
| 2020-03-12T23:03:59.981112 | 2018-05-11T01:15:28 | 2018-05-11T01:15:28 | 130,858,622 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,539 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Fri APR 29 10:04 AM
@author: Bhavani
"""
"""
The NLTK package has several implementation for stemmers, These stemmers are implemented in the stem module , which inherits the stemmer interface in the nltk.stem.api module.
One of the most popular stemmers is the PorterStemmer. There also exists poter2 algorithm which is imporvements of original stemmig algorithm.
"""
#Porterstemmer
import collections
from nltk.stem import PorterStemmer
ps = PorterStemmer()
words_list = ["jumping","jumps","jumped","jump"]
for w in words_list:
print(ps.stem(w))
#print(ps.stem("lying"))
#print(ps.stem("strange"))
from nltk.stem import LancasterStemmer
ls = LancasterStemmer()
for w in words_list:
print(ls.stem(w))
stem_word_list = [ls.stem(w) for w in words_list]
print(stem_word_list.count('jump'))
print(stem_word_list)
print(ls.stem("lying"))
print(ls.stem("strange"))
"""
There are several other stemmers, including RegexpStemmer , where you can build
your own stemmer based on user-defined rules , and SnowballStemmer , which supports
stemming in 13 different languages besides English.
"""
#Regex Based stemmer
from nltk.stem import RegexpStemmer
rs = RegexpStemmer("ing$|s$|ed$",min=4)
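# min=4 means words shorter than four characters are returned unstemmed
# (e.g. rs.stem("is") should give back "is").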
for w in words_list:
print(rs.stem(w))
print(rs.stem("lying"))
print(rs.stem("strange"))
#Snow Ball stemmer
from nltk.stem import SnowballStemmer
ss = SnowballStemmer("german")
print("supported languages are :",SnowballStemmer.languages)
german_cars = "autobahnen"
print(ss.stem(german_cars))
|
[
"[email protected]"
] | |
dbe354eca1ad06148fab7434a384fd6262c32ff8
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03845/s040519010.py
|
af55c2d9fab44bd279e0ea967a34ce3e0dfda0cd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,079 |
py
|
import sys, os, math, bisect, itertools, collections, heapq, queue, copy, array
# from scipy.sparse.csgraph import csgraph_from_dense, floyd_warshall
# from decimal import Decimal
# from collections import defaultdict, deque
sys.setrecursionlimit(10000000)
ii = lambda: int(sys.stdin.buffer.readline().rstrip())
il = lambda: list(map(int, sys.stdin.buffer.readline().split()))
fl = lambda: list(map(float, sys.stdin.buffer.readline().split()))
iln = lambda n: [int(sys.stdin.buffer.readline().rstrip()) for _ in range(n)]
iss = lambda: sys.stdin.buffer.readline().decode().rstrip()
sl = lambda: list(map(str, sys.stdin.buffer.readline().decode().split()))
isn = lambda n: [sys.stdin.buffer.readline().decode().rstrip() for _ in range(n)]
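# e.g. il() turns an input line "3 1 4" into [3, 1, 4],
# and iln(n) reads the next n lines as individual ints.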
lcm = lambda x, y: (x * y) // math.gcd(x, y)
MOD = 10 ** 9 + 7
INF = float('inf')
def main():
if os.getenv("LOCAL"):
sys.stdin = open("input.txt", "r")
N = ii()
T = il()
M = ii()
sm = sum(T)
for m in range(M):
P, X = il()
print(sm-(T[P-1]-X))
if __name__ == '__main__':
main()
|
[
"[email protected]"
] | |
9ed21cd4df7efe6eaadf95f108b7c5e71d1d8a07
|
c7d0f590f3353b827ed34b731c2f6c53952a1f65
|
/autocv/researcher.py
|
7d8ad3376953e7b13829f6fe7f6647f44eb5fdb2
|
[
"MIT"
] |
permissive
|
Daniel-Gong/autoCV
|
811285a15c913776ced6c2ca49e8b4c625514399
|
ea620c88f46900bc177eb06775f001696c77a09d
|
refs/heads/master
| 2023-06-26T11:53:17.831465 | 2021-08-05T17:16:01 | 2021-08-05T17:16:01 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 8,008 |
py
|
"""
class for a researcher
"""
import os
import json
import requests
import scholarly
import pypatent
from .orcid import get_dois_from_orcid_record
from .pubmed import get_pubmed_data
from .publication import JournalArticle, Book, BookChapter
from .crossref import get_crossref_records, parse_crossref_record
from .utils import get_additional_pubs_from_csv, CustomJSONEncoder, get_random_hash, drop_excluded_pubs
class Researcher:
def __init__(self, param_file='params.json', basedir=None):
self.param_file = param_file
self.load_params(param_file)
self.basedir = os.path.dirname(param_file) if basedir is None else basedir
self.orcid_data = None
self.orcid_dois = None
self.pubmed_data = None
self.crossref_data = None
self.gscholar_data = None
self.patent_data = None
self.serialized = None
self.publications = None
self.rendered_latex = None
def load_params(self, param_file):
if os.path.exists(param_file):
with open(param_file) as f:
params = json.load(f)
else:
            raise FileNotFoundError("""Please create a json file called params.json
            containing the fields email (with your email address), orcid (with your ORCID id)
            and query (with your pubmed query) - see documentation for help
            """)
for field in params:
setattr(self, field, params[field])
def get_orcid_data(self, timeout=60):
orcid_url = "https://pub.orcid.org/v3.0/%s" % self.orcid
print('using ORCID URL:', orcid_url)
resp = requests.get(orcid_url,
headers={'Accept': 'application/vnd.orcid+json'},
timeout=timeout)
self.orcid_data = resp.json()
def get_orcid_dois(self):
if self.orcid_data is None:
self.get_orcid_data()
self.orcid_dois = get_dois_from_orcid_record(self.orcid_data)
def get_pubmed_data(self):
self.pubmed_data = get_pubmed_data(self.query, self.email)
print('retrieved %d full pubmed records' % len(self.pubmed_data['PubmedArticle']))
def get_google_scholar_record(self):
search_query = scholarly.scholarly.search_author(
' '.join([self.firstname, self.lastname]))
query_resp = next(search_query)
self.gscholar_data = scholarly.scholarly.fill(query_resp)
def make_publication_records(self, use_exclusions=True):
# test pubmed
self.get_pubmed_data()
pubmed_dois = []
self.publications = {}
for r in self.pubmed_data['PubmedArticle']:
pub = JournalArticle()
pub.from_pubmed(r)
pub.format_reference_latex()
pub.hash = pub.get_pub_hash()
self.publications[pub.DOI] = pub
# keep track of pubmed DOIs so that we
# don't overwrite with crossref
pubmed_dois.append(pub.DOI)
if self.orcid_data is None:
self.get_orcid_data()
if self.orcid_dois is None:
self.get_orcid_dois()
print('found %d ORCID dois' % len(self.orcid_dois))
# load orcid pubs using crossref
self.crossref_data = get_crossref_records(self.orcid_dois)
print('found %d crossref records' % len(self.crossref_data))
for c in self.crossref_data:
d = parse_crossref_record(self.crossref_data[c])
if d is not None:
# skip existing pubmed records and preprints
if d['DOI'] in pubmed_dois:
continue
if d['type'] in ['journal-article', 'proceedings-article']:
p = JournalArticle()
elif d['type'] in ['book', 'monograph']:
p = Book()
elif d['type'] == 'book-chapter':
p = BookChapter()
else:
continue
p.from_dict(d)
if hasattr(p, 'DOI'):
id = p.DOI
elif hasattr(p, 'ISBN'):
id = p.ISBN
else:
id = get_random_hash()
self.publications[id] = p
if use_exclusions:
self.publications = drop_excluded_pubs(self.publications)
print('found %d additional pubs from ORCID via crossref' % (len(self.publications) - len(pubmed_dois)))
additional_pubs_file = os.path.join(
self.basedir, 'additional_pubs.csv'
)
additional_pubs = get_additional_pubs_from_csv(additional_pubs_file)
for pub in additional_pubs:
if additional_pubs[pub]['type'] in ['journal-article', 'proceedings-article']:
self.publications[pub] = JournalArticle()
elif additional_pubs[pub]['type'] in ['book', 'monograph']:
self.publications[pub] = Book()
elif additional_pubs[pub]['type'] == 'book-chapter':
self.publications[pub] = BookChapter()
else:
print('skipping unknown type', additional_pubs[pub]['type'])
continue
self.publications[pub].from_dict(additional_pubs[pub])
def get_patents(self):
results = pypatent.Search(self.lastname).as_list()
self.patent_data = []
for r in results:
for i in r['inventors']:
fn = i[0].split(' ')[0].lower()
ln = i[1].lower()
if fn == self.firstname.lower() and ln == self.lastname.lower():
self.patent_data.append(r)
def from_json(self, filename):
with open(filename, 'r') as f:
serialized = json.load(f)
for k in serialized.keys():
if hasattr(self, k):
print('ingesting', k)
if k == 'publications':
self.publications = {}
for pub in serialized[k]:
if serialized[k][pub]['type'] in ['journal-article', 'proceedings-article']:
self.publications[pub] = JournalArticle()
elif serialized[k][pub]['type'] in ['book', 'monograph']:
self.publications[pub] = Book()
elif serialized[k][pub]['type'] == 'book-chapter':
self.publications[pub] = BookChapter()
else:
print('skipping unknown type', serialized[k][pub]['type'])
continue
self.publications[pub].from_dict(serialized[k][pub])
else:
setattr(self, k, serialized[k])
def serialize(self):
self.serialized = {}
self_dict = self.__dict__.copy()
if 'gscholar_data' in self_dict:
self.serialized['gscholar_data'] = {
'hindex': self_dict['gscholar_data']['hindex']}
self.serialized['publications'] = {}
for k, pubinfo_orig in self.publications.items():
pubinfo = pubinfo_orig.to_json()
if len(pubinfo) == 0:
print('skipping', k)
continue
else:
print('keeping', k)
# fields_to_drop = []
# for kk, subfields in pubinfo.items():
# try:
# _ = json.dumps(subfields)
# except:
# fields_to_drop.append(kk)
# for f in fields_to_drop:
# del pubinfo[f]
self.serialized['publications'][k] = pubinfo # .to_json()
def to_json(self, filename):
if self.serialized is None:
self.serialize()
with open(filename, 'w') as f:
json.dump(self.serialized, f, cls=CustomJSONEncoder,
indent=4)
|
[
"[email protected]"
] | |
5a0afac41b222f0e907588c7a0cd449a2394cad6
|
5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d
|
/alipay/aop/api/domain/AntMerchantExpandMerchantTypeQueryModel.py
|
73349409d748e7a00a24a09e560029204227e6f5
|
[
"Apache-2.0"
] |
permissive
|
alipay/alipay-sdk-python-all
|
8bd20882852ffeb70a6e929038bf88ff1d1eff1c
|
1fad300587c9e7e099747305ba9077d4cd7afde9
|
refs/heads/master
| 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 |
Apache-2.0
| 2023-04-25T04:54:02 | 2018-05-14T09:40:54 |
Python
|
UTF-8
|
Python
| false | false | 890 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AntMerchantExpandMerchantTypeQueryModel(object):
def __init__(self):
self._user_id = None
@property
def user_id(self):
return self._user_id
@user_id.setter
def user_id(self, value):
self._user_id = value
def to_alipay_dict(self):
params = dict()
if self.user_id:
if hasattr(self.user_id, 'to_alipay_dict'):
params['user_id'] = self.user_id.to_alipay_dict()
else:
params['user_id'] = self.user_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AntMerchantExpandMerchantTypeQueryModel()
if 'user_id' in d:
o.user_id = d['user_id']
return o
|
[
"[email protected]"
] | |
867a322b8acc7b3c225d39448b01c645175ae915
|
e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f
|
/indices/nnemblazon.py
|
096d32b63a5bf9f6295011672fc96fef1faa3ca7
|
[] |
no_license
|
psdh/WhatsintheVector
|
e8aabacc054a88b4cb25303548980af9a10c12a8
|
a24168d068d9c69dc7a0fd13f606c080ae82e2a6
|
refs/heads/master
| 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 |
Python
|
UTF-8
|
Python
| false | false | 333 |
py
|
ii = [('KembFFF.py', 1), ('ShawHDE.py', 6), ('WilkJMC3.py', 1), ('CarlTFR.py', 1), ('RoscTTI2.py', 1), ('IrviWVD.py', 1), ('DibdTRL2.py', 2), ('AinsWRR.py', 2), ('LandWPA2.py', 1), ('HowiWRL2.py', 2), ('RogeSIP.py', 1), ('DibdTRL.py', 1), ('BeckWRE.py', 1), ('DibdTBR.py', 2), ('KeigTSS.py', 1), ('ClarGE4.py', 2), ('AdamJOA.py', 1)]
|
[
"[email protected]"
] | |
6ac230540f541b35846d2ee4981281291f47efc3
|
987697512ce9b8d7c29bfd2f18d5aec0261a6863
|
/二叉树的层次遍历II.py
|
b6a34e636804c1c42d8861937870a5d1564c2981
|
[] |
no_license
|
Luckyaxah/leetcode-python
|
65e7ff59d6f19312defdc4d4b4103c39193b198a
|
2b9c78ba88e7bf74a46a287fb1914b4d6ba9af38
|
refs/heads/master
| 2023-06-05T12:15:31.618879 | 2021-06-22T13:05:30 | 2021-06-22T13:05:30 | 262,287,940 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 621 |
py
|
from 二叉树类 import TreeNode
class Solution:
def levelOrderBottom(self, root: TreeNode) :
        if not root:
            return []  # an empty tree has no levels
ret = []
def fun(root,level):
if not root:
return
if len(ret)<level+1:
ret.append([root.val])
else:
ret[level].append(root.val)
fun(root.left,level+1)
fun(root.right,level+1)
fun(root,0)
return list(reversed(ret))
if __name__ == "__main__":
a = Solution()
t = TreeNode([3,9,20,None,None,15,7])
print(a.levelOrderBottom(t))
|
[
"[email protected]"
] | |
e184a01c1505720f9c09cf93c097dc09449403fa
|
f707303e4dfe383cf82c23a6bb42ccfdc4cfdb67
|
/pandas-ml-utils/pandas_ml_utils_test/ml/model/test_skmodel_accuracy.py
|
14b0da71b9536c9f63f59bff4a5256abf2c77f00
|
[
"MIT"
] |
permissive
|
jcoffi/pandas-ml-quant
|
1830ec256f8c09c04f1aa77e2eecfba07d34fe68
|
650a8e8f77bc4d71136518d1c7ee65c194a99cf0
|
refs/heads/master
| 2023-08-31T06:45:38.060737 | 2021-09-09T04:44:35 | 2021-09-09T04:44:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,073 |
py
|
from pandas_ml_utils import FittingParameter
from pandas_ml_utils_test.ml.model.test_model_accuracy import TestModelAccuracy
class TestSkModelAccuracy(TestModelAccuracy):
def provide_linear_regression_model(self):
from sklearn.linear_model import LinearRegression
from sklearn.neural_network import MLPRegressor
from pandas_ml_utils import FeaturesAndLabels, SkModel
return [
(
SkModel(LinearRegression(), FeaturesAndLabels(["x"], ["y"])),
FittingParameter(epochs=1, fold_epochs=1, context="LinearRegression")
),
(
SkModel(
MLPRegressor(10, learning_rate_init=0.01, max_iter=9000, validation_fraction=0),
FeaturesAndLabels(["x"], ["y"])
),
FittingParameter(epochs=1, fold_epochs=1, context="MLPRegressor")
),
(
SkModel(
MLPRegressor(10, learning_rate_init=0.01, max_iter=1, validation_fraction=0, warm_start=True),
FeaturesAndLabels(["x"], ["y"])
),
FittingParameter(epochs=9000, fold_epochs=1, context="MLPRegressor partial fit")
)
]
def provide_non_linear_regression_model(self):
from sklearn.neural_network import MLPRegressor
from pandas_ml_utils import FeaturesAndLabels, SkModel
return [
(
SkModel(
MLPRegressor(200, learning_rate_init=0.001, max_iter=5000, validation_fraction=0),
FeaturesAndLabels(["x"], ["y"])
),
FittingParameter(epochs=1, context="epoch 1 fit"),
),
(
SkModel(
MLPRegressor(200, learning_rate_init=0.001, max_iter=1, validation_fraction=0, warm_start=True),
FeaturesAndLabels(["x"], ["y"])
),
FittingParameter(epochs=5000, context="partial fit"),
)
]
|
[
"[email protected]"
] | |
7aa7cb3ed8f101f228aa9076e18398a6f5937a6e
|
b1ddd313527e84ace13729c7f0ad6953f254d0f1
|
/tester.py
|
d2cb0273fb26c0047f5e0d3b91d0945a072b4bd6
|
[] |
no_license
|
sander76/weasy-server
|
bc4e4d98aedab52037e831fed55993d1be27db8c
|
f8196d382ca7abba4156d6f62a0371b9b2ad05f2
|
refs/heads/master
| 2020-06-28T22:57:23.783768 | 2016-12-02T10:05:47 | 2016-12-02T10:05:47 | 74,461,488 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 427 |
py
|
from weasyprint import HTML
import logging
logging.basicConfig(level=logging.DEBUG)
URL = "http://tools.hde.nl/menc/site/guides/Pliss%C3%A9%20%26%20Duette%C2%AE%20Bottom-Up%20programming/"
OUT = "/home/admin-s/test.pdf"
for lp in range(0, 300):
try:
HTML(URL).write_pdf(OUT)
except OSError as e:
logging.exception("**************** ERROR AT ATTEMPT: {} *******************".format(lp))
break
|
[
"[email protected]"
] | |
654338f41e243b91073b88d0c938205d5cac1808
|
62dc63713e8c8ce8622c70117595bae857618107
|
/FLIR Examples/ImageFormatControl.py
|
551f27b29696422be574e0c548082e3f7833eb39
|
[] |
no_license
|
LiamDroog/BaldrControlSuite
|
ad7544d5e92b5e27537e7f20c7cf3ddc78b36769
|
2ca76c4c97c334b6bd5924b00cbcb8e6f687f495
|
refs/heads/master
| 2023-07-04T07:57:46.115829 | 2021-08-03T17:36:49 | 2021-08-03T17:36:49 | 371,804,134 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 19,841 |
py
|
# coding=utf-8
# =============================================================================
# Copyright (c) 2001-2021 FLIR Systems, Inc. All Rights Reserved.
#
# This software is the confidential and proprietary information of FLIR
# Integrated Imaging Solutions, Inc. ("Confidential Information"). You
# shall not disclose such Confidential Information and shall use it only in
# accordance with the terms of the license agreement you entered into
# with FLIR Integrated Imaging Solutions, Inc. (FLIR).
#
# FLIR MAKES NO REPRESENTATIONS OR WARRANTIES ABOUT THE SUITABILITY OF THE
# SOFTWARE, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE, OR NON-INFRINGEMENT. FLIR SHALL NOT BE LIABLE FOR ANY DAMAGES
# SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING
# THIS SOFTWARE OR ITS DERIVATIVES.
# =============================================================================
#
# ImageFormatControl.py shows how to apply custom image settings to
# the camera. It relies on information provided in the Enumeration,
# Acquisition, and NodeMapInfo examples.
#
# This example demonstrates setting minimums to offsets, X and Y, and maximums
# to width and height. It also shows the setting of a new pixel format, which
# is an enumeration type node.
#
# Following this, we suggest familiarizing yourself with the Exposure example
# if you haven't already. Exposure is another example on camera customization
# that is shorter and simpler than many of the others. Once comfortable with
# Exposure and ImageFormatControl, we suggest checking out any of the longer,
# more complicated examples related to camera configuration: ChunkData,
# LookupTable, Sequencer, or Trigger.
import os
import PySpin
import sys
NUM_IMAGES = 10 # number of images to grab
def configure_custom_image_settings(nodemap):
"""
Configures a number of settings on the camera including offsets X and Y, width,
height, and pixel format. These settings must be applied before BeginAcquisition()
is called; otherwise, they will be read only. Also, it is important to note that
settings are applied immediately. This means if you plan to reduce the width and
move the x offset accordingly, you need to apply such changes in the appropriate order.
:param nodemap: GenICam nodemap.
:type nodemap: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print('\n*** CONFIGURING CUSTOM IMAGE SETTINGS *** \n')
try:
result = True
# Apply mono 8 pixel format
#
# *** NOTES ***
# Enumeration nodes are slightly more complicated to set than other
# nodes. This is because setting an enumeration node requires working
# with two nodes instead of the usual one.
#
# As such, there are a number of steps to setting an enumeration node:
# retrieve the enumeration node from the nodemap, retrieve the desired
# entry node from the enumeration node, retrieve the integer value from
# the entry node, and set the new value of the enumeration node with
# the integer value from the entry node.
#
# Retrieve the enumeration node from the nodemap
node_pixel_format = PySpin.CEnumerationPtr(nodemap.GetNode('PixelFormat'))
if PySpin.IsAvailable(node_pixel_format) and PySpin.IsWritable(node_pixel_format):
# Retrieve the desired entry node from the enumeration node
node_pixel_format_mono8 = PySpin.CEnumEntryPtr(node_pixel_format.GetEntryByName('Mono8'))
if PySpin.IsAvailable(node_pixel_format_mono8) and PySpin.IsReadable(node_pixel_format_mono8):
# Retrieve the integer value from the entry node
pixel_format_mono8 = node_pixel_format_mono8.GetValue()
# Set integer as new value for enumeration node
node_pixel_format.SetIntValue(pixel_format_mono8)
print('Pixel format set to %s...' % node_pixel_format.GetCurrentEntry().GetSymbolic())
else:
print('Pixel format mono 8 not available...')
else:
print('Pixel format not available...')
# Apply minimum to offset X
#
# *** NOTES ***
# Numeric nodes have both a minimum and maximum. A minimum is retrieved
# with the method GetMin(). Sometimes it can be important to check
# minimums to ensure that your desired value is within range.
node_offset_x = PySpin.CIntegerPtr(nodemap.GetNode('OffsetX'))
if PySpin.IsAvailable(node_offset_x) and PySpin.IsWritable(node_offset_x):
node_offset_x.SetValue(node_offset_x.GetMin())
print('Offset X set to %i...' % node_offset_x.GetMin())
else:
print('Offset X not available...')
# Apply minimum to offset Y
#
# *** NOTES ***
# It is often desirable to check the increment as well. The increment
# is a number of which a desired value must be a multiple of. Certain
# nodes, such as those corresponding to offsets X and Y, have an
# increment of 1, which basically means that any value within range
# is appropriate. The increment is retrieved with the method GetInc().
node_offset_y = PySpin.CIntegerPtr(nodemap.GetNode('OffsetY'))
if PySpin.IsAvailable(node_offset_y) and PySpin.IsWritable(node_offset_y):
node_offset_y.SetValue(node_offset_y.GetMin())
print('Offset Y set to %i...' % node_offset_y.GetMin())
else:
print('Offset Y not available...')
# Set maximum width
#
# *** NOTES ***
# Other nodes, such as those corresponding to image width and height,
# might have an increment other than 1. In these cases, it can be
# important to check that the desired value is a multiple of the
# increment. However, as these values are being set to the maximum,
# there is no reason to check against the increment.
node_width = PySpin.CIntegerPtr(nodemap.GetNode('Width'))
if PySpin.IsAvailable(node_width) and PySpin.IsWritable(node_width):
width_to_set = node_width.GetMax()
node_width.SetValue(width_to_set)
print('Width set to %i...' % node_width.GetValue())
else:
print('Width not available...')
# Set maximum height
#
# *** NOTES ***
# A maximum is retrieved with the method GetMax(). A node's minimum and
# maximum should always be a multiple of its increment.
node_height = PySpin.CIntegerPtr(nodemap.GetNode('Height'))
if PySpin.IsAvailable(node_height) and PySpin.IsWritable(node_height):
height_to_set = node_height.GetMax()
node_height.SetValue(height_to_set)
print('Height set to %i...' % node_height.GetValue())
else:
print('Height not available...')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
def print_device_info(nodemap):
"""
This function prints the device information of the camera from the transport
layer; please see NodeMapInfo example for more in-depth comments on printing
device information from the nodemap.
:param nodemap: Transport layer device nodemap.
:type nodemap: INodeMap
:returns: True if successful, False otherwise.
:rtype: bool
"""
print('*** DEVICE INFORMATION ***\n')
try:
result = True
node_device_information = PySpin.CCategoryPtr(nodemap.GetNode('DeviceInformation'))
if PySpin.IsAvailable(node_device_information) and PySpin.IsReadable(node_device_information):
features = node_device_information.GetFeatures()
for feature in features:
node_feature = PySpin.CValuePtr(feature)
print('%s: %s' % (node_feature.GetName(),
node_feature.ToString() if PySpin.IsReadable(node_feature) else 'Node not readable'))
else:
print('Device control information not available.')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
def acquire_images(cam, nodemap, nodemap_tldevice):
"""
This function acquires and saves 10 images from a device.
:param cam: Camera to acquire images from.
:param nodemap: Device nodemap.
:param nodemap_tldevice: Transport layer device nodemap.
:type cam: CameraPtr
:type nodemap: INodeMap
:type nodemap_tldevice: INodeMap
:return: True if successful, False otherwise.
:rtype: bool
"""
print('*** IMAGE ACQUISITION ***\n')
try:
result = True
# Set acquisition mode to continuous
#
# *** NOTES ***
# Because the example acquires and saves 10 images, setting acquisition
# mode to continuous lets the example finish. If set to single frame
# or multiframe (at a lower number of images), the example would just
# hang. This would happen because the example has been written to
# acquire 10 images while the camera would have been programmed to
# retrieve less than that.
#
# Setting the value of an enumeration node is slightly more complicated
# than other node types. Two nodes must be retrieved: first, the
# enumeration node is retrieved from the nodemap; and second, the entry
# node is retrieved from the enumeration node. The integer value of the
# entry node is then set as the new value of the enumeration node.
#
# Notice that both the enumeration and the entry nodes are checked for
# availability and readability/writability. Enumeration nodes are
# generally readable and writable whereas their entry nodes are only
# ever readable.
#
# Retrieve enumeration node from nodemap
        # In order to access the node entries, they have to be cast to a pointer type (CEnumerationPtr here)
node_acquisition_mode = PySpin.CEnumerationPtr(nodemap.GetNode('AcquisitionMode'))
if not PySpin.IsAvailable(node_acquisition_mode) or not PySpin.IsWritable(node_acquisition_mode):
print('Unable to set acquisition mode to continuous (enum retrieval). Aborting...')
return False
# Retrieve entry node from enumeration node
node_acquisition_mode_continuous = node_acquisition_mode.GetEntryByName('Continuous')
if not PySpin.IsAvailable(node_acquisition_mode_continuous) or not PySpin.IsReadable(
node_acquisition_mode_continuous):
print('Unable to set acquisition mode to continuous (entry retrieval). Aborting...')
return False
# Retrieve integer value from entry node
acquisition_mode_continuous = node_acquisition_mode_continuous.GetValue()
# Set integer value from entry node as new value of enumeration node
node_acquisition_mode.SetIntValue(acquisition_mode_continuous)
print('Acquisition mode set to continuous...')
# Begin acquiring images
#
# *** NOTES ***
# What happens when the camera begins acquiring images depends on the
# acquisition mode. Single frame captures only a single image, multi
        # frame captures a set number of images, and continuous captures a
# continuous stream of images. Because the example calls for the
# retrieval of 10 images, continuous mode has been set.
#
# *** LATER ***
# Image acquisition must be ended when no more images are needed.
cam.BeginAcquisition()
print('Acquiring images...')
# Retrieve device serial number for filename
#
# *** NOTES ***
# The device serial number is retrieved in order to keep cameras from
# overwriting one another. Grabbing image IDs could also accomplish
# this.
device_serial_number = ''
node_device_serial_number = PySpin.CStringPtr(nodemap_tldevice.GetNode('DeviceSerialNumber'))
if PySpin.IsAvailable(node_device_serial_number) and PySpin.IsReadable(node_device_serial_number):
device_serial_number = node_device_serial_number.GetValue()
print('Device serial number retrieved as %s...' % device_serial_number)
# Retrieve, convert, and save images
for i in range(NUM_IMAGES):
try:
# Retrieve next received image
#
# *** NOTES ***
# Capturing an image houses images on the camera buffer. Trying
# to capture an image that does not exist will hang the camera.
#
# *** LATER ***
# Once an image from the buffer is saved and/or no longer
# needed, the image must be released in order to keep the
# buffer from filling up.
image_result = cam.GetNextImage(1000)
# Ensure image completion
#
# *** NOTES ***
# Images can easily be checked for completion. This should be
# done whenever a complete image is expected or required.
# Further, check image status for a little more insight into
# why an image is incomplete.
if image_result.IsIncomplete():
print('Image incomplete with image status %d ...' % image_result.GetImageStatus())
else:
# Print image information; height and width recorded in pixels
#
# *** NOTES ***
# Images have quite a bit of available metadata including
# things such as CRC, image status, and offset values, to
# name a few.
width = image_result.GetWidth()
height = image_result.GetHeight()
print('Grabbed Image %d, width = %d, height = %d' % (i, width, height))
# Convert image to mono 8
#
# *** NOTES ***
# Images can be converted between pixel formats by using
# the appropriate enumeration value. Unlike the original
# image, the converted one does not need to be released as
# it does not affect the camera buffer.
#
# When converting images, color processing algorithm is an
# optional parameter.
image_converted = image_result.Convert(PySpin.PixelFormat_Mono8, PySpin.HQ_LINEAR)
# Create a unique filename
if device_serial_number:
filename = 'ImageFormatControl-%s-%d.jpg' % (device_serial_number, i)
else: # if serial number is empty
filename = 'ImageFormatControl-%d.jpg' % i
# Save image
#
# *** NOTES ***
# The standard practice of the examples is to use device
# serial numbers to keep images of one device from
# overwriting those of another.
image_converted.Save(filename)
print('Image saved at %s' % filename)
# Release image
#
# *** NOTES ***
# Images retrieved directly from the camera (i.e. non-converted
# images) need to be released in order to keep from filling the
# buffer.
image_result.Release()
print('')
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
# End acquisition
#
# *** NOTES ***
# Ending acquisition appropriately helps ensure that devices clean up
# properly and do not need to be power-cycled to maintain integrity.
cam.EndAcquisition()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
return False
return result
def run_single_camera(cam):
"""
This function acts as the body of the example; please see NodeMapInfo example
for more in-depth comments on setting up cameras.
:param cam: Camera to run on.
:type cam: CameraPtr
:return: True if successful, False otherwise.
:rtype: bool
"""
try:
result = True
# Retrieve TL device nodemap and print device information
nodemap_tldevice = cam.GetTLDeviceNodeMap()
result &= print_device_info(nodemap_tldevice)
# Initialize camera
cam.Init()
# Retrieve GenICam nodemap
nodemap = cam.GetNodeMap()
# Configure custom image settings
if not configure_custom_image_settings(nodemap):
return False
# Acquire images
result &= acquire_images(cam, nodemap, nodemap_tldevice)
# Deinitialize camera
cam.DeInit()
except PySpin.SpinnakerException as ex:
print('Error: %s' % ex)
result = False
return result
def main():
"""
Example entry point; please see Enumeration example for more in-depth
comments on preparing and cleaning up the system.
:return: True if successful, False otherwise.
:rtype: bool
"""
# Since this application saves images in the current folder
# we must ensure that we have permission to write to this folder.
# If we do not have permission, fail right away.
try:
test_file = open('test.txt', 'w+')
except IOError:
print('Unable to write to current directory. Please check permissions.')
input('Press Enter to exit...')
return False
test_file.close()
os.remove(test_file.name)
result = True
# Retrieve singleton reference to system object
system = PySpin.System.GetInstance()
# Get current library version
version = system.GetLibraryVersion()
print('Library version: %d.%d.%d.%d' % (version.major, version.minor, version.type, version.build))
# Retrieve list of cameras from the system
cam_list = system.GetCameras()
num_cameras = cam_list.GetSize()
print('Number of cameras detected: %d' % num_cameras)
# Finish if there are no cameras
if num_cameras == 0:
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
print('Not enough cameras!')
input('Done! Press Enter to exit...')
return False
# Run example on each camera
for i, cam in enumerate(cam_list):
print('Running example for camera %d...' % i)
result &= run_single_camera(cam)
print('Camera %d example complete... \n' % i)
# Release reference to camera
# NOTE: Unlike the C++ examples, we cannot rely on pointer objects being automatically
# cleaned up when going out of scope.
# The usage of del is preferred to assigning the variable to None.
del cam
# Clear camera list before releasing system
cam_list.Clear()
# Release system instance
system.ReleaseInstance()
input('Done! Press Enter to exit...')
return result
if __name__ == '__main__':
if main():
sys.exit(0)
else:
sys.exit(1)
|
[
"[email protected]"
] | |
4ad300b3af57d974576a31de15ca71fa38cfe7c8
|
8d35b8aa63f3cae4e885e3c081f41235d2a8f61f
|
/discord/ext/dl/extractor/cnbc.py
|
90c89123e5bbed7130c3977526254dd0beb26afc
|
[
"MIT"
] |
permissive
|
alexyy802/Texus
|
1255f4e54c8d3cc067f0d30daff1cf24932ea0c9
|
c282a836f43dfd588d89d5c13f432896aebb540f
|
refs/heads/master
| 2023-09-05T06:14:36.217601 | 2021-11-21T03:39:55 | 2021-11-21T03:39:55 | 429,390,575 | 0 | 0 |
MIT
| 2021-11-19T09:22:22 | 2021-11-18T10:43:11 |
Python
|
UTF-8
|
Python
| false | false | 2,370 |
py
|
# coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import smuggle_url
class CNBCIE(InfoExtractor):
_VALID_URL = r"https?://video\.cnbc\.com/gallery/\?video=(?P<id>[0-9]+)"
_TEST = {
"url": "http://video.cnbc.com/gallery/?video=3000503714",
"info_dict": {
"id": "3000503714",
"ext": "mp4",
"title": "Fighting zombies is big business",
"description": "md5:0c100d8e1a7947bd2feec9a5550e519e",
"timestamp": 1459332000,
"upload_date": "20160330",
"uploader": "NBCU-CNBC",
},
"params": {
# m3u8 download
"skip_download": True,
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
return {
"_type": "url_transparent",
"ie_key": "ThePlatform",
"url": smuggle_url(
"http://link.theplatform.com/s/gZWlPC/media/guid/2408950221/%s?mbr=true&manifest=m3u"
% video_id,
{"force_smil_url": True},
),
"id": video_id,
}
class CNBCVideoIE(InfoExtractor):
_VALID_URL = r"https?://(?:www\.)?cnbc\.com(?P<path>/video/(?:[^/]+/)+(?P<id>[^./?#&]+)\.html)"
_TEST = {
"url": "https://www.cnbc.com/video/2018/07/19/trump-i-dont-necessarily-agree-with-raising-rates.html",
"info_dict": {
"id": "7000031301",
"ext": "mp4",
"title": "Trump: I don't necessarily agree with raising rates",
"description": "md5:878d8f0b4ebb5bb1dda3514b91b49de3",
"timestamp": 1531958400,
"upload_date": "20180719",
"uploader": "NBCU-CNBC",
},
"params": {
"skip_download": True,
},
}
def _real_extract(self, url):
path, display_id = re.match(self._VALID_URL, url).groups()
video_id = self._download_json(
"https://webql-redesign.cnbcfm.com/graphql",
display_id,
query={
"query": """{
page(path: "%s") {
vcpsId
}
}"""
% path,
},
)["data"]["page"]["vcpsId"]
return self.url_result(
"http://video.cnbc.com/gallery/?video=%d" % video_id, CNBCIE.ie_key()
)
|
[
"[email protected]"
] | |
48d3fbbb3b724ba6e8ceb08c947796e935fdcda1
|
49812e663d0033700af72c4f451581d1f05791ef
|
/scons/scons-local-3.0.1/SCons/Util.py
|
c9aa2b53d16f91ee0281c4035c030d279b1f3e01
|
[
"Apache-2.0",
"MIT",
"LicenseRef-scancode-free-unknown",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
faustus123/JANA
|
1d9f6af0976b45eb9ceade584f3c0faeacf19b2c
|
38ca14e79deeb4c13042c60d948356ab8e98cf0c
|
refs/heads/master
| 2023-01-09T04:18:06.795419 | 2020-11-14T16:00:10 | 2020-11-14T16:00:10 | 103,759,870 | 0 | 2 |
Apache-2.0
| 2020-11-14T15:57:50 | 2017-09-16T14:50:58 |
Python
|
UTF-8
|
Python
| false | false | 53,237 |
py
|
"""SCons.Util
Various utility functions go here.
"""
#
# Copyright (c) 2001 - 2017 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Util.py 74b2c53bc42290e911b334a6b44f187da698a668 2017/11/14 13:16:53 bdbaddog"
import os
import sys
import copy
import re
import types
import codecs
import pprint
PY3 = sys.version_info[0] == 3
try:
from UserDict import UserDict
except ImportError as e:
from collections import UserDict
try:
from UserList import UserList
except ImportError as e:
from collections import UserList
try:
    from collections.abc import Iterable
except ImportError:
    # Python 2 fallback; Iterable was removed from 'collections' itself in Python 3.10.
    from collections import Iterable
try:
from UserString import UserString
except ImportError as e:
from collections import UserString
# Don't "from types import ..." these because we need to get at the
# types module later to look for UnicodeType.
# Below not used?
# InstanceType = types.InstanceType
MethodType = types.MethodType
FunctionType = types.FunctionType
try:
unicode
except NameError:
UnicodeType = str
else:
UnicodeType = unicode
def dictify(keys, values, result=None):
    # A mutable default argument would be shared across calls and silently
    # accumulate entries, so create the dict per call unless one is supplied.
    if result is None:
        result = {}
    for k, v in zip(keys, values):
        result[k] = v
    return result
_altsep = os.altsep
if _altsep is None and sys.platform == 'win32':
# My ActivePython 2.0.1 doesn't set os.altsep! What gives?
_altsep = '/'
if _altsep:
def rightmost_separator(path, sep):
return max(path.rfind(sep), path.rfind(_altsep))
else:
def rightmost_separator(path, sep):
return path.rfind(sep)
# First two from the Python Cookbook, just for completeness.
# (Yeah, yeah, YAGNI...)
def containsAny(str, set):
"""Check whether sequence str contains ANY of the items in set."""
for c in set:
if c in str: return 1
return 0
def containsAll(str, set):
"""Check whether sequence str contains ALL of the items in set."""
for c in set:
if c not in str: return 0
return 1
def containsOnly(str, set):
"""Check whether sequence str contains ONLY items in set."""
for c in str:
if c not in set: return 0
return 1
def splitext(path):
"Same as os.path.splitext() but faster."
sep = rightmost_separator(path, os.sep)
dot = path.rfind('.')
# An ext is only real if it has at least one non-digit char
if dot > sep and not containsOnly(path[dot:], "0123456789."):
return path[:dot],path[dot:]
else:
return path,""
def updrive(path):
"""
Make the drive letter (if any) upper case.
This is useful because Windows is inconsistent on the case
of the drive letter, which can cause inconsistencies when
calculating command signatures.
"""
drive, rest = os.path.splitdrive(path)
if drive:
path = drive.upper() + rest
return path
class NodeList(UserList):
"""This class is almost exactly like a regular list of Nodes
(actually it can hold any object), with one important difference.
If you try to get an attribute from this list, it will return that
attribute from every item in the list. For example:
>>> someList = NodeList([ ' foo ', ' bar ' ])
>>> someList.strip()
[ 'foo', 'bar' ]
"""
# def __init__(self, initlist=None):
# self.data = []
# # print("TYPE:%s"%type(initlist))
# if initlist is not None:
# # XXX should this accept an arbitrary sequence?
# if type(initlist) == type(self.data):
# self.data[:] = initlist
# elif isinstance(initlist, (UserList, NodeList)):
# self.data[:] = initlist.data[:]
# elif isinstance(initlist, Iterable):
# self.data = list(initlist)
# else:
# self.data = [ initlist,]
def __nonzero__(self):
return len(self.data) != 0
def __bool__(self):
return self.__nonzero__()
def __str__(self):
return ' '.join(map(str, self.data))
def __iter__(self):
return iter(self.data)
def __call__(self, *args, **kwargs):
result = [x(*args, **kwargs) for x in self.data]
return self.__class__(result)
def __getattr__(self, name):
result = [getattr(x, name) for x in self.data]
return self.__class__(result)
def __getitem__(self, index):
"""
        This came for free on py2, but on py3 slicing a NodeList
        returned a plain list, which broke both slicing of NodeLists
        and access to properties/methods on the contained objects.
        This override keeps slices as NodeList instances.
"""
# return self.__class__(self.data[index])
if isinstance(index, slice):
# Expand the slice object using range()
# limited by number of items in self.data
indices = index.indices(len(self.data))
return self.__class__([self[x] for x in
range(*indices)])
else:
            # Return one item of the list
return self.data[index]
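def _nodelist_demo():
    """Hedged usage sketch (added for illustration; not called anywhere).
    Attribute access fans out across the list, and slices keep the
    NodeList type thanks to the __getitem__ override above."""
    nl = NodeList(['  foo  ', '  bar  '])
    assert list(nl.strip()) == ['foo', 'bar']
    assert list(nl[:1].strip()) == ['foo']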
_get_env_var = re.compile(r'^\$([_a-zA-Z]\w*|{[_a-zA-Z]\w*})$')
def get_environment_var(varstr):
"""Given a string, first determine if it looks like a reference
to a single environment variable, like "$FOO" or "${FOO}".
If so, return that variable with no decorations ("FOO").
If not, return None."""
mo=_get_env_var.match(to_String(varstr))
if mo:
var = mo.group(1)
if var[0] == '{':
return var[1:-1]
else:
return var
else:
return None
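# Hedged examples (illustration only):
#   get_environment_var('$FOO')     -> 'FOO'
#   get_environment_var('${FOO}')   -> 'FOO'
#   get_environment_var('$FOO/bar') -> None  (not a lone variable reference)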
class DisplayEngine(object):
print_it = True
def __call__(self, text, append_newline=1):
if not self.print_it:
return
if append_newline: text = text + '\n'
try:
sys.stdout.write(UnicodeType(text))
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
            # IOError here so that SCons can continue and shut down
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def set_mode(self, mode):
self.print_it = mode
def render_tree(root, child_func, prune=0, margin=[0], visited=None):
"""
Render a tree of nodes into an ASCII tree view.
:Parameters:
- `root`: the root node of the tree
- `child_func`: the function called to get the children of a node
- `prune`: don't visit the same node twice
- `margin`: the format of the left margin to use for children of root. 1 results in a pipe, and 0 results in no pipe.
- `visited`: a dictionary of visited nodes in the current branch if not prune, or in the whole tree if prune.
"""
rname = str(root)
# Initialize 'visited' dict, if required
if visited is None:
visited = {}
children = child_func(root)
retval = ""
for pipe in margin[:-1]:
if pipe:
retval = retval + "| "
else:
retval = retval + " "
if rname in visited:
return retval + "+-[" + rname + "]\n"
retval = retval + "+-" + rname + "\n"
if not prune:
visited = copy.copy(visited)
visited[rname] = 1
for i in range(len(children)):
margin.append(i < len(children)-1)
retval = retval + render_tree(children[i], child_func, prune, margin, visited)
margin.pop()
return retval
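def _render_tree_demo():
    """Hedged usage sketch (added for illustration; not called anywhere).
    'kids' is a hypothetical adjacency dict standing in for real Nodes."""
    kids = {'top': ['a', 'b'], 'a': ['leaf'], 'b': [], 'leaf': []}
    return render_tree('top', lambda n: kids[str(n)])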
IDX = lambda N: N and 1 or 0
def print_tree(root, child_func, prune=0, showtags=0, margin=[0], visited=None):
"""
Print a tree of nodes. This is like render_tree, except it prints
lines directly instead of creating a string representation in memory,
so that huge trees can be printed.
:Parameters:
- `root` - the root node of the tree
- `child_func` - the function called to get the children of a node
- `prune` - don't visit the same node twice
- `showtags` - print status information to the left of each node line
- `margin` - the format of the left margin to use for children of root. 1 results in a pipe, and 0 results in no pipe.
- `visited` - a dictionary of visited nodes in the current branch if not prune, or in the whole tree if prune.
"""
rname = str(root)
# Initialize 'visited' dict, if required
if visited is None:
visited = {}
if showtags:
if showtags == 2:
legend = (' E = exists\n' +
' R = exists in repository only\n' +
' b = implicit builder\n' +
' B = explicit builder\n' +
' S = side effect\n' +
' P = precious\n' +
' A = always build\n' +
' C = current\n' +
' N = no clean\n' +
' H = no cache\n' +
'\n')
sys.stdout.write(legend)
tags = ['[']
tags.append(' E'[IDX(root.exists())])
tags.append(' R'[IDX(root.rexists() and not root.exists())])
tags.append(' BbB'[[0,1][IDX(root.has_explicit_builder())] +
[0,2][IDX(root.has_builder())]])
tags.append(' S'[IDX(root.side_effect)])
tags.append(' P'[IDX(root.precious)])
tags.append(' A'[IDX(root.always_build)])
tags.append(' C'[IDX(root.is_up_to_date())])
tags.append(' N'[IDX(root.noclean)])
tags.append(' H'[IDX(root.nocache)])
tags.append(']')
else:
tags = []
def MMM(m):
return [" ","| "][m]
margins = list(map(MMM, margin[:-1]))
children = child_func(root)
if prune and rname in visited and children:
sys.stdout.write(''.join(tags + margins + ['+-[', rname, ']']) + '\n')
return
sys.stdout.write(''.join(tags + margins + ['+-', rname]) + '\n')
visited[rname] = 1
if children:
margin.append(1)
idx = IDX(showtags)
for C in children[:-1]:
print_tree(C, child_func, prune, idx, margin, visited)
margin[-1] = 0
print_tree(children[-1], child_func, prune, idx, margin, visited)
margin.pop()
# Functions for deciding if things are like various types, mainly to
# handle UserDict, UserList and UserString like their underlying types.
#
# Yes, all of this manual testing breaks polymorphism, and the real
# Pythonic way to do all of this would be to just try it and handle the
# exception, but handling the exception when it's not the right type is
# often too slow.
# We are using the following trick to speed up these
# functions. Default arguments are used to take a snapshot of
# the global functions and constants used by these functions. This
# transforms accesses to global variable into local variables
# accesses (i.e. LOAD_FAST instead of LOAD_GLOBAL).
DictTypes = (dict, UserDict)
ListTypes = (list, UserList)
SequenceTypes = (list, tuple, UserList)
# Note that profiling data shows a speed-up when comparing
# explicitly with str and unicode instead of simply comparing
# with basestring. (at least on Python 2.5.1)
try:
StringTypes = (str, unicode, UserString)
except NameError:
StringTypes = (str, UserString)
# Empirically, it is faster to check explicitly for str and
# unicode than for basestring.
try:
BaseStringTypes = (str, unicode)
except NameError:
    BaseStringTypes = (str,)  # one-element tuple; a bare (str) is just str
def is_Dict(obj, isinstance=isinstance, DictTypes=DictTypes):
return isinstance(obj, DictTypes)
def is_List(obj, isinstance=isinstance, ListTypes=ListTypes):
return isinstance(obj, ListTypes)
def is_Sequence(obj, isinstance=isinstance, SequenceTypes=SequenceTypes):
return isinstance(obj, SequenceTypes)
def is_Tuple(obj, isinstance=isinstance, tuple=tuple):
return isinstance(obj, tuple)
def is_String(obj, isinstance=isinstance, StringTypes=StringTypes):
return isinstance(obj, StringTypes)
def is_Scalar(obj, isinstance=isinstance, StringTypes=StringTypes, SequenceTypes=SequenceTypes):
# Profiling shows that there is an impressive speed-up of 2x
# when explicitly checking for strings instead of just not
# sequence when the argument (i.e. obj) is already a string.
# But, if obj is a not string then it is twice as fast to
# check only for 'not sequence'. The following code therefore
# assumes that the obj argument is a string most of the time.
return isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes)
def do_flatten(sequence, result, isinstance=isinstance,
StringTypes=StringTypes, SequenceTypes=SequenceTypes):
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
def flatten(obj, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Flatten() converts either a single scalar or a nested sequence
to a non-nested list. Note that flatten() considers strings
to be scalars instead of sequences like Python would.
"""
if isinstance(obj, StringTypes) or not isinstance(obj, SequenceTypes):
return [obj]
result = []
for item in obj:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
def flatten_sequence(sequence, isinstance=isinstance, StringTypes=StringTypes,
SequenceTypes=SequenceTypes, do_flatten=do_flatten):
"""Flatten a sequence to a non-nested list.
Same as flatten(), but it does not handle the single scalar
case. This is slightly more efficient when one knows that
the sequence to flatten can not be a scalar.
"""
result = []
for item in sequence:
if isinstance(item, StringTypes) or not isinstance(item, SequenceTypes):
result.append(item)
else:
do_flatten(item, result)
return result
# Generic convert-to-string functions that abstract away whether or
# not the Python we're executing has Unicode support. The wrapper
# to_String_for_signature() will use a for_signature() method if the
# specified object has one.
#
def to_String(s,
isinstance=isinstance, str=str,
UserString=UserString, BaseStringTypes=BaseStringTypes):
if isinstance(s,BaseStringTypes):
# Early out when already a string!
return s
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_subst(s,
isinstance=isinstance, str=str, to_String=to_String,
BaseStringTypes=BaseStringTypes, SequenceTypes=SequenceTypes,
UserString=UserString):
# Note that the test cases are sorted by order of probability.
if isinstance(s, BaseStringTypes):
return s
elif isinstance(s, SequenceTypes):
return ' '.join([to_String_for_subst(e) for e in s])
elif isinstance(s, UserString):
# s.data can only be either a unicode or a regular
# string. Please see the UserString initializer.
return s.data
else:
return str(s)
def to_String_for_signature(obj, to_String_for_subst=to_String_for_subst,
AttributeError=AttributeError):
try:
f = obj.for_signature
except AttributeError:
if isinstance(obj, dict):
# pprint will output dictionary in key sorted order
# with py3.5 the order was randomized. In general depending on dictionary order
# which was undefined until py3.6 (where it's by insertion order) was not wise.
return pprint.pformat(obj, width=1000000)
else:
return to_String_for_subst(obj)
else:
return f()
# The SCons "semi-deep" copy.
#
# This makes separate copies of lists (including UserList objects)
# dictionaries (including UserDict objects) and tuples, but just copies
# references to anything else it finds.
#
# A special case is any object that has a __semi_deepcopy__() method,
# which we invoke to create the copy. Currently only used by
# BuilderDict to actually prevent the copy operation (as invalid on that object).
#
# The dispatch table approach used here is a direct rip-off from the
# normal Python copy module.
_semi_deepcopy_dispatch = d = {}
def semi_deepcopy_dict(x, exclude = [] ):
copy = {}
for key, val in x.items():
# The regular Python copy.deepcopy() also deepcopies the key,
# as follows:
#
# copy[semi_deepcopy(key)] = semi_deepcopy(val)
#
# Doesn't seem like we need to, but we'll comment it just in case.
if key not in exclude:
copy[key] = semi_deepcopy(val)
return copy
d[dict] = semi_deepcopy_dict
def _semi_deepcopy_list(x):
return list(map(semi_deepcopy, x))
d[list] = _semi_deepcopy_list
def _semi_deepcopy_tuple(x):
return tuple(map(semi_deepcopy, x))
d[tuple] = _semi_deepcopy_tuple
def semi_deepcopy(x):
copier = _semi_deepcopy_dispatch.get(type(x))
if copier:
return copier(x)
else:
if hasattr(x, '__semi_deepcopy__') and callable(x.__semi_deepcopy__):
return x.__semi_deepcopy__()
elif isinstance(x, UserDict):
return x.__class__(semi_deepcopy_dict(x))
elif isinstance(x, UserList):
return x.__class__(_semi_deepcopy_list(x))
return x
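def _semi_deepcopy_demo():
    """Hedged sketch of the "semi-deep" contract (illustration only):
    containers are copied, everything else is shared by reference."""
    shared = object()
    orig = {'xs': [1, 2, shared]}
    copied = semi_deepcopy(orig)
    assert copied['xs'] is not orig['xs']  # the list itself was copied
    assert copied['xs'][2] is shared       # the object inside is shared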
class Proxy(object):
"""A simple generic Proxy class, forwarding all calls to
subject. So, for the benefit of the python newbie, what does
this really mean? Well, it means that you can take an object, let's
call it 'objA', and wrap it in this Proxy class, with a statement
like this
proxyObj = Proxy(objA),
Then, if in the future, you do something like this
x = proxyObj.var1,
since Proxy does not have a 'var1' attribute (but presumably objA does),
the request actually is equivalent to saying
x = objA.var1
Inherit from this class to create a Proxy.
Note that, with new-style classes, this does *not* work transparently
for Proxy subclasses that use special .__*__() method names, because
those names are now bound to the class, not the individual instances.
You now need to know in advance which .__*__() method names you want
to pass on to the underlying Proxy object, and specifically delegate
their calls like this:
class Foo(Proxy):
__str__ = Delegate('__str__')
"""
def __init__(self, subject):
"""Wrap an object as a Proxy object"""
self._subject = subject
def __getattr__(self, name):
"""Retrieve an attribute from the wrapped object. If the named
attribute doesn't exist, AttributeError is raised"""
return getattr(self._subject, name)
def get(self):
"""Retrieve the entire wrapped object"""
return self._subject
def __eq__(self, other):
if issubclass(other.__class__, self._subject.__class__):
return self._subject == other
return self.__dict__ == other.__dict__
class Delegate(object):
"""A Python Descriptor class that delegates attribute fetches
to an underlying wrapped subject of a Proxy. Typical use:
class Foo(Proxy):
__str__ = Delegate('__str__')
"""
def __init__(self, attribute):
self.attribute = attribute
def __get__(self, obj, cls):
if isinstance(obj, cls):
return getattr(obj._subject, self.attribute)
else:
return self
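# Hedged usage sketch (illustration only), combining Proxy and Delegate as the
# docstrings above describe; special methods must be delegated explicitly.
class _StrProxy(Proxy):
    __str__ = Delegate('__str__')
# e.g. str(_StrProxy('hello')) == 'hello'; _StrProxy('hello').upper() == 'HELLO'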
# attempt to load the windows registry module:
can_read_reg = 0
try:
import winreg
can_read_reg = 1
hkey_mod = winreg
RegOpenKeyEx = winreg.OpenKeyEx
RegEnumKey = winreg.EnumKey
RegEnumValue = winreg.EnumValue
RegQueryValueEx = winreg.QueryValueEx
RegError = winreg.error
except ImportError:
try:
import win32api
import win32con
can_read_reg = 1
hkey_mod = win32con
RegOpenKeyEx = win32api.RegOpenKeyEx
RegEnumKey = win32api.RegEnumKey
RegEnumValue = win32api.RegEnumValue
RegQueryValueEx = win32api.RegQueryValueEx
RegError = win32api.error
except ImportError:
class _NoError(Exception):
pass
RegError = _NoError
WinError = None
# Make sure we have a definition of WindowsError so we can
# run platform-independent tests of Windows functionality on
# platforms other than Windows. (WindowsError is, in fact, an
# OSError subclass on Windows.)
class PlainWindowsError(OSError):
pass
try:
WinError = WindowsError
except NameError:
WinError = PlainWindowsError
if can_read_reg:
HKEY_CLASSES_ROOT = hkey_mod.HKEY_CLASSES_ROOT
HKEY_LOCAL_MACHINE = hkey_mod.HKEY_LOCAL_MACHINE
HKEY_CURRENT_USER = hkey_mod.HKEY_CURRENT_USER
HKEY_USERS = hkey_mod.HKEY_USERS
def RegGetValue(root, key):
"""This utility function returns a value in the registry
without having to open the key first. Only available on
Windows platforms with a version of Python that can read the
registry. Returns the same thing as
SCons.Util.RegQueryValueEx, except you just specify the entire
path to the value, and don't have to bother opening the key
first. So:
Instead of:
k = SCons.Util.RegOpenKeyEx(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion')
out = SCons.Util.RegQueryValueEx(k,
'ProgramFilesDir')
You can write:
out = SCons.Util.RegGetValue(SCons.Util.HKEY_LOCAL_MACHINE,
r'SOFTWARE\Microsoft\Windows\CurrentVersion\ProgramFilesDir')
"""
# I would use os.path.split here, but it's not a filesystem
# path...
p = key.rfind('\\') + 1
        keyp = key[:p-1] # -1 to omit the trailing backslash
val = key[p:]
k = RegOpenKeyEx(root, keyp)
return RegQueryValueEx(k,val)
else:
HKEY_CLASSES_ROOT = None
HKEY_LOCAL_MACHINE = None
HKEY_CURRENT_USER = None
HKEY_USERS = None
def RegGetValue(root, key):
raise WinError
def RegOpenKeyEx(root, key):
raise WinError
if sys.platform == 'win32':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
try:
pathext = os.environ['PATHEXT']
except KeyError:
pathext = '.COM;.EXE;.BAT;.CMD'
if is_String(pathext):
pathext = pathext.split(os.pathsep)
for ext in pathext:
if ext.lower() == file[-len(ext):].lower():
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
elif os.name == 'os2':
def WhereIs(file, path=None, pathext=None, reject=[]):
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if pathext is None:
pathext = ['.exe', '.cmd']
for ext in pathext:
if ext.lower() == file[-len(ext):].lower():
pathext = ['']
break
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for dir in path:
f = os.path.join(dir, file)
for ext in pathext:
fext = f + ext
if os.path.isfile(fext):
try:
reject.index(fext)
except ValueError:
return os.path.normpath(fext)
continue
return None
else:
def WhereIs(file, path=None, pathext=None, reject=[]):
import stat
if path is None:
try:
path = os.environ['PATH']
except KeyError:
return None
if is_String(path):
path = path.split(os.pathsep)
if not is_List(reject) and not is_Tuple(reject):
reject = [reject]
for d in path:
f = os.path.join(d, file)
if os.path.isfile(f):
try:
st = os.stat(f)
except OSError:
# os.stat() raises OSError, not IOError if the file
# doesn't exist, so in this case we let IOError get
# raised so as to not mask possibly serious disk or
# network issues.
continue
if stat.S_IMODE(st[stat.ST_MODE]) & 0o111:
try:
reject.index(f)
except ValueError:
return os.path.normpath(f)
continue
return None
def PrependPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This prepends newpath elements to the given oldpath. Will only
add any particular path once (leaving the first one it encounters
and ignoring the rest, to preserve path order), and will
os.path.normpath and os.path.normcase all paths to help assure
this. This can also handle the case where the given old path
variable is a list instead of a string, in which case a list will
be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/biz/boom:/foo:/foo/bar"
If delete_existing is 0, then adding a path that exists will
not move it to the beginning; it will stay where it is in the
list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = 0
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=list(map(canonicalize, newpaths))
if not delete_existing:
# First uniquify the old paths, making sure to
# preserve the first instance (in Unix/Linux,
# the first one wins), and remembering them in normpaths.
# Then insert the new paths at the head of the list
# if they're not already in the normpaths list.
result = []
normpaths = []
for path in paths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
newpaths.reverse() # since we're inserting at the head
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.insert(0, path)
normpaths.append(normpath)
paths = result
else:
newpaths = newpaths + paths # prepend new paths
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
if is_list:
return paths
else:
return sep.join(paths)
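def _prepend_path_demo():
    """Hedged usage sketch (illustration only), matching the docstring example."""
    assert (PrependPath("/foo/bar:/foo", "/biz/boom:/foo", sep=":")
            == "/biz/boom:/foo:/foo/bar")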
def AppendPath(oldpath, newpath, sep = os.pathsep,
delete_existing=1, canonicalize=None):
"""This appends new path elements to the given old path. Will
only add any particular path once (leaving the last one it
encounters and ignoring the rest, to preserve path order), and
will os.path.normpath and os.path.normcase all paths to help
assure this. This can also handle the case where the given old
path variable is a list instead of a string, in which case a list
will be returned instead of a string.
Example:
Old Path: "/foo/bar:/foo"
New Path: "/biz/boom:/foo"
Result: "/foo/bar:/biz/boom:/foo"
If delete_existing is 0, then adding a path that exists
will not move it to the end; it will stay where it is in the list.
If canonicalize is not None, it is applied to each element of
newpath before use.
"""
orig = oldpath
is_list = 1
paths = orig
if not is_List(orig) and not is_Tuple(orig):
paths = paths.split(sep)
is_list = 0
if is_String(newpath):
newpaths = newpath.split(sep)
elif not is_List(newpath) and not is_Tuple(newpath):
newpaths = [ newpath ] # might be a Dir
else:
newpaths = newpath
if canonicalize:
newpaths=list(map(canonicalize, newpaths))
if not delete_existing:
# add old paths to result, then
# add new paths if not already present
# (I thought about using a dict for normpaths for speed,
# but it's not clear hashing the strings would be faster
# than linear searching these typically short lists.)
result = []
normpaths = []
for path in paths:
if not path:
continue
result.append(path)
normpaths.append(os.path.normpath(os.path.normcase(path)))
for path in newpaths:
if not path:
continue
normpath = os.path.normpath(os.path.normcase(path))
if normpath not in normpaths:
result.append(path)
normpaths.append(normpath)
paths = result
else:
# start w/ new paths, add old ones if not present,
# then reverse.
newpaths = paths + newpaths # append new paths
newpaths.reverse()
normpaths = []
paths = []
# now we add them only if they are unique
for path in newpaths:
normpath = os.path.normpath(os.path.normcase(path))
if path and not normpath in normpaths:
paths.append(path)
normpaths.append(normpath)
paths.reverse()
if is_list:
return paths
else:
return sep.join(paths)
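def _append_path_demo():
    """Hedged usage sketch (illustration only), matching the docstring example."""
    assert (AppendPath("/foo/bar:/foo", "/biz/boom:/foo", sep=":")
            == "/foo/bar:/biz/boom:/foo")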
def AddPathIfNotExists(env_dict, key, path, sep=os.pathsep):
"""This function will take 'key' out of the dictionary
'env_dict', then add the path 'path' to that key if it is not
already there. This treats the value of env_dict[key] as if it
has a similar format to the PATH variable...a list of paths
separated by tokens. The 'path' will get added to the list if it
is not already there."""
try:
is_list = 1
paths = env_dict[key]
if not is_List(env_dict[key]):
paths = paths.split(sep)
is_list = 0
if os.path.normcase(path) not in list(map(os.path.normcase, paths)):
paths = [ path ] + paths
if is_list:
env_dict[key] = paths
else:
env_dict[key] = sep.join(paths)
except KeyError:
env_dict[key] = path
if sys.platform == 'cygwin':
def get_native_path(path):
"""Transforms an absolute path into a native path for the system. In
Cygwin, this converts from a Cygwin path to a Windows one."""
return os.popen('cygpath -w ' + path).read().replace('\n', '')
else:
def get_native_path(path):
"""Transforms an absolute path into a native path for the system.
Non-Cygwin version, just leave the path alone."""
return path
display = DisplayEngine()
def Split(arg):
if is_List(arg) or is_Tuple(arg):
return arg
elif is_String(arg):
return arg.split()
else:
return [arg]
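# Hedged examples (illustration only):
#   Split('a b c')    -> ['a', 'b', 'c']
#   Split(['a', 'b']) -> ['a', 'b']
#   Split(7)          -> [7]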
class CLVar(UserList):
"""A class for command-line construction variables.
This is a list that uses Split() to split an initial string along
white-space arguments, and similarly to split any strings that get
added. This allows us to Do the Right Thing with Append() and
Prepend() (as well as straight Python foo = env['VAR'] + 'arg1
arg2') regardless of whether a user adds a list or a string to a
command-line construction variable.
"""
def __init__(self, seq = []):
UserList.__init__(self, Split(seq))
def __add__(self, other):
return UserList.__add__(self, CLVar(other))
def __radd__(self, other):
return UserList.__radd__(self, CLVar(other))
def __coerce__(self, other):
return (self, CLVar(other))
def __str__(self):
return ' '.join(self.data)
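def _clvar_demo():
    """Hedged usage sketch (illustration only): strings are split on
    whitespace, and + keeps everything one flat CLVar."""
    flags = CLVar('-O2 -g') + '-Wall'
    assert list(flags) == ['-O2', '-g', '-Wall']
    assert str(flags) == '-O2 -g -Wall'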
# A dictionary that preserves the order in which items are added.
# Submitted by David Benjamin to ActiveState's Python Cookbook web site:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
# Including fixes/enhancements from the follow-on discussions.
class OrderedDict(UserDict):
def __init__(self, dict = None):
self._keys = []
UserDict.__init__(self, dict)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._keys.remove(key)
def __setitem__(self, key, item):
UserDict.__setitem__(self, key, item)
if key not in self._keys: self._keys.append(key)
def clear(self):
UserDict.clear(self)
self._keys = []
def copy(self):
dict = OrderedDict()
dict.update(self)
return dict
def items(self):
return list(zip(self._keys, list(self.values())))
def keys(self):
return self._keys[:]
def popitem(self):
try:
key = self._keys[-1]
except IndexError:
raise KeyError('dictionary is empty')
val = self[key]
del self[key]
return (key, val)
def setdefault(self, key, failobj = None):
UserDict.setdefault(self, key, failobj)
if key not in self._keys: self._keys.append(key)
def update(self, dict):
for (key, val) in dict.items():
self.__setitem__(key, val)
def values(self):
return list(map(self.get, self._keys))
class Selector(OrderedDict):
"""A callable ordered dictionary that maps file suffixes to
dictionary values. We preserve the order in which items are added
so that get_suffix() calls always return the first suffix added."""
def __call__(self, env, source, ext=None):
if ext is None:
try:
ext = source[0].get_suffix()
except IndexError:
ext = ""
try:
return self[ext]
except KeyError:
# Try to perform Environment substitution on the keys of
# the dictionary before giving up.
s_dict = {}
for (k,v) in self.items():
if k is not None:
s_k = env.subst(k)
if s_k in s_dict:
# We only raise an error when variables point
# to the same suffix. If one suffix is literal
# and a variable suffix contains this literal,
# the literal wins and we don't raise an error.
raise KeyError(s_dict[s_k][0], k, s_k)
s_dict[s_k] = (k,v)
try:
return s_dict[ext][1]
except KeyError:
try:
return self[None]
except KeyError:
return None
if sys.platform == 'cygwin':
# On Cygwin, os.path.normcase() lies, so just report back the
# fact that the underlying Windows OS is case-insensitive.
def case_sensitive_suffixes(s1, s2):
return 0
else:
def case_sensitive_suffixes(s1, s2):
return (os.path.normcase(s1) != os.path.normcase(s2))
def adjustixes(fname, pre, suf, ensure_suffix=False):
if pre:
path, fn = os.path.split(os.path.normpath(fname))
if fn[:len(pre)] != pre:
fname = os.path.join(path, pre + fn)
# Only append a suffix if the suffix we're going to add isn't already
# there, and if either we've been asked to ensure the specific suffix
# is present or there's no suffix on it at all.
if suf and fname[-len(suf):] != suf and \
(ensure_suffix or not splitext(fname)[1]):
fname = fname + suf
return fname
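def _adjustixes_demo():
    """Hedged usage sketch (illustration only)."""
    assert adjustixes('foo', 'lib', '.a') == 'libfoo.a'
    assert adjustixes('libfoo.a', 'lib', '.a') == 'libfoo.a'  # already fixed up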
# From Tim Peters,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# (Also in the printed Python Cookbook.)
def unique(s):
"""Return a list of the elements in s, but without duplicates.
For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3],
unique("abcabc") some permutation of ["a", "b", "c"], and
unique(([1, 2], [2, 3], [1, 2])) some permutation of
[[2, 3], [1, 2]].
For best speed, all sequence elements should be hashable. Then
unique() will usually work in linear time.
If not possible, the sequence elements should enjoy a total
ordering, and if list(s).sort() doesn't raise TypeError it's
assumed that they do enjoy a total ordering. Then unique() will
usually work in O(N*log2(N)) time.
If that's not possible either, the sequence elements must support
equality-testing. Then unique() will usually work in quadratic
time.
"""
n = len(s)
if n == 0:
return []
# Try using a dict first, as that's the fastest and will usually
# work. If it doesn't work, it will usually fail quickly, so it
# usually doesn't cost much to *try* it. It requires that all the
# sequence elements be hashable, and support equality comparison.
u = {}
try:
for x in s:
u[x] = 1
except TypeError:
pass # move on to the next method
else:
return list(u.keys())
del u
# We can't hash all the elements. Second fastest is to sort,
# which brings the equal elements together; then duplicates are
# easy to weed out in a single pass.
# NOTE: Python's list.sort() was designed to be efficient in the
# presence of many duplicate elements. This isn't true of all
# sort functions in all languages or libraries, so this approach
# is more effective in Python than it may be elsewhere.
try:
t = sorted(s)
except TypeError:
pass # move on to the next method
else:
assert n > 0
last = t[0]
lasti = i = 1
while i < n:
if t[i] != last:
t[lasti] = last = t[i]
lasti = lasti + 1
i = i + 1
return t[:lasti]
del t
# Brute force is all that's left.
u = []
for x in s:
if x not in u:
u.append(x)
return u
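def _unique_demo():
    """Hedged usage sketch (illustration only): the result's order is
    unspecified, so compare sorted."""
    assert sorted(unique([1, 2, 3, 1, 2, 3])) == [1, 2, 3]
    assert sorted(unique("abcabc")) == ['a', 'b', 'c']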
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
# A more efficient implementation of Alex's uniquer(), this avoids the
# idfun() argument and function-call overhead by assuming that all
# items in the sequence are hashable.
def uniquer_hashables(seq):
seen = {}
result = []
for item in seq:
#if not item in seen:
if item not in seen:
seen[item] = 1
result.append(item)
return result
# Recipe 19.11 "Reading Lines with Continuation Characters",
# by Alex Martelli, straight from the Python CookBook (2nd edition).
def logical_lines(physical_lines, joiner=''.join):
logical_line = []
for line in physical_lines:
stripped = line.rstrip()
if stripped.endswith('\\'):
# a line which continues w/the next physical line
logical_line.append(stripped[:-1])
else:
# a line which does not continue, end of logical line
logical_line.append(line)
yield joiner(logical_line)
logical_line = []
if logical_line:
# end of sequence implies end of last logical line
yield joiner(logical_line)
class LogicalLines(object):
""" Wrapper class for the logical_lines method.
Allows us to read all "logical" lines at once from a
given file object.
"""
def __init__(self, fileobj):
self.fileobj = fileobj
def readlines(self):
result = [l for l in logical_lines(self.fileobj)]
return result
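def _logical_lines_demo():
    """Hedged usage sketch (illustration only): a trailing backslash joins
    physical lines into one logical line."""
    import io
    src = io.StringIO('CFLAGS = -O2 \\\n  -Wall\nLDFLAGS = -s\n')
    assert LogicalLines(src).readlines() == ['CFLAGS = -O2   -Wall\n',
                                             'LDFLAGS = -s\n']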
class UniqueList(UserList):
def __init__(self, seq = []):
UserList.__init__(self, seq)
self.unique = True
def __make_unique(self):
if not self.unique:
self.data = uniquer_hashables(self.data)
self.unique = True
def __lt__(self, other):
self.__make_unique()
return UserList.__lt__(self, other)
def __le__(self, other):
self.__make_unique()
return UserList.__le__(self, other)
def __eq__(self, other):
self.__make_unique()
return UserList.__eq__(self, other)
def __ne__(self, other):
self.__make_unique()
return UserList.__ne__(self, other)
def __gt__(self, other):
self.__make_unique()
return UserList.__gt__(self, other)
def __ge__(self, other):
self.__make_unique()
return UserList.__ge__(self, other)
def __cmp__(self, other):
self.__make_unique()
return UserList.__cmp__(self, other)
def __len__(self):
self.__make_unique()
return UserList.__len__(self)
def __getitem__(self, i):
self.__make_unique()
return UserList.__getitem__(self, i)
def __setitem__(self, i, item):
UserList.__setitem__(self, i, item)
self.unique = False
def __getslice__(self, i, j):
self.__make_unique()
return UserList.__getslice__(self, i, j)
def __setslice__(self, i, j, other):
UserList.__setslice__(self, i, j, other)
self.unique = False
def __add__(self, other):
result = UserList.__add__(self, other)
result.unique = False
return result
def __radd__(self, other):
result = UserList.__radd__(self, other)
result.unique = False
return result
def __iadd__(self, other):
result = UserList.__iadd__(self, other)
result.unique = False
return result
def __mul__(self, other):
result = UserList.__mul__(self, other)
result.unique = False
return result
def __rmul__(self, other):
result = UserList.__rmul__(self, other)
result.unique = False
return result
def __imul__(self, other):
result = UserList.__imul__(self, other)
result.unique = False
return result
def append(self, item):
UserList.append(self, item)
self.unique = False
    def insert(self, i, item):
        # UserList.insert() takes both an index and an item; the original
        # one-argument signature would always raise TypeError.
        UserList.insert(self, i, item)
        self.unique = False
def count(self, item):
self.__make_unique()
return UserList.count(self, item)
def index(self, item):
self.__make_unique()
return UserList.index(self, item)
def reverse(self):
self.__make_unique()
UserList.reverse(self)
def sort(self, *args, **kwds):
self.__make_unique()
return UserList.sort(self, *args, **kwds)
def extend(self, other):
UserList.extend(self, other)
self.unique = False
class Unbuffered(object):
"""
A proxy class that wraps a file object, flushing after every write,
and delegating everything else to the wrapped object.
"""
def __init__(self, file):
self.file = file
self.softspace = 0 ## backward compatibility; not supported in Py3k
def write(self, arg):
try:
self.file.write(arg)
self.file.flush()
except IOError:
# Stdout might be connected to a pipe that has been closed
# by now. The most likely reason for the pipe being closed
            # is that the user has pressed ctrl-c. If this is the case,
            # then SCons is currently shutting down. We therefore ignore
            # IOError here so that SCons can continue and shut down
# properly so that the .sconsign is correctly written
# before SCons exits.
pass
def __getattr__(self, attr):
return getattr(self.file, attr)
def make_path_relative(path):
""" makes an absolute path name to a relative pathname.
"""
if os.path.isabs(path):
drive_s,path = os.path.splitdrive(path)
import re
if not drive_s:
path=re.compile("/*(.*)").findall(path)[0]
else:
path=path[1:]
assert( not os.path.isabs( path ) ), path
return path
# The original idea for AddMethod() and RenameFunction() come from the
# following post to the ActiveState Python Cookbook:
#
# ASPN: Python Cookbook : Install bound methods in an instance
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/223613
#
# That code was a little fragile, though, so the following changes
# have been wrung on it:
#
# * Switched the installmethod() "object" and "function" arguments,
# so the order reflects that the left-hand side is the thing being
# "assigned to" and the right-hand side is the value being assigned.
#
# * Changed explicit type-checking to the "try: klass = object.__class__"
# block in installmethod() below so that it still works with the
# old-style classes that SCons uses.
#
# * Replaced the by-hand creation of methods and functions with use of
# the "new" module, as alluded to in Alex Martelli's response to the
# following Cookbook post:
#
# ASPN: Python Cookbook : Dynamically added methods to a class
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/81732
def AddMethod(obj, function, name=None):
"""
    Adds either a bound method to an instance or the function itself
    (an unbound method in Python 2) to a class.
    If name is omitted the name of the specified function
    is used by default.
    Example::
        a = A()
        def f(self, x, y):
            self.z = x + y
        AddMethod(A, f, "add")
        a.add(2, 4)
        print(a.z)
        AddMethod(a, lambda self, i: self.l[i], "listIndex")
        print(a.listIndex(5))
"""
if name is None:
name = function.__name__
else:
function = RenameFunction(function, name)
# Note the Python version checks - WLB
# Python 3.3 dropped the 3rd parameter from types.MethodType
if hasattr(obj, '__class__') and obj.__class__ is not type:
# "obj" is an instance, so it gets a bound method.
if sys.version_info[:2] > (3, 2):
method = MethodType(function, obj)
else:
method = MethodType(function, obj, obj.__class__)
else:
# Handle classes
method = function
setattr(obj, name, method)
def RenameFunction(function, name):
"""
Returns a function identical to the specified function, but with
the specified name.
"""
return FunctionType(function.__code__,
function.__globals__,
name,
function.__defaults__)
md5 = False
def MD5signature(s):
return str(s)
def MD5filesignature(fname, chunksize=65536):
with open(fname, "rb") as f:
result = f.read()
return result
try:
import hashlib
except ImportError:
pass
else:
if hasattr(hashlib, 'md5'):
md5 = True
def MD5signature(s):
m = hashlib.md5()
try:
m.update(to_bytes(s))
except TypeError as e:
m.update(to_bytes(str(s)))
return m.hexdigest()
def MD5filesignature(fname, chunksize=65536):
m = hashlib.md5()
f = open(fname, "rb")
while True:
blck = f.read(chunksize)
if not blck:
break
m.update(to_bytes(blck))
f.close()
return m.hexdigest()
def MD5collect(signatures):
"""
Collects a list of signatures into an aggregate signature.
signatures - a list of signatures
returns - the aggregate signature
"""
if len(signatures) == 1:
return signatures[0]
else:
return MD5signature(', '.join(signatures))
def silent_intern(x):
"""
Perform sys.intern() on the passed argument and return the result.
If the input is ineligible (e.g. a unicode string) the original argument is
returned and no exception is thrown.
"""
try:
return sys.intern(x)
except TypeError:
return x
# From Dinu C. Gherman,
# Python Cookbook, second edition, recipe 6.17, p. 277.
# Also:
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/68205
# ASPN: Python Cookbook: Null Object Design Pattern
#TODO??? class Null(object):
class Null(object):
""" Null objects always and reliably "do nothing." """
    def __new__(cls, *args, **kwargs):
        if '_instance' not in vars(cls):
            # object.__new__ accepts no extra arguments on Python 3, so the
            # singleton is created without forwarding *args/**kwargs.
            cls._instance = super(Null, cls).__new__(cls)
        return cls._instance
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return self
def __repr__(self):
return "Null(0x%08X)" % id(self)
def __nonzero__(self):
return False
def __bool__(self):
return False
def __getattr__(self, name):
return self
def __setattr__(self, name, value):
return self
def __delattr__(self, name):
return self
class NullSeq(Null):
def __len__(self):
return 0
def __iter__(self):
return iter(())
def __getitem__(self, i):
return self
def __delitem__(self, i):
return self
def __setitem__(self, i, v):
return self
del __revision__
def to_bytes (s):
if isinstance (s, (bytes, bytearray)) or bytes is str:
return s
return bytes (s, 'utf-8')
def to_str (s):
if bytes is str or is_String(s):
return s
return str (s, 'utf-8')
# No cmp in py3, so we'll define it.
def cmp(a, b):
"""
Define cmp because it's no longer available in python3
Works under python 2 as well
"""
return (a > b) - (a < b)
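# Hedged examples (illustration only): cmp(1, 2) == -1, cmp(2, 2) == 0, cmp(3, 2) == 1.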
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
[
"[email protected]"
] | |
a410f6e281c8139f127daf8c8cac5080cc40c306
|
a0e777ea7e0d00c061068db132a30a8fa545cc75
|
/EffectivePython/item4.py
|
793758a82e0f7d72f099d9da42934bf39b0aae5f
|
[] |
no_license
|
aadisetyaa/Python-Cookbook
|
87215b64d2d3631d6b18e90a68a09400e7d80919
|
a8df0343a39725312686423296bfd860dbaf70ad
|
refs/heads/master
| 2022-04-08T13:41:27.255352 | 2017-11-27T03:54:29 | 2017-11-27T03:54:29 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 902 |
py
|
from urllib.parse import parse_qs
my_values = parse_qs('red=5&blue=0&green=', keep_blank_values=True)
#print(repr(my_values))
#print('Red: ', my_values.get('red', ['']))
#print('Green: ', my_values.get('green'))
#print('Opacity: ', my_values.get('opacity'))
#red = int(my_values.get('red', [''])[0] or 0)
#green = int(my_values.get('green', [''])[0] or 0)
opacity = int(my_values.get('opacity', [''])[0] or 0)
red = my_values.get('red', [''])
red = int(red[0]) if red[0] else 0
'''
green = my_values.get('green', [''])
if green[0]:
green = int(green[0])
else:
green = 0
'''
def get_first_int(values, key, default=0):
found = values.get(key, [''])
if found[0]:
found = int(found[0])
else:
found = default
return found
green = get_first_int(my_values, 'green')
print('Red: %r' % red)
print('Green: %r' % green)
print('Opacity: %r' % opacity)
|
[
"[email protected]"
] | |
d37848de8b37876c0e75748384723c224ddf4c87
|
4c601eaa346e660c296e270cc2d79aea9a3721fe
|
/homeassistant/components/synology_dsm/__init__.py
|
89dc39e427c58512730b8bd950ac8b479e7129f5
|
[
"Apache-2.0"
] |
permissive
|
basnijholt/home-assistant
|
f55110af9ff602274c0a929c7298ef97a0ef282f
|
ba55b4b8338a2dc0ba3f1d750efea49d86571291
|
refs/heads/dev
| 2023-01-21T11:53:52.621353 | 2020-08-08T15:03:06 | 2020-08-08T15:03:06 | 220,313,680 | 5 | 1 |
Apache-2.0
| 2023-01-13T06:04:49 | 2019-11-07T19:29:54 |
Python
|
UTF-8
|
Python
| false | false | 15,297 |
py
|
"""The Synology DSM component."""
import asyncio
from datetime import timedelta
import logging
from typing import Dict
from synology_dsm import SynologyDSM
from synology_dsm.api.core.security import SynoCoreSecurity
from synology_dsm.api.core.utilization import SynoCoreUtilization
from synology_dsm.api.dsm.information import SynoDSMInformation
from synology_dsm.api.dsm.network import SynoDSMNetwork
from synology_dsm.api.storage.storage import SynoStorage
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_DISKS,
CONF_HOST,
CONF_MAC,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CONF_VOLUMES,
DEFAULT_SCAN_INTERVAL,
DEFAULT_SSL,
DOMAIN,
ENTITY_CLASS,
ENTITY_ENABLE,
ENTITY_ICON,
ENTITY_NAME,
ENTITY_UNIT,
PLATFORMS,
STORAGE_DISK_BINARY_SENSORS,
STORAGE_DISK_SENSORS,
STORAGE_VOL_SENSORS,
SYNO_API,
TEMP_SENSORS_KEYS,
UNDO_UPDATE_LISTENER,
UTILISATION_SENSORS,
)
CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_DISKS): cv.ensure_list,
vol.Optional(CONF_VOLUMES): cv.ensure_list,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [CONFIG_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
ATTRIBUTION = "Data provided by Synology"
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up Synology DSM sensors from legacy config file."""
conf = config.get(DOMAIN)
if conf is None:
return True
for dsm_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dsm_conf,
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Set up Synology DSM sensors."""
api = SynoApi(hass, entry)
# Migrate old unique_id
@callback
def _async_migrator(entity_entry: entity_registry.RegistryEntry):
"""Migrate away from ID using label."""
# Reject if new unique_id
if "SYNO." in entity_entry.unique_id:
return None
entries = {
**STORAGE_DISK_BINARY_SENSORS,
**STORAGE_DISK_SENSORS,
**STORAGE_VOL_SENSORS,
**UTILISATION_SENSORS,
}
infos = entity_entry.unique_id.split("_")
serial = infos.pop(0)
label = infos.pop(0)
device_id = "_".join(infos)
# Removed entity
if (
"Type" in entity_entry.unique_id
or "Device" in entity_entry.unique_id
or "Name" in entity_entry.unique_id
):
return None
entity_type = None
for entity_key, entity_attrs in entries.items():
if (
device_id
and entity_attrs[ENTITY_NAME] == "Status"
and "Status" in entity_entry.unique_id
and "(Smart)" not in entity_entry.unique_id
):
if "sd" in device_id and "disk" in entity_key:
entity_type = entity_key
continue
if "volume" in device_id and "volume" in entity_key:
entity_type = entity_key
continue
if entity_attrs[ENTITY_NAME] == label:
entity_type = entity_key
new_unique_id = "_".join([serial, entity_type])
if device_id:
new_unique_id += f"_{device_id}"
_LOGGER.info(
"Migrating unique_id from [%s] to [%s]",
entity_entry.unique_id,
new_unique_id,
)
return {"new_unique_id": new_unique_id}
await entity_registry.async_migrate_entries(hass, entry.entry_id, _async_migrator)
# Continue setup
await api.async_setup()
undo_listener = entry.add_update_listener(_async_update_listener)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.unique_id] = {
SYNO_API: api,
UNDO_UPDATE_LISTENER: undo_listener,
}
# For SSDP compat
if not entry.data.get(CONF_MAC):
network = await hass.async_add_executor_job(getattr, api.dsm, "network")
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_MAC: network.macs}
)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload Synology DSM sensors."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
entry_data = hass.data[DOMAIN][entry.unique_id]
entry_data[UNDO_UPDATE_LISTENER]()
await entry_data[SYNO_API].async_unload()
hass.data[DOMAIN].pop(entry.unique_id)
return unload_ok
async def _async_update_listener(hass: HomeAssistantType, entry: ConfigEntry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
class SynoApi:
"""Class to interface with Synology DSM API."""
def __init__(self, hass: HomeAssistantType, entry: ConfigEntry):
"""Initialize the API wrapper class."""
self._hass = hass
self._entry = entry
# DSM APIs
self.dsm: SynologyDSM = None
self.information: SynoDSMInformation = None
self.network: SynoDSMNetwork = None
self.security: SynoCoreSecurity = None
self.storage: SynoStorage = None
self.utilisation: SynoCoreUtilization = None
# Should we fetch them
self._fetching_entities = {}
self._with_security = True
self._with_storage = True
self._with_utilisation = True
self._unsub_dispatcher = None
@property
def signal_sensor_update(self) -> str:
"""Event specific per Synology DSM entry to signal updates in sensors."""
return f"{DOMAIN}-{self.information.serial}-sensor-update"
async def async_setup(self):
"""Start interacting with the NAS."""
self.dsm = SynologyDSM(
self._entry.data[CONF_HOST],
self._entry.data[CONF_PORT],
self._entry.data[CONF_USERNAME],
self._entry.data[CONF_PASSWORD],
self._entry.data[CONF_SSL],
device_token=self._entry.data.get("device_token"),
)
self._async_setup_api_requests()
await self._hass.async_add_executor_job(self._fetch_device_configuration)
await self.async_update()
self._unsub_dispatcher = async_track_time_interval(
self._hass,
self.async_update,
timedelta(
minutes=self._entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
),
)
@callback
def subscribe(self, api_key, unique_id):
"""Subscribe an entity from API fetches."""
if api_key not in self._fetching_entities:
self._fetching_entities[api_key] = set()
self._fetching_entities[api_key].add(unique_id)
@callback
def unsubscribe() -> None:
"""Unsubscribe an entity from API fetches (when disable)."""
self._fetching_entities[api_key].remove(unique_id)
return unsubscribe
@callback
def _async_setup_api_requests(self):
"""Determine if we should fetch each API, if one entity needs it."""
# Entities not added yet, fetch all
if not self._fetching_entities:
return
# Determine if we should fetch an API
self._with_security = bool(
self._fetching_entities.get(SynoCoreSecurity.API_KEY)
)
self._with_storage = bool(self._fetching_entities.get(SynoStorage.API_KEY))
self._with_utilisation = bool(
self._fetching_entities.get(SynoCoreUtilization.API_KEY)
)
# Reset not used API
if not self._with_security:
self.dsm.reset(self.security)
self.security = None
if not self._with_storage:
self.dsm.reset(self.storage)
self.storage = None
if not self._with_utilisation:
self.dsm.reset(self.utilisation)
self.utilisation = None
def _fetch_device_configuration(self):
"""Fetch initial device config."""
self.information = self.dsm.information
self.network = self.dsm.network
if self._with_security:
self.security = self.dsm.security
if self._with_storage:
self.storage = self.dsm.storage
if self._with_utilisation:
self.utilisation = self.dsm.utilisation
async def async_unload(self):
"""Stop interacting with the NAS and prepare for removal from hass."""
self._unsub_dispatcher()
async def async_update(self, now=None):
"""Update function for updating API information."""
self._async_setup_api_requests()
await self._hass.async_add_executor_job(self.dsm.update)
async_dispatcher_send(self._hass, self.signal_sensor_update)
class SynologyDSMEntity(Entity):
"""Representation of a Synology NAS entry."""
def __init__(
self, api: SynoApi, entity_type: str, entity_info: Dict[str, str],
):
"""Initialize the Synology DSM entity."""
self._api = api
self._api_key = entity_type.split(":")[0]
self.entity_type = entity_type.split(":")[-1]
self._name = f"{api.network.hostname} {entity_info[ENTITY_NAME]}"
self._class = entity_info[ENTITY_CLASS]
self._enable_default = entity_info[ENTITY_ENABLE]
self._icon = entity_info[ENTITY_ICON]
self._unit = entity_info[ENTITY_UNIT]
self._unique_id = f"{self._api.information.serial}_{entity_type}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name."""
return self._name
@property
def icon(self) -> str:
"""Return the icon."""
return self._icon
@property
def unit_of_measurement(self) -> str:
"""Return the unit the value is expressed in."""
if self.entity_type in TEMP_SENSORS_KEYS:
return self.hass.config.units.temperature_unit
return self._unit
@property
def device_class(self) -> str:
"""Return the class of this device."""
return self._class
@property
def device_state_attributes(self) -> Dict[str, any]:
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._api.information.serial)},
"name": "Synology NAS",
"manufacturer": "Synology",
"model": self._api.information.model,
"sw_version": self._api.information.version_string,
}
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enable_default
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
async def async_update(self):
"""Only used by the generic entity update service."""
if not self.enabled:
return
await self._api.async_update()
async def async_added_to_hass(self):
"""Register state update callback."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, self._api.signal_sensor_update, self.async_write_ha_state
)
)
self.async_on_remove(self._api.subscribe(self._api_key, self.unique_id))
class SynologyDSMDeviceEntity(SynologyDSMEntity):
"""Representation of a Synology NAS disk or volume entry."""
def __init__(
self,
api: SynoApi,
entity_type: str,
entity_info: Dict[str, str],
device_id: str = None,
):
"""Initialize the Synology DSM disk or volume entity."""
super().__init__(api, entity_type, entity_info)
self._device_id = device_id
self._device_name = None
self._device_manufacturer = None
self._device_model = None
self._device_firmware = None
self._device_type = None
if "volume" in entity_type:
volume = self._api.storage._get_volume(self._device_id)
# Volume does not have a name
self._device_name = volume["id"].replace("_", " ").capitalize()
self._device_manufacturer = "Synology"
self._device_model = self._api.information.model
self._device_firmware = self._api.information.version_string
self._device_type = (
volume["device_type"]
.replace("_", " ")
.replace("raid", "RAID")
.replace("shr", "SHR")
)
elif "disk" in entity_type:
disk = self._api.storage._get_disk(self._device_id)
self._device_name = disk["name"]
self._device_manufacturer = disk["vendor"]
self._device_model = disk["model"].strip()
self._device_firmware = disk["firm"]
self._device_type = disk["diskType"]
self._name = f"{self._api.network.hostname} {self._device_name} {entity_info[ENTITY_NAME]}"
self._unique_id += f"_{self._device_id}"
@property
def available(self) -> bool:
"""Return True if entity is available."""
return bool(self._api.storage)
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._api.information.serial, self._device_id)},
"name": f"Synology NAS ({self._device_name} - {self._device_type})",
"manufacturer": self._device_manufacturer,
"model": self._device_model,
"sw_version": self._device_firmware,
"via_device": (DOMAIN, self._api.information.serial),
}
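# --- Editor's sketch (assumption, not part of the upstream component) ---
# The parsed `config` dict that async_setup() above would receive via the
# legacy YAML path looks roughly like this; the keys reuse the CONF_*
# constants validated by CONFIG_SCHEMA and the values are placeholders.
EXAMPLE_LEGACY_CONFIG = {
    DOMAIN: [
        {
            CONF_HOST: "192.168.1.5",
            CONF_PORT: 5001,
            CONF_SSL: True,
            CONF_USERNAME: "dsm_user",
            CONF_PASSWORD: "dsm_password",
            CONF_DISKS: ["sda", "sdb"],      # optional
            CONF_VOLUMES: ["volume_1"],      # optional
        }
    ]
}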
|
[
"[email protected]"
] | |
f2d3975eb900b5c88164db165ef91f9f5f8ff734
|
9d177d4133f64ef16daab9dd71bffcc8fbb766d9
|
/tests/core/v5/test_packet_preparation.py
|
dae4e86405ebb5d325b341e018cbfbcc26077c6b
|
[
"MIT"
] |
permissive
|
pipermerriam/ddht
|
8afb799742e1a4467b9622a65758525f7edc9f18
|
7e0b66ac96591a590e40c7e15d880af01e1f1a36
|
refs/heads/master
| 2023-08-28T08:55:18.366964 | 2020-09-11T15:26:00 | 2020-09-11T15:26:00 | 282,279,617 | 2 | 0 |
MIT
| 2020-07-24T17:31:02 | 2020-07-24T17:31:02 | null |
UTF-8
|
Python
| false | false | 11,680 |
py
|
from eth_enr import ENR
from eth_utils import decode_hex, int_to_big_endian, is_list_like
from hypothesis import given
import pytest
import rlp
from ddht.encryption import aesgcm_decrypt
from ddht.handshake_schemes import V4HandshakeScheme
from ddht.tools.v5_strategies import (
enr_seq_st,
id_nonce_st,
key_st,
node_id_st,
nonce_st,
public_key_st,
random_data_st,
tag_st,
)
from ddht.v5.constants import (
AUTH_RESPONSE_VERSION,
AUTH_SCHEME_NAME,
MAGIC_SIZE,
ZERO_NONCE,
)
from ddht.v5.messages import PingMessage, v5_registry
from ddht.v5.packets import (
AuthHeader,
AuthHeaderPacket,
AuthTagPacket,
WhoAreYouPacket,
compute_encrypted_auth_response,
)
@given(
tag=tag_st,
auth_tag=nonce_st,
id_nonce=id_nonce_st,
initiator_key=key_st,
auth_response_key=key_st,
ephemeral_public_key=public_key_st,
)
def test_auth_header_preparation(
tag, auth_tag, id_nonce, initiator_key, auth_response_key, ephemeral_public_key
):
enr = ENR(
sequence_number=1,
signature=b"",
kv_pairs={b"id": b"v4", b"secp256k1": b"\x02" * 33},
)
message = PingMessage(request_id=5, enr_seq=enr.sequence_number)
id_nonce_signature = b"\x00" * 32
packet = AuthHeaderPacket.prepare(
tag=tag,
auth_tag=auth_tag,
id_nonce=id_nonce,
message=message,
initiator_key=initiator_key,
id_nonce_signature=id_nonce_signature,
auth_response_key=auth_response_key,
enr=enr,
ephemeral_public_key=ephemeral_public_key,
)
assert packet.tag == tag
assert packet.auth_header.auth_tag == auth_tag
assert packet.auth_header.id_nonce == id_nonce
assert packet.auth_header.auth_scheme_name == AUTH_SCHEME_NAME
assert packet.auth_header.ephemeral_public_key == ephemeral_public_key
decrypted_auth_response = aesgcm_decrypt(
key=auth_response_key,
nonce=ZERO_NONCE,
cipher_text=packet.auth_header.encrypted_auth_response,
authenticated_data=b"",
)
decoded_auth_response = rlp.decode(decrypted_auth_response)
assert is_list_like(decoded_auth_response) and len(decoded_auth_response) == 3
assert decoded_auth_response[0] == int_to_big_endian(AUTH_RESPONSE_VERSION)
assert decoded_auth_response[1] == id_nonce_signature
assert ENR.deserialize(decoded_auth_response[2]) == enr
decrypted_message = aesgcm_decrypt(
key=initiator_key,
nonce=auth_tag,
cipher_text=packet.encrypted_message,
authenticated_data=tag,
)
assert decrypted_message[0] == message.message_type
assert rlp.decode(decrypted_message[1:], PingMessage) == message
@given(tag=tag_st, auth_tag=nonce_st, random_data=random_data_st)
def test_random_packet_preparation(tag, auth_tag, random_data):
packet = AuthTagPacket.prepare_random(
tag=tag, auth_tag=auth_tag, random_data=random_data
)
assert packet.tag == tag
assert packet.auth_tag == auth_tag
assert packet.encrypted_message == random_data
@given(
tag=tag_st,
auth_tag=nonce_st,
id_nonce=id_nonce_st,
initiator_key=key_st,
auth_response_key=key_st,
ephemeral_public_key=public_key_st,
)
def test_auth_header_preparation_without_enr(
tag, auth_tag, id_nonce, initiator_key, auth_response_key, ephemeral_public_key
):
message = PingMessage(request_id=5, enr_seq=1)
id_nonce_signature = b"\x00" * 32
packet = AuthHeaderPacket.prepare(
tag=tag,
auth_tag=auth_tag,
id_nonce=id_nonce,
message=message,
initiator_key=initiator_key,
id_nonce_signature=id_nonce_signature,
auth_response_key=auth_response_key,
enr=None,
ephemeral_public_key=ephemeral_public_key,
)
decrypted_auth_response = aesgcm_decrypt(
key=auth_response_key,
nonce=ZERO_NONCE,
cipher_text=packet.auth_header.encrypted_auth_response,
authenticated_data=b"",
)
decoded_auth_response = rlp.decode(decrypted_auth_response)
assert is_list_like(decoded_auth_response) and len(decoded_auth_response) == 3
assert decoded_auth_response[0] == int_to_big_endian(AUTH_RESPONSE_VERSION)
assert decoded_auth_response[1] == id_nonce_signature
assert decoded_auth_response[2] == []
@given(node_id=node_id_st, token=nonce_st, id_nonce=id_nonce_st, enr_seq=enr_seq_st)
def test_who_are_you_preparation(node_id, token, id_nonce, enr_seq):
packet = WhoAreYouPacket.prepare(
destination_node_id=node_id,
token=token,
id_nonce=id_nonce,
enr_sequence_number=enr_seq,
)
assert packet.token == token
assert packet.id_nonce == id_nonce
assert packet.enr_sequence_number == enr_seq
assert len(packet.magic) == MAGIC_SIZE
@given(tag=tag_st, auth_tag=nonce_st, key=key_st)
def test_auth_tag_packet_preparation(tag, auth_tag, key):
message = PingMessage(request_id=5, enr_seq=3)
packet = AuthTagPacket.prepare(tag=tag, auth_tag=auth_tag, message=message, key=key)
assert packet.tag == tag
assert packet.auth_tag == auth_tag
decrypted_message = aesgcm_decrypt(
key=key,
nonce=auth_tag,
cipher_text=packet.encrypted_message,
authenticated_data=tag,
)
assert decrypted_message[0] == message.message_type
assert rlp.decode(decrypted_message[1:], PingMessage) == message
@pytest.mark.parametrize(
[
"id_nonce",
"secret_key",
"enr",
"auth_response_key",
"ephemeral_public_key",
"auth_cipher_text",
],
[
[
decode_hex(
"0xe551b1c44264ab92bc0b3c9b26293e1ba4fed9128f3c3645301e8e119f179c65"
),
decode_hex(
"0x7e8107fe766b6d357205280acf65c24275129ca9e44c0fd00144ca50024a1ce7"
),
None,
decode_hex("0x8c7caa563cebc5c06bb15fc1a2d426c3"),
decode_hex(
"0xb35608c01ee67edff2cffa424b219940a81cf2fb9b66068b1cf96862a17d353e22524fbdcdebc609"
"f85cbd58ebe7a872b01e24a3829b97dd5875e8ffbc4eea81"
),
decode_hex(
"0x570fbf23885c674867ab00320294a41732891457969a0f14d11c995668858b2ad731aa7836888020"
"e2ccc6e0e5776d0d4bc4439161798565a4159aa8620992fb51dcb275c4f755c8b8030c82918898f1ac"
"387f606852"
),
]
],
)
def test_official_auth_response_encryption(
secret_key, id_nonce, enr, auth_response_key, ephemeral_public_key, auth_cipher_text
):
id_nonce_signature = V4HandshakeScheme.create_id_nonce_signature(
id_nonce=id_nonce,
private_key=secret_key,
ephemeral_public_key=ephemeral_public_key,
)
assert (
compute_encrypted_auth_response(
auth_response_key=auth_response_key,
id_nonce_signature=id_nonce_signature,
enr=enr,
)
== auth_cipher_text
)
@pytest.mark.parametrize(
[
"auth_tag",
"id_nonce",
"ephemeral_public_key",
"auth_cipher_text",
"auth_header_rlp",
],
[
[
decode_hex("0x27b5af763c446acd2749fe8e"),
decode_hex(
"0xe551b1c44264ab92bc0b3c9b26293e1ba4fed9128f3c3645301e8e119f179c65"
),
decode_hex(
"0xb35608c01ee67edff2cffa424b219940a81cf2fb9b66068b1cf96862a17d353e22524fbdcdebc609"
"f85cbd58ebe7a872b01e24a3829b97dd5875e8ffbc4eea81"
),
decode_hex(
"0x570fbf23885c674867ab00320294a41732891457969a0f14d11c995668858b2ad731aa7836888020"
"e2ccc6e0e5776d0d4bc4439161798565a4159aa8620992fb51dcb275c4f755c8b8030c82918898f1ac"
"387f606852"
),
decode_hex(
"0xf8cc8c27b5af763c446acd2749fe8ea0e551b1c44264ab92bc0b3c9b26293e1ba4fed9128f3c3645"
"301e8e119f179c658367636db840b35608c01ee67edff2cffa424b219940a81cf2fb9b66068b1cf968"
"62a17d353e22524fbdcdebc609f85cbd58ebe7a872b01e24a3829b97dd5875e8ffbc4eea81b856570f"
"bf23885c674867ab00320294a41732891457969a0f14d11c995668858b2ad731aa7836888020e2ccc6"
"e0e5776d0d4bc4439161798565a4159aa8620992fb51dcb275c4f755c8b8030c82918898f1ac387f60"
"6852"
),
]
],
)
def test_official_auth_header_encoding(
auth_tag, id_nonce, ephemeral_public_key, auth_cipher_text, auth_header_rlp
):
header = AuthHeader(
auth_tag=auth_tag,
id_nonce=id_nonce,
auth_scheme_name=AUTH_SCHEME_NAME,
ephemeral_public_key=ephemeral_public_key,
encrypted_auth_response=auth_cipher_text,
)
assert rlp.encode(header) == auth_header_rlp
@pytest.mark.parametrize(
[
"tag",
"auth_tag",
"id_nonce",
"encoded_message",
"local_private_key",
"auth_response_key",
"encryption_key",
"ephemeral_public_key",
"auth_message_rlp",
],
[
[
decode_hex(
"0x93a7400fa0d6a694ebc24d5cf570f65d04215b6ac00757875e3f3a5f42107903"
),
decode_hex("0x27b5af763c446acd2749fe8e"),
decode_hex(
"0xe551b1c44264ab92bc0b3c9b26293e1ba4fed9128f3c3645301e8e119f179c65"
),
decode_hex("0x01c20101"),
decode_hex(
"0x7e8107fe766b6d357205280acf65c24275129ca9e44c0fd00144ca50024a1ce7"
),
decode_hex("0x8c7caa563cebc5c06bb15fc1a2d426c3"),
decode_hex("0x9f2d77db7004bf8a1a85107ac686990b"),
decode_hex(
"0xb35608c01ee67edff2cffa424b219940a81cf2fb9b66068b1cf96862a17d353e22524fbdcdebc609"
"f85cbd58ebe7a872b01e24a3829b97dd5875e8ffbc4eea81"
),
decode_hex(
"0x93a7400fa0d6a694ebc24d5cf570f65d04215b6ac00757875e3f3a5f42107903f8cc8c27b5af763c"
"446acd2749fe8ea0e551b1c44264ab92bc0b3c9b26293e1ba4fed9128f3c3645301e8e119f179c6583"
"67636db840b35608c01ee67edff2cffa424b219940a81cf2fb9b66068b1cf96862a17d353e22524fbd"
"cdebc609f85cbd58ebe7a872b01e24a3829b97dd5875e8ffbc4eea81b856570fbf23885c674867ab00"
"320294a41732891457969a0f14d11c995668858b2ad731aa7836888020e2ccc6e0e5776d0d4bc44391"
"61798565a4159aa8620992fb51dcb275c4f755c8b8030c82918898f1ac387f606852a5d12a2d94b8cc"
"b3ba55558229867dc13bfa3648"
),
]
],
)
def test_official_auth_header_packet_preparation(
tag,
auth_tag,
id_nonce,
encoded_message,
local_private_key,
auth_response_key,
encryption_key,
ephemeral_public_key,
auth_message_rlp,
):
message_type_id = encoded_message[0]
message_type = v5_registry[message_type_id]
message = rlp.decode(encoded_message[1:], message_type)
assert message.to_bytes() == encoded_message
id_nonce_signature = V4HandshakeScheme.create_id_nonce_signature(
id_nonce=id_nonce,
ephemeral_public_key=ephemeral_public_key,
private_key=local_private_key,
)
packet = AuthHeaderPacket.prepare(
tag=tag,
auth_tag=auth_tag,
id_nonce=id_nonce,
message=message,
initiator_key=encryption_key,
id_nonce_signature=id_nonce_signature,
auth_response_key=auth_response_key,
enr=None,
ephemeral_public_key=ephemeral_public_key,
)
packet_wire_bytes = packet.to_wire_bytes()
assert packet_wire_bytes == auth_message_rlp
|
[
"[email protected]"
] | |
ca87ff0a336058adb8d8daa3ec98166ca00c8011
|
40fc10c31449faca8a3235d34b422c322db8af6e
|
/src/pyasl/asl/binning.py
|
54c5604387cecbbd9b56c5d52f171313d02642cf
|
[
"MIT"
] |
permissive
|
voneiden/PyAstronomy
|
93d27e9d82bd54ecaf55b8fcbcba5ecbe7021997
|
d7e92fb84aad0995bc9c3e38538f44d1d9fa6200
|
refs/heads/master
| 2021-01-16T20:52:04.139625 | 2013-07-04T14:00:12 | 2013-07-04T14:00:12 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 5,488 |
py
|
# -*- coding: utf-8 -*-
from PyAstronomy.pyaC import pyaErrors as PE
import numpy as np
def binningx0dt(x, y, yerr=None, x0=None, dt=None, nbins=None, reduceBy=None, removeEmpty=True, \
removeNoError=False, useBinCenter=True):
"""
A simple binning algorithm.
This algorithm uses a fixed bin-width to produce a binned
data set. Either the bin-width, `dt`, or the number of bins,
`nbins`, must be specified. The number of output bins may
also depend on other flags such as, for example, `removeNoError`.
If no errors are specified via `yerr`, the errors for the binned
data are estimated as the standard deviation of the input data
points divided by the square root of their number. If `yerr` has
been specified, error propagation is used to determine the error.
The behavior of the time axis can be controlled via the
`useBinCenter` flag.
Values which cannot be determined will be indicated by NaN.
Various flags can be used to remove such bins from the binned
data set.
Parameters
----------
x, y : array
The x and y data values.
yerr : array, optional
Errors on the data values.
x0 : float, optional
Starting time of first bin.
Default is lowest given x value.
dt : float, optional
Width of a bin (either `dt`, `nbins` or `reduceBy` must
be given).
nbins : int, optional
Number of bins to use (either `dt`, `nbins` or `reduceBy` must
be given). Note that this specifies the number of bins into which
the range from `x0` to the last data point is subdivided.
reduceBy : int, optional
Reduce the number of elements in the array by the given factor
(either `dt`, `nbins` or `reduceBy` must be given). Note that
in this case, `x0` is set to the first (minimum x-value) and
the number of bins, n, is calculated according to the
prescription: :math:`n = int(round(len(x)/reduceBy))`
removeEmpty : boolean, optional
If True (default), bins with no data points will be
removed from the result.
removeNoError : boolean, optional
If True, bins for which no error can be determined
will be removed from the result. Default is False.
useBinCenter : boolean, optional
If True (default), the time axis will refer to the
center of the bins. Otherwise the numbers refer to
the start of the bins.
Returns
-------
Binned LC : array
An array with four columns: 1) The binned time axis,
2) The binned data, 3) Error of binned data, 4) The
number of input data points used to create the bin, e.g.
the new x-values are LC[::,0].
dt : float
The width of the bins.
"""
  if ((dt is not None) + (nbins is not None) + (reduceBy is not None)) != 1:
    raise(PE.PyAParameterConflict("Specify one of `dt`, `nbins`, or `reduceBy`."))
  # `x0` and `reduceBy` are mutually exclusive; both may be omitted, in which
  # case `x0` defaults to the lowest x value (see the docstring).
  if ((x0 is not None) + (reduceBy is not None)) > 1:
    raise(PE.PyAParameterConflict("Specify either `x0` or `reduceBy`."))
if x0 is None:
# Use first time as starting point
x0 = np.min(x)
if x0 > np.max(x):
raise(PE.PyAValError("The starting point, `x0`, is larger than the end time of the data.", \
solution="Use a smaller value."))
# Calculate the new number of array elements.
if reduceBy is not None:
nbins = int(round(len(x)/float(reduceBy)))
if nbins == 0: nbins=1 # Prevent empty return arrays
if nbins is not None:
# Use a specified number of bins.
# Calculate bin length
dt = (np.max(x) - x0)/float(nbins)
# Start calculation
# In which bin do the individual data points belong?
  # Cast to integer so the bin indices can be used for array indexing.
  inWhichBin = np.floor((x - x0) / dt).astype(int)
  # Lonely last bin correction:
  # brings the last data point into the last valid bin instead of creating
  # a new bin with that data point at its very beginning.
  if nbins is not None:
    inWhichBin[np.where(inWhichBin == nbins)[0]] -= 1
  # Get the number of bins (start at x0 even if the
  # first bins do not contain any data points)
  nbins = int(np.max(inWhichBin)) + 1
# Bins with data
bwd = np.unique(inWhichBin)
# Sort data into the bins
# Create output array (time, flux, error, data-point-counter)
result = np.empty( (nbins, 4) )
result[:] = np.NAN
# Assign time axis (beginning of bins)
result[::,0] = x0 + np.arange(nbins) * dt
if useBinCenter:
# Use the center of the bin for timing
result[::,0] += (0.5 * dt)
# Set data point counter (points/bin) to zero
result[::,3] = 0
for b in bwd:
indi = np.where(inWhichBin == b)[0]
result[b, 3] = len(indi)
result[b, 1] = np.mean(y[indi])
if yerr is None:
# No errors on data points are given
if len(indi) > 1:
result[b, 2] = np.std(y[indi]) / np.sqrt(result[b, 3])
else:
# No error if there is only a single point in the bin
result[b, 2] = np.NAN
else:
# There are errors on the data points
# Use error propagation
result[b, 2] = np.sqrt(np.sum(yerr[indi]**2)) / result[b, 3]
if removeEmpty:
# Remove bins without data points in it
indi = np.where(np.invert(np.isnan(result[::,1])))[0]
result = result[indi,::]
if removeNoError:
# Remove bins for which no error can be given
indi = np.where(np.invert(np.isnan(result[::,2])))[0]
result = result[indi,::]
return result, dt
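# --- Editor's usage sketch (not part of the original module; the data are
# illustrative and rely only on NumPy, already imported above) ---
if __name__ == "__main__":
  xdemo = np.arange(100) / 10.0          # 100 samples spanning 0..9.9
  ydemo = np.sin(xdemo)
  binned, dt_used = binningx0dt(xdemo, ydemo, x0=0.0, dt=1.0)
  # Column layout: 0 = bin center, 1 = mean, 2 = error estimate, 3 = points/bin
  print(binned.shape, dt_used)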
|
[
"[email protected]"
] | |
665e2dbb2e727b9f06db39c73e7338cd30f993c1
|
c0046aa23b6260b1fce3dc8932b62c638fe6aa46
|
/HcalTasks/python/HcalNoiseTask.py
|
99a28865cf5c4433de438d8508c875eb02ac42e4
|
[] |
no_license
|
HCALDQM/DQM
|
8c9f08fe89b844054069aba2ca893a831356b0ef
|
fa36a791f0527d0e3e96bd70c0776697a630b67c
|
refs/heads/master
| 2020-04-06T07:05:20.336782 | 2015-10-14T07:46:58 | 2015-10-14T07:46:58 | 41,037,040 | 0 | 5 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,900 |
py
|
import FWCore.ParameterSet.Config as cms
import DQM.HcalCommon.HcalDQStandard as standard
StandardSet = standard.StandardSet.clone()
# List of FEDs
lFEDs = [x+700 for x in range(32)] + [929, 1118, 1120, 1122]
moduleName = "HcalNoiseTask"
# Modify whatever was imported via StandardSet
StandardSet.moduleParameters.name = cms.untracked.string(moduleName)
StandardSet.EventsProcessed.path = cms.untracked.string(
"%s/" % moduleName)
StandardSet.EventsProcessedPerLS.path = cms.untracked.string(
"%s/" % moduleName)
StandardSet.Standard2DMap.path = cms.untracked.string(
"%s/" % moduleName)
StandardSet.Standard2DMap.desc = cms.untracked.string(
"Some Noise Task 2D Map")
# Main Task Description
hcalNoiseTask = cms.EDAnalyzer(
moduleName,
moduleParameters = StandardSet.moduleParameters,
MEs = cms.untracked.PSet(
EventsProcessed = StandardSet.EventsProcessed,
EventsProcessedPerLS = StandardSet.EventsProcessedPerLS,
HENoiseShape = cms.untracked.PSet(
path = cms.untracked.string("%s/HE" % moduleName),
kind = cms.untracked.string("TH1D"),
desc = cms.untracked.string("HE Noise Shape"),
xaxis = cms.untracked.PSet(
edges = cms.untracked.bool(False),
nbins = cms.untracked.int32(10),
min = cms.untracked.double(0.),
max = cms.untracked.double(10.),
title = cms.untracked.string("TS")
)
),
HFNoiseShape = cms.untracked.PSet(
path = cms.untracked.string("%s/HF" % moduleName),
kind = cms.untracked.string("TH1D"),
desc = cms.untracked.string("HF Noise Shape"),
xaxis = cms.untracked.PSet(
edges = cms.untracked.bool(False),
nbins = cms.untracked.int32(10),
min = cms.untracked.double(0.),
max = cms.untracked.double(10.),
title = cms.untracked.string("TS")
)
),
HONoiseShape = cms.untracked.PSet(
path = cms.untracked.string("%s/HO" % moduleName),
kind = cms.untracked.string("TH1D"),
desc = cms.untracked.string("HO Noise Shape"),
xaxis = cms.untracked.PSet(
edges = cms.untracked.bool(False),
nbins = cms.untracked.int32(10),
min = cms.untracked.double(0.),
max = cms.untracked.double(10.),
title = cms.untracked.string("TS")
)
),
NoiseSizeCheck = StandardSet.Standard2DMap
# me4 = cms.untracked.PSet(
# path = cms.untracked.string("%s/" % moduleName),
# kind = cms.untracked.string("PROF"),
# desc = cms.untracked.string("Example ME4"),
# xaxis = cms.untracked.PSet(
# edges = cms.untracked.bool(False),
# nbins = cms.untracked.int32(200),
# min = cms.untracked.double(-100),
# max = cms.untracked.double(100),
# title = cms.untracked.string("me4-X")
# ),
# yaxis = cms.untracked.PSet(
# wnbins = cms.untracked.bool(True),
# nbins = cms.untracked.int32(100),
# min = cms.untracked.double(-50),
# max = cms.untracked.double(50),
# title = cms.untracked.string("me4-Y")
# )
# )
)
)
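# --- Editor's sketch (assumption: typical CMSSW wiring; the cms.Process
# setup itself is not part of this configuration fragment) ---
#   process.hcalNoiseTask = hcalNoiseTask
#   process.tasksPath = cms.Path(process.hcalNoiseTask)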
|
[
"[email protected]"
] | |
7aeb14ec178c1278a0ad73d90f369a0ce020c9c0
|
c46cbaefaf2ddce20f4d69d79dc8ad786c71ca9b
|
/src/keystore/javacard/applets/securechannel.py
|
082793c09dbd69499c1bf5a407261c4cbf17bb6c
|
[
"MIT",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
xavierfiechter/specter-diy
|
eeca22ca1984051edeef5abd1863d8b6b25f22e1
|
2c7b6244aca54eca19c60fad9d44f4884ba0ad62
|
refs/heads/master
| 2023-03-29T23:54:31.340395 | 2021-03-30T12:52:54 | 2021-03-30T12:52:54 | 343,566,546 | 0 | 0 |
MIT
| 2021-03-30T12:40:30 | 2021-03-01T21:48:30 |
Python
|
UTF-8
|
Python
| false | false | 7,364 |
py
|
from ..util import encode
import secp256k1
import hashlib, hmac
from io import BytesIO
from rng import get_random_bytes
from ucryptolib import aes
from binascii import hexlify
AES_BLOCK = 16
IV_SIZE = 16
MAC_SIZE = 14
AES_CBC = 2
class SecureError(Exception):
"""
Raised when something went wrong with the
secure channel (i.e. signature is invalid etc)
"""
pass
class SecureChannel:
"""
Class that implements secure communication with the card.
"""
GET_PUBKEY = b"\xB0\xB2\x00\x00"
OPEN_EE = b"\xB0\xB5\x00\x00"
OPEN_SE = b"\xB0\xB4\x00\x00"
SECURE_MSG = b"\xB0\xB6\x00\x00"
CLOSE = b"\xB0\xB7\x00\x00"
SUCCESS = b"\x90\x00"
def __init__(self, applet):
"""Pass Card or Simulator instance here"""
self.applet = applet
self.iv = 0
self.card_pubkey = None
self.card_aes_key = None
self.host_aes_key = None
self.card_mac_key = None
self.host_mac_key = None
self.mode = "es"
self.is_open = False
def get_card_pubkey(self):
"""Returns static public key of the card.
This key doesn't change unless applet is reinstalled.
"""
sec = self.applet.request(self.GET_PUBKEY)
self.card_pubkey = secp256k1.ec_pubkey_parse(sec)
return self.card_pubkey
def derive_keys(self, shared_secret):
"""Derives keys necessary for encryption and authentication"""
self.host_aes_key = hashlib.sha256(b"host_aes" + shared_secret).digest()
self.card_aes_key = hashlib.sha256(b"card_aes" + shared_secret).digest()
self.host_mac_key = hashlib.sha256(b"host_mac" + shared_secret).digest()
self.card_mac_key = hashlib.sha256(b"card_mac" + shared_secret).digest()
return hashlib.sha256(shared_secret).digest()[:4]
def open(self, mode=None):
"""Opens a secure channel.
        Mode can be "es" - ephemeral-static
        or "ee" - ephemeral-ephemeral
"""
# save mode for later - i.e. reestablish secure channel
if mode is None:
mode = self.mode
else:
self.mode = mode
# check if we know pubkey already
if self.card_pubkey is None:
self.get_card_pubkey()
        # generate an ephemeral key
        secret = get_random_bytes(32)
        host_prv = secret
        host_pub = secp256k1.ec_pubkey_create(secret)
        # ee mode - ask the card to create an ephemeral key and send it to us
        if mode == "ee":
            data = secp256k1.ec_pubkey_serialize(host_pub, secp256k1.EC_UNCOMPRESSED)
            # get ephemeral pubkey from the card
res = self.applet.request(self.OPEN_EE + encode(data))
s = BytesIO(res)
data = s.read(65)
pub = secp256k1.ec_pubkey_parse(data)
secp256k1.ec_pubkey_tweak_mul(pub, secret)
shared_secret = hashlib.sha256(
secp256k1.ec_pubkey_serialize(pub)[1:33]
).digest()
shared_fingerprint = self.derive_keys(shared_secret)
recv_hmac = s.read(MAC_SIZE)
h = hmac.new(self.card_mac_key, digestmod="sha256")
h.update(data)
expected_hmac = h.digest()[:MAC_SIZE]
if expected_hmac != recv_hmac:
raise SecureChannelError("Wrong HMAC.")
data += recv_hmac
raw_sig = s.read()
sig = secp256k1.ecdsa_signature_parse_der(raw_sig)
# in case card doesn't follow low s rule (but it should)
sig = secp256k1.ecdsa_signature_normalize(sig)
if not secp256k1.ecdsa_verify(
sig, hashlib.sha256(data).digest(), self.card_pubkey
):
raise SecureChannelError("Signature is invalid.")
        # se mode - use our ephemeral key with the card's static key
else:
data = secp256k1.ec_pubkey_serialize(host_pub, secp256k1.EC_UNCOMPRESSED)
# ugly copy
pub = secp256k1.ec_pubkey_parse(
secp256k1.ec_pubkey_serialize(self.card_pubkey)
)
secp256k1.ec_pubkey_tweak_mul(pub, secret)
shared_secret = secp256k1.ec_pubkey_serialize(pub)[1:33]
res = self.applet.request(self.OPEN_SE + encode(data))
s = BytesIO(res)
nonce_card = s.read(32)
recv_hmac = s.read(MAC_SIZE)
secret_with_nonces = hashlib.sha256(shared_secret + nonce_card).digest()
shared_fingerprint = self.derive_keys(secret_with_nonces)
data = nonce_card
h = hmac.new(self.card_mac_key, digestmod="sha256")
h.update(data)
expected_hmac = h.digest()[:MAC_SIZE]
if expected_hmac != recv_hmac:
raise SecureChannelError("Wrong HMAC.")
data += recv_hmac
sig = secp256k1.ecdsa_signature_parse_der(s.read())
# in case card doesn't follow low s rule (but it should)
sig = secp256k1.ecdsa_signature_normalize(sig)
if not secp256k1.ecdsa_verify(
sig, hashlib.sha256(data).digest(), self.card_pubkey
):
raise SecureChannelError("Signature is invalid")
# reset iv
self.iv = 0
self.is_open = True
def encrypt(self, data):
"""Encrypts the message for transmission"""
# add padding
d = data + b"\x80"
if len(d) % AES_BLOCK != 0:
d += b"\x00" * (AES_BLOCK - (len(d) % AES_BLOCK))
iv = self.iv.to_bytes(IV_SIZE, "big")
crypto = aes(self.host_aes_key, AES_CBC, iv)
ct = crypto.encrypt(d)
h = hmac.new(self.host_mac_key, digestmod="sha256")
h.update(iv)
h.update(ct)
ct += h.digest()[:MAC_SIZE]
return ct
def decrypt(self, ct):
"""Decrypts the message received from the card"""
recv_hmac = ct[-MAC_SIZE:]
ct = ct[:-MAC_SIZE]
iv = self.iv.to_bytes(IV_SIZE, "big")
h = hmac.new(self.card_mac_key, digestmod="sha256")
h.update(iv)
h.update(ct)
expected_hmac = h.digest()[:MAC_SIZE]
if expected_hmac != recv_hmac:
raise SecureChannelError("Wrong HMAC.")
crypto = aes(self.card_aes_key, AES_CBC, iv)
plain = crypto.decrypt(ct)
# check and remove \x80... padding
arr = plain.split(b"\x80")
if len(arr) == 1 or len(arr[-1].replace(b"\x00", b"")) > 0:
raise SecureChannelError("Wrong padding")
return b"\x80".join(arr[:-1])
def request(self, data):
"""Sends a secure request to the card
and returns decrypted result.
Raises a SecureError if errorcode returned from the card.
"""
# if counter reached maximum - reestablish channel
if self.iv >= 2 ** 16 or not self.is_open:
self.open()
ct = self.encrypt(data)
res = self.applet.request(self.SECURE_MSG + encode(ct))
plaintext = self.decrypt(res)
self.iv += 1
if plaintext[:2] == self.SUCCESS:
return plaintext[2:]
else:
raise SecureError(hexlify(plaintext[:2]).decode())
def close(self):
"""Closes the secure channel"""
self.applet.request(self.CLOSE)
self.is_open = False
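# --- Editor's usage sketch (assumption: `applet` is a Card or Simulator
# instance exposing .request(bytes) -> bytes, as noted in __init__; kept as
# comments because a real round-trip needs the hardware or simulator) ---
#   sc = SecureChannel(applet)
#   sc.open(mode="ee")               # ephemeral-ephemeral handshake
#   plaintext = sc.request(b"\x01")  # encrypted round-trip; raises SecureError on a card error code
#   sc.close()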
|
[
"[email protected]"
] | |
0933399c858eea92a5d926f9873e527890ee7eee
|
471a036309c05b59243033f2480e27e19268ec55
|
/src/london/london/apps/notifications/engines/session.py
|
9840893b6a787cb3beb43026428db35a08c4bbf5
|
[
"BSD-2-Clause"
] |
permissive
|
avelino/votacao_paredao_bbb
|
1bbf33b9ec00f033db5b1d558190135315d50b03
|
875ac157b207fee80be6841f9b17c41b7069e15d
|
refs/heads/master
| 2021-01-20T12:17:48.362512 | 2012-07-13T05:41:44 | 2012-07-13T05:41:44 | 4,928,781 | 0 | 0 | null | 2020-07-27T11:05:32 | 2012-07-06T17:51:03 |
Python
|
UTF-8
|
Python
| false | false | 892 |
py
|
import logging
from london.apps.notifications.engines.base import BasePool
from london.apps.notifications.app_settings import SESSION_KEY
class SessionPool(BasePool):
def add_message_pool(self, request, message, level=logging.NOTSET):
        # setdefault mirrors delete_message() below and avoids a missing-key
        # error on the first message of a session.
        request.session.setdefault(SESSION_KEY, [])
request.session[SESSION_KEY].append(self.make_message_dict(request, message, level))
request.session.modified = True
def get_messages(self, request):
return [msg for msg in (request.session[SESSION_KEY] or [])]
def delete_message(self, request, message):
request.session.setdefault(SESSION_KEY, [])
msg_id = message['id']
for msg in request.session[SESSION_KEY]:
if msg['id'] == msg_id:
request.session[SESSION_KEY].remove(msg)
break
request.session.modified = True
|
[
"[email protected]"
] | |
4098559e3d5ca21f8b0ace8663ad6833d5192dc3
|
482e28dccb663459e50ac601e8cc376f2441b0f7
|
/src/ch05/rtda/LocalVars.py
|
aa2c43c0799c56ab03c9df750cc8fef69b0720b8
|
[] |
no_license
|
wlj5240/JVMByPython
|
1487d4f4b8367e9e31d71b60a9d06ff4996ad1b7
|
53502f10f4f19741646d871c46014e023ccad4a5
|
refs/heads/master
| 2022-10-05T19:55:50.381307 | 2019-10-20T05:03:38 | 2019-10-20T05:03:38 | 270,003,703 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 825 |
py
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: HuRuiFeng
@file: LocalVars.py
@time: 2019/9/15 16:22
@desc: Local variable table. Because a Python list can hold values of any
       type, both primitive values and references are represented by a
       single Slot.
"""
from rtda.Slot import Slot
class LocalVars:
def __init__(self, max_locals):
self.slots = []
if max_locals > 0:
self.slots = [Slot() for _ in range(max_locals)]
def set_numeric(self, index, val):
self.slots[index].num = val
def get_numeric(self, index):
return self.slots[index].num
def set_ref(self, index, ref):
self.slots[index].ref = ref
def get_ref(self, index):
return self.slots[index].ref
def __str__(self):
return "slots:{0}".format([str(t) for t in self.slots])
|
[
"[email protected]"
] | |
3cd3a0490007878e66ee2502a82d81cb04e6a0e3
|
18219d0fc95936ded56fe44f9a65ecb27f015232
|
/148 dictionary items.py
|
a63a3687830fdc6ac0f0f71e6705986048d579d8
|
[] |
no_license
|
JDavid121/Script-Curso-Cisco-Python
|
20a61b91b09376dcaef54f8ae5f86fe252de5c33
|
6d68c17ff3c3826e9fc609d110ce9d0e6ebf718b
|
refs/heads/master
| 2021-05-18T04:54:59.948970 | 2020-03-29T20:19:53 | 2020-03-29T20:19:53 | 251,120,139 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 578 |
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 26 09:58:15 2020
DICTIONARY ITEMS
@author: David
"""
dict3 = {1:"beta",2:"gamma",4:"iota",3:"theta",9:"zeta",5:"eta"}
# items() method
# This method returns a "class" of tuples.
# Each tuple is a key,value pair
print(dict3.items())
print("**********************")
for key,value in dict3.items():
print(key,"->",value)
print("**********************")
for key,value in dict3.items():
print(value,"->",key)
print("***********************")
A = dict3.items()
print(A)
print(len(A))
print(type(A))
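# Editor's addition (illustrative): items() returns a *live view*, so it
# reflects later changes to the dictionary.
dict3[6] = "kappa"
print(A)   # the view obtained earlier now also contains (6, 'kappa')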
|
[
"[email protected]"
] | |
c418f5064c035680c4a33f021984341aae2d73ca
|
42051d7c4dd80bb827f10905a6c89b7187448697
|
/cfxdb/gen/KeyValue.py
|
2067857559a4defa3aa6a5407a5f4eb3fc7d0ba7
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
om26er/cfxdb
|
1f1d195e5e5d37be08d05364518fb6d2cf13f4e4
|
ade48d664a0b4fb99d02836dc77e8e8a43076a07
|
refs/heads/master
| 2021-04-05T03:30:19.212037 | 2020-04-02T20:05:17 | 2020-04-02T20:05:17 | 248,516,500 | 0 | 0 |
NOASSERTION
| 2020-03-19T13:55:56 | 2020-03-19T13:55:56 | null |
UTF-8
|
Python
| false | false | 1,436 |
py
|
# automatically generated by the FlatBuffers compiler, do not modify
# namespace:
import flatbuffers
from flatbuffers.compat import import_numpy
np = import_numpy()
# A key-value pair with string keys & values.
class KeyValue(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsKeyValue(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = KeyValue()
x.Init(buf, n + offset)
return x
# KeyValue
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# UTF8 encoded key of KV pair.
# KeyValue
def Key(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# UTF8 encoded value of KV pair.
# KeyValue
def Value(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def KeyValueStart(builder): builder.StartObject(2)
def KeyValueAddKey(builder, key): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(key), 0)
def KeyValueAddValue(builder, value): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(value), 0)
def KeyValueEnd(builder): return builder.EndObject()
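# --- Editor's usage sketch (appended for illustration; not produced by the
# FlatBuffers compiler) ---
if __name__ == "__main__":
    builder = flatbuffers.Builder(0)
    key_off = builder.CreateString("color")
    val_off = builder.CreateString("blue")
    KeyValueStart(builder)
    KeyValueAddKey(builder, key_off)
    KeyValueAddValue(builder, val_off)
    kv_off = KeyValueEnd(builder)
    builder.Finish(kv_off)
    kv = KeyValue.GetRootAsKeyValue(builder.Output(), 0)
    print(kv.Key(), kv.Value())   # b'color' b'blue'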
|
[
"[email protected]"
] | |
1df769cdfcc8ebd7c113abeab2ffd5ce6ad89079
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03210/s633395264.py
|
eedd235729f86d455915903bde9846883eaf4b10
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 81 |
py
|
X = int(input().rstrip())
if X in [3,5,7]:
print('YES')
else:
print('NO')
|
[
"[email protected]"
] | |
b334b94929f6f34e58add9421156267e499fec17
|
334172bb2d17dd0ab0506e054fea207383c50042
|
/condonsax/hiddenalbum/models.py
|
bb7d66dfb30198dc11f2a27a742526de775edb78
|
[] |
no_license
|
JayWelborn/condonsax
|
1336dd1258a4b348d145a8e1603abf73c7c20947
|
6689db0b5c35420286587531b2e6eaac5f3c3bb7
|
refs/heads/master
| 2021-01-19T22:41:04.818558 | 2017-05-19T18:52:27 | 2017-05-19T18:52:27 | 88,843,128 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 442 |
py
|
from django.db import models
from django.urls import reverse
from albums.models import Album
class HiddenAlbum(models.Model):
album = models.ForeignKey(Album)
slug = models.SlugField('URL slug', max_length=100, db_index=True)
    def get_absolute_url(self):
        """
        Builds the canonical URL for this hidden album from its slug.
        """
        # NOTE: the URL pattern name is an assumption; adjust it to match
        # the project's urlconf.
        return reverse('hiddenalbum_detail', args=(self.slug,))
def __str__(self):
return self.album.title
|
[
"[email protected]"
] | |
006d8aa3c250bfd18c9ae5ac8b4bea0fc073de5f
|
b15d2787a1eeb56dfa700480364337216d2b1eb9
|
/samples/cli/accelbyte_py_sdk_cli/ugc/_admin_get_tag.py
|
f1d4a69d7577f3d4b4c8875fb982147bb1483e49
|
[
"MIT"
] |
permissive
|
AccelByte/accelbyte-python-sdk
|
dedf3b8a592beef5fcf86b4245678ee3277f953d
|
539c617c7e6938892fa49f95585b2a45c97a59e0
|
refs/heads/main
| 2023-08-24T14:38:04.370340 | 2023-08-22T01:08:03 | 2023-08-22T01:08:03 | 410,735,805 | 2 | 1 |
MIT
| 2022-08-02T03:54:11 | 2021-09-27T04:00:10 |
Python
|
UTF-8
|
Python
| false | false | 2,329 |
py
|
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template_file: python-cli-command.j2
# AGS Ugc Service (2.11.3)
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
import json
import yaml
from typing import Optional
import click
from .._utils import login_as as login_as_internal
from .._utils import to_dict
from accelbyte_py_sdk.api.ugc import admin_get_tag as admin_get_tag_internal
from accelbyte_py_sdk.api.ugc.models import ModelsPaginatedGetTagResponse
from accelbyte_py_sdk.api.ugc.models import ResponseError
@click.command()
@click.option("--limit", "limit", type=int)
@click.option("--offset", "offset", type=int)
@click.option("--namespace", type=str)
@click.option("--login_as", type=click.Choice(["client", "user"], case_sensitive=False))
@click.option("--login_with_auth", type=str)
@click.option("--doc", type=bool)
def admin_get_tag(
limit: Optional[int] = None,
offset: Optional[int] = None,
namespace: Optional[str] = None,
login_as: Optional[str] = None,
login_with_auth: Optional[str] = None,
doc: Optional[bool] = None,
):
if doc:
click.echo(admin_get_tag_internal.__doc__)
return
x_additional_headers = None
if login_with_auth:
x_additional_headers = {"Authorization": login_with_auth}
else:
login_as_internal(login_as)
result, error = admin_get_tag_internal(
limit=limit,
offset=offset,
namespace=namespace,
x_additional_headers=x_additional_headers,
)
if error:
raise Exception(f"AdminGetTag failed: {str(error)}")
click.echo(yaml.safe_dump(to_dict(result), sort_keys=False))
admin_get_tag.operation_id = "AdminGetTag"
admin_get_tag.is_deprecated = False
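# Editor's note (illustrative; the executable and command-group names are
# project-specific assumptions — only the options are taken from the
# decorators above):
#   $ <cli> admin-get-tag --namespace my-namespace --limit 20 --login_as client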
|
[
"[email protected]"
] | |
c2519ef6d7e3ea9351129a217f76116d3a604cf9
|
fba81be05c6665690ab7bd35d1fb7c2bdaf72fce
|
/backend/standard_casualty_p_26548/urls.py
|
939afdacf8f0c94508920656ffbdcaaee9c65eb2
|
[] |
no_license
|
crowdbotics-apps/standard-casualty-p-26548
|
5ff4e4433b340a47854bdd5b7d0fa52eb71ba127
|
f8de2e6d3c55f7a68457afe84532be4cdb4fbacf
|
refs/heads/master
| 2023-04-29T18:27:50.111889 | 2021-05-11T13:54:12 | 2021-05-11T13:54:12 | 366,399,402 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,302 |
py
|
"""standard_casualty_p_26548 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Standard Casualty Portal"
admin.site.site_title = "Standard Casualty Portal Admin Portal"
admin.site.index_title = "Standard Casualty Portal Admin"
# swagger
api_info = openapi.Info(
title="Standard Casualty Portal API",
default_version="v1",
description="API documentation for Standard Casualty Portal App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"[email protected]"
] | |
f82fc162a15b808dfc58d3ae84fd370ab77132b3
|
f4434c85e3814b6347f8f8099c081ed4af5678a5
|
/sdk/communication/azure-communication-administration/azure/communication/administration/_phonenumber/_generated/aio/__init__.py
|
6ec72fc665b13e79f13317f3a017845a99847c9a
|
[
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] |
permissive
|
yunhaoling/azure-sdk-for-python
|
5da12a174a37672ac6ed8e3c1f863cb77010a506
|
c4eb0ca1aadb76ad892114230473034830116362
|
refs/heads/master
| 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 |
MIT
| 2020-03-31T20:35:17 | 2019-03-25T22:43:40 |
Python
|
UTF-8
|
Python
| false | false | 604 |
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from ._phone_number_administration_service_async import PhoneNumberAdministrationService
__all__ = ['PhoneNumberAdministrationService']
|
[
"[email protected]"
] | |
4df438883771aae9c61194134f1d97f85de32a2a
|
5ed63c425f4bcc7a82c91920e58f03d60a8d9b3c
|
/bid/constants.py
|
f0cbc77421dd0be6068ef06a4196e2a1ab0ae806
|
[] |
no_license
|
cesslab/winners_curse_v7
|
33d0f021d5e8fd5dee3aad99438b750ec7b3959f
|
4e096cf97dc027d727aad567c7cc859a3ba125f3
|
refs/heads/master
| 2023-09-05T03:57:02.065977 | 2021-10-13T21:33:47 | 2021-10-13T21:33:47 | 355,645,442 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 285 |
py
|
class Constants:
name_in_url = "bid"
players_per_group = None
PART_NUMBER = 1
PART_ONE = 1
MIN_VALUATION = 0
MAX_VALUATION = 100
NUM_LOTTERIES = 4
ROUNDS_PER_LOTTERY = 10
num_rounds = NUM_LOTTERIES * ROUNDS_PER_LOTTERY
PREFIX = "bid_lottery_"
|
[
"[email protected]"
] | |
b9004f1b93915081d0f1481c6719ff74811084ea
|
be52b968823880679d4d62e51ef0154a6df61d69
|
/Python/sWAP cASE.py
|
2a991761de5c71f47c66de6bfbddb817acf8f067
|
[
"MIT"
] |
permissive
|
MonwarAdeeb/HackerRank-Solutions
|
ba165c9cdb4e9cc2aa4cdc19a6b388ec7e4d6385
|
571327e9688061745000ae81c5fd74ff7a2976d4
|
refs/heads/master
| 2023-07-27T11:45:15.675437 | 2021-09-04T04:50:20 | 2021-09-04T04:50:20 | 289,210,723 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 65 |
py
|
def swap_case(s):
swapped = s.swapcase()
return swapped
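# Editor's sketch of the usual HackerRank driver (assumption: the grader
# normally supplies this boilerplate).
if __name__ == '__main__':
    s = input()
    result = swap_case(s)
    print(result)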
|
[
"[email protected]"
] | |
bca28c273b0ec998a9950b63d221fecb922e713b
|
a9e3f3ad54ade49c19973707d2beb49f64490efd
|
/Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/ccx/api/v0/tests/test_views.py
|
489a776e6ef475c522310baa1abbf395dff3722d
|
[
"AGPL-3.0-only",
"AGPL-3.0-or-later",
"MIT"
] |
permissive
|
luque/better-ways-of-thinking-about-software
|
8c3dda94e119f0f96edbfe5ba60ca6ec3f5f625d
|
5809eaca7079a15ee56b0b7fcfea425337046c97
|
refs/heads/master
| 2021-11-24T15:10:09.785252 | 2021-11-22T12:14:34 | 2021-11-22T12:14:34 | 163,850,454 | 3 | 1 |
MIT
| 2021-11-22T12:12:31 | 2019-01-02T14:21:30 |
JavaScript
|
UTF-8
|
Python
| false | false | 49,561 |
py
|
"""
Tests for the CCX REST APIs.
"""
import json
import math
import string
from datetime import timedelta
from unittest import mock
import urllib
import pytest
import ddt
from ccx_keys.locator import CCXLocator
from django.conf import settings
from django.urls import Resolver404, resolve, reverse
from django.utils.timezone import now
from oauth2_provider import models as dot_models
from opaque_keys.edx.keys import CourseKey
from rest_framework import status
from rest_framework.test import APITestCase
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.roles import CourseCcxCoachRole, CourseInstructorRole, CourseStaffRole
from common.djangoapps.student.tests.factories import AdminFactory, UserFactory
from lms.djangoapps.ccx.api.v0 import views
from lms.djangoapps.ccx.models import CcxFieldOverride, CustomCourseForEdX
from lms.djangoapps.ccx.overrides import override_field_for_ccx
from lms.djangoapps.ccx.tests.utils import CcxTestCase
from lms.djangoapps.ccx.utils import ccx_course as ccx_course_cm
from lms.djangoapps.courseware import courses
from lms.djangoapps.instructor.access import allow_access, list_with_level
from lms.djangoapps.instructor.enrollment import enroll_email, get_email_params
from openedx.core.lib.courses import get_course_by_id
USER_PASSWORD = 'test'
class CcxRestApiTest(CcxTestCase, APITestCase):
"""
Base class with common methods to be used in the test classes of this module
"""
@classmethod
def setUpClass(cls):
super().setUpClass()
def setUp(self):
"""
Set up tests
"""
super().setUp()
# add some info about the course for easy access
self.master_course_key = self.course.location.course_key
self.master_course_key_str = str(self.master_course_key)
# OAUTH2 setup
# create a specific user for the application
self.app_user = app_user = UserFactory(
username='test_app_user',
email='[email protected]',
password=USER_PASSWORD
)
# add staff role to the app user
CourseStaffRole(self.master_course_key).add_users(app_user)
# adding instructor to master course.
instructor = UserFactory()
allow_access(self.course, instructor, 'instructor')
self.auth = self.prepare_auth_token(app_user)
self.course.enable_ccx = True
self.mstore.update_item(self.course, self.coach.id)
# making the master course chapters easily available
self.master_course_chapters = courses.get_course_chapter_ids(self.master_course_key)
def prepare_auth_token(self, user):
"""
creates auth token for users
"""
# create an oauth2 provider client app entry
app_client_oauth2_provider = dot_models.Application.objects.create(
name='test client 2',
user=user,
client_type='confidential',
authorization_grant_type='authorization-code',
redirect_uris='http://localhost:8079/complete/edxorg/'
)
        # create an access token
auth_oauth2_provider = dot_models.AccessToken.objects.create(
user=user,
application=app_client_oauth2_provider,
expires=now() + timedelta(weeks=1),
scope='read write',
token='16MGyP3OaQYHmpT1lK7Q6MMNAZsjwF'
)
auth_header_oauth2_provider = f"Bearer {auth_oauth2_provider}"
return auth_header_oauth2_provider
def expect_error(self, http_code, error_code_str, resp_obj):
"""
Helper function that checks that the response object
has a body with the provided error
"""
assert resp_obj.status_code == http_code
assert 'error_code' in resp_obj.data
assert resp_obj.data['error_code'] == error_code_str
def expect_error_fields(self, expected_field_errors, resp_obj):
"""
Helper function that checks that the response object
has a body with the provided field errors
"""
assert resp_obj.status_code == status.HTTP_400_BAD_REQUEST
assert 'field_errors' in resp_obj.data
        # restructure the error dictionary for an easier comparison
resp_dict_error = {}
for field_name, error_dict in resp_obj.data['field_errors'].items():
resp_dict_error[field_name] = error_dict.get('error_code', '')
assert expected_field_errors == resp_dict_error
@ddt.ddt
class CcxListTest(CcxRestApiTest):
"""
Test for the CCX REST APIs
"""
ENABLED_SIGNALS = ['course_published']
@classmethod
def setUpClass(cls):
super().setUpClass()
def setUp(self):
"""
Set up tests
"""
super().setUp()
self.list_url = reverse('ccx_api:v0:ccx:list')
self.list_url_master_course = urllib.parse.urljoin(
self.list_url,
f'?master_course_id={urllib.parse.quote_plus(self.master_course_key_str)}'
)
def test_authorization(self):
"""
Test that only the right token is authorized
"""
auth_list = [
"Wrong token-type-obviously",
"Bearer wrong token format",
"Bearer wrong-token",
"Bearer",
"Bearer hfbhfbfwq398248fnid939rh3489fh39nd4m34r9" # made up token
]
# all the auths in the list fail to authorize
for auth in auth_list:
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=auth)
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
def test_authorization_no_oauth_staff(self):
"""
Check authorization for staff users logged in without oauth
"""
# create a staff user
staff_user = UserFactory(
username='test_staff_user',
email='[email protected]',
password=USER_PASSWORD
)
# add staff role to the staff user
CourseStaffRole(self.master_course_key).add_users(staff_user)
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
# the staff user can perform the request
self.client.login(username=staff_user.username, password=USER_PASSWORD)
resp = self.client.get(self.list_url_master_course)
assert resp.status_code == status.HTTP_200_OK
resp = self.client.post(self.list_url, data, format='json')
assert resp.status_code == status.HTTP_201_CREATED
def test_authorization_no_oauth_instructor(self):
"""
Check authorization for instructor users logged in without oauth
"""
# create an instructor user
instructor_user = UserFactory(
username='test_instructor_user', email='[email protected]', password=USER_PASSWORD
)
# add instructor role to the instructor user
CourseInstructorRole(self.master_course_key).add_users(instructor_user)
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
# the instructor user can perform the request
self.client.login(username=instructor_user.username, password=USER_PASSWORD)
resp = self.client.get(self.list_url_master_course)
assert resp.status_code == status.HTTP_200_OK
resp = self.client.post(self.list_url, data, format='json')
assert resp.status_code == status.HTTP_201_CREATED
def test_authorization_no_oauth(self):
"""
Check authorization for coach users logged in without oauth
"""
        # create a coach user
coach_user = UserFactory(
username='test_coach_user', email='[email protected]', password=USER_PASSWORD
)
# add coach role to the coach user
CourseCcxCoachRole(self.master_course_key).add_users(coach_user)
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
# the coach user cannot perform the request: this type of user can only get her own CCX
self.client.login(username=coach_user.username, password=USER_PASSWORD)
resp = self.client.get(self.list_url_master_course)
assert resp.status_code == status.HTTP_403_FORBIDDEN
resp = self.client.post(self.list_url, data, format='json')
assert resp.status_code == status.HTTP_403_FORBIDDEN
def test_get_list_wrong_master_course(self):
"""
Test for various get requests with wrong master course string
"""
        # mock the permission class so these cases can be tested
mock_class_str = 'openedx.core.lib.api.permissions.IsMasterCourseStaffInstructor.has_permission'
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
# case with no master_course_id provided
resp = self.client.get(self.list_url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'master_course_id_not_provided', resp)
base_url = urllib.parse.urljoin(self.list_url, '?master_course_id=')
# case with empty master_course_id
resp = self.client.get(base_url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_id_not_valid', resp)
# case with invalid master_course_id
url = f'{base_url}invalid_master_course_str'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_id_not_valid', resp)
            # case with nonexistent master_course_id
url = f'{base_url}course-v1%3Aorg_foo.0%2Bcourse_bar_0%2BRun_0'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_404_NOT_FOUND, 'course_id_does_not_exist', resp)
def test_get_list(self):
"""
Tests the API to get a list of CCX Courses
"""
# there are no CCX courses
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=self.auth)
assert 'count' in resp.data
assert resp.data['count'] == 0
        # create a few ccx courses
num_ccx = 10
for _ in range(num_ccx):
self.make_ccx()
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
assert 'count' in resp.data
assert resp.data['count'] == num_ccx
assert 'results' in resp.data
assert len(resp.data['results']) == num_ccx
def test_get_sorted_list(self):
"""
Tests the API to get a sorted list of CCX Courses
"""
        # create a few ccx courses
num_ccx = 3
for _ in range(num_ccx):
self.make_ccx()
# update the display_name fields
all_ccx = CustomCourseForEdX.objects.all()
all_ccx = all_ccx.order_by('id')
assert len(all_ccx) == num_ccx
title_str = 'Title CCX {0}'
for num, ccx in enumerate(all_ccx):
ccx.display_name = title_str.format(string.ascii_lowercase[-(num + 1)])
ccx.save()
# sort by display name
url = f'{self.list_url_master_course}&order_by=display_name'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
assert len(resp.data['results']) == num_ccx
# the display_name should be sorted as "Title CCX x", "Title CCX y", "Title CCX z"
for num, ccx in enumerate(resp.data['results']):
assert title_str.format(string.ascii_lowercase[(- (num_ccx - num))]) == ccx['display_name']
# add sort order desc
url = f'{self.list_url_master_course}&order_by=display_name&sort_order=desc'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
        # the only thing we can check is that the display names come back in
        # reverse alphabetical order, mirroring how the fields were set above
        # with ids ascending
for num, ccx in enumerate(resp.data['results']):
assert title_str.format(string.ascii_lowercase[(- (num + 1))]) == ccx['display_name']
def test_get_paginated_list(self):
"""
Tests the API to get a paginated list of CCX Courses
"""
# create some ccx courses
num_ccx = 357
for _ in range(num_ccx):
self.make_ccx()
page_size = settings.REST_FRAMEWORK.get('PAGE_SIZE', 10)
num_pages = int(math.ceil(num_ccx / float(page_size)))
# get first page
resp = self.client.get(self.list_url_master_course, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
assert resp.data['count'] == num_ccx
assert resp.data['num_pages'] == num_pages
assert resp.data['current_page'] == 1
assert resp.data['start'] == 0
assert resp.data['next'] is not None
assert resp.data['previous'] is None
# get a page in the middle
url = f'{self.list_url_master_course}&page=24'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
assert resp.data['count'] == num_ccx
assert resp.data['num_pages'] == num_pages
assert resp.data['current_page'] == 24
assert resp.data['start'] == ((resp.data['current_page'] - 1) * page_size)
assert resp.data['next'] is not None
assert resp.data['previous'] is not None
# get last page
url = f'{self.list_url_master_course}&page={num_pages}'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
assert resp.data['count'] == num_ccx
assert resp.data['num_pages'] == num_pages
assert resp.data['current_page'] == num_pages
assert resp.data['start'] == ((resp.data['current_page'] - 1) * page_size)
assert resp.data['next'] is None
assert resp.data['previous'] is not None
# last page + 1
url = f'{self.list_url_master_course}&page={num_pages + 1}'
resp = self.client.get(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_404_NOT_FOUND
@ddt.data(
(
{},
status.HTTP_400_BAD_REQUEST,
'master_course_id_not_provided',
),
(
{'master_course_id': None},
status.HTTP_400_BAD_REQUEST,
'master_course_id_not_provided',
),
(
{'master_course_id': ''},
status.HTTP_400_BAD_REQUEST,
'course_id_not_valid',
),
(
{'master_course_id': 'invalid_master_course_str'},
status.HTTP_400_BAD_REQUEST,
'course_id_not_valid',
),
(
{'master_course_id': 'course-v1:org_foo.0+course_bar_0+Run_0'},
status.HTTP_404_NOT_FOUND,
'course_id_does_not_exist',
),
)
@ddt.unpack
def test_post_list_wrong_master_course(self, data, expected_http_error, expected_error_string):
"""
Test for various post requests with wrong master course string
"""
        # mock the permission class so these cases can be tested
mock_class_str = 'openedx.core.lib.api.permissions.IsMasterCourseStaffInstructor.has_permission'
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
            # post the request with the given (invalid) master course data
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(expected_http_error, expected_error_string, resp)
def test_post_list_wrong_master_course_special_cases(self):
"""
Same as test_post_list_wrong_master_course,
but different ways to test the wrong master_course_id
"""
# case with ccx not enabled for master_course_id
self.course.enable_ccx = False
self.mstore.update_item(self.course, self.coach.id)
data = {'master_course_id': self.master_course_key_str}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_403_FORBIDDEN, 'ccx_not_enabled_for_master_course', resp)
self.course.enable_ccx = True
self.mstore.update_item(self.course, self.coach.id)
# case with deprecated master_course_id
with mock.patch('lms.djangoapps.ccx.api.v0.views.get_course_by_id', autospec=True) as mocked:
mocked.return_value.id.deprecated = True
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'deprecated_master_course_id', resp)
@ddt.data(
(
{},
{
'max_students_allowed': 'missing_field_max_students_allowed',
'display_name': 'missing_field_display_name',
'coach_email': 'missing_field_coach_email'
},
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title'
},
{
'coach_email': 'missing_field_coach_email'
},
),
(
{
'max_students_allowed': None,
'display_name': None,
'coach_email': None
},
{
'max_students_allowed': 'null_field_max_students_allowed',
'display_name': 'null_field_display_name',
'coach_email': 'null_field_coach_email'
},
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': 'this is not an [email protected]'
},
{'coach_email': 'invalid_coach_email'},
),
(
{
'max_students_allowed': 10,
'display_name': '',
'coach_email': '[email protected]'
},
{'display_name': 'invalid_display_name'},
),
(
{
'max_students_allowed': 'a',
'display_name': 'CCX Title',
'coach_email': '[email protected]'
},
{'max_students_allowed': 'invalid_max_students_allowed'},
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': {'foo': 'bar'}
},
{'course_modules': 'invalid_course_module_list'},
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': 'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_1'
},
{'course_modules': 'invalid_course_module_list'},
),
(
{
'max_students_allowed': 10,
'display_name': 'CCX Title',
'coach_email': '[email protected]',
'course_modules': ['foo', 'bar']
},
{'course_modules': 'invalid_course_module_keys'},
)
)
@ddt.unpack
def test_post_list_wrong_input_data(self, data, expected_errors):
"""
Test for various post requests with wrong input data
"""
# add the master_course_key_str to the request data
data['master_course_id'] = self.master_course_key_str
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error_fields(expected_errors, resp)
def test_post_list_coach_does_not_exist(self):
"""
Specific test for the case when the input data is valid but the coach does not exist.
"""
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': '[email protected]'
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_404_NOT_FOUND, 'coach_user_does_not_exist', resp)
def test_post_list_wrong_modules(self):
"""
Specific test for the case when the input data is valid but the
course modules do not belong to the master course
"""
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': self.coach.email,
'course_modules': [
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo',
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_bar'
]
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
def test_post_list_mixed_wrong_and_valid_modules(self):
"""
Specific test for the case when the input data is valid but some of
the course modules do not belong to the master course
"""
modules = self.master_course_chapters[0:1] + ['block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo']
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': self.coach.email,
'course_modules': modules
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
def test_post_list(self):
"""
Test the creation of a CCX
"""
outbox = self.get_outbox()
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email,
'course_modules': self.master_course_chapters[0:1]
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_201_CREATED
        # check that the response has at least the same data as the request
for key, val in data.items():
assert resp.data.get(key) == val
assert 'ccx_course_id' in resp.data
# check that the new CCX actually exists
course_key = CourseKey.from_string(resp.data.get('ccx_course_id'))
ccx_course = CustomCourseForEdX.objects.get(pk=course_key.ccx)
assert str(CCXLocator.from_course_locator(ccx_course.course.id, ccx_course.id)) ==\
resp.data.get('ccx_course_id')
# check that the coach user has coach role on the master course
coach_role_on_master_course = CourseCcxCoachRole(self.master_course_key)
assert coach_role_on_master_course.has_user(self.coach)
# check that the coach has been enrolled in the ccx
ccx_course_object = get_course_by_id(course_key)
assert CourseEnrollment.objects.filter(course_id=ccx_course_object.id, user=self.coach).exists()
# check that an email has been sent to the coach
assert len(outbox) == 1
assert self.coach.email in outbox[0].recipients()
@ddt.data(
True,
False
)
def test_post_list_on_active_state(self, user_is_active):
"""
Test the creation of a CCX on user's active states.
"""
self.app_user.is_active = user_is_active
self.app_user.save()
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email,
'course_modules': self.master_course_chapters[0:1]
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
if not user_is_active:
assert resp.status_code == status.HTTP_403_FORBIDDEN
else:
assert resp.status_code == status.HTTP_201_CREATED
def test_post_list_duplicated_modules(self):
"""
Test the creation of a CCX, but with duplicated modules
"""
chapters = self.master_course_chapters[0:1]
duplicated_chapters = chapters * 3
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email,
'course_modules': duplicated_chapters
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_201_CREATED
assert resp.data.get('course_modules') == chapters
def test_post_list_staff_master_course_in_ccx(self):
"""
Specific test to check that the staff and instructor of the master
course are assigned to the CCX.
"""
outbox = self.get_outbox()
data = {
'master_course_id': self.master_course_key_str,
'max_students_allowed': 111,
'display_name': 'CCX Test Title',
'coach_email': self.coach.email
}
resp = self.client.post(self.list_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_201_CREATED
        # check that only one email has been sent and that it is to the coach
assert len(outbox) == 1
assert self.coach.email in outbox[0].recipients()
list_staff_master_course = list_with_level(self.course.id, 'staff')
list_instructor_master_course = list_with_level(self.course.id, 'instructor')
course_key = CourseKey.from_string(resp.data.get('ccx_course_id'))
with ccx_course_cm(course_key) as course_ccx:
list_staff_ccx_course = list_with_level(course_ccx.id, 'staff')
list_instructor_ccx_course = list_with_level(course_ccx.id, 'instructor')
# The "Coach" in the parent course becomes "Staff" on the CCX, so the CCX should have 1 "Staff"
# user more than the parent course
assert (len(list_staff_master_course) + 1) == len(list_staff_ccx_course)
# Make sure all of the existing course staff are passed to the CCX
for course_user in list_staff_master_course:
assert course_user in list_staff_ccx_course
# Make sure the "Coach" on the parent course is "Staff" on the CCX
assert self.coach in list_staff_ccx_course
assert len(list_instructor_master_course) == len(list_instructor_ccx_course)
for course_user, ccx_user in zip(sorted(list_instructor_master_course), sorted(list_instructor_ccx_course)):
assert course_user == ccx_user
@ddt.ddt
class CcxDetailTest(CcxRestApiTest):
"""
Test for the CCX REST APIs
"""
ENABLED_SIGNALS = ['course_published']
def setUp(self):
"""
Set up tests
"""
super().setUp()
self.make_coach()
# create a ccx
self.ccx = self.make_ccx(max_students_allowed=123)
self.ccx_key = CCXLocator.from_course_locator(self.ccx.course.id, self.ccx.id)
self.ccx_key_str = str(self.ccx_key)
self.detail_url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': self.ccx_key_str})
def make_ccx(self, max_students_allowed=200):
"""
Overridden method to replicate (part of) the actual
creation of ccx courses
"""
ccx = super().make_ccx(max_students_allowed=max_students_allowed)
ccx.structure_json = json.dumps(self.master_course_chapters)
ccx.save()
override_field_for_ccx(ccx, self.course, 'start', now())
override_field_for_ccx(ccx, self.course, 'due', None)
# Hide anything that can show up in the schedule
hidden = 'visible_to_staff_only'
for chapter in self.course.get_children():
override_field_for_ccx(ccx, chapter, hidden, True)
for sequential in chapter.get_children():
override_field_for_ccx(ccx, sequential, hidden, True)
for vertical in sequential.get_children():
override_field_for_ccx(ccx, vertical, hidden, True)
# enroll the coach in the CCX
ccx_course_key = CCXLocator.from_course_locator(self.course.id, ccx.id)
email_params = get_email_params(
self.course,
auto_enroll=True,
course_key=ccx_course_key,
display_name=ccx.display_name
)
enroll_email(
course_id=ccx_course_key,
student_email=self.coach.email,
auto_enroll=True,
email_students=False,
email_params=email_params,
)
return ccx
def test_authorization(self):
"""
Test that only the right token is authorized
"""
auth_list = [
"Wrong token-type-obviously",
"Bearer wrong token format",
"Bearer wrong-token",
"Bearer",
"Bearer hfbhfbfwq398248fnid939rh3489fh39nd4m34r9" # made up token
]
# all the auths in the list fail to authorize
for auth in auth_list:
resp = self.client.get(self.detail_url, {}, HTTP_AUTHORIZATION=auth)
assert resp.status_code == status.HTTP_401_UNAUTHORIZED
resp = self.client.get(self.detail_url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
def test_authorization_no_oauth_staff(self):
"""
Check authorization for staff users logged in without oauth
"""
# create a staff user
staff_user = UserFactory.create(
username='test_staff_user', email='[email protected]', password='test',
)
# add staff role to the staff user
CourseStaffRole(self.master_course_key).add_users(staff_user)
data = {'display_name': 'CCX Title'}
# the staff user can perform the request
self.client.login(username=staff_user.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
assert resp.status_code == status.HTTP_200_OK
resp = self.client.patch(self.detail_url, data, format='json')
assert resp.status_code == status.HTTP_204_NO_CONTENT
def test_authorization_no_oauth_instructor(self):
"""
Check authorization for users logged in without oauth
"""
# create an instructor user
instructor_user = UserFactory.create(
username='test_instructor_user', email='[email protected]', password='test',
)
# add instructor role to the instructor user
CourseInstructorRole(self.master_course_key).add_users(instructor_user)
data = {'display_name': 'CCX Title'}
# the instructor user can perform the request
self.client.login(username=instructor_user.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
assert resp.status_code == status.HTTP_200_OK
resp = self.client.patch(self.detail_url, data, format='json')
assert resp.status_code == status.HTTP_204_NO_CONTENT
def test_authorization_no_oauth_other_coach(self):
"""
Check authorization for other coach users logged in without oauth
"""
        # create a coach user
coach_user = UserFactory.create(
username='test_coach_user', email='[email protected]', password='test',
)
# add coach role to the coach user
CourseCcxCoachRole(self.master_course_key).add_users(coach_user)
data = {'display_name': 'CCX Title'}
# the coach user cannot perform the request: this type of user can only get her own CCX
self.client.login(username=coach_user.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
assert resp.status_code == status.HTTP_403_FORBIDDEN
resp = self.client.patch(self.detail_url, data, format='json')
assert resp.status_code == status.HTTP_403_FORBIDDEN
def test_authorization_no_oauth_ccx_coach(self):
"""
Check authorization for ccx coach users logged in without oauth
"""
data = {'display_name': 'CCX Title'}
# the coach owner of the CCX can perform the request only if it is a get
self.client.login(username=self.coach.username, password=USER_PASSWORD)
resp = self.client.get(self.detail_url)
assert resp.status_code == status.HTTP_200_OK
resp = self.client.patch(self.detail_url, data, format='json')
assert resp.status_code == status.HTTP_403_FORBIDDEN
def test_resolve_get_detail(self):
"""
        Test for the ccx detail view resolver. This is needed because it is assumed
        that only a URL with a valid course id string can reach the detail view.
"""
# get the base url from the valid one to build invalid urls
base_url = '{}/'.format(self.detail_url.rsplit('/', 1)[0])
# this url should be the same of the ccx list view
resolver = resolve(base_url)
assert views.CCXListView.__name__ == resolver.func.__name__
assert views.CCXListView.__module__ == resolver.func.__module__
# invalid urls
for invalid_ccx_id in ('foo', 'ccx-v1:org.0', 'ccx-v1:org.0+course_0'):
with pytest.raises(Resolver404):
resolve(f'{base_url}{invalid_ccx_id}')
        # the following course ID works even if it is not a valid CCX course id (the regex matches course ID strings)
resolver = resolve('{}{}'.format(base_url, 'ccx-v1:org.0+course_0+Run_0'))
assert views.CCXDetailView.__name__ == resolver.func.__name__
assert views.CCXDetailView.__module__ == resolver.func.__module__
# and of course a valid ccx course id
resolver = resolve(f'{base_url}{self.ccx_key_str}')
assert views.CCXDetailView.__name__ == resolver.func.__name__
assert views.CCXDetailView.__module__ == resolver.func.__module__
@ddt.data(
'get',
'delete',
'patch',
)
def test_detail_wrong_ccx(self, http_method):
"""
Test for different methods for detail of a ccx course.
All check the validity of the ccx course id
"""
client_request = getattr(self.client, http_method)
# get a detail url with a master_course id string
mock_class_str = 'openedx.core.lib.api.permissions.IsCourseStaffInstructor.has_object_permission'
url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': self.master_course_key_str})
        # the permission class will give a 403 error because it will not find the CCX
resp = client_request(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_403_FORBIDDEN
# bypassing the permission class we get another kind of error
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
resp = client_request(url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_id_not_valid_ccx_id', resp)
        # use a nonexistent ccx id
url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': 'ccx-v1:foo.0+course_bar_0+Run_0+ccx@1'})
        # the permission class will give a 403 error because it will not find the CCX
resp = client_request(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_403_FORBIDDEN
# bypassing the permission class we get another kind of error
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
resp = client_request(url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_404_NOT_FOUND, 'ccx_course_id_does_not_exist', resp)
        # get a valid ccx key and add a few 0s to get a nonexistent ccx for a valid course
ccx_key_str = f'{self.ccx_key_str}000000'
url = reverse('ccx_api:v0:ccx:detail', kwargs={'ccx_course_id': ccx_key_str})
        # the permission class will give a 403 error because it will not find the CCX
resp = client_request(url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_403_FORBIDDEN
# bypassing the permission class we get another kind of error
with mock.patch(mock_class_str, autospec=True) as mocked_perm_class:
mocked_perm_class.return_value = True
resp = client_request(url, {}, HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_404_NOT_FOUND, 'ccx_course_id_does_not_exist', resp)
def test_get_detail(self):
"""
Test for getting detail of a ccx course
"""
resp = self.client.get(self.detail_url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_200_OK
assert resp.data.get('ccx_course_id') == self.ccx_key_str
assert resp.data.get('display_name') == self.ccx.display_name
assert resp.data.get('max_students_allowed') == self.ccx.max_student_enrollments_allowed
assert resp.data.get('coach_email') == self.ccx.coach.email
assert resp.data.get('master_course_id') == str(self.ccx.course_id)
assert len(resp.data.get('course_modules')) == len(self.master_course_chapters)
def test_delete_detail(self):
"""
Test for deleting a ccx course
"""
# check that there are overrides
assert CcxFieldOverride.objects.filter(ccx=self.ccx).count() > 0
assert CourseEnrollment.objects.filter(course_id=self.ccx_key).count() > 0
resp = self.client.delete(self.detail_url, {}, HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
assert resp.data is None
# the CCX does not exist any more
with pytest.raises(CustomCourseForEdX.DoesNotExist):
CustomCourseForEdX.objects.get(id=self.ccx.id)
# check that there are no overrides
assert CcxFieldOverride.objects.filter(ccx=self.ccx).count() == 0
assert CourseEnrollment.objects.filter(course_id=self.ccx_key).count() == 0
def test_patch_detail_change_master_course(self):
"""
Test to patch a ccx course to change a master course
"""
data = {
'master_course_id': 'changed_course_id'
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_403_FORBIDDEN, 'master_course_id_change_not_allowed', resp)
@ddt.data(
(
{
'max_students_allowed': None,
'display_name': None,
'coach_email': None
},
{
'max_students_allowed': 'null_field_max_students_allowed',
'display_name': 'null_field_display_name',
'coach_email': 'null_field_coach_email'
},
),
(
{'coach_email': 'this is not an [email protected]'},
{'coach_email': 'invalid_coach_email'},
),
(
{'display_name': ''},
{'display_name': 'invalid_display_name'},
),
(
{'max_students_allowed': 'a'},
{'max_students_allowed': 'invalid_max_students_allowed'},
),
(
{'course_modules': {'foo': 'bar'}},
{'course_modules': 'invalid_course_module_list'},
),
(
{'course_modules': 'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_1'},
{'course_modules': 'invalid_course_module_list'},
),
(
{'course_modules': ['foo', 'bar']},
{'course_modules': 'invalid_course_module_keys'},
),
)
@ddt.unpack
def test_patch_detail_wrong_input_data(self, data, expected_errors):
"""
Test for different wrong inputs for the patch method
"""
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error_fields(expected_errors, resp)
def test_empty_patch(self):
"""
An empty patch does not modify anything
"""
display_name = self.ccx.display_name
max_students_allowed = self.ccx.max_student_enrollments_allowed
coach_email = self.ccx.coach.email
ccx_structure = self.ccx.structure
resp = self.client.patch(self.detail_url, {}, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert display_name == ccx.display_name
assert max_students_allowed == ccx.max_student_enrollments_allowed
assert coach_email == ccx.coach.email
assert ccx_structure == ccx.structure
def test_patch_detail_coach_does_not_exist(self):
"""
Specific test for the case when the input data is valid but the coach does not exist.
"""
data = {
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': '[email protected]'
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_404_NOT_FOUND, 'coach_user_does_not_exist', resp)
def test_patch_detail_wrong_modules(self):
"""
Specific test for the case when the input data is valid but the
course modules do not belong to the master course
"""
data = {
'course_modules': [
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo',
'block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_bar'
]
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
def test_patch_detail_mixed_wrong_and_valid_modules(self):
"""
Specific test for the case when the input data is valid but some of
the course modules do not belong to the master course
"""
modules = self.master_course_chapters[0:1] + ['block-v1:org.0+course_0+Run_0+type@chapter+block@chapter_foo']
data = {
'course_modules': modules
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
self.expect_error(status.HTTP_400_BAD_REQUEST, 'course_module_list_not_belonging_to_master_course', resp)
def test_patch_detail(self):
"""
Test for successful patch
"""
outbox = self.get_outbox()
# create a new coach
new_coach = AdminFactory.create()
data = {
'max_students_allowed': 111,
'display_name': 'CCX Title',
'coach_email': new_coach.email
}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert ccx_from_db.max_student_enrollments_allowed == data['max_students_allowed']
assert ccx_from_db.display_name == data['display_name']
assert ccx_from_db.coach.email == data['coach_email']
# check that the coach user has coach role on the master course
coach_role_on_master_course = CourseCcxCoachRole(self.master_course_key)
assert coach_role_on_master_course.has_user(new_coach)
# check that the coach has been enrolled in the ccx
ccx_course_object = get_course_by_id(self.ccx_key)
assert CourseEnrollment.objects.filter(course_id=ccx_course_object.id, user=new_coach).exists()
# check that an email has been sent to the coach
assert len(outbox) == 1
assert new_coach.email in outbox[0].recipients()
def test_patch_detail_modules(self):
"""
Specific test for successful patch of the course modules
"""
data = {'course_modules': self.master_course_chapters[0:1]}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert len(ccx_from_db.structure) == len(data['course_modules'])
data = {'course_modules': []}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert len(ccx_from_db.structure) == len([])
data = {'course_modules': self.master_course_chapters}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert len(ccx_from_db.structure) == len(self.master_course_chapters)
data = {'course_modules': None}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert ccx_from_db.structure is None
chapters = self.master_course_chapters[0:1]
data = {'course_modules': chapters * 3}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert len(ccx_from_db.structure) == len(chapters)
@ddt.data(
True,
False
)
def test_patch_user_on_active_state(self, user_is_active):
"""
Test patch ccx course on user's active state.
"""
self.app_user.is_active = user_is_active
self.app_user.save()
chapters = self.master_course_chapters[0:1]
data = {'course_modules': chapters * 3}
resp = self.client.patch(self.detail_url, data, format='json', HTTP_AUTHORIZATION=self.auth)
if not user_is_active:
assert resp.status_code == status.HTTP_403_FORBIDDEN
else:
assert resp.status_code == status.HTTP_204_NO_CONTENT
ccx_from_db = CustomCourseForEdX.objects.get(id=self.ccx.id)
assert len(ccx_from_db.structure) == len(chapters)
@ddt.data(
True,
False
)
def test_delete_detail_on_active_state(self, user_is_active):
"""
Test for deleting a ccx course on user's active state.
"""
self.app_user.is_active = user_is_active
self.app_user.save()
# check that there are overrides
assert CcxFieldOverride.objects.filter(ccx=self.ccx).count() > 0
assert CourseEnrollment.objects.filter(course_id=self.ccx_key).count() > 0
resp = self.client.delete(self.detail_url, {}, HTTP_AUTHORIZATION=self.auth)
if not user_is_active:
assert resp.status_code == status.HTTP_403_FORBIDDEN
else:
assert resp.status_code == status.HTTP_204_NO_CONTENT
assert resp.data is None
# the CCX does not exist any more
with pytest.raises(CustomCourseForEdX.DoesNotExist):
CustomCourseForEdX.objects.get(id=self.ccx.id)
# check that there are no overrides
assert CcxFieldOverride.objects.filter(ccx=self.ccx).count() == 0
assert CourseEnrollment.objects.filter(course_id=self.ccx_key).count() == 0
|
[
"[email protected]"
] | |
1cee4617947c4877e2784a1c7ca58acba24dbcee
|
786de89be635eb21295070a6a3452f3a7fe6712c
|
/pyana/tags/V00-00-10/src/input.py
|
5aeb783cb0a06ff78ca4f1306da3746ced1206b1
|
[] |
no_license
|
connectthefuture/psdmrepo
|
85267cfe8d54564f99e17035efe931077c8f7a37
|
f32870a987a7493e7bf0f0a5c1712a5a030ef199
|
refs/heads/master
| 2021-01-13T03:26:35.494026 | 2015-09-03T22:22:11 | 2015-09-03T22:22:11 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,690 |
py
|
#
# $Id$
#
# Copyright (c) 2010 SLAC National Accelerator Laboratory
#
import logging
import threading
import Queue
from pypdsdata import io
_log = logging.getLogger("pyana.input")
def dgramGen(names):
""" Datagram generator """
# group files by run number
runfiles = {}
for n in names :
xname = io.XtcFileName(n)
runfiles.setdefault(xname.run(), []).append(n)
# scan all runs
runs = runfiles.keys()
runs.sort()
for run in runs :
names = runfiles[run]
logging.info("Processing run number %s" % run)
logging.info("File list: %s" % names)
# read datagrams one by one
dgiter = io.XtcMergeIterator( names )
for dg in dgiter :
fileName = dgiter.fileName()
fpos = dgiter.fpos()
run = dgiter.run()
yield (dg, fileName, fpos, run)
class _DgramReaderThread ( threading.Thread ):
def __init__(self, queue, names):
threading.Thread.__init__(self, name="DgramReader")
self.queue = queue
self.names = names
def run(self) :
for dg in dgramGen(self.names) :
self.queue.put(dg)
# signal end of data
self.queue.put(None)
def threadedDgramGen( names, queueSize = 10 ):
""" datagram generator which does reading in a separate thread """
queue = Queue.Queue(queueSize)
thread = _DgramReaderThread(queue, names)
thread.start()
while True:
dg = queue.get()
if dg is None : break
yield dg
# join the thread
thread.join()
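# A minimal usage sketch; the file name below is hypothetical (io.XtcFileName
# must be able to parse a run number out of whatever is passed in):
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    for dg, fileName, fpos, run in threadedDgramGen(["e1-r0001-s00-c00.xtc"]):
        _log.info("datagram from %s at offset %s (run %s)", fileName, fpos, run)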
|
[
"salnikov@b967ad99-d558-0410-b138-e0f6c56caec7"
] |
salnikov@b967ad99-d558-0410-b138-e0f6c56caec7
|
0cec27060bbaa91ea8a2fe8be0588bddeb3ec1ff
|
525c0cd60cf07e36c378539472703aa1e8354a86
|
/app/users/models.py
|
53f8eb8d9ff3a3a4b9ef50ac9285c68cfdbe417e
|
[] |
no_license
|
yatimisi/bookkeeping-server
|
a4575b871eb18886bb53ec0fe314f41801983978
|
272710aa0066711ec605755226b4387286c7e404
|
refs/heads/master
| 2020-11-30T13:38:18.194740 | 2019-12-30T01:27:04 | 2019-12-30T01:27:04 | 230,407,853 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,627 |
py
|
import os
import time
from django.db import models
from django.contrib.auth.models import AbstractUser, UserManager as AuthUerManager
from django.contrib.auth.tokens import default_token_generator
from django.template import loader
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from django.utils.translation import gettext_lazy as _
def user_image_path(instance, filename):
ext = os.path.splitext(filename)[-1]
return os.path.join('user-profile', f'{instance.username}{ext}')
class UserManager(AuthUerManager):
def _create_user(self, email, password, **extra_fields):
"""
        Create and save a user with the given email and password.
"""
if not email:
raise ValueError('The given email must be set')
email = self.normalize_email(email)
user = self.model(email=email, **extra_fields)
user.set_password(password)
user.save(using=self._db)
return user
def create_user(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_staff', False)
extra_fields.setdefault('is_superuser', False)
return self._create_user(email, password, **extra_fields)
def create_superuser(self, email, password=None, **extra_fields):
extra_fields.setdefault('is_staff', True)
extra_fields.setdefault('is_superuser', True)
if extra_fields.get('is_staff') is not True:
raise ValueError('Superuser must have is_staff=True.')
if extra_fields.get('is_superuser') is not True:
raise ValueError('Superuser must have is_superuser=True.')
return self._create_user(email, password, **extra_fields)
class User(AbstractUser):
email = models.EmailField('電子郵件', unique=True)
profile = models.ImageField(
blank=True, null=True, upload_to=user_image_path)
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = []
objects = UserManager()
def send_password_set_email(self):
subject = loader.render_to_string('email/set-password-subject.txt')
subject = ''.join(subject.splitlines())
body = loader.render_to_string('email/set-password-content.html', {
'uid': urlsafe_base64_encode(force_bytes(self.pk)),
'token': default_token_generator.make_token(self),
'user': self,
})
self.email_user(subject, body)
@property
def username(self):
return f'{self.first_name} {self.last_name}'.strip() or self.email.split('@')[0]
def __str__(self):
return self.username
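# A usage sketch, assuming a configured Django project plus the
# 'email/set-password-*' templates referenced above:
#
#   user = User.objects.create_user(email='alice@example.com')  # unusable password until set
#   user.send_password_set_email()  # mails a set-password link built from uid + token
#   str(user)                       # 'alice' -- email local part while names are empty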
|
[
"[email protected]"
] | |
c4b2387b95248fcc29f632db6c3466309d2568f4
|
e98a1e360e947a0f91edc3cb603d915a3630cfbc
|
/easy/1017_160_**intersection_of_two_linked_list.py
|
75d55c3e5c95d14a9f8c63cf3633510d95adf202
|
[] |
no_license
|
myungwooko/algorithm
|
3a6a05cf7efa469aa911fe04871ef368ab98bb65
|
673e51199a2d07198894a283479d459bef0272c5
|
refs/heads/master
| 2021-07-04T01:17:41.787653 | 2020-12-25T00:59:33 | 2020-12-25T00:59:33 | 213,865,632 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,996 |
py
|
"""
160. Intersection of Two Linked Lists
Easy
Write a program to find the node at which the intersection of two singly linked lists begins.
For example, two linked lists (diagram omitted in this text) may begin to intersect at node c1.
Example 1:
Input: intersectVal = 8, listA = [4,1,8,4,5], listB = [5,0,1,8,4,5], skipA = 2, skipB = 3
Output: Reference of the node with value = 8
Input Explanation: The intersected node's value is 8 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [4,1,8,4,5]. From the head of B, it reads as [5,0,1,8,4,5]. There are 2 nodes before the intersected node in A; There are 3 nodes before the intersected node in B.
Example 2:
Input: intersectVal = 2, listA = [0,9,1,2,4], listB = [3,2,4], skipA = 3, skipB = 1
Output: Reference of the node with value = 2
Input Explanation: The intersected node's value is 2 (note that this must not be 0 if the two lists intersect). From the head of A, it reads as [0,9,1,2,4]. From the head of B, it reads as [3,2,4]. There are 3 nodes before the intersected node in A; There are 1 node before the intersected node in B.
Example 3:
Input: intersectVal = 0, listA = [2,6,4], listB = [1,5], skipA = 3, skipB = 2
Output: null
Input Explanation: From the head of A, it reads as [2,6,4]. From the head of B, it reads as [1,5]. Since the two lists do not intersect, intersectVal must be 0, while skipA and skipB can be arbitrary values.
Explanation: The two lists do not intersect, so return null.
Notes:
If the two linked lists have no intersection at all, return null.
The linked lists must retain their original structure after the function returns.
You may assume there are no cycles anywhere in the entire linked structure.
Your code should preferably run in O(n) time and use only O(1) memory.
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
# class Solution(object):
# def getIntersectionNode(self, headA, headB): =====================================> Time Limit Exceeded
# """
# :type head1, head1: ListNode
# :rtype: ListNode
# """
# if not headA or not headB:
# return
#
# vals = []
# curA = headA
# curB = headB
#
# while curA:
# vals.append(curA)
# curA = curA.next
#
# while curB:
# if curB in vals:
# return curB
# curB = curB.next
#
# return
"""
If two linked lists intersect, we can make two observations:
They must share the same nodes after the intersection point.
L1+L2 must have the same tail from the intersection point as L2+L1. For example,
L1 = 1,2,3
L2 = 6,5,2,3
L1+L2 = 1,2,3,6,5,2,3
L2+L1 = 6,5,2,3,1,2,3
Because both lists meet at some node and proceed identically to the same end,
concatenating them as above equalizes the lengths and makes the stretch from the
intersection point onward identical.
So the function below walks both concatenations in lockstep: the two pointers
become equal exactly when they enter the shared tail, and that node is returned.
In the example below the while loop exits at the node 2, and that node is returned
1-2-3-None-6 -5-(2)-3
6-5-2- 3-None-1-(2)-3
Likewise, when the lists share no node, the same walk still equalizes the lengths;
nothing matches along the way, the loop finally exits on None == None, and that
node (None) is returned
L1 = 1,2,3
L2 = 4,5,6,7
1-2-3-None-4-5-6-7-(None)
4-5-6-7-None-1-2-3-(None)
"""
class Solution(object):
def getIntersectionNode(self, headA, headB):
p1, p2 = headA, headB
while p1 != p2:
p1 = headB if not p1 else p1.next
p2 = headA if not p2 else p2.next
return p1
a = ListNode(1)
a.next = ListNode(2)
b = ListNode(7)
b.next = ListNode(8)
s = Solution()
test = s.getIntersectionNode(a, b)
print(11, test)
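# The driver above only exercises the no-intersection case; a quick hand-built
# intersecting pair (values chosen arbitrarily) confirms the shared node object
# itself is returned:
tail = ListNode(1)
tail.next = ListNode(8)
headA = ListNode(4)
headA.next = tail
headB = ListNode(5)
headB.next = ListNode(6)
headB.next.next = tail
assert s.getIntersectionNode(headA, headB) is tail  # same object, not just equal value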
|
[
"[email protected]"
] | |
2b6f169bba089af91ccf40e5c920c1520be4b508
|
fb0dd23271a274a9b622183b2f66bdd40c46169e
|
/todo/decorators.py
|
3b30554e25d05f21b6f661c6b5fa71845c3b78ea
|
[] |
no_license
|
adeelehsan/rest-todo-app
|
826e7639353ec8e2bd4d61fa673fd75e6c6ce846
|
ced2c819587b783eaeb90ba220fb46e6f796056e
|
refs/heads/master
| 2020-12-02T07:51:08.398935 | 2017-07-13T04:41:05 | 2017-07-13T04:41:05 | 96,734,896 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 361 |
py
|
# from .models import Task
#
# def user_has_perm(function):
# def wrap(request):
# if request.user.has_perm('todo.can_view'):
# task = Task.objects.all()
# return function(request, task)
# else:
# task = Task.objects.filter(user=request.user)
# return function(request, task)
# return wrap
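# Usage sketch (the view below is hypothetical; the decorator hands the visible
# queryset to the wrapped view as a second argument):
#
# @user_has_perm
# def task_list(request, task):
#     return render(request, 'todo/task_list.html', {'tasks': task})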
|
[
"[email protected]"
] | |
16a4aa3762f0897041f90f18f4cf21065746f6a8
|
eb9f655206c43c12b497c667ba56a0d358b6bc3a
|
/python/testData/resolve/ImplicitDunderDocWithClassAttrNewStyleClass.py
|
545b55d94a2e15a164d7dc0b7b25eb846fa95673
|
[
"Apache-2.0"
] |
permissive
|
JetBrains/intellij-community
|
2ed226e200ecc17c037dcddd4a006de56cd43941
|
05dbd4575d01a213f3f4d69aa4968473f2536142
|
refs/heads/master
| 2023-09-03T17:06:37.560889 | 2023-09-03T11:51:00 | 2023-09-03T12:12:27 | 2,489,216 | 16,288 | 6,635 |
Apache-2.0
| 2023-09-12T07:41:58 | 2011-09-30T13:33:05 | null |
UTF-8
|
Python
| false | false | 99 |
py
|
class A(object):
__doc__ = 15
def foo(self):
return __doc__
# <ref>
|
[
"[email protected]"
] | |
8b3c3535ec1c8228fdf9be210a9644992f4e1033
|
90b2b50be27da77b6680f4c7b9cfea53267f2f6d
|
/CodingInterviews/46.把数字翻译成字符串.py
|
c6fe0263f14df73e786772453b4d6bc777200922
|
[
"Apache-2.0"
] |
permissive
|
xuhuasheng/algorithm-python
|
22cd3007d0b63d169d8f84b9b88787d6883e9c76
|
9c47c1add8da1ccfbee8882057883d90615d27b5
|
refs/heads/master
| 2023-01-18T22:52:22.027636 | 2020-11-20T09:48:23 | 2020-11-20T09:48:23 | 313,158,561 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,192 |
py
|
# Given a number, translate it into a string with the rule: 0 becomes "a",
# 1 becomes "b", ..., 25 becomes "z".
# A number can have several translations; e.g. 12258 has 5 in total:
# bccfi, bwfi, bczi, mcfi, mzi.
# Implement a function that counts how many distinct translations a number has.
# Think about the problem recursively, then solve it bottom-up
# f(n) denotes the count for the n-th digit counted from the right
# f(0) = 1
# f(1) = 1
# f(n) = f(n-1) + g(n, n-1)*f(n-2)
# where g(n, n-1) = 1 if digit(n)*10 + digit(n-1) is in [10, 25], else 0
# Dynamic programming: in essence, compute bottom-up and store results for reuse
# Time complexity: O(n)
# Space complexity: O(n)
def getTranslationCount(num):
if num < 0:
return 0
numstr = str(num)
if len(numstr) == 1:
return 1
restmp = [0] * (len(numstr)+1)
restmp[0] = 1
restmp[1] = 1
g = 0
n = 0
for i in range(len(numstr)-2, -1, -1):
dd = int(numstr[i])*10+int(numstr[i+1])
if dd >= 10 and dd <=25:
g = 1
else:
g = 0
n = len(numstr)-i
restmp[n] = restmp[n-1] + g*restmp[n-2]
return restmp[n]
if __name__ == "__main__":
print(getTranslationCount(12258))
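# Since restmp[n] only ever reads the two previous entries, the table can be
# collapsed into two rolling variables -- an O(1)-space variant of the same
# recurrence (a sketch, not part of the original solution):
def getTranslationCountO1(num):
    if num < 0:
        return 0
    numstr = str(num)
    prev2, prev1 = 1, 1  # f(n-2), f(n-1)
    for i in range(len(numstr)-2, -1, -1):
        dd = int(numstr[i])*10 + int(numstr[i+1])
        g = 1 if 10 <= dd <= 25 else 0
        prev2, prev1 = prev1, prev1 + g*prev2
    return prev1  # getTranslationCountO1(12258) == 5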
|
[
"[email protected]"
] | |
582600a5132f6e92437d624e360e300171afc73d
|
7a28b09805e6a925bb8fff2a06258c42cf9957f7
|
/parallel_wavegan/bin/compute_statistics.py
|
21f1d4d10af30691ffdd7f4b0e5ae3664bed6f9e
|
[
"MIT"
] |
permissive
|
tantk/ParallelWaveGAN
|
cf55d4533e6899b51a6a44afb3247d14b5d83196
|
a204ac47357066c58a9662b8e9b619f53d19d043
|
refs/heads/master
| 2020-09-03T19:30:24.068755 | 2019-11-04T12:32:42 | 2019-11-04T12:32:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 3,287 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 Tomoki Hayashi
# MIT License (https://opensource.org/licenses/MIT)
"""Calculate statistics of feature files."""
import argparse
import logging
import os
import numpy as np
import yaml
from sklearn.preprocessing import StandardScaler
from tqdm import tqdm
from parallel_wavegan.datasets import MelDataset
from parallel_wavegan.utils import read_hdf5
from parallel_wavegan.utils import write_hdf5
def main():
"""Run preprocessing process."""
parser = argparse.ArgumentParser(
description="Compute mean and variance of dumped raw features "
"(See detail in parallel_wavegan/bin/compute_statistics.py).")
parser.add_argument("--rootdir", type=str, required=True,
help="direcotry including feature files.")
parser.add_argument("--config", type=str, required=True,
help="yaml format configuration file.")
parser.add_argument("--dumpdir", default=None, type=str,
help="direcotry to save statistics. if not provided, "
"stats will be saved in the above root direcotry. (default=None)")
parser.add_argument("--verbose", type=int, default=1,
help="logging level. higher is more logging. (default=1)")
args = parser.parse_args()
# set logger
if args.verbose > 1:
logging.basicConfig(
level=logging.DEBUG, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
elif args.verbose > 0:
logging.basicConfig(
level=logging.INFO, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
else:
logging.basicConfig(
level=logging.WARN, format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s")
logging.warning('skip DEBUG/INFO messages')
# load config
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.Loader)
config.update(vars(args))
    # check directory existence
if args.dumpdir is None:
args.dumpdir = os.path.dirname(args.rootdir)
if not os.path.exists(args.dumpdir):
os.makedirs(args.dumpdir)
# get dataset
if config["format"] == "hdf5":
mel_query = "*.h5"
mel_load_fn = lambda x: read_hdf5(x, "feats") # NOQA
elif config["format"] == "npy":
mel_query = "*-feats.npy"
mel_load_fn = np.load
else:
raise ValueError("support only hdf5 or npy format.")
dataset = MelDataset(
args.rootdir,
mel_query=mel_query,
mel_load_fn=mel_load_fn)
logging.info(f"the number of files = {len(dataset)}.")
# calculate statistics
scaler = StandardScaler()
for mel in tqdm(dataset):
scaler.partial_fit(mel)
if config["format"] == "hdf5":
write_hdf5(os.path.join(args.dumpdir, "stats.h5"), "mean", scaler.mean_.astype(np.float32))
write_hdf5(os.path.join(args.dumpdir, "stats.h5"), "scale", scaler.scale_.astype(np.float32))
else:
stats = np.stack([scaler.mean_, scaler.scale_], axis=0)
np.save(os.path.join(args.dumpdir, "stats.npy"), stats.astype(np.float32), allow_pickle=False)
if __name__ == "__main__":
main()
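# A sketch of how the dumped statistics could be consumed for normalization
# later (paths illustrative; StandardScaler's transform is (x - mean) / scale):
#
#   stats = np.load("dump/stats.npy")       # shape (2, num_mels): row 0 = mean, row 1 = scale
#   mel_norm = (mel - stats[0]) / stats[1]
#   # hdf5 variant:
#   mean = read_hdf5("dump/stats.h5", "mean")
#   scale = read_hdf5("dump/stats.h5", "scale")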
|
[
"[email protected]"
] | |
a7c1e6b42ee79fdf0c4da37853f738ecb2cb846d
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_43/137.py
|
32bf9aa7b435162a23c136394f5bfa91268e72dc
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 996 |
py
|
def print_2d(tab):
for e in tab:
print(e)
if __name__ == '__main__':
fin = open("./test.in", "r")
fout = open("./test.out", "w")
line = fin.readline()
N = int(line)
for test in range(0, N):
total = 0
num = fin.readline().replace("\n", "")
base = []
val = []
print(num)
for q in num:
val.append(q)
if q not in base:
base.append(q)
if(len(base) > 1):
base[0], base[1] = base[1], base[0]
else:
base[:0] = [0]
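        # after this fix-up, base.index() maps the first-seen digit symbol to 1,
        # the second to 0, and later ones to 2, 3, ... -- the minimal-value
        # assignment (this appears to be Code Jam's "All Your Base")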
#print(base)
#print("val", val)
pow = 0
for i in range(len(val)-1, -1, -1):
total += (len(base)**pow) * base.index(val[i])
pow += 1
#print("base", base)
sol = "Case #" + str(test+1) + ": " + str(total) + "\n"
print(sol)
fout.write(sol)
|
[
"[email protected]"
] | |
5267c5cfb4bed1233a56ac7d2f2c81af75cc887d
|
9beac2738b54468e108b34e3d4f1455276b5885d
|
/saral_debug4.py
|
891b4251273f529ae74d6b96680d6c30d491e7c8
|
[] |
no_license
|
preetising/Dictinory
|
f6c1d7186698a91de641fc5d899ecf8367ea0e2f
|
f681933d4b1dc23e8e1aff5168bd599314930af7
|
refs/heads/main
| 2023-08-20T19:56:59.696642 | 2021-10-10T08:21:43 | 2021-10-10T08:21:43 | 415,525,407 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 154 |
py
|
s={'umesh':21,'bijender':54,'amar':67,'peter':89,'sonu':56}
a={'python':20,"gaurav":300,'dev':34,"karan":43}
c={}
for i in (s,a):
c.update(i)
print(c)
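# Equivalent one-liner on Python 3.5+: unpack both dicts into a new literal;
# as with update(), later mappings win on duplicate keys.
c = {**s, **a}
print(c)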
|
[
"[email protected]"
] | |
72f887b96c1f821a5ac3100152ea4535e27f17df
|
a255841ce67acc971eca1ac2ff5074835807db0b
|
/model/latticelstm.py
|
4126477bd2fc5f76d7588a56982e482d4333b33a
|
[] |
no_license
|
zingp/ChineseNER
|
ca5ce27c8eec6d0ebc923a1e1c422fd31626c4bb
|
997f2455a19803a5975777dc759f0472b18a2625
|
refs/heads/master
| 2020-09-16T20:12:45.336845 | 2019-12-09T14:30:51 | 2019-12-09T14:30:51 | 223,877,733 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 11,877 |
py
|
"""Implementation of batch-normalized LSTM."""
import torch
from torch import nn
import torch.autograd as autograd
from torch.autograd import Variable
from torch.nn import functional, init
import numpy as np
class WordLSTMCell(nn.Module):
"""A basic LSTM cell."""
def __init__(self, input_size, hidden_size, use_bias=True):
"""
Most parts are copied from torch.nn.LSTMCell.
"""
super(WordLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.weight_ih = nn.Parameter(
torch.FloatTensor(input_size, 3 * hidden_size))
self.weight_hh = nn.Parameter(
torch.FloatTensor(hidden_size, 3 * hidden_size))
if use_bias:
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters following the way proposed in the paper.
"""
init.orthogonal(self.weight_ih.data)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data.set_(weight_hh_data)
# The bias is just set to zero vectors.
if self.use_bias:
init.constant(self.bias.data, val=0)
def forward(self, input_, hx):
"""
Args:
input_: A (batch, input_size) tensor containing input
features.
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
            c_1: Tensor containing the next cell state (this word cell
                computes only f, i, g and returns no hidden state).
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
f, i, g = torch.split(wh_b + wi, split_size=self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
return c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
class MultiInputLSTMCell(nn.Module):
"""A basic LSTM cell."""
def __init__(self, input_size, hidden_size, use_bias=True):
"""
Most parts are copied from torch.nn.LSTMCell.
"""
super(MultiInputLSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.weight_ih = nn.Parameter(
torch.FloatTensor(input_size, 3 * hidden_size))
self.weight_hh = nn.Parameter(
torch.FloatTensor(hidden_size, 3 * hidden_size))
self.alpha_weight_ih = nn.Parameter(
torch.FloatTensor(input_size, hidden_size))
self.alpha_weight_hh = nn.Parameter(
torch.FloatTensor(hidden_size, hidden_size))
if use_bias:
self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
self.alpha_bias = nn.Parameter(torch.FloatTensor(hidden_size))
else:
self.register_parameter('bias', None)
self.register_parameter('alpha_bias', None)
self.reset_parameters()
def reset_parameters(self):
"""
Initialize parameters following the way proposed in the paper.
"""
init.orthogonal(self.weight_ih.data)
init.orthogonal(self.alpha_weight_ih.data)
weight_hh_data = torch.eye(self.hidden_size)
weight_hh_data = weight_hh_data.repeat(1, 3)
self.weight_hh.data.set_(weight_hh_data)
alpha_weight_hh_data = torch.eye(self.hidden_size)
alpha_weight_hh_data = alpha_weight_hh_data.repeat(1, 1)
self.alpha_weight_hh.data.set_(alpha_weight_hh_data)
# The bias is just set to zero vectors.
if self.use_bias:
init.constant(self.bias.data, val=0)
init.constant(self.alpha_bias.data, val=0)
def forward(self, input_, c_input, hx):
"""
Args:
batch = 1
input_: A (batch, input_size) tensor containing input
features.
c_input: A list with size c_num,each element is the input ct from skip word (batch, hidden_size).
hx: A tuple (h_0, c_0), which contains the initial hidden
and cell state, where the size of both states is
(batch, hidden_size).
Returns:
h_1, c_1: Tensors containing the next hidden and cell state.
"""
h_0, c_0 = hx
batch_size = h_0.size(0)
assert(batch_size == 1)
bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.weight_hh)
wi = torch.mm(input_, self.weight_ih)
i, o, g = torch.split(wh_b + wi, split_size=self.hidden_size, dim=1)
i = torch.sigmoid(i)
g = torch.tanh(g)
o = torch.sigmoid(o)
c_num = len(c_input)
if c_num == 0:
f = 1 - i
c_1 = f*c_0 + i*g
h_1 = o * torch.tanh(c_1)
else:
c_input_var = torch.cat(c_input, 0)
            alpha_bias_batch = (self.alpha_bias.unsqueeze(0).expand(batch_size, *self.alpha_bias.size()))  # unused: addmm below passes self.alpha_bias directly
c_input_var = c_input_var.squeeze(1) ## (c_num, hidden_dim)
alpha_wi = torch.addmm(self.alpha_bias, input_, self.alpha_weight_ih).expand(c_num, self.hidden_size)
alpha_wh = torch.mm(c_input_var, self.alpha_weight_hh)
alpha = torch.sigmoid(alpha_wi + alpha_wh)
## alpha = i concat alpha
alpha = torch.exp(torch.cat([i, alpha],0))
alpha_sum = alpha.sum(0)
## alpha = softmax for each hidden element
alpha = torch.div(alpha, alpha_sum)
merge_i_c = torch.cat([g, c_input_var],0)
c_1 = merge_i_c * alpha
c_1 = c_1.sum(0).unsqueeze(0)
h_1 = o * torch.tanh(c_1)
return h_1, c_1
def __repr__(self):
s = '{name}({input_size}, {hidden_size})'
return s.format(name=self.__class__.__name__, **self.__dict__)
class LatticeLSTM(nn.Module):
"""A module that runs multiple steps of LSTM."""
def __init__(self, input_dim, hidden_dim, word_drop, word_alphabet_size, word_emb_dim, pretrain_word_emb=None, left2right=True, fix_word_emb=True, gpu=True, use_bias = True):
super(LatticeLSTM, self).__init__()
skip_direction = "forward" if left2right else "backward"
print("build LatticeLSTM... ", skip_direction, ", Fix emb:", fix_word_emb, " gaz drop:", word_drop)
self.gpu = gpu
self.hidden_dim = hidden_dim
self.word_emb = nn.Embedding(word_alphabet_size, word_emb_dim)
if pretrain_word_emb is not None:
print("load pretrain word emb...", pretrain_word_emb.shape)
self.word_emb.weight.data.copy_(torch.from_numpy(pretrain_word_emb))
else:
self.word_emb.weight.data.copy_(torch.from_numpy(self.random_embedding(word_alphabet_size, word_emb_dim)))
if fix_word_emb:
self.word_emb.weight.requires_grad = False
self.word_dropout = nn.Dropout(word_drop)
self.rnn = MultiInputLSTMCell(input_dim, hidden_dim)
self.word_rnn = WordLSTMCell(word_emb_dim, hidden_dim)
self.left2right = left2right
if self.gpu:
self.rnn = self.rnn.cuda()
self.word_emb = self.word_emb.cuda()
self.word_dropout = self.word_dropout.cuda()
self.word_rnn = self.word_rnn.cuda()
def random_embedding(self, vocab_size, embedding_dim):
pretrain_emb = np.empty([vocab_size, embedding_dim])
scale = np.sqrt(3.0 / embedding_dim)
for index in range(vocab_size):
pretrain_emb[index,:] = np.random.uniform(-scale, scale, [1, embedding_dim])
return pretrain_emb
def forward(self, input, skip_input_list, hidden=None):
"""
        input: variable (batch, seq_len, input_dim) of character embeddings, batch = 1
skip_input_list: [skip_input, volatile_flag]
skip_input: three dimension list, with length is seq_len. Each element is a list of matched word id and its length.
example: [[], [[25,13],[2,3]]] 25/13 is word id, 2,3 is word length .
"""
volatile_flag = skip_input_list[1]
skip_input = skip_input_list[0]
if not self.left2right:
skip_input = convert_forward_gaz_to_backward(skip_input)
input = input.transpose(1,0)
seq_len = input.size(0)
batch_size = input.size(1)
assert(batch_size == 1)
hidden_out = []
memory_out = []
if hidden:
(hx,cx)= hidden
else:
hx = autograd.Variable(torch.zeros(batch_size, self.hidden_dim))
cx = autograd.Variable(torch.zeros(batch_size, self.hidden_dim))
if self.gpu:
hx = hx.cuda()
cx = cx.cuda()
id_list = range(seq_len)
if not self.left2right:
id_list = list(reversed(id_list))
input_c_list = init_list_of_objects(seq_len)
for t in id_list:
(hx,cx) = self.rnn(input[t], input_c_list[t], (hx,cx))
hidden_out.append(hx)
memory_out.append(cx)
if skip_input[t]:
matched_num = len(skip_input[t][0])
word_var = autograd.Variable(torch.LongTensor(skip_input[t][0]),volatile = volatile_flag)
if self.gpu:
word_var = word_var.cuda()
word_emb = self.word_emb(word_var)
word_emb = self.word_dropout(word_emb)
ct = self.word_rnn(word_emb, (hx,cx))
assert(ct.size(0)==len(skip_input[t][1]))
for idx in range(matched_num):
length = skip_input[t][1][idx]
if self.left2right:
# if t+length <= seq_len -1:
input_c_list[t+length-1].append(ct[idx,:].unsqueeze(0))
else:
# if t-length >=0:
input_c_list[t-length+1].append(ct[idx,:].unsqueeze(0))
# print len(a)
if not self.left2right:
hidden_out = list(reversed(hidden_out))
memory_out = list(reversed(memory_out))
output_hidden, output_memory = torch.cat(hidden_out, 0), torch.cat(memory_out, 0)
#(batch, seq_len, hidden_dim)
# print output_hidden.size()
return output_hidden.unsqueeze(0), output_memory.unsqueeze(0)
def init_list_of_objects(size):
list_of_objects = list()
for i in range(0,size):
list_of_objects.append( list() )
return list_of_objects
def convert_forward_gaz_to_backward(forward_gaz):
# print forward_gaz
length = len(forward_gaz)
backward_gaz = init_list_of_objects(length)
for idx in range(length):
if forward_gaz[idx]:
assert(len(forward_gaz[idx])==2)
num = len(forward_gaz[idx][0])
for idy in range(num):
the_id = forward_gaz[idx][0][idy]
the_length = forward_gaz[idx][1][idy]
new_pos = idx+the_length -1
if backward_gaz[new_pos]:
backward_gaz[new_pos][0].append(the_id)
backward_gaz[new_pos][1].append(the_length)
else:
backward_gaz[new_pos] = [[the_id],[the_length]]
return backward_gaz
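# --- Minimal usage sketch (an illustrative assumption, not part of the
# --- original file; all sizes are made up, gpu=False for CPU-only runs) ---
if __name__ == '__main__':
    model = LatticeLSTM(input_dim=50, hidden_dim=100, word_drop=0.5,
                        word_alphabet_size=1000, word_emb_dim=50,
                        left2right=True, fix_word_emb=False, gpu=False)
    chars = autograd.Variable(torch.randn(1, 4, 50))  # (batch=1, seq_len=4, input_dim)
    skip = [[], [[25, 13], [2, 3]], [], []]           # word ids 25/13 with lengths 2/3 matched at position 1
    hidden, memory = model(chars, [skip, True])
    print(hidden.size(), memory.size())               # both (1, 4, 100)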
|
[
"[email protected]"
] | |
161947c50d199b8842683b5136d3eeaaf338567d
|
3109e3a7f2f2dccc5a806695f0adbe0fed879112
|
/ecommerce/Loma/migrations/0029_auto_20190205_1945.py
|
335430bc000f2b635c63fc5a7519215234a82de3
|
[] |
no_license
|
Maheshwari2604/ecommercee
|
9ebbf18b4fbf933a0d9641009f7f17ce836de587
|
4411e7e10eccda907711200d2c0d873db3d7f803
|
refs/heads/master
| 2020-04-20T18:03:49.575124 | 2019-02-12T16:02:05 | 2019-02-12T16:02:05 | 169,007,411 | 1 | 1 | null | null | null | null |
UTF-8
|
Python
| false | false | 397 |
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-02-05 14:15
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('Loma', '0028_daily_price_list_loma_id'),
]
operations = [
migrations.RenameModel(
old_name='MD_model',
new_name='md',
),
]
|
[
"[email protected]"
] | |
f322e0b9b3bbf0ddf6522dc18c005c0b41c7478a
|
a57130d75ad75520217b92d3bd977394846501f7
|
/test.py
|
b983df123bd16dc281079046f1465292f05469d3
|
[] |
no_license
|
gittygupta/slp
|
15153b9cb580ed8d35d6be4b157ed94ac41d4a4f
|
c43fc9a01ae67dfd28d147dc4ffc2f0a70e60199
|
refs/heads/master
| 2022-12-18T08:51:42.266621 | 2020-09-28T16:46:31 | 2020-09-28T16:46:31 | 293,038,095 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,379 |
py
|
import tensorflow as tf
import numpy as np
from model import *
from attention import *
from data_utils import *
from bert_utils import *
# Global variables
num_decoder_blocks = 6
num_heads = 8
d_model = 256
d_ffn = 256
d_out = 154
# models
bert = Bert(max_sequence_length=80)
decoder = Decoder(num_decoder_blocks, num_heads, d_model, d_ffn, d_out)
# inference-loop
# only for a single input
def inference(model_path, sentence, net_seq_len):
    # tf tensors are immutable, so keep the running target in a mutable numpy buffer
    tar = np.zeros((1, net_seq_len, d_out), dtype=np.float32)
# ckpt
checkpoint_path = model_path
ckpt = tf.train.Checkpoint(decoder=decoder,
                               optimizer=optimizer)  # 'optimizer' is assumed to come from the star imports above
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Model Loaded!!')
else:
print('Initialised checkpoint')
words, _, seq_lengths = bert(sentence[0])
pad_mask = padding_mask(words.shape[1], seq_lengths)
la_mask = look_ahead_mask(net_seq_len - 1)
for i in range(net_seq_len - 1):
        pred = decoder(tf.convert_to_tensor(tar[:, :-1, :]), words, la_mask, pad_mask)
        print("Counter : ", pred[0][i][-1])
        tar[0, i + 1] = pred[0][i].numpy()  # write this step's prediction back into the buffer
return tar
# simple test
def test(model_path, sentences, path, video, net_sequence_length):
# ckpt
checkpoint_path = model_path
ckpt = tf.train.Checkpoint(decoder=decoder,
optimizer=optimizer)
ckpt_manager = tf.train.CheckpointManager(ckpt, checkpoint_path, max_to_keep=5)
if ckpt_manager.latest_checkpoint:
ckpt.restore(ckpt_manager.latest_checkpoint)
print('Model Loaded!!')
else:
print('Initialised checkpoint')
tar_inp = get_processed_data(path, video, 0, net_sequence_length)[:, :-1, :]
words, _, seq_lengths = bert(sentences[0])
pad_mask = padding_mask(words.shape[1], seq_lengths)
la_mask = look_ahead_mask(tar_inp.shape[1])
pred = decoder(tar_inp, words, la_mask, pad_mask)
return pred
if __name__ == '__main__':
model_path = 'models/path/to/model'
sentence = ['german sentence']
vid_path = 'path/to/videos'
video = 'name of video in dataset'
net_sequence_length = 512
test(model_path, sentence, vid_path, video, net_sequence_length)
|
[
"[email protected]"
] | |
aebc43e38204ef38f0a134e24421cf6b5df8c018
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03003/s239952205.py
|
9e63871a10ccf805a11575b5567761e5e95c710d
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 790 |
py
|
def count_cs(str1, str2, MOD):
"""文字列str1, str2の共通部分列(Common Subsequence, CS)を数え上げる。
添字が異なる場合は異なる部分列として考える。
計算量 O(|str1||str2|)
"""
dp = [[0] * (len(str2) + 1) for _ in range(len(str1) + 1)]
for i in range(len(str1)):
for j in range(len(str2)):
if str1[i] == str2[j]:
dp[i + 1][j + 1] = dp[i + 1][j] + dp[i][j + 1] + 1
else:
dp[i + 1][j + 1] = dp[i + 1][j] + dp[i][j + 1] - dp[i][j]
dp[i + 1][j + 1] %= MOD
return (dp[len(str1)][len(str2)] + 1) % MOD
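# Worked example (illustrative addition): count_cs("ab", "ab", 10**9 + 7) == 4,
# counting "", "a", "b" and "ab" (the +1 above accounts for the empty one).
assert count_cs("ab", "ab", 10 ** 9 + 7) == 4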
n, m = map(int, input().split())
s = list(map(int, input().split()))
t = list(map(int, input().split()))
MOD = 10 ** 9 + 7
print(count_cs(s, t, MOD))
|
[
"[email protected]"
] | |
172edb43fd3f057e687281d8ef76d0ff212314ad
|
4be56098894a95da5964622fc4102b69e4530ab6
|
/题库/227.基本计算器II.py
|
26bff70550aefbc302360bf6d887c2c6569af83d
|
[] |
no_license
|
ACENDER/LeetCode
|
7c7c7ecc8d0cc52215272f47ec34638637fae7ac
|
3383b09ab1246651b1d7b56ab426a456f56a4ece
|
refs/heads/master
| 2023-03-13T19:19:07.084141 | 2021-03-15T09:29:21 | 2021-03-15T09:29:21 | 299,332,864 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 84 |
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @File : 227.基本计算器II.py
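# The file was committed as a header-only stub; below is a sketch of the
# standard single-stack solution for this problem (an assumption, not the
# author's code).
def calculate(s: str) -> int:
    stack = []
    num, op = 0, '+'
    for i, ch in enumerate(s):
        if ch.isdigit():
            num = num * 10 + int(ch)
        if ch in '+-*/' or i == len(s) - 1:
            if op == '+':
                stack.append(num)
            elif op == '-':
                stack.append(-num)
            elif op == '*':
                stack.append(stack.pop() * num)
            else:
                # integer division truncating toward zero
                stack.append(int(stack.pop() / num))
            num, op = 0, ch
    return sum(stack)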
|
[
"[email protected]"
] | |
3e6fc91d43aac579360e7c6d3e082c7d2104718e
|
bf099f3425b590f6cbb39e8305eac5489e558e26
|
/笔记/re/python-trade/trade.py
|
ef4f03b4ad241931859eff03cbe3c3e8247eaceb
|
[] |
no_license
|
Imtinmin/Note
|
399a08e185bf11e2ca9fbbe4823477e86e86ebe2
|
8f99a4c180b921baf46a838997bcce64c688fd70
|
refs/heads/master
| 2023-01-27T23:25:43.763941 | 2020-01-01T15:24:08 | 2020-01-01T15:24:08 | 188,774,377 | 1 | 1 | null | 2023-01-04T13:55:52 | 2019-05-27T05:07:47 |
PHP
|
UTF-8
|
Python
| false | false | 651 |
py
|
#!/usr/bin/env python
# encoding: utf-8
# If you find this useful, share it with your friends! http://tool.lu/pyc
import base64
def encode(message):
s = ''
    for i in message:
        x = ord(i) ^ 32  # toggle bit 5 (flips the ASCII letter-case bit)
        x = x + 16       # then shift by 16
        s += chr(x)
return base64.b64encode(s)
def decode(message):
a = base64.b64decode(message)
s = ''
    for i in a:
        s += chr((ord(i) - 16) ^ 32)  # undo the +16 shift, then the XOR
return s
'''correct = 'XlNkVmtUI1MgXWBZXCFeKY+AaXNt'
flag = ''
print 'Input flag:'
flag = raw_input()
if encode(flag) == correct:
print 'correct'
else:
print 'wrong'
'''
correct = 'XlNkVmtUI1MgXWBZXCFeKY+AaXNt'
print decode(correct)
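# Python 3 port of decode (an illustrative addition; the script above is
# Python 2, where iterating over decoded bytes yields 1-character strings):
#
#   def decode(message):
#       return ''.join(chr((b - 16) ^ 32) for b in base64.b64decode(message))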
|
[
"[email protected]"
] | |
baeb2f37428917f977c80645386036cb215998fb
|
b6f153a037289e1363bb09b069888bfadbcd2b44
|
/iprPy/library/Library.py
|
8c25025222c15a221d783a468b1dc86796053b5e
|
[] |
no_license
|
ad1v7/iprPy
|
da449c6f608cf6161e2ff757c71a6957c0b1cac8
|
f114f84de12ce9306629e3eb14c6e3e1f34566fb
|
refs/heads/master
| 2022-11-23T03:55:27.937077 | 2020-07-31T21:17:45 | 2020-07-31T21:17:45 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 10,903 |
py
|
# coding: utf-8
# Standard Python libraries
from pathlib import Path
import uuid
# https://github.com/usnistgov/DataModelDict
from DataModelDict import DataModelDict as DM
# https://github.com/usnistgov/atomman
import atomman as am
import requests
from .. import load_record
from ..tools import aslist
class Library(am.library.Database):
"""
Class for interacting with potential records hosted from potentials.nist.gov
"""
def download_refs(self, style=None, status='active', format='json', indent=4, verbose=False):
"""
Downloads reference records from potentials.nist.gov to the library.
Note: this will overwrite any local copies of records with matching
names. If you made changes to library files, be sure to save them
with a different name.
Parameters
----------
style : str or list, optional
The reference style(s) to download. If not given, all reference
style will be downloaded.
status : str, list or None, optional
Only the potential_LAMMPS records with the given status(es) will
be downloaded. Allowed values are 'active' (default),
'superseded', and 'retracted'. If set to None, all hosted
potential_LAMMPS will be downloaded.
"""
assert format in ['json', 'xml']
# use all_ref_styles if none are specified
all_ref_styles = ['crystal_prototype', 'dislocation', 'free_surface',
'point_defect', 'stacking_fault', 'potential_LAMMPS']
if style is None:
style = all_ref_styles
style = aslist(style)
for s in style:
if s == 'potential_LAMMPS':
self.download_lammps_potentials(format=format, indent=indent,
status=status, verbose=verbose)
else:
self.download_records(s, format=format, indent=indent, verbose=verbose)
def download_oqmd_crystals(self, elements, localpath=None, format='json', indent=4):
"""
Accesses OQMD and downloads crystal structures containing the given
elements. The structures are saved to the iprPy library as
reference_crystal records.
        Parameters
        ----------
        elements : list
            A list of element symbols.
        localpath : path-like, optional
            Library root to save the records to. Defaults to self.localpath.
        format : str, optional
            Format to save the records in: 'json' (default) or 'xml'.
        indent : int, optional
            Indentation to use when writing the records.
        """
assert format in ['json', 'xml']
# Load record style and set universal values
style = 'reference_crystal'
record = load_record(style)
input_dict = {}
input_dict['sourcename'] = "Open Quantum Materials Database"
input_dict['sourcelink'] = "http://oqmd.org/"
input_dict['length_unit'] = "angstrom"
# Set library subdirectory
if localpath is None:
localpath = self.localpath
style_directory = Path(localpath, style)
if not style_directory.is_dir():
style_directory.mkdir(parents=True)
# Sort elements
elements.sort()
# Build list of downloaded entries
have = []
for fname in style_directory.glob('oqmd-*.*'):
have.append(fname.stem)
# Build list of missing OQMD entries
elements_string = '-'.join(elements)
composition_r = requests.get('http://oqmd.org/materials/composition/' + elements_string)
composition_html = composition_r.text
missing = []
count = 0
while True:
count += 1
try:
start = composition_html.index('href="/materials/entry/') + len('href="/materials/entry/')
except:
break
else:
end = start + composition_html[start:].index('"')
entry_number = composition_html[start:end]
composition_html = composition_html[end+2:]
entry_id = f'oqmd-{entry_number}'
if entry_id not in have and entry_id not in missing:
missing.append(entry_id)
if count > 100:
raise ValueError('Loop likely infinite')
# Download missing entries
for entry_id in missing:
entry_number = entry_id.replace('oqmd-', '')
entry_r = requests.get('http://oqmd.org/materials/entry/' + entry_number)
entry_html = entry_r.text
start = entry_html.index('href="/materials/structure/') + len('href="/materials/structure/')
end = start + entry_html[start:].index('"')
structure_number = entry_html[start:end]
try:
structure_url = 'http://oqmd.org/materials/export/conventional/poscar/' + structure_number
structure_r = requests.get(structure_url)
structure_r.raise_for_status()
except:
try:
structure_url = 'http://oqmd.org/materials/export/primitive/poscar/' + structure_number
structure_r = requests.get(structure_url)
structure_r.raise_for_status()
except:
continue
# Build record content
input_dict['id'] = entry_id
input_dict['ucell'] = am.load('poscar', structure_r.text).normalize()
record.buildcontent(input_dict)
# Save
with open(Path(style_directory, f'{entry_id}.{format}'), 'w') as f:
if format == 'json':
record.content.json(fp=f, indent=indent)
else:
record.content.xml(fp=f, indent=indent)
print('Added', entry_id)
def download_mp_crystals(self, elements, api_key=None, localpath=None, format='json', indent=4):
"""
Accesses Materials Project and downloads crystal structures containing the given
elements. The structures are saved to the iprPy library as
reference_crystal records.
Parameters
----------
elements : list
A list of element symbols.
api_key : str, optional
The user's Materials Project API key. If not given, will use "MAPI_KEY"
environment variable
"""
assert format in ['json', 'xml']
# Function-specific imports
import pymatgen as pmg
from pymatgen.ext.matproj import MPRester
# Define subset generator
def subsets(fullset):
"""Yields element combination subsets"""
for i, item in enumerate(fullset):
yield [item]
if len(fullset) > 1:
for subset in subsets(fullset[i+1:]):
yield [item] + subset
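        # e.g. subsets(['Al', 'Ni']) yields ['Al'], ['Al', 'Ni'], ['Ni']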
# Load record style and set universal values
style = 'reference_crystal'
record = load_record(style)
input_dict = {}
input_dict['sourcename'] = "Materials Project"
input_dict['sourcelink'] = "https://materialsproject.org/"
input_dict['length_unit'] = "angstrom"
# Set library subdirectory
if localpath is None:
localpath = self.localpath
style_directory = Path(localpath, style)
if not style_directory.is_dir():
style_directory.mkdir(parents=True)
# Sort elements
elements.sort()
# Build list of downloaded entries
have = []
for fname in style_directory.glob('mp-*.*'):
have.append(fname.stem)
# Open connection to Materials Project
with MPRester(api_key) as m:
# Loop over subsets of elements
for subelements in subsets(elements):
# Query MP for all entries corresponding to the elements
entries = m.query({"elements": subelements}, ["material_id"])
# Add entries to the list if not there
missing = []
for entry in entries:
if entry['material_id'] not in have and entry['material_id'] not in missing:
missing.append(entry['material_id'])
# Download missing entries
try:
entries = m.query({"material_id": {"$in": missing}}, ['material_id', 'cif'])
except:
pass
else:
# Convert cif to model and save
for entry in entries:
entry_id = entry['material_id']
struct = pmg.Structure.from_str(entry['cif'], fmt='cif')
struct = pmg.symmetry.analyzer.SpacegroupAnalyzer(struct).get_conventional_standard_structure()
# Build record content
input_dict['id'] = entry_id
input_dict['ucell'] = am.load('pymatgen_Structure', struct).normalize()
record.buildcontent(input_dict)
# Save
with open(Path(style_directory, f'{entry_id}.{format}'), 'w') as f:
if format == 'json':
record.content.json(fp=f, indent=indent)
else:
record.content.xml(fp=f, indent=indent)
print('Added', entry_id)
def get_ref(self, style, name, verbose=False, asrecord=True):
"""
Gets a reference file from the iprPy library or by downloading from
potentials.nist.gov if a local copy is not found.
Parameters
----------
style : str
The reference record's style.
name : str
The name of the record.
verbose: bool, optional
If True, informative print statements will be used.
asrecord : bool, optional
If True (default) then the content will be returned as an iprPy
Record if a subclass has been defined for the style.
Returns
-------
iprPy.Record
The content as an iprPy Record object. Returned if asrecord is True
and iprPy has a subclass for the record's style.
DataModelDict.DataModelDict
The content as a DataModelDict. Returned if asrecord is False or
iprPy does not have a subclass for the record's style.
"""
content = self.get_record(template=style, title=name, verbose=verbose)
# Load as iprPy record if style exists
try:
assert asrecord
return load_record(style, name, content)
except:
return content
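# Minimal usage sketch (an assumption, not part of this file; constructor
# arguments follow atomman's library Database and may differ):
#
#   lib = Library()
#   record = lib.get_ref('crystal_prototype', 'A1--Cu--fcc')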
|
[
"[email protected]"
] | |
8987f8644e2e99713f75adc99ac002c8bbce3e86
|
22bcb68759d516eea70d18116cd434fcd0a9d842
|
/scrap/infibeam_mobiles_scrap.py
|
f6a2586c2a736359955ad15e0a9a1b85a49fdc82
|
[] |
no_license
|
lovesh/abhiabhi-web-scrapper
|
1f5da38c873fea74870d59f61c3c4f52b50f1886
|
b66fcadc56377276f625530bdf8e739a01cbe16b
|
refs/heads/master
| 2021-01-01T17:16:51.577914 | 2014-10-18T15:56:42 | 2014-10-18T15:56:42 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,656 |
py
|
import downloader
import dom
import urllib
import re
import datetime
import math
import simplejson as json
import pymongo
from collections import defaultdict
import util
siteurl='http://www.infibeam.com'
referer='http://www.infibeam.com/Mobiles/search'
ajax_url='http://www.infibeam.com/Mobiles/Search_ajax.action?store=Mobiles&page='
debug=True
brand_pattern=re.compile('\w+',re.I)
shipping_pattern=re.compile('(\d+)-(\d+)',re.I)
logfile=open('infibeam_mobile_log.txt','w')
dl=downloader.Downloader()
dl.addHeaders({'Origin':siteurl,'Referer':referer})
def getMobileUrlsOfPage(html):
mobile_url_path='//ul[@class="srch_result portrait"]/li/a'
page_dom=dom.DOM(string=html)
links=set(siteurl+l[1] for l in page_dom.getLinksWithXpath(mobile_url_path))
return links
def getAllMobileUrls():
count_path='//div[@id="resultsPane"]/div/div/b[2]'
doc=dom.DOM(url=referer)
count=int(doc.getNodesWithXpath(count_path)[0].text)
num_pages=int(math.ceil(count/20.0))
page_urls=[ajax_url+str(n) for n in xrange(1,num_pages+1)]
dl.putUrls(page_urls)
pages=dl.download()
print len(pages)
mobile_urls=[]
for p in pages:
status=pages[p][0]
html=pages[p][1]
if status > 199 and status < 400:
mobile_urls.extend(getMobileUrlsOfPage(html))
print len(mobile_urls)
return mobile_urls
def getMobileFromPage(url=None,string=None):
mobile={}
if url:
doc=dom.DOM(url=url)
else:
doc=dom.DOM(string=string)
addBox=doc.getNodesWithXpath('//input[@class="buyimg "]')
if addBox: #availability check
mobile['availability']=1
details_path='//div[@id="ib_details"]'
details=doc.getNodesWithXpath(details_path)
if details:
details=details[0].text_content()
shipping=shipping_pattern.search(details)
if shipping:
mobile['shipping']=[shipping.group(1),shipping.group(2)]
else:
mobile['availability']=0
name_path='//div[@id="ib_details"]/h1'
mobile['name']=doc.getNodesWithXpath(name_path)[0].text_content().strip()
brand=brand_pattern.search(mobile['name']).group().lower()
if re.match('sony ericsson',mobile['name'],re.I):
mobile['brand']='sony ericsson'
else:
mobile['brand']=brand
color_path='//a[@class="colorlink"]'
colors=doc.getNodesWithXpath(color_path)
    mobile['colors']=[color.text_content().strip() for color in colors]  # .get('text') read a nonexistent attribute; the link text is wanted
price_path='//span[@class="infiPrice amount price"]'
price=doc.getNodesWithXpath(price_path)
if price:
mobile['price']=int(price[0].text.replace(',',''))
img_path="//div[@id='ib_img_viewer']/img"
mobile['img_url']={'0':doc.getImgUrlWithXpath(img_path)}
desc_path='//div[@class="reviews-box-cont-inner"]'
desc=doc.getNodesWithXpath(desc_path)
if desc:
        mobile['description']=desc[0].text_content().strip()
mobile['last_modified_datetime']=datetime.datetime.now()
product_history={}
if 'price' in mobile:
product_history['price']=mobile['price']
if 'shipping' in mobile:
product_history['shipping']=mobile['shipping']
product_history['availability']=mobile['availability']
product_history['datetime']=mobile['last_modified_datetime']
mobile['product_history']=[product_history,]
mobile['site']='infibeam'
offer_path='//div[@class="offer"]'
offer=doc.getNodesWithXpath(offer_path)
if offer:
mobile['offer']=offer[0].text_content().replace('\r\n ','')
specs_path='//div[@id="specs"]/div'
specs=doc.getNodesWithXpath(specs_path)
specification={}
for spec in specs:
text=spec.xpath('a')[0].text.strip()
if text=='Deliverable Locations' or text=='Disclaimer':
continue
trs=spec.xpath('.//tr')
for tr in trs:
tds=tr.xpath('.//td')
if len(tds)<2:
continue
key=tds[0].text_content().strip(':\n\t ').replace('.','').lower()
value=tds[1].text_content().strip(':\n\t ').lower()
specification[key]=value
if 'android os' in specification and 'os' not in specification:
if specification['android os'] in ['available','yes']:
if 'os version' in specification:
specification['os']='android'+' '+specification['os version']
del(specification['os version'])
else:
specification['os']='android'
del(specification['android os'])
if mobile['brand']=='blackberry' and 'os version' in specification:
util.replaceKey(specification,'os version','os')
mobile['specification']=specification
return mobile
def scrapAllMobiles():
urls=getAllMobileUrls()
mobiles=[]
dl.putUrls(urls)
result=dl.download()
for r in result:
print r
status=result[r][0]
html=result[r][1]
if status > 199 and status < 400:
mobile=getMobileFromPage(string=html)
mobile['url']=r
mobiles.append(mobile)
return mobiles
def insertIntoDB(log=True):
con=pymongo.Connection('localhost',27017)
db=con['abhiabhi']
mobile_coll=db['scraped_mobiles']
mobile_coll.create_index('url',unique=True)
inserted_count=0
updated_count=0
inserted_urls=[]
updated_urls=[]
mobiles=scrapAllMobiles()
for mobile in mobiles:
try:
mobile_coll.insert(mobile,safe=True)
inserted_count+=1
inserted_urls.append(mobile['url'])
except pymongo.errors.DuplicateKeyError:
upd={'last_modified_datetime':datetime.datetime.now()}
if 'availability' in mobile:
upd['availability']=mobile['availability']
if 'price' in mobile:
upd['price']=mobile['price']
if 'shipping' in mobile:
upd['shipping']=mobile['shipping']
if 'offer' in mobile:
upd['offer']=mobile['offer']
else:
upd['offer']=''
mobile_coll.update({'url':mobile['url']},{'$push':{'product_history':mobile['product_history'][0]},'$set':upd})
updated_count+=1
updated_urls.append(mobile['url'])
if log:
scrap_log=db['scrap_log']
log={'siteurl':siteurl,'datetime':datetime.datetime.now(),'product':'mobile','products_updated_count':updated_count,'products_inserted_count':inserted_count,'products_updated_urls':updated_urls,'products_inserted_urls':inserted_urls}
scrap_log.insert(log)
print "%d inserted and %d updated"%(inserted_count,updated_count)
if __name__=='__main__':
insertIntoDB()
|
[
"[email protected]"
] | |
4fdf0eea5655ddf62a21084ec572485b7d410a69
|
5f4e13201d4c5b7edc8dbbda289380682a187bec
|
/nlpfr/nltk/test/doctest_nose_plugin.py
|
8b0dfecae97f59fa3dc62cfbe8b1662d9c2cd18f
|
[] |
no_license
|
intellivoid/CoffeeHousePy
|
92f4fb344de757837c3d3da05cb5513e90408039
|
57c453625239f28da88b88ddd0ae5f1ecdd4de3c
|
refs/heads/master
| 2023-02-23T14:32:01.606630 | 2021-01-28T02:57:10 | 2021-01-28T02:57:10 | 324,419,067 | 0 | 2 | null | null | null | null |
UTF-8
|
Python
| false | false | 6,250 |
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import re
import sys
import os
import codecs
import doctest
from nose.util import tolist, anyp
from nose.plugins.base import Plugin
from nose.suite import ContextList
from nose.plugins.doctests import Doctest, log, DocFileCase
ALLOW_UNICODE = doctest.register_optionflag("ALLOW_UNICODE")
class _UnicodeOutputChecker(doctest.OutputChecker):
_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
def _remove_u_prefixes(self, txt):
return re.sub(self._literal_re, r"\1\2", txt)
def check_output(self, want, got, optionflags):
res = doctest.OutputChecker.check_output(self, want, got, optionflags)
if res:
return True
if not (optionflags & ALLOW_UNICODE):
return False
# ALLOW_UNICODE is active and want != got
cleaned_want = self._remove_u_prefixes(want)
cleaned_got = self._remove_u_prefixes(got)
res = doctest.OutputChecker.check_output(
self, cleaned_want, cleaned_got, optionflags
)
return res
_checker = _UnicodeOutputChecker()
class DoctestPluginHelper(object):
"""
This mixin adds print_function future import to all test cases.
It also adds support for:
'#doctest +ALLOW_UNICODE' option that
makes DocTestCase think u'foo' == 'foo'.
'#doctest doctestencoding=utf-8' option that
changes the encoding of doctest files
"""
OPTION_BY_NAME = ("doctestencoding",)
def loadTestsFromFileUnicode(self, filename):
if self.extension and anyp(filename.endswith, self.extension):
name = os.path.basename(filename)
dh = codecs.open(filename, "r", self.options.get("doctestencoding"))
try:
doc = dh.read()
finally:
dh.close()
fixture_context = None
globs = {"__file__": filename}
if self.fixtures:
base, ext = os.path.splitext(name)
dirname = os.path.dirname(filename)
sys.path.append(dirname)
fixt_mod = base + self.fixtures
try:
fixture_context = __import__(fixt_mod, globals(), locals(), ["nop"])
except ImportError as e:
log.debug("Could not import %s: %s (%s)", fixt_mod, e, sys.path)
log.debug("Fixture module %s resolved to %s", fixt_mod, fixture_context)
if hasattr(fixture_context, "globs"):
globs = fixture_context.globs(globs)
parser = doctest.DocTestParser()
test = parser.get_doctest(
doc, globs=globs, name=name, filename=filename, lineno=0
)
if test.examples:
case = DocFileCase(
test,
optionflags=self.optionflags,
setUp=getattr(fixture_context, "setup_test", None),
tearDown=getattr(fixture_context, "teardown_test", None),
result_var=self.doctest_result_var,
)
if fixture_context:
yield ContextList((case,), context=fixture_context)
else:
yield case
else:
yield False # no tests to load
def loadTestsFromFile(self, filename):
cases = self.loadTestsFromFileUnicode(filename)
for case in cases:
if isinstance(case, ContextList):
yield ContextList([self._patchTestCase(c) for c in case], case.context)
else:
yield self._patchTestCase(case)
def loadTestsFromModule(self, module):
"""Load doctests from the module.
"""
for suite in super(DoctestPluginHelper, self).loadTestsFromModule(module):
cases = [self._patchTestCase(case) for case in suite._get_tests()]
yield self.suiteClass(cases, context=module, can_split=False)
def _patchTestCase(self, case):
if case:
case._dt_test.globs["print_function"] = print_function
case._dt_checker = _checker
return case
def configure(self, options, config):
# it is overriden in order to fix doctest options discovery
Plugin.configure(self, options, config)
self.doctest_result_var = options.doctest_result_var
self.doctest_tests = options.doctest_tests
self.extension = tolist(options.doctestExtension)
self.fixtures = options.doctestFixtures
self.finder = doctest.DocTestFinder()
# super(DoctestPluginHelper, self).configure(options, config)
self.optionflags = 0
self.options = {}
if options.doctestOptions:
stroptions = ",".join(options.doctestOptions).split(",")
for stroption in stroptions:
try:
if stroption.startswith("+"):
self.optionflags |= doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
continue
elif stroption.startswith("-"):
self.optionflags &= ~doctest.OPTIONFLAGS_BY_NAME[stroption[1:]]
continue
try:
key, value = stroption.split("=")
except ValueError:
pass
else:
                        if key not in self.OPTION_BY_NAME:
raise ValueError()
self.options[key] = value
continue
except (AttributeError, ValueError, KeyError):
raise ValueError("Unknown doctest option {}".format(stroption))
else:
raise ValueError(
"Doctest option is not a flag or a key/value pair: {} ".format(
stroption
)
)
class DoctestFix(DoctestPluginHelper, Doctest):
pass
|
[
"[email protected]"
] | |
db70a141cfb4f03bf7d9c154bdb978495ede765f
|
e5d5fa28999bcc6c642bb42dda93afd38e272b81
|
/LeetCode/1020. Number of Enclaves/solve1.py
|
65d26252539eea884af9a4350e4badff1630d7a4
|
[] |
no_license
|
chiahsun/problem_solving
|
cd3105969983d16d3d5d416d4a0d5797d4b58e91
|
559fafa92dd5516058bdcea82a438eadf5aa1ede
|
refs/heads/master
| 2023-02-05T06:11:27.536617 | 2023-01-26T10:51:23 | 2023-01-26T10:51:23 | 30,732,382 | 3 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 874 |
py
|
class Solution:
    def numEnclaves(self, grid: List[List[int]]) -> int:
        # Flood-fill all land reachable from the border down to 0, then
        # count the land cells that remain -- those are the enclaves.
        M, N = len(grid), len(grid[0])
def fill(grid, x, y):
grid[x][y] = 0
for dx, dy in [[1, 0], [-1, 0], [0, 1], [0, -1]]:
nx, ny = x+dx, y+dy
if nx >= 0 and nx < M and ny >= 0 and ny < N and grid[nx][ny] == 1:
fill(grid, nx, ny)
for i in range(M):
if grid[i][0] == 1:
fill(grid, i, 0)
if grid[i][N-1] == 1:
fill(grid, i, N-1)
for i in range(N):
if grid[0][i] == 1:
fill(grid, 0, i)
if grid[M-1][i] == 1:
fill(grid, M-1, i)
res = 0
for i in range(M):
for k in range(N):
if grid[i][k] == 1:
res += 1
return res
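
# Note (illustrative addition): on large grids the recursive fill can exceed
# Python's default recursion limit; an iterative stack-based flood fill (or
# sys.setrecursionlimit) sidesteps that.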
|
[
"[email protected]"
] | |
f6752c0fb6fb382f2304473c74d1b6030f7c9ae9
|
092dd56a1bf9357466c05d0f5aedf240cec1a27b
|
/tests/pytests/topology/TestMeshGenerator.py
|
ea7d044b789593815e26b703c24303b498c72894
|
[
"MIT"
] |
permissive
|
rwalkerlewis/pylith
|
cef02d5543e99a3e778a1c530967e6b5f1d5dcba
|
c5f872c6afff004a06311d36ac078133a30abd99
|
refs/heads/main
| 2023-08-24T18:27:30.877550 | 2023-06-21T22:03:01 | 2023-06-21T22:03:01 | 154,047,591 | 0 | 0 |
MIT
| 2018-10-21T20:05:59 | 2018-10-21T20:05:59 | null |
UTF-8
|
Python
| false | false | 1,078 |
py
|
#!/usr/bin/env nemesis
#
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
# Charles A. Williams, GNS Science
# Matthew G. Knepley, University at Buffalo
#
# This code was developed as part of the Computational Infrastructure
# for Geodynamics (http://geodynamics.org).
#
# Copyright (c) 2010-2022 University of California, Davis
#
# See LICENSE.md for license information.
#
# ======================================================================
#
# @file tests/pytests/topology/TestMeshGenerator.py
#
# @brief Unit testing of Python MeshGenerator object.
import unittest
from pylith.testing.UnitTestApp import TestAbstractComponent
from pylith.topology.MeshGenerator import MeshGenerator
class TestMeshGenerator(TestAbstractComponent):
"""Unit testing of MeshGenerator object.
"""
_class = MeshGenerator
if __name__ == "__main__":
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestMeshGenerator))
unittest.TextTestRunner(verbosity=2).run(suite)
# End of file
|
[
"[email protected]"
] | |
2e673b09d7834711a6bfc7f7be612714cb09bc5e
|
692ab314e970c5c814c2c5e417c6d8186b70b563
|
/demoscene/migrations/0101_create_production_blurb.py
|
0af1e86620e43459d40255fd00276fdb8e66e8cc
|
[] |
no_license
|
alexanderk23/demozoo
|
127e09f389b099ac5a9f6bba65faff66c2380067
|
a6f57d76958120280f93c7f94f51771fe26c6f09
|
refs/heads/master
| 2021-01-12T20:32:01.364148 | 2015-06-06T00:24:06 | 2015-06-06T00:24:06 | 37,033,533 | 0 | 1 | null | 2015-06-07T21:47:58 | 2015-06-07T21:47:58 | null |
UTF-8
|
Python
| false | false | 26,815 |
py
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'ProductionBlurb'
db.create_table('demoscene_productionblurb', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('production', self.gf('django.db.models.fields.related.ForeignKey')(related_name='blurbs', to=orm['demoscene.Production'])),
('body', self.gf('django.db.models.fields.TextField')()),
))
db.send_create_signal('demoscene', ['ProductionBlurb'])
def backwards(self, orm):
# Deleting model 'ProductionBlurb'
db.delete_table('demoscene_productionblurb')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'demoscene.accountprofile': {
'Meta': {'ordering': "['user__username']", 'object_name': 'AccountProfile'},
'demozoo0_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'edit_mode_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sticky_edit_mode': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'demoscene.competition': {
'Meta': {'ordering': "('party__name', 'name')", 'object_name': 'Competition'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competitions'", 'to': "orm['demoscene.Party']"}),
'platform': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['demoscene.Platform']", 'null': 'True', 'blank': 'True'}),
'production_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['demoscene.ProductionType']", 'null': 'True', 'blank': 'True'}),
'shown_date_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'shown_date_precision': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'})
},
'demoscene.competitionplacing': {
'Meta': {'object_name': 'CompetitionPlacing'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'placings'", 'to': "orm['demoscene.Competition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'competition_placings'", 'to': "orm['demoscene.Production']"}),
'ranking': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'score': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
},
'demoscene.credit': {
'Meta': {'ordering': "['production__title']", 'object_name': 'Credit'},
'category': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nick': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credits'", 'to': "orm['demoscene.Nick']"}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'credits'", 'to': "orm['demoscene.Production']"}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'demoscene.edit': {
'Meta': {'object_name': 'Edit'},
'action_type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'description': ('django.db.models.fields.TextField', [], {}),
'focus2_content_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'edits_as_focus2'", 'null': 'True', 'to': "orm['contenttypes.ContentType']"}),
'focus2_object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'focus_content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'edits'", 'to': "orm['contenttypes.ContentType']"}),
'focus_object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'demoscene.membership': {
'Meta': {'object_name': 'Membership'},
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_memberships'", 'to': "orm['demoscene.Releaser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'group_memberships'", 'to': "orm['demoscene.Releaser']"})
},
'demoscene.nick': {
'Meta': {'ordering': "['name']", 'unique_together': "(('releaser', 'name'),)", 'object_name': 'Nick'},
'abbreviation': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'differentiator': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'releaser': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nicks'", 'to': "orm['demoscene.Releaser']"})
},
'demoscene.nickvariant': {
'Meta': {'ordering': "['name']", 'object_name': 'NickVariant'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'nick': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variants'", 'to': "orm['demoscene.Nick']"})
},
'demoscene.party': {
'Meta': {'ordering': "('name',)", 'object_name': 'Party'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'end_date_date': ('django.db.models.fields.DateField', [], {}),
'end_date_precision': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'geonames_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'invitations': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'invitation_parties'", 'blank': 'True', 'to': "orm['demoscene.Production']"}),
'is_online': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'party_series': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'parties'", 'to': "orm['demoscene.PartySeries']"}),
'sceneorg_compofolders_done': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'start_date_date': ('django.db.models.fields.DateField', [], {}),
'start_date_precision': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'woe_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'demoscene.partyexternallink': {
'Meta': {'ordering': "['link_class']", 'unique_together': "(('link_class', 'parameter', 'party'),)", 'object_name': 'PartyExternalLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_links'", 'to': "orm['demoscene.Party']"})
},
'demoscene.partyseries': {
'Meta': {'ordering': "('name',)", 'object_name': 'PartySeries'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'pouet_party_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'twitter_username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'demoscene.partyseriesdemozoo0reference': {
'Meta': {'object_name': 'PartySeriesDemozoo0Reference'},
'demozoo0_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party_series': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'demozoo0_ids'", 'to': "orm['demoscene.PartySeries']"})
},
'demoscene.platform': {
'Meta': {'ordering': "['name']", 'object_name': 'Platform'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'intro_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'photo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'photo_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'photo_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'demoscene.production': {
'Meta': {'ordering': "['title']", 'object_name': 'Production'},
'author_affiliation_nicks': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'member_productions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['demoscene.Nick']"}),
'author_nicks': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'productions'", 'blank': 'True', 'to': "orm['demoscene.Nick']"}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'default_screenshot': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['demoscene.Screenshot']"}),
'demozoo0_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'has_bonafide_edits': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'platforms': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'productions'", 'blank': 'True', 'to': "orm['demoscene.Platform']"}),
'release_date_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'release_date_precision': ('django.db.models.fields.CharField', [], {'max_length': '1', 'blank': 'True'}),
'scene_org_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'supertype': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'types': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'productions'", 'symmetrical': 'False', 'to': "orm['demoscene.ProductionType']"}),
'unparsed_byline': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {})
},
'demoscene.productionblurb': {
'Meta': {'object_name': 'ProductionBlurb'},
'body': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blurbs'", 'to': "orm['demoscene.Production']"})
},
'demoscene.productiondemozoo0platform': {
'Meta': {'object_name': 'ProductionDemozoo0Platform'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'demozoo0_platforms'", 'to': "orm['demoscene.Production']"})
},
'demoscene.productionlink': {
'Meta': {'ordering': "['link_class']", 'unique_together': "(('link_class', 'parameter', 'production', 'is_download_link'),)", 'object_name': 'ProductionLink'},
'demozoo0_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'file_for_screenshot': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_download_link': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_unresolved_for_screenshotting': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'link_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'links'", 'to': "orm['demoscene.Production']"})
},
'demoscene.productiontype': {
'Meta': {'object_name': 'ProductionType'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'internal_name': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'demoscene.releaser': {
'Meta': {'ordering': "['name']", 'object_name': 'Releaser'},
'country_code': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data_source': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'demozoo0_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'geonames_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_group': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'latitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'longitude': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'real_name_note': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'show_first_name': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'show_surname': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'surname': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'updated_at': ('django.db.models.fields.DateTimeField', [], {}),
'woe_id': ('django.db.models.fields.BigIntegerField', [], {'null': 'True', 'blank': 'True'})
},
'demoscene.releaserexternallink': {
'Meta': {'ordering': "['link_class']", 'unique_together': "(('link_class', 'parameter', 'releaser'),)", 'object_name': 'ReleaserExternalLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'link_class': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'parameter': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'releaser': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'external_links'", 'to': "orm['demoscene.Releaser']"})
},
'demoscene.resultsfile': {
'Meta': {'object_name': 'ResultsFile'},
'encoding': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'file': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'filesize': ('django.db.models.fields.IntegerField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'results_files'", 'to': "orm['demoscene.Party']"}),
'sha1': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
'demoscene.screenshot': {
'Meta': {'object_name': 'Screenshot'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'original_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'original_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'screenshots'", 'to': "orm['demoscene.Production']"}),
'source_download_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'standard_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'standard_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'standard_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'demoscene.soundtracklink': {
'Meta': {'ordering': "['position']", 'object_name': 'SoundtrackLink'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {}),
'production': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'soundtrack_links'", 'to': "orm['demoscene.Production']"}),
'soundtrack': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'appearances_as_soundtrack'", 'to': "orm['demoscene.Production']"})
},
'taggit.tag': {
'Meta': {'object_name': 'Tag'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
}
}
complete_apps = ['demoscene']
|
[
"[email protected]"
] | |
934a0a3c85ee77b48326bd4b5ad873017221519d
|
43c831fa0534fac542430f7aa5d8197a0aa138a3
|
/lib/datasets/env_voc.py
|
faca99985163d3cc1d4a4e46cf9785db34d7a850
|
[
"MIT"
] |
permissive
|
MachineLP/tf-faster-rcnn
|
479d82697ba81c38688d729483865279c27d0302
|
9adb712413a65f2896f301b0161db9a0218b0f06
|
refs/heads/master
| 2021-05-15T15:59:44.624769 | 2018-02-05T03:35:38 | 2018-02-05T03:35:38 | 107,363,505 | 0 | 0 | null | 2017-10-18T05:39:47 | 2017-10-18T05:39:47 | null |
UTF-8
|
Python
| false | false | 10,990 |
py
|
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick and Xinlei Chen
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from datasets.imdb import imdb
import datasets.ds_utils as ds_utils
import xml.etree.ElementTree as ET
import numpy as np
import scipy.sparse
import scipy.io as sio
import utils.cython_bbox
import pickle
import subprocess
import uuid
from .voc_eval import voc_eval
from model.config import cfg
class env_voc(imdb):
def __init__(self, image_set, year, use_diff=False):
name = 'voc_' + year + '_' + image_set
if use_diff:
name += '_diff'
imdb.__init__(self, name)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path()
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year)
self._classes = ('__background__', # always index 0
'mattress', 'table', 'chair', 'people', 'else')
self._class_to_ind = dict(list(zip(self.classes, list(range(self.num_classes)))))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
self._roidb_handler = self.gt_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'use_diff': use_diff,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
"""
Return the absolute path to image i in the image sequence.
"""
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
"""
# Example path to image set file:
# self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
try:
roidb = pickle.load(fid)
except:
roidb = pickle.load(fid, encoding='bytes')
print('{} gt roidb loaded from {}'.format(self.name, cache_file))
return roidb
gt_roidb = [self._load_pascal_annotation(index)
for index in self.image_index]
with open(cache_file, 'wb') as fid:
pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
print('wrote gt roidb to {}'.format(cache_file))
return gt_roidb
def rpn_roidb(self):
if int(self._year) == 2007 or self._image_set != 'test':
gt_roidb = self.gt_roidb()
rpn_roidb = self._load_rpn_roidb(gt_roidb)
roidb = imdb.merge_roidbs(gt_roidb, rpn_roidb)
else:
roidb = self._load_rpn_roidb(None)
return roidb
def _load_rpn_roidb(self, gt_roidb):
filename = self.config['rpn_file']
print('loading {}'.format(filename))
assert os.path.exists(filename), \
'rpn data not found at: {}'.format(filename)
with open(filename, 'rb') as f:
box_list = pickle.load(f)
return self.create_roidb_from_box_list(box_list, gt_roidb)
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC
format.
"""
filename = os.path.join(self._data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
# if len(non_diff_objs) != len(objs):
# print 'Removed {} difficult objects'.format(
# len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros((num_objs), dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# "Seg" area for pascal is just the box area
seg_areas = np.zeros((num_objs), dtype=np.float32)
# Load object bounding boxes into a data frame.
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
seg_areas[ix] = (x2 - x1 + 1) * (y2 - y1 + 1)
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False,
'seg_areas': seg_areas}
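  # For reference, a minimal Annotations/<index>.xml this parser accepts
  # looks like (illustrative sketch, not shipped with the repository):
  #   <annotation>
  #     <object>
  #       <name>chair</name>
  #       <difficult>0</difficult>
  #       <bndbox><xmin>12</xmin><ymin>30</ymin><xmax>96</xmax><ymax>150</ymax></bndbox>
  #     </object>
  #   </annotation>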
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print('Writing {} VOC results file'.format(cls))
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
          if len(dets) == 0:  # dets may be an ndarray; comparing to [] is ambiguous
continue
# the VOCdevkit expects 1-based indices
for k in range(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
def _do_python_eval(self, output_dir='output'):
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
use_07_metric = True if int(self._year) < 2010 else False
print('VOC07 metric? ' + ('Yes' if use_07_metric else 'No'))
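    # (The 2007 metric is the 11-point interpolated AP; from 2010 onwards AP
    # is the exact area under the precision-recall curve.)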
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric, use_diff=self.config['use_diff'])
aps += [ap]
print(('AP for {} = {:.4f}'.format(cls, ap)))
with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print(('Mean AP = {:.4f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print(('{:.3f}'.format(ap)))
print(('{:.3f}'.format(np.mean(aps))))
print('~~~~~~~~')
print('')
print('--------------------------------------------------------------')
print('Results computed with the **unofficial** Python eval code.')
print('Results should be very close to the official MATLAB eval code.')
print('Recompute with `./tools/reval.py --matlab ...` for your paper.')
print('-- Thanks, The Management')
print('--------------------------------------------------------------')
def _do_matlab_eval(self, output_dir='output'):
print('-----------------------------------------------------')
print('Computing results with the official MATLAB eval code.')
print('-----------------------------------------------------')
path = os.path.join(cfg.ROOT_DIR, 'lib', 'datasets',
'VOCdevkit-matlab-wrapper')
cmd = 'cd {} && '.format(path)
cmd += '{:s} -nodisplay -nodesktop '.format(cfg.MATLAB)
cmd += '-r "dbstop if error; '
cmd += 'voc_eval(\'{:s}\',\'{:s}\',\'{:s}\',\'{:s}\'); quit;"' \
.format(self._devkit_path, self._get_comp_id(),
self._image_set, output_dir)
print(('Running:\n{}'.format(cmd)))
status = subprocess.call(cmd, shell=True)
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
self._do_matlab_eval(output_dir)
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def competition_mode(self, on):
if on:
self.config['use_salt'] = False
self.config['cleanup'] = False
else:
self.config['use_salt'] = True
self.config['cleanup'] = True
if __name__ == '__main__':
  d = env_voc('trainval', '2007')
res = d.roidb
  from IPython import embed
embed()
|
[
"[email protected]"
] | |
72097673a6fa9978b2b514de0f6394c8989f0578
|
2e682fd72e3feaa70e3f7bf2a3b83c50d783ec02
|
/PyTorch/dev/cv/image_classification/DPN-68_ID1889_for_PyTorch/timm/models/senet.py
|
dc181a23bdedd4edfdf006e460e3fd94e7f83571
|
[
"GPL-1.0-or-later",
"Apache-2.0",
"LicenseRef-scancode-proprietary-license",
"MIT",
"LGPL-2.0-or-later",
"BSD-3-Clause",
"CC-BY-NC-4.0",
"BSD-2-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ascend/ModelZoo-PyTorch
|
4c89414b9e2582cef9926d4670108a090c839d2d
|
92acc188d3a0f634de58463b6676e70df83ef808
|
refs/heads/master
| 2023-07-19T12:40:00.512853 | 2023-07-17T02:48:18 | 2023-07-17T02:48:18 | 483,502,469 | 23 | 6 |
Apache-2.0
| 2022-10-15T09:29:12 | 2022-04-20T04:11:18 |
Python
|
UTF-8
|
Python
| false | false | 19,644 |
py
|
#
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
#
"""
SEResNet implementation from Cadene's pretrained models
https://github.com/Cadene/pretrained-models.pytorch/blob/master/pretrainedmodels/models/senet.py
Additional credit to https://github.com/creafz
Original model: https://github.com/hujie-frank/SENet
ResNet code gently borrowed from
https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
FIXME I'm deprecating this model and moving them to ResNet as I don't want to maintain duplicate
support for extras like dilation, switchable BN/activations, feature extraction, etc that don't exist here.
"""
import math
from collections import OrderedDict
import torch.nn as nn
import torch.nn.functional as F
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from .helpers import build_model_with_cfg
from .layers import create_classifier
from .registry import register_model
import torch.npu
import os
NPU_CALCULATE_DEVICE = 0
if os.getenv('NPU_CALCULATE_DEVICE') and str.isdigit(os.getenv('NPU_CALCULATE_DEVICE')):
NPU_CALCULATE_DEVICE = int(os.getenv('NPU_CALCULATE_DEVICE'))
if torch.npu.current_device() != NPU_CALCULATE_DEVICE:
torch.npu.set_device(f'npu:{NPU_CALCULATE_DEVICE}')
__all__ = ['SENet']
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'layer0.conv1', 'classifier': 'last_linear',
**kwargs
}
default_cfgs = {
'legacy_senet154':
_cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/senet154-c7b49a05.pth'),
'legacy_seresnet18': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
interpolation='bicubic'),
'legacy_seresnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth'),
'legacy_seresnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth'),
'legacy_seresnet101': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth'),
'legacy_seresnet152': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth'),
'legacy_seresnext26_32x4d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnext26_32x4d-65ebdb501.pth',
interpolation='bicubic'),
'legacy_seresnext50_32x4d':
_cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth'),
'legacy_seresnext101_32x4d':
_cfg(url='http://data.lip6.fr/cadene/pretrainedmodels/se_resnext101_32x4d-3b2fe3d8.pth'),
}
def _weight_init(m):
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1.)
nn.init.constant_(m.bias, 0.)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = x.mean((2, 3), keepdim=True)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return module_input * x
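# Shape walk-through for SEModule (illustrative): an input x of shape
# (N, C, H, W) is mean-pooled over dims (2, 3) to (N, C, 1, 1), squeezed to
# C // reduction channels by fc1, expanded back to C by fc2, and the sigmoid
# output then gates each channel of the untouched module_input.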
class Bottleneck(nn.Module):
"""
Base class for bottlenecks that implements `forward()` method.
"""
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out = self.se_module(out) + shortcut
out = self.relu(out)
return out
class SEBottleneck(Bottleneck):
"""
Bottleneck for SENet154.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes * 2)
self.conv2 = nn.Conv2d(
planes * 2, planes * 4, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes * 4)
self.conv3 = nn.Conv2d(
planes * 4, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBottleneck(Bottleneck):
"""
ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
implementation and uses `stride=stride` in `conv1` and not in `conv2`
(the latter is used in the torchvision implementation of ResNet).
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None):
super(SEResNetBottleneck, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=1, bias=False, stride=stride)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNeXtBottleneck(Bottleneck):
"""
ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
"""
expansion = 4
def __init__(self, inplanes, planes, groups, reduction, stride=1,
downsample=None, base_width=4):
super(SEResNeXtBottleneck, self).__init__()
width = math.floor(planes * (base_width / 64)) * groups
self.conv1 = nn.Conv2d(
inplanes, width, kernel_size=1, bias=False, stride=1)
self.bn1 = nn.BatchNorm2d(width)
self.conv2 = nn.Conv2d(
width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(width)
self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes * 4, reduction=reduction)
self.downsample = downsample
self.stride = stride
class SEResNetBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
super(SEResNetBlock, self).__init__()
self.conv1 = nn.Conv2d(
inplanes, planes, kernel_size=3, padding=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.se_module = SEModule(planes, reduction=reduction)
self.downsample = downsample
self.stride = stride
def forward(self, x):
shortcut = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
if self.downsample is not None:
shortcut = self.downsample(x)
out = self.se_module(out) + shortcut
out = self.relu(out)
return out
class SENet(nn.Module):
def __init__(self, block, layers, groups, reduction, drop_rate=0.2,
in_chans=3, inplanes=64, input_3x3=False, downsample_kernel_size=1,
downsample_padding=0, num_classes=1000, global_pool='avg'):
"""
Parameters
----------
block (nn.Module): Bottleneck class.
- For SENet154: SEBottleneck
- For SE-ResNet models: SEResNetBottleneck
- For SE-ResNeXt models: SEResNeXtBottleneck
layers (list of ints): Number of residual blocks for 4 layers of the
network (layer1...layer4).
groups (int): Number of groups for the 3x3 convolution in each
bottleneck block.
- For SENet154: 64
- For SE-ResNet models: 1
- For SE-ResNeXt models: 32
reduction (int): Reduction ratio for Squeeze-and-Excitation modules.
- For all models: 16
        drop_rate (float): Drop probability for the Dropout layer applied
            before the classifier; `0.` disables dropout.
            - For SENet154: 0.2
            - For SE-ResNet models: 0.
            - For SE-ResNeXt models: 0.
inplanes (int): Number of input channels for layer1.
- For SENet154: 128
- For SE-ResNet models: 64
- For SE-ResNeXt models: 64
input_3x3 (bool): If `True`, use three 3x3 convolutions instead of
a single 7x7 convolution in layer0.
- For SENet154: True
- For SE-ResNet models: False
- For SE-ResNeXt models: False
downsample_kernel_size (int): Kernel size for downsampling convolutions
in layer2, layer3 and layer4.
- For SENet154: 3
- For SE-ResNet models: 1
- For SE-ResNeXt models: 1
downsample_padding (int): Padding for downsampling convolutions in
layer2, layer3 and layer4.
- For SENet154: 1
- For SE-ResNet models: 0
- For SE-ResNeXt models: 0
num_classes (int): Number of outputs in `last_linear` layer.
- For all models: 1000
"""
super(SENet, self).__init__()
self.inplanes = inplanes
self.num_classes = num_classes
self.drop_rate = drop_rate
if input_3x3:
layer0_modules = [
('conv1', nn.Conv2d(in_chans, 64, 3, stride=2, padding=1, bias=False)),
('bn1', nn.BatchNorm2d(64)),
('relu1', nn.ReLU(inplace=True)),
('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1, bias=False)),
('bn2', nn.BatchNorm2d(64)),
('relu2', nn.ReLU(inplace=True)),
('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1, bias=False)),
('bn3', nn.BatchNorm2d(inplanes)),
('relu3', nn.ReLU(inplace=True)),
]
else:
layer0_modules = [
('conv1', nn.Conv2d(
in_chans, inplanes, kernel_size=7, stride=2, padding=3, bias=False)),
('bn1', nn.BatchNorm2d(inplanes)),
('relu1', nn.ReLU(inplace=True)),
]
self.layer0 = nn.Sequential(OrderedDict(layer0_modules))
# To preserve compatibility with Caffe weights `ceil_mode=True` is used instead of `padding=1`.
self.pool0 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
self.feature_info = [dict(num_chs=inplanes, reduction=2, module='layer0')]
self.layer1 = self._make_layer(
block,
planes=64,
blocks=layers[0],
groups=groups,
reduction=reduction,
downsample_kernel_size=1,
downsample_padding=0
)
self.feature_info += [dict(num_chs=64 * block.expansion, reduction=4, module='layer1')]
self.layer2 = self._make_layer(
block,
planes=128,
blocks=layers[1],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=128 * block.expansion, reduction=8, module='layer2')]
self.layer3 = self._make_layer(
block,
planes=256,
blocks=layers[2],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=256 * block.expansion, reduction=16, module='layer3')]
self.layer4 = self._make_layer(
block,
planes=512,
blocks=layers[3],
stride=2,
groups=groups,
reduction=reduction,
downsample_kernel_size=downsample_kernel_size,
downsample_padding=downsample_padding
)
self.feature_info += [dict(num_chs=512 * block.expansion, reduction=32, module='layer4')]
self.num_features = 512 * block.expansion
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
for m in self.modules():
_weight_init(m)
def _make_layer(self, block, planes, blocks, groups, reduction, stride=1,
downsample_kernel_size=1, downsample_padding=0):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(
self.inplanes, planes * block.expansion, kernel_size=downsample_kernel_size,
stride=stride, padding=downsample_padding, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = [block(self.inplanes, planes, groups, reduction, stride, downsample)]
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups, reduction))
return nn.Sequential(*layers)
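    # Example (illustrative): _make_layer(block, planes=128, blocks=4,
    # groups=1, reduction=16, stride=2) attaches a 1x1 stride-2 projection so
    # the shortcut matches the 128 * block.expansion-channel, half-resolution
    # main path of the stage's first block.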
def get_classifier(self):
return self.last_linear
def reset_classifier(self, num_classes, global_pool='avg'):
self.num_classes = num_classes
self.global_pool, self.last_linear = create_classifier(
self.num_features, self.num_classes, pool_type=global_pool)
def forward_features(self, x):
x = self.layer0(x)
x = self.pool0(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
def logits(self, x):
x = self.global_pool(x)
if self.drop_rate > 0.:
x = F.dropout(x, p=self.drop_rate, training=self.training)
x = self.last_linear(x)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.logits(x)
return x
def _create_senet(variant, pretrained=False, **kwargs):
return build_model_with_cfg(
SENet, variant, pretrained,
default_cfg=default_cfgs[variant],
**kwargs)
@register_model
def legacy_seresnet18(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBlock, layers=[2, 2, 2, 2], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet18', pretrained, **model_args)
@register_model
def legacy_seresnet34(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBlock, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet34', pretrained, **model_args)
@register_model
def legacy_seresnet50(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBottleneck, layers=[3, 4, 6, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet50', pretrained, **model_args)
@register_model
def legacy_seresnet101(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBottleneck, layers=[3, 4, 23, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet101', pretrained, **model_args)
@register_model
def legacy_seresnet152(pretrained=False, **kwargs):
model_args = dict(
block=SEResNetBottleneck, layers=[3, 8, 36, 3], groups=1, reduction=16, **kwargs)
return _create_senet('legacy_seresnet152', pretrained, **model_args)
@register_model
def legacy_senet154(pretrained=False, **kwargs):
model_args = dict(
block=SEBottleneck, layers=[3, 8, 36, 3], groups=64, reduction=16,
downsample_kernel_size=3, downsample_padding=1, inplanes=128, input_3x3=True, **kwargs)
return _create_senet('legacy_senet154', pretrained, **model_args)
@register_model
def legacy_seresnext26_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=SEResNeXtBottleneck, layers=[2, 2, 2, 2], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext26_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext50_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=SEResNeXtBottleneck, layers=[3, 4, 6, 3], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext50_32x4d', pretrained, **model_args)
@register_model
def legacy_seresnext101_32x4d(pretrained=False, **kwargs):
model_args = dict(
block=SEResNeXtBottleneck, layers=[3, 4, 23, 3], groups=32, reduction=16, **kwargs)
return _create_senet('legacy_seresnext101_32x4d', pretrained, **model_args)
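# Hedged usage sketch (relies on timm's create_model registry; the model name
# below is registered in this file, the rest is standard timm/torch usage):
#   import torch, timm
#   model = timm.create_model('legacy_seresnet50', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))   # -> shape (1, 1000)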
|
[
"[email protected]"
] | |
346ffe2563d7b5009ca8d5426353249ca86ced67
|
b252d1f8ec5f68bf5f935c000e0bb011718ea691
|
/virtualenvs/ninetyseven/src/savoy/contrib/events/.svn/text-base/admin.py.svn-base
|
0a3e21da84e64ebedc6f1ae3b91e7b3694c532de
|
[] |
no_license
|
syncopated/97bottles
|
2ceace7ed6a852bef61796733a08eb878b045152
|
08f4210e3de77c4564fcc8c1a2e9b47a0088249f
|
refs/heads/master
| 2016-08-05T07:48:51.109089 | 2012-12-02T17:38:35 | 2012-12-02T17:38:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,318 |
from django.contrib import admin
from savoy.contrib.events.models import *
class OneOffEventTimeInline(admin.TabularInline):
model = OneOffEventTime
extra = 3
class AllDayEventTimeInline(admin.TabularInline):
model = AllDayEventTime
extra = 3
class WeeklyEventTimeInline(admin.TabularInline):
model = WeeklyEventTime
extra = 3
class MonthlyEventTimeInline(admin.TabularInline):
model = MonthlyEventTime
extra = 3
class EventAdmin(admin.ModelAdmin):
inlines = [OneOffEventTimeInline, AllDayEventTimeInline, WeeklyEventTimeInline, MonthlyEventTimeInline]
prepopulated_fields = {'slug': ("title",)}
list_display = ('title','short_description', 'added_by', 'date_created','start_time',)
search_fields = ('title','short_description','description','tags',)
date_hierarchy = 'date_published'
list_filter=('date_created','date_published',)
fieldsets = (
('Basics:', {'fields': ('title', 'slug', 'date_published','added_by', 'short_description', 'description', 'event_url')}),
('People and places:', {'fields': ('places', 'organizers', 'sponsors', 'individual_organizers', 'individual_sponsors',)}),
('Categorization:', {'fields': ('tags',)}),
('Cost and tickets:', {'fields': ('cost_high','cost_low','ticket_url',)}),
)
admin.site.register(Event, EventAdmin)
|
[
"[email protected]"
] | ||
29d849cb3d35305389e0177261d40116767b242f
|
6879a8596df6f302c63966a2d27f6b4d11cc9b29
|
/abc/problems160/153/b.py
|
97d4c73fb95bd547573fa60d8ad0af882d0c767f
|
[] |
no_license
|
wkwkgg/atcoder
|
41b1e02b88bf7a8291b709306e54cb56cb93e52a
|
28a7d4084a4100236510c05a88e50aa0403ac7cd
|
refs/heads/master
| 2020-07-26T03:47:19.460049 | 2020-03-01T18:29:57 | 2020-03-01T18:29:57 | 208,523,188 | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 110 |
py
|
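# AtCoder ABC153 B: the monster has health H and we know all N attacks in
# advance, so the answer is "Yes" iff the attacks' total damage reaches H.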
H, N = map(int, input().split())
An = list(map(int, input().split()))
print("Yes" if sum(An) >= H else "No")
|
[
"[email protected]"
] | |
55fad1ea837d7346b46547d70e497c4f672c2024
|
a86ca34e23afaf67fdf858df9e47847606b23e0c
|
/lib/temboo/Library/Klout/ListTopics.py
|
306595bee299af4f646bec8c2a32e475e9fa0bbc
|
[] |
no_license
|
miriammelnick/dont-get-mugged
|
6026ad93c910baaecbc3f5477629b0322e116fa8
|
1613ee636c027ccc49c3f84a5f186e27de7f0f9d
|
refs/heads/master
| 2021-01-13T02:18:39.599323 | 2012-08-12T23:25:47 | 2012-08-12T23:25:47 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 2,864 |
py
|
###############################################################################
#
# ListTopics
# Retrieves a list of the top three topics for Twitter users you specify.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
class ListTopics(Choreography):
"""
Create a new instance of the ListTopics Choreography. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
def __init__(self, temboo_session):
Choreography.__init__(self, temboo_session, '/Library/Klout/ListTopics')
def new_input_set(self):
return ListTopicsInputSet()
def _make_result_set(self, result, path):
return ListTopicsResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return ListTopicsChoreographyExecution(session, exec_id, path)
"""
An InputSet with methods appropriate for specifying the inputs to the ListTopics
choreography. The InputSet object is used to specify input parameters when executing this choreo.
"""
class ListTopicsInputSet(InputSet):
"""
Set the value of the APIKey input for this choreography. ((required, string) The API key provided by Klout.)
"""
def set_APIKey(self, value):
InputSet._set_input(self, 'APIKey', value)
"""
Set the value of the ReturnType input for this choreography. ((optional, string) The desired format for the retrieved topics: xml or json. Defaults to xml.)
"""
def set_ReturnType(self, value):
InputSet._set_input(self, 'ReturnType', value)
"""
Set the value of the Usernames input for this choreography. ((required, string) A comma-delimited string of Twitter usernames whose topics you want to retrieve.)
"""
def set_Usernames(self, value):
InputSet._set_input(self, 'Usernames', value)
"""
A ResultSet with methods tailored to the values returned by the ListTopics choreography.
The ResultSet object is used to retrieve the results of a choreography execution.
"""
class ListTopicsResultSet(ResultSet):
"""
    Retrieve the value for the "Response" output from this choreography execution. ((xml) The retrieved topics for the specified users. The response format depends on what is specified in the ReturnType input. Defaults to xml.)
"""
def get_Response(self):
return self._output.get('Response', None)
class ListTopicsChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return ListTopicsResultSet(response, path)
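# Hedged usage sketch (assumes a valid TembooSession; treat the exact
# execution call as an assumption for this SDK generation):
#   choreo = ListTopics(session)
#   inputs = choreo.new_input_set()
#   inputs.set_APIKey('YOUR_KLOUT_KEY')
#   inputs.set_Usernames('alice,bob')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())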
|
[
"miriam@famulus"
] |
miriam@famulus
|
9221dbcaffe38d0448eee87f5f3ed8162b467596
|
8a69c886a8bde80c0b8f1f0c03b0de6f31b784dd
|
/tests/functional/parsing/test_parsing_insert.py
|
f593edf1130db39e74abbcec79f249ed2e9401ad
|
[
"BSD-3-Clause"
] |
permissive
|
bouthilx/kleio
|
c774a85b851081f264a8bbae0f45cd45ebf3fa82
|
bf6291f95d9c35774194e3d9ed678db4544ca345
|
refs/heads/develop
| 2020-03-28T00:43:02.218807 | 2018-08-19T02:26:35 | 2018-08-19T02:26:35 | 139,289,100 | 1 | 1 | null | 2018-08-19T02:26:35 | 2018-07-01T00:18:04 |
Python
|
UTF-8
|
Python
| false | false | 1,086 |
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Perform functional tests for the parsing of the different commands."""
import argparse
import os
import pytest
from kleio.core.cli import insert
def _create_parser(need_subparser=True):
parser = argparse.ArgumentParser()
if need_subparser:
subparsers = parser.add_subparsers()
return parser, subparsers
return parser
@pytest.mark.usefixtures("clean_db")
def test_insert_command_full_parsing(database, monkeypatch):
"""Test the parsing of all the options of insert"""
monkeypatch.chdir(os.path.dirname(os.path.abspath(__file__)))
parser, subparsers = _create_parser()
args_list = ["insert", "-n", "test", "--config",
"./kleio_config_random.yaml", "./black_box.py", "-x=1"]
insert.add_subparser(subparsers)
subparsers.choices['insert'].set_defaults(func='')
args = vars(parser.parse_args(args_list))
assert args['name'] == 'test'
assert args['config'].name == './kleio_config_random.yaml'
assert args['user_args'] == ['./black_box.py', '-x=1']
|
[
"[email protected]"
] | |
56008640a4a20567e577c9b682e01646a60c0ea3
|
16c4d625ad9e945471a2a267e9992c7e9260214f
|
/criterium/forms.py
|
4a77f9477282f7224cc15e9e7c0b8948627cbecb
|
[
"BSD-2-Clause"
] |
permissive
|
andywar65/rp_repo
|
8cea1c81533250b49a4036fb9b0ff6e93a0dde66
|
726c1426d738b962cabeabd8995aa35767df0c41
|
refs/heads/master
| 2023-05-26T13:47:48.329624 | 2021-06-05T08:35:05 | 2021-06-05T08:35:05 | 255,056,987 | 1 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 1,064 |
py
|
from django import forms
from django.forms import ModelForm
from .models import Race, Athlete
from cronache.models import Event
from users.models import User
class RaceForm(ModelForm):
event = forms.ModelChoiceField(label="Evento", required = False,
queryset = Event.objects.filter(tags__name__in = ['criterium',
'Criterium'], ), )
def clean_date(self):
date = self.cleaned_data['date']
        event = self.cleaned_data.get('event')  # may be absent if the event field failed its own validation
if not date and not event:
msg = 'Senza evento occorre inserire almeno la data.'
raise forms.ValidationError(msg, code='no_date')
return date
class Meta:
model = Race
fields = '__all__'
class AthleteForm(ModelForm):
user = forms.ModelChoiceField(label="Iscritto", required = True,
queryset = User.objects.filter(profile__parent = None,
profile__sector__in = ['1-YC', '2-NC'],
is_active = True ).order_by('last_name', 'first_name'), )
class Meta:
model = Athlete
fields = '__all__'
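# Hedged usage sketch: with neither 'event' nor 'date' supplied, clean_date()
# raises the 'no_date' ValidationError, so validation fails:
#   form = RaceForm(data={})   # other required model fields omitted here
#   form.is_valid()            # -> False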
|
[
"[email protected]"
] | |
c671f526cdc219ba9326376d219ae533bae11376
|
33836016ea99776d31f7ad8f2140c39f7b43b5fe
|
/fip_collab/2016_07_07_ideas_md3/plot_linkage_check.py
|
fa41cb4110e8f87b1600d492c07e5f189351bd32
|
[] |
no_license
|
earthexploration/MKS-Experimentation
|
92a2aea83e041bfe741048d662d28ff593077551
|
9b9ff3b468767b235e7c4884b0ed56c127328a5f
|
refs/heads/master
| 2023-03-17T23:11:11.313693 | 2017-04-24T19:24:35 | 2017-04-24T19:24:35 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 4,795 |
py
|
import numpy as np
import matplotlib.pyplot as plt
import functions as rr
from constants import const
import h5py
def plot_check(par, n_pc, n_poly, H):
C = const()
# colormat = np.random.rand(len(set_id_set), 3)
colormat = np.array([[.3, .3, 1.],
[.3, 1., .3],
[1., .2, .2],
[0., .7, .7],
[.7, .0, .7],
[.7, .7, .0],
[.5, .3, .1],
[.3, .5, .1],
[.1, .3, .5]])
f_reg = h5py.File("regression_results_L%s.hdf5" % H, 'r')
order = f_reg.get('order_%s' % par)[...]
"""explicitly define #PCs"""
tmp = (order[:, 0] == n_pc)*(order[:, 1] == n_poly)
indx = np.arange(order.shape[0])[tmp]
# """calculate # PCs required to reach desired explained variance"""
# f_pc = h5py.File("pca_data_L%s.hdf5" % H, 'r')
# ratios = f_pc.get('ratios')[...]
# f_pc.close()
# tmp = np.cumsum(ratios)
# tmp = np.arange(tmp.size)[tmp >= C["ev_lvl"]]
# # max_ev = tmp.max()
# # print "max explained variance: %s" % max_ev
# # tmp = np.arange(tmp.size)[tmp >= max_ev]
# n_pc = tmp[0] + 1
# tmp = (order[:, 0] == n_pc)*(order[:, 1] == n_poly)
# indx = np.arange(order.shape[0])[tmp]
# """calculate # PCs to minimize LOOCV mean error"""
# indx = np.argmin(f_reg.get('loocv_err_%s' % par))
# n_pc = order[indx, 0]
msg = par
rr.WP(msg, C['wrt_file'])
msg = "n_pc, n_poly: %s" % str(order[indx, :])
rr.WP(msg, C['wrt_file'])
"""find the results associated with the desired n_pc, n_poly"""
"""load the simulated and predicted responses"""
if par == 'modulus':
RsimC = f_reg.get('Rsim_cal_%s' % par)[...]*(1e-3)
RpredC = f_reg.get('Rpred_cal_%s' % par)[indx, :]*(1e-3)
RsimV = f_reg.get('Rsim_val_%s' % par)[...]*(1e-3)
RpredV = f_reg.get('Rpred_val_%s' % par)[indx, :]*(1e-3)
else:
RsimC = f_reg.get('Rsim_cal_%s' % par)[...]
RpredC = f_reg.get('Rpred_cal_%s' % par)[indx, :]
RsimV = f_reg.get('Rsim_val_%s' % par)[...]
RpredV = f_reg.get('Rpred_val_%s' % par)[indx, :]
"""write out the associated error"""
errC = 100.*np.abs(RpredC-RsimC)/RsimC.mean()
msg = "mean %% error for cal: %s" % errC.mean()
rr.WP(msg, C['wrt_file'])
msg = "max %% error for cal: %s" % errC.max()
rr.WP(msg, C['wrt_file'])
errV = 100.*np.abs(RpredV-RsimV)/RsimV.mean()
msg = "mean %% error for val: %s" % errV.mean()
rr.WP(msg, C['wrt_file'])
msg = "max %% error for val: %s" % errV.max()
rr.WP(msg, C['wrt_file'])
"""plot the prediction equal to simulation line"""
fig = plt.figure(figsize=[8, 5])
minval = np.min([np.min([RsimC, RpredC]), np.min([RsimV, RpredV])])
maxval = np.max([np.max([RsimC, RpredC]), np.max([RsimV, RpredV])])
valrange = maxval-minval
minln = minval - 0.5*valrange
maxln = maxval + 0.5*valrange
line = np.array([minln, maxln])
plt.plot(line, line, 'k-')
c = 0
    for ii in range(len(C['ns_cal'])):  # range works under both Python 2 and 3
c_ = c + C['ns_cal'][ii]
name = C['names_cal'][ii]
Rsim_tmp = RsimC[c:c_]
Rpred_tmp = RpredC[c:c_]
c = c_
plt.plot(Rsim_tmp, Rpred_tmp,
marker='o', markersize=7, color=colormat[ii, :], alpha=0.4,
linestyle='', label="%s (calibration)" % name)
c = 0
    for ii in range(len(C['ns_val'])):
c_ = c + C['ns_val'][ii]
name = C['names_val'][ii]
Rsim_tmp = RsimV[c:c_]
Rpred_tmp = RpredV[c:c_]
c = c_
plt.plot(Rsim_tmp, Rpred_tmp,
marker='s', markersize=7, color=colormat[ii, :], alpha=0.4,
linestyle='', label="%s (validation)" % name)
minbnd = minval - 0.1*valrange
maxbnd = maxval + 0.1*valrange
plt.axis([minbnd, maxbnd, minbnd, maxbnd])
plt.axes().set_aspect('equal')
if par == 'modulus':
plt.xlabel("simulation (GPa)")
plt.ylabel("prediction (GPa)")
else:
plt.xlabel("simulation (MPa)")
plt.ylabel("prediction (MPa)")
# plt.xticks(rotation=20)
# plt.yticks(rotation=20)
# plt.legend(loc='upper left', shadow=True, fontsize='medium')
plt.legend(bbox_to_anchor=(1.02, 1), loc=2, shadow=True, fontsize='medium')
fig.tight_layout(rect=(0, 0, .75, 1))
fig_name = 'prediction_%s_npc%s_npoly%s_L%s.png' % (par, n_pc, n_poly, H)
fig.canvas.set_window_title(fig_name)
plt.savefig(fig_name)
f_reg.close()
if __name__ == '__main__':
    C = const()
    par = "c0"
    # Illustrative arguments (assumption): the original call passed
    # (ns_set, names_set, par), which does not match the signature
    # plot_check(par, n_pc, n_poly, H); the PC/poly counts and H below are
    # placeholders.
    plot_check(par, n_pc=10, n_poly=2, H=6)
    plt.show()
|
[
"[email protected]"
] | |
fe007eb975a9d674a4485a4870ad52f056034e72
|
d63b1b36634b68070f6f3c017c0250a7ea646e6f
|
/SMC/GEM5/gem5/src/dev/x86/Pc.py
|
1f1f3ca89ba3a345f277583ed85d6800e4ecccc7
|
[
"MIT",
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later"
] |
permissive
|
jiwon-choe/Brown-SMCSim
|
ccf506d34d85fb3d085bf50ed47de8b4eeaee474
|
ff3d9334c1d5c8d6a00421848c0d51e50e6b67f8
|
refs/heads/master
| 2021-06-30T00:15:57.128209 | 2020-11-24T03:11:41 | 2020-11-24T03:11:41 | 192,596,189 | 15 | 8 |
MIT
| 2019-06-20T15:43:00 | 2019-06-18T18:53:40 |
C++
|
UTF-8
|
Python
| false | false | 3,388 |
py
|
# Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Device import IsaFake
from Pci import PciConfigAll
from Platform import Platform
from SouthBridge import SouthBridge
from Terminal import Terminal
from Uart import Uart8250
def x86IOAddress(port):
IO_address_space_base = 0x8000000000000000
    return IO_address_space_base + port
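# Example: x86IOAddress(0x3f8) == 0x8000000000000000 + 0x3f8, i.e. legacy
# port I/O is carved out of a dedicated region at the top of gem5's x86
# physical address space.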
class Pc(Platform):
type = 'Pc'
cxx_header = "dev/x86/pc.hh"
system = Param.System(Parent.any, "system")
pciconfig = PciConfigAll()
south_bridge = SouthBridge()
# "Non-existant" port used for timing purposes by the linux kernel
i_dont_exist = IsaFake(pio_addr=x86IOAddress(0x80), pio_size=1)
    # Ports behind the pci config and data registers. These don't do anything,
    # but the Linux kernel fiddles with them anyway.
behind_pci = IsaFake(pio_addr=x86IOAddress(0xcf8), pio_size=8)
# Serial port and terminal
com_1 = Uart8250()
com_1.pio_addr = x86IOAddress(0x3f8)
com_1.terminal = Terminal()
    # Devices to catch access to non-existent serial ports.
fake_com_2 = IsaFake(pio_addr=x86IOAddress(0x2f8), pio_size=8)
fake_com_3 = IsaFake(pio_addr=x86IOAddress(0x3e8), pio_size=8)
fake_com_4 = IsaFake(pio_addr=x86IOAddress(0x2e8), pio_size=8)
    # A device to catch accesses to the non-existent floppy controller.
fake_floppy = IsaFake(pio_addr=x86IOAddress(0x3f2), pio_size=2)
def attachIO(self, bus, dma_ports = []):
self.south_bridge.attachIO(bus, dma_ports)
self.i_dont_exist.pio = bus.master
self.behind_pci.pio = bus.master
self.com_1.pio = bus.master
self.fake_com_2.pio = bus.master
self.fake_com_3.pio = bus.master
self.fake_com_4.pio = bus.master
self.fake_floppy.pio = bus.master
self.pciconfig.pio = bus.default
|
[
"[email protected]"
] | |
aa873476e46a8e79f0c40ca8c00123eeb3510f1e
|
4297f48daaa2aa0f5e4058bbeee111bf9236790e
|
/cv/hyper-network/residual_network_on_cifar10.py
|
cc01d12854cfb994273a508ce822eadd70d0987e
|
[] |
no_license
|
HQ01/fast-weight
|
21d9c379c8bccc29aced911d35f3b0784a072c8f
|
e0d9afe15c05fca966bb03a5b571a5486d6629c6
|
refs/heads/master
| 2021-01-21T11:54:54.423127 | 2017-05-15T13:08:42 | 2017-05-15T13:08:42 | 91,762,993 | 0 | 1 | null | 2017-05-19T03:46:09 | 2017-05-19T03:46:09 | null |
UTF-8
|
Python
| false | false | 3,661 |
py
|
import mx_layers as layers
def _convolution(**kwargs):
defaults = {'kernel_shape' : (3, 3), 'stride' : (1, 1), 'pad' : (1, 1), 'no_bias' : True}
defaults.update(kwargs)
return layers.convolution(**defaults)
def _normalized_convolution(network, **kwargs):
network = layers.batch_normalization(network, fix_gamma=False)
network = layers.ReLU(network)
network = _convolution(X=network, **kwargs)
return network
def _module(network, n_filters, n_layers):
for index in range(n_layers):
identity = network
network = _normalized_convolution(network, n_filters=n_filters)
network = _normalized_convolution(network, n_filters=n_filters)
network += identity
return network
def _transit(network, n_filters):
'''
identity = \
_convolution(X=network, n_filters=n_filters, kernel_shape=(1, 1), stride=(2, 2), pad=(0, 0))
'''
identity = layers.pooling(X=network, mode='maximum', kernel_shape=(2, 2), stride=(2, 2), pad=(0, 0))
identity = _convolution(X=identity, n_filters=n_filters, kernel_shape=(1, 1), pad=(0, 0))
network = _normalized_convolution(network, n_filters=n_filters, stride=(2, 2))
network = _normalized_convolution(network, n_filters=n_filters)
return identity + network
def build_network(n_layers):
network = layers.variable('data')
network = _convolution(X=network, n_filters=16)
for n_filters in (16, 32):
network = _module(network, n_filters, n_layers)
network = _transit(network, n_filters * 2)
network = _module(network, 64, n_layers)
network = layers.batch_normalization(network, fix_gamma=False)
network = layers.ReLU(network)
network = layers.pooling(X=network, mode='average', kernel_shape=(8, 8), stride=(1, 1), pad=(0, 0))
network = layers.flatten(network)
network = layers.batch_normalization(network, fix_gamma=False)
network = layers.fully_connected(X=network, n_hidden_units=10)
network = layers.softmax_loss(prediction=network, normalization='batch', id='softmax')
return network
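# Structure note (illustrative): the stem conv produces 16 feature maps; the
# three stages (16, 32, 64 filters) each stack n_layers pre-activation
# residual blocks (BN -> ReLU -> conv), and _transit halves the spatial
# resolution while doubling the filter count between stages.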
if __name__ == '__main__':
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--initial_lr', type=float, default=0.1)
parser.add_argument('--n_layers', type=int, required=True)
parser.add_argument('--postfix', type=str, default='')
args = parser.parse_args()
network = build_network(n_layers=args.n_layers)
from lr_scheduler import AtIterationScheduler
lr_table = {32000 : args.initial_lr * 0.1, 48000 : args.initial_lr * 0.01}
lr_scheduler = AtIterationScheduler(args.initial_lr, lr_table)
optimizer_settings = {
'args' : {'momentum' : 0.9},
'initial_lr' : args.initial_lr,
'lr_scheduler' : lr_scheduler,
'optimizer' : 'SGD',
'weight_decay' : 0.0001,
}
from mx_initializers import PReLUInitializer
initializer = PReLUInitializer()
from mx_solver import MXSolver
solver = MXSolver(
batch_size = args.batch_size,
devices = (0, 1, 2, 3),
epochs = 150,
initializer = initializer,
optimizer_settings = optimizer_settings,
symbol = network,
verbose = True,
)
from data_utilities import load_cifar10_record
data = load_cifar10_record(args.batch_size)
info = solver.train(data)
postfix = '-' + args.postfix if args.postfix else ''
identifier = 'residual-network-on-cifar-10-%d%s' % (args.n_layers, postfix)
import cPickle as pickle
pickle.dump(info, open('info/%s' % identifier, 'wb'))
parameters = solver.export_parameters()
pickle.dump(parameters, open('parameters/%s' % identifier, 'wb'))
|
[
"[email protected]"
] | |
796bdbddda266ec0bbdc64183fe18db01383609b
|
ceb620c4be8b34f4aa08156226187db081fc3b55
|
/loca_13/ext_personalizacion_lanta/model/models.py
|
b02f0c3f8c6077ee5b65dd0c562cef2f423c005a
|
[] |
no_license
|
hjrhjr/entrenamiento_13_odoo_ref
|
f73e292b91d085473283f63a88ccd2363a03d9bf
|
9a492c006d9c0aab68d0b095281dafda97ebdfda
|
refs/heads/main
| 2023-08-25T06:46:39.075724 | 2021-10-19T14:51:27 | 2021-10-19T14:51:27 | null | 0 | 0 | null | null | null | null |
UTF-8
|
Python
| false | false | 7,563 |
py
|
# -*- coding: utf-8 -*-
import logging
import json
from odoo.tools import float_is_zero, float_compare, safe_eval, date_utils, email_split, email_escape_char, email_re
from odoo import fields, models, api, exceptions, _
from odoo.exceptions import UserError, ValidationError
_logger = logging.getLogger('__name__')
class AccountMove(models.Model):
"""This model add fields need in the invoice for accounting in Venezuela."""
_inherit = 'account.move'
invoice_payments_widget = fields.Text(groups="account.group_account_invoice",
compute='_compute_payments_widget_reconciled_info')
@api.depends('type', 'line_ids.amount_residual')
def _compute_payments_widget_reconciled_info(self):
for move in self:
if move.state != 'posted' or not move.is_invoice(include_receipts=True):
move.invoice_payments_widget = json.dumps(False)
continue
reconciled_vals = move._get_reconciled_info_JSON_values()
if reconciled_vals:
info = {
'title': _('Aplicado'),
'outstanding': False,
'content': reconciled_vals,
}
move.invoice_payments_widget = json.dumps(info, default=date_utils.json_default)
else:
move.invoice_payments_widget = json.dumps(False)
#raise UserError(_(' valor=%s')%move.invoice_payments_widget)
def funcion_numeracion_fac(self):
if self.type=="in_invoice":
busca_correlativos = self.env['account.move'].search([('invoice_number','=',self.invoice_number_pro),('id','!=',self.id),('partner_id','=',self.partner_id.id)])
for det_corr in busca_correlativos:
if det_corr.invoice_number:
raise UserError(_(' El valor :%s ya se uso en otro documento de este proveedor')%det_corr.invoice_number)
"""busca_correlativos2 = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number_pro),('id','!=',self.id)])
for det_corr2 in busca_correlativos2:
if det_corr2.invoice_ctrl_number:
raise UserError(_(' El nro de control :%s ya se uso en otro documento')%det_corr2.invoice_ctrl_number)"""
self.invoice_number=self.invoice_number_pro
self.invoice_ctrl_number=self.invoice_ctrl_number_pro
            partners='pro' # here the partner is a supplier
if self.type=="in_refund" or self.type=="in_receipt":
busca_correlativos = self.env['account.move'].search([('invoice_number','=',self.refuld_number_pro),('id','!=',self.id),('partner_id','=',self.partner_id.id)])
for det_corr in busca_correlativos:
if det_corr.invoice_number:
raise UserError(_(' El valor :%s ya se uso en otro documento de este proveedor')%det_corr.invoice_number)
busca_correlativos2 = self.env['account.move'].search([('invoice_ctrl_number','=',self.refund_ctrl_number_pro),('id','!=',self.id)])
for det_corr2 in busca_correlativos2:
if det_corr2.invoice_ctrl_number:
raise UserError(_(' El nro de control :%s ya se uso en otro documento')%det_corr2.invoice_ctrl_number)
self.invoice_number=self.refuld_number_pro
self.invoice_ctrl_number=self.refund_ctrl_number_pro
            partners='cli' # here the partner is a customer
if self.type=="out_invoice":
if self.nr_manual==False:
busca_correlativos = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number),('id','!=',self.id)])
#if self.invoice_number_cli:
if busca_correlativos or not self.invoice_ctrl_number:
self.invoice_number_cli=self.get_invoice_number_cli()
self.invoice_number=self.invoice_number_cli #self.get_invoice_number_cli()
self.invoice_ctrl_number_cli=self.get_invoice_ctrl_number_unico()
self.invoice_ctrl_number=self.invoice_ctrl_number_cli #self.get_invoice_ctrl_number_cli()
else:
self.invoice_number=self.invoice_number_cli
self.invoice_ctrl_number=self.invoice_ctrl_number_cli
else:
self.invoice_number=self.invoice_number_cli
self.invoice_ctrl_number=self.invoice_ctrl_number_cli
if self.type=="out_refund":
if self.nr_manual==False:
busca_correlativos = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number),('id','!=',self.id)])
if busca_correlativos or not self.invoice_ctrl_number:
self.refuld_number_cli=self.get_refuld_number_cli()
self.invoice_number=self.refuld_number_cli #self.get_refuld_number_cli()
self.refund_ctrl_number_cli=self.get_invoice_ctrl_number_unico()
self.invoice_ctrl_number=self.refund_ctrl_number_cli #self.get_refuld_ctrl_number_cli()
else:
self.invoice_number=self.refuld_number_cli
self.invoice_ctrl_number=self.refund_ctrl_number_cli
else:
self.invoice_number=self.refuld_number_cli
self.invoice_ctrl_number=self.refund_ctrl_number_cli
if self.type=="out_receipt":
if self.nr_manual==False:
busca_correlativos = self.env['account.move'].search([('invoice_ctrl_number','=',self.invoice_ctrl_number),('id','!=',self.id)])
if busca_correlativos or not self.invoice_ctrl_number:
self.refuld_number_cli=self.get_refuld_number_pro()
self.invoice_number=self.refuld_number_cli #self.get_refuld_number_cli()
self.refund_ctrl_number_cli=self.get_invoice_ctrl_number_unico()
self.invoice_ctrl_number=self.refund_ctrl_number_cli #self.get_refuld_ctrl_number_cli()
else:
self.invoice_number=self.refuld_number_cli
self.invoice_ctrl_number=self.refund_ctrl_number_cli
else:
self.invoice_number=self.refuld_number_cli
self.invoice_ctrl_number=self.refund_ctrl_number_cli
def get_invoice_ctrl_number_unico(self):
        """Return the next unique control number; if the per-company
        sequence (code 'l10n_ve_nro_control_unico_formato_libre<company_id>')
        does not exist yet, create it first."""
self.ensure_one()
SEQUENCE_CODE = 'l10n_ve_nro_control_unico_formato_libre'+str(self.company_id.id) #loca 14
company_id = self.company_id.id #loca 14
IrSequence = self.env['ir.sequence'].with_context(force_company=company_id) #loca 14
name = IrSequence.next_by_code(SEQUENCE_CODE)
        # if no sequence exists yet for this company, create one
if not name:
IrSequence.sudo().create({
'prefix': '00-',
'name': 'Localización Venezolana nro control Unico Factura Forma Libre %s' % 1,
'code': SEQUENCE_CODE,
'implementation': 'no_gap',
'padding': 4,
'number_increment': 1,
'company_id': company_id, #loca 14
})
name = IrSequence.next_by_code(SEQUENCE_CODE)
#self.invoice_number_cli=name
return name
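
# Illustrative sketch (not part of the original module; the record lookup and
# returned values are invented for the example). It shows how the sequence
# method above behaves on first and subsequent calls:
#
#   move = self.env['account.move'].browse(some_move_id)   # hypothetical id
#   ctrl = move.get_invoice_ctrl_number_unico()
#   # the first call for a fresh company creates the sequence and returns
#   # '00-0001' (prefix '00-', padding 4); later calls continue the gapless
#   # numbering: '00-0002', '00-0003', ...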
# ===== /huaweicloud-sdk-vas/huaweicloudsdkvas/v2/model/start_task_request.py (repo: huaweicloud/huaweicloud-sdk-python-v3; license: Apache-2.0) =====
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class StartTaskRequest:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'service_name': 'str',
'task_id': 'str'
}
attribute_map = {
'service_name': 'service_name',
'task_id': 'task_id'
}
def __init__(self, service_name=None, task_id=None):
"""StartTaskRequest
The model defined in huaweicloud sdk
        :param service_name: Service name
:type service_name: str
        :param task_id: ID of the specified service task
:type task_id: str
"""
self._service_name = None
self._task_id = None
self.discriminator = None
self.service_name = service_name
self.task_id = task_id
@property
def service_name(self):
"""Gets the service_name of this StartTaskRequest.
        Service name
:return: The service_name of this StartTaskRequest.
:rtype: str
"""
return self._service_name
@service_name.setter
def service_name(self, service_name):
"""Sets the service_name of this StartTaskRequest.
        Service name
:param service_name: The service_name of this StartTaskRequest.
:type service_name: str
"""
self._service_name = service_name
@property
def task_id(self):
"""Gets the task_id of this StartTaskRequest.
        ID of the specified service task
:return: The task_id of this StartTaskRequest.
:rtype: str
"""
return self._task_id
@task_id.setter
def task_id(self, task_id):
"""Sets the task_id of this StartTaskRequest.
        ID of the specified service task
:param task_id: The task_id of this StartTaskRequest.
:type task_id: str
"""
self._task_id = task_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, StartTaskRequest):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
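

if __name__ == '__main__':
    # Hedged demo (not part of the generated SDK file; the service and task
    # values are invented). It shows the model round-tripping through its
    # dict helper.
    demo = StartTaskRequest(service_name="vas", task_id="task-0001")
    assert demo.to_dict() == {'service_name': 'vas', 'task_id': 'task-0001'}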
# ===== /110_concurrency_parallelism/001_asynchronous/_exercises/templates/Async Techniques and Examples in Python/09-built-on-asyncio/the_unsync/nosync.py (repo: syurskyi/Python_Topics; no license) =====
import datetime
import math
import time

import requests


def main():
    t0 = datetime.datetime.now()

    compute_some()
    compute_some()
    compute_some()
    download_some()
    download_some()
    download_some_more()
    download_some_more()
    wait_some()
    wait_some()
    wait_some()
    wait_some()

    dt = datetime.datetime.now() - t0
    print("Synchronous version done in {:,.2f} seconds.".format(dt.total_seconds()))


def compute_some():
    print("Computing...")
    for _ in range(1, 10_000_000):
        math.sqrt(25 ** 25 + .01)


def download_some():
    print("Downloading...")
    url = 'https://talkpython.fm/episodes/show/174/coming-into-python-from-another-industry-part-2'
    resp = requests.get(url)
    resp.raise_for_status()

    text = resp.text

    print("Downloaded (more) {:,} characters.".format(len(text)))


def download_some_more():
    print("Downloading more ...")
    url = 'https://pythonbytes.fm/episodes/show/92/will-your-python-be-compiled'
    resp = requests.get(url)
    resp.raise_for_status()

    text = resp.text

    print("Downloaded {:,} characters.".format(len(text)))


def wait_some():
    print("Waiting...")
    for _ in range(1, 1000):
        time.sleep(.001)


if __name__ == '__main__':
    main()
# ===== /engine/2.80/python/lib/idlelib/pyparse.py (repo: byteinc/Phasor; license: multiple, classified permissive) =====
"""Define partial Python code Parser used by editor and hyperparser.
Instances of ParseMap are used with str.translate.
The following bound search and match functions are defined:
_synchre - start of popular statement;
_junkre - whitespace or comment line;
_match_stringre: string, possibly without closer;
_itemre - line that may have bracket structure start;
_closere - line that must be followed by dedent.
_chew_ordinaryre - non-special characters.
"""
import re
import sys
# Reason last statement is continued (or C_NONE if it's not).
(C_NONE, C_BACKSLASH, C_STRING_FIRST_LINE,
C_STRING_NEXT_LINES, C_BRACKET) = range(5)
# Find what looks like the start of a popular statement.
_synchre = re.compile(r"""
^
[ \t]*
(?: while
| else
| def
| return
| assert
| break
| class
| continue
| elif
| try
| except
| raise
| import
| yield
)
\b
""", re.VERBOSE | re.MULTILINE).search
# Match blank line or non-indenting comment line.
_junkre = re.compile(r"""
[ \t]*
(?: \# \S .* )?
\n
""", re.VERBOSE).match
# Match any flavor of string; the terminating quote is optional
# so that we're robust in the face of incomplete program text.
_match_stringre = re.compile(r"""
\""" [^"\\]* (?:
(?: \\. | "(?!"") )
[^"\\]*
)*
(?: \""" )?
| " [^"\\\n]* (?: \\. [^"\\\n]* )* "?
| ''' [^'\\]* (?:
(?: \\. | '(?!'') )
[^'\\]*
)*
(?: ''' )?
| ' [^'\\\n]* (?: \\. [^'\\\n]* )* '?
""", re.VERBOSE | re.DOTALL).match
# Match a line that starts with something interesting;
# used to find the first item of a bracket structure.
_itemre = re.compile(r"""
[ \t]*
[^\s#\\] # if we match, m.end()-1 is the interesting char
""", re.VERBOSE).match
# Match start of statements that should be followed by a dedent.
_closere = re.compile(r"""
\s*
(?: return
| break
| continue
| raise
| pass
)
\b
""", re.VERBOSE).match
# Chew up non-special chars as quickly as possible. If match is
# successful, m.end() less 1 is the index of the last boring char
# matched. If match is unsuccessful, the string starts with an
# interesting char.
_chew_ordinaryre = re.compile(r"""
[^[\](){}#'"\\]+
""", re.VERBOSE).match
class ParseMap(dict):
r"""Dict subclass that maps anything not in dict to 'x'.
This is designed to be used with str.translate in study1.
Anything not specifically mapped otherwise becomes 'x'.
Example: replace everything except whitespace with 'x'.
>>> keepwhite = ParseMap((ord(c), ord(c)) for c in ' \t\n\r')
>>> "a + b\tc\nd".translate(keepwhite)
'x x x\tx\nx'
"""
# Calling this triples access time; see bpo-32940
def __missing__(self, key):
return 120 # ord('x')
# Map all ascii to 120 to avoid __missing__ call, then replace some.
trans = ParseMap.fromkeys(range(128), 120)
trans.update((ord(c), ord('(')) for c in "({[") # open brackets => '(';
trans.update((ord(c), ord(')')) for c in ")}]") # close brackets => ')'.
trans.update((ord(c), ord(c)) for c in "\"'\\\n#") # Keep these.
class Parser:
def __init__(self, indentwidth, tabwidth):
self.indentwidth = indentwidth
self.tabwidth = tabwidth
def set_code(self, s):
assert len(s) == 0 or s[-1] == '\n'
self.code = s
self.study_level = 0
def find_good_parse_start(self, is_char_in_string=None,
_synchre=_synchre):
"""
Return index of a good place to begin parsing, as close to the
end of the string as possible. This will be the start of some
popular stmt like "if" or "def". Return None if none found:
the caller should pass more prior context then, if possible, or
if not (the entire program text up until the point of interest
has already been tried) pass 0 to set_lo().
This will be reliable iff given a reliable is_char_in_string()
function, meaning that when it says "no", it's absolutely
guaranteed that the char is not in a string.
"""
code, pos = self.code, None
if not is_char_in_string:
# no clue -- make the caller pass everything
return None
# Peek back from the end for a good place to start,
# but don't try too often; pos will be left None, or
# bumped to a legitimate synch point.
limit = len(code)
for tries in range(5):
i = code.rfind(":\n", 0, limit)
if i < 0:
break
i = code.rfind('\n', 0, i) + 1 # start of colon line (-1+1=0)
m = _synchre(code, i, limit)
if m and not is_char_in_string(m.start()):
pos = m.start()
break
limit = i
if pos is None:
# Nothing looks like a block-opener, or stuff does
# but is_char_in_string keeps returning true; most likely
# we're in or near a giant string, the colorizer hasn't
# caught up enough to be helpful, or there simply *aren't*
# any interesting stmts. In any of these cases we're
# going to have to parse the whole thing to be sure, so
# give it one last try from the start, but stop wasting
# time here regardless of the outcome.
m = _synchre(code)
if m and not is_char_in_string(m.start()):
pos = m.start()
return pos
# Peeking back worked; look forward until _synchre no longer
# matches.
i = pos + 1
while 1:
m = _synchre(code, i)
if m:
s, i = m.span()
if not is_char_in_string(s):
pos = s
else:
break
return pos
def set_lo(self, lo):
""" Throw away the start of the string.
Intended to be called with the result of find_good_parse_start().
"""
assert lo == 0 or self.code[lo-1] == '\n'
if lo > 0:
self.code = self.code[lo:]
def _study1(self):
"""Find the line numbers of non-continuation lines.
As quickly as humanly possible <wink>, find the line numbers (0-
based) of the non-continuation lines.
Creates self.{goodlines, continuation}.
"""
if self.study_level >= 1:
return
self.study_level = 1
# Map all uninteresting characters to "x", all open brackets
# to "(", all close brackets to ")", then collapse runs of
# uninteresting characters. This can cut the number of chars
# by a factor of 10-40, and so greatly speed the following loop.
code = self.code
code = code.translate(trans)
code = code.replace('xxxxxxxx', 'x')
code = code.replace('xxxx', 'x')
code = code.replace('xx', 'x')
code = code.replace('xx', 'x')
code = code.replace('\nx', '\n')
# Replacing x\n with \n would be incorrect because
# x may be preceded by a backslash.
# March over the squashed version of the program, accumulating
# the line numbers of non-continued stmts, and determining
# whether & why the last stmt is a continuation.
continuation = C_NONE
level = lno = 0 # level is nesting level; lno is line number
self.goodlines = goodlines = [0]
push_good = goodlines.append
i, n = 0, len(code)
while i < n:
ch = code[i]
i = i+1
# cases are checked in decreasing order of frequency
if ch == 'x':
continue
if ch == '\n':
lno = lno + 1
if level == 0:
push_good(lno)
# else we're in an unclosed bracket structure
continue
if ch == '(':
level = level + 1
continue
if ch == ')':
if level:
level = level - 1
# else the program is invalid, but we can't complain
continue
if ch == '"' or ch == "'":
# consume the string
quote = ch
if code[i-1:i+2] == quote * 3:
quote = quote * 3
firstlno = lno
w = len(quote) - 1
i = i+w
while i < n:
ch = code[i]
i = i+1
if ch == 'x':
continue
if code[i-1:i+w] == quote:
i = i+w
break
if ch == '\n':
lno = lno + 1
if w == 0:
# unterminated single-quoted string
if level == 0:
push_good(lno)
break
continue
if ch == '\\':
assert i < n
if code[i] == '\n':
lno = lno + 1
i = i+1
continue
# else comment char or paren inside string
else:
# didn't break out of the loop, so we're still
# inside a string
if (lno - 1) == firstlno:
# before the previous \n in code, we were in the first
# line of the string
continuation = C_STRING_FIRST_LINE
else:
continuation = C_STRING_NEXT_LINES
continue # with outer loop
if ch == '#':
# consume the comment
i = code.find('\n', i)
assert i >= 0
continue
assert ch == '\\'
assert i < n
if code[i] == '\n':
lno = lno + 1
if i+1 == n:
continuation = C_BACKSLASH
i = i+1
# The last stmt may be continued for all 3 reasons.
# String continuation takes precedence over bracket
# continuation, which beats backslash continuation.
if (continuation != C_STRING_FIRST_LINE
and continuation != C_STRING_NEXT_LINES and level > 0):
continuation = C_BRACKET
self.continuation = continuation
# Push the final line number as a sentinel value, regardless of
# whether it's continued.
assert (continuation == C_NONE) == (goodlines[-1] == lno)
if goodlines[-1] != lno:
push_good(lno)
def get_continuation_type(self):
self._study1()
return self.continuation
def _study2(self):
"""
study1 was sufficient to determine the continuation status,
but doing more requires looking at every character. study2
does this for the last interesting statement in the block.
Creates:
self.stmt_start, stmt_end
slice indices of last interesting stmt
self.stmt_bracketing
the bracketing structure of the last interesting stmt; for
example, for the statement "say(boo) or die",
stmt_bracketing will be ((0, 0), (0, 1), (2, 0), (2, 1),
(4, 0)). Strings and comments are treated as brackets, for
the matter.
self.lastch
last interesting character before optional trailing comment
self.lastopenbracketpos
if continuation is C_BRACKET, index of last open bracket
"""
if self.study_level >= 2:
return
self._study1()
self.study_level = 2
# Set p and q to slice indices of last interesting stmt.
code, goodlines = self.code, self.goodlines
i = len(goodlines) - 1 # Index of newest line.
p = len(code) # End of goodlines[i]
while i:
assert p
# Make p be the index of the stmt at line number goodlines[i].
# Move p back to the stmt at line number goodlines[i-1].
q = p
for nothing in range(goodlines[i-1], goodlines[i]):
# tricky: sets p to 0 if no preceding newline
p = code.rfind('\n', 0, p-1) + 1
# The stmt code[p:q] isn't a continuation, but may be blank
# or a non-indenting comment line.
if _junkre(code, p):
i = i-1
else:
break
if i == 0:
# nothing but junk!
assert p == 0
q = p
self.stmt_start, self.stmt_end = p, q
# Analyze this stmt, to find the last open bracket (if any)
# and last interesting character (if any).
lastch = ""
stack = [] # stack of open bracket indices
push_stack = stack.append
bracketing = [(p, 0)]
while p < q:
# suck up all except ()[]{}'"#\\
m = _chew_ordinaryre(code, p, q)
if m:
# we skipped at least one boring char
newp = m.end()
# back up over totally boring whitespace
i = newp - 1 # index of last boring char
while i >= p and code[i] in " \t\n":
i = i-1
if i >= p:
lastch = code[i]
p = newp
if p >= q:
break
ch = code[p]
if ch in "([{":
push_stack(p)
bracketing.append((p, len(stack)))
lastch = ch
p = p+1
continue
if ch in ")]}":
if stack:
del stack[-1]
lastch = ch
p = p+1
bracketing.append((p, len(stack)))
continue
if ch == '"' or ch == "'":
# consume string
# Note that study1 did this with a Python loop, but
# we use a regexp here; the reason is speed in both
# cases; the string may be huge, but study1 pre-squashed
# strings to a couple of characters per line. study1
# also needed to keep track of newlines, and we don't
# have to.
bracketing.append((p, len(stack)+1))
lastch = ch
p = _match_stringre(code, p, q).end()
bracketing.append((p, len(stack)))
continue
if ch == '#':
# consume comment and trailing newline
bracketing.append((p, len(stack)+1))
p = code.find('\n', p, q) + 1
assert p > 0
bracketing.append((p, len(stack)))
continue
assert ch == '\\'
p = p+1 # beyond backslash
assert p < q
if code[p] != '\n':
# the program is invalid, but can't complain
lastch = ch + code[p]
p = p+1 # beyond escaped char
# end while p < q:
self.lastch = lastch
self.lastopenbracketpos = stack[-1] if stack else None
self.stmt_bracketing = tuple(bracketing)
def compute_bracket_indent(self):
"""Return number of spaces the next line should be indented.
Line continuation must be C_BRACKET.
"""
self._study2()
assert self.continuation == C_BRACKET
j = self.lastopenbracketpos
code = self.code
n = len(code)
origi = i = code.rfind('\n', 0, j) + 1
j = j+1 # one beyond open bracket
# find first list item; set i to start of its line
while j < n:
m = _itemre(code, j)
if m:
j = m.end() - 1 # index of first interesting char
extra = 0
break
else:
# this line is junk; advance to next line
i = j = code.find('\n', j) + 1
else:
# nothing interesting follows the bracket;
# reproduce the bracket line's indentation + a level
j = i = origi
while code[j] in " \t":
j = j+1
extra = self.indentwidth
return len(code[i:j].expandtabs(self.tabwidth)) + extra
def get_num_lines_in_stmt(self):
"""Return number of physical lines in last stmt.
The statement doesn't have to be an interesting statement. This is
intended to be called when continuation is C_BACKSLASH.
"""
self._study1()
goodlines = self.goodlines
return goodlines[-1] - goodlines[-2]
def compute_backslash_indent(self):
"""Return number of spaces the next line should be indented.
Line continuation must be C_BACKSLASH. Also assume that the new
line is the first one following the initial line of the stmt.
"""
self._study2()
assert self.continuation == C_BACKSLASH
code = self.code
i = self.stmt_start
while code[i] in " \t":
i = i+1
startpos = i
# See whether the initial line starts an assignment stmt; i.e.,
# look for an = operator
endpos = code.find('\n', startpos) + 1
found = level = 0
while i < endpos:
ch = code[i]
if ch in "([{":
level = level + 1
i = i+1
elif ch in ")]}":
if level:
level = level - 1
i = i+1
elif ch == '"' or ch == "'":
i = _match_stringre(code, i, endpos).end()
elif ch == '#':
# This line is unreachable because the # makes a comment of
# everything after it.
break
elif level == 0 and ch == '=' and \
(i == 0 or code[i-1] not in "=<>!") and \
code[i+1] != '=':
found = 1
break
else:
i = i+1
if found:
# found a legit =, but it may be the last interesting
# thing on the line
i = i+1 # move beyond the =
found = re.match(r"\s*\\", code[i:endpos]) is None
if not found:
# oh well ... settle for moving beyond the first chunk
# of non-whitespace chars
i = startpos
while code[i] not in " \t\n":
i = i+1
return len(code[self.stmt_start:i].expandtabs(\
self.tabwidth)) + 1
def get_base_indent_string(self):
"""Return the leading whitespace on the initial line of the last
interesting stmt.
"""
self._study2()
i, n = self.stmt_start, self.stmt_end
j = i
code = self.code
while j < n and code[j] in " \t":
j = j + 1
return code[i:j]
def is_block_opener(self):
"Return True if the last interesting statemtent opens a block."
self._study2()
return self.lastch == ':'
def is_block_closer(self):
"Return True if the last interesting statement closes a block."
self._study2()
return _closere(self.code, self.stmt_start) is not None
def get_last_stmt_bracketing(self):
"""Return bracketing structure of the last interesting statement.
The returned tuple is in the format defined in _study2().
"""
self._study2()
return self.stmt_bracketing
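

# Illustrative sketch (not part of idlelib): feeding the parser a statement
# that ends inside an open bracket. The code string and widths are invented;
# the results follow from _study1/_study2 as defined above.
#
#   p = Parser(indentwidth=4, tabwidth=8)
#   p.set_code("if x:\n    y = (1,\n")
#   p.get_continuation_type() == C_BRACKET   # True: the tuple is still open
#   p.compute_bracket_indent()               # 9, aligning under the first item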
if __name__ == '__main__': #pragma: nocover
import unittest
unittest.main('idlelib.idle_test.test_pyparse', verbosity=2)
# ===== /peek_plugin_diagram/_private/tuples/location_index/LocationIndexUpdateDateTuple.py (repo: Synerty/peek-plugin-diagram; no license) =====
from typing import Dict
from peek_abstract_chunked_index.private.tuples.ACIUpdateDateTupleABC import \
ACIUpdateDateTupleABC
from peek_plugin_diagram._private.PluginNames import diagramTuplePrefix
from vortex.Tuple import addTupleType, TupleField, Tuple
#: This is the type of the data that we get when the clients observe new location indexes.
DeviceLocationIndexT = Dict[str, str]
@addTupleType
class LocationIndexUpdateDateTuple(Tuple, ACIUpdateDateTupleABC):
__tupleType__ = diagramTuplePrefix + "LocationIndexUpdateDateTuple"
# Improve performance of the JSON serialisation
__rawJonableFields__ = ('initialLoadComplete', 'updateDateByChunkKey')
initialLoadComplete: bool = TupleField()
updateDateByChunkKey: DeviceLocationIndexT = TupleField({})
@property
def ckiUpdateDateByChunkKey(self):
return self.updateDateByChunkKey
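

# Illustrative sketch (keys and date strings are invented): the tuple maps a
# chunk key to the date of its last update, and the ABC property exposes the
# same dict.
#
#   t = LocationIndexUpdateDateTuple()
#   t.updateDateByChunkKey = {"chunk.0": "2020-10-27T04:55:52"}
#   t.ckiUpdateDateByChunkKey["chunk.0"]   # -> "2020-10-27T04:55:52"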
# ===== /DSA/string/practise/find_left_most_char_that_repeat.py (repo: RohanMiraje/DSAwithPython; license: MIT) =====
import sys
"""
Find index of leftmost char that repeats
Naive approach-TC:O(n^2) SC:O(1)
start string travers from left:
check if curr left index value is repeating
at its right remaining string by traversing:
if found repeated then result curr left index
return -1 if no repeated found
better approach:TC:O(n) SC:O(256)
use count_array of len 256 with default values to -1
init res = MAX value # to store final result index
traverse string from start :
check if value in count_arr at ascii value of curr char as index is == -1:
store curr index of curr char at this position
else:
res = min(res, count_arr[ord(char)])
return -1 if res is MAX else res
faster approach than above approach is to traverse string from its end:
so, that res will be updated directly with repeated char index every time
"""
def find_left_most_char_that_repeat(string):
count_array = [-1] * 256
res = sys.maxsize
for index, char in enumerate(string):
if count_array[ord(char)] == -1:
count_array[ord(char)] = index
else:
res = min(res, count_array[ord(char)])
return -1 if res == sys.maxsize else res
def find_left_most_char_that_repeat_method2(string):
count_array = [-1] * 256
res = sys.maxsize
for index in range(len(string) - 1, -1, -1):
char = string[index]
if count_array[ord(char)] == -1:
count_array[ord(char)] = index
        else:
            # char was already seen to the right of this index, so this
            # occurrence repeats; scanning right-to-left leaves the leftmost
            # such index in res
            res = index
return -1 if res == sys.maxsize else res
def find_left_most_char_that_not_repeat_method(string):
count_array = [-1] * 256
res = sys.maxsize
for index, char in enumerate(string):
if count_array[ord(char)] == -1:
count_array[ord(char)] = 1
else:
count_array[ord(char)] += 1
    for index, char in enumerate(string):
        if count_array[ord(char)] == 1:
            return index
    return -1  # every character repeats
def find_left_most_char_that_not_repeat_method2(string):
count_array = [-1] * 256
res = sys.maxsize
for index, char in enumerate(string):
if count_array[ord(char)] == -1:
count_array[ord(char)] = index # this is IMP
else:
count_array[ord(char)] = -2 # this is marked repeating
for val in count_array: # const loop
if val >= 0:
res = min(res, val) # val is index of leftmost non repeated
    return -1 if res == sys.maxsize else res  # -1 when every character repeats
if __name__ == '__main__':
print(find_left_most_char_that_not_repeat_method2('geeksforgeeks'))
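    # A few extra hedged checks (added for illustration; expected values worked
    # out by hand on 'geeksforgeeks': 'g' repeats starting at index 0, and 'f'
    # at index 5 is the leftmost character that never repeats).
    assert find_left_most_char_that_repeat('geeksforgeeks') == 0
    assert find_left_most_char_that_repeat('abcd') == -1
    assert find_left_most_char_that_not_repeat_method2('geeksforgeeks') == 5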
# ===== /User/migrations/0013_auto_20201102_0842.py (repo: Jyonn/Saying; no license) =====
# Generated by Django 3.0.6 on 2020-11-02 08:42
import SmartDjango.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('User', '0012_auto_20180301_0040'),
]
operations = [
migrations.AlterModelOptions(
name='user',
options={'default_manager_name': 'objects'},
),
migrations.AddField(
model_name='user',
name='inviter',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='User.User'),
),
migrations.AlterField(
model_name='user',
name='password',
field=SmartDjango.models.fields.CharField(max_length=32, verbose_name='密码'),
),
migrations.AlterField(
model_name='user',
name='pwd_change_time',
field=SmartDjango.models.fields.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='user',
name='salt',
field=SmartDjango.models.fields.CharField(default=None, max_length=10),
),
migrations.AlterField(
model_name='user',
name='username',
field=SmartDjango.models.fields.CharField(blank=True, default=None, max_length=32, null=True, unique=True, verbose_name='用户名'),
),
]
# ===== /integration_test/test_logging.py (repo: multiscale/muscle3; license: Apache-2.0) =====
import ymmsl
from ymmsl import Reference
from libmuscle.logging import LogLevel, LogMessage, Timestamp
from libmuscle.manager.manager import Manager
from libmuscle.mmp_client import MMPClient
def test_logging(log_file_in_tmpdir, caplog):
ymmsl_text = (
'ymmsl_version: v0.1\n'
'model:\n'
' name: test_model\n'
' components:\n'
' macro: macro_implementation\n'
' micro:\n'
' implementation: micro_implementation\n'
' multiplicity: [10]\n'
' conduits:\n'
' macro.out: micro.in\n'
' micro.out: macro.in\n'
'settings:\n'
' test1: 13\n'
' test2: 13.3\n'
' test3: testing\n'
' test4: True\n'
' test5: [2.3, 5.6]\n'
' test6:\n'
' - [1.0, 2.0]\n'
' - [3.0, 1.0]\n'
)
# create server
ymmsl_doc = ymmsl.load(ymmsl_text)
manager = Manager(ymmsl_doc)
# create client
instance_id = Reference('test_logging')
client = MMPClient(instance_id, manager.get_server_location())
message = LogMessage(
instance_id=str(instance_id),
timestamp=Timestamp(2.0),
level=LogLevel.DEBUG,
text='Integration testing')
# log and check
client.submit_log_message(message)
for rec in caplog.records:
if rec.name == 'instances.test_logging':
assert rec.time_stamp == '1970-01-01T00:00:02Z'
assert rec.levelname == 'DEBUG'
assert rec.message == 'Integration testing'
break
client.close()
manager.stop()
# ===== /Infoplus/models/order_load_program.py (repo: infopluscommerce/infoplus-python-client; no license) =====
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class OrderLoadProgram(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
OrderLoadProgram - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'id': 'int',
'label': 'str'
}
self.attribute_map = {
'id': 'id',
'label': 'label'
}
self._id = None
self._label = None
@property
def id(self):
"""
Gets the id of this OrderLoadProgram.
:return: The id of this OrderLoadProgram.
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this OrderLoadProgram.
:param id: The id of this OrderLoadProgram.
:type: int
"""
self._id = id
@property
def label(self):
"""
Gets the label of this OrderLoadProgram.
:return: The label of this OrderLoadProgram.
:rtype: str
"""
return self._label
@label.setter
def label(self, label):
"""
Sets the label of this OrderLoadProgram.
:param label: The label of this OrderLoadProgram.
:type: str
"""
self._label = label
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
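

if __name__ == '__main__':
    # Hedged demo (not part of the generated client; the id and label values
    # are invented). It shows the plain getters/setters plus the dict helper.
    program = OrderLoadProgram()
    program.id = 1
    program.label = "Standard"
    assert program.to_dict() == {'id': 1, 'label': 'Standard'}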