blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 3-616 | content_id stringlengths 40-40 | detected_licenses listlengths 0-112 | license_type stringclasses 2 values | repo_name stringlengths 5-115 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 | github_id int64 4.92k to 681M ⌀ | star_events_count int64 0 to 209k | fork_events_count int64 0 to 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 to 10.2M | extension stringclasses 188 values | content stringlengths 3 to 10.2M | authors listlengths 1-1 | author_id stringlengths 1-132 |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d07884ea9088dd79e535041db6fea087ead9d87a | e9538b7ad6d0ce0ccfbb8e10c458f9e0b73926f6 | /tests/unit/modules/network/fortios/test_fortios_log_fortianalyzer2_filter.py | 0c61c109df02d42a39cb39bdaec4e125f8829312 | []
| no_license | ansible-collection-migration/misc.not_a_real_collection | b3ef8090c59de9ac30aca083c746ec3595d7f5f5 | 7ab1af924a3db4ada2f714b09bb392614344cb1e | refs/heads/master | 2020-12-18T13:48:51.849567 | 2020-01-22T17:39:18 | 2020-01-22T17:39:18 | 235,400,821 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,103 | py | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible_collections.misc.not_a_real_collection.plugins.modules import fortios_log_fortianalyzer2_filter
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
connection_class_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.modules.fortios_log_fortianalyzer2_filter.Connection')
return connection_class_mock
# Note: the fixture function itself (not a live connection) is passed here;
# the tests below mock FortiOSHandler.set/schema, so it is never exercised.
fos_instance = FortiOSHandler(connection_mock)
def test_log_fortianalyzer2_filter_creation(mocker):
schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer2_filter': {
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer2_filter.fortios_log_fortianalyzer2(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer2', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
def test_log_fortianalyzer2_filter_creation_fails(mocker):
schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer2_filter': {
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer2_filter.fortios_log_fortianalyzer2(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer2', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 500
def test_log_fortianalyzer2_filter_idempotent(mocker):
schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer2_filter': {
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer2_filter.fortios_log_fortianalyzer2(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer2', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert not changed
assert response['status'] == 'error'
assert response['http_status'] == 404
def test_log_fortianalyzer2_filter_filter_foreign_attributes(mocker):
schema_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.schema')
set_method_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
set_method_mock = mocker.patch('ansible_collections.misc.not_a_real_collection.plugins.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_method_result)
input_data = {
'username': 'admin',
'state': 'present',
'log_fortianalyzer2_filter': {
'random_attribute_not_valid': 'tag',
'anomaly': 'enable',
'dlp_archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter_type': 'include',
'forward_traffic': 'enable',
'gtp': 'enable',
'local_traffic': 'enable',
'multicast_traffic': 'enable',
'netscan_discovery': 'test_value_12,',
'netscan_vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer_traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
},
'vdom': 'root'}
is_error, changed, response = fortios_log_fortianalyzer2_filter.fortios_log_fortianalyzer2(input_data, fos_instance)
expected_data = {
'anomaly': 'enable',
'dlp-archive': 'enable',
'dns': 'enable',
'filter': 'test_value_6',
'filter-type': 'include',
'forward-traffic': 'enable',
'gtp': 'enable',
'local-traffic': 'enable',
'multicast-traffic': 'enable',
'netscan-discovery': 'test_value_12,',
'netscan-vulnerability': 'test_value_13,',
'severity': 'emergency',
'sniffer-traffic': 'enable',
'ssh': 'enable',
'voip': 'enable'
}
set_method_mock.assert_called_with('log.fortianalyzer2', 'filter', data=expected_data, vdom='root')
schema_method_mock.assert_not_called()
assert not is_error
assert changed
assert response['status'] == 'success'
assert response['http_status'] == 200
| [
"[email protected]"
]
| |
6e90ef1077bbefd52a47c39eaf3d32fe9090c6d7 | 99c4d4a6592fded0e8e59652484ab226ac0bd38c | /code/batch-2/vse-naloge-brez-testov/DN13-Z-077.py | bcccc875f9d0ca0da1cc8bf82548e3362986f142 | []
| no_license | benquick123/code-profiling | 23e9aa5aecb91753e2f1fecdc3f6d62049a990d5 | 0d496d649247776d121683d10019ec2a7cba574c | refs/heads/master | 2021-10-08T02:53:50.107036 | 2018-12-06T22:56:38 | 2018-12-06T22:56:38 | 126,011,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,260 | py | import itertools
class Minobot:
def __init__(self):
self.x = 0
self.y = 0
self.smer = "desno"
def naprej(self, premik):
if (self.smer == "desno"):
self.x += premik
elif (self.smer == "levo"):
self.x -= premik
elif (self.smer == "gor"):
self.y += premik
elif (self.smer == "dol"):
self.y -= premik
def koordinate(self):
return self.x, self.y
def desno(self):
if (self.smer == "desno"):
self.smer = "dol"
elif (self.smer == "dol"):
self.smer = "levo"
elif (self.smer == "levo"):
self.smer = "gor"
elif (self.smer == "gor"):
self.smer = "desno"
def levo(self):
if (self.smer == "desno"):
self.smer = "gor"
elif (self.smer == "gor"):
self.smer = "levo"
elif (self.smer == "levo"):
self.smer = "dol"
elif (self.smer == "dol"):
self.smer = "desno"
def razdalja(self):
return abs(self.x) + abs(self.y)
a = Minobot()
a.levo()
a.naprej(4)
a.desno()
a.naprej(3)
#print(a.koordinate())
| [
"[email protected]"
]
| |
80737f45f33bac9e5445d7f37314f4c3515006f4 | 9e41cd05ee3d36d09e2dfb49af8212c3aee3cd61 | /kisházik/classification_EMP2B5.py | c2e3885645ae26b3c237756509ab3bd62b09723f | []
| no_license | matech96/ADA | 2cf60eeacb0cdf95ce8486169ddd9e4e1bb2311f | b15c8e339291014af13e03cd3a099e1914198ff9 | refs/heads/master | 2020-09-14T18:02:05.271191 | 2020-01-28T16:04:29 | 2020-01-28T16:04:29 | 223,208,311 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,269 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, roc_auc_score, precision_score, recall_score
import pandas as pd
from sklearn.linear_model import LinearRegression
# In[2]:
df = pd.read_csv('../data/DataSet_Hitelbiralat_preprocessed.csv')
# In[3]:
def rounding_score_decorator(score):
return lambda y_true, y_pred: score(y_true, y_pred > 0.5)
def text2score(optimalization):
if optimalization == 'AUC':
score = roc_auc_score
elif optimalization == 'Precision':
score = rounding_score_decorator(precision_score)
elif optimalization == 'Recall':
score = rounding_score_decorator(recall_score)
elif optimalization == 'Accuracy':
score = rounding_score_decorator(accuracy_score)
return score
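# A minimal sketch of text2score in use (values below are illustrative): the
# 'Precision'/'Recall'/'Accuracy' scorers threshold raw regression outputs at
# 0.5 before scoring, so continuous predictions can be scored as class labels.
def _score_example():
    score = text2score('Precision')
    y_pred = pd.Series([0.9, 0.2, 0.7])  # thresholded to [1, 0, 1]
    return score([1, 0, 1], y_pred)      # precision == 1.0 here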
def modell_evaluator(data, input_attributes, target_attribute, model, optimalization):
score = text2score(optimalization)
    split_idx = len(data) // 2
data_train = data[:split_idx]
data_test = data[split_idx:]
def test_attributes(fix_input, possible_inputs):
best_score = -1
best_input = None
for possible_input in possible_inputs:
model.fit(data_train[fix_input + [possible_input]], data_train[target_attribute])
predicted = model.predict(data_test[fix_input + [possible_input]])
s = score(data_test[target_attribute], predicted)
if s > best_score:
best_score = s
best_input = possible_input
return best_input, best_score
good_inputs = []
in_race_inputs = input_attributes
best_s = -1
while len(in_race_inputs):
        # forward selection: greedily grow the selected-feature set
        i_to_accept, s = test_attributes(good_inputs, in_race_inputs)
if s < best_s:
return best_s, good_inputs
best_s = s
good_inputs.append(i_to_accept)
in_race_inputs.remove(i_to_accept)
return best_s, good_inputs
# In[4]:
i = df.columns.to_list()
i.remove('TARGET_LABEL_BAD')
modell_evaluator(df,
i, #['Sex', 'Age', 'MONTHS_IN_THE_JOB', 'PERSONAL_NET_INCOME', 'PAYMENT_DAY'],
'TARGET_LABEL_BAD',
LinearRegression(),
'AUC')
| [
"[email protected]"
]
| |
3f325bc49a183b5be3f47809de7ae0b062efb3f9 | 73a5eca1ddee1d74a3c2be9ca4e5e67ebe3d16f7 | /src/data/__init__.py | c780cbd47efafc7e74eb2dda1caa03ffc2b556db | [
"MIT"
]
| permissive | ychnlgy/Chebyshev-Lagrange | 34346692a2925cde620377e8fbcb8d588623fac7 | 74292e72b83f992d6c42a2f2db04dfdce5a52aea | refs/heads/master | 2020-05-23T06:20:10.831035 | 2020-02-12T16:31:38 | 2020-02-12T16:31:38 | 186,661,893 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 48 | py | from . import adhc
from .augment import augment
| [
"[email protected]"
]
| |
1d912dceae386ef74247aae0ce3c3d92d2ee8ed8 | e3c8f786d09e311d6ea1cab50edde040bf1ea988 | /Incident-Response/Tools/grr/grr/server/grr_response_server/gui/api_plugins/reflection_test.py | bbc604dfacc9a8f3d4e2f1ef5f8c6585f034152e | [
"MIT",
"Apache-2.0"
]
| permissive | foss2cyber/Incident-Playbook | d1add8aec6e28a19e515754c6ce2e524d67f368e | a379a134c0c5af14df4ed2afa066c1626506b754 | refs/heads/main | 2023-06-07T09:16:27.876561 | 2021-07-07T03:48:54 | 2021-07-07T03:48:54 | 384,988,036 | 1 | 0 | MIT | 2021-07-11T15:45:31 | 2021-07-11T15:45:31 | null | UTF-8 | Python | false | false | 3,974 | py | #!/usr/bin/env python
"""This module contains tests for reflection API handlers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_server.gui import api_call_router
from grr_response_server.gui import api_test_lib
from grr_response_server.gui.api_plugins import reflection as reflection_plugin
from grr.test_lib import test_lib
class ApiGetRDFValueDescriptorHandlerTest(api_test_lib.ApiCallHandlerTest):
"""Test for ApiGetRDFValueDescriptorHandler."""
def testSuccessfullyRendersReflectionDataForAllTypes(self):
result = reflection_plugin.ApiListRDFValuesDescriptorsHandler().Handle(
None, context=self.context)
# TODO(user): enhance this test.
self.assertTrue(result)
class DummyApiCallRouter(api_call_router.ApiCallRouter):
"""Dummy ApiCallRouter implementation overriding just 1 method."""
@api_call_router.Http("GET", "/api/method1")
@api_call_router.ArgsType(api_test_lib.SampleGetHandlerArgs)
def SomeRandomMethodWithArgsType(self, args, context=None):
"""Doc 1."""
@api_call_router.Http("GET", "/api/method2")
@api_call_router.ResultType(api_test_lib.SampleGetHandlerArgs)
def SomeRandomMethodWithResultType(self, args, context=None):
"""Doc 2."""
@api_call_router.Http("GET", "/api/method3")
@api_call_router.ArgsType(api_test_lib.SampleGetHandlerArgs)
@api_call_router.ResultType(api_test_lib.SampleGetHandlerArgs)
def SomeRandomMethodWithArgsTypeAndResultType(self, args, context=None):
"""Doc 3."""
class ApiListApiMethodsHandlerTest(api_test_lib.ApiCallHandlerTest):
"""Test for ApiListApiMethodsHandler."""
def setUp(self):
super().setUp()
self.router = DummyApiCallRouter()
self.handler = reflection_plugin.ApiListApiMethodsHandler(self.router)
def testRendersMethodWithArgsCorrectly(self):
result = self.handler.Handle(None, context=self.context)
method = [
item for item in result.items
if item.name == "SomeRandomMethodWithArgsType"
][0]
self.assertEqual(method.doc, "Doc 1.")
self.assertEqual(method.args_type_descriptor.name, "SampleGetHandlerArgs")
self.assertEqual(
method.args_type_descriptor.AsPrimitiveProto().default.type_url,
"type.googleapis.com/grr.SampleGetHandlerArgs")
self.assertEqual(method.result_kind, "NONE")
self.assertFalse(method.HasField("result_type"))
def testRendersMethodWithResultTypeCorrectly(self):
result = self.handler.Handle(None, context=self.context)
method = [
item for item in result.items
if item.name == "SomeRandomMethodWithResultType"
][0]
self.assertEqual(method.doc, "Doc 2.")
self.assertFalse(method.HasField("args_type"))
self.assertEqual(method.result_kind, "VALUE")
self.assertEqual(method.result_type_descriptor.name, "SampleGetHandlerArgs")
self.assertEqual(
method.result_type_descriptor.AsPrimitiveProto().default.type_url,
"type.googleapis.com/grr.SampleGetHandlerArgs")
def testRendersMethodWithArgsTypeAndResultTypeCorrectly(self):
result = self.handler.Handle(None, context=self.context)
method = [
item for item in result.items
if item.name == "SomeRandomMethodWithArgsTypeAndResultType"
][0]
self.assertEqual(method.doc, "Doc 3.")
self.assertEqual(method.args_type_descriptor.name, "SampleGetHandlerArgs")
self.assertEqual(
method.args_type_descriptor.AsPrimitiveProto().default.type_url,
"type.googleapis.com/grr.SampleGetHandlerArgs")
self.assertEqual(method.result_kind, "VALUE")
self.assertEqual(method.result_type_descriptor.name, "SampleGetHandlerArgs")
self.assertEqual(
method.result_type_descriptor.AsPrimitiveProto().default.type_url,
"type.googleapis.com/grr.SampleGetHandlerArgs")
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
| [
"[email protected]"
]
| |
73d467d6ab8185e9e67e75cb05f4ebc9019517a1 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /lib/python2.7/site-packages/openopt/__init__.py | 76070a202bb544deddb6eb445f4f4fae6c2a6b53 | [
"Python-2.0",
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
]
| permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,119 | py | #! /usr/bin/env python
#from .ooVersionNumber import __version__
import os, sys
curr_dir = ''.join([elem + os.sep for elem in __file__.split(os.sep)[:-1]])
sys.path += [curr_dir, curr_dir + 'kernel']
from ooVersionNumber import __version__
from oo import *
#from kernel.GUI import manage
#from kernel.oologfcn import OpenOptException
#from kernel.nonOptMisc import oosolver
from GUI import manage
from oologfcn import OpenOptException
from nonOptMisc import oosolver
from mfa import MFA
isE = False
try:
import enthought
isE = True
except ImportError:
pass
try:
import envisage
import mayavi
isE = True
except ImportError:
pass
try:
import xy
isE = False
except ImportError:
pass
if isE:
s = """
Seems like you are using OpenOpt from
commercial Enthought Python Distribution;
consider using free GPL-licensed alternatives
PythonXY (http://www.pythonxy.com) or
Sage (http://sagemath.org) instead.
"""
print(s)
#__all__ = filter(lambda s:not s.startswith('_'),dir())
#from numpy.testing import NumpyTest
#test = NumpyTest().test
| [
"[email protected]"
]
| |
bee97ffb8715c6eb2cdc7c66345f36d3e6290a71 | 0f2b08b31fab269c77d4b14240b8746a3ba17d5e | /orttraining/tools/scripts/pipeline_model_split.py | d1ae9dd22bf001ccd5c2e047bb63cb3433d4dfb6 | [
"MIT"
]
| permissive | microsoft/onnxruntime | f75aa499496f4d0a07ab68ffa589d06f83b7db1d | 5e747071be882efd6b54d7a7421042e68dcd6aff | refs/heads/main | 2023-09-04T03:14:50.888927 | 2023-09-02T07:16:28 | 2023-09-02T07:16:28 | 156,939,672 | 9,912 | 2,451 | MIT | 2023-09-14T21:22:46 | 2018-11-10T02:22:53 | C++ | UTF-8 | Python | false | false | 15,126 | py | import os
import sys # noqa: F401
import onnx
from onnx import OperatorSetIdProto, TensorProto, helper # noqa: F401
# Edge that needs to be cut for the split.
# If the edge feeds into more than one node, and not all of those nodes belong
# to the same cut, specify the consuming nodes that need to be cut.
class CutEdge:
def __init__(self, edgeId, consumingNodes=None):
self.edgeId = edgeId
self.consumingNodes = consumingNodes
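# A minimal sketch of how cut edges are declared (mirroring the cuts that
# main() below builds for the example BERT model):
def _cut_edge_example():
    return [
        CutEdge("186"),                 # cut edge "186" for every consumer
        CutEdge("71", {"273", "395"}),  # cut edge "71" only on the paths into
                                        # the consumers identified by "273"/"395"
    ]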
def add_expand_type(model, name, type):
expand_edge = model.graph.value_info.add()
expand_edge.name = name
expand_edge.type.CopyFrom(type)
# Add wait/record/send/recv nodes and split the graph into disconnected subgraphs
def split_graph(model, split_edge_groups):
ms_domain = "com.microsoft"
new_send_nodes = []
new_recv_nodes = []
for cut_index in range(len(split_edge_groups)):
edgeIds = split_edge_groups[cut_index] # noqa: N806
# split the graph based on edgeIds
upstream_nodes = []
upstream_nodes_output_index = []
output_shapes = []
element_types = []
for id in edgeIds:
for node in model.graph.node:
if len(node.output) >= 1:
for i, j in enumerate(node.output):
if j == id:
upstream_nodes.append(node)
upstream_nodes_output_index.append(i)
# assuming all tensors are of type float
element_types.append(1)
for info in model.graph.value_info:
if info.name == id:
output_shapes.append(info.type)
send_input_signal_name = "send_input_signal" + str(cut_index)
send_signal = model.graph.input.add()
send_signal.CopyFrom(helper.make_tensor_value_info(send_input_signal_name, onnx.TensorProto.BOOL, None))
send_signal = helper.make_tensor(send_input_signal_name, TensorProto.BOOL, (), (True,))
model.graph.initializer.extend([send_signal])
recv_input_signal_name = "recv_input_signal" + str(cut_index)
recv_signal = model.graph.input.add()
recv_signal.CopyFrom(helper.make_tensor_value_info(recv_input_signal_name, onnx.TensorProto.BOOL, None))
recv_signal = helper.make_tensor(recv_input_signal_name, TensorProto.BOOL, (), (True,))
model.graph.initializer.extend([recv_signal])
send_dst_rank_name = "send_dst_rank" + str(cut_index)
send_dst_rank = model.graph.input.add()
send_dst_rank.CopyFrom(helper.make_tensor_value_info(send_dst_rank_name, onnx.TensorProto.INT64, None))
send_dst_rank = helper.make_tensor(send_dst_rank_name, TensorProto.INT64, (), (cut_index + 1,))
model.graph.initializer.extend([send_dst_rank])
recv_src_rank_name = "recv_src_rank" + str(cut_index)
recv_src_rank = model.graph.input.add()
recv_src_rank.CopyFrom(helper.make_tensor_value_info(recv_src_rank_name, onnx.TensorProto.INT64, None))
recv_src_rank = helper.make_tensor(recv_src_rank_name, TensorProto.INT64, (), (cut_index,))
model.graph.initializer.extend([recv_src_rank])
# output signal from send after cut
send_output_signal = model.graph.output.add()
send_output_signal.CopyFrom(
helper.make_tensor_value_info("send_output_signal" + str(cut_index), onnx.TensorProto.BOOL, None)
)
# output signal from receive after cut
receive_output_signal = model.graph.output.add()
receive_output_signal.CopyFrom(
helper.make_tensor_value_info("receive_output_signal" + str(cut_index), onnx.TensorProto.BOOL, None)
)
new_send = model.graph.node.add()
new_send.CopyFrom(
helper.make_node(
"Send",
inputs=[send_input_signal_name, send_dst_rank_name],
outputs=["send_output_signal" + str(cut_index)],
tag=0,
domain=ms_domain,
element_types=element_types,
name="send",
)
)
new_receive = model.graph.node.add()
new_receive.CopyFrom(
helper.make_node(
"Recv",
inputs=[recv_input_signal_name, recv_src_rank_name],
outputs=["receive_output_signal" + str(cut_index)],
tag=0,
domain=ms_domain,
element_types=element_types,
name="receive",
)
)
for i in range(len(upstream_nodes)):
n = upstream_nodes[i]
idx = upstream_nodes_output_index[i]
output_type = output_shapes[i]
output_edge_name = n.output[idx]
output_nodes = find_all_output_nodes_by_edge(model, output_edge_name)
# deal with shape inference for newly added edge
new_send_input_name = output_edge_name + "_send" + str(cut_index)
add_expand_type(model, new_send_input_name, output_type)
new_receive_output_name = output_edge_name + "_recv" + str(cut_index)
add_expand_type(model, new_receive_output_name, output_type)
# the order of data flow is: node-output -> record -> send -> recv -> wait -> node-input
new_send.input.extend([output_edge_name])
new_receive.output.extend([new_receive_output_name])
for output_node in output_nodes:
for i in range(len(output_node.input)): # noqa: PLW2901
for edgeId in edgeIds: # noqa: N806
if output_node.input[i] == edgeId:
output_node.input[i] = new_receive_output_name
new_send_nodes.append(new_send)
new_recv_nodes.append(new_receive)
model = onnx.shape_inference.infer_shapes(model)
return new_send_nodes, new_recv_nodes
def find_all_input_nodes(model, node):
nodes = []
inputs = []
if node:
for inputId in node.input: # noqa: N806
nodes.extend([n for n in model.graph.node if inputId in n.output])
inputs.extend([n for n in model.graph.input if inputId in n.name])
return nodes, inputs
def find_all_output_nodes(model, node):
nodes = []
outputs = []
if node:
for outputId in node.output: # noqa: N806
nodes.extend([n for n in model.graph.node if outputId in n.input])
outputs.extend([n for n in model.graph.output if outputId in n.name])
return nodes, outputs
def find_all_output_nodes_by_edge(model, arg):
result = [n for n in model.graph.node if arg in n.input]
return result
# Insert identity nodes to separate an output edge that feeds into nodes in different sub-graphs.
def add_identity(model, cuttingEdge, newEdgeIdName):
output_nodes = None
edgeId = cuttingEdge.edgeId # noqa: N806
for node in model.graph.node:
if len(node.output) >= 1:
for output in node.output:
if output == edgeId:
output_nodes = find_all_output_nodes_by_edge(model, output)
break
assert output_nodes, "no output node"
new_identity = model.graph.node.add()
new_identity.op_type = "Identity"
new_identity.input.extend([edgeId])
new_identity.output.extend([newEdgeIdName])
for i in range(len(output_nodes)):
for output in output_nodes[i].output:
if output in cuttingEdge.consumingNodes:
for j in range(len(output_nodes[i].input)):
if output_nodes[i].input[j] == edgeId:
output_nodes[i].input[j] = newEdgeIdName
return new_identity
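# Sketch of the rewiring performed by add_identity (edge names illustrative):
# before: producer --E--> A (sub-graph 0)   and   producer --E--> B (sub-graph 1)
# after:  producer --E--> A   and   producer --E--> Identity --identity_output_k--> B
# so the split can cut identity_output_k while leaving A's input intact.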
def insert_identity(model, all_cut_inputs):
count = 0
updated_edges = {}
new_added_identity = []
split_edge_groups = []
need_shape_inference = False
# Sweep the cut edge to see if there are edges feeding into nodes from two sub-graphs. If so,
# insert identity node after those edges with a new ID to distinguish the rest.
for cut_input in all_cut_inputs:
split_edges = []
for i in cut_input:
if i.consumingNodes:
# if this edge has previously been modified, update its edgeId before inserting new identity
if i.edgeId in updated_edges:
i.edgeId = updated_edges[i.edgeId]
new_edge_name = "identity_output_" + str(count)
new_added_identity.append(add_identity(model, i, new_edge_name))
count += 1
split_edges.append(new_edge_name)
updated_edges[i.edgeId] = new_edge_name
need_shape_inference = True
else:
split_edges.append(i.edgeId)
split_edge_groups.append(split_edges)
return split_edge_groups, new_added_identity, need_shape_inference
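# For example, the two cuts defined in main() below come out of insert_identity
# (up to set-iteration order within each group) as
#   split_edge_groups == [["186", "identity_output_0"], ["308", "identity_output_1"]]
# where the identity_output_* edges were inserted for the shared edge "71".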
# After the graph is split, remove the added identity nodes: the Identity op is not registered in the gradient builder.
def remove_identity(model, new_added_identity):
for node in new_added_identity:
assert node.op_type == "Identity"
output_nodes = [n for n in model.graph.node if node.output[0] in n.input]
for output_node in output_nodes:
for i in range(len(output_node.input)):
if output_node.input[i] == node.output[0]:
output_node.input[i] = node.input[0]
def find_all_connected_nodes(model, node):
nodes0, inputs = find_all_input_nodes(model, node)
nodes1, outputs = find_all_output_nodes(model, node)
connected_nodes = nodes0 + nodes1
return connected_nodes, inputs, outputs
def get_index(node_list, node):
found = [i for i, n in enumerate(node_list) if n == node]
return found[0] if found else None
def get_identity_index_for_deleting(node_list, node):
for i, n in enumerate(node_list):
# The node's input name has been changed during send/recv insertion,
# but it is sufficient to just compare the type and outputs.
if n.op_type == "Identity" and n.output == node.output:
return i
return None
# traverse the graph, group connected nodes and generate subgraph
def generate_subgraph(model, start_nodes, identity_node_list):
subgraphs = []
main_graph = onnx.ModelProto()
main_graph.CopyFrom(model)
# remove added identity node before copy to subgraph
identity_node_index = []
for n in identity_node_list:
identity_node_index.append(get_identity_index_for_deleting(main_graph.graph.node, n))
identity_node_index.sort(reverse=True)
for i in reversed(range(len(main_graph.graph.node))):
try:
if i in identity_node_index:
del main_graph.graph.node[i]
except Exception:
print("error deleting identity node", i)
all_visited_nodes = []
model_count = len(start_nodes)
for start in reversed(start_nodes):
stack0 = [start]
visited0 = []
tranversed_node = 0
inputs0 = []
outputs0 = []
while stack0:
node = stack0.pop()
if node not in visited0:
tranversed_node += 1
visited0.append(node)
all_visited_nodes.append(node)
connected_nodes, inputs, outputs = find_all_connected_nodes(main_graph, node)
stack0 = stack0 + connected_nodes
inputs0 = inputs0 + inputs
outputs0 = outputs0 + outputs
subgraph = onnx.ModelProto()
subgraph.CopyFrom(main_graph)
# gather visited nodes
visited_nodes = []
for n in visited0:
visited_nodes.append(get_index(main_graph.graph.node, n))
visited_nodes.sort(reverse=True)
# gather visited inputs
visited_inputs = []
for n in inputs0:
visited_inputs.append(get_index(main_graph.graph.input, n))
visited_inputs.sort(reverse=True)
# gather visited outputs
visited_outputs = []
for n in outputs0:
visited_outputs.append(get_index(main_graph.graph.output, n))
visited_outputs.sort(reverse=True)
for i in reversed(range(len(main_graph.graph.node))):
try:
if i not in visited_nodes:
del subgraph.graph.node[i]
else:
del main_graph.graph.node[i]
except Exception:
print("error deleting node", i)
for i in reversed(range(len(main_graph.graph.input))):
try:
if i not in visited_inputs:
del subgraph.graph.input[i]
else:
del main_graph.graph.input[i]
except Exception:
print("error deleting inputs", i)
for i in reversed(range(len(main_graph.graph.output))):
try:
if i not in visited_outputs:
del subgraph.graph.output[i]
else:
del main_graph.graph.output[i]
except Exception:
print("error deleting outputs ", i)
print("model", str(model_count), " length ", len(subgraph.graph.node))
subgraphs.append(subgraph)
model_count -= 1
print("model", str(model_count), " length ", len(main_graph.graph.node))
subgraphs.append(main_graph)
# as the subgraphs were added in reverse order (the last split is added first), reverse the order back before return
subgraphs.reverse()
return subgraphs
def main():
# temporary hard coded the cutting edge structure
# TODO: move this info to a file (json?) and load the data from there.
input_model_name = "bert-tiny-uncased_L_3_H_128_A_2_V_30528_S_512_Dp_0.1.onnx"
stage_count = 3
cut0_input = {CutEdge("186"), CutEdge("71", {"273", "395"})}
cut1_input = {CutEdge("308"), CutEdge("71", {"395"})}
all_cut_inputs = [cut0_input, cut1_input]
model = onnx.load(input_model_name)
if len(model.graph.value_info) == 0:
model = onnx.shape_inference.infer_shapes(model)
print("original model length ", len(model.graph.node))
output_model_names = [os.path.splitext(input_model_name)[0] + "_" + str(i) + ".onnx" for i in range(stage_count)]
split_edge_groups, new_identity, need_shape_inference = insert_identity(model, all_cut_inputs)
# new edge is being added, need to re-inference shape
if need_shape_inference:
model = onnx.shape_inference.infer_shapes(model)
# after all need-to-be-cut edges identified, split the graph
new_sends, new_receives = split_graph(model, split_edge_groups)
remove_identity(model, new_identity)
sub_graphs = generate_subgraph(model, new_receives, new_identity)
for i in range(stage_count):
sub_graphs[i] = onnx.shape_inference.infer_shapes(sub_graphs[i])
onnx.save(sub_graphs[i], output_model_names[i])
print("save to file: ", output_model_names[i])
if __name__ == "__main__":
main()
| [
"[email protected]"
]
| |
ccf7ef2d3e547fc5865b9d05d078122acb39a3a0 | 9aaa39f200ee6a14d7d432ef6a3ee9795163ebed | /Algorithm/Python/624. Maximum Distance in Arrays.py | a024e9db5b888db8f0029cd55f4da18dc8085909 | []
| no_license | WuLC/LeetCode | 47e1c351852d86c64595a083e7818ecde4131cb3 | ee79d3437cf47b26a4bca0ec798dc54d7b623453 | refs/heads/master | 2023-07-07T18:29:29.110931 | 2023-07-02T04:31:00 | 2023-07-02T04:31:00 | 54,354,616 | 29 | 16 | null | null | null | null | UTF-8 | Python | false | false | 799 | py | # -*- coding: utf-8 -*-
# @Author: LC
# @Date: 2017-06-18 16:34:58
# @Last modified by: LC
# @Last Modified time: 2017-06-18 16:38:28
# @Email: [email protected]
# O(n) time
# traverse the arrays,
# keep the min number and max number among the traversed numbers so far and compare them with the current number
class Solution(object):
def maxDistance(self, arrays):
"""
:type arrays: List[List[int]]
:rtype: int
"""
result = 0
curr_min, curr_max = arrays[0][0], arrays[0][-1]
for i in xrange(1, len(arrays)):
result = max(result, abs(arrays[i][0] - curr_max), abs(arrays[i][-1] - curr_min))
curr_max = max(curr_max, arrays[i][-1])
curr_min = min(curr_min, arrays[i][0])
return result | [
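if __name__ == '__main__':
    # Quick check with the LeetCode 624 sample input (added for illustration):
    # min endpoint 1 and max endpoint 5 sit in different arrays, so |5 - 1| = 4.
    print(Solution().maxDistance([[1, 2, 3], [4, 5], [1, 2, 3]]))  # expected 4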
"[email protected]"
]
| |
ebb8c77391a9e3bd64b8b627a3638e7999db0425 | c698fb03aa2bf034904a0310931b473b6da66fdc | /com/study/algorithm/daily/73. Set Matrix Zeroes.py | 72c686e73159edddb22b711340fc520e9b884642 | []
| no_license | pi408637535/Algorithm | e46df1d07a519ab110e4f97755f461a1b2b7c308 | 75f4056ec6da01f7466a272871a7f7db579166b4 | refs/heads/master | 2021-08-29T19:19:53.368953 | 2021-08-22T16:30:32 | 2021-08-22T16:30:32 | 213,289,503 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | from typing import List
from typing import List
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
        if not matrix or not matrix[0]:
return
m, n = len(matrix), len(matrix[0])
flag = [[False] * n for i in range(m)]
def help(i, j):
flag[i][j] = True
down_i = i + 1
while down_i < m:
if matrix[down_i][j]:
flag[down_i][j] = True
matrix[down_i][j] = 0
down_i += 1
up_i = i - 1
while up_i >= 0:
if matrix[up_i][j]:
flag[up_i][j] = True
matrix[up_i][j] = 0
up_i -= 1
left_j = j - 1
while left_j >= 0:
if matrix[i][left_j]:
flag[i][left_j] = True
matrix[i][left_j] = 0
left_j -= 1
right_j = j + 1
while right_j < n:
if matrix[i][right_j]:
flag[i][right_j] = True
matrix[i][right_j] = 0
right_j += 1
for i in range(m):
for j in range(n):
if matrix[i][j] == 0 and not flag[i][j]:
help(i, j)
import copy
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
        if not matrix or not matrix[0]:
return
m, n = len(matrix), len(matrix[0])
matrix_copy = copy.deepcopy(matrix)
for i in range(m):
for j in range(n):
if matrix_copy[i][j] == 0:
for k in range(m):
matrix[k][j] = 0
for k in range(n):
matrix[i][k] = 0
# Space-optimized approach: O(m + n) extra space with row/column marker arrays
class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
        if not matrix or not matrix[0]:
return
m, n = len(matrix), len(matrix[0])
rows, columns = [False] * m, [False] * n
for i in range(m):
for j in range(n):
if not matrix[i][j]:
rows[i] = columns[j] = True
for i in range(m):
for j in range(n):
if rows[i] or columns[j]:
matrix[i][j] = 0
if __name__ == '__main__':
matrix = [[1, 1, 1], [1, 0, 1], [1, 1, 1]]
matrix = [[0,1,2,0],[3,4,5,2],[1,3,1,5]]
matrix = [[1, 2, 3, 4], [5, 0, 7, 8], [0, 10, 11, 12], [13, 14, 15, 0]]
Solution().setZeroes(matrix)
print(matrix) | [
"[email protected]"
]
| |
ea22858c3a3da17f4d8b8806cde34a8f102b33f5 | f4b60f5e49baf60976987946c20a8ebca4880602 | /lib/python2.7/site-packages/acimodel-1.3_2j-py2.7.egg/cobra/modelimpl/fvns/addrinst.py | 45a74f921e5aabecaaba2f160e60df1783f16095 | []
| no_license | cqbomb/qytang_aci | 12e508d54d9f774b537c33563762e694783d6ba8 | a7fab9d6cda7fadcc995672e55c0ef7e7187696e | refs/heads/master | 2022-12-21T13:30:05.240231 | 2018-12-04T01:46:53 | 2018-12-04T01:46:53 | 159,911,666 | 0 | 0 | null | 2022-12-07T23:53:02 | 2018-12-01T05:17:50 | Python | UTF-8 | Python | false | false | 6,765 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2016 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class AddrInst(Mo):
"""
The IP address namespace/IP address range contains unicast and multicast address blocks.
"""
meta = ClassMeta("cobra.model.fvns.AddrInst")
meta.moClassName = "fvnsAddrInst"
meta.rnFormat = "addrinst-%(name)s"
meta.category = MoCategory.REGULAR
meta.label = "IP Address Pool"
meta.writeAccessMask = 0x2001
meta.readAccessMask = 0x900000002001
meta.isDomainable = False
meta.isReadOnly = False
meta.isConfigurable = True
meta.isDeletable = True
meta.isContextRoot = False
meta.childClasses.add("cobra.model.fvns.RtAddrInst")
meta.childClasses.add("cobra.model.fvns.UcastAddrBlk")
meta.childClasses.add("cobra.model.fvns.RtVipAddrNs")
meta.childClasses.add("cobra.model.fault.Delegate")
meta.childNamesAndRnPrefix.append(("cobra.model.fvns.RtVipAddrNs", "rtinfraVipAddrNs-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fvns.RtAddrInst", "rtmgmtAddrInst-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fvns.UcastAddrBlk", "fromaddr-"))
meta.childNamesAndRnPrefix.append(("cobra.model.fault.Delegate", "fd-"))
meta.parentClasses.add("cobra.model.fv.Tenant")
meta.superClasses.add("cobra.model.naming.NamedObject")
meta.superClasses.add("cobra.model.pol.Obj")
meta.superClasses.add("cobra.model.pol.Ns")
meta.superClasses.add("cobra.model.fvns.AAddrInstP")
meta.superClasses.add("cobra.model.pol.Def")
meta.rnPrefixes = [
('addrinst-', True),
]
prop = PropMeta("str", "addr", "addr", 4962, PropCategory.REGULAR)
prop.label = "IP Address"
prop.isConfig = True
prop.isAdmin = True
meta.props.add("addr", prop)
prop = PropMeta("str", "addrType", "addrType", 19828, PropCategory.REGULAR)
prop.label = "Address Type"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "regular"
prop._addConstant("regular", "regular", 0)
prop._addConstant("vip_range", "vip_range", 1)
meta.props.add("addrType", prop)
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "descr", "descr", 5579, PropCategory.REGULAR)
prop.label = "Description"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("descr", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "local"
prop._addConstant("implicit", "implicit", 4)
prop._addConstant("local", "local", 0)
prop._addConstant("policy", "policy", 1)
prop._addConstant("replica", "replica", 2)
prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
meta.props.add("lcOwn", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "name", "name", 6566, PropCategory.REGULAR)
prop.label = "Name"
prop.isConfig = True
prop.isAdmin = True
prop.isCreateOnly = True
prop.isNaming = True
prop.range = [(1, 64)]
prop.regex = ['[a-zA-Z0-9_.:-]+']
meta.props.add("name", prop)
prop = PropMeta("str", "ownerKey", "ownerKey", 15230, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 128)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerKey", prop)
prop = PropMeta("str", "ownerTag", "ownerTag", 15231, PropCategory.REGULAR)
prop.label = "None"
prop.isConfig = True
prop.isAdmin = True
prop.range = [(0, 64)]
prop.regex = ['[a-zA-Z0-9\\!#$%()*,-./:;@ _{|}~?&+]+']
meta.props.add("ownerTag", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "skipGwVal", "skipGwVal", 16373, PropCategory.REGULAR)
prop.label = "Skip GW Validation"
prop.isConfig = True
prop.isAdmin = True
prop.defaultValue = False
prop.defaultValueStr = "no"
prop._addConstant("no", None, False)
prop._addConstant("yes", None, True)
meta.props.add("skipGwVal", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "uid", "uid", 8, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("uid", prop)
meta.namingProps.append(getattr(meta.props, "name"))
def __init__(self, parentMoOrDn, name, markDirty=True, **creationProps):
namingVals = [name]
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
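# Illustrative construction under the common tenant (the dn and property
# values here are placeholders, not part of the generated model):
#   pool = AddrInst('uni/tn-common', 'myPool', addr='10.0.0.1/24')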
# End of package file
# ##################################################
| [
"[email protected]"
]
| |
3f518d1941b93c0126a47ddb4ac3959257e6c413 | a830f67a97103b750ed2ced5997285532762f25d | /test_dot_env/test_dot_env/tests.py | 448aa445bfd50deca27632fa9e4bcd5b268942e8 | []
| no_license | Durant21/test_dot_env | 308d29ebbdafa24306cd89e02079d0adbb017fd7 | 40f6894ff7adc91c9870c96bbd44d62410e1eeb0 | refs/heads/master | 2022-12-21T00:04:56.754078 | 2019-10-02T18:59:38 | 2019-10-02T18:59:38 | 212,382,378 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import unittest
from pyramid import testing
class ViewTests(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def test_my_view(self):
from .views.default import my_view
request = testing.DummyRequest()
info = my_view(request)
self.assertEqual(info['project'], 'test_dot_env')
class FunctionalTests(unittest.TestCase):
def setUp(self):
from test_dot_env import main
app = main({})
from webtest import TestApp
self.testapp = TestApp(app)
def test_root(self):
res = self.testapp.get('/', status=200)
self.assertTrue(b'Pyramid' in res.body)
| [
"[email protected]"
]
| |
8b5bea4bf2920bf639d60e870173c108a9782dd6 | 0c78c4356f9df3a5c28adc2bdab7bad750b49d35 | /setup.py | 37220f5e42a00c2a7f2a4252bd8d8fed9ffad6a0 | [
"MIT"
]
| permissive | julianblue/agoro-field-boundary-detector | b985513475f4f32973b88d965ed5586d74ecbb01 | 9dd911df096ce865471ed0330174044f4172cc66 | refs/heads/master | 2023-06-06T16:58:46.177772 | 2021-06-22T16:13:27 | 2021-06-22T16:13:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 986 | py | """Setup module for this Python package."""
import pathlib
from setuptools import find_packages, setup
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
INSTALL_REQUIRES = [
"tqdm",
"torch~=1.8.1",
"torchvision~=0.9.1",
"pycocotools~=2.0.2",
"earthengine-api~=0.1.267",
"opencv-python~=4.5.2.52",
]
setup(
name="agoro_field_boundary_detector",
version="0.1.1",
description="Detect field boundaries using satellite imagery.",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/radix-ai/agoro-field-boundary-detector",
author="Radix",
author_email="[email protected]",
package_dir={"": "src"},
packages=find_packages(where="src", exclude=("data", "models", "notebooks", "tasks")),
license="LICENSE",
install_requires=INSTALL_REQUIRES,
include_package_data=True,
)
| [
"[email protected]"
]
| |
68f0f8c2e5a4740c22cfcd37baf82be5fea82e65 | 30e1dc84fe8c54d26ef4a1aff000a83af6f612be | /deps/src/libxml2-2.9.1/python/tests/validDTD.py | ee35c067b3a24970df32163eaa39c77880183bb3 | [
"BSD-3-Clause",
"MIT"
]
| permissive | Sitispeaks/turicreate | 0bda7c21ee97f5ae7dc09502f6a72abcb729536d | d42280b16cb466a608e7e723d8edfbe5977253b6 | refs/heads/main | 2023-05-19T17:55:21.938724 | 2021-06-14T17:53:17 | 2021-06-14T17:53:17 | 385,034,849 | 1 | 0 | BSD-3-Clause | 2021-07-11T19:23:21 | 2021-07-11T19:23:20 | null | UTF-8 | Python | false | false | 1,224 | py | #!/usr/bin/python -u
import libxml2
import sys
ARG = 'test string'
class ErrorHandler:
def __init__(self):
self.errors = []
def handler(self, msg, data):
if data != ARG:
raise Exception("Error handler did not receive correct argument")
self.errors.append(msg)
# Memory debug specific
libxml2.debugMemory(1)
dtd="""<!ELEMENT foo EMPTY>"""
valid="""<?xml version="1.0"?>
<foo></foo>"""
invalid="""<?xml version="1.0"?>
<foo><bar/></foo>"""
dtd = libxml2.parseDTD(None, 'test.dtd')
ctxt = libxml2.newValidCtxt()
e = ErrorHandler()
ctxt.setValidityErrorHandler(e.handler, e.handler, ARG)
# Test valid document
doc = libxml2.parseDoc(valid)
ret = doc.validateDtd(ctxt, dtd)
if ret != 1 or e.errors:
print("error doing DTD validation")
sys.exit(1)
doc.freeDoc()
# Test invalid document
doc = libxml2.parseDoc(invalid)
ret = doc.validateDtd(ctxt, dtd)
if ret != 0 or not e.errors:
print("Error: document supposed to be invalid")
doc.freeDoc()
dtd.freeDtd()
del dtd
del ctxt
# Memory debug specific
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
print("OK")
else:
print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
libxml2.dumpMemory()
| [
"[email protected]"
]
| |
e55d95c481ad73ca8bf90ae9a403979a13a469b0 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03821/s969389449.py | aee420fedd1c2214ea27397c8853912146d541c1 | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | N, *AB = [map(int, s.split()) for s in open(0)]
AB = list(AB)[::-1]  # process pairs from the last one backwards
bias = 0  # total increment applied so far
for A, B in AB:
    # add just enough so that (A + bias) becomes a multiple of B:
    # ceil((A + bias) / B) * B - (A + bias)
    bias += (bias + A + B - 1) // B * B - A - bias
print(bias)
| [
"[email protected]"
]
| |
f58cc5da39483f3d604be5366b4693ca0e122b4c | b38fb62950582664158327a2abf29c84cc59178b | /0x02-python-import_modules/2-args.py | 34615c54c5ce8850ce044cc4761e27557383568d | []
| no_license | MiguelCF06/holbertonschool-higher_level_programming | a39129cf355abe15e2caeb41cdef385ace53cfda | 0bc44343cb20c97221d3886bafda6db7235bc13a | refs/heads/master | 2022-12-18T00:12:52.498624 | 2020-09-24T17:00:24 | 2020-09-24T17:00:24 | 259,323,305 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | #!/usr/bin/python3
if __name__ == "__main__":
from sys import argv
if (len(argv)-1 == 0):
print("0 arguments.")
elif (len(argv)-1 == 1):
print("{} argument:".format(len(argv) - 1))
print("{}: {}".format(len(argv)-1, argv[1]))
else:
j = 1
print("{} arguments:".format(len(argv) - 1))
while j <= len(argv)-1:
print("{:d}: {}".format(j, argv[j]))
j = j + 1
| [
"[email protected]"
]
| |
e21afb3557c986e0856f76cc979a03e2b8372c33 | d1aa6e7d5631d7806531660febbd1f856eaeece7 | /python/paddle/utils/op_version.py | 575e5f40772eb08ea2c79d4ac73d7d04c5f9cfbf | [
"Apache-2.0"
]
| permissive | gongweibao/Paddle | 510cd4bc0ef89bc6ccee7b6b8eca52c00e014b77 | 60f9c60cd8196c66c391d79c35d341e9072f8838 | refs/heads/develop | 2023-03-13T17:43:35.675875 | 2022-09-20T08:46:15 | 2022-09-20T08:46:15 | 82,279,237 | 3 | 2 | Apache-2.0 | 2021-05-26T06:17:43 | 2017-02-17T09:16:16 | Python | UTF-8 | Python | false | false | 2,306 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..fluid import core
__all__ = []
def Singleton(cls):
_instance = {}
def _singleton(*args, **kargs):
if cls not in _instance:
_instance[cls] = cls(*args, **kargs)
return _instance[cls]
return _singleton
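# Because OpLastCheckpointChecker below is wrapped with @Singleton, repeated
# construction hands back one shared instance, e.g.:
#   OpLastCheckpointChecker() is OpLastCheckpointChecker()  # -> True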
class OpUpdateInfoHelper(object):
def __init__(self, info):
self._info = info
def verify_key_value(self, name=''):
result = False
key_funcs = {
core.OpAttrInfo: 'name',
core.OpInputOutputInfo: 'name',
}
if name == '':
result = True
elif type(self._info) in key_funcs:
if getattr(self._info, key_funcs[type(self._info)])() == name:
result = True
return result
@Singleton
class OpLastCheckpointChecker(object):
def __init__(self):
self.raw_version_map = core.get_op_version_map()
self.checkpoints_map = {}
self._construct_map()
def _construct_map(self):
for op_name in self.raw_version_map:
last_checkpoint = self.raw_version_map[op_name].checkpoints()[-1]
infos = last_checkpoint.version_desc().infos()
self.checkpoints_map[op_name] = infos
def filter_updates(self, op_name, type=core.OpUpdateType.kInvalid, key=''):
updates = []
if op_name in self.checkpoints_map:
for update in self.checkpoints_map[op_name]:
if (update.type() == type) or (type
== core.OpUpdateType.kInvalid):
if OpUpdateInfoHelper(update.info()).verify_key_value(key):
updates.append(update.info())
return updates
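# Example query (the op name is a placeholder): with the default type/key
# filter, every recorded update info for the op's last checkpoint is returned.
#   checker = OpLastCheckpointChecker()
#   infos = checker.filter_updates("conv2d")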
| [
"[email protected]"
]
| |
fd4c49e440c3a33e97213c80f5a63d98a62df18e | 6e800b3513537622df14bb598abe9c051116106c | /jianzhioffer/21Exchange.py | dc6f729dfe73659af2d56f13c6281d19f196046d | []
| no_license | Huxhh/LeetCodePy | fd72f03193d1f0b58c44bffc46a9a59ba9714215 | 6a99e84c5742ca68012b14da362f6c3255e10b21 | refs/heads/master | 2023-06-09T09:23:54.209025 | 2023-05-31T16:29:03 | 2023-05-31T16:29:03 | 148,866,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | # coding=utf-8
# author huxh
# time 2020/3/24 10:44 AM
def exchange(nums):
if not nums:
return []
l = 0
r = len(nums) - 1
while l < r:
while l < r and nums[l] & 1:
l += 1
while l < r and not nums[r] & 1:
r -= 1
nums[l], nums[r] = nums[r], nums[l]
return nums
def exchange2(nums):
    # Single pass: l marks the end of the odd-number prefix; every odd value
    # found at r is swapped back to position l.
if not nums:
return []
l = 0
r = 0
while r < len(nums):
if nums[r] & 1:
nums[r], nums[l] = nums[l], nums[r]
l += 1
r += 1
return nums
if __name__ == '__main__':
print(exchange2([1,3,4,6,7,9])) | [
"[email protected]"
]
| |
3c1e9ea8ef23088232a7a756dcf25ab6717a98cd | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/network/get_p2s_vpn_gateway.py | 4d19f9ddc0d7367e2247649325a5fe510349324e | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,171 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetP2sVpnGatewayResult',
'AwaitableGetP2sVpnGatewayResult',
'get_p2s_vpn_gateway',
'get_p2s_vpn_gateway_output',
]
@pulumi.output_type
class GetP2sVpnGatewayResult:
"""
P2SVpnGateway Resource.
"""
def __init__(__self__, custom_dns_servers=None, etag=None, id=None, is_routing_preference_internet=None, location=None, name=None, p2_s_connection_configurations=None, provisioning_state=None, tags=None, type=None, virtual_hub=None, vpn_client_connection_health=None, vpn_gateway_scale_unit=None, vpn_server_configuration=None):
if custom_dns_servers and not isinstance(custom_dns_servers, list):
raise TypeError("Expected argument 'custom_dns_servers' to be a list")
pulumi.set(__self__, "custom_dns_servers", custom_dns_servers)
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if is_routing_preference_internet and not isinstance(is_routing_preference_internet, bool):
raise TypeError("Expected argument 'is_routing_preference_internet' to be a bool")
pulumi.set(__self__, "is_routing_preference_internet", is_routing_preference_internet)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if p2_s_connection_configurations and not isinstance(p2_s_connection_configurations, list):
raise TypeError("Expected argument 'p2_s_connection_configurations' to be a list")
pulumi.set(__self__, "p2_s_connection_configurations", p2_s_connection_configurations)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_hub and not isinstance(virtual_hub, dict):
raise TypeError("Expected argument 'virtual_hub' to be a dict")
pulumi.set(__self__, "virtual_hub", virtual_hub)
if vpn_client_connection_health and not isinstance(vpn_client_connection_health, dict):
raise TypeError("Expected argument 'vpn_client_connection_health' to be a dict")
pulumi.set(__self__, "vpn_client_connection_health", vpn_client_connection_health)
if vpn_gateway_scale_unit and not isinstance(vpn_gateway_scale_unit, int):
raise TypeError("Expected argument 'vpn_gateway_scale_unit' to be a int")
pulumi.set(__self__, "vpn_gateway_scale_unit", vpn_gateway_scale_unit)
if vpn_server_configuration and not isinstance(vpn_server_configuration, dict):
raise TypeError("Expected argument 'vpn_server_configuration' to be a dict")
pulumi.set(__self__, "vpn_server_configuration", vpn_server_configuration)
@property
@pulumi.getter(name="customDnsServers")
def custom_dns_servers(self) -> Optional[Sequence[str]]:
"""
List of all customer specified DNS servers IP addresses.
"""
return pulumi.get(self, "custom_dns_servers")
@property
@pulumi.getter
def etag(self) -> str:
"""
A unique read-only string that changes whenever the resource is updated.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="isRoutingPreferenceInternet")
def is_routing_preference_internet(self) -> Optional[bool]:
"""
Enable Routing Preference property for the Public IP Interface of the P2SVpnGateway.
"""
return pulumi.get(self, "is_routing_preference_internet")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="p2SConnectionConfigurations")
def p2_s_connection_configurations(self) -> Optional[Sequence['outputs.P2SConnectionConfigurationResponse']]:
"""
List of all p2s connection configurations of the gateway.
"""
return pulumi.get(self, "p2_s_connection_configurations")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning state of the P2S VPN gateway resource.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualHub")
def virtual_hub(self) -> Optional['outputs.SubResourceResponse']:
"""
The VirtualHub to which the gateway belongs.
"""
return pulumi.get(self, "virtual_hub")
@property
@pulumi.getter(name="vpnClientConnectionHealth")
def vpn_client_connection_health(self) -> 'outputs.VpnClientConnectionHealthResponse':
"""
All P2S VPN clients' connection health status.
"""
return pulumi.get(self, "vpn_client_connection_health")
@property
@pulumi.getter(name="vpnGatewayScaleUnit")
def vpn_gateway_scale_unit(self) -> Optional[int]:
"""
The scale unit for this p2s vpn gateway.
"""
return pulumi.get(self, "vpn_gateway_scale_unit")
@property
@pulumi.getter(name="vpnServerConfiguration")
def vpn_server_configuration(self) -> Optional['outputs.SubResourceResponse']:
"""
        The VpnServerConfiguration to which the p2sVpnGateway is attached.
"""
return pulumi.get(self, "vpn_server_configuration")
class AwaitableGetP2sVpnGatewayResult(GetP2sVpnGatewayResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetP2sVpnGatewayResult(
custom_dns_servers=self.custom_dns_servers,
etag=self.etag,
id=self.id,
is_routing_preference_internet=self.is_routing_preference_internet,
location=self.location,
name=self.name,
p2_s_connection_configurations=self.p2_s_connection_configurations,
provisioning_state=self.provisioning_state,
tags=self.tags,
type=self.type,
virtual_hub=self.virtual_hub,
vpn_client_connection_health=self.vpn_client_connection_health,
vpn_gateway_scale_unit=self.vpn_gateway_scale_unit,
vpn_server_configuration=self.vpn_server_configuration)
def get_p2s_vpn_gateway(gateway_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetP2sVpnGatewayResult:
"""
P2SVpnGateway Resource.
API Version: 2020-11-01.
:param str gateway_name: The name of the gateway.
:param str resource_group_name: The resource group name of the P2SVpnGateway.
"""
__args__ = dict()
__args__['gatewayName'] = gateway_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:network:getP2sVpnGateway', __args__, opts=opts, typ=GetP2sVpnGatewayResult).value
return AwaitableGetP2sVpnGatewayResult(
custom_dns_servers=__ret__.custom_dns_servers,
etag=__ret__.etag,
id=__ret__.id,
is_routing_preference_internet=__ret__.is_routing_preference_internet,
location=__ret__.location,
name=__ret__.name,
p2_s_connection_configurations=__ret__.p2_s_connection_configurations,
provisioning_state=__ret__.provisioning_state,
tags=__ret__.tags,
type=__ret__.type,
virtual_hub=__ret__.virtual_hub,
vpn_client_connection_health=__ret__.vpn_client_connection_health,
vpn_gateway_scale_unit=__ret__.vpn_gateway_scale_unit,
vpn_server_configuration=__ret__.vpn_server_configuration)
@_utilities.lift_output_func(get_p2s_vpn_gateway)
def get_p2s_vpn_gateway_output(gateway_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetP2sVpnGatewayResult]:
"""
P2SVpnGateway Resource.
API Version: 2020-11-01.
:param str gateway_name: The name of the gateway.
:param str resource_group_name: The resource group name of the P2SVpnGateway.
"""
...
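# Usage sketch (the gateway and resource group names are illustrative
# placeholders, not values from this module):
#   result = get_p2s_vpn_gateway(gateway_name="gw", resource_group_name="rg")
#   pulumi.export("scaleUnit", result.vpn_gateway_scale_unit)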
| [
"[email protected]"
]
| |
d6851302274970ef6f014533abcffa0f53972792 | 9c35adeaa3c73f4d49af6cbe64a63cce1957475a | /views/room.py | 96e8f91783de065c5bf97729497c81bd417f7e95 | []
| no_license | longfeilove7/ClusterManager | a6e275cee8e5381019d539baef184cdb5ac4f078 | d2f8a973c2ddcd75395916974d733f6cfd5346a9 | refs/heads/master | 2020-03-16T22:58:50.085678 | 2019-01-03T01:17:47 | 2019-01-03T01:17:47 | 133,060,028 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,903 | py | """
Naming conventions: module_name, package_name, ClassName, method_name, ExceptionName, function_name, GLOBAL_VAR_NAME, instance_var_name, function_parameter_name, local_var_name.
"""
from rest_framework_swagger.views import get_swagger_view
from django.db.models import Count, Max, Avg, Min, Sum, F, Q, FloatField
from django.db import models
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, redirect
from django.shortcuts import HttpResponse
from django.http import HttpRequest, HttpResponseBadRequest
from HostManager import models
from django_celery_beat.models import PeriodicTask
from django_celery_beat.models import PeriodicTasks
from django_celery_beat.models import CrontabSchedule
from django_celery_beat.models import IntervalSchedule
from django_celery_beat.models import SolarSchedule
from django_celery_results.models import TaskResult
from celery import shared_task
from celery import task
from HostManager import tasks
from celery import Celery
from celery.schedules import crontab
from celery import app
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
import json
import datetime
import pytz
from django.utils import timezone
from itertools import chain
#import django_excel as excel
from HostManager.models import Question, Choice, Host, Rooms
from django import forms
# json can't service datetime format,so use the djangojsonencoder
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from decimal import *
#import os, sys, commands
import xmlrpc.server
import xmlrpc.client
from django.contrib.auth.decorators import login_required
# Create your views here.
class ClassRoom:
@login_required
def addRoom(request):
if request.method == 'GET':
room_list = models.Rooms.objects.all()
return render(request, 'add_room.html', {'room_list': room_list})
elif request.method == 'POST':
roomName = request.POST.get('roomName')
cabinetNumber = request.POST.get('cabinetNumber')
floor = request.POST.get('floor')
roomArea = request.POST.get('roomArea')
models.Rooms.objects.create(
roomName=roomName,
cabinetNumber=cabinetNumber,
floor=floor,
roomArea=roomArea)
return redirect('/add_room/')
@login_required
def roomInfoQuery(request):
info_list = models.Rooms.objects.all()
limit = request.GET.get('limit') # how many items per page
#print("the limit :"+limit)
        offset = request.GET.get('offset')  # index of the first row to return
#print("the offset :",offset)
        sort_column = request.GET.get('sort')  # which column to sort by
search = request.GET.get('search')
if sort_column:
print("the sort_column :" + sort_column)
order = request.GET.get('order') # ascending or descending
print("the order :" + order)
if order == "asc":
info_list = models.Rooms.objects.order_by(sort_column)
else:
info_list = models.Rooms.objects.order_by("-" + sort_column)
print(info_list)
        elif search:  # a search term was provided
info_list = models.Rooms.objects.filter(
Q(id__icontains=search)
| Q(roomName__icontains=search)
| Q(cabinetNumber__icontains=search)
| Q(floor__icontains=search)
| Q(roomArea__icontains=search))
else:
            info_list = models.Rooms.objects.all(
            )  # this default query must stay here
info_list_count = len(info_list)
print(info_list_count)
if not offset:
offset = 0
if not limit:
            limit = 10  # default page size; keep in sync with the front-end default
        paginator = Paginator(info_list, limit)  # paginate with Django's Paginator
page = int(int(offset) / int(limit) + 1)
print("the page:", page)
info_list_dict = {
"total": info_list_count,
"rows": []
        }  # the response must include the keys 'rows' and 'total': 'total' is the total row count, 'rows' holds the row dicts
        for item in paginator.page(page):
info_list_dict['rows'].append({
"id": item.id,
"roomName": item.roomName,
"cabinetNumber": item.cabinetNumber,
"floor": item.floor,
"roomArea": item.roomArea
})
info_list_json = json.dumps(info_list_dict)
return HttpResponse(
info_list_json,
content_type="application/json",
)
@login_required
def roomEdit(request, nid):
if request.method == 'POST':
roomName = request.POST.get('roomName')
cabinetNumber = request.POST.get('cabinetNumber')
floor = request.POST.get('floor')
roomArea = request.POST.get('roomArea')
models.Rooms.objects.filter(id=nid).update(
roomName=roomName,
cabinetNumber=cabinetNumber,
floor=floor,
roomArea=roomArea)
print(roomName)
return redirect('/add_room/')
@login_required
def roomDelete(request):
if request.method == 'POST':
ipmiID = request.POST.get('allValue')
obj = models.Host.objects.filter(roomName_id=ipmiID).first()
if obj:
dictDelete = [ipmiID, 0]
else:
models.Rooms.objects.filter(id=ipmiID).delete()
dictDelete = [ipmiID, 1]
data = json.dumps(dictDelete).encode()
return HttpResponse(data)
@login_required
def batchRoomDelete(request):
""""""
context = {}
if request.method == 'POST':
allValue = request.POST.get('allValue')
print("the allValue: ", allValue, type(allValue))
listAllValue = json.loads(allValue)
print("the listAllValue: ", listAllValue, type(listAllValue))
listDelete = []
for dictAllValue in listAllValue:
print(type(dictAllValue))
ipmiID = dictAllValue['id']
print(ipmiID)
obj = models.Host.objects.filter(roomName_id=ipmiID).first()
if obj:
dictDelete = [ipmiID, 0]
listDelete.append(dictDelete)
else:
models.Rooms.objects.filter(id=ipmiID).delete()
dictDelete = [ipmiID, 1]
listDelete.append(dictDelete)
data = json.dumps(listDelete).encode()
return HttpResponse(data)
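# Hypothetical URL wiring for these views (routes are illustrative, not taken
# from this repository's urls.py):
#   from django.conf.urls import url
#   urlpatterns = [
#       url(r'^add_room/$', ClassRoom.addRoom),
#       url(r'^room_info_query/$', ClassRoom.roomInfoQuery),
#   ]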
| [
"[email protected]"
]
| |
9f6df0b9d667e48f2a477fe0fe0a8f9e65ad8660 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/alien_dictionary.py | 5055b53bd8f24e5405e99010a8f3c21326e00665 | []
| no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | from collections import defaultdict
class Solution(object):
def alienOrder(self, words):
        def buildGraph(w1, w2):
            # Walk the two adjacent words together; the first differing
            # character yields one ordering edge w1[i] -> w2[j].
            # Returns False when the pair admits no valid ordering.
            i = j = 0
            while i < len(w1) and j < len(w2):
                if w1[i] != w2[j]:
                    graph[w1[i]].append(w2[j])
                    break
                else:
                    graph[w1[i]]  # touch the key so isolated letters still become nodes
                    i += 1
                    j += 1
            else:
                # No differing character was found, so w2 must not be a
                # proper prefix of w1 (e.g. ["abc", "ab"] is invalid).
                if len(w1) > len(w2):
                    return False
            while i < len(w1):
                graph[w1[i]]
                i += 1
            while j < len(w2):
                graph[w2[j]]
                j += 1
            return True
        graph = defaultdict(list)
        last = ""
        for i in xrange(len(words)):
            if not buildGraph(last, words[i]):
                return ""
            last = words[i]
def dfs(u, recStack):
visited.add(u)
recStack.add(u)
if u in graph:
for v in graph[u]:
if v not in visited:
if not dfs(v, recStack):
return False
elif v in recStack:
return False
recStack.remove(u)
ans.append(u)
return True
ans = []
visited = set()
for c in graph:
if c not in visited:
if not dfs(c, set()):
return ""
return "".join(ans[::-1]) | [
"[email protected]"
]
| |
90e2690474c76dfd0c66852f7808dfb0f2d8a6c3 | 93dd86c8d0eceaee8276a5cafe8c0bfee2a315d3 | /python/paddle/fluid/tests/unittests/test_imperative_layer_apply.py | f61d1ab888a51b2ebe4d1205b30fb84dfa4e7aeb | [
"Apache-2.0"
]
| permissive | hutuxian/Paddle | f8b7693bccc6d56887164c1de0b6f6e91cffaae8 | a1b640bc66a5cc9583de503e7406aeba67565e8d | refs/heads/develop | 2023-08-29T19:36:45.382455 | 2020-09-09T09:19:07 | 2020-09-09T09:19:07 | 164,977,763 | 8 | 27 | Apache-2.0 | 2023-06-16T09:47:39 | 2019-01-10T02:50:31 | Python | UTF-8 | Python | false | false | 2,996 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle
import paddle.nn as nn
import paddle.fluid as fluid
import numpy as np
class LeNetDygraph(fluid.dygraph.Layer):
def __init__(self, num_classes=10, classifier_activation='softmax'):
super(LeNetDygraph, self).__init__()
self.num_classes = num_classes
self.features = nn.Sequential(
nn.Conv2d(
1, 6, 3, stride=1, padding=1),
nn.ReLU(),
nn.Pool2D(2, 'max', 2),
nn.Conv2d(
6, 16, 5, stride=1, padding=0),
nn.ReLU(),
nn.Pool2D(2, 'max', 2))
if num_classes > 0:
self.fc = nn.Sequential(
nn.Linear(400, 120),
nn.Linear(120, 84), nn.Linear(84, 10),
                nn.Softmax())  # TODO: accept any activation
def forward(self, inputs):
x = self.features(inputs)
if self.num_classes > 0:
x = fluid.layers.flatten(x, 1)
x = self.fc(x)
return x
def init_weights(layer):
if type(layer) == nn.Linear:
new_weight = paddle.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.9)
layer.weight.set_value(new_weight)
new_bias = paddle.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.1)
layer.bias.set_value(new_bias)
elif type(layer) == nn.Conv2d:
new_weight = paddle.fill_constant(
layer.weight.shape, layer.weight.dtype, value=0.7)
layer.weight.set_value(new_weight)
new_bias = paddle.fill_constant(
layer.bias.shape, layer.bias.dtype, value=-0.2)
layer.bias.set_value(new_bias)
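# Note on the mechanism under test: Layer.apply(fn) walks the sublayers
# recursively and calls fn on each one (and, mirroring torch.nn.Module.apply,
# on the layer itself), which is why init_weights only needs to handle a
# single layer per call.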
class TestLayerApply(unittest.TestCase):
def test_apply_init_weight(self):
with fluid.dygraph.guard():
net = LeNetDygraph()
net.apply(init_weights)
for layer in net.sublayers():
if type(layer) == nn.Linear:
np.testing.assert_allclose(layer.weight.numpy(), 0.9)
np.testing.assert_allclose(layer.bias.numpy(), -0.1)
elif type(layer) == nn.Conv2d:
np.testing.assert_allclose(layer.weight.numpy(), 0.7)
np.testing.assert_allclose(layer.bias.numpy(), -0.2)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
b3c3968421eca0da3d4b2b7b48389e1ed8c6ac29 | c8efab9c9f5cc7d6a16d319f839e14b6e5d40c34 | /source/Clarification/DFS_BFS/127.单词接龙.py | 3fa9f475399d5187665f9e3f4d03a439ad1aaffc | [
"MIT"
]
| permissive | zhangwang0537/LeetCode-Notebook | 73e4a4f2c90738dea4a8b77883b6f2c59e02e9c1 | 1dbd18114ed688ddeaa3ee83181d373dcc1429e5 | refs/heads/master | 2022-11-13T21:08:20.343562 | 2020-04-09T03:11:51 | 2020-04-09T03:11:51 | 277,572,643 | 0 | 0 | MIT | 2020-07-06T14:59:57 | 2020-07-06T14:59:56 | null | UTF-8 | Python | false | false | 3,824 | py | # 给定两个单词(beginWord 和 endWord)和一个字典,找到从 beginWord 到 endWord 的最短转换序列的长度。转换需遵循如下规则:
#
# Given two words (beginWord and endWord) and a dictionary's word list, find
# the length of the shortest transformation sequence from beginWord to
# endWord, such that:
#
# Only one letter can be changed at a time.
# Each intermediate word must exist in the word list.
# Notes:
#
# Return 0 if there is no such transformation sequence.
# All words have the same length.
# All words consist of lowercase letters only.
# There are no duplicate words in the word list.
# You may assume beginWord and endWord are non-empty and distinct.
# Example 1:
#
# Input:
# beginWord = "hit",
# endWord = "cog",
# wordList = ["hot","dot","dog","lot","log","cog"]
#
# Output: 5
#
# Explanation: one shortest transformation sequence is
#              "hit" -> "hot" -> "dot" -> "dog" -> "cog", of length 5.
# Example 2:
#
# Input:
# beginWord = "hit"
# endWord = "cog"
# wordList = ["hot","dot","dog","lot","log"]
#
# Output: 0
#
# Explanation: endWord "cog" is not in the word list, so no transformation
#              is possible.
from collections import defaultdict
class Solution(object):
def __init__(self):
self.length = 0
# Dictionary to hold combination of words that can be formed,
# from any given word. By changing one letter at a time.
self.all_combo_dict = defaultdict(list)
def visitWordNode(self, queue, visited, others_visited):
current_word, level = queue.pop(0)
for i in range(self.length):
# Intermediate words for current word
intermediate_word = current_word[:i] + "*" + current_word[i+1:]
# Next states are all the words which share the same intermediate state.
for word in self.all_combo_dict[intermediate_word]:
# If the intermediate state/word has already been visited from the
# other parallel traversal this means we have found the answer.
if word in others_visited:
return level + others_visited[word]
if word not in visited:
# Save the level as the value of the dictionary, to save number of hops.
visited[word] = level + 1
queue.append((word, level + 1))
return None
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: List[str]
:rtype: int
"""
if endWord not in wordList or not endWord or not beginWord or not wordList:
return 0
# Since all words are of same length.
self.length = len(beginWord)
for word in wordList:
for i in range(self.length):
# Key is the generic word
# Value is a list of words which have the same intermediate generic word.
self.all_combo_dict[word[:i] + "*" + word[i+1:]].append(word)
        # Queues for bidirectional BFS
queue_begin = [(beginWord, 1)] # BFS starting from beginWord
queue_end = [(endWord, 1)] # BFS starting from endWord
# Visited to make sure we don't repeat processing same word
visited_begin = {beginWord: 1}
visited_end = {endWord: 1}
ans = None
        # We do a bidirectional search, one pointer starting from the begin
        # word and one from the end word, hopping one node at a time.
while queue_begin and queue_end:
# One hop from begin word
ans = self.visitWordNode(queue_begin, visited_begin, visited_end)
if ans:
return ans
# One hop from end word
ans = self.visitWordNode(queue_end, visited_end, visited_begin)
if ans:
return ans
return 0
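# Quick check using Example 1 above; expected output: 5.
if __name__ == "__main__":
    print(Solution().ladderLength(
        "hit", "cog", ["hot", "dot", "dog", "lot", "log", "cog"]))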
| [
"[email protected]"
]
| |
3b64861964aacf042ab29afae8f2a3f49608ae1b | f7d0f201f9e4730e334ccd1c0050831af46110c7 | /problem001.py | 89efcbea327cf84c98d78a3d2311ab572552843c | []
| no_license | 1UnboundedSentience/projecteuler | 4f0d0b1a7d289e344543caa7f5695743e122dd53 | 1fd4184a3de9aea07bffa827404a3fdc07178edf | refs/heads/master | 2021-01-15T23:36:04.303115 | 2012-01-13T00:43:20 | 2012-01-13T00:43:20 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 434 | py | """
If we list all the natural numbers below 10 that are multiples of 3 or 5, we
get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
From http://projecteuler.net/index.php?section=problems&id=1
"""
def problem001(max):
return sum([i for i in range(max) if i % 3 == 0 or i % 5 == 0])
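# Cross-check via inclusion-exclusion: add the multiples of 3 and of 5, then
# subtract the multiples of 15, which were counted twice. tri(k) is the
# closed-form sum of the multiples of k below max.
def problem001_closed_form(max):
    def tri(k):
        m = (max - 1) // k
        return k * m * (m + 1) // 2
    return tri(3) + tri(5) - tri(15)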
if __name__ == '__main__':
assert problem001(10) == 23
print problem001(1000)
| [
"[email protected]"
]
| |
057df236c8787cde17bb88efa4a7e8f67e6a7230 | e781b0dfd0a193fa229c81dd816f8977529e9c47 | /plenum/test/checkpoints/test_checkpoint_stable_while_unstashing.py | 6eba14a195d8dbca6d3981a7a6b9bfae4d7f566b | [
"Apache-2.0"
]
| permissive | ddntechssi/indy-plenum | b8a2ac597b8249994fa0b9e0aa3bb7965c02a693 | 16868467e1340a5557f7d610370dce5a59c6097b | refs/heads/master | 2020-04-30T04:35:54.054594 | 2019-03-19T18:41:28 | 2019-03-19T18:41:28 | 176,614,246 | 1 | 0 | Apache-2.0 | 2019-03-19T23:27:13 | 2019-03-19T23:27:13 | null | UTF-8 | Python | false | false | 3,072 | py | from plenum.test.checkpoints.helper import chkChkpoints, check_stashed_chekpoints
from plenum.test.delayers import ppDelay, msg_rep_delay
from plenum.test.helper import sdk_send_random_and_check, assertExp
from plenum.test.node_catchup.helper import waitNodeDataEquality
from plenum.test.stasher import delay_rules
from stp_core.loop.eventually import eventually
CHK_FREQ = 5
nodeCount = 7 # it's crucial for this test to have f > 1
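# With N = 7 nodes a BFT pool tolerates f = 2 faulty nodes (N = 3f + 1), so
# the single lagging node below stays within the fault budget.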
def test_stabilize_checkpoint_while_unstashing_when_missing_pre_prepare(looper,
chkFreqPatched,
reqs_for_checkpoint,
txnPoolNodeSet,
sdk_pool_handle,
sdk_wallet_client):
# Prepare nodes
lagging_node = txnPoolNodeSet[-1]
    lagging_master_replica = lagging_node.master_replica
rest_nodes = txnPoolNodeSet[:-1]
# 1. send enough requests so that just 1 is left for checkpoint stabilization
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, reqs_for_checkpoint - 1)
# 2. delay PrePrepare on 1 node so that prepares and commits will be stashed
with delay_rules(lagging_node.nodeIbStasher, ppDelay()):
with delay_rules(lagging_node.nodeIbStasher, msg_rep_delay()):
sdk_send_random_and_check(looper, txnPoolNodeSet, sdk_pool_handle,
sdk_wallet_client, 1)
# all good nodes stabilized checkpoint
looper.run(eventually(chkChkpoints, rest_nodes, 1, 0))
# bad node received checkpoints from all nodes but didn't stabilize it
looper.run(eventually(check_stashed_chekpoints, lagging_node, len(rest_nodes)))
looper.run(eventually(chkChkpoints, [lagging_node], 1, None))
# bad node has all commits and prepares for the last request stashed
looper.run(eventually(
lambda: assertExp(
                    (0, CHK_FREQ) in lagging_master_replica.preparesWaitingForPrePrepare and
                    len(lagging_master_replica.preparesWaitingForPrePrepare[(0, CHK_FREQ)]) == len(rest_nodes) - 1
)
))
looper.run(eventually(
lambda: assertExp(
                    (0, CHK_FREQ) in lagging_master_replica.commitsWaitingForPrepare and
                    len(lagging_master_replica.commitsWaitingForPrepare[(0, CHK_FREQ)]) == len(rest_nodes)
)
))
# 3. the delayed PrePrepare is processed, and stashed prepares and commits are unstashed
# checkpoint will be stabilized during unstashing, and the request will be ordered
looper.run(eventually(chkChkpoints, [lagging_node], 1, 0))
waitNodeDataEquality(looper, *txnPoolNodeSet, customTimeout=5)
| [
"[email protected]"
]
| |
65370d33521692660b05e62f78118613a7c53563 | 68d464fc0067f32b6d57a50670917bf530df3441 | /httpc/__init__.py | 779f4fcf5d09b3986f186d0d49e810123b6b16ad | []
| no_license | bman7903/zkart | 9f1d5f93b8d36a110aaaffcff5d4d440df9b71e0 | 2665ed4d658135fd68d3036b7c4b9c4de59c384b | refs/heads/master | 2021-01-14T08:09:56.863410 | 2017-02-14T23:20:15 | 2017-02-14T23:20:15 | 81,998,924 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | # -*- coding: utf-8 -*-
from . import webc
| [
"root@localhost"
]
| root@localhost |
d0a159f9fc95c2e73fc27e84283c2e3de2f1c27c | f07a42f652f46106dee4749277d41c302e2b7406 | /Data Set/bug-fixing-5/212a1d1010163e9929ec554f66ac5dcc80bedb92-<xs>-bug.py | 4b32b4cae26ae5f156af794f208f9d4593c9fd4d | []
| no_license | wsgan001/PyFPattern | e0fe06341cc5d51b3ad0fe29b84098d140ed54d1 | cc347e32745f99c0cd95e79a18ddacc4574d7faa | refs/heads/main | 2023-08-25T23:48:26.112133 | 2021-10-23T14:11:22 | 2021-10-23T14:11:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,521 | py | def xs(self, key, axis=0, level=None, drop_level=True):
"\n Returns a cross-section (row(s) or column(s)) from the\n Series/DataFrame. Defaults to cross-section on the rows (axis=0).\n\n Parameters\n ----------\n key : object\n Some label contained in the index, or partially in a MultiIndex\n axis : int, default 0\n Axis to retrieve cross-section on\n level : object, defaults to first n levels (n=1 or len(key))\n In case of a key partially contained in a MultiIndex, indicate\n which levels are used. Levels can be referred by label or position.\n drop_level : boolean, default True\n If False, returns object with same levels as self.\n\n Examples\n --------\n >>> df\n A B C\n a 4 5 2\n b 4 0 9\n c 9 7 3\n >>> df.xs('a')\n A 4\n B 5\n C 2\n Name: a\n >>> df.xs('C', axis=1)\n a 2\n b 9\n c 3\n Name: C\n\n >>> df\n A B C D\n first second third\n bar one 1 4 1 8 9\n two 1 7 5 5 0\n baz one 1 6 6 8 0\n three 2 5 3 5 3\n >>> df.xs(('baz', 'three'))\n A B C D\n third\n 2 5 3 5 3\n >>> df.xs('one', level=1)\n A B C D\n first third\n bar 1 4 1 8 9\n baz 1 6 6 8 0\n >>> df.xs(('baz', 2), level=[0, 'third'])\n A B C D\n second\n three 5 3 5 3\n\n Returns\n -------\n xs : Series or DataFrame\n\n Notes\n -----\n xs is only for getting, not setting values.\n\n MultiIndex Slicers is a generic way to get/set values on any level or\n levels. It is a superset of xs functionality, see\n :ref:`MultiIndex Slicers <advanced.mi_slicers>`\n "
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if (level is not None):
(loc, new_ax) = labels.get_loc_level(key, level=level, drop_level=drop_level)
indexer = ([slice(None)] * self.ndim)
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if (axis == 1):
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
(loc, new_index) = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if (loc.dtype == np.bool_):
(inds,) = loc.nonzero()
return self._take(inds, axis=axis)
else:
return self._take(loc, axis=axis)
if (not is_scalar(loc)):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
if ((not is_list_like(new_values)) or (self.ndim == 1)):
return com.maybe_box_datetimelike(new_values)
result = self._constructor_sliced(new_values, index=self.columns, name=self.index[loc], dtype=new_values.dtype)
else:
result = self.iloc[loc]
result.index = new_index
result._set_is_copy(self, copy=(not result._is_view))
return result | [
"[email protected]"
]
| |
68fe1cc9388b76c4e397bfc4c36a42288ee36988 | d375819f9de5760acc860af433b87ed52cfe64e8 | /wyggles/sprite/engine.py | 38a1ea5fb4473586c9053657ed5d1c1649fb3d6b | []
| no_license | kfields/wyggles-old | a6bfc568cd470447da2aaae1a6ad7ca4ca901858 | cb9f7ea9ef47e4b951c8a498952d904f28030317 | refs/heads/master | 2022-04-22T21:11:34.153235 | 2020-03-15T23:08:52 | 2020-03-15T23:08:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,228 | py | import math
import os
#import random
from random import random
import sys
from .layer import Layer
from wyggles.mathutils import *
from .collision import Collision
worldMinX = 0
worldMinY = 0
worldMaxX = 640
worldMaxY = 480
def without(source, element):
    # Return a copy of `source` with every occurrence of `element` removed.
    temp = source[:]
    try:
        while temp:
            temp.remove(element)
    except ValueError:  # element no longer present
        return temp
    return temp
def materializeRandomFromCenter(sprite):
    halfMaxX = worldMaxX / 2
    halfMaxY = worldMaxY / 2
diameter = 400
radius = diameter / 2
sprite.materializeAt( (halfMaxX - radius) + (random() * diameter), (halfMaxY - radius) + (random() * diameter))
class SpriteEngine():
def __init__(self):
self.root = Layer("root")
#
self.actors = []
self.beacons = []
self.bodies = []
self.collisions = []
self.idCounter = 0
#
self.gravityX = 0
#self.gravityY = 9.8 ;
self.gravityY = 0
def addActor(self, actor):
self.actors.append(actor)
def removeActor(self, actor) :
#self.actors = self.actors.without(actor)
self.actors.remove(actor)
def addBeacon(self, beacon) :
self.beacons.append(beacon) ;
def removeBeacon(self, beacon):
#self.beacons = self.beacons.without(beacon)
self.beacons.remove(beacon)
def addBody(self, body):
self.bodies.append(body)
def removeBody(self, body):
#self.bodies = self.bodies.without(body)
self.bodies.remove(body)
def addCollision(self, collision):
self.collisions.append(collision)
def removeCollision(self, collision):
#self.collisions = self.collisions.without(collision)
self.collisions.remove(collision)
def findCollision(self, b1, b2):
collision = None
for collision in self.collisions:
if(collision.b1 == b1 and collision.b2 == b2 or collision.b1 == b2 and collision.b2 == b1):
return collision ;
return None ;
def step(self, dt):
        dt = .1  # force a fixed timestep regardless of the caller-supplied dt
inv_dt = 0
if(dt > 0.0):
inv_dt = 1.0 / dt
b = None
#
self.broadphase() ;
#
for b in self.bodies:
if(b.invMass == 0.0):
continue
b.velX += dt * (self.gravityX + b.invMass * b.forceX)
b.velY += dt * (self.gravityY + b.invMass * b.forceY)
b.angularVel += dt * b.invI * b.torque
# ... insert penetration constraints here ...
for collision in self.collisions:
if(not collision.touched):
continue
collision.preStep(inv_dt)
#
iterations = 1
i = 0;
while(i < iterations):
i = i + 1
for collision in self.collisions:
if(not collision.touched):
continue
collision.applyImpulse()
#
for collision in self.collisions:
if(not collision.touched):
continue
collision.postStep()
#
for b in self.bodies:
if(b.invMass == 0.0):
continue
b.setPos(b.x + dt * (b.velX + b.biasedVelX), b.y + dt * (b.velY + b.biasedVelY)) ;
b.rotation += dt * (b.angularVel + b.biasedAngularVel);
#Bias velocities are reset to zero each step.
b.biasedVelX = 0
b.biasedVelY = 0
b.biasedAngularVel = 0
b.forceX = 0
b.forceY = 0
b.torque = 0
#
b.step()
#
for actor in self.actors:
actor.step()
#
#self.renderer.render()
self.render()
def broadphase(self):
b1 = None
b2 = None
for b1 in self.bodies:
for b2 in self.bodies:
if(b1 == b2):
continue
if (b1.invMass == 0.0 and b2.invMass == 0.0):
continue
if(not b1.intersects(b2)):
continue
collision = self.findCollision(b1, b2)
if(collision == None):
collision = Collision(b1,b2)
self.addCollision(collision)
collision.collide() ;
def query(self, x, y, distance):
beacon = None
result = None
for beacon in self.beacons:
dist = distance2d(x, y, beacon.x, beacon.y)
if(dist < distance):
if(result == None):
result = [beacon]
else:
result.append(beacon)
return result
    def genId(self, name):
        new_id = name + str(self.idCounter)
        self.idCounter += 1
        return new_id
#
def render(self):
self.root.render()
def get_root(self):
return self.root
#fixme
spriteEngine = SpriteEngine()
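# Usage sketch: a host application registers bodies/actors and then drives the
# module-level singleton at a fixed rate, e.g.
#   spriteEngine.addBody(body)
#   while running:
#       spriteEngine.step(0.1)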
| [
"[email protected]"
]
| |
8e747d7a1899319d6f0f55134ae42cec7a6a1c63 | 017d82f3e3040fbce485a0135c062061648f91f0 | /013/013.py | c4839965ceb730023dc41e005ff02f424be4d096 | []
| no_license | bashwork/project-euler | 404b7e2bdd99888cdb2dfae6b2272ed3730a5aa0 | 84cc18968a618a17584c4455f94e2e57f9def2cb | refs/heads/master | 2016-09-05T21:17:04.754346 | 2015-02-06T19:49:03 | 2015-02-06T19:49:03 | 379,188 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,641 | py | vector = [
37107287533902102798797998220837590246510135740250,
46376937677490009712648124896970078050417018260538,
74324986199524741059474233309513058123726617309629,
91942213363574161572522430563301811072406154908250,
23067588207539346171171980310421047513778063246676,
89261670696623633820136378418383684178734361726757,
28112879812849979408065481931592621691275889832738,
44274228917432520321923589422876796487670272189318,
47451445736001306439091167216856844588711603153276,
70386486105843025439939619828917593665686757934951,
62176457141856560629502157223196586755079324193331,
64906352462741904929101432445813822663347944758178,
92575867718337217661963751590579239728245598838407,
58203565325359399008402633568948830189458628227828,
80181199384826282014278194139940567587151170094390,
35398664372827112653829987240784473053190104293586,
86515506006295864861532075273371959191420517255829,
71693888707715466499115593487603532921714970056938,
54370070576826684624621495650076471787294438377604,
53282654108756828443191190634694037855217779295145,
36123272525000296071075082563815656710885258350721,
45876576172410976447339110607218265236877223636045,
17423706905851860660448207621209813287860733969412,
81142660418086830619328460811191061556940512689692,
51934325451728388641918047049293215058642563049483,
62467221648435076201727918039944693004732956340691,
15732444386908125794514089057706229429197107928209,
55037687525678773091862540744969844508330393682126,
18336384825330154686196124348767681297534375946515,
80386287592878490201521685554828717201219257766954,
78182833757993103614740356856449095527097864797581,
16726320100436897842553539920931837441497806860984,
48403098129077791799088218795327364475675590848030,
87086987551392711854517078544161852424320693150332,
59959406895756536782107074926966537676326235447210,
69793950679652694742597709739166693763042633987085,
41052684708299085211399427365734116182760315001271,
65378607361501080857009149939512557028198746004375,
35829035317434717326932123578154982629742552737307,
94953759765105305946966067683156574377167401875275,
88902802571733229619176668713819931811048770190271,
25267680276078003013678680992525463401061632866526,
36270218540497705585629946580636237993140746255962,
24074486908231174977792365466257246923322810917141,
91430288197103288597806669760892938638285025333403,
34413065578016127815921815005561868836468420090470,
23053081172816430487623791969842487255036638784583,
11487696932154902810424020138335124462181441773470,
63783299490636259666498587618221225225512486764533,
67720186971698544312419572409913959008952310058822,
95548255300263520781532296796249481641953868218774,
76085327132285723110424803456124867697064507995236,
37774242535411291684276865538926205024910326572967,
23701913275725675285653248258265463092207058596522,
29798860272258331913126375147341994889534765745501,
18495701454879288984856827726077713721403798879715,
38298203783031473527721580348144513491373226651381,
34829543829199918180278916522431027392251122869539,
40957953066405232632538044100059654939159879593635,
29746152185502371307642255121183693803580388584903,
41698116222072977186158236678424689157993532961922,
62467957194401269043877107275048102390895523597457,
23189706772547915061505504953922979530901129967519,
86188088225875314529584099251203829009407770775672,
11306739708304724483816533873502340845647058077308,
82959174767140363198008187129011875491310547126581,
97623331044818386269515456334926366572897563400500,
42846280183517070527831839425882145521227251250327,
55121603546981200581762165212827652751691296897789,
32238195734329339946437501907836945765883352399886,
75506164965184775180738168837861091527357929701337,
62177842752192623401942399639168044983993173312731,
32924185707147349566916674687634660915035914677504,
99518671430235219628894890102423325116913619626622,
73267460800591547471830798392868535206946944540724,
76841822524674417161514036427982273348055556214818,
97142617910342598647204516893989422179826088076852,
87783646182799346313767754307809363333018982642090,
10848802521674670883215120185883543223812876952786,
71329612474782464538636993009049310363619763878039,
62184073572399794223406235393808339651327408011116,
66627891981488087797941876876144230030984490851411,
60661826293682836764744779239180335110989069790714,
85786944089552990653640447425576083659976645795096,
66024396409905389607120198219976047599490197230297,
64913982680032973156037120041377903785566085089252,
16730939319872750275468906903707539413042652315011,
94809377245048795150954100921645863754710598436791,
78639167021187492431995700641917969777599028300699,
15368713711936614952811305876380278410754449733078,
40789923115535562561142322423255033685442488917353,
44889911501440648020369068063960672322193204149535,
41503128880339536053299340368006977710650566631954,
81234880673210146739058568557934581403627822703280,
82616570773948327592232845941706525094512325230608,
22918802058777319719839450180888072429661980811197,
77158542502016545090413245809786882778948721859617,
72107838435069186155435662884062257473692284509516,
20849603980134001723930671666823555245252804609722,
53503534226472524250874054075591789781264330331690,
]
print str(sum(vector))[:10]
| [
"[email protected]"
]
| |
244bb265da9da87390906151f61c5aa088940dec | fd65851c7977176cfa69056ea5d63ca529e74271 | /components/google-cloud/google_cloud_pipeline_components/container/experimental/gcp_launcher/utils/json_util.py | cdc9e256749386fb21acd07efaa92ce44cf45fc0 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT",
"BSD-2-Clause"
]
| permissive | NikeNano/pipelines | dad9f45267a7f4c495a30880dd6fe1570f26fa64 | 73804f8928ce671839d34800627b6d3ea9f820a7 | refs/heads/master | 2022-01-29T21:24:43.693120 | 2021-11-20T18:18:35 | 2021-11-20T18:18:35 | 221,051,451 | 1 | 1 | Apache-2.0 | 2021-04-23T20:07:11 | 2019-11-11T19:11:29 | Python | UTF-8 | Python | false | false | 1,565 | py | # Copyright 2021 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
# TODO(IronPan) This library can be removed once ifPresent is supported within concat[] in component YAML V2.
# Currently the component YAML will generate the payload with all API fields presented,
# and those fields will be left empty if user doesn't specify them in the Python.
def __remove_empty(j):
"""Remove the empty fields in the Json."""
if isinstance(j, list):
return list(filter(None, [__remove_empty(i) for i in j]))
if isinstance(j, dict):
final_dict = {}
for k, v in j.items():
if v:
final_dict[k] = __remove_empty(v)
return final_dict
return j
def recursive_remove_empty(j):
"""Recursively remove the empty fields in the Json until there is no empty fields and sub-fields."""
needs_update = True
while needs_update:
new_j = __remove_empty(j)
needs_update = json.dumps(new_j) != json.dumps(j)
j = new_j
return j
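# Behavior sketch: pruning cascades until a fixed point, e.g.
#   recursive_remove_empty({"a": {"b": []}, "c": 1})  ->  {"c": 1}
# ("b" is dropped first, which empties "a" on the following pass).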
| [
"[email protected]"
]
| |
12357a3852565d74b4832a5ccb00cc3298eb2a2f | fac68cda1a9e79d8f040ca632f0353ccb8d20c8c | /backtesting using zipline/zip2.py | 5269a754a5da563c9b213a17606482a38b07ac13 | []
| no_license | fagan2888/Algo-trading-strategy | 11a9b5f70f53492d5b407ac8593af6921a6d44c1 | 4450f5a28f069e0e695843b0f69197519fa5c1da | refs/heads/master | 2022-01-11T19:07:05.983331 | 2019-05-28T05:15:11 | 2019-05-28T05:15:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,671 | py |
import pytz
from datetime import datetime
from zipline.api import order, symbol, record, order_target
from zipline.algorithm import TradingAlgorithm
from zipline.data.loader import load_bars_from_yahoo
import pyexcel
# Load data manually from Yahoo! finance
start = datetime(2011, 1, 1, 0, 0, 0, 0, pytz.utc).date()
end = datetime(2012,1,1,0,0,0,0, pytz.utc).date()
data = load_bars_from_yahoo(stocks=['SPY'], start=start,end=end)
# zipline hook: called once before the backtest starts
def initialize(context):
context.security = symbol('SPY')
# zipline hook: called for every bar of data
def handle_data(context, data):
MA1 = data[context.security].mavg(50)
MA2 = data[context.security].mavg(100)
date = str(data[context.security].datetime)[:10]
current_price = data[context.security].price
current_positions = context.portfolio.positions[symbol('SPY')].amount
cash = context.portfolio.cash
value = context.portfolio.portfolio_value
current_pnl = context.portfolio.pnl
if (MA1 > MA2) and current_positions == 0:
number_of_shares = int(cash/current_price)
order(context.security, number_of_shares)
record(date=date,MA1 = MA1, MA2 = MA2, Price=
current_price,status="buy",shares=number_of_shares,PnL=current_pnl,cash=cash,value=value)
elif (MA1 < MA2) and current_positions != 0:
order_target(context.security, 0)
record(date=date,MA1 = MA1, MA2 = MA2, Price= current_price,status="sell",shares="--",PnL=current_pnl,cash=cash,value=value)
else:
record(date=date,MA1 = MA1, MA2 = MA2, Price= current_price,status="--",shares="--",PnL=current_pnl,cash=cash,value=value)
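# Sketch of how these hooks would be driven with the legacy zipline API that
# is imported above (untested here):
#   algo = TradingAlgorithm(initialize=initialize, handle_data=handle_data)
#   perf = algo.run(data)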
| [
"[email protected]"
]
| |
d35dde704e8464a8f2515d208537bf2f57d470c5 | 9c9c6b8deca524c9401dd24d19510d3843bebe4b | /disposing/librealsense/mobile_platform/strategy/lib/strategy.py | 815cba28f1faccae357b56b1a719abab79168a80 | [
"Apache-2.0",
"Zlib",
"BSL-1.0",
"LicenseRef-scancode-public-domain",
"BSD-3-Clause",
"LGPL-2.1-only",
"BSD-1-Clause",
"MIT"
]
| permissive | tku-iarc/wrs2020 | 3f6473c2f3077400527b5e3008ae8a6e88eb00d6 | a19d1106206e65f9565fa68ad91887e722d30eff | refs/heads/master | 2022-12-12T20:33:10.958300 | 2021-02-01T10:21:09 | 2021-02-01T10:21:09 | 238,463,359 | 3 | 8 | MIT | 2022-12-09T02:09:35 | 2020-02-05T14:00:16 | C++ | UTF-8 | Python | false | false | 18,796 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import math
import numpy as np
# lib
from lib.nodehandle import NodeHandle
from lib.pidcontrol import PIDControl,PIDControl_Y,PIDControl_Yaw
from lib.fuzzycontrol import FUZZYControl
from lib.counter import TimeCounter
# rostopic msg
from geometry_msgs.msg import Twist
from std_msgs.msg import Bool
# define behavior
MOBILE_ROBOT = 0
CORRECTION = 1
PLATFORM = 2
NEXT_POINT = 3
HOME = 4
MANUAL = 5
ROTATE = 6
GO_POINT = 7
RETURN_POINT = 8
CROSS = 9
INIT = 10
# FLAG
CONTROL = 'PIDCONTROL'
# CONTROL = 'FUZZYCONTROL'
IMU_FLAG = True
'''
HOME -> FIRST
INIT -> MOBILE -> CORRECTION_0 -> ROTATE_90 -> CORRECTION_90 -> PLATFORM
FIRST -> SECOND
NEXT -> ROTATE_0 -> CROSS -> MOBILE -> CORRECTION_0 -> ROTATE_90 -> CORRECTION_90 -> PLATFORM
POINT -> HOME
HOME -> ROTATE -> CROSS_FIRST -> MOBILE -> PLATFORM
'''
class Strategy(object):
    '''
    Offset track (currently disabled)
        prev_dis:
        prev_ang:
        prev_vel:
    CONTROL
        initPID: re-initialize so the PID terms do not keep accumulating
    QRCODE (currently unused)
        state:
        pre_state:
        not_find:
    ROTATE
        rotateAng: target angle
    CROSS
        timer: tick counter
    HOME
        homeFlag
            1: go home
            0: move forward
        homeTimes: records which stop point has been reached
    '''
def __init__(self):
self._param = NodeHandle()
if(CONTROL == 'PIDCONTROL'):
self.control = PIDControl()
self.controlY = PIDControl_Y()
self.controlYaw = PIDControl_Yaw()
elif(CONTROL == 'FUZZYCONTROL'):
self.control = FUZZYControl()
self.prev_dis = 0
self.prev_ang = 0
self.prev_vel = []
self.initPID = 0
self.state = 0
self.pre_state = 0
self.not_find = 0
# self._kp = 6
# self._ki = 0.1
# self._kd = 4.0
# self.prevIntegral = 0
# self.lastError = 0
''' rotate '''
self.rotateAng = self._param.errorRotate0
''' cross '''
self.timer = TimeCounter(time = self._param.crossTime)
''' home '''
self.homeFlag = 0
self.homeTimes = 0
def Process(self):
if(self._param.behavior == MOBILE_ROBOT):
if(self._param.loadParam):
self.Change_Behavior()
self.Mobile_Strategy()
elif(self._param.behavior == CORRECTION):
if(self._param.loadParam):
self.Change_Behavior()
self.Correction_Strategy()
elif(self._param.behavior == PLATFORM):
if(self._param.loadParam):
self.Change_Behavior()
self.Platform_Strategy()
elif(self._param.behavior == NEXT_POINT):
if(self._param.loadParam):
self.Change_Behavior()
self.Next_Point_Strategy()
elif(self._param.behavior == HOME):
if(self._param.loadParam):
self.Change_Behavior()
self.Home_Strategy()
print('HOME')
elif(self._param.behavior == MANUAL):
if(self._param.loadParam):
self.Change_Behavior()
self.state = 0
self.initPID = 0
self.controlYaw.Init()
self.controlY.Init()
print('MANUAL')
elif(self._param.behavior == ROTATE):
if(self._param.loadParam):
self.Change_Behavior()
self.Rotate_Strategy()
elif(self._param.behavior == GO_POINT):
if(self._param.loadParam):
self.Change_Behavior()
self.Go_Point_Strategy()
print('GO_POINT')
elif(self._param.behavior == RETURN_POINT):
if(self._param.loadParam):
self.Change_Behavior()
self.Return_Point_Strategy()
print('RETURN_POINT')
elif(self._param.behavior == CROSS):
if(self._param.loadParam):
self.Change_Behavior()
self.Cross_Strategy()
elif(self._param.behavior == INIT):
if(self._param.loadParam):
self.Change_Behavior()
self.Init_Strategy()
print('Init')
else:
print("Don't have Behavior")
self.Robot_Stop()
def Mobile_Strategy(self):
if(self._param.scanState):
count = self._param.scanState.count(1)
if(count):
scanNum = len(self._param.scanState)
if(count <= math.ceil((scanNum)*(2./3)) and self._param.stopPoint == 999):
self.state = 0
# Method 3
#if(CONTROL == 'PIDCONTROL'):
# x,y,yaw = self.control.Process(self._param.dis,self._param.ang,self._param.maxVel,self._param.minVel,self._param.velYaw)
#elif(CONTROL == 'FUZZYCONTROL'):
# x,y,yaw = self.control.Process(self._param.dis,self._param.ang)
# yaw = 0
#self.Robot_Vel([y,-x,yaw])
#print(y,-x,yaw)
# Method 4
# x,y,yaw = self.control.Process(self._param.dis,self._param.ang,self._param.maxVel,self._param.minVel,self._param.velYaw)
# if(abs(self._param.ang) > 10.0):
# if(self._param.ang > 0):
# x = -(self._param.minVel*math.cos(math.radians(self._param.ang)))*0.15
# y = -(self._param.minVel*math.sin(math.radians(self._param.ang)))*0.15
# # yaw = self._param.velYaw
# yaw = (self._param.velYaw+abs(yaw))
# else:
# x = -(self._param.minVel*math.cos(math.radians(self._param.ang)))*0.15
# y = (self._param.minVel*math.sin(math.radians(self._param.ang)))*0.15
# # yaw = -self._param.velYaw
# yaw = -(self._param.velYaw+abs(yaw))
# else:
# x,y,_ = self.control.Process(self._param.dis,self._param.ang,self._param.maxVel,self._param.minVel,self._param.velYaw)
# # x,y,_ = self.control.Process(self._param.dis,self._param.ang)
# yaw = 0
''' Method 5 '''
y = self.controlY.Process(self._param.dis,self._param.ang,self._param.minVel)
x = (self._param.minVel - abs(y))*math.cos(math.radians(self._param.ang)) - y*math.sin(math.radians(self._param.ang))
if(abs(self._param.dis) > self._param.errorMoibledis):
yaw = 0
else:
if(abs(self._param.ang) > self._param.errorMoibleAng):
yaw = self.controlYaw.Process(self._param.ang,self._param.velYaw)
else:
yaw = 0
if(self.homeFlag == 0):
self.Robot_Vel([x,y,yaw])
print(x,y,yaw)
else:
self.Robot_Vel([-x,y,yaw])
print(-x,y,yaw)
# self.prev_dis = self._param.dis
# self.prev_ang = self._param.ang
# self.prev_vel = [x,y,yaw]
elif(self._param.stopPoint != 999 and self._param.stopPoint != '91' and self._param.stopPoint != '90'):
print('STOP')
self.state = 1
self.Robot_Stop()
if(self.homeFlag == 1):
self._param.behavior = HOME
elif(self.homeTimes == int(self._param.stopPoint)):
self._param.behavior = CROSS
else:
self._param.behavior = CORRECTION
self.homeTimes += 1
self._param.stopPoint = 999
self.pre_state = self.state
else:
print('Offset track !!!!!!')
if(len(self.prev_vel)):
if(self.prev_vel[2] == 0):
x = -(self.prev_vel[0])*0.8
y = -self.prev_vel[1]*1.5
yaw = 0
else:
x = (self._param.minVel*math.cos(math.radians(self.prev_ang)))*0.5
y = self._param.minVel*math.sin(math.radians(self.prev_ang))
yaw = 0
else:
x = 0
y = 0
yaw = 0
print('No scan line')
# self.Robot_Vel([y,-x,yaw])
self.Robot_Stop()
else:
print('No Scan Info !!!!!!')
self.Robot_Stop()
def Correction_Strategy(self):
y = self.controlY.Process(self._param.dis,self._param.ang,self._param.minVel)
if(self._param.dis < self._param.errorCorrectionDis):
if(self._param.qrang is not None and self._param.qrang != 999):
RPang = self.Norm_Angle(self.rotateAng - self._param.qrang)
if(abs(RPang) > self._param.errorAng):
if(RPang > 0):
x = 0
y = 0
# yaw = self._param.velYaw
yaw = self._param.rotateYaw
else:
x = 0
y = 0
# yaw = -self._param.velYaw
yaw = -self._param.rotateYaw
self.Robot_Vel([x,y,yaw])
print('CORRECTION','FRONT',self._param.qrang)
else:
self.Robot_Stop()
self.Robot_Stop()
self.Robot_Stop()
print('CORRECTION',self.rotateAng,self._param.errorRotate0)
if(self.rotateAng == self._param.errorRotate0):
self._param.behavior = ROTATE
self.rotateAng = self._param.errorRotate90
else:
self._param.behavior = PLATFORM
self.rotateAng = self._param.errorRotate0
self.initPID = 1
self.not_find = 0
else:
print('CORRECTION not find')
if(self.not_find < 100):
self.not_find += 1
self.Robot_Stop()
else:
self.not_find = 0
if(self.rotateAng == self._param.errorRotate0):
self._param.behavior = ROTATE
self.rotateAng = self._param.errorRotate90
else:
self._param.behavior = PLATFORM
self.rotateAng = self._param.errorRotate0
self.initPID = 1
self._param.qrang = 999
else:
x = 0
yaw = 0
self.Robot_Vel([x,y,yaw])
print('CORRECTION','dis',y)
def Platform_Strategy(self):
print('PLATFORM')
self.state = 0
if(self.initPID):
self.controlYaw.Init()
self.controlY.Init()
self.initPID = 0
self.Robot_Stop()
if(self.homeFlag == 0):
self.Dual_Arm_Start()
def Next_Point_Strategy(self):
print('NEXT_POINT')
self.Robot_Stop()
self._param.behavior = ROTATE
self.rotateAng = self._param.errorRotate0
def Rotate_Strategy(self):
# yaw = self.controlYaw(self._param.qrang,self._param.velYaw)
if(self._param.qrang is not None and self._param.qrang != 999):
RPang = self.Norm_Angle(self.rotateAng - self._param.qrang)
if(abs(RPang) > self._param.errorAng and RPang > self._param.rotateSlowAng):
if(RPang > 0):
x = 0
y = 0
# yaw = self._param.velYaw
yaw = self._param.rotateYaw
else:
x = 0
y = 0
# yaw = -self._param.velYaw
yaw = -self._param.rotateYaw
self.Robot_Vel([x,y,yaw])
print('ROTATE','angle',self._param.qrang)
elif((abs(RPang) > self._param.errorAng and RPang <= self._param.rotateSlowAng)):
if(RPang > 0):
x = 0
y = 0
# yaw = self._param.velYaw
yaw = self._param.rotateYaw*0.8
else:
x = 0
y = 0
# yaw = -self._param.velYaw
yaw = -self._param.rotateYaw*0.8
self.Robot_Vel([x,y,yaw])
print('ROTATE','angle',self._param.qrang)
else:
self.Robot_Stop()
self.Robot_Stop()
self.Robot_Stop()
if(self.rotateAng == self._param.errorRotate90):
self._param.behavior = CORRECTION
                    print('ROTATE CORRECTION')
else:
self._param.behavior = CROSS
print('ROTATE CROSS')
self.not_find = 0
else:
print('ROTATE not find')
if(self.not_find < 100):
self.not_find += 1
# self.Robot_Stop()
else:
self.not_find = 0
if(self.rotateAng == self._param.errorRotate90):
self._param.behavior = CORRECTION
                    print('ROTATE CORRECTION')
else:
self._param.behavior = CROSS
print('ROTATE CROSS')
self._param.qrang = 999
def Go_Point_Strategy(self):
time,state = self.timer.Process()
if(state):
self.Robot_Stop()
self._param.behavior = CORRECTION
else:
x = self._param.minVel
y = 0
yaw = 0
self.Robot_Vel([x,y,yaw])
def Return_Point_Strategy(self):
time,state = self.timer.Process()
if(state):
self.Robot_Stop()
self._param.behavior = ROTATE
self.rotateAng = self._param.errorRotate0
else:
x = -self._param.minVel
y = 0
yaw = 0
self.Robot_Vel([x,y,yaw])
def Cross_Strategy(self):
print('CROSS')
time,state = self.timer.Process()
if(state):
self.Robot_Stop()
self._param.behavior = MOBILE_ROBOT
self.rotateAng = self._param.errorRotate0
elif(state == 0 and self.homeFlag == 0):
x = self._param.minVel
y = 0
yaw = 0
self.Robot_Vel([x,y,yaw])
elif(state == 0 and self.homeFlag == 1):
x = -self._param.minVel
y = 0
yaw = 0
self.Robot_Vel([x,y,yaw])
# if(self.pre_state == 1 and self.state == 0):
# if(self._param.scanState):
# if(self._param.qrang is not None and self._param.qrang != 999):
# x = self._param.minVel
# y = 0
# yaw = 0
# self.not_find = 0
# # self.Robot_Vel([y,-x,yaw])
# self.Robot_Vel([x,y,yaw])
# elif(self.not_find > 60):
# self.Robot_Stop()
# self._param.behavior = MOBILE_ROBOT
# self.not_find = 0
# # print('next point not find line')
# else:
# self.not_find +=1
# x = self._param.minVel
# y = 0
# yaw = 0
# # self.Robot_Vel([y,-x,yaw])
# self.Robot_Vel([x,y,yaw])
# self._param.qrang = 999
# else:
# self.Robot_Stop()
# print('fuck Cross')
def Init_Strategy(self):
self.rotateAng = self._param.errorRotate0
self.homeFlag = 0
self.homeTimes = 0
self.Robot_Stop()
self._param.behavior = MOBILE_ROBOT
# self.Reset_IMU()
def Home_Strategy(self):
print('HOME times',self.homeTimes,'HOME stop',self._param.stopPoint)
if(self.homeFlag == 0):
print('HOME',1)
self.homeFlag = 1
self.Robot_Stop()
self._param.behavior = ROTATE
self.rotateAng = self._param.errorRotate0
self.homeTimes -= 1
else:
if(self.homeTimes == 0 and self._param.stopPoint == '0'):
print('home')
self.Robot_Stop()
self._param.behavior = PLATFORM
else:
if(self.homeTimes == int(self._param.stopPoint)):
self.homeTimes -= 1
self._param.behavior = CROSS
else:
self._param.behavior = MOBILE_ROBOT
def Deg2Rad(self,deg):
return deg*math.pi/180
def Norm_Angle(self,angle):
if(angle > 180):
angle -= 360
elif(angle < -180):
angle +=360
return angle
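    # Example: Norm_Angle(370) -> 10 and Norm_Angle(-190) -> 170; wrapping
    # keeps angular errors inside (-180, 180] for the rotation logic above.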
def Robot_Stop(self):
vel = Twist()
vel.linear.x = 0
vel.linear.y = 0
vel.angular.z = 0
self._param.pub_cmdvel.publish(vel)
def Robot_Vel(self,vec):
vel = Twist()
vel.linear.x = vec[0]
vel.linear.y = vec[1]
vel.angular.z = vec[2]
self._param.pub_cmdvel.publish(vel)
def Change_Behavior(self):
self.Robot_Stop()
self.Robot_Stop()
self._param.loadParam = False
def Dual_Arm_Start(self):
start = Bool()
start.data = True
self._param.pub_dualArm.publish(start)
def Scan_Camera_Start(self):
start = Bool()
start.data = True
self._param.pub_startCamera.publish(start)
def Scan_Camera_Stop(self):
start = Bool()
start.data = False
self._param.pub_startCamera.publish(start)
def Reset_IMU(self):
reset = Bool()
reset.data = True
self._param.pub_resetImu.publish(reset)
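# Hypothetical driver loop (node name and rate are assumptions, not part of
# this module):
#   rospy.init_node('strategy')
#   strategy = Strategy()
#   rate = rospy.Rate(30)
#   while not rospy.is_shutdown():
#       strategy.Process()
#       rate.sleep()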
| [
"[email protected]"
]
| |
3b31b8b5fde1d17b8fc786ed38466bf518c1d6ff | 2251d71bc3ecb589ce1a8b274a08370c3240bf51 | /0238 Product of Array Except Self.py | b83c04107ed1d7745bf70b11871df718760b6139 | []
| no_license | YuanyuanQiu/LeetCode | 3495a3878edc2028f134bddb5b9ec963069562cb | 6f5d0ef6a353713c0b41fa7ec0fb8c43a7e8dc55 | refs/heads/master | 2022-12-11T04:04:01.686226 | 2022-12-06T18:42:14 | 2022-12-06T18:42:14 | 231,168,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,251 | py | #def productExceptSelf(self, nums: List[int]) -> List[int]:
# n = len(nums)
# zeros = nums.count(0)
#
# if zeros > 1:
# return [0] * n
#
# product = 1
# for num in nums:
# if num != 0:
# product *= num
#
# if zeros == 1:
# res = [0] * n
# idx = nums.index(0)
# res[idx] = product
# return res
#
# res = []
# for i in range(n):
# res.append(int(product/nums[i]))
#
# return res
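# Worked example for the division-free version below, with nums = [1, 2, 3, 4]:
#   after the left-to-right pass:  answer = [1, 1, 2, 6]
#   after the right-to-left pass:  answer = [24, 12, 8, 6]  (R sweeps 1 -> 4 -> 12 -> 24)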
def productExceptSelf(self, nums: List[int]) -> List[int]:
length = len(nums)
answer = [0]*length
    # answer[i] holds the product of every element to the left of index i.
    # Index 0 has nothing to its left, so answer[0] = 1.
    answer[0] = 1
for i in range(1, length):
answer[i] = nums[i - 1] * answer[i - 1]
    # R is the running product of every element to the right of index i.
    # Nothing lies to the right at the start, so R = 1.
    R = 1
for i in reversed(range(length)):
        # For index i, the product of the left side is answer[i] and of the right side is R.
answer[i] = answer[i] * R
        # R must cover everything to the right, so fold the current value into R before the next step.
R *= nums[i]
return answer | [
"[email protected]"
]
| |
a2fdbe7e8a4cdb9897e6296c6966bbe1bde3e053 | 8fd695abd0b8b2523e42786d6b90fa99058545c5 | /horsempdc/art.py | 8f87fadafbf7aee0db1a70e2bd9a45a6e8ca83ec | []
| no_license | jbremer/horsempdc | 109eb7ad9fd04124707fbb2955152996d845f3bb | 4ec1fdc6926c3f83face5bcbee16c53a761fc6bf | refs/heads/master | 2016-09-10T02:10:23.101176 | 2015-12-17T14:39:32 | 2015-12-17T14:39:32 | 23,502,118 | 0 | 1 | null | 2015-12-16T13:57:33 | 2014-08-30T22:45:35 | Python | UTF-8 | Python | false | false | 4,491 | py | # Copyright (C) 2014 Jurriaan Bremer.
# This file is part of HorseMPDC - http://www.horsempdc.org/.
# See the file 'docs/LICENSE.txt' for copying permission.
# Thanks to http://www.asciiworld.com/-Horses-.html
_doge_horse = r"""
|\ /|
___| \,,/_/
---__/ \/ \
__--/ (D) \
_ -/ (_ \
// / \_ / ==\
__-------_____--___--/ / \_ O o)
/ / \==/`
/ /
|| ) \_/\
|| / _ / |
| | /--______ ___\ /\ :
| / __- - _/ ------ | | \ \
| - - / | | \ )
| | - | | ) | |
| | | | | | | |
| | < | | | |_/
< | /__\ < \
/__\ /___\
"""
# Thanks to Neil Smith, http://www.ascii-art.de/ascii/ghi/horse.txt
_dumb_horse = r"""
./|,,/|
< o o)
<\ ( |
<\\ |\ |
<\\\ |(__)
<\\\\ |
"""
# Thanks to http://www.asciiworld.com/-Horses-.html
_angry_horse = r"""
,, ,,, ,,,, ,,,,,,,,
/\ /;; ;;;;;;;;;;;;;; ;;;/ ,;`. ,,,,
; `-. /// //////// ///// // ,','`;. ///;;;;,.
,' ,,`-.;;;;;; ;;;;;;; ;;;;// ,' ,' `.`. ///;;//;,
,' ;;;//////// ////// ///////,' ,' ; : ;;// ;//,
`. ;`;;;;;;;: ;;;;:;; ;:;:;;:;: ,' ,' : ;;;;;;;;/,
`. `; :!::::!;;;;;!::::!;!;;!;: `. ,' ,'///!!;;;;;;
`._!!;!!!!;!!!!!;!!!!;!;!!;!!`. `;' ,'-.!!!//;;;////
; . . , ,' ::-!_///;;;;
.' ,%' ,%' `%. `%.;; `%. ;; ,:: `! ////
.', ' ' `%, `:. `::. :: :; %:: `! ;;
,';; `%, `;;. `::. `.;;; `:% %:///
,';;' ; ;; `::; `%, ;%:. :: :: %`!/
,' ;.' .%. ;; `;; ;; ' `; % :: % :
: `;; %%% `:: ;; ;;; ` ` :: % `
; ' .%%' `% ; ' ,., `;; `%, ::' %::%
;`. `. %%%% ;; .___;;;; ' `: `; :: :::
: : ; %%%% ;: ,:' _ `.`. ;;; ;; `:: :::.
`.; ; `%%' ;;' :: (0) ; : ::' ; :: `:::
,' ;' %%' ;;' ;;.___,',; ;; ;; ; ,:::
, ;' :%: ;; ,'------'' ;;;' .;; :::'
,' ;; ;%; ;; ' ::' ,;;; :::
: :' :%: `; ;;;;' ;; ::%
: ;; :%' ;; ;...,,;;'' ;;' ; ; :::
; `; :: ;;' ,:::' . .;; ,' ;; `;;
; ;' :: .;;' ,:::' ,::%. ;;; ,' ;; ,;;
: ;;. .:' ;;' ,:::' ;;:::' ;; ;;' ,' ;;; ;;;'
:`;; :: ;; ;;;' ' . ;; ' _,-' ;;; `;'
: ;' .:' ;; .::: ,%'`; ;;; _,-' .;;;' ;'
,' ;; ;; ;;' :::' ,, .; ;; _,' ; ,;;;' ,;;'
.'~~~~~~~~~._ ,;' ,',' ;; ',-' ,' ,';; ;;;' ;;;
,' `-.,' .' ;; ,' ,' ;;;;;;' ,;; ;;;
.'; . `., ;; ,' ; ,;;% ;;;
: .. _.'; ; '_,' .' ,,,,,,,%;;' `;;;
`. . (_.' . ;' ,-' : ,,,,,;;;;;;;;;' .;;;
`-._ ___,' ,' :..\"\"\"\"\"`````' ,;;;;
`------'____.' : ..;;;;
`---' `. ..;;;;'
:......:::::::::;;;;'
:::::::::::::::;' ,;;;
; ;;;;'
; .;;;;
,'...:::::. ;;;'
.' `;;;;;;''
;
`----------------------------
"""
def load_ascii_art(name):
inventory = {
'doge-horse': _doge_horse,
'dumb-horse': _dumb_horse,
'angry-horse': _angry_horse,
}
lines = inventory[name].split('\n')
if not lines[0]:
lines = lines[1:]
if not lines[-1]:
lines = lines[:-1]
rows = len(lines)
columns = max(len(line) for line in lines)
return rows, columns, lines
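
if __name__ == '__main__':
    # Standalone preview sketch (illustrative; not part of the original module,
    # which returns rows/columns for the caller to render).
    rows, columns, lines = load_ascii_art('doge-horse')
    print('\n'.join(lines))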
| [
"[email protected]"
]
| |
dc2c2119263a0157cb7a145f69ea778a8e49e51b | ec84619271eac42481231218c9ee653dec99adad | /7. Set- Dictionary- Divide - Conquer/469. Same Tree.py | 1cdd5a38d7f4a9d56672c1707ca8e0adaf4f8772 | []
| no_license | LingHsiLiu/Algorithm0 | 19a968fffb5466022f9856c36af0364da6472434 | f438e828dc9dd6196ee5809eb8fac21ccb688bf2 | refs/heads/master | 2020-04-04T17:55:48.182172 | 2019-01-02T19:06:57 | 2019-01-02T19:06:57 | 156,142,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 811 | py | # 469. Same Tree
# Check if two binary trees are identical. Identical means the two binary trees have the same structure and every identical position has the same value.
# Example
# 1 1
# / \ / \
# 2 2 and 2 2
# / /
# 4 4
# are identical.
# 1 1
# / \ / \
# 2 3 and 2 3
# / \
# 4 4
# are not identical.
"""
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class Solution:
"""
@param a: the root of binary tree a.
@param b: the root of binary tree b.
@return: true if they are identical, or false.
"""
def isIdentical(self, a, b):
# write your code here
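        # A recursive completion (illustrative sketch for the empty stub):
        # two trees are identical when both are empty, or when the roots match
        # and both subtrees are identical.
        if a is None and b is None:
            return True
        if a is None or b is None or a.val != b.val:
            return False
        return self.isIdentical(a.left, b.left) and self.isIdentical(a.right, b.right)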
| [
"[email protected]"
]
| |
a3e5cbc9a8bec44766a705ec15e5e27f2d0c37de | 9a0e2312236b628007a67c07164ea7b97207e47c | /col/apps/syslog_collector/tests/acceptance_tests/test_syslog_collector.py | 330a9acb484bb4b76dbd0bf8fc1ddfe348b58f57 | []
| no_license | laxmi518/network_project | d88b9fe73522deaa90c1dbfd22c6861020a6c7be | 2e998338f3d1142a8098d3dfd35f4c8ad0e4ba00 | refs/heads/master | 2020-05-21T15:48:07.830107 | 2018-05-09T18:58:37 | 2018-05-09T18:58:37 | 84,631,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,195 | py |
import os
import time
import unittest
import socket
import ssl
from subprocess import Popen
import re
import gevent
from pylib.wiring import gevent_zmq as zmq
from nose.tools import eq_
from pylib import wiring, disk, conf, inet
class test_syslog_collector(unittest.TestCase):
os.environ["TZ"] = "UTC"
zmq_context = zmq.Context()
def setUp(self):
# syslog collector forwards the received msg to normalizer_in
# starting syslog collector
config_path = disk.get_sibling(__file__, 'test-config.json')
config = conf.load(config_path)
self.port = config['port']
self.ssl_port = config['ssl_port']
self.normalizer = wiring.Wire('norm_front_in',
zmq_context=self.zmq_context)
self.syslog_collector = Popen(['python', 'syslog_collector.py',
config_path])
# Allow to prepare for serving
time.sleep(0.5)
def tearDown(self):
self.syslog_collector.kill()
self.normalizer.close()
time.sleep(0.5)
def send_message(self, address=None, message=None, flow='udp'):
address = address or ('127.0.0.1', self.port)
message = message or "<124> May 06 2012 15:02:24 [emerg] (17)File exists: Couldn't create accept lock (/private/var/log/apache2/accept.lock.19) (5)\n"
host, port = address
if flow == 'tcp':
client, sockaddr = inet.create_address(host, port)
client.connect(sockaddr)
client.send(message)
elif flow == 'ssl':
client, sockaddr = inet.create_address(host, port)
client = ssl.wrap_socket(client)
client.connect(sockaddr)
client.send(message)
elif flow == 'udp':
client, sockaddr = inet.create_address(host, port,
socket.SOCK_DGRAM)
client.sendto(message, sockaddr)
else:
raise ValueError('Unknown flow type: %r' % flow)
event = gevent.with_timeout(5, self.normalizer.recv, timeout_value=None)
mid = event.pop('mid')
assert re.match(r'^LogInspect500\|syslog\|(127.0.0.1|::1)\|\d+\|1$', mid)
eq_(event, dict(
msg=message.rstrip('\n'),
severity=4,
facility=15,
log_ts=1336316544,
device_ip=address[0],
device_name='localhost',
collected_at='LogInspect500',
_type_num='log_ts severity facility',
_type_str='msg device_name collected_at',
_type_ip='device_ip',
))
def test_tcp_basic_flow(self):
self.send_message(flow='tcp')
def test_ssl_flow(self):
self.send_message(('127.0.0.1', self.ssl_port), flow='ssl')
def test_udp_basic_flow(self):
self.send_message(flow='udp')
def test_tcp6_flow(self):
self.send_message(('::1', self.port), flow='tcp')
def test_ssl6_flow(self):
self.send_message(('::1', self.ssl_port), flow='ssl')
def test_udp6_flow(self):
self.send_message(('::1', self.port), flow='udp')
if __name__ == '__main__':
import nose
nose.run(defaultTest=__name__)
| [
"[email protected]"
]
| |
f58ebecf367af70681cae87983a4b286dcad25da | 9cfaffd2e3fe06467d0e4f7e671e459b04d123ea | /extras/management/commands/updates.py | 34dffa7918245bab780b244d8bb583ed6bc223f4 | []
| no_license | montenegrop/djangotravelportal | 80b72b9e3da517885b6d596fad34049545a598a5 | 8a15fc387d20b12d16c171c2d8928a9b9d4ba5e1 | refs/heads/main | 2023-01-29T22:12:58.633181 | 2020-12-05T15:44:39 | 2020-12-05T15:44:39 | 318,826,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,405 | py | from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from operators.models import QuoteRequest, TourOperator, Itinerary, ItineraryType
from users.models import UserProfile
import MySQLdb
from django.db.models import Count
from django.contrib.auth.models import User
from places.models import Park, CountryIndex
from photos.models import Photo
from blog.models import Article
from reviews.models import ParkReview, KilimanjaroParkReview, TourOperatorReview
from analytics.models import Analytic
class Command(BaseCommand):
help = ''
def handle(self, *args, **options):
# update tour operators
tour_operators = TourOperator.objects.all()
#tour_operators = tour_operators.filter(slug='africaventure')
for tour_operator in tour_operators:
tour_operator.update_reviews_count()
tour_operator.update_average_rating()
tour_operator.update_parks_count()
tour_operator.update_packages_count()
tour_operator.update_quote_request_count()
tour_operator.update_photos_count()
tour_operator.update_yas_score()
tour_operator.update_vehicle_rating()
tour_operator.update_meet_and_greet_rating()
tour_operator.update_responsiveness()
tour_operator.update_safari_quality()
tour_operator.update_itinerary_quality()
tour_operator.update_packages_count()
for country in tour_operator.country_indexes.all():
tour_operator.update_yas_score(country)
print('Updated', tour_operators.count(), 'tour_operators')
#activity_level
itineraries = Itinerary.objects.filter(date_deleted=None)
for itinerary in itineraries:
itinerary.activity_level = itinerary.calc_max_activity_level()
itinerary.activity_level_name = itinerary.calc_activity_level_string()
itinerary.save()
print('Updated', itineraries.count(), 'itineraries')
# update country
countries = CountryIndex.objects.all()
for country in countries:
country.update_packages_count()
country.update_photos_count()
country.update_parks_count()
country.update_operators_count()
print('Updated', countries.count(), 'countries')
# update articles
articles = Article.objects.all()
for article in articles:
article.update_kudu_count()
article.update_visit_count()
article.update_comments_count()
print('Updated', articles.count(), 'articles')
#parks
parks = Park.objects.all()
for park in parks:
park.update_reviews_count()
park.update_tour_operators_count()
park.update_average_rating()
park.update_packages_count()
park.update_photos_count()
print('Updated', parks.count(), 'parks')
# update park reviews
reviews = ParkReview.objects.all()
for review in reviews:
review.update_views_count()
review.update_kudu_count()
print('Updated', reviews.count(), 'park reviews')
# update tour operator reviews
reviews = TourOperatorReview.objects.all()
for review in reviews:
review.update_views_count()
review.update_kudu_count()
print('Updated', reviews.count(), 'tour op reviews')
# update kilimanjaro reviews
reviews = KilimanjaroParkReview.objects.all()
for review in reviews:
review.update_views_count()
review.update_kudu_count()
print('Updated', reviews.count(), 'kilimanjaro park reviews visit counts')
objs = Itinerary.objects.all()
for obj in objs:
obj.update_visit_count()
print('Updated', objs.count(), 'itinerary views')
objs = UserProfile.objects.all()
for obj in objs:
obj.update_review_count()
obj.update_kudus_count()
print('Updated', objs.count(), 'users reviews and kudus')
objs = Photo.objects.filter(date_deleted__isnull=False)
for obj in objs:
obj.update_kudu_count()
print('Updated', objs.count(), 'photos')
self.stdout.write(self.style.SUCCESS("DONE"))
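
        # Typical invocation (illustrative): python manage.py updates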
| [
"[email protected]"
]
| |
434c4fb7ffdbf42ebf6dd49ec11956c764944c9c | 348a4943f9b690e8668f97719fde0713949a0477 | /hatch/files/coverage/__init__.py | e49caa50e6bf8bfb327565eed41ea22cefd4e765 | [
"MIT",
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | DalavanCloud/hatch | 9786d6ad70511113c0db246101d6839168dea345 | 88e173c1dd0e262ec3dee5f9b500983388bc4126 | refs/heads/master | 2020-04-09T06:33:31.462863 | 2018-11-02T21:29:06 | 2018-11-02T21:29:06 | 160,118,052 | 1 | 0 | null | 2018-12-03T01:57:20 | 2018-12-03T01:57:20 | null | UTF-8 | Python | false | false | 108 | py | from hatch.files.coverage.codecov import Codecov
from hatch.files.coverage.coveragerc import CoverageConfig
| [
"[email protected]"
]
| |
f0e094eec95b0e2ba7dc77239adfc658f8b0f713 | 060e99a3935b08f3344f01d3af9a1bf322783b99 | /OOP/encapsulation.py | 0e99667c6a2accaffee794f206cc93fbe9c61a7b | []
| no_license | Lemmah/pyWorkSpace | a2119a6cd2d2695eeb18a1d41400b7fe97a41c70 | ba176a9029f108c39d53970ff5127be7007555ee | refs/heads/master | 2021-01-22T11:10:57.205835 | 2017-09-05T07:57:28 | 2017-09-05T07:57:28 | 92,673,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 670 | py | # Encapsulation: data hiding. Encapsulated variables cannot be accessed directly.
class BankAccount:
''' This is a bank account class '''
def __init__(self, accountName="Current Account", balance=200):
''' Constructor with encapsulated attributes '''
self.__accountName = accountName
self.__balance = balance
def getBalance(self):
return self.__balance
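
    # Illustrative addition (not in the original lab): a setter pairs with the
    # getter so callers never touch __balance directly.
    def setBalance(self, balance):
        self.__balance = balance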
accountObject = BankAccount()
'''
If you did this, you will encounter errors...
print(accountObject.__accountName)
print(accountObject.__balance)
'''
# So how do we access the hidden state? Through getter (and setter) methods.
print(accountObject.getBalance())
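
# Caveat: Python hides these attributes only via name mangling, so
# accountObject._BankAccount__balance would still reach the value directly.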
| [
"[email protected]"
]
| |
bf9f50f49f5bbb9df18f6cfac06a5ea8d787c98f | 143eb3ced0ff1f9cad745c620fcb572f72d66048 | /Assignment4/atom3/Kernel/Qoca/runUnitTests.py | 2c224b32ce916eeab7312760d919486bacf2576d | []
| no_license | pombreda/comp304 | 2c283c60ffd7810a1d50b69cab1d5c338563376d | d900f58f0ddc1891831b298d9b37fbe98193719d | refs/heads/master | 2020-12-11T07:26:19.594752 | 2014-11-07T12:29:28 | 2014-11-07T12:29:28 | 35,264,549 | 1 | 1 | null | 2015-05-08T07:18:18 | 2015-05-08T07:18:18 | null | UTF-8 | Python | false | false | 714 | py |
def runUnitTests():
import unittest
suite = unittest.TestSuite()
print 'NOTE: if import fails, try running it from a higher-level directory'
print 'IE: ..\\atom3\\Kernel> python Qoca\\runUnitTests.py\n'
from unittests.QocaBasicConstraints import QocaBasicConstraints
suite.addTest(unittest.makeSuite(QocaBasicConstraints))
from unittests.pipeTest import PipeTest
suite.addTest(unittest.makeSuite(PipeTest))
from unittests.QocaWrapperTest import QocaWrapperTest
suite.addTest(unittest.makeSuite(QocaWrapperTest))
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
runUnitTests()
| [
"[email protected]"
]
| |
d588825332a0ef69aeb97056aeff210c8bf6353d | ae7d5d11351af9201ce6181c48b8c60363c7ed00 | /lib/galaxy/workflow/reports/generators/__init__.py | 0fa46d4fb295ed41b79d33adcfbf98752944b597 | [
"CC-BY-2.5",
"AFL-2.1",
"AFL-3.0",
"CC-BY-3.0",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | natefoo/galaxy | 818037d03f39ccfb3714c7e784fd64d7ad8f4d2e | 64150c5bd803e75ed032e9f15acd003bae92b5ef | refs/heads/master | 2023-08-17T02:57:02.580487 | 2020-03-26T13:33:01 | 2020-03-26T13:33:01 | 31,212,836 | 2 | 1 | NOASSERTION | 2019-04-25T12:30:28 | 2015-02-23T15:01:46 | Python | UTF-8 | Python | false | false | 2,374 | py | """Module containing Galaxy workflow report generator plugins.
"""
from abc import (
ABCMeta,
abstractmethod
)
import six
from galaxy.managers.markdown_util import (
internal_galaxy_markdown_to_pdf,
ready_galaxy_markdown_for_export,
resolve_invocation_markdown,
)
@six.add_metaclass(ABCMeta)
class WorkflowReportGeneratorPlugin(object):
"""
"""
@property
@abstractmethod
def plugin_type(self):
"""Short string labelling this plugin."""
@abstractmethod
def generate_report_json(self, trans, invocation, runtime_report_config_json=None):
"""
"""
@abstractmethod
def generate_report_pdf(self, trans, invocation, runtime_report_config_json=None):
"""
"""
@six.add_metaclass(ABCMeta)
class WorkflowMarkdownGeneratorPlugin(WorkflowReportGeneratorPlugin):
"""WorkflowReportGeneratorPlugin that generates markdown as base report."""
def generate_report_json(self, trans, invocation, runtime_report_config_json=None):
"""
"""
internal_markdown = self._generate_internal_markdown(trans, invocation, runtime_report_config_json=runtime_report_config_json)
export_markdown, extra_rendering_data = ready_galaxy_markdown_for_export(trans, internal_markdown)
rval = {
"render_format": "markdown", # Presumably the frontend could render things other ways.
"markdown": export_markdown,
"invocation_markdown": export_markdown,
}
rval.update(extra_rendering_data)
return rval
def generate_report_pdf(self, trans, invocation, runtime_report_config_json=None):
internal_markdown = self._generate_internal_markdown(trans, invocation, runtime_report_config_json=runtime_report_config_json)
return internal_galaxy_markdown_to_pdf(trans, internal_markdown, 'invocation_report')
@abstractmethod
def _generate_report_markdown(self, trans, invocation, runtime_report_config_json=None):
""" """
def _generate_internal_markdown(self, trans, invocation, runtime_report_config_json=None):
workflow_markdown = self._generate_report_markdown(trans, invocation, runtime_report_config_json=runtime_report_config_json)
internal_markdown = resolve_invocation_markdown(trans, invocation, workflow_markdown)
return internal_markdown
| [
"[email protected]"
]
| |
7b43fa6cbd43a380499d2b33f5bcaa34e3eb9afc | c1dab6818d05c52bdc0347150ce700a73d64fa1d | /build/perception_cvut/catkin_generated/pkg.develspace.context.pc.py | 11efc522cab62140ff0a5edf6ef69c6e91725f38 | []
| no_license | Sinchiguano/Perception_ur10 | de5ee83f6e930679c045f96d4d3b6a87caeab452 | 40f18dc771bdcc4372d784f4aa8261774bab2b2a | refs/heads/master | 2022-02-16T05:38:00.578173 | 2019-09-01T15:16:11 | 2019-09-01T15:16:11 | 194,514,569 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "perception_cvut"
PROJECT_SPACE_DIR = "/home/casch/ws_moveit/devel"
PROJECT_VERSION = "0.0.0"
| [
"[email protected]"
]
| |
39336a1943b085ae0cdf21000d0b5ee2771f5e12 | 8c036299de04b1dd8edeabdd7b265beb4c16f64d | /WebMirror/management/rss_parser_funcs/feed_parse_extractGooseberrytlWordpressCom.py | 69935050ff6657b86bdd6192cf29a7637182ef62 | [
"BSD-3-Clause"
]
| permissive | collegroup/ReadableWebProxy | f2dcc4ce4f32c461388f40890a2997d61b49b28a | bec24610dd52fde5311dfc9b9cb2b388e23727ec | refs/heads/master | 2023-01-11T20:27:38.598545 | 2020-11-16T06:03:57 | 2020-11-16T06:03:57 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 642 | py | def extractGooseberrytlWordpressCom(item):
'''
	Parser for 'gooseberrytl.wordpress.com'. Relies on the shared helpers
	extractVolChapterFragmentPostfix and buildReleaseMessageWithType, which are
	provided by the enclosing feed-parser framework (not defined in this module).
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('tsats', 'The Star Around The Sun', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | [
"[email protected]"
]
| |
7f419201fc23a0742c1ddb00244df3d888d47e0e | 7eaeb56a2ed19a30559dac8673a979fc64d76e8a | /tests/parsers/c_parser/stmts/if_stmt_tests.py | 9ac34d7e32ec9b86aab0bdf3fcd755d5c1d2639d | [
"MIT"
]
| permissive | avast/retdec-regression-tests-framework | 95935b6a66bee66f58a9f2ea1296f747536aeaae | f8f43c0870df638d114f685a30f8abf8b51d6d1e | refs/heads/master | 2023-05-30T18:52:37.332065 | 2022-12-05T14:37:40 | 2022-12-05T14:37:40 | 113,967,405 | 8 | 5 | MIT | 2020-04-07T12:28:40 | 2017-12-12T09:01:52 | Python | UTF-8 | Python | false | false | 3,520 | py | """
Tests for the :module`regression_tests.parsers.c_parser.stmts.if_stmt` module.
"""
from tests.parsers.c_parser import WithModuleTests
from regression_tests.parsers.c_parser.stmts.if_stmt import IfStmt
class IfStmtTests(WithModuleTests):
"""Tests for `IfStmt`."""
def get_if_stmt(self, code, func_name):
"""Returns the first if statement in the given code."""
func = self.get_func("""
void %s(void) {
%s
}
""" % (func_name, code), func_name)
return func.if_stmts[0]
def test_if_stmt_is_if_stmt(self):
if_stmt = self.get_if_stmt("if(1) bar();", 'foo')
self.assertTrue(if_stmt.is_if_stmt())
def test_if_stmt_is_no_other_kind_of_statement(self):
if_stmt = self.get_if_stmt("if(1) bar();", 'foo')
self.assertFalse(if_stmt.is_for_loop())
self.assertFalse(if_stmt.is_assign())
self.assertFalse(if_stmt.is_var_def())
self.assertFalse(if_stmt.is_while_loop())
self.assertFalse(if_stmt.is_return_stmt())
self.assertFalse(if_stmt.is_empty_stmt())
self.assertFalse(if_stmt.is_break_stmt())
self.assertFalse(if_stmt.is_continue_stmt())
self.assertFalse(if_stmt.is_switch_stmt())
self.assertFalse(if_stmt.is_goto_stmt())
self.assertFalse(if_stmt.is_do_while_loop())
self.assertFalse(if_stmt.is_loop())
def test_identification_returns_correct_value(self):
if_stmt = self.get_if_stmt("if(1) bar();", 'foo')
self.assertEqual(if_stmt.identification, 'if(1)')
def test_correct_condition_is_extracted(self):
if_stmt = self.get_if_stmt("if(1) bar();", 'foo')
self.assertEqual(if_stmt.condition, '1')
def test_if_stmt_without_else_part_does_not_have_else_part(self):
if_stmt = self.get_if_stmt("if(1) bar();", 'foo')
self.assertFalse(if_stmt.has_else_clause())
def test_if_stmt_with_else_part_has_else_part(self):
if_stmt = self.get_if_stmt("""
if(1) bar();
else foo();
""", 'foo')
self.assertTrue(if_stmt.has_else_clause())
def test_if_stmt_is_equal_to_itself(self):
if_stmt = self.get_if_stmt("if(1) bar();", 'foo')
self.assertEqual(if_stmt, if_stmt)
def test_two_different_if_stmts_are_not_equal(self):
if_stmt1 = self.get_if_stmt("if(1) bar();", 'foo')
if_stmt2 = self.get_if_stmt("if(1) foo();", 'foo')
self.assertNotEqual(if_stmt1, if_stmt2)
def test_two_if_stmts_with_same_string_representation_are_not_equal(self):
if_stmt1 = self.get_if_stmt("if(1) foo();", 'foo')
if_stmt2 = self.get_if_stmt("if(1) foo();", 'bar')
self.assertNotEqual(if_stmt1, if_stmt2)
def test_else_if_statement_is_new_if_statement_in_else_clause(self):
parent_if_stmt = self.get_if_stmt("""
if(1) {
bar();
} else if (2) {
foo();
}
""", 'foo')
child_if_stmt = IfStmt(list(parent_if_stmt._node.get_children())[2])
self.assertEqual(child_if_stmt.condition, '2')
self.assertFalse(child_if_stmt.has_else_clause())
def test_repr_returns_correct_repr(self):
if_stmt = self.get_if_stmt("if(1) foo();", 'foo')
self.assertEqual(repr(if_stmt), '<IfStmt condition=1>')
def test_str_returns_correct_str(self):
if_stmt = self.get_if_stmt("if(1) foo();", 'foo')
self.assertEqual(str(if_stmt), 'if (1)')
| [
"[email protected]"
]
| |
a922c484299fcb82e4c30019d0fbefc8983bd0d4 | 1527d341ec0910426ffede6207232f885b3176a0 | /source/HwSendEmail.py | 8f481d74c8c323f21344e6c81f02ade8d73cdf80 | []
| no_license | eddiewang-wgq/HwUnittestFrameworkPy2 | 47f55c56c3e2c61aa153beb9180fa8247164fdcc | dada7db244f66830ca5a06087822f0b6db6ee512 | refs/heads/master | 2023-03-28T14:55:54.086200 | 2021-03-30T08:34:25 | 2021-03-30T08:34:25 | 352,928,215 | 0 | 0 | null | null | null | null | GB18030 | Python | false | false | 4,032 | py | #!/usr/bin/env python
# coding:gbk
# Created by zhaohongwei on 2016-06-20
# Blog: http://blog.csdn.net/z_johnny
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from email.utils import COMMASPACE
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
from email.mime.audio import MIMEAudio
import smtplib
import os
import mimetypes  # used below to pick the right MIME wrapper for attachments
import yaml
class HwSendEmail(object):
def __init__(self, email_title, email_content):
"""
init config
"""
with open('./config/configEmail.yaml' ,'rb') as config:
            self.allConfig = yaml.safe_load(config)  # safe_load avoids arbitrary object construction
self.attachment_path = './result'
self.email_title = email_title
self.email_content = email_content
self.smtp = smtplib.SMTP()
self.login_username = self.allConfig['SMTP']['login_username']
self.login_password = self.allConfig['SMTP']['login_password']
self.sender = self.allConfig['SMTP']['login_username']
self.receiver = self.allConfig['SMTP']['receiver']
self.host = self.allConfig['SMTP']['host']
        # self.port = self.allConfig['SMTP']['port']  # disabled for now: specifying a port sometimes delayed delivery
def connect(self):
"""
connect server
"""
#self.smtp.connect(self.host, self.port)
self.smtp.connect(self.host)
def login(self):
"""
login email
"""
try:
self.smtp.login(self.login_username, self.login_password)
except:
raise AttributeError('Can not login smtp!!!')
def send(self):
"""
send email
"""
msg = MIMEMultipart() # create MIMEMultipart
msg['From'] = self.sender # sender
receiver = self.receiver.split(",") # split receiver to send more user
msg['To'] = COMMASPACE.join(receiver)
msg['Subject'] = self.email_title # email Subject
        content = MIMEText(self.email_content, _charset='gbk')  # body uses gbk because it may contain Chinese text
msg.attach(content)
        for attachment_name in os.listdir(self.attachment_path):
            attachment_file = os.path.join(self.attachment_path, attachment_name)
            # Choose the MIME wrapper based on the file's guessed content type,
            # falling back to a generic binary application part.
            guessed = mimetypes.guess_type(attachment_file)[0] or 'application/octet-stream'
            main_type = guessed.split('/', 1)[0]
            with open(attachment_file, 'rb') as attachment:
                if main_type == 'text':
                    attachment = MIMEText(attachment.read(), _subtype='octet-stream', _charset='GB2312')
                elif main_type == 'image':
                    attachment = MIMEImage(attachment.read(), _subtype='octet-stream')
                elif main_type == 'audio':
                    attachment = MIMEAudio(attachment.read(), _subtype='octet-stream')
                else:
                    attachment = MIMEApplication(attachment.read(), _subtype='octet-stream')
                # The RFC 2231 tuple encoding keeps non-ASCII (e.g. Chinese) filenames intact
                attachment.add_header('Content-Disposition', 'attachment', filename=('gbk', '', attachment_name))
                msg.attach(attachment)
self.smtp.sendmail(self.sender, receiver, msg.as_string()) # format msg.as_string()
def quit(self):
self.smtp.quit()
def sendemail(self):
self.connect()
self.login()
self.send()
self.quit()
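
    # Convenience path (equivalent to the four explicit calls in __main__):
    #     HwSendEmail('subject', 'body').sendemail()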
if __name__ == "__main__":
    import time
    ISOTIMEFORMAT = '_%Y-%m-%d_%A'
    current_time = str(time.strftime(ISOTIMEFORMAT))
    # The config path ('./config/configEmail.yaml') and the attachment
    # directory ('./result') are hardcoded inside the class itself.
    email_title = 'johnny test' + '%s' % current_time  # e.g. "johnny test_2016-06-20_Monday"; the timestamp keeps each subject unique
    email_content = 'Python email sending test, with attachments'
    myemail = HwSendEmail(email_title, email_content)
    myemail.connect()
    myemail.login()
    myemail.send()
    myemail.quit()
| [
"[email protected]"
]
| |
4ed17d17fad5aa10e9004e2cd3e4b71e0b4eaa7f | e10c8dbd03117dcf71ae4c5e59863b9268cda514 | /store/migrations/0015_auto_20200617_2230.py | cab6aa810e8b6c38461a407bd24f100c01c4f615 | []
| no_license | linker10/pharmacy | c305eb8304057498ea06008f43715db682e88554 | 8cd30ca6f94f636f45400899f4a9f1c150af3bbf | refs/heads/master | 2022-12-10T01:04:45.154055 | 2020-08-12T18:32:06 | 2020-08-12T18:32:06 | 276,040,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 552 | py | # Generated by Django 3.0.6 on 2020-06-17 17:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('store', '0014_auto_20200617_2227'),
]
operations = [
migrations.AlterField(
model_name='item',
name='old_price',
field=models.FloatField(blank=True, default=0, null=True),
),
migrations.AlterField(
model_name='item',
name='price',
field=models.FloatField(default=0),
),
]
| [
"[email protected]"
]
| |
b42a88f8b03d74ac056c53e343c01339be04c77e | 692f77a160798b586f3ef1240c1bdf2bb114c9a0 | /aiopening/__init__.py | 1bd330e0d468a5d48aabf994e80d57f1a099b472 | [
"MIT"
]
| permissive | ducandu/aiopening | 5e16f8240a527da37d622b5445b68083d4fba1e4 | 214d8d6dfc928ab4f8db634018092dc43eaf0e3c | refs/heads/master | 2022-12-30T14:58:45.669350 | 2017-09-06T09:47:57 | 2017-09-06T09:47:57 | 93,327,068 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 677 | py | """
---------------------------------------------------------------------------------
shine - [s]erver [h]osted [i]ntelligent [n]eural-net [e]nvironment :)
---------------------------------------------------------------------------------
by Code Sourcerer
(c) 2017 ducandu GmbH
"""
# "global" classes (that should live in the ai. namespace directly)
from aiopening.labs import Lab
from aiopening.experiments import Experiment
from aiopening.models import Model
# make sure these are available without having to specify them as separate imports
import aiopening.modules
import aiopening.envs
import aiopening.algorithms
# global package vars
_VERSION = 1 # 00.00.01 = 1
| [
"[email protected]"
]
| |
3c956f411f40e3e79b6b36818f8be195ddf79d03 | 1c751c001357d23fe10e7a42490e3b76434dfa18 | /include/extractor/IQ.py | 72b94176effbdf1a6e0fadb202e23afc747bf877 | []
| no_license | pie-crust/etl | 995925199a71b299544bfac1ed8f504f16fbadc2 | 14b19b542eaa69b8679ce7df4d9a5d2720b3c5c7 | refs/heads/master | 2022-12-12T18:40:31.866907 | 2019-10-14T15:46:16 | 2019-10-14T15:46:16 | 215,082,544 | 0 | 0 | null | 2022-12-08T05:22:54 | 2019-10-14T15:43:04 | Python | UTF-8 | Python | false | false | 11,380 | py |
(cli, conn_pool)=app_init
import os, sys, time
import pyodbc
import collections
from subprocess import Popen, PIPE
from pprint import pprint as pp
e=sys.exit
import logging
DB_READER_DATA_DIR = 'dump'
from include.utils import ctimeit, csource, api
try:
import __builtin__ as builtins
except:
import builtins
builtins.app_init=app_init
from include.Db import Db
log=logging.getLogger('cli')
try:
import cStringIO
except ImportError:
import io as cStringIO
import locale
myLocale=locale.setlocale(category=locale.LC_ALL, locale="en_US.UTF-8");
import datetime
from include.extractor.common.StreamSlicer import StreamSlicer
from include.extractor.common.FileStreamer import FileStreamer
from include.extractor.common.DbStreamer import DbStreamer
from include.extractor.common.Extractor import Extractor
from include.utils import InOut
from include.fmt import pfmtd
class IQ(Db, Extractor):
#@csource
def __init__(self, **kwargs):
Db.__init__(self, **kwargs)
#self.apx= self.cli.apx
#self.conn=self.get_connect()
def desc_cur0(self, cur, colord=True):
rows ={}
for col in cur.description:
rows[col[0]]=[col[0], str(col[1]).split("'")[1] , col[3]]
out=[]
for k in sorted(rows.keys()) if colord else rows.keys():
row=rows[k]
d = collections.OrderedDict()
for i in zip(['Column', 'Type', 'Length'], row):
x,y = i
d[x]=y
out.append(d)
pfmtd(out, 'Procedure')
def desc_cur(self, cur, colord=True):
rows ={}
for col in cur.description:
rows[col[0]]=[col[0], str(col[1]).split("'")[1] , col[3]]
out=[]
for k in sorted(rows.keys()) if colord else rows.keys():
row=rows[k]
d = collections.OrderedDict()
for i in zip(['Column', 'Type', 'Length'], row):
x,y = i
d[x]=y
out.append(d)
pfmtd(out, 'Procedure')
def setKeytabCache(self, *args, **kwargs):
pass
	def parse_conn_str(self,connStr):
		# DB_READ_SERVER, DB_READ_USER and DB_READ_PWD are expected to be
		# supplied by the deployment config; they are not defined in this module.
		self.connStr=connStr.format(DB_READ_SERVER,DB_READ_USER,DB_READ_PWD)
@api
@ctimeit
def fetch_stream(self, chunk_size, source , qname, out, skip_header):
assert chunk_size
chunk_size=self.cli.lame_duck if self.cli.lame_duck and chunk_size>self.cli.lame_duck else chunk_size
assert chunk_size
tf = "%Y-%m-%d.%H_%M_%S"
current_ts = time.strftime(tf)
id=0
cur=InOut()
self.open_stream(source, qname, out=cur)
#e()
return None
@api
@ctimeit
def fetch_many0(self, chunk_size, source , qname, out, skip_header):
self.chunk_size=chunk_size
tf = "%Y-%m-%d.%H_%M_%S"
current_ts = time.strftime(tf)
self.id=0
stmt=self.get_query(source, qname)
if not hasattr(self,'cur'):
self.cur=cur=self.conn.cursor()
cur=self.cur
cur.execute(stmt)
self.total_read=0
if skip_header:
cur.fetchone()
apx=self.apx
while True:
out.data=[]
if self.cli.lame_duck and self.cli.lame_duck<self.total_read: break
if self.cli.lame_duck and self.cli.lame_duck-self.total_read <chunk_size: chunk_size=self.cli.lame_duck-self.total_read
rows = cur.fetchmany(chunk_size)
self.total_read +=len(rows)
			data=[]
			for row in rows:
				d=[]
				for x in row:
					# Normalize every value to text so the '^'.join below works
					# on Python 3 as well (bytes elements would break the join).
					if x is None: d.append(''); continue
					if isinstance(x, (datetime.date, datetime.datetime)): d.append(str(x)); continue
					if isinstance(x, (int, float)): d.append(repr(x)); continue
					d.append(x)
				if apx:
					data.append('^'.join(d)+'^'+apx+os.linesep)
				else:
					data.append('^'.join(d)+os.linesep)
out.data=data
out.chunk_id, out.current_ts, out.actor = self.id, current_ts, self.cln
if not data: break
return out
@api
@ctimeit
def fetch_next(self, out):
self.id +=1
chunk_size=self.chunk_size
apx=self.apx
while True:
out.data=[]
if self.cli.lame_duck and self.cli.lame_duck<self.total_read: break
if self.cli.lame_duck and self.cli.lame_duck-self.total_read <chunk_size: chunk_size=self.cli.lame_duck-self.total_read
rows = self.cur.fetchmany(chunk_size)
self.total_read +=len(rows)
			data=[]
			for row in rows:
				d=[]
				for x in row:
					# Same text normalization as in fetch_many0, so the join
					# below works on both Python 2 and 3.
					if x is None: d.append(''); continue
					if isinstance(x, (datetime.date, datetime.datetime)): d.append(str(x)); continue
					if isinstance(x, (int, float)): d.append(repr(x)); continue
					d.append(x)
				if apx:
					data.append('^'.join(d)+'^'+apx+os.linesep)
				else:
					data.append('^'.join(d)+os.linesep)
out.data=data
out.chunk_id = self.id
if not data: break
return out
@api
@ctimeit
def open_query_stream(self, dbcfg, qname, out):
global actors
cli=self.cli
#Out = collections.namedtuple('Out','pipe actor col_map')
if 1:
cur= self.conn.cursor()
start_time = time.time()
if 1:
stmt=self.get_query(dbcfg,qname)
cur.execute(stmt)
if 0:
from_cols={}
for id,column in enumerate(cur.description):
from_cols[id]=str(column[0]).strip().upper()
#print from_cols
pipe=DbStreamer(self.cli,cur=cur, start_time=start_time)
col_map={}
if 1:
with StreamSlicer(cli, pipe, self.apx, max_rows_to_read=self.cli.max_rows_to_read, col_map=col_map) as pipe:
out.pipe, out.actor, out.col_map= pipe, self.cln, col_map
return out
@api
@ctimeit
def open_query_cur(self, dbcfg, qname, out):
global actors
cli=self.cli
#Out = collections.namedtuple('Out','pipe actor col_map')
if 1:
cur= self.conn.cursor()
start_time = time.time()
if 1:
stmt=self.get_query(dbcfg,qname)
cur.execute(stmt)
if 0:
from_cols={}
for id,column in enumerate(cur.description):
from_cols[id]=str(column[0]).strip().upper()
#print from_cols
pipe=DbStreamer(self.cli,cur=cur, start_time=start_time)
out.pipe=pipe
@api
@ctimeit
def open_stream(self,dbcfg, qname, out):
global actors
cli=self.cli
alt_cols={}
from_cols={}
for id, col in enumerate(cli.scfg["columnMappings"]):
from_cols[int(id)]=col['columnName'].upper().encode()
if col.get('altColName'):
alt_cols[int(id)]= col['columnName'].upper().encode()
assert hasattr(self,'loader'), 'You must call "set_loader" first'
if self.loader.cln not in ['Dir']:
to_cols= self.loader.get_columns()
assert to_cols
#pp(to_cols)
#e()
assert len(from_cols) == len(to_cols), 'Config vs Target column count mismatch (%d != %d)' % (len(from_cols),len(to_cols))
miss=0
for id, col in from_cols.items():
#print (col, to_cols.keys())
assert col in to_cols, 'Config column "%s" does not exists in Target table "%s"' % (col, cli.tcfg['targetTable'])
if not int(id)==int(to_cols[col]):
log.error ('Config column "%s" order is wrong (Config# %d != Target# %d)' % (col, id, to_cols[col]))
miss +=1
assert miss == 0
else:
to_cols= {}
col_map=None
#Out = collections.namedtuple('Out','pipe actor col_map')
cli=self.cli
apx=self.apx
mock_file=cli.mf
if not self.conn:
self.begin_transaction ( env =cli.scfg['sourceDb'] , out = InOut() )
assert self.conn
stmt=self.get_query(dbcfg, qname)
#pp(stmt)
assert stmt
from collections import OrderedDict
from_cols=OrderedDict()
if 1:
if mock_file:
log.info('%s: Using mock file: %s' % (self.cln,mock_file))
assert os.path.isfile(mock_file)
import codecs
mfh = codecs.open(mock_file, encoding='latin-1')
#mfh=open(mock_file,'rb')
if 1:
header=mfh.readline().strip().split(str(self.cli.csep.decode()))
for id,column in enumerate(header):
						from_cols[id]=column.encode().upper()
						to_cols[from_cols[id]] = id  # map header name -> column position
#to_cols=from_cols
#pp(from_cols)
#e()
col_map=self.get_col_map(from_cols, to_cols)
pipe=FileStreamer(self.cli,fh=mfh)
else:
pyodbc.pooling = False
cur= self.conn.cursor()
start_time = time.time()
if 1:
if 1:
log.debug(stmt)
cur.execute(stmt)
for id,column in enumerate(cur.description):
from_cols[id]=column[0].upper().encode()
if self.loader.cln in ['Dir']:
if id in alt_cols:
cname= alt_cols[id]
else:
cname=column[0].upper().encode()
to_cols[cname] = id
col_map=self.get_col_map(from_cols,to_cols)
pipe=DbStreamer(self.cli,cur=cur, start_time=start_time)
with StreamSlicer(cli, pipe, apx, max_rows_to_read=self.cli.max_rows_to_read, col_map=col_map, stmt=stmt) as pipe:
out.pipe, out.actor, out.col_map = pipe, self.cln,col_map
return out
@api
@ctimeit
def dump_stream(self, _in):
data=_in.pipe.read()
#print(len(data))
def get_col_map(self, from_cols, to_cols):
col_map={}
conf_cols={}
alt_cols={}
pcnt=0
for id, col in enumerate(self.cli.scfg["columnMappings"]):
if col['value'].upper() not in [u'Map'.upper()]:
pcnt +=1
conf_cols[int(id)]=col['columnName'].upper().encode()
if col.get('altColName'):
alt_cols[int(id)]=col.get('altColName').upper().encode()
assert len(conf_cols) - pcnt == len(from_cols), 'Source vs Config column count mismatch (%d != %d). (%d are params)\n Are you sure you have header in your MOCK file?' % (len(from_cols), len(conf_cols), pcnt)
if 1:
miss=0
for id, col in from_cols.items():
if col not in conf_cols.values():
if col not in alt_cols.values():
#print id, col, col in alt_cols.values()
log.info ('Column "%s" is NOT in config' % (col,))
miss +=1
else:
log.info ('Column "%s" is IN ALT config [%s]' % (col,conf_cols[id]))
col_map[to_cols[conf_cols[id]]]=id
else:
#print 'Column "%s" is IN config' % (col,)
col_map[to_cols[col]]=id
assert miss==0, '[%d] Source columns are not in Config.' % miss
		sep=str(self.cli.csep.decode())
		# ''.split(sep) would return [''], so guard explicitly on an empty apx
		# to avoid always extending the map by one phantom column.
		apx_len=len(self.apx.split(sep)) if self.apx else 0
		if apx_len:
			log.debug('Increase colmap by apx len [%d]' % apx_len)
			map_len=len(col_map)
			for x in range(map_len,map_len+apx_len):
				col_map[x]=x
		else:
			log.debug('APx is empty [%d]' % apx_len)
#e()
return col_map
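
	# Shape note (inferred from the call sites above): col_map maps target
	# column position -> source column position, extended by the appended apx
	# columns, so the stream slicer can reorder each row before writing.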
@api
@ctimeit
def insert_data(self, trans , target , source, stmt, skip_header=0):
pipe=source.pipe
skip=str(skip_header).strip()
if skip_header is not None:
skip=str(skip_header).strip()
assert str(skip).strip() in ['0','1'], 'skip_header [%s] should be "0" or "1"' % str(skip).strip()
if str(skip) == '1':
pipe.readline()
assert pipe
start_time = time.time()
sql = self.get_query(target,stmt)
cur= self.conn.cursor()
line=pipe.readline()
rows=[]
#pp(line)
while line:
rows.append([line[x] for x in sorted(line.keys())] +[self.cli.pa[1], self.cli.asod])
line=pipe.readline()
chunk=300
total=0
cid=0
while total<len(rows):
cur.fast_executemany = True
data = rows[total:][:chunk]
cur.executemany(sql, data )
ins=len(data)
total +=ins
cid +=1
log.info('[{}] [{}] {}: Running: {:,.0f}, Rows: {:,.0f}'.format (self.objtype, cid, self.cln, total, ins))
log.info('[{}]: {}: Inserted: {:,.0f}, To-Schema:{}, To-Table:{}, Skipped: {}, Elapsed: {}'.format (self.objtype, self.cln, len(rows), target['targetSchema'], target["targetTable"] , skip, round((time.time() - start_time),2)))
pipe.close()
| [
"[email protected]"
]
| |
ec9155714dc595fe99a631eee20e6a23e915fb67 | ef821468b081ef2a0b81bf08596a2c81e1c1ef1a | /Python OOP/Decorators-LAB/Vowel_Filter.py | 02cd267e9c2e4dadb5ddf5db5dd6fce9504be66b | []
| no_license | Ivaylo-Atanasov93/The-Learning-Process | 71db22cd79f6d961b9852f140f4285ef7820dd80 | 354844e2c686335345f6a54b3af86b78541ed3f3 | refs/heads/master | 2023-03-30T20:59:34.304207 | 2021-03-29T15:23:05 | 2021-03-29T15:23:05 | 294,181,544 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 324 | py | def vowel_filter(function):
def wrapper():
vowels = ['a', 'o', 'u', 'e', 'i', 'y']
result = function()
result = [letter for letter in result if letter in vowels]
return result
return wrapper
@vowel_filter
def get_letters():
return ["a", "b", "c", "d", "e"]
print(get_letters())
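
# With the decorator applied, get_letters() returns only the vowels:
#     ['a', 'e']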
| [
"[email protected]"
]
| |
a0f94082909743fec98edbe78c3ed3b4b1dcec26 | 37fe0d74375527f4aaf86857e17b96b675837205 | /aid1805/MongoDB/grid.py | 9f413334eb1c346fa4c34264e5f642ac4adafd87 | []
| no_license | wangleiliugang/data | af8255eb76affa55424979c809c6168a7f3995ea | 375a58b454be38ffa156876a7770f8d6f4345aba | refs/heads/master | 2023-06-05T13:21:43.630854 | 2021-06-21T09:04:39 | 2021-06-21T09:04:39 | 378,862,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 831 | py | # 获取数据库中gridfs文件
from pymongo import MongoClient
# gridfs ships together with the pymongo package
import gridfs
# 1. Create the MongoDB connection object
conn = MongoClient('localhost', 27017)
# 2. The "mygrid" database is created automatically on first use
db = conn.mygrid
# 3. Get the GridFS object
fs = gridfs.GridFS(db)
# 4. Get an iterable cursor over the stored files
files = fs.find()
# print(files)
# print(files.count())
# files is iterable; each item represents one stored file, and its attributes expose the file metadata
# for file in files:
# print(file.filename)
for file in files:
with open(file.filename, 'wb') as f:
while True:
            # the file object exposes read(), so content streams straight from the database
data = file.read(2048)
if not data:
break
f.write(data)
conn.close()
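
# Companion sketch (illustrative): storing a file is the mirror operation.
#     with open('demo.bin', 'rb') as f:
#         fs.put(f, filename='demo.bin')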
| [
"[email protected]"
]
| |
ac4e06fa4a67e9af197caba2daa92a5f5e08fb37 | 5edd3d54b9fb7ef685d7760e03391307374dee73 | /web_flask/100-hbnb.py | de39873740c50d7b60a5c758d756af7a171a7bcf | []
| no_license | PierreBeaujuge/AirBnB_clone_v2 | 3df331aea025f8b216a705bd66bd5203a3b34ec9 | 910d04c08a5f833cd71754a62e74e3b81c601ba2 | refs/heads/master | 2020-11-23T21:30:55.362761 | 2020-10-08T07:37:22 | 2020-10-08T07:37:22 | 227,829,165 | 0 | 3 | null | 2019-12-20T09:10:49 | 2019-12-13T11:52:51 | Python | UTF-8 | Python | false | false | 767 | py | #!/usr/bin/python3
"""
Script that starts a Flask web application
"""
from flask import Flask
from flask import render_template
from models import storage
app = Flask(__name__)
@app.route('/hbnb', strict_slashes=False)
def hbnb():
"""view function that displays [...]"""
all_states = storage.all("State").values()
all_amenities = storage.all("Amenity").values()
all_places = storage.all("Place").values()
return render_template('100-hbnb.html', all_states=all_states,
all_amenities=all_amenities, all_places=all_places)
@app.teardown_appcontext
def teardown(exception):
    """Remove the current SQLAlchemy Session at the end of each request."""
storage.close()
if __name__ == "__main__":
app.run(host='0.0.0.0', port=5000)
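
# Illustrative smoke test once the server is running:
#     curl -s http://0.0.0.0:5000/hbnb | head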
| [
"[email protected]"
]
| |
0d0195538784ac551b4ef042046c3d82a141aaf8 | 2775a8306052e727b9a602c7906e64ee44cb4d80 | /dictionaria/scripts/initializedb.py | 6c46022df940c053822222cf7ed1fffad039bcb9 | [
"Apache-2.0"
]
| permissive | pombredanne/dictionaria | f40b45adb93b0733d1c047c338e15e834a2aa6b3 | 9668129e9b856fc5e8e78e15dacb1037621cbeb6 | refs/heads/master | 2021-01-14T14:16:39.221190 | 2015-11-25T22:22:06 | 2015-11-25T22:22:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,846 | py | from __future__ import unicode_literals
from datetime import date
import transaction
from nameparser import HumanName
from sqlalchemy.orm import joinedload_all, joinedload
from clldutils.misc import slug
from clld.util import LGR_ABBRS
from clld.scripts.util import Data, initializedb
from clld.db.meta import DBSession
from clld.db.models import common
from clldclient.concepticon import Concepticon
from clld_glottologfamily_plugin.util import load_families
import dictionaria
from dictionaria.models import ComparisonMeaning, Dictionary, Word, Variety
from dictionaria.lib.submission import REPOS, Submission
def main(args):
data = Data()
dataset = common.Dataset(
id=dictionaria.__name__,
name="Dictionaria",
description="The Dictionary Journal",
published=date(2015, 10, 1),
contact='[email protected]',
domain='dictionaria.clld.org',
license="http://creativecommons.org/licenses/by/4.0/",
jsondata={
'license_icon': 'cc-by.png',
'license_name': 'Creative Commons Attribution 4.0 International License'})
ed = data.add(
common.Contributor, 'hartmanniren', id='hartmanniren', name='Iren Hartmann')
common.Editor(dataset=dataset, contributor=ed)
DBSession.add(dataset)
for id_, name in LGR_ABBRS.items():
DBSession.add(common.GlossAbbreviation(id=id_, name=name))
comparison_meanings = {}
comparison_meanings_alt_labels = {}
print('loading concepts ...')
concepticon = Concepticon()
for i, concept_set in enumerate(concepticon.resources('parameter').members):
concept_set = concepticon.resource(concept_set)
cm = ComparisonMeaning(
id=concept_set.id,
name=concept_set.name.lower(),
description=concept_set.description,
concepticon_url='%s' % concept_set.uriref)
DBSession.add(cm)
comparison_meanings[cm.name] = cm
for label in concept_set.alt_labels:
comparison_meanings_alt_labels.setdefault(label.lower(), cm)
DBSession.flush()
print('... done')
comparison_meanings = {k: v.pk for k, v in comparison_meanings.items()}
comparison_meanings_alt_labels = {
k: v.pk for k, v in comparison_meanings_alt_labels.items()}
submissions = []
for submission in REPOS.joinpath('submissions').glob('*'):
if not submission.is_dir():
continue
try:
submission = Submission(submission)
except ValueError:
continue
md = submission.md
id_ = submission.id
lmd = md['language']
language = data['Variety'].get(lmd['glottocode'])
if not language:
language = data.add(
Variety, lmd['glottocode'], id=lmd['glottocode'], name=lmd['name'])
dictionary = data.add(
Dictionary,
id_,
id=id_,
name=lmd['name'] + ' Dictionary',
language=language,
published=date(*map(int, md['published'].split('-'))))
for i, cname in enumerate(md['authors']):
name = HumanName(cname)
cid = slug('%s%s' % (name.last, name.first))
contrib = data['Contributor'].get(cid)
if not contrib:
contrib = data.add(common.Contributor, cid, id=cid, name=cname)
DBSession.add(common.ContributionContributor(
ord=i + 1,
primary=True,
contributor=contrib,
contribution=dictionary))
submissions.append((dictionary.id, language.id, submission))
transaction.commit()
for did, lid, submission in submissions:
try:
mod = __import__(
'dictionaria.loader.' + submission.id, fromlist=['MARKER_MAP'])
marker_map = mod.MARKER_MAP
except ImportError:
marker_map = {}
transaction.begin()
print('loading %s ...' % submission.id)
submission.load(
did,
lid,
comparison_meanings,
comparison_meanings_alt_labels,
marker_map)
transaction.commit()
print('... done')
#('hoocak', 'Hooca\u0328k', 43.5, -88.5, [('hartmanniren', 'Iren Hartmann')]),
#('yakkha', 'Yakkha', 27.37, 87.93, [('schackowdiana', 'Diana Schackow')]),
#('palula', 'Palula', 35.51, 71.84, [('liljegrenhenrik', 'Henrik Liljegren')], {}),
#('daakaka', 'Daakaka', -16.27, 168.01, [('vonprincekilu', 'Kilu von Prince')],
# {'published': date(2015, 9, 30), 'iso': 'bpa', 'glottocode': 'daka1243'}),
#('teop', 'Teop', -5.67, 154.97, [('moselulrike', 'Ulrike Mosel')],
# {'published': date(2015, 9, 30), 'iso': 'tio', 'glottocode': 'teop1238', 'encoding': 'latin1'}),
transaction.begin()
load_families(Data(), DBSession.query(Variety))
def prime_cache(cfg):
"""If data needs to be denormalized for lookup, do that here.
This procedure should be separate from the db initialization, because
    it will have to be run periodically whenever data has been updated.
"""
for meaning in DBSession.query(ComparisonMeaning).options(
joinedload_all(common.Parameter.valuesets, common.ValueSet.values)
):
meaning.representation = sum([len(vs.values) for vs in meaning.valuesets])
if meaning.representation == 0:
meaning.active = False
for word in DBSession.query(Word).options(joinedload(Word.meanings)):
word.description = ' / '.join(m.name for m in word.meanings if m.language == 'en')
for d in DBSession.query(Dictionary).options(joinedload(Dictionary.words)):
d.count_words = len(d.words)
if __name__ == '__main__':
initializedb(create=main, prime_cache=prime_cache)
| [
"[email protected]"
]
| |
cb325b725cf13cd0684e1a897ad69aa6f2113cf7 | f84624d2f04c730e411e265e0a2fd97b6cfe6107 | /anomaly_detection/CIFAR/run_cifar_dec.py | 01915d1b0e3b643c119627d251a7659982825755 | [
"Apache-2.0"
]
| permissive | thu-spmi/Inclusive-NRF | 1326cac36140f71bc05f4f71cd35a6024a97b394 | e4e6ae6edca8f8d11a51f649609a8f7675d22f99 | refs/heads/main | 2023-01-20T01:22:34.444134 | 2020-11-21T13:03:02 | 2020-11-21T13:03:02 | 314,812,840 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--lrd', type=float, default=1e-3)
parser.add_argument('--lrg', type=float, default=1e-3)
parser.add_argument('--loss', type=str, default='hinge')
parser.add_argument('--gpu', default='0' ,type=str)
parser.add_argument('--opt', type=str, default='rms')
parser.add_argument('--gw', default=1.0 ,type=float)
parser.add_argument('--L', default=10 ,type=int)
parser.add_argument('--fxp', default=0.1 ,type=float)
parser.add_argument('--del_we', default=1 ,type=float)
parser.add_argument('--max_e', default=100 ,type=int)
parser.add_argument('--alpha', default=0 ,type=float)
parser.add_argument('--eta', default=0.03,type=float)
parser.add_argument('--sf', type=str, default='')
parser.add_argument('--load', type=str, default='')
parser.add_argument('--cof', default=0,type=float)
parser.add_argument('--sig', default=0,type=float)
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--method', type=int, default=1)
parser.add_argument('--no', type=str, default='0')  # the training set uses only this one label
args = parser.parse_args()
print(args)
if __name__ == '__main__':
import pickle
import numpy as np
import os, sys
import cifar_dec
if not os.path.exists('cifar_result'):
os.mkdir('cifar_result')
if args.method==1:
        # Run one experiment for the given label and seed.
args.no=int(args.no)
best_score=cifar_dec.main(args.no,args.seed,args)
print("num:",args.no,"seed:",args.seed,"best:",best_score)
sys.stdout.flush()
elif args.method==2:
        # Run experiments for the given set of labels with seeds 1-10.
if os.path.exists('cifar_result/cifar_nrf_dec_%s'%args.sf):
b_s=pickle.load(open('cifar_result/cifar_nrf_dec_%s'%args.sf,'rb' ))
else:b_s=np.zeros((10,10))
num_all=[int(num) for num in args.no.split(',')]
for num in num_all:
for seed in range(1,11):
best_score=cifar_dec.main(num,seed,args)
print("num:",num,"seed:",seed,"best:",best_score)
sys.stdout.flush()
b_s[num,seed-1]=best_score
print(b_s)
sys.stdout.flush()
pickle.dump(b_s,open('cifar_result/cifar_nrf_dec_%s'%args.sf,'wb' ))
print(np.mean(b_s,1))
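
# Example invocations (illustrative):
#   python run_cifar_dec.py --method 1 --no 3 --seed 2
#   python run_cifar_dec.py --method 2 --no 0,1,2 --sf trial1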
| [
"[email protected]"
]
| |
093dc3c5b815c2667c195e70c4bc2fd0a494f163 | fb909b0716f62ae118afa7d505cbcbd28f62bc63 | /main/migrations/0077_auto_20201010_0437.py | 8b774df3242d4e5b429aa27ceafc4c8b4dc7bc60 | []
| no_license | dkalola/JustAsk-Final | a5b951462cd3c88eb84320bb8fcf10c32f959090 | c2e7c2ffae4d3c2d870d5ba5348a6bae62db5319 | refs/heads/main | 2023-05-24T16:02:17.425251 | 2021-06-16T19:33:52 | 2021-06-16T19:33:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,885 | py | # Generated by Django 3.1.1 on 2020-10-10 04:37
import datetime
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('main', '0076_auto_20201010_0435'),
]
operations = [
migrations.AlterField(
model_name='buybook',
name='EndDate',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 340261), null=True, verbose_name='End Date of Rental book'),
),
migrations.AlterField(
model_name='buybook',
name='StartDate',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 340234), null=True, verbose_name='Start Date Rental book'),
),
migrations.AlterField(
model_name='paper',
name='Date',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 339646), null=True, verbose_name='Date Of Paper'),
),
migrations.AlterField(
model_name='question',
name='qid',
field=models.CharField(default='NEPGMHEF', max_length=8, unique=True, verbose_name='Question ID'),
),
migrations.AlterField(
model_name='student',
name='EndDate',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 330236), null=True, verbose_name='End Date of Subscription'),
),
migrations.AlterField(
model_name='student',
name='StartDate',
field=models.DateTimeField(blank=True, default=datetime.datetime(2020, 10, 10, 4, 37, 18, 330199), null=True, verbose_name='Start Date of Subscription'),
),
]
| [
"[email protected]"
]
| |
5f24e7bc76f14caf10ab3af146a0abf668fc22b7 | 557a7a2b3e6eb759158e6c135b3abf853ab80fe9 | /utils/fairseq_mod/fairseq_mod/modules/dynamic_convolution.py | dd2fff8fa571869501a36a39e9dbcddfcc2ed02e | [
"MIT"
]
| permissive | fadelmuli/Knowledge-Distillation-Toolkit | 7f23212956773c2015f3a1d220c3b17abee3d17b | e6d6776bb66ff6b16175e707ff3eb19e5ba1c8c0 | refs/heads/main | 2023-08-17T01:46:14.127765 | 2021-09-21T09:20:10 | 2021-09-21T09:20:10 | 408,751,563 | 0 | 0 | MIT | 2021-09-21T09:00:15 | 2021-09-21T09:00:14 | null | UTF-8 | Python | false | false | 11,073 | py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from fairseq_mod import utils
from .unfold import unfold1d
from fairseq_mod.incremental_decoding_utils import with_incremental_state
from fairseq_mod.modules.fairseq_dropout import FairseqDropout
def DynamicConv(input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False,
renorm_padding=False, bias=False, conv_bias=False,
query_size=None, in_proj=False):
if torch.cuda.is_available():
try:
from fairseq_mod.modules.dynamicconv_layer import DynamicconvLayer
return DynamicconvLayer(input_size, kernel_size=kernel_size,
padding_l=padding_l, num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax, bias=bias)
except ImportError as e:
print(e)
return DynamicConv1dTBC(input_size, kernel_size=kernel_size,
padding_l=padding_l, num_heads=num_heads,
weight_dropout=weight_dropout,
weight_softmax=weight_softmax, bias=bias)
def Linear(in_features, out_features, bias=True):
m = nn.Linear(in_features, out_features, bias)
nn.init.xavier_uniform_(m.weight)
if bias:
nn.init.constant_(m.bias, 0.)
return m
@with_incremental_state
class DynamicConv1dTBC(nn.Module):
'''Dynamic lightweight convolution taking T x B x C inputs
Args:
input_size: # of channels of the input
kernel_size: convolution channels
padding_l: padding to the left when using "same" padding
num_heads: number of heads used. The weight is of shape (num_heads, 1, kernel_size)
weight_dropout: the drop rate of the DropConnect to drop the weight
weight_softmax: normalize the weight with softmax before the convolution
renorm_padding: re-normalize the filters to ignore the padded part (only the non-padding parts sum up to 1)
bias: use bias
conv_bias: bias of the convolution
query_size: specified when feeding a different input as the query
in_proj: project the input and generate the filter together
Shape:
Input: TxBxC, i.e. (timesteps, batch_size, input_size)
Output: TxBxC, i.e. (timesteps, batch_size, input_size)
Attributes:
weight: the learnable weights of the module of shape
`(num_heads, 1, kernel_size)`
bias: the learnable bias of the module of shape `(input_size)`
'''
def __init__(self, input_size, kernel_size=1, padding_l=None, num_heads=1,
weight_dropout=0., weight_softmax=False,
renorm_padding=False, bias=False, conv_bias=False,
query_size=None, in_proj=False):
super().__init__()
self.input_size = input_size
self.query_size = input_size if query_size is None else query_size
self.kernel_size = kernel_size
self.padding_l = padding_l
self.num_heads = num_heads
self.weight_dropout_module = FairseqDropout(weight_dropout, module_name=self.__class__.__name__)
self.weight_softmax = weight_softmax
self.renorm_padding = renorm_padding
if in_proj:
self.weight_linear = Linear(self.input_size, self.input_size + num_heads * kernel_size * 1)
else:
self.weight_linear = Linear(self.query_size, num_heads * kernel_size * 1, bias=bias)
if conv_bias:
self.conv_bias = nn.Parameter(torch.Tensor(input_size))
else:
self.conv_bias = None
self.reset_parameters()
@property
def in_proj(self):
return self.weight_linear.out_features == self.input_size + self.num_heads * self.kernel_size
def reset_parameters(self):
self.weight_linear.reset_parameters()
if self.conv_bias is not None:
nn.init.constant_(self.conv_bias, 0.)
def forward(self, x, incremental_state=None, query=None, unfold=None):
'''Assuming the input, x, of the shape T x B x C and producing an output in the shape T x B x C
args:
x: Input of shape T x B x C, i.e. (timesteps, batch_size, input_size)
incremental_state: A dict to keep the state
unfold: unfold the input or not. If not, we use the matrix trick instead
query: use the specified query to predict the conv filters
'''
unfold = x.size(0) > 512 if unfold is None else unfold # use unfold mode as default for long sequence to save memory
unfold = unfold or (incremental_state is not None)
assert query is None or not self.in_proj
if query is None:
query = x
if unfold:
output = self._forward_unfolded(x, incremental_state, query)
else:
output = self._forward_expanded(x, incremental_state, query)
if self.conv_bias is not None:
output = output + self.conv_bias.view(1, 1, -1)
return output
def _forward_unfolded(self, x, incremental_state, query):
'''The conventional implementation of convolutions.
Unfolding the input by having a window shifting to the right.'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
else:
weight = self.weight_linear(query).view(T*B*H, -1)
# renorm_padding is only implemented in _forward_expanded
assert not self.renorm_padding or incremental_state is not None
if incremental_state is not None:
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is None:
input_buffer = x.new()
x_unfold = torch.cat([input_buffer, x.unsqueeze(3)], dim=3)
if self.kernel_size > 1:
self._set_input_buffer(incremental_state, x_unfold[:, :, :, -self.kernel_size+1:])
x_unfold = x_unfold.view(T*B*H, R, -1)
else:
padding_l = self.padding_l
if K > T and padding_l == K-1:
weight = weight.narrow(1, K-T, T)
K, padding_l = T, T-1
# unfold the input: T x B x C --> T' x B x C x K
x_unfold = unfold1d(x, K, padding_l, 0)
x_unfold = x_unfold.view(T*B*H, R, K)
if self.weight_softmax and not self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = weight.narrow(1, 0, K)
if incremental_state is not None:
weight = weight[:, -x_unfold.size(2):]
K = weight.size(1)
if self.weight_softmax and self.renorm_padding:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
output = torch.bmm(x_unfold, weight.unsqueeze(2)) # T*B*H x R x 1
output = output.view(T, B, C)
return output
def _forward_expanded(self, x, incremental_stat, query):
'''Turn the convolution filters into band matrices and do matrix multiplication.
This is faster when the sequence is short, but less memory efficient.
This is not used in the decoder during inference.
'''
T, B, C = x.size()
K, H = self.kernel_size, self.num_heads
R = C // H
assert R * H == C == self.input_size
if self.in_proj:
proj = self.weight_linear(x)
x = proj.narrow(2, 0, self.input_size).contiguous()
weight = proj.narrow(2, self.input_size, H*K).contiguous().view(T*B*H, -1)
else:
weight = self.weight_linear(query).view(T*B*H, -1)
if not self.renorm_padding:
if self.weight_softmax:
weight = F.softmax(weight, dim=1)
weight = self.weight_dropout_module(weight, inplace=False)
weight = weight.narrow(1, 0, K).contiguous()
weight = weight.view(T, B*H, K).transpose(0, 1)
x = x.view(T, B*H, R).transpose(0, 1)
if self.weight_softmax and self.renorm_padding:
# turn the convolution filters into band matrices
weight_expanded = weight.new(B*H, T, T+K-1).fill_(float('-inf'))
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, self.padding_l, T)
# normalize the weight over valid positions like self-attention
weight_expanded = F.softmax(weight_expanded, dim=2)
weight_expanded = self.weight_dropout_module(weight_expanded, inplace=False)
else:
P = self.padding_l
            # For efficiency, we cut the kernel size and reduce the padding when the kernel is larger than the length
if K > T and P == K-1:
weight = weight.narrow(2, K-T, T)
K, P = T, T-1
# turn the convolution filters into band matrices
weight_expanded = weight.new_zeros(B*H, T, T+K-1, requires_grad=False)
weight_expanded.as_strided((B*H, T, K), (T*(T+K-1), T+K, 1)).copy_(weight)
weight_expanded = weight_expanded.narrow(2, P, T) # B*H x T x T
output = torch.bmm(weight_expanded, x)
output = output.transpose(0, 1).contiguous().view(T, B, C)
return output
def reorder_incremental_state(self, incremental_state, new_order):
input_buffer = self._get_input_buffer(incremental_state)
if input_buffer is not None:
input_buffer = input_buffer.index_select(1, new_order)
self._set_input_buffer(incremental_state, input_buffer)
def _get_input_buffer(self, incremental_state):
return utils.get_incremental_state(self, incremental_state, 'input_buffer')
def _set_input_buffer(self, incremental_state, new_buffer):
return utils.set_incremental_state(self, incremental_state, 'input_buffer', new_buffer)
def extra_repr(self):
s = '{}, kernel_size={}, padding_l={}, num_heads={}, weight_softmax={}, conv_bias={}, renorm_padding={}, in_proj={}'.format(
self.input_size, self.kernel_size, self.padding_l,
self.num_heads, self.weight_softmax, self.conv_bias is not None, self.renorm_padding,
self.in_proj,
)
if self.query_size != self.input_size:
s += ', query_size={}'.format(self.query_size)
if self.weight_dropout_module.p > 0.:
s += ', weight_dropout={}'.format(self.weight_dropout_module.p)
return s
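# --- Added illustration (not part of the original module) --------------------
# Minimal sketch of the band-matrix trick used in _forward_expanded, in plain
# PyTorch. All shapes and values below are invented for demonstration; B_H
# stands for batch_size * num_heads, and P mirrors padding_l == K - 1.
if __name__ == "__main__":
    import torch
    B_H, T, K, R = 8, 5, 3, 4  # batch*heads, timesteps, kernel size, head dim
    weight = torch.softmax(torch.randn(B_H, T, K), dim=-1)  # per-step filters
    x = torch.randn(B_H, T, R)
    P = K - 1  # left padding for a causal window
    band = weight.new_zeros(B_H, T, T + K - 1)
    band.as_strided((B_H, T, K), (T * (T + K - 1), T + K, 1)).copy_(weight)
    band = band.narrow(2, P, T)  # B_H x T x T band matrix of filters
    out = torch.bmm(band, x)  # equals unfolding x into windows and weighting
    print(out.shape)  # torch.Size([8, 5, 4])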
| [
"[email protected]"
]
| |
b29b703f1530373509e91bc7c4ff79b5dd754d1a | 71e43068e82c91acbb3849169d1723f1375ac27f | /talon_one/models/feature_flag.py | a4e731d55e6fae5e31d2af2b4a4e26d311e5dfcb | [
"MIT"
]
| permissive | talon-one/talon_one.py | aa08a1dbddd8ea324846ae022e43d441c57028f6 | 917dffb010e3d3e2f841be9cccba5bba1ea6c5c3 | refs/heads/master | 2023-05-11T18:50:00.041890 | 2023-05-03T20:17:39 | 2023-05-03T20:17:39 | 79,575,913 | 1 | 7 | MIT | 2023-05-03T15:10:14 | 2017-01-20T16:29:46 | Python | UTF-8 | Python | false | false | 6,527 | py | # coding: utf-8
"""
Talon.One API
    Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) to integrate with our platform - Use the operations in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you access the Campaign Manager at `https://yourbaseurl.talon.one/`, the URL for the [updateCustomerSessionV2](https://docs.talon.one/integration-api#operation/updateCustomerSessionV2) endpoint is `https://yourbaseurl.talon.one/v2/customer_sessions/{Id}`  # noqa: E501
The version of the OpenAPI document:
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class FeatureFlag(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'name': 'str',
'value': 'str',
'created': 'datetime',
'modified': 'datetime'
}
attribute_map = {
'name': 'name',
'value': 'value',
'created': 'created',
'modified': 'modified'
}
def __init__(self, name=None, value=None, created=None, modified=None, local_vars_configuration=None): # noqa: E501
"""FeatureFlag - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._name = None
self._value = None
self._created = None
self._modified = None
self.discriminator = None
self.name = name
self.value = value
if created is not None:
self.created = created
if modified is not None:
self.modified = modified
@property
def name(self):
"""Gets the name of this FeatureFlag. # noqa: E501
The name of the feature flag. # noqa: E501
:return: The name of this FeatureFlag. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this FeatureFlag.
The name of the feature flag. # noqa: E501
:param name: The name of this FeatureFlag. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def value(self):
"""Gets the value of this FeatureFlag. # noqa: E501
The value of the feature flag. # noqa: E501
:return: The value of this FeatureFlag. # noqa: E501
:rtype: str
"""
return self._value
@value.setter
def value(self, value):
"""Sets the value of this FeatureFlag.
The value of the feature flag. # noqa: E501
:param value: The value of this FeatureFlag. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and value is None: # noqa: E501
raise ValueError("Invalid value for `value`, must not be `None`") # noqa: E501
self._value = value
@property
def created(self):
"""Gets the created of this FeatureFlag. # noqa: E501
        The time this entity was created.  # noqa: E501
:return: The created of this FeatureFlag. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this FeatureFlag.
        The time this entity was created.  # noqa: E501
:param created: The created of this FeatureFlag. # noqa: E501
:type: datetime
"""
self._created = created
@property
def modified(self):
"""Gets the modified of this FeatureFlag. # noqa: E501
The time this entity was last modified. # noqa: E501
:return: The modified of this FeatureFlag. # noqa: E501
:rtype: datetime
"""
return self._modified
@modified.setter
def modified(self, modified):
"""Sets the modified of this FeatureFlag.
The time this entity was last modified. # noqa: E501
:param modified: The modified of this FeatureFlag. # noqa: E501
:type: datetime
"""
self._modified = modified
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, FeatureFlag):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, FeatureFlag):
return True
return self.to_dict() != other.to_dict()
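# --- Added illustration (not part of the generated client) -------------------
# Round-trip a flag through to_dict(); the field values here are invented.
if __name__ == "__main__":
    flag = FeatureFlag(name="loyalty_program", value="enabled")
    print(flag.to_dict())  # created/modified stay None unless supplied
    print(flag == FeatureFlag(name="loyalty_program", value="enabled"))  # True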
| [
"[email protected]"
]
| |
278e1903f4c212e40feb622879c936eec4220cdb | 6ee1a53f2da154008a79df40a5a2cb9f77d36409 | /study/threading_learn.py | 8f168445e1d388a3755b4c721ff677314e04f653 | []
| no_license | zuoguagua/demo | 939f219c29cf0eae6c08c96cd578f1e566819243 | 504a551a5153848a9a173d9925132caea9806c25 | refs/heads/master | 2021-01-18T15:06:13.667246 | 2016-01-18T09:09:41 | 2016-01-18T09:09:41 | 44,574,876 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,393 | py | #!/usr/bin/env python
import logging
import Queue
import threading
def func_a(a,b):
return a+b
def func_b():
pass
def func_c(a,b,c):
return a,b,c
_task_queue = Queue.Queue()
def async_call(function,callback,*args,**kwargs):
_task_queue.put({
'function':function,
'callback':callback,
'args':args,
'kwargs':kwargs
})
def _task_queue_consumer():
while True:
try:
task = _task_queue.get()
function = task.get('function')
callback = task.get('callback')
args = task.get('args')
kwargs = task.get('kwargs')
try:
if callback:
callback(function(*args,**kwargs))
except Exception as ex:
if callback:
callback(ex)
finally:
_task_queue.task_done()
except Exception as ex:
logging.warning(ex)
def handle_result(result):
print(type(result),result)
if __name__ == "__main__":
t = threading.Thread(target=_task_queue_consumer)
t.daemon = True
t.start()
async_call(func_a,handle_result,1,2)
async_call(func_b,handle_result)
async_call(func_c,handle_result,1,2,3)
async_call(func_c,handle_result,1,2,3,4)
_task_queue.join()
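# Added note (illustrative): _task_queue_consumer passes exceptions to the
# callback instead of raising, so a callback can branch on the result type:
#
#     def handle_result(result):
#         if isinstance(result, Exception):
#             logging.warning('task failed: %s', result)
#         else:
#             print(type(result), result)
#
# e.g. async_call(func_c, handle_result, 1, 2, 3, 4) above delivers a TypeError
# because func_c takes only three arguments.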
| [
"[email protected]"
]
| |
c558f48d172a81752e398f010623632a3e38e65e | 3bdcb60b0bffeeb6ff7b0ddca4792b682158bb12 | /4.2.9-AnidamientoDeEstructuras.py | 8a1e89b2e21ae68af8c778aa5476bdd7cc891382 | []
| no_license | FrankCasanova/Python | 03c811801ec8ecd5ace66914f984a94f12befe06 | 03f15100991724a49437df3ce704837812173fc5 | refs/heads/master | 2023-05-23T01:37:12.632204 | 2021-06-10T15:20:38 | 2021-06-10T15:20:38 | 278,167,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | # example
limit = int(input('Give me a number: '))
for num in range(2, limit):
primo = True
for divisor in range(2, num):
if num % divisor == 0:
primo = False
break
if primo:
        print('the number {0} is prime'.format(num))
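# Example run (added for illustration): entering 10 prints
#   the number 2 is prime
#   the number 3 is prime
#   the number 5 is prime
#   the number 7 is prime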
| [
"[email protected]"
]
| |
c7dcaec059d5297db4608373da3174aaf6d96ac2 | 7343194126b632ff5ac76fa3291de9ecf5b53e38 | /lib/carbon/tests/benchmark_routers.py | cc29ac29d0e725f8de9bab5434a58880bc3a0df7 | [
"Apache-2.0"
]
| permissive | zillow/carbon | a885f226347d66cebe8dda33573a1efbc44e3078 | 07244f98e8ddf305a0b2cc2da1bcc1a86b613ce6 | refs/heads/master | 2020-12-26T00:46:14.220907 | 2019-10-09T02:23:40 | 2019-10-09T02:23:40 | 46,576,478 | 0 | 0 | Apache-2.0 | 2019-10-09T02:23:41 | 2015-11-20T17:24:21 | Python | UTF-8 | Python | false | false | 2,476 | py | import os
import timeit
from carbon.routers import DatapointRouter
from test_routers import createSettings
REPLICATION_FACTORS = [1, 4]
DIVERSE_REPLICAS = [True, False]
N_DESTINATIONS = [1, 16, 32, 48]
def print_stats(r, t):
usec = t * 1e6
msec = usec / 1000
text = " %s %s datapoints: %d" % (r.plugin_name, r.__id, r.__count)
if usec < 1000:
text += " usecs: %d" % int(usec)
elif msec < 1000:
text += " msecs: %d" % int(msec)
else:
sec = msec / 1000
text += " secs: %3g" % sec
    print(text)
def generateDestinations(n):
for i in xrange(n):
host_id = i % 10
instance_id = i
port = 2000 + i
yield ('carbon%d' % host_id, port, instance_id)
def benchmark(router_class):
for replication_factor in REPLICATION_FACTORS:
for diverse_replicas in DIVERSE_REPLICAS:
for n_destinations in N_DESTINATIONS:
destinations = list(generateDestinations(n_destinations))
settings = createSettings()
settings['REPLICATION_FACTOR'] = replication_factor
settings['DIVERSE_REPLICAS'] = diverse_replicas
settings['DESTINATIONS'] = destinations
router = router_class(settings)
router.__count = 0 # Ugly hack for timeit !
router.__id = (
                    ' replication_factor: %d' % replication_factor +
' diverse_replicas: %d' % diverse_replicas +
' n_destinations: %-5d' % n_destinations)
settings.DESTINATIONS = []
for destination in destinations:
router.addDestination(destination)
settings.DESTINATIONS.append(
'%s:%s:%s' % (
destination[0], destination[1], destination[2]))
benchmark_router(router)
def benchmark_router(router):
def router_getDestinations():
router.__count += 1
dst = list(router.getDestinations('foo.%d' % router.__count))
assert(len(dst) != 0)
n = 100000
t = timeit.timeit(router_getDestinations, number=n)
print_stats(router, t)
def main():
for router_class in DatapointRouter.plugins.values():
# Skip 'rules' because it's hard to mock.
if router_class.plugin_name == 'rules':
continue
benchmark(router_class)
if __name__ == '__main__':
main()
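# Added note: each configuration prints one line via print_stats, shaped like
# the following (plugin name and timing are illustrative only):
#   consistent-hashing  replication_factor: 1 diverse_replicas: 1 n_destinations: 16    datapoints: 100000 msecs: 812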
| [
"[email protected]"
]
| |
d57922de8ff26143079f9fc5a2f6d089beb37cdf | 4f4dc1e6235b068f3346a0df66740e216fd3a993 | /whoosh/support/unicode.py | 2e201929fcc49421f61dccf2a1ecd6b91781db05 | []
| no_license | intabeta/inta | 940dc94ecffde4b82dab87ffc5ca7bfcb1a391fa | 10d479630d8398dfbea34020584eeaaff14961fc | refs/heads/master | 2021-01-23T09:49:15.992045 | 2014-02-08T01:23:49 | 2014-02-08T01:23:49 | 9,431,398 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,136 | py | import re
from bisect import bisect_right
from whoosh.compat import text_type, u
# http://unicode.org/Public/UNIDATA/Blocks.txt
_blockdata = '''
# Blocks-5.1.0.txt
# Date: 2008-03-20, 17:41:00 PDT [KW]
#
# Unicode Character Database
# Copyright (c) 1991-2008 Unicode, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
# For documentation, see UCD.html
#
# Note: The casing of block names is not normative.
# For example, "Basic Latin" and "BASIC LATIN" are equivalent.
#
# Format:
# Start Code..End Code; Block Name
# ================================================
# Note: When comparing block names, casing, whitespace, hyphens,
# and underbars are ignored.
# For example, "Latin Extended-A" and "latin extended a" are equivalent
# For more information on the comparison of property values,
# see UCD.html.
#
# All code points not explicitly listed for Block
# have the value No_Block.
# Property: Block
#
# @missing: 0000..10FFFF; No_Block
0000..007F; Basic Latin
0080..00FF; Latin-1 Supplement
0100..017F; Latin Extended-A
0180..024F; Latin Extended-B
0250..02AF; IPA Extensions
02B0..02FF; Spacing Modifier Letters
0300..036F; Combining Diacritical Marks
0370..03FF; Greek and Coptic
0400..04FF; Cyrillic
0500..052F; Cyrillic Supplement
0530..058F; Armenian
0590..05FF; Hebrew
0600..06FF; Arabic
0700..074F; Syriac
0750..077F; Arabic Supplement
0780..07BF; Thaana
07C0..07FF; NKo
0900..097F; Devanagari
0980..09FF; Bengali
0A00..0A7F; Gurmukhi
0A80..0AFF; Gujarati
0B00..0B7F; Oriya
0B80..0BFF; Tamil
0C00..0C7F; Telugu
0C80..0CFF; Kannada
0D00..0D7F; Malayalam
0D80..0DFF; Sinhala
0E00..0E7F; Thai
0E80..0EFF; Lao
0F00..0FFF; Tibetan
1000..109F; Myanmar
10A0..10FF; Georgian
1100..11FF; Hangul Jamo
1200..137F; Ethiopic
1380..139F; Ethiopic Supplement
13A0..13FF; Cherokee
1400..167F; Unified Canadian Aboriginal Syllabics
1680..169F; Ogham
16A0..16FF; Runic
1700..171F; Tagalog
1720..173F; Hanunoo
1740..175F; Buhid
1760..177F; Tagbanwa
1780..17FF; Khmer
1800..18AF; Mongolian
1900..194F; Limbu
1950..197F; Tai Le
1980..19DF; New Tai Lue
19E0..19FF; Khmer Symbols
1A00..1A1F; Buginese
1B00..1B7F; Balinese
1B80..1BBF; Sundanese
1C00..1C4F; Lepcha
1C50..1C7F; Ol Chiki
1D00..1D7F; Phonetic Extensions
1D80..1DBF; Phonetic Extensions Supplement
1DC0..1DFF; Combining Diacritical Marks Supplement
1E00..1EFF; Latin Extended Additional
1F00..1FFF; Greek Extended
2000..206F; General Punctuation
2070..209F; Superscripts and Subscripts
20A0..20CF; Currency Symbols
20D0..20FF; Combining Diacritical Marks for Symbols
2100..214F; Letterlike Symbols
2150..218F; Number Forms
2190..21FF; Arrows
2200..22FF; Mathematical Operators
2300..23FF; Miscellaneous Technical
2400..243F; Control Pictures
2440..245F; Optical Character Recognition
2460..24FF; Enclosed Alphanumerics
2500..257F; Box Drawing
2580..259F; Block Elements
25A0..25FF; Geometric Shapes
2600..26FF; Miscellaneous Symbols
2700..27BF; Dingbats
27C0..27EF; Miscellaneous Mathematical Symbols-A
27F0..27FF; Supplemental Arrows-A
2800..28FF; Braille Patterns
2900..297F; Supplemental Arrows-B
2980..29FF; Miscellaneous Mathematical Symbols-B
2A00..2AFF; Supplemental Mathematical Operators
2B00..2BFF; Miscellaneous Symbols and Arrows
2C00..2C5F; Glagolitic
2C60..2C7F; Latin Extended-C
2C80..2CFF; Coptic
2D00..2D2F; Georgian Supplement
2D30..2D7F; Tifinagh
2D80..2DDF; Ethiopic Extended
2DE0..2DFF; Cyrillic Extended-A
2E00..2E7F; Supplemental Punctuation
2E80..2EFF; CJK Radicals Supplement
2F00..2FDF; Kangxi Radicals
2FF0..2FFF; Ideographic Description Characters
3000..303F; CJK Symbols and Punctuation
3040..309F; Hiragana
30A0..30FF; Katakana
3100..312F; Bopomofo
3130..318F; Hangul Compatibility Jamo
3190..319F; Kanbun
31A0..31BF; Bopomofo Extended
31C0..31EF; CJK Strokes
31F0..31FF; Katakana Phonetic Extensions
3200..32FF; Enclosed CJK Letters and Months
3300..33FF; CJK Compatibility
3400..4DBF; CJK Unified Ideographs Extension A
4DC0..4DFF; Yijing Hexagram Symbols
4E00..9FFF; CJK Unified Ideographs
A000..A48F; Yi Syllables
A490..A4CF; Yi Radicals
A500..A63F; Vai
A640..A69F; Cyrillic Extended-B
A700..A71F; Modifier Tone Letters
A720..A7FF; Latin Extended-D
A800..A82F; Syloti Nagri
A840..A87F; Phags-pa
A880..A8DF; Saurashtra
A900..A92F; Kayah Li
A930..A95F; Rejang
AA00..AA5F; Cham
AC00..D7AF; Hangul Syllables
D800..DB7F; High Surrogates
DB80..DBFF; High Private Use Surrogates
DC00..DFFF; Low Surrogates
E000..F8FF; Private Use Area
F900..FAFF; CJK Compatibility Ideographs
FB00..FB4F; Alphabetic Presentation Forms
FB50..FDFF; Arabic Presentation Forms-A
FE00..FE0F; Variation Selectors
FE10..FE1F; Vertical Forms
FE20..FE2F; Combining Half Marks
FE30..FE4F; CJK Compatibility Forms
FE50..FE6F; Small Form Variants
FE70..FEFF; Arabic Presentation Forms-B
FF00..FFEF; Halfwidth and Fullwidth Forms
FFF0..FFFF; Specials
10000..1007F; Linear B Syllabary
10080..100FF; Linear B Ideograms
10100..1013F; Aegean Numbers
10140..1018F; Ancient Greek Numbers
10190..101CF; Ancient Symbols
101D0..101FF; Phaistos Disc
10280..1029F; Lycian
102A0..102DF; Carian
10300..1032F; Old Italic
10330..1034F; Gothic
10380..1039F; Ugaritic
103A0..103DF; Old Persian
10400..1044F; Deseret
10450..1047F; Shavian
10480..104AF; Osmanya
10800..1083F; Cypriot Syllabary
10900..1091F; Phoenician
10920..1093F; Lydian
10A00..10A5F; Kharoshthi
12000..123FF; Cuneiform
12400..1247F; Cuneiform Numbers and Punctuation
1D000..1D0FF; Byzantine Musical Symbols
1D100..1D1FF; Musical Symbols
1D200..1D24F; Ancient Greek Musical Notation
1D300..1D35F; Tai Xuan Jing Symbols
1D360..1D37F; Counting Rod Numerals
1D400..1D7FF; Mathematical Alphanumeric Symbols
1F000..1F02F; Mahjong Tiles
1F030..1F09F; Domino Tiles
20000..2A6DF; CJK Unified Ideographs Extension B
2F800..2FA1F; CJK Compatibility Ideographs Supplement
E0000..E007F; Tags
E0100..E01EF; Variation Selectors Supplement
F0000..FFFFF; Supplementary Private Use Area-A
100000..10FFFF; Supplementary Private Use Area-B
# EOF
'''
pattern = re.compile(r'([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)')
_starts = []
_ends = []
_names = []
class blocks(object):
pass
def _init():
count = 0
for line in _blockdata.splitlines():
m = pattern.match(line)
if m:
start, end, name = m.groups()
_starts.append(int(start, 16))
_ends.append(int(end, 16))
_names.append(name)
setattr(blocks, name.replace(" ", "_"), count)
count += 1
_init()
def blockname(ch):
"""Return the Unicode block name for ch, or None if ch has no block.
>>> blockname(u'a')
'Basic Latin'
>>> blockname(unichr(0x0b80))
'Tamil'
    >>> blockname(unichr(2048))
None
"""
assert isinstance(ch, text_type) and len(ch) == 1, repr(ch)
cp = ord(ch)
i = bisect_right(_starts, cp) - 1
end = _ends[i]
if cp > end:
return None
return _names[i]
def blocknum(ch):
"""Returns the unicode block number for ch, or None if ch has no block.
>>> blocknum(u'a')
0
>>> blocknum(unichr(0x0b80))
22
>>> blocknum(unichr(2048))
None
"""
cp = ord(ch)
i = bisect_right(_starts, cp) - 1
end = _ends[i]
if cp > end:
return None
return i
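# Worked example (added): each lookup is a bisect into _starts, so both
# helpers cost O(log n) per character. For U+0B95 (TAMIL LETTER KA):
#
#     >>> blockname(u('\u0b95')), blocknum(u('\u0b95'))
#     ('Tamil', 22)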
digits = u('0123456789\xb2\xb3\xb9\u0660\u0661\u0662\u0663\u0664\u0665\u0666'
'\u0667\u0668\u0669\u06f0\u06f1\u06f2\u06f3\u06f4\u06f5\u06f6\u06f7'
'\u06f8\u06f9\u07c0\u07c1\u07c2\u07c3\u07c4\u07c5\u07c6\u07c7\u07c8'
'\u07c9\u0966\u0967\u0968\u0969\u096a\u096b\u096c\u096d\u096e\u096f'
'\u09e6\u09e7\u09e8\u09e9\u09ea\u09eb\u09ec\u09ed\u09ee\u09ef\u0a66'
'\u0a67\u0a68\u0a69\u0a6a\u0a6b\u0a6c\u0a6d\u0a6e\u0a6f\u0ae6\u0ae7'
'\u0ae8\u0ae9\u0aea\u0aeb\u0aec\u0aed\u0aee\u0aef\u0b66\u0b67\u0b68'
'\u0b69\u0b6a\u0b6b\u0b6c\u0b6d\u0b6e\u0b6f\u0be6\u0be7\u0be8\u0be9'
'\u0bea\u0beb\u0bec\u0bed\u0bee\u0bef\u0c66\u0c67\u0c68\u0c69\u0c6a'
'\u0c6b\u0c6c\u0c6d\u0c6e\u0c6f\u0ce6\u0ce7\u0ce8\u0ce9\u0cea\u0ceb'
'\u0cec\u0ced\u0cee\u0cef\u0d66\u0d67\u0d68\u0d69\u0d6a\u0d6b\u0d6c'
'\u0d6d\u0d6e\u0d6f\u0e50\u0e51\u0e52\u0e53\u0e54\u0e55\u0e56\u0e57'
'\u0e58\u0e59\u0ed0\u0ed1\u0ed2\u0ed3\u0ed4\u0ed5\u0ed6\u0ed7\u0ed8'
'\u0ed9\u0f20\u0f21\u0f22\u0f23\u0f24\u0f25\u0f26\u0f27\u0f28\u0f29'
'\u1040\u1041\u1042\u1043\u1044\u1045\u1046\u1047\u1048\u1049\u1090'
'\u1091\u1092\u1093\u1094\u1095\u1096\u1097\u1098\u1099\u1369\u136a'
'\u136b\u136c\u136d\u136e\u136f\u1370\u1371\u17e0\u17e1\u17e2\u17e3'
'\u17e4\u17e5\u17e6\u17e7\u17e8\u17e9\u1810\u1811\u1812\u1813\u1814'
'\u1815\u1816\u1817\u1818\u1819\u1946\u1947\u1948\u1949\u194a\u194b'
'\u194c\u194d\u194e\u194f\u19d0\u19d1\u19d2\u19d3\u19d4\u19d5\u19d6'
'\u19d7\u19d8\u19d9\u19da\u1a80\u1a81\u1a82\u1a83\u1a84\u1a85\u1a86'
'\u1a87\u1a88\u1a89\u1a90\u1a91\u1a92\u1a93\u1a94\u1a95\u1a96\u1a97'
'\u1a98\u1a99\u1b50\u1b51\u1b52\u1b53\u1b54\u1b55\u1b56\u1b57\u1b58'
'\u1b59\u1bb0\u1bb1\u1bb2\u1bb3\u1bb4\u1bb5\u1bb6\u1bb7\u1bb8\u1bb9'
'\u1c40\u1c41\u1c42\u1c43\u1c44\u1c45\u1c46\u1c47\u1c48\u1c49\u1c50'
'\u1c51\u1c52\u1c53\u1c54\u1c55\u1c56\u1c57\u1c58\u1c59\u2070\u2074'
'\u2075\u2076\u2077\u2078\u2079\u2080\u2081\u2082\u2083\u2084\u2085'
'\u2086\u2087\u2088\u2089\u2460\u2461\u2462\u2463\u2464\u2465\u2466'
'\u2467\u2468\u2474\u2475\u2476\u2477\u2478\u2479\u247a\u247b\u247c'
'\u2488\u2489\u248a\u248b\u248c\u248d\u248e\u248f\u2490\u24ea\u24f5'
'\u24f6\u24f7\u24f8\u24f9\u24fa\u24fb\u24fc\u24fd\u24ff\u2776\u2777'
'\u2778\u2779\u277a\u277b\u277c\u277d\u277e\u2780\u2781\u2782\u2783'
'\u2784\u2785\u2786\u2787\u2788\u278a\u278b\u278c\u278d\u278e\u278f'
'\u2790\u2791\u2792\ua620\ua621\ua622\ua623\ua624\ua625\ua626\ua627'
'\ua628\ua629\ua8d0\ua8d1\ua8d2\ua8d3\ua8d4\ua8d5\ua8d6\ua8d7\ua8d8'
'\ua8d9\ua900\ua901\ua902\ua903\ua904\ua905\ua906\ua907\ua908\ua909'
'\ua9d0\ua9d1\ua9d2\ua9d3\ua9d4\ua9d5\ua9d6\ua9d7\ua9d8\ua9d9\uaa50'
'\uaa51\uaa52\uaa53\uaa54\uaa55\uaa56\uaa57\uaa58\uaa59\uabf0\uabf1'
'\uabf2\uabf3\uabf4\uabf5\uabf6\uabf7\uabf8\uabf9\uff10\uff11\uff12'
'\uff13\uff14\uff15\uff16\uff17\uff18\uff19')
lowercase = u('abcdefghijklmnopqrstuvwxyz\xaa\xb5\xba\xdf\xe0\xe1\xe2\xe3\xe4'
'\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef\xf0\xf1\xf2\xf3'
'\xf4\xf5\xf6\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff\u0101\u0103\u0105'
'\u0107\u0109\u010b\u010d\u010f\u0111\u0113\u0115\u0117\u0119'
'\u011b\u011d\u011f\u0121\u0123\u0125\u0127\u0129\u012b\u012d'
'\u012f\u0131\u0133\u0135\u0137\u0138\u013a\u013c\u013e\u0140'
'\u0142\u0144\u0146\u0148\u0149\u014b\u014d\u014f\u0151\u0153'
'\u0155\u0157\u0159\u015b\u015d\u015f\u0161\u0163\u0165\u0167'
'\u0169\u016b\u016d\u016f\u0171\u0173\u0175\u0177\u017a\u017c'
'\u017e\u017f\u0180\u0183\u0185\u0188\u018c\u018d\u0192\u0195'
'\u0199\u019a\u019b\u019e\u01a1\u01a3\u01a5\u01a8\u01aa\u01ab'
'\u01ad\u01b0\u01b4\u01b6\u01b9\u01ba\u01bd\u01be\u01bf\u01c6'
'\u01c9\u01cc\u01ce\u01d0\u01d2\u01d4\u01d6\u01d8\u01da\u01dc'
'\u01dd\u01df\u01e1\u01e3\u01e5\u01e7\u01e9\u01eb\u01ed\u01ef'
'\u01f0\u01f3\u01f5\u01f9\u01fb\u01fd\u01ff\u0201\u0203\u0205'
'\u0207\u0209\u020b\u020d\u020f\u0211\u0213\u0215\u0217\u0219'
'\u021b\u021d\u021f\u0221\u0223\u0225\u0227\u0229\u022b\u022d'
'\u022f\u0231\u0233\u0234\u0235\u0236\u0237\u0238\u0239\u023c'
'\u023f\u0240\u0242\u0247\u0249\u024b\u024d\u024f\u0250\u0251'
'\u0252\u0253\u0254\u0255\u0256\u0257\u0258\u0259\u025a\u025b'
'\u025c\u025d\u025e\u025f\u0260\u0261\u0262\u0263\u0264\u0265'
'\u0266\u0267\u0268\u0269\u026a\u026b\u026c\u026d\u026e\u026f'
'\u0270\u0271\u0272\u0273\u0274\u0275\u0276\u0277\u0278\u0279'
'\u027a\u027b\u027c\u027d\u027e\u027f\u0280\u0281\u0282\u0283'
'\u0284\u0285\u0286\u0287\u0288\u0289\u028a\u028b\u028c\u028d'
'\u028e\u028f\u0290\u0291\u0292\u0293\u0295\u0296\u0297\u0298'
'\u0299\u029a\u029b\u029c\u029d\u029e\u029f\u02a0\u02a1\u02a2'
'\u02a3\u02a4\u02a5\u02a6\u02a7\u02a8\u02a9\u02aa\u02ab\u02ac'
'\u02ad\u02ae\u02af\u0371\u0373\u0377\u037b\u037c\u037d\u0390'
'\u03ac\u03ad\u03ae\u03af\u03b0\u03b1\u03b2\u03b3\u03b4\u03b5'
'\u03b6\u03b7\u03b8\u03b9\u03ba\u03bb\u03bc\u03bd\u03be\u03bf'
'\u03c0\u03c1\u03c2\u03c3\u03c4\u03c5\u03c6\u03c7\u03c8\u03c9'
'\u03ca\u03cb\u03cc\u03cd\u03ce\u03d0\u03d1\u03d5\u03d6\u03d7'
'\u03d9\u03db\u03dd\u03df\u03e1\u03e3\u03e5\u03e7\u03e9\u03eb'
'\u03ed\u03ef\u03f0\u03f1\u03f2\u03f3\u03f5\u03f8\u03fb\u03fc'
'\u0430\u0431\u0432\u0433\u0434\u0435\u0436\u0437\u0438\u0439'
'\u043a\u043b\u043c\u043d\u043e\u043f\u0440\u0441\u0442\u0443'
'\u0444\u0445\u0446\u0447\u0448\u0449\u044a\u044b\u044c\u044d'
'\u044e\u044f\u0450\u0451\u0452\u0453\u0454\u0455\u0456\u0457'
'\u0458\u0459\u045a\u045b\u045c\u045d\u045e\u045f\u0461\u0463'
'\u0465\u0467\u0469\u046b\u046d\u046f\u0471\u0473\u0475\u0477'
'\u0479\u047b\u047d\u047f\u0481\u048b\u048d\u048f\u0491\u0493'
'\u0495\u0497\u0499\u049b\u049d\u049f\u04a1\u04a3\u04a5\u04a7'
'\u04a9\u04ab\u04ad\u04af\u04b1\u04b3\u04b5\u04b7\u04b9\u04bb'
'\u04bd\u04bf\u04c2\u04c4\u04c6\u04c8\u04ca\u04cc\u04ce\u04cf'
'\u04d1\u04d3\u04d5\u04d7\u04d9\u04db\u04dd\u04df\u04e1\u04e3'
'\u04e5\u04e7\u04e9\u04eb\u04ed\u04ef\u04f1\u04f3\u04f5\u04f7'
'\u04f9\u04fb\u04fd\u04ff\u0501\u0503\u0505\u0507\u0509\u050b'
'\u050d\u050f\u0511\u0513\u0515\u0517\u0519\u051b\u051d\u051f'
'\u0521\u0523\u0525\u0561\u0562\u0563\u0564\u0565\u0566\u0567'
'\u0568\u0569\u056a\u056b\u056c\u056d\u056e\u056f\u0570\u0571'
'\u0572\u0573\u0574\u0575\u0576\u0577\u0578\u0579\u057a\u057b'
'\u057c\u057d\u057e\u057f\u0580\u0581\u0582\u0583\u0584\u0585'
'\u0586\u0587\u1d00\u1d01\u1d02\u1d03\u1d04\u1d05\u1d06\u1d07'
'\u1d08\u1d09\u1d0a\u1d0b\u1d0c\u1d0d\u1d0e\u1d0f\u1d10\u1d11'
'\u1d12\u1d13\u1d14\u1d15\u1d16\u1d17\u1d18\u1d19\u1d1a\u1d1b'
'\u1d1c\u1d1d\u1d1e\u1d1f\u1d20\u1d21\u1d22\u1d23\u1d24\u1d25'
'\u1d26\u1d27\u1d28\u1d29\u1d2a\u1d2b\u1d62\u1d63\u1d64\u1d65'
'\u1d66\u1d67\u1d68\u1d69\u1d6a\u1d6b\u1d6c\u1d6d\u1d6e\u1d6f'
'\u1d70\u1d71\u1d72\u1d73\u1d74\u1d75\u1d76\u1d77\u1d79\u1d7a'
'\u1d7b\u1d7c\u1d7d\u1d7e\u1d7f\u1d80\u1d81\u1d82\u1d83\u1d84'
'\u1d85\u1d86\u1d87\u1d88\u1d89\u1d8a\u1d8b\u1d8c\u1d8d\u1d8e'
'\u1d8f\u1d90\u1d91\u1d92\u1d93\u1d94\u1d95\u1d96\u1d97\u1d98'
'\u1d99\u1d9a\u1e01\u1e03\u1e05\u1e07\u1e09\u1e0b\u1e0d\u1e0f'
'\u1e11\u1e13\u1e15\u1e17\u1e19\u1e1b\u1e1d\u1e1f\u1e21\u1e23'
'\u1e25\u1e27\u1e29\u1e2b\u1e2d\u1e2f\u1e31\u1e33\u1e35\u1e37'
'\u1e39\u1e3b\u1e3d\u1e3f\u1e41\u1e43\u1e45\u1e47\u1e49\u1e4b'
'\u1e4d\u1e4f\u1e51\u1e53\u1e55\u1e57\u1e59\u1e5b\u1e5d\u1e5f'
'\u1e61\u1e63\u1e65\u1e67\u1e69\u1e6b\u1e6d\u1e6f\u1e71\u1e73'
'\u1e75\u1e77\u1e79\u1e7b\u1e7d\u1e7f\u1e81\u1e83\u1e85\u1e87'
'\u1e89\u1e8b\u1e8d\u1e8f\u1e91\u1e93\u1e95\u1e96\u1e97\u1e98'
'\u1e99\u1e9a\u1e9b\u1e9c\u1e9d\u1e9f\u1ea1\u1ea3\u1ea5\u1ea7'
'\u1ea9\u1eab\u1ead\u1eaf\u1eb1\u1eb3\u1eb5\u1eb7\u1eb9\u1ebb'
'\u1ebd\u1ebf\u1ec1\u1ec3\u1ec5\u1ec7\u1ec9\u1ecb\u1ecd\u1ecf'
'\u1ed1\u1ed3\u1ed5\u1ed7\u1ed9\u1edb\u1edd\u1edf\u1ee1\u1ee3'
'\u1ee5\u1ee7\u1ee9\u1eeb\u1eed\u1eef\u1ef1\u1ef3\u1ef5\u1ef7'
'\u1ef9\u1efb\u1efd\u1eff\u1f00\u1f01\u1f02\u1f03\u1f04\u1f05'
'\u1f06\u1f07\u1f10\u1f11\u1f12\u1f13\u1f14\u1f15\u1f20\u1f21'
'\u1f22\u1f23\u1f24\u1f25\u1f26\u1f27\u1f30\u1f31\u1f32\u1f33'
'\u1f34\u1f35\u1f36\u1f37\u1f40\u1f41\u1f42\u1f43\u1f44\u1f45'
'\u1f50\u1f51\u1f52\u1f53\u1f54\u1f55\u1f56\u1f57\u1f60\u1f61'
'\u1f62\u1f63\u1f64\u1f65\u1f66\u1f67\u1f70\u1f71\u1f72\u1f73'
'\u1f74\u1f75\u1f76\u1f77\u1f78\u1f79\u1f7a\u1f7b\u1f7c\u1f7d'
'\u1f80\u1f81\u1f82\u1f83\u1f84\u1f85\u1f86\u1f87\u1f90\u1f91'
'\u1f92\u1f93\u1f94\u1f95\u1f96\u1f97\u1fa0\u1fa1\u1fa2\u1fa3'
'\u1fa4\u1fa5\u1fa6\u1fa7\u1fb0\u1fb1\u1fb2\u1fb3\u1fb4\u1fb6'
'\u1fb7\u1fbe\u1fc2\u1fc3\u1fc4\u1fc6\u1fc7\u1fd0\u1fd1\u1fd2'
'\u1fd3\u1fd6\u1fd7\u1fe0\u1fe1\u1fe2\u1fe3\u1fe4\u1fe5\u1fe6'
'\u1fe7\u1ff2\u1ff3\u1ff4\u1ff6\u1ff7\u210a\u210e\u210f\u2113'
'\u212f\u2134\u2139\u213c\u213d\u2146\u2147\u2148\u2149\u214e'
'\u2184\u2c30\u2c31\u2c32\u2c33\u2c34\u2c35\u2c36\u2c37\u2c38'
'\u2c39\u2c3a\u2c3b\u2c3c\u2c3d\u2c3e\u2c3f\u2c40\u2c41\u2c42'
'\u2c43\u2c44\u2c45\u2c46\u2c47\u2c48\u2c49\u2c4a\u2c4b\u2c4c'
'\u2c4d\u2c4e\u2c4f\u2c50\u2c51\u2c52\u2c53\u2c54\u2c55\u2c56'
'\u2c57\u2c58\u2c59\u2c5a\u2c5b\u2c5c\u2c5d\u2c5e\u2c61\u2c65'
'\u2c66\u2c68\u2c6a\u2c6c\u2c71\u2c73\u2c74\u2c76\u2c77\u2c78'
'\u2c79\u2c7a\u2c7b\u2c7c\u2c81\u2c83\u2c85\u2c87\u2c89\u2c8b'
'\u2c8d\u2c8f\u2c91\u2c93\u2c95\u2c97\u2c99\u2c9b\u2c9d\u2c9f'
'\u2ca1\u2ca3\u2ca5\u2ca7\u2ca9\u2cab\u2cad\u2caf\u2cb1\u2cb3'
'\u2cb5\u2cb7\u2cb9\u2cbb\u2cbd\u2cbf\u2cc1\u2cc3\u2cc5\u2cc7'
'\u2cc9\u2ccb\u2ccd\u2ccf\u2cd1\u2cd3\u2cd5\u2cd7\u2cd9\u2cdb'
'\u2cdd\u2cdf\u2ce1\u2ce3\u2ce4\u2cec\u2cee\u2d00\u2d01\u2d02'
'\u2d03\u2d04\u2d05\u2d06\u2d07\u2d08\u2d09\u2d0a\u2d0b\u2d0c'
'\u2d0d\u2d0e\u2d0f\u2d10\u2d11\u2d12\u2d13\u2d14\u2d15\u2d16'
'\u2d17\u2d18\u2d19\u2d1a\u2d1b\u2d1c\u2d1d\u2d1e\u2d1f\u2d20'
'\u2d21\u2d22\u2d23\u2d24\u2d25\ua641\ua643\ua645\ua647\ua649'
'\ua64b\ua64d\ua64f\ua651\ua653\ua655\ua657\ua659\ua65b\ua65d'
'\ua65f\ua663\ua665\ua667\ua669\ua66b\ua66d\ua681\ua683\ua685'
'\ua687\ua689\ua68b\ua68d\ua68f\ua691\ua693\ua695\ua697\ua723'
'\ua725\ua727\ua729\ua72b\ua72d\ua72f\ua730\ua731\ua733\ua735'
'\ua737\ua739\ua73b\ua73d\ua73f\ua741\ua743\ua745\ua747\ua749'
'\ua74b\ua74d\ua74f\ua751\ua753\ua755\ua757\ua759\ua75b\ua75d'
'\ua75f\ua761\ua763\ua765\ua767\ua769\ua76b\ua76d\ua76f\ua771'
'\ua772\ua773\ua774\ua775\ua776\ua777\ua778\ua77a\ua77c\ua77f'
'\ua781\ua783\ua785\ua787\ua78c\ufb00\ufb01\ufb02\ufb03\ufb04'
'\ufb05\ufb06\ufb13\ufb14\ufb15\ufb16\ufb17\uff41\uff42\uff43'
'\uff44\uff45\uff46\uff47\uff48\uff49\uff4a\uff4b\uff4c\uff4d'
'\uff4e\uff4f\uff50\uff51\uff52\uff53\uff54\uff55\uff56\uff57'
'\uff58\uff59\uff5a')
uppercase = u('ABCDEFGHIJKLMNOPQRSTUVWXYZ\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7\xc8'
'\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd8'
'\xd9\xda\xdb\xdc\xdd\xde\u0100\u0102\u0104\u0106\u0108\u010a'
'\u010c\u010e\u0110\u0112\u0114\u0116\u0118\u011a\u011c\u011e'
'\u0120\u0122\u0124\u0126\u0128\u012a\u012c\u012e\u0130\u0132'
'\u0134\u0136\u0139\u013b\u013d\u013f\u0141\u0143\u0145\u0147'
'\u014a\u014c\u014e\u0150\u0152\u0154\u0156\u0158\u015a\u015c'
'\u015e\u0160\u0162\u0164\u0166\u0168\u016a\u016c\u016e\u0170'
'\u0172\u0174\u0176\u0178\u0179\u017b\u017d\u0181\u0182\u0184'
'\u0186\u0187\u0189\u018a\u018b\u018e\u018f\u0190\u0191\u0193'
'\u0194\u0196\u0197\u0198\u019c\u019d\u019f\u01a0\u01a2\u01a4'
'\u01a6\u01a7\u01a9\u01ac\u01ae\u01af\u01b1\u01b2\u01b3\u01b5'
'\u01b7\u01b8\u01bc\u01c4\u01c7\u01ca\u01cd\u01cf\u01d1\u01d3'
'\u01d5\u01d7\u01d9\u01db\u01de\u01e0\u01e2\u01e4\u01e6\u01e8'
'\u01ea\u01ec\u01ee\u01f1\u01f4\u01f6\u01f7\u01f8\u01fa\u01fc'
'\u01fe\u0200\u0202\u0204\u0206\u0208\u020a\u020c\u020e\u0210'
'\u0212\u0214\u0216\u0218\u021a\u021c\u021e\u0220\u0222\u0224'
'\u0226\u0228\u022a\u022c\u022e\u0230\u0232\u023a\u023b\u023d'
'\u023e\u0241\u0243\u0244\u0245\u0246\u0248\u024a\u024c\u024e'
'\u0370\u0372\u0376\u0386\u0388\u0389\u038a\u038c\u038e\u038f'
'\u0391\u0392\u0393\u0394\u0395\u0396\u0397\u0398\u0399\u039a'
'\u039b\u039c\u039d\u039e\u039f\u03a0\u03a1\u03a3\u03a4\u03a5'
'\u03a6\u03a7\u03a8\u03a9\u03aa\u03ab\u03cf\u03d2\u03d3\u03d4'
'\u03d8\u03da\u03dc\u03de\u03e0\u03e2\u03e4\u03e6\u03e8\u03ea'
'\u03ec\u03ee\u03f4\u03f7\u03f9\u03fa\u03fd\u03fe\u03ff\u0400'
'\u0401\u0402\u0403\u0404\u0405\u0406\u0407\u0408\u0409\u040a'
'\u040b\u040c\u040d\u040e\u040f\u0410\u0411\u0412\u0413\u0414'
'\u0415\u0416\u0417\u0418\u0419\u041a\u041b\u041c\u041d\u041e'
'\u041f\u0420\u0421\u0422\u0423\u0424\u0425\u0426\u0427\u0428'
'\u0429\u042a\u042b\u042c\u042d\u042e\u042f\u0460\u0462\u0464'
'\u0466\u0468\u046a\u046c\u046e\u0470\u0472\u0474\u0476\u0478'
'\u047a\u047c\u047e\u0480\u048a\u048c\u048e\u0490\u0492\u0494'
'\u0496\u0498\u049a\u049c\u049e\u04a0\u04a2\u04a4\u04a6\u04a8'
'\u04aa\u04ac\u04ae\u04b0\u04b2\u04b4\u04b6\u04b8\u04ba\u04bc'
'\u04be\u04c0\u04c1\u04c3\u04c5\u04c7\u04c9\u04cb\u04cd\u04d0'
'\u04d2\u04d4\u04d6\u04d8\u04da\u04dc\u04de\u04e0\u04e2\u04e4'
'\u04e6\u04e8\u04ea\u04ec\u04ee\u04f0\u04f2\u04f4\u04f6\u04f8'
'\u04fa\u04fc\u04fe\u0500\u0502\u0504\u0506\u0508\u050a\u050c'
'\u050e\u0510\u0512\u0514\u0516\u0518\u051a\u051c\u051e\u0520'
'\u0522\u0524\u0531\u0532\u0533\u0534\u0535\u0536\u0537\u0538'
'\u0539\u053a\u053b\u053c\u053d\u053e\u053f\u0540\u0541\u0542'
'\u0543\u0544\u0545\u0546\u0547\u0548\u0549\u054a\u054b\u054c'
'\u054d\u054e\u054f\u0550\u0551\u0552\u0553\u0554\u0555\u0556'
'\u10a0\u10a1\u10a2\u10a3\u10a4\u10a5\u10a6\u10a7\u10a8\u10a9'
'\u10aa\u10ab\u10ac\u10ad\u10ae\u10af\u10b0\u10b1\u10b2\u10b3'
'\u10b4\u10b5\u10b6\u10b7\u10b8\u10b9\u10ba\u10bb\u10bc\u10bd'
'\u10be\u10bf\u10c0\u10c1\u10c2\u10c3\u10c4\u10c5\u1e00\u1e02'
'\u1e04\u1e06\u1e08\u1e0a\u1e0c\u1e0e\u1e10\u1e12\u1e14\u1e16'
'\u1e18\u1e1a\u1e1c\u1e1e\u1e20\u1e22\u1e24\u1e26\u1e28\u1e2a'
'\u1e2c\u1e2e\u1e30\u1e32\u1e34\u1e36\u1e38\u1e3a\u1e3c\u1e3e'
'\u1e40\u1e42\u1e44\u1e46\u1e48\u1e4a\u1e4c\u1e4e\u1e50\u1e52'
'\u1e54\u1e56\u1e58\u1e5a\u1e5c\u1e5e\u1e60\u1e62\u1e64\u1e66'
'\u1e68\u1e6a\u1e6c\u1e6e\u1e70\u1e72\u1e74\u1e76\u1e78\u1e7a'
'\u1e7c\u1e7e\u1e80\u1e82\u1e84\u1e86\u1e88\u1e8a\u1e8c\u1e8e'
'\u1e90\u1e92\u1e94\u1e9e\u1ea0\u1ea2\u1ea4\u1ea6\u1ea8\u1eaa'
'\u1eac\u1eae\u1eb0\u1eb2\u1eb4\u1eb6\u1eb8\u1eba\u1ebc\u1ebe'
'\u1ec0\u1ec2\u1ec4\u1ec6\u1ec8\u1eca\u1ecc\u1ece\u1ed0\u1ed2'
'\u1ed4\u1ed6\u1ed8\u1eda\u1edc\u1ede\u1ee0\u1ee2\u1ee4\u1ee6'
'\u1ee8\u1eea\u1eec\u1eee\u1ef0\u1ef2\u1ef4\u1ef6\u1ef8\u1efa'
'\u1efc\u1efe\u1f08\u1f09\u1f0a\u1f0b\u1f0c\u1f0d\u1f0e\u1f0f'
'\u1f18\u1f19\u1f1a\u1f1b\u1f1c\u1f1d\u1f28\u1f29\u1f2a\u1f2b'
'\u1f2c\u1f2d\u1f2e\u1f2f\u1f38\u1f39\u1f3a\u1f3b\u1f3c\u1f3d'
'\u1f3e\u1f3f\u1f48\u1f49\u1f4a\u1f4b\u1f4c\u1f4d\u1f59\u1f5b'
'\u1f5d\u1f5f\u1f68\u1f69\u1f6a\u1f6b\u1f6c\u1f6d\u1f6e\u1f6f'
'\u1fb8\u1fb9\u1fba\u1fbb\u1fc8\u1fc9\u1fca\u1fcb\u1fd8\u1fd9'
'\u1fda\u1fdb\u1fe8\u1fe9\u1fea\u1feb\u1fec\u1ff8\u1ff9\u1ffa'
'\u1ffb\u2102\u2107\u210b\u210c\u210d\u2110\u2111\u2112\u2115'
'\u2119\u211a\u211b\u211c\u211d\u2124\u2126\u2128\u212a\u212b'
'\u212c\u212d\u2130\u2131\u2132\u2133\u213e\u213f\u2145\u2183'
'\u2c00\u2c01\u2c02\u2c03\u2c04\u2c05\u2c06\u2c07\u2c08\u2c09'
'\u2c0a\u2c0b\u2c0c\u2c0d\u2c0e\u2c0f\u2c10\u2c11\u2c12\u2c13'
'\u2c14\u2c15\u2c16\u2c17\u2c18\u2c19\u2c1a\u2c1b\u2c1c\u2c1d'
'\u2c1e\u2c1f\u2c20\u2c21\u2c22\u2c23\u2c24\u2c25\u2c26\u2c27'
'\u2c28\u2c29\u2c2a\u2c2b\u2c2c\u2c2d\u2c2e\u2c60\u2c62\u2c63'
'\u2c64\u2c67\u2c69\u2c6b\u2c6d\u2c6e\u2c6f\u2c70\u2c72\u2c75'
'\u2c7e\u2c7f\u2c80\u2c82\u2c84\u2c86\u2c88\u2c8a\u2c8c\u2c8e'
'\u2c90\u2c92\u2c94\u2c96\u2c98\u2c9a\u2c9c\u2c9e\u2ca0\u2ca2'
'\u2ca4\u2ca6\u2ca8\u2caa\u2cac\u2cae\u2cb0\u2cb2\u2cb4\u2cb6'
'\u2cb8\u2cba\u2cbc\u2cbe\u2cc0\u2cc2\u2cc4\u2cc6\u2cc8\u2cca'
'\u2ccc\u2cce\u2cd0\u2cd2\u2cd4\u2cd6\u2cd8\u2cda\u2cdc\u2cde'
'\u2ce0\u2ce2\u2ceb\u2ced\ua640\ua642\ua644\ua646\ua648\ua64a'
'\ua64c\ua64e\ua650\ua652\ua654\ua656\ua658\ua65a\ua65c\ua65e'
'\ua662\ua664\ua666\ua668\ua66a\ua66c\ua680\ua682\ua684\ua686'
'\ua688\ua68a\ua68c\ua68e\ua690\ua692\ua694\ua696\ua722\ua724'
'\ua726\ua728\ua72a\ua72c\ua72e\ua732\ua734\ua736\ua738\ua73a'
'\ua73c\ua73e\ua740\ua742\ua744\ua746\ua748\ua74a\ua74c\ua74e'
'\ua750\ua752\ua754\ua756\ua758\ua75a\ua75c\ua75e\ua760\ua762'
'\ua764\ua766\ua768\ua76a\ua76c\ua76e\ua779\ua77b\ua77d\ua77e'
'\ua780\ua782\ua784\ua786\ua78b\uff21\uff22\uff23\uff24\uff25'
'\uff26\uff27\uff28\uff29\uff2a\uff2b\uff2c\uff2d\uff2e\uff2f'
'\uff30\uff31\uff32\uff33\uff34\uff35\uff36\uff37\uff38\uff39'
'\uff3a')
| [
"[email protected]"
]
| |
fbf9692e45b4994a8d39f8cbc34f41bf1bb692ae | d94b6845aeeb412aac6850b70e22628bc84d1d6d | /active_selective_prediction/sampling_methods/__init__.py | f6665d3dc1794f678e706514bb12be34006dff71 | [
"CC-BY-4.0",
"Apache-2.0"
]
| permissive | ishine/google-research | 541aea114a68ced68736340e037fc0f8257d1ea2 | c1ae273841592fce4c993bf35cdd0a6424e73da4 | refs/heads/master | 2023-06-08T23:02:25.502203 | 2023-05-31T01:00:56 | 2023-05-31T01:06:45 | 242,478,569 | 0 | 0 | Apache-2.0 | 2020-06-23T01:55:11 | 2020-02-23T07:59:42 | Jupyter Notebook | UTF-8 | Python | false | false | 1,496 | py | # coding=utf-8
# Copyright 2023 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Import sampling methods."""
from active_selective_prediction.sampling_methods.average_kl_divergence_sampling import AverageKLDivergenceSampling
from active_selective_prediction.sampling_methods.average_margin_sampling import AverageMarginSampling
from active_selective_prediction.sampling_methods.badge_sampling import BADGESampling
from active_selective_prediction.sampling_methods.clue_sampling import CLUESampling
from active_selective_prediction.sampling_methods.confidence_sampling import ConfidenceSampling
from active_selective_prediction.sampling_methods.entropy_sampling import EntropySampling
from active_selective_prediction.sampling_methods.kcenter_greedy_sampling import KCenterGreedySampling
from active_selective_prediction.sampling_methods.margin_sampling import MarginSampling
from active_selective_prediction.sampling_methods.uniform_sampling import UniformSampling
| [
"[email protected]"
]
| |
7c33a2cb3128b036c61416554cb4d258e4f256dd | 740b88ae1307d159fb7f39c455a295155c94d58f | /main.py | a03d136942c1668a9b1e67eeb4a5d60b8dcd520b | []
| no_license | hosmanadam/quote-scraping-game | 4ed578320d3c6872711dadc9f312b7acc6e071c0 | 5d4ecd5f3e2137127190971cfc4da6447f71ffc6 | refs/heads/master | 2020-04-03T10:05:27.242294 | 2019-05-14T11:08:47 | 2019-05-14T11:08:47 | 155,183,546 | 0 | 0 | null | 2018-10-31T23:48:51 | 2018-10-29T09:16:20 | Python | UTF-8 | Python | false | false | 5,892 | py | import os
import unicodedata
from csv import DictReader, DictWriter
from random import choice
from time import sleep
import regex
from pyfiglet import figlet_format
from termcolor import colored
import scraper
import ui
from classes.BadQuoteError import BadQuoteError
PRINT_DELAY = 1
CRAWL_DELAY = 1
def essentialize(full_name):
"""Return the "essence" of a person's name, for fair comparison
- strip whitespace, make lower case
- remove any middle names
- remove punctuation & accents (diacritical marks)
Examples:
(1) `' Emily Jane Brontë'` → `'emilybronte'`
(2) `'J.R.R. Tolkien'` → `'jtolkien'`
"""
names = full_name.strip().lower().replace('.', '. ').split(' ')
no_middle = names[0]
if len(names) > 1:
no_middle += names[-1]
no_punctuation = ''.join(char for char in no_middle if char not in " '.-")
no_accents = unicodedata.normalize('NFKD', no_punctuation).encode('ASCII', 'ignore').decode()
return no_accents
def is_fuzzy_match(a, b):
"""Return `True` if string `a` is "basically the same" as string `b`, else `False`
- fuzzy string matching
- allows 1 mistake for every 6 characters in `a`, but at least 1
- mistake may be insertion, deletion, or substitution
"""
fuzzlimit = round(len(a)/6) or 1
fuzzy = fr'(?:{b}){{i,d,s,e<={fuzzlimit}}}'
return bool(regex.fullmatch(fuzzy, a))
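# Added examples (illustrative), assuming the regex-module fuzzy syntax above
# behaves as the docstring intends: a 10-character guess tolerates two edits,
#     is_fuzzy_match('emilybront', 'emilybronte')   # True (one deletion)
# while a 3-character guess tolerates only one,
#     is_fuzzy_match('cat', 'dog')                  # False (three substitutions)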
def redact_author_description(author_description, author_name):
"""Return text with all appearences of author's name replaced with name-length blocks of `'█'`"""
for name in author_name.split(' '):
author_description = author_description.replace(name, '█'*len(name))
return author_description
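# e.g. (added) redact_author_description('Jane Austen was born...', 'Jane Austen')
# returns '████ ██████ was born...'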
def _give_hint(i, quote):
"""Return `i`th hint for given quote."""
author_first = quote['author_name'].split(' ')[0]
author_last = quote['author_name'].split(' ')[-1]
author_description_redacted = redact_author_description(quote['author_description'], quote['author_name'])
hints = [
colored("\nGuess who this quote is from", attrs=['underline']) + f":\n{ui.format_text_block(quote['text'])}",
colored("Hint", attrs=['underline']) + f": the author was born on {quote['author_born_date']} {quote['author_born_location']}!",
colored("Hint", attrs=['underline']) + f": the author's first name begins with the letter '{author_first[0]}'!",
colored("Hint", attrs=['underline']) + f": the author's last name begins with the letter '{author_last[0]}'!",
colored("Hint", attrs=['underline']) + f": here's some more stuff about the author...\n\n{ui.format_text_block(author_description_redacted)}\n",
]
return hints[i]
def _scrape_and_save():
quotes = scraper.get_quotes(crawl_delay=CRAWL_DELAY, crawl_stop=10)
_save_to_csv(quotes)
return quotes
def _save_to_csv(quotes):
with open('quotes.csv', 'w') as file:
DW_object = DictWriter(file, fieldnames=quotes[0].keys())
DW_object.writeheader()
DW_object.writerows(quotes)
def _load_from_csv():
with open('quotes.csv') as file:
DR_object = DictReader(file)
return [row for row in DR_object]
def _pick_quote(quotes):
"""Return random quote updated with author details, or `None` if details are N/A"""
quote = quotes.pop(choice(range(len(quotes))))
try:
quote.update(scraper.get_quote_details(quote['author_href']))
return quote, quotes
except:
sleep(CRAWL_DELAY)
return None, quotes
def ask_to_play():
"""Ask user to play again, and return `True` or `False` depending on answer"""
wants_to_play = input("\nWould you like to keep playing? (y/n) ")
if not wants_to_play or wants_to_play[0].lower() not in 'yn':
return ask_to_play()
if wants_to_play[0].lower() == 'y':
return True
return False
def enforce_working_directory():
"""Sets working directory to the folder this .py file is in"""
os.chdir(os.sys.path[0])
def play_round(quotes, total_guesses):
"""Selects a quote using _pick_quote().
Conducts a round of the game using _give_hint()."""
quote = {}
while not quote:
quote, quotes = _pick_quote(quotes)
os.system('clear')
print(f"Number of remaining quotes: {len(quotes)}")
sleep(PRINT_DELAY)
for i in range(total_guesses):
print(_give_hint(i, quote))
guess = input(colored("Your guess: ", attrs=['bold']))
if is_fuzzy_match(essentialize(guess), essentialize(quote['author_name'])):
print(colored("\nYou win!", 'magenta', attrs=['bold']))
sleep(PRINT_DELAY)
break
elif i < total_guesses-1:
print(f"\nThat's not the one. {total_guesses-1-i} guesses left!")
else:
print(colored("\nSorry, you lose!", 'red'), end='')
sleep(PRINT_DELAY)
print(f" (The author is {quote['author_name']}.)")
sleep(PRINT_DELAY)
return quotes
def scrape_or_load():
"""Scrape web for quotes or load them from CSV
- scrape without asking if there's no CSV
- user can choose otherwise
"""
if not os.path.exists('quotes.csv'):
return _scrape_and_save()
wants_to_scrape = input("Would you like to scrape the web to update your quotes before playing? (y/n) ")
if not wants_to_scrape or wants_to_scrape[0].lower() not in 'yn':
return scrape_or_load()
if wants_to_scrape[0].lower() == 'y':
return _scrape_and_save()
if wants_to_scrape[0].lower() == 'n':
return _load_from_csv()
def main():
os.system('clear')
print(colored((figlet_format("< Quote game \\>")), 'green', attrs=['bold']))
enforce_working_directory()
quotes = scrape_or_load()
total_guesses = 5 # max.5 unless more hints are added in _give_hint()
wants_to_play = True
while wants_to_play:
quotes = play_round(quotes, total_guesses)
if quotes:
wants_to_play = ask_to_play()
else:
print(colored("\nALL OUT OF QUOTES.", attrs=['bold']))
break
print(colored("\nThanks for playing. Bye!\n", attrs=['bold']))
sleep(PRINT_DELAY)
if __name__ == '__main__':
main()
| [
"[email protected]"
]
| |
c6a432ee79a97806ef61dc314d50de52ccaa5959 | 61004e474b7b2ad0071c16766f0f7874f04f9466 | /examples/dataflow-production-ready/python/ml_preproc/pipeline/beam_classes/clean_records.py | 455ff7e92e93510847b84dce6bfda5dffe4fa050 | [
"Apache-2.0"
]
| permissive | GoogleCloudPlatform/professional-services | eb79751efae765a8c691a745e520f44f51bd715c | 0f51121b945bd74c7f667e74e8861fceda87565c | refs/heads/main | 2023-09-05T02:57:33.328973 | 2023-08-30T14:40:30 | 2023-08-30T14:40:30 | 91,730,359 | 2,626 | 1,381 | Apache-2.0 | 2023-09-14T20:13:42 | 2017-05-18T19:29:27 | Python | UTF-8 | Python | false | false | 2,265 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Iterable, Dict
from apache_beam import DoFn
from ..model.data_classes import Record
from ..features import clean_input
class CleanAndTransfToDictDoFn(DoFn):
def __init__(self, *unused_args, **unused_kwargs):
super().__init__(*unused_args, **unused_kwargs)
def process(self,
element: Record,
abbrev: Dict) -> Iterable[Dict]:
## In this process method we are going to change element. But BEWARE: in Beam, the process method should not
## mutate the input object, it should produce a new object.
## Thankfully for us, named tuples (Record is a named tuple) are immutable; an AttributeError exception
## will be triggered if we try to modify element.
## So let's make a copy as a dict, and then we will return the dictionary.
##
## The transform to dictionary is necessary for two reasons:
## * We will need dicts to write to BigQuery
## * We are going to add some new columns/fields, with the similarity values
# The _asdict method starts with _ to avoid potential conflicts with the named tuple field names
# (its use is not restricted)
mutable_element = element._asdict()
## source and target address
mutable_element['source_address'] = clean_input.clean_text(element.source_address, abbrev)
mutable_element['target_address'] = clean_input.clean_text(element.target_address, abbrev)
## source and target city
mutable_element['source_city'] = clean_input.clean_text(element.source_city)
mutable_element['target_city'] = clean_input.clean_text(element.target_city)
# TODO: transform all the rest of fields
yield mutable_element | [
"[email protected]"
]
| |
0ddbb08c6b7a062673f7f83b9e2f349c32c73b77 | 8ebf6311c3c1db40c7bb56051cf4e37e1b85a4f9 | /rm-server/templatemanager/templatemanager/mongodb.py | 949427035f6d0032ee8be250602351099a125713 | []
| no_license | sq591442679/requirements-manager | e8b074afb7fd2a83632f2546d392dab4c35aeeeb | 6d664ce338b455150dcc9a86145967e8dd67a9dd | refs/heads/master | 2023-07-08T04:38:20.064019 | 2021-08-11T03:41:13 | 2021-08-11T03:41:13 | 392,877,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 234 | py | import pymongo
from templatemanager.config import MONGODB_URL
client = pymongo.MongoClient(MONGODB_URL)
database = client['RequirementsManager']
template_collection = database['Template']
document_collection = database['Document']
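# Illustrative usage (the collection names above are real; this document is
# invented for the example):
#     template_collection.insert_one({'name': 'default', 'sections': []})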
| [
"[email protected]"
]
| |
729fe63a21b3433191f4134946686e280a343e23 | cb5b76716ac04f9bd2eefc2020d9dea7ae9f2123 | /04判断年月日天数.py | d1e246e9be03df255643a395408e58acdd93f423 | []
| no_license | chengong825/python-test | 6c788e47c2ee71457b77d190759d73954489d1fb | e8ac085386eadb562a125cc4428cad9f7b312c3c | refs/heads/master | 2020-03-29T13:17:50.411249 | 2018-10-26T04:40:42 | 2018-10-26T04:40:42 | 149,950,556 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,295 | py | y = int(input("Enter a year: "))
m = int(input("Enter a month: "))
d = int(input("Enter a day: "))
def jug(y):
    # leap year: divisible by 4 but not by 100, or divisible by 400
    if (y % 4 == 0 and y % 100 != 0) or (y % 400 == 0):
        return 1
    else:
        return 0
# month lengths of a common year; February gains one day in leap years
month_days = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
i = sum(month_days[:m - 1]) + d
if jug(y) == 1 and m > 2:
    i = i + 1
# example: 2000-03-01 gives day 61, since 2000 is a leap year (31 + 29 + 1)
print("This day is day %d of the year" % i) | [
"[email protected]"
]
| |
294cb258fe310c29e43889691c9291e31eea57cb | b2db386a35e167dd67d6de90d95c06d5c2ed91cd | /657_judgeCircle.py | de842edbcdbca60309bbad6d4b2c427565ed7dc1 | []
| no_license | rohitmungre/leetcode | 9edb1b8b0cd714eb1a5e1fa847f2e17c455fd624 | d49836b2b46a980f073bb9a6f2e47c4a903e48ac | refs/heads/master | 2020-08-07T16:55:38.699188 | 2020-03-12T11:00:13 | 2020-03-12T11:00:13 | 213,531,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 504 | py | class Solution(object):
def judgeCircle(self, moves):
"""
:type moves: str
:rtype: bool
"""
y = 0
x = 0
for item in moves:
if item == 'U':
y = y +1
elif item == 'D':
y = y -1
elif item == 'L':
x = x -1
elif item == 'R':
x = x +1
if x==0 and y==0:
return True
return False
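# Examples (added, from the problem statement):
#     Solution().judgeCircle("UD")  # True  -- one up cancels one down
#     Solution().judgeCircle("LL")  # False -- ends two steps left of origin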
| [
"[email protected]"
]
| |
fb25374076a68ad89abab966d09eacb305fd5bdf | a56252fda5c9e42eff04792c6e16e413ad51ba1a | /resources/usr/local/lib/python2.7/dist-packages/scipy/linalg/_solvers.py | a6de2d946676fb45fe7951ef9d4115fd340500bd | [
"Apache-2.0"
]
| permissive | edawson/parliament2 | 4231e692565dbecf99d09148e75c00750e6797c4 | 2632aa3484ef64c9539c4885026b705b737f6d1e | refs/heads/master | 2021-06-21T23:13:29.482239 | 2020-12-07T21:10:08 | 2020-12-07T21:10:08 | 150,246,745 | 0 | 0 | Apache-2.0 | 2019-09-11T03:22:55 | 2018-09-25T10:21:03 | Python | UTF-8 | Python | false | false | 7,241 | py | """Matrix equation solver routines"""
# Author: Jeffrey Armstrong <[email protected]>
# February 24, 2012
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.linalg import inv, LinAlgError
from .basic import solve
from .lapack import get_lapack_funcs
from .decomp_schur import schur
from .special_matrices import kron
__all__ = ['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov',
'solve_continuous_are', 'solve_discrete_are']
def solve_sylvester(a,b,q):
"""
Computes a solution (X) to the Sylvester equation (AX + XB = Q).
.. versionadded:: 0.11.0
Parameters
----------
a : (M, M) array_like
Leading matrix of the Sylvester equation
b : (N, N) array_like
Trailing matrix of the Sylvester equation
q : (M, N) array_like
Right-hand side
Returns
-------
x : (M, N) ndarray
The solution to the Sylvester equation.
Raises
------
LinAlgError
If solution was not found
Notes
-----
Computes a solution to the Sylvester matrix equation via the Bartels-
Stewart algorithm. The A and B matrices first undergo Schur
decompositions. The resulting matrices are used to construct an
alternative Sylvester equation (``RY + YS^T = F``) where the R and S
matrices are in quasi-triangular form (or, when R, S or F are complex,
triangular form). The simplified equation is then solved using
``*TRSYL`` from LAPACK directly.
"""
# Compute the Schur decomp form of a
r,u = schur(a, output='real')
# Compute the Schur decomp of b
s,v = schur(b.conj().transpose(), output='real')
# Construct f = u'*q*v
f = np.dot(np.dot(u.conj().transpose(), q), v)
# Call the Sylvester equation solver
trsyl, = get_lapack_funcs(('trsyl',), (r,s,f))
if trsyl is None:
raise RuntimeError('LAPACK implementation does not contain a proper Sylvester equation solver (TRSYL)')
y, scale, info = trsyl(r, s, f, tranb='C')
y = scale*y
if info < 0:
raise LinAlgError("Illegal value encountered in the %d term" % (-info,))
return np.dot(np.dot(u, y), v.conj().transpose())
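# Added self-check (illustrative, random data): the result satisfies the
# residual equation up to floating-point error:
#
#     a = np.random.rand(3, 3); b = np.random.rand(2, 2); q = np.random.rand(3, 2)
#     x = solve_sylvester(a, b, q)
#     np.allclose(a.dot(x) + x.dot(b), q)   # True for generic a, b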
def solve_lyapunov(a, q):
"""
Solves the continuous Lyapunov equation (AX + XA^H = Q) given the values
of A and Q using the Bartels-Stewart algorithm.
.. versionadded:: 0.11.0
Parameters
----------
a : array_like
A square matrix
q : array_like
Right-hand side square matrix
Returns
-------
x : array_like
Solution to the continuous Lyapunov equation
See Also
--------
solve_sylvester : computes the solution to the Sylvester equation
Notes
-----
Because the continuous Lyapunov equation is just a special form of the
Sylvester equation, this solver relies entirely on solve_sylvester for a
solution.
"""
return solve_sylvester(a, a.conj().transpose(), q)
def solve_discrete_lyapunov(a, q):
"""
Solves the Discrete Lyapunov Equation (A'XA-X=-Q) directly.
.. versionadded:: 0.11.0
Parameters
----------
a : (M, M) array_like
A square matrix
q : (M, M) array_like
Right-hand side square matrix
Returns
-------
x : ndarray
        Solution to the discrete Lyapunov equation
Notes
-----
Algorithm is based on a direct analytical solution from:
Hamilton, James D. Time Series Analysis, Princeton: Princeton University
Press, 1994. 265. Print.
http://www.scribd.com/doc/20577138/Hamilton-1994-Time-Series-Analysis
"""
lhs = kron(a, a.conj())
lhs = np.eye(lhs.shape[0]) - lhs
x = solve(lhs, q.flatten())
return np.reshape(x, q.shape)
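# Added note: the direct method above uses the (row-major) Kronecker identity
#     vec(A X A^H) = kron(A, conj(A)) vec(X)
# so the equation collapses to one dense solve of an M^2 x M^2 system,
#     (I - kron(A, conj(A))) vec(X) = vec(Q)
# costing O(M^6) time and O(M^4) memory -- practical for small M only.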
def solve_continuous_are(a, b, q, r):
"""
Solves the continuous algebraic Riccati equation, or CARE, defined
as (A'X + XA - XBR^-1B'X+Q=0) directly using a Schur decomposition
method.
.. versionadded:: 0.11.0
Parameters
----------
a : (M, M) array_like
Input
b : (M, N) array_like
Input
q : (M, M) array_like
Input
r : (N, N) array_like
Non-singular, square matrix
Returns
-------
x : (M, M) ndarray
Solution to the continuous algebraic Riccati equation
See Also
--------
solve_discrete_are : Solves the discrete algebraic Riccati equation
Notes
-----
Method taken from:
Laub, "A Schur Method for Solving Algebraic Riccati Equations."
U.S. Energy Research and Development Agency under contract
ERDA-E(49-18)-2087.
http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf
"""
try:
g = inv(r)
except LinAlgError:
raise ValueError('Matrix R in the algebraic Riccati equation solver is ill-conditioned')
g = np.dot(np.dot(b, g), b.conj().transpose())
z11 = a
z12 = -1.0*g
z21 = -1.0*q
z22 = -1.0*a.conj().transpose()
z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22))))
# Note: we need to sort the upper left of s to have negative real parts,
# while the lower right is positive real components (Laub, p. 7)
[s, u, sorted] = schur(z, sort='lhp')
(m, n) = u.shape
u11 = u[0:m//2, 0:n//2]
u21 = u[m//2:m, 0:n//2]
u11i = inv(u11)
return np.dot(u21, u11i)
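# Added note: z above is the 2M x 2M Hamiltonian matrix
#     [  A   -G  ]
#     [ -Q   -A' ]    with G = B R^{-1} B',
# and X = U21 U11^{-1} is read off its stable invariant subspace (Laub's method).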
def solve_discrete_are(a, b, q, r):
"""
    Solves the discrete algebraic Riccati equation, or DARE, defined as
(X = A'XA-(A'XB)(R+B'XB)^-1(B'XA)+Q), directly using a Schur decomposition
method.
.. versionadded:: 0.11.0
Parameters
----------
a : (M, M) array_like
Non-singular, square matrix
b : (M, N) array_like
Input
q : (M, M) array_like
Input
r : (N, N) array_like
Non-singular, square matrix
Returns
-------
x : ndarray
        Solution to the discrete algebraic Riccati equation
See Also
--------
solve_continuous_are : Solves the continuous algebraic Riccati equation
Notes
-----
Method taken from:
Laub, "A Schur Method for Solving Algebraic Riccati Equations."
U.S. Energy Research and Development Agency under contract
ERDA-E(49-18)-2087.
http://dspace.mit.edu/bitstream/handle/1721.1/1301/R-0859-05666488.pdf
"""
try:
g = inv(r)
except LinAlgError:
raise ValueError('Matrix R in the algebraic Riccati equation solver is ill-conditioned')
g = np.dot(np.dot(b, g), b.conj().transpose())
try:
ait = inv(a).conj().transpose() # ait is "A inverse transpose"
except LinAlgError:
raise ValueError('Matrix A in the algebraic Riccati equation solver is ill-conditioned')
z11 = a+np.dot(np.dot(g, ait), q)
z12 = -1.0*np.dot(g, ait)
z21 = -1.0*np.dot(ait, q)
z22 = ait
z = np.vstack((np.hstack((z11, z12)), np.hstack((z21, z22))))
# Note: we need to sort the upper left of s to lie within the unit circle,
# while the lower right is outside (Laub, p. 7)
[s, u, sorted] = schur(z, sort='iuc')
(m,n) = u.shape
u11 = u[0:m//2, 0:n//2]
u21 = u[m//2:m, 0:n//2]
u11i = inv(u11)
return np.dot(u21, u11i)
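
# Hedged check mirroring the continuous case (assumes the module-level
# numpy/inv imports): the DARE residual A'XA - X - A'XB (R + B'XB)^-1 B'XA + Q
# should be ~0. Note that `a` must be non-singular for this solver.
def _demo_solve_discrete_are():
    a = np.array([[0.5, 0.0], [0.0, 0.3]])
    b = np.array([[1.0], [1.0]])
    q = np.eye(2)
    r = np.array([[1.0]])
    x = solve_discrete_are(a, b, q, r)
    at = a.conj().transpose()
    bt = b.conj().transpose()
    gain = inv(r + np.dot(np.dot(bt, x), b))
    # residual entries should be ~0
    return np.dot(np.dot(at, x), a) - x - np.dot(np.dot(np.dot(np.dot(at, x), b), gain), np.dot(np.dot(bt, x), a)) + q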

# --- backup/user_117/ch3_2019_08_26_19_13_09_705496.py (repo: gabriellaec/desoft-analise-exercicios, no license) ---
import math
def calcula_gaussiana(x,mi,sig):
    return 1 / (sig * math.sqrt(2 * math.pi)) * math.exp(-0.5 * ((x - mi) / sig) ** 2)
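
# Quick hedged check of the fixed formula (the helper name below is an
# illustrative addition): at x == mi the density peaks at 1/(sig*sqrt(2*pi)),
# roughly 0.39894 for sig == 1.
def _confere_gaussiana():
    return abs(calcula_gaussiana(0, 0, 1) - 1 / math.sqrt(2 * math.pi)) < 1e-12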
"[email protected]"
]
| |
628ddd369f2935bc101b96bbb9d0e02bf2bb1c71 | e6dab5aa1754ff13755a1f74a28a201681ab7e1c | /.parts/lib/django-1.3/django/contrib/admin/forms.py | b4dc31c08ef94ea4b6c425a58432dc01e72e1699 | []
| no_license | ronkagan/Euler_1 | 67679203a9510147320f7c6513eefd391630703e | 022633cc298475c4f3fd0c6e2bde4f4728713995 | refs/heads/master | 2021-01-06T20:45:52.901025 | 2014-09-06T22:34:16 | 2014-09-06T22:34:16 | 23,744,842 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 95 | py | /home/action/.parts/packages/googleappengine/1.9.4/lib/django-1.3/django/contrib/admin/forms.py | [
"[email protected]"
]
| |
a0c96338da0f0f22dc2b8df7de91f0721e962059 | d93bb1975f4a04b6051769fea52a41f4b1920762 | /src/visualpatterns/__main__.py | e7cbaf594e4a1190431b13b6d28b4896ce50a333 | [
"CC-BY-4.0"
]
| permissive | tonyonifo/visualpatterns | 94c5adba71db79dacfb009f01419ea785cb85c56 | d50bc08450e10a3f97f323aa37a6dceefb395270 | refs/heads/main | 2023-07-15T12:15:25.944981 | 2021-09-01T21:29:25 | 2021-09-01T21:29:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | import visualpatterns
if __name__ == '__main__':
pass

# --- initdjango/new_project/mysite/blog/feeds.py (repo: vihndsm/Python, no license) ---
from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from .models import Post
class LatestPostsFeed(Feed):
title = 'My blog'
link = '/blog/'
description = 'New posts of my blog.'
def items(self):
return Post.published.all()[:5]
def item_title(self, item):
return item.title
def item_description(self, item):
return truncatewords(item.body, 30)
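
# Hedged wiring sketch (module layout and URL name are assumptions, not part
# of this file): the feed becomes a regular view when instantiated in the
# app's urls.py, e.g.:
#
#     from django.urls import path
#     from .feeds import LatestPostsFeed
#
#     urlpatterns = [
#         path('feed/', LatestPostsFeed(), name='post_feed'),
#     ]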

# --- Algorithms/02. Implementation/014. Migratory Birds.py (repo: dispe1/Hackerrank-Solutions, no license) ---
# Problem: https://www.hackerrank.com/challenges/migratory-birds/problem
# Difficulty : Easy
# Score : 10
import os
import collections
from functools import reduce
def migratoryBirds(arr):
count = collections.Counter(arr)
ar = list(count.items())
ar.sort()
result = reduce(lambda a,b: a if a[1] >= b[1] else b, ar)
return result[0]
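
# A hedged one-expression alternative (an illustrative addition, not part of
# the submitted solution): the "most common type, smallest id on ties" rule
# can also be expressed with a single key function.
def migratoryBirdsAlt(arr):
    count = collections.Counter(arr)
    return min(count.items(), key=lambda kv: (-kv[1], kv[0]))[0]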
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
arr_count = int(input().strip())
arr = list(map(int, input().rstrip().split()))
result = migratoryBirds(arr)
fptr.write(str(result))
fptr.close()

# --- crypto/urls.py (repo: sodatta/Stocks-Screener, MIT) ---
from django.urls import path
from crypto.views import list_crypto_currencies, view_crypto_currency
app_name = 'crypto'
urlpatterns = [
path('', list_crypto_currencies, name="home"),
path('<int:pk>', view_crypto_currency, name='view_crypto_currency'),
]

# --- moodledata/vpl_data/303/usersdata/304/85170/submittedfiles/minha_bib.py (repo: rafaelperazzo/programacao-web, no license) ---
# -*- coding: utf-8 -*-
import time
# START HERE BELOW
def hello_world():
    print('Hello world')
    return

def hello_world2():
    texto = 'Hello world'
    return texto

def media(n1, n2):
    m = (n1 + n2) / 2.0
    return m

def multiplicacao(x, y):
    m = (x * y)
    return m

def fatorial(n):
    f = 1
    for i in range(2, n + 1, 1):
        f *= i
        print('At %d' % i)
    return f

def cronometro(s):
    for i in range(s, -1, -1):
        print('%d seconds left' % i)
        time.sleep(1)
    print('FINISHED')

# --- 0657. Robot Return to Origin/Solution.py (repo: faterazer/LeetCode, no license) ---
class Solution:
def judgeCircle_MK1(self, moves: str) -> bool:
x = y = 0
for move in moves:
if move == 'U':
x += 1
elif move == 'D':
x -= 1
elif move == 'R':
y += 1
else:
y -= 1
return x == y == 0
def judgeCircle_MK2(self, moves: str) -> bool:
return (moves.count('U') == moves.count('D')) and (moves.count('R') == moves.count('L'))

# --- fractional_knapsack.py (repo: harshitalpha/Algorithms, no license) ---
'''
KNAPSACK PROBLEM
PARADIGM - GREEDY
Date : 12-Feb-2020
Name - Harshit Singhal
'''
def knapsack(profit, weights, max_weight):
'''
    The idea: take two lists, profit and weights.
    We build an index list = [0,1,2,...],
    then compute each item's profit-to-weight ratio into a list named ratio,
    and sort the index list according to the ratio list.
    e.g.:
    index = [0,1,2,3]
    ratio = [4,6,1,3]
    after sorting
    index = [1,0,3,2]
    6 is the largest ratio, so the index corresponding to 6 comes first.
    For this we use the following command:
    index.sort(key = lambda i:ratio[i], reverse = True)
    We also use 'zip' in a few places in the code;
    zip takes one or more iterables and returns an iterator:
>>> numbers = [1, 2, 3]
>>> letters = ['a', 'b', 'c']
>>> zipped = zip(numbers, letters)
>>> zipped # Holds an iterator object
<zip object at 0x7fa4831153c8>
>>> type(zipped)
<class 'zip'>
>>> list(zipped)
[(1, 'a'), (2, 'b'), (3, 'c')]
    then we follow the regular greedy approach to solve the problem
'''
print("WEIGHTS GIVEN = {}".format(weights))
print("PROFIT GIVEN = {}".format(profit))
print("MAX WEIGHT CAN CARRY = {}".format(max_weight))
index = list(range(len(weights)))
ratio = [v/w for v,w in zip(profit, weights)]
index.sort(key = lambda i:ratio[i], reverse = True)
ans_weights = [0] * len(weights)
for i in index:
        if weights[i] <= max_weight:
ans_weights[i] = 1
max_weight = max_weight - weights[i]
else:
            ans_weights[i] = float(max_weight) / float(weights[i])
break
# Total Profit
final_profit = 0
for i in range(len(weights)):
final_profit = final_profit + (ans_weights[i] * profit[i])
print("WEIGHT OF EACH OBJECT CAN CARRY = {}".format(ans_weights))
print("FINAL PROFIT = {}".format(final_profit))
profit = [10,5,15,7,6,18,3]
weights = [2,3,5,7,1,4,1]
knapsack(profit,weights,15)
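# Hedged hand-check of the sample run above: the greedy pass takes items
# 4, 0, 5, 2 and 6 whole (weight 13, profit 52) plus 2/3 of item 1
# (profit 10/3), so the printed final profit is approximately 55.33.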

# --- src/lib/commands/catch.py (repo: singlerider/lorenzotherobot, no license) ---
import globals
from src.lib.queries.points_queries import *
from src.lib.queries.pokemon_queries import *
def catch(**kwargs):
channel = kwargs.get("channel", "testchannel").lstrip("#")
if globals.CHANNEL_INFO[channel]['caught'] is False:
pokemon_trainer = kwargs.get("username", "testuser")
        # This is here in case the user is brand new: it creates an entry in
        # the users table, which userpokemon depends on.
modify_user_points(pokemon_trainer, 0)
open_position, occupied_positions = find_open_party_positions(
pokemon_trainer)
desired_level = 5
pokemon_id = get_pokemon_id_from_name(
globals.CHANNEL_INFO[channel]['pokemon'])
if pokemon_id is None:
return "Pokemon not found! Check your spelling"
if len(open_position) > 0:
globals.CHANNEL_INFO[channel]['caught'] = True
return insert_user_pokemon(
pokemon_trainer, pokemon_trainer, open_position[0],
pokemon_id, desired_level,
globals.CHANNEL_INFO[channel]['pokemon'],
None, None)
else:
return "No open slots in your party."
else:
return "Too slow!"

# --- backend/texty_friends_22060/urls.py (repo: crowdbotics-apps/texty-friends-22060, no license) ---
"""texty_friends_22060 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Texty Friends"
admin.site.site_title = "Texty Friends Admin Portal"
admin.site.index_title = "Texty Friends Admin"
# swagger
api_info = openapi.Info(
title="Texty Friends API",
default_version="v1",
description="API documentation for Texty Friends App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]

# --- nugyAtjRNQPTHLJNR_15.py (repo: daniel-reich/turbo-robot, no license) ---
"""
Suppose that you add all of the page numbers in a book. If the total is 21,
the book could only have 6 pages because 1 + 2 + 3 + 4 + 5 + 6 = 21. If the
total were 25, that would be impossible because the next number in the series
is 28 (21 + 7).
Create a function that, given the `total` number of pages as an argument,
returns `True` if it is a valid total and `False` if it is not.
Can you devise a solution that is more efficient than simply adding
consecutive integers as I did above?
### Examples
pages_in_book(5) ➞ False
pages_in_book(4005) ➞ True
pages_in_book(9453) ➞ True
### Notes
N/A
"""
def pages_in_book(total):
    d = 0
    for i in range(1, total + 1):
        d += i
        if d == total:
            return True
        if d > total:
            return False
    return False
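
import math


# A constant-time answer to the docstring's efficiency challenge (a hedged
# addition, not part of the original submission): `total` is a valid page
# count iff 8*total + 1 is a perfect square, since n*(n+1)/2 = total implies
# (2*n + 1)**2 = 8*total + 1.
def pages_in_book_fast(total):
    root = math.isqrt(8 * total + 1)  # math.isqrt needs Python 3.8+
    return root * root == 8 * total + 1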

# --- seqspy/Evap_UVMOT_MultiPulse_Image_ZEROCROSSING.py (repo: drlightx/apparatus3-seq, no license) ---
"""Make sure the report file given by
(L:/data/app3/Savedir)report(L:/data/app3/RunNumber).INI
exists, otherwise this script won't run.
"""
__author__ = "Pedro M Duarte"
import time
t0=time.time()
print "\n----- Evap_UVMOT_Image_ZEROCROSSING.py -----\n"
import sys, math
sys.path.append('L:/software/apparatus3/seq/utilspy')
sys.path.append('L:/software/apparatus3/seq/seqspy')
sys.path.append('L:/software/apparatus3/convert')
import seq, wfm, gen, cnc, odt, andor, highfield_uvmot
report=gen.getreport()
#PARAMETERS
stepsize = float(report['SEQ']['stepsize'])
tof = float(report['ANDOR']['tof'])
exp = float(report['ANDOR']['exp'])
noatoms = float(report['ANDOR']['noatoms'])
#SEQUENCE
s=seq.sequence(stepsize)
s=gen.initial(s)
s.wait(0.0)
s.digichg('hfimg',1)
s.digichg('odt7595',0)
#Do CNC, UVMOT, and field ramps
s, toENDBFIELD = highfield_uvmot.go_to_highfield(s)
# Add evaporation ramp to ODT
free = float(report['EVAP']['free'])
image= float(report['EVAP']['image'])
buffer=10.0 #Time needed to re-latch the trigger for the AOUTS
if free < buffer + toENDBFIELD :
    print 'Need at least ' + str(buffer) + 'ms of free evap before evaporation can be triggered'
print 'Currently ramps end at %f , and free is %f' % (toENDBFIELD,free)
exit(1)
s.wait(free)
odtpow, ENDEVAP, cpowend, ipganalog = odt.odt_evap(image)
evap_ss = float(report['EVAP']['evapss'])
bias = float(report['FESHBACH']['bias'])
zcrampdt = float(report['ZEROCROSS']['zcrampdt'])
zcdt = float(report['ZEROCROSS']['zcdt'])
zcbias = float(report['ZEROCROSS']['zcbias'])
bfield = wfm.wave('bfield',bias,evap_ss)
#~ bfield.extend(odtpow.dt()-zcdt-zcrampdt)
#~ bfield.linear(zcbias,zcrampdt)
#~ bfield.extend(odtpow.dt())
bfield.extend(odtpow.dt())
bfield.linear(zcbias,zcrampdt)
bfield.appendhold(zcdt)
odtpow.extend(bfield.dt())
ipganalog.extend(bfield.dt())
#s.analogwfm_add(evap_ss,[odtpow,bfield])
s.analogwfm_add(evap_ss,[odtpow,bfield,ipganalog])
# ENDEVAP should be equal to image
#~ s.wait(image)
s.wait(image+zcdt+zcrampdt)
#RELEASE FROM IR TRAP
s.digichg('odtttl',0)
odttof = float(report['ODT']['odttof'])
s.wait(odttof)
#Shine probe multiple times before taking the final picture
#Test for how far detuned is the phase-contrast imaging
multiN = int(report['ANDOR']['multiN'])
multiDelta = float(report['ANDOR']['multiDelta'])
multidt = float(report['ANDOR']['multidt'])
s = andor.multiProbe(s, 'probe', multiN, multiDelta, multidt)
#TAKE PICTURES
light = 'probe'
#light = 'motswitch'
#light = 'bragg'
trap_on_picture = 1
kinetics = gen.bstr('Kinetics',report)
print '...kinetics = ' + str(kinetics)
if kinetics == True:
s,SERIESDT = andor.KineticSeries4(s,exp,light,noatoms, trap_on_picture)
else:
s,SERIESDT = andor.FKSeries2(s,stepsize,exp,light,noatoms, trap_on_picture)
#After taking a picture sequence returns at time of the last probe strobe
#Wait 30ms to get past the end
s.wait(30.0)
s=gen.shutdown(s)
s.digichg('odtttl',0)
s.digichg('odt7595',0)
s.save('L:/software/apparatus3/seq/seqstxt/expseq.txt')
s.clear_disk()
print '...Compilation = %.2f seconds\n' % (time.time()-t0)

# --- src/scraping/models.py (repo: SnottyJACK/scraping_service, no license) ---
from django.db import models
from scraping.utils import from_cyrillic_to_eng
# Create your models here.
class City(models.Model):
name = models.CharField(max_length=250,
verbose_name="Название города",
unique=True)
slug = models.CharField(max_length=250, blank=True, unique=True)
class Meta:
        verbose_name = 'City name'
        verbose_name_plural = 'City names'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = from_cyrillic_to_eng(str(self.name))
super().save(*args, **kwargs)
class Language(models.Model):
name = models.CharField(max_length=250,
verbose_name="Язык программирования",
unique=True)
slug = models.CharField(max_length=250, blank=True, unique=True)
class Meta:
        verbose_name = 'Programming language'
        verbose_name_plural = 'Programming languages'
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.slug:
self.slug = from_cyrillic_to_eng(str(self.name))
super().save(*args, **kwargs)
class Vacancy(models.Model):
url = models.URLField(unique=True)
    title = models.CharField(max_length=250, verbose_name='Vacancy title')
    company = models.CharField(max_length=250, verbose_name='Company')
    description = models.TextField(verbose_name='Vacancy description')
    city = models.ForeignKey('City', on_delete=models.CASCADE, verbose_name='City')
    language = models.ForeignKey('Language', on_delete=models.CASCADE, verbose_name='Programming language')
timestamp = models.DateField(auto_now_add=True)
class Meta:
        verbose_name = 'Vacancy'
        verbose_name_plural = 'Vacancies'
def __str__(self):
        return self.title

# --- 00.SSAFY/1.first-semester/01_basic/get_data/project2.py (repo: snowink1137/TIL, no license) ---
import requests
import datetime
import copy
import csv
import os
KOBIS_KEY = os.getenv('KOBIS_KEY')
# Read the CSV data and build a list of movie codes
boxoffice = open('boxoffice.csv', 'r', encoding='utf-8')
reader = csv.reader(boxoffice)
movie_code_list = []
for line in reader:
movie_code_list.append(line[0])
del movie_code_list[0]
# Collect data from the Korean Film Council (KOBIS) open API
## Build the URL list
key = KOBIS_KEY
weekGb = '0'
url_list = []
for code in movie_code_list:
url = 'http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?' + 'key=' + key + '&movieCd=' + code
url_list.append(url)
## Collect the detailed movie information
movie_data = ['Movie code', 'Title (Korean)', 'Title (English)', 'Title (original)', 'Release year', 'Running time', 'Genre', 'Director', 'Actor 1', 'Actor 2', 'Actor 3']
for url in url_list:
response = requests.get(url)
response_json = response.json()
movie_data.append(response_json['movieInfoResult']['movieInfo']['movieCd'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['movieNm'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['movieNmEn'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['movieNmOg'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['prdtYear'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['showTm'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['genres'][0]['genreNm'])
movie_data.append(response_json['movieInfoResult']['movieInfo']['directors'][0]['peopleNm'])
    # Append up to three actor names, padding missing slots with empty strings
    actors = response_json['movieInfoResult']['movieInfo']['actors']
    actor_names = [actor['peopleNm'] for actor in actors[:3]]
    movie_data.extend(actor_names + [''] * (3 - len(actor_names)))
f = open('movie.csv', 'a+', encoding='utf-8', newline='')
for i in range(len(movie_data) // 11):  # one CSV row per 11 fields (header row + one per movie)
    writer = csv.writer(f)
    writer.writerow(movie_data[11 * i: 11 * i + 11])
f.close()

# --- scrapy爬虫/京东-m/JDSpider/items.py (repo: Randyedu/python, no license) ---
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class CategoriesItem(Item):
    name = Field()    # category name
    url = Field()     # category URL
    _id = Field()     # category id
    index = Field()   # category index
class ProductsItem(Item):
    name = Field()             # product name
    url = Field()              # product URL
    _id = Field()              # product id
    category = Field()         # product category
    reallyPrice = Field()      # product price
    originalPrice = Field()    # original price
    description = Field()      # product description
    shopId = Field()           # shop id
    venderId = Field()         # vender id
    commentCount = Field()     # total number of reviews
    goodComment = Field()      # number of positive reviews
    generalComment = Field()   # number of neutral reviews
    poolComment = Field()      # number of negative reviews
    favourableDesc1 = Field()  # promotion description 1
    favourableDesc2 = Field()  # promotion description 2
class ShopItem(Item):
    _id = Field()    # shop id
    name = Field()   # shop name
    url1 = Field()   # shop URL 1
    url2 = Field()   # shop URL 2
shopId = Field() #shop id
venderId = Field() #vender id
class CommentItem(Item):
_id = Field()
    productId = Field()  # same as the ProductsItem id
guid = Field()
content = Field()
creationTime = Field()
isTop = Field()
referenceId = Field()
referenceName = Field()
referenceType = Field()
referenceTypeId = Field()
firstCategory = Field()
secondCategory = Field()
thirdCategory = Field()
replyCount = Field()
score = Field()
status = Field()
title = Field()
usefulVoteCount = Field()
uselessVoteCount = Field()
userImage = Field()
userImageUrl = Field()
userLevelId = Field()
userProvince = Field()
viewCount = Field()
orderId = Field()
isReplyGrade = Field()
nickname = Field()
userClient = Field()
mergeOrderStatus = Field()
discussionId = Field()
productColor = Field()
productSize = Field()
imageCount = Field()
integral = Field()
userImgFlag = Field()
anonymousFlag = Field()
userLevelName = Field()
plusAvailable = Field()
recommend = Field()
userLevelColor = Field()
userClientShow = Field()
isMobile = Field()
days = Field()
afterDays = Field()
class CommentImageItem(Item):
_id = Field()
    associateId = Field()  # same as CommentItem.discussionId
    productId = Field()    # not the ProductsItem id; this value is 0
imgUrl = Field()
available = Field()
pin = Field()
dealt = Field()
imgTitle = Field()
isMain = Field()
class CommentSummaryItem(Item):
_id = Field()
goodRateShow = Field()
poorRateShow = Field()
poorCountStr = Field()
averageScore = Field()
generalCountStr = Field()
showCount = Field()
showCountStr = Field()
goodCount = Field()
generalRate = Field()
generalCount = Field()
skuId = Field()
goodCountStr = Field()
poorRate = Field()
afterCount = Field()
goodRateStyle = Field()
poorCount = Field()
skuIds = Field()
poorRateStyle = Field()
generalRateStyle = Field()
commentCountStr = Field()
commentCount = Field()
    productId = Field()  # same as the ProductsItem id
afterCountStr = Field()
goodRate = Field()
generalRateShow = Field()
jwotestProduct = Field()
maxPage = Field()
score = Field()
soType = Field()
imageListCount = Field()
class HotCommentTagItem(Item):
_id = Field()
name = Field()
status = Field()
rid = Field()
productId = Field()
count = Field()
created = Field()
modified = Field()
type = Field()
canBeFiltered = Field()

# --- umbra/broker/plugins/fabric.py (repo: banoris/umbra-raph, Apache-2.0 and CC-BY-4.0) ---
import os
import time
import asyncio
import logging
from hfc.fabric import Client
from hfc.fabric_ca.caservice import CAClient, CAService
logger = logging.getLogger(__name__)
class FabricEvents:
def __init__(self):
self._async_loop = asyncio.get_event_loop()
self._configtx_dir = None
self._chaincode_dir = None
self._config_sdk = None
self._cli = None
self._topo = None
def config(self, topology, configsdk, chaincode, configtx):
self._topo = topology
self._configtx_dir = configtx
self._chaincode_dir = chaincode
self._config_sdk = configsdk
if all([topology, configsdk, chaincode, configtx]):
logger.info("FabricEvents configs OK")
logger.info("configsdk %s, chaincode %s, configtx %s", configsdk, chaincode, configtx)
self.config_gopath()
self.build_cli()
return True
else:
logger.info("FabricEvents configs FAILED")
return False
def config_gopath(self):
gopath = os.path.normpath(os.path.join(
self._chaincode_dir
))
os.environ['GOPATH'] = os.path.abspath(gopath)
def build_cli(self):
pathlist = ["$HOME/hl/bin",] # TODO set dynamic config path for configtxgen bin
os.environ["PATH"] += os.pathsep + os.pathsep.join(pathlist)
self._cli = Client(net_profile=self._config_sdk)
logger.debug("Fabric Orgs %s", self._cli.organizations)
logger.debug("Fabric Peers %s", self._cli.peers)
logger.debug("Fabric Orderers %s", self._cli.orderers)
logger.debug("Fabric CAs %s", self._cli.CAs)
logger.info("Fabric Client SDK CLI Started")
def schedule(self, events):
for _id,event in events.items():
event_category = event.get("category")
if event_category == "fabric":
when = event.get("when")
logger.info("Calling at %s event %s", when, event.get("params").get("action"))
self.call_at(when, event.get("params"))
def sched_time(self, when):
if type(when) is float:
if when >= time.time():
rel_when = when - time.time()
else:
rel_when = 0
elif type(when) is str:
if when == "now":
rel_when = 0
else:
rel_when = float(when)
else:
rel_when = 0
return rel_when
def call_at(self, when, event):
rel_when = self.sched_time(when)
self._async_loop.call_later(
max(0, rel_when), self.call, event)
def run_task(self, task):
try:
self._async_loop.create_task(task)
except asyncio.CancelledError:
pass
except Exception:
logger.error("Exception in Fabric Event Task", exc_info=True)
def call(self, event):
task = None
action = event.get("action")
if action == "info_network":
task = self.event_info_network(event)
if action == "create_channel":
task = self.event_create_channel(event)
if action == "join_channel":
task = self.event_join_channel(event)
if action == "info_channels":
task = self.event_info_channels(event)
if action == "info_channel":
task = self.event_info_channel(event)
if action == "info_channel_config":
task = self.event_info_channel_config(event)
if action == "info_channel_chaincodes":
task = self.event_info_channel_chaincodes(event)
if action == "chaincode_install":
task = self.event_chaincode_install(event)
if action == "chaincode_instantiate":
task = self.event_chaincode_instantiate(event)
if action == "chaincode_invoke":
task = self.event_chaincode_invoke(event)
if action == "chaincode_query":
task = self.event_chaincode_query(event)
if task:
self.run_task(task)
else:
logger.info("Unkown task for event %s", event)
async def event_create_channel(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
orderer_name = ev.get("orderer")
channel = ev.get("channel")
profile = ev.get("profile")
orderer = self._topo.get("orderers").get(orderer_name)
orderer_fqdn = orderer.get("orderer_fqdn")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
if org_fqdn and orderer_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.channel_create(
orderer=orderer_fqdn,
channel_name=channel,
requestor=org_user,
config_yaml=self._configtx_dir,
channel_profile=profile
)
logger.info("Create channel response %s", response)
return response
logger.info("unknown orderer %s and org %s", orderer_name, org_name)
return None
async def event_join_channel(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
orderer_name = ev.get("orderer")
channel = ev.get("channel")
peers_names = ev.get("peers")
orderer = self._topo.get("orderers").get(orderer_name)
orderer_fqdn = orderer.get("orderer_fqdn")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
if org_fqdn and orderer_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
response = await self._cli.channel_join(
requestor=org_user,
channel_name=channel,
peers=peers_fqdn,
orderer=orderer_fqdn
)
logger.info("Join channel response %s", response)
return response
logger.info("unknown orderer %s and org %s", orderer_name, org_name)
return None
async def event_info_channel(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
channel = ev.get("channel")
peers_names = ev.get("peers")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.query_info(
requestor=org_user,
channel_name=channel,
peers=peers_fqdn,
decode=True
)
logger.info("Info channel response %s", response)
return response
logger.info("unknown org %s and/org peers %s", org_name, peers_names)
return None
async def event_info_channels(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
peers_names = ev.get("peers")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.query_channels(
requestor=org_user,
peers=peers_fqdn,
decode=True
)
logger.info("Info channels response %s", response)
return response
logger.info("unknown org %s and/org peers %s", org_name, peers_names)
return None
async def event_info_channel_config(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
channel = ev.get("channel")
peers_names = ev.get("peers")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.get_channel_config(
requestor=org_user,
channel_name=channel,
peers=peers_fqdn,
decode=True
)
logger.info("Info channel config response %s", response)
return response
logger.info("unknown org %s and/org peers %s", org_name, peers_names)
return None
async def event_info_channel_chaincodes(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
peers_names = ev.get("peers")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.query_installed_chaincodes(
requestor=org_user,
peers=peers_fqdn,
decode=True
)
logger.info("Info channel chaincodes response %s", response)
return response
logger.info("unknown org %s and/org peers %s", org_name, peers_names)
return None
async def event_info_network(self, ev):
orderer_name = ev.get("orderer")
orderer = self._topo.get("orderers").get(orderer_name)
orderer_fqdn = orderer.get("orderer_fqdn")
if orderer_fqdn:
response = self._cli.get_net_info(
'organizations',
orderer_fqdn,
'mspid'
)
logger.info("Info network response %s", response)
return response
logger.info("unknown orderer %s", orderer_name)
return None
async def event_chaincode_install(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
peers_names = ev.get("peers")
chaincode_name = ev.get("chaincode_name")
chaincode_path = ev.get("chaincode_path")
chaincode_version = ev.get("chaincode_version")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.chaincode_install(
requestor=org_user,
peers=peers_fqdn,
cc_path=chaincode_path,
cc_name=chaincode_name,
cc_version=chaincode_version
)
logger.info("Chaincode install response %s", response)
return response
logger.info("unknown org %s and/or peers %s", org_name, peers_names)
return None
async def event_chaincode_instantiate(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
peers_names = ev.get("peers")
channel = ev.get("channel")
chaincode_args = ev.get("chaincode_args")
chaincode_name = ev.get("chaincode_name")
chaincode_version = ev.get("chaincode_version")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.chaincode_instantiate(
requestor=org_user,
channel_name=channel,
peers=peers_fqdn,
args=chaincode_args,
cc_name=chaincode_name,
cc_version=chaincode_version
)
logger.info("Chaincode instantiate response %s", response)
return response
logger.info("unknown org %s and/or peers %s", org_name, peers_names)
return None
async def event_chaincode_invoke(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
peers_names = ev.get("peers")
channel = ev.get("channel")
chaincode_args = ev.get("chaincode_args")
chaincode_name = ev.get("chaincode_name")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.chaincode_invoke(
requestor=org_user,
channel_name=channel,
peers=peers_fqdn,
args=chaincode_args,
cc_name=chaincode_name
)
logger.info("Chaincode invoke response %s", response)
return response
logger.info("unknown org %s and/or peers %s", org_name, peers_names)
return None
async def event_chaincode_query(self, ev):
org_name = ev.get("org")
user_name = ev.get("user")
peers_names = ev.get("peers")
channel = ev.get("channel")
chaincode_args = ev.get("chaincode_args")
chaincode_name = ev.get("chaincode_name")
org = self._topo.get("orgs").get(org_name)
org_fqdn = org.get("org_fqdn")
peers = org.get("peers")
peers_fqdn = [ peer.get("peer_fqdn") for peer in peers.values() if peer.get("name") in peers_names ]
if org_fqdn and peers_fqdn:
org_user = self._cli.get_user(org_name=org_fqdn, name=user_name)
response = await self._cli.chaincode_query(
requestor=org_user,
channel_name=channel,
peers=peers_fqdn,
args=chaincode_args,
cc_name=chaincode_name
)
logger.info("Chaincode query response %s", response)
return response
logger.info("unknown org %s and/or peers %s", org_name, peers_names)
        return None
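
# Hedged usage sketch (the event id, org/user/peer names and timing value are
# illustrative assumptions): a configured FabricEvents instance consumes event
# dicts shaped like the ones dispatched in call() above.
def _demo_schedule(fabric_events):
    events = {
        "ev-1": {
            "category": "fabric",
            "when": "now",
            "params": {
                "action": "info_channels",
                "org": "org1",
                "user": "Admin",
                "peers": ["peer0"],
            },
        },
    }
    fabric_events.schedule(events)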
"[email protected]"
]
| |
8324c9345cb516d16e68a611d7bb28b85ab8ec2d | b7da549f454a48132c0d3458474c07a37eae28a2 | /h5pyd/_hl/table.py | 90e34ae5bdf729a6de5f9c7e39e16199be8d28ef | [
"LicenseRef-scancode-warranty-disclaimer"
]
| no_license | qiuwei/h5pyd | 10407ac1751ad6f4f68a9e8c2fcbf40f3d1aba96 | 6c4f0202adbb857fc265456b6fd1f5d572c1a685 | refs/heads/master | 2021-01-01T02:09:05.891492 | 2020-02-06T20:59:39 | 2020-02-06T20:59:39 | 239,133,525 | 1 | 0 | NOASSERTION | 2020-02-08T13:05:39 | 2020-02-08T13:05:38 | null | UTF-8 | Python | false | false | 14,530 | py | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of H5Serv (HDF5 REST Server) Service, Libraries and #
# Utilities. The full HDF5 REST Server copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
from __future__ import absolute_import
import numpy
from .base import _decode
from .dataset import Dataset
from .objectid import DatasetID
from . import selections as sel
from .h5type import Reference
from .h5type import check_dtype
class Cursor():
"""
    Cursor for retrieving rows from a table
"""
def __init__(self, table, query=None, start=None, stop=None):
self._table = table
self._query = query
if start is None:
self._start = 0
else:
self._start = start
if stop is None:
self._stop = table.nrows
else:
self._stop = stop
def __iter__(self):
""" Iterate over the first axis. TypeError if scalar.
BEWARE: Modifications to the yielded data are *NOT* written to file.
"""
nrows = self._table.nrows
# to reduce round trips, grab BUFFER_SIZE items at a time
# TBD: set buffersize based on size of each row
BUFFER_SIZE = 10000
arr = None
query_complete = False
for indx in range(self._start, self._stop):
if indx%BUFFER_SIZE == 0:
# grab another buffer
read_count = BUFFER_SIZE
if nrows - indx < read_count:
read_count = nrows - indx
if self._query is None:
arr = self._table[indx:read_count+indx]
else:
# call table to return query result
if query_complete:
arr = None # nothing more to fetch
else:
arr = self._table.read_where(self._query, start=indx, limit=read_count)
if arr is not None and arr.shape[0] < read_count:
query_complete = True # we've gotten all the rows
if arr is not None and indx%BUFFER_SIZE < arr.shape[0]:
yield arr[indx%BUFFER_SIZE]
class Table(Dataset):
"""
Represents an HDF5 dataset
"""
def __init__(self, bind):
""" Create a new Table object by binding to a low-level DatasetID.
"""
if not isinstance(bind, DatasetID):
raise ValueError("%s is not a DatasetID" % bind)
Dataset.__init__(self, bind)
if len(self._dtype) < 1:
raise ValueError("Table type must be compound")
if len(self._shape) > 1:
raise ValueError("Table must be one-dimensional")
@property
def colnames(self):
"""Numpy-style attribute giving the number of dimensions"""
names = []
for field in self._dtype.descr:
# each element should be a tuple ('fieldname', dt)
names.append(field[0])
return names
@property
def nrows(self):
return self._shape[0]
def read(self, start=None, stop=None, step=None, field=None, out=None):
if start is None:
start = 0
if stop is None:
stop = self._shape[0]
if step is None:
step = 1
arr = self[start:stop:step]
if field is not None:
#TBD - read just the field once the service supports it
tmp = arr[field]
arr = tmp
if out is not None:
# TBD - read direct
numpy.copyto(out, arr)
else:
return arr
def read_where(self, condition, condvars=None, field=None, start=None, stop=None, step=None, limit=None):
"""Read rows from table using pytable-style condition
"""
names = () # todo
def readtime_dtype(basetype, names):
""" Make a NumPy dtype appropriate for reading """
if len(names) == 0: # Not compound, or we want all fields
return basetype
if basetype.names is None: # Names provided, but not compound
raise ValueError("Field names only allowed for compound types")
for name in names: # Check all names are legal
if not name in basetype.names:
raise ValueError("Field %s does not appear in this type." % name)
return numpy.dtype([(name, basetype.fields[name][0]) for name in names])
new_dtype = getattr(self._local, 'astype', None)
if new_dtype is not None:
new_dtype = readtime_dtype(new_dtype, names)
else:
# This is necessary because in the case of array types, NumPy
# discards the array information at the top level.
new_dtype = readtime_dtype(self.dtype, names)
# todo - will need the following once we have binary transfers
# mtype = h5t.py_create(new_dtype)
mtype = new_dtype
# Perform the dataspace selection
if start or stop:
if not start:
start = 0
if not stop:
stop = self._shape[0]
else:
start = 0
stop = self._shape[0]
selection_arg = slice(start, stop)
selection = sel.select(self, selection_arg)
if selection.nselect == 0:
return numpy.ndarray(selection.mshape, dtype=new_dtype)
        # setup for pagination in case we can't read everything in one go
data = []
cursor = start
page_size = stop - start
while True:
            # Perform the actual read
req = "/datasets/" + self.id.uuid + "/value"
params = {}
params["query"] = condition
self.log.info("req - cursor: {} page_size: {}".format(cursor, page_size))
end_row = cursor+page_size
if end_row > stop:
end_row = stop
selection_arg = slice(cursor, end_row)
selection = sel.select(self, selection_arg)
sel_param = selection.getQueryParam()
self.log.debug("query param: {}".format(sel_param))
if sel_param:
params["select"] = sel_param
try:
self.log.debug("params: {}".format(params))
rsp = self.GET(req, params=params)
values = rsp["value"]
count = len(values)
self.log.info("got {} rows".format(count))
if count > 0:
if limit is None or count + len(data) <= limit:
# add in all the data
data.extend(values)
else:
# we've hit the limit for number of rows to return
add_count = limit - len(data)
self.log.debug("adding {} from {} to rrows".format(add_count, count))
data.extend(values[:add_count])
# advance to next page
cursor += page_size
except IOError as ioe:
if ioe.errno == 413 and page_size > 1024:
# too large a query target, try reducing the page size
# if it is not already relatively small (1024)
page_size //= 2
page_size += 1 # bump up to avoid tiny pages in the last iteration
self.log.info("Got 413, reducing page_size to: {}".format(page_size))
else:
# otherwise, just raise the exception
self.log.info("Unexpected exception: {}".format(ioe.errno))
raise ioe
if cursor >= stop or limit and len(data) == limit:
self.log.info("completed iteration, returning: {} rows".format(len(data)))
break
# need some special conversion for compound types --
# each element must be a tuple, but the JSON decoder
# gives us a list instead.
mshape = (len(data),)
if len(mtype) > 1 and type(data) in (list, tuple):
converted_data = []
for i in range(len(data)):
converted_data.append(self.toTuple(data[i]))
data = converted_data
arr = numpy.empty(mshape, dtype=mtype)
arr[...] = data
# Patch up the output for NumPy
if len(names) == 1:
arr = arr[names[0]] # Single-field recarray convention
if arr.shape == ():
arr = numpy.asscalar(arr)
return arr
def update_where(self, condition, value, start=None, stop=None, step=None, limit=None):
"""Modify rows in table using pytable-style condition
"""
if not isinstance(value, dict):
raise ValueError("expected value to be a dict")
# Perform the dataspace selection
if start or stop:
if not start:
start = 0
if not stop:
stop = self._shape[0]
else:
start = 0
stop = self._shape[0]
selection_arg = slice(start, stop)
selection = sel.select(self, selection_arg)
sel_param = selection.getQueryParam()
params = {}
params["query"] = condition
if limit:
params["Limit"] = limit
self.log.debug("query param: {}".format(sel_param))
if sel_param:
params["select"] = sel_param
req = "/datasets/" + self.id.uuid + "/value"
rsp = self.PUT(req, body=value, format="json", params=params)
indices = None
arr = None
if "index" in rsp:
indices = rsp["index"]
if indices:
arr = numpy.array(indices)
return arr
def create_cursor(self, condition=None, start=None, stop=None):
"""Return a cursor for iteration
"""
return Cursor(self, query=condition, start=start, stop=stop)
def append(self, rows):
""" Append rows to end of table
"""
self.log.info("Table append")
if not self.id.uuid.startswith("d-"):
# Append ops only work with HSDS
raise ValueError("append not supported")
if self._item_size != "H5T_VARIABLE":
use_base64 = True # may need to set this to false below for some types
else:
use_base64 = False # never use for variable length types
self.log.debug("Using JSON since type is variable length")
val = rows # for compatibility with dataset code...
# get the val dtype if we're passed a numpy array
val_dtype = None
try:
val_dtype = val.dtype
except AttributeError:
pass # not a numpy object, just leave dtype as None
if isinstance(val, Reference):
# h5pyd References are just strings
val = val.tolist()
# Generally we try to avoid converting the arrays on the Python
# side. However, for compound literals this is unavoidable.
# For h5pyd, do extra check and convert type on client side for efficiency
vlen = check_dtype(vlen=self.dtype)
if vlen is not None and vlen not in (bytes, str):
self.log.debug("converting ndarray for vlen data")
try:
val = numpy.asarray(val, dtype=vlen)
except ValueError:
try:
val = numpy.array([numpy.array(x, dtype=vlen)
for x in val], dtype=self.dtype)
except ValueError:
pass
if vlen == val_dtype:
if val.ndim > 1:
tmp = numpy.empty(shape=val.shape[:-1], dtype=object)
tmp.ravel()[:] = [i for i in val.reshape(
(numpy.product(val.shape[:-1]), val.shape[-1]))]
else:
tmp = numpy.array([None], dtype=object)
tmp[0] = val
val = tmp
elif isinstance(val, numpy.ndarray):
# convert array if needed
# TBD - need to handle cases where the type shape is different
self.log.debug("got numpy array")
if val.dtype != self.dtype and val.dtype.shape == self.dtype.shape:
self.log.info("converting {} to {}".format(val.dtype, self.dtype))
# convert array
tmp = numpy.empty(val.shape, dtype=self.dtype)
tmp[...] = val[...]
val = tmp
else:
val = numpy.asarray(val, order='C', dtype=self.dtype)
self.log.debug("rows shape: {}".format(val.shape))
self.log.debug("data dtype: {}".format(val.dtype))
if len(val.shape) != 1:
raise ValueError("rows must be one-dimensional")
numrows = val.shape[0]
req = "/datasets/" + self.id.uuid + "/value"
params = {}
body = {}
format = "json"
if use_base64:
# server is HSDS, use binary data, use param values for selection
format = "binary"
body = val.tobytes()
self.log.debug("writing binary data, {} bytes".format(len(body)))
params["append"] = numrows
else:
if type(val) is not list:
val = val.tolist()
val = _decode(val)
self.log.debug("writing json data, {} elements".format(len(val)))
self.log.debug("data: {}".format(val))
body['value'] = val
body['append'] = numrows
self.PUT(req, body=body, format=format, params=params)
# if we get here, the request was successful, adjust the shape
total_rows = self._shape[0] + numrows
self._shape = (total_rows,)
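
# Hedged usage sketch (file, dataset and field names are illustrative
# assumptions): a Table wraps the DatasetID of a one-dimensional compound
# dataset, after which read_where/create_cursor accept the PyTables-style
# conditions handled above.
def _demo_table_query(h5file):
    dset = h5file["/observations"]  # must be a 1-D compound dataset
    tbl = Table(dset.id)
    hot = tbl.read_where("temp > 32", limit=10)
    for row in tbl.create_cursor(condition="temp > 32"):
        pass  # each yielded row is a numpy record
    return hot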

# --- catalog/admin.py (repo: WilliamPerezBeltran/django_practice1, no license) ---
from django.contrib import admin
from catalog.models import Author, Genre, Book, BookInstance
# admin.site.register(Book)
#admin.site.register(Author)
admin.site.register(Genre)
# admin.site.register(BookInstance)
# Define the admin class
class AuthorAdmin(admin.ModelAdmin):
list_display = ('last_name', 'first_name', 'date_of_birth', 'date_of_death')
fields = ['first_name', 'last_name', ('date_of_birth', 'date_of_death')]
# Register the admin class with the associated model
admin.site.register(Author, AuthorAdmin)
class BooksInstanceInline(admin.TabularInline):
model = BookInstance
# The @admin.register decorator registers the model with the admin site;
# it does exactly the same thing as the admin.site.register() syntax.
# Register the Admin classes for Book using the decorator
@admin.register(Book)
class BookAdmin(admin.ModelAdmin):
list_display = ('title', 'author', 'display_genre')
inlines = [BooksInstanceInline]
# Register the Admin classes for BookInstance using the decorator
@admin.register(BookInstance)
class BookInstanceAdmin(admin.ModelAdmin):
list_filter = ('status', 'due_back')
fieldsets = (
(None, {
'fields': ('book', 'imprint', 'id')
}),
('Availability', {
'fields': ('status', 'due_back')
}),
    )

# --- tensorflow_probability/python/distributions/power_spherical.py (repo: ogrisel/probability, Apache-2.0) ---
# Copyright 2020 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""The Power Spherical distribution over vectors on the unit hypersphere."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v2 as tf
from tensorflow_probability.python import math as tfp_math
from tensorflow_probability.python import random as tfp_random
from tensorflow_probability.python.bijectors import chain as chain_bijector
from tensorflow_probability.python.bijectors import invert as invert_bijector
from tensorflow_probability.python.bijectors import softmax_centered as softmax_centered_bijector
from tensorflow_probability.python.bijectors import square as square_bijector
from tensorflow_probability.python.distributions import beta as beta_lib
from tensorflow_probability.python.distributions import distribution
from tensorflow_probability.python.distributions import kullback_leibler
from tensorflow_probability.python.distributions import spherical_uniform
from tensorflow_probability.python.internal import assert_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.internal import reparameterization
from tensorflow_probability.python.internal import samplers
from tensorflow_probability.python.internal import tensor_util
from tensorflow_probability.python.internal import tensorshape_util
__all__ = ['PowerSpherical']
def _uniform_unit_norm(dimension, shape, dtype, seed):
"""Returns a batch of points chosen uniformly from the unit hypersphere."""
# This works because the Gaussian distribution is spherically symmetric.
# raw shape: shape + [dimension]
static_dimension = tf.get_static_value(dimension)
if static_dimension is not None and static_dimension == 1:
return tfp_random.rademacher(
tf.concat([shape, [1]], axis=0), dtype=dtype, seed=seed)
raw = samplers.normal(
shape=tf.concat([shape, [dimension]], axis=0), seed=seed, dtype=dtype)
unit_norm = raw / tf.norm(raw, ord=2, axis=-1)[..., tf.newaxis]
return unit_norm
class PowerSpherical(distribution.Distribution):
r"""The Power Spherical distribution over unit vectors on `S^{n-1}`.
The Power Spherical distribution is a distribution over vectors
on the unit hypersphere `S^{n-1}` embedded in `n` dimensions (`R^n`).
It serves as an alternative to the von Mises-Fisher distribution with a
simpler (faster) `log_prob` calculation, as well as a reparameterizable
sampler. In contrast, the Power Spherical distribution does have
-`mean_direction` as a point with zero density (and hence a neighborhood
around that having arbitrarily small density), in contrast with the
von Mises-Fisher distribution which has non-zero density everywhere.
NOTE: `mean_direction` is not in general the mean of the distribution. For
spherical distributions, the mean is generally not in the support of the
distribution.
#### Mathematical details
The probability density function (pdf) is,
```none
  pdf(x; mu, kappa) = C(kappa) (1 + mu^T x) ** kappa
  where,
  C(kappa) = 2**(a + b) pi**b Gamma(a) / Gamma(a + b)
  a = (n - 1) / 2. + kappa
b = (n - 1) / 2.
```
where:
* `mean_direction = mu`; a unit vector in `R^k`,
* `concentration = kappa`; scalar real >= 0, concentration of samples around
`mean_direction`, where 0 pertains to the uniform distribution on the
hypersphere, and \inf indicates a delta function at `mean_direction`.
#### Examples
A single instance of a PowerSpherical distribution is defined by a mean
direction unit vector.
Extra leading dimensions, if provided, allow for batches.
```python
tfd = tfp.distributions
  # Initialize a single 3-dimensional PowerSpherical distribution.
mu = [0., 1, 0]
conc = 1.
ps = tfd.PowerSpherical(mean_direction=mu, concentration=conc)
# Evaluate this on an observation in S^2 (in R^3), returning a scalar.
ps.prob([1., 0, 0])
  # Initialize a batch of two 3-variate PowerSpherical distributions.
mu = [[0., 1, 0],
[1., 0, 0]]
conc = [1., 2]
ps = tfd.PowerSpherical(mean_direction=mu, concentration=conc)
# Evaluate this on two observations, each in S^2, returning a length two
# tensor.
x = [[0., 0, 1],
[0., 1, 0]]
  ps.prob(x)
  ```
#### References
[1] Nicola de Cao, Wilker Aziz. The Power Spherical distribution.
https://arxiv.org/abs/2006.04437.
"""
def __init__(self,
mean_direction,
concentration,
validate_args=False,
allow_nan_stats=True,
name='PowerSpherical'):
"""Creates a new `PowerSpherical` instance.
Args:
mean_direction: Floating-point `Tensor` with shape [B1, ... Bn, N].
A unit vector indicating the mode of the distribution, or the
unit-normalized direction of the mean.
concentration: Floating-point `Tensor` having batch shape [B1, ... Bn]
broadcastable with `mean_direction`. The level of concentration of
samples around the `mean_direction`. `concentration=0` indicates a
uniform distribution over the unit hypersphere, and `concentration=+inf`
indicates a `Deterministic` distribution (delta function) at
`mean_direction`.
validate_args: Python `bool`, default `False`. When `True` distribution
parameters are checked for validity despite possibly degrading runtime
performance. When `False` invalid inputs may silently render incorrect
outputs.
allow_nan_stats: Python `bool`, default `True`. When `True`,
statistics (e.g., mean, mode, variance) use the value "`NaN`" to
indicate the result is undefined. When `False`, an exception is raised
if one or more of the statistic's batch members are undefined.
name: Python `str` name prefixed to Ops created by this class.
Raises:
ValueError: For known-bad arguments, i.e. unsupported event dimension.
"""
parameters = dict(locals())
with tf.name_scope(name) as name:
dtype = dtype_util.common_dtype([mean_direction, concentration],
tf.float32)
self._mean_direction = tensor_util.convert_nonref_to_tensor(
mean_direction, name='mean_direction', dtype=dtype)
self._concentration = tensor_util.convert_nonref_to_tensor(
concentration, name='concentration', dtype=dtype)
super(PowerSpherical, self).__init__(
dtype=self._concentration.dtype,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
reparameterization_type=reparameterization.FULLY_REPARAMETERIZED,
parameters=parameters,
name=name)
@classmethod
def _params_event_ndims(cls):
return dict(mean_direction=1, concentration=0)
@property
def mean_direction(self):
"""Mean direction parameter."""
return self._mean_direction
@property
def concentration(self):
"""Concentration parameter."""
return self._concentration
def _batch_shape_tensor(self, mean_direction=None, concentration=None):
return tf.broadcast_dynamic_shape(
tf.shape(self.mean_direction if mean_direction is None
else mean_direction)[:-1],
tf.shape(self.concentration if concentration is None
else concentration))
def _batch_shape(self):
return tf.broadcast_static_shape(
tensorshape_util.with_rank_at_least(self.mean_direction.shape, 1)[:-1],
self.concentration.shape)
def _event_shape_tensor(self, mean_direction=None):
return tf.shape(self.mean_direction if mean_direction is None
else mean_direction)[-1:]
def _event_shape(self):
return tensorshape_util.with_rank(self.mean_direction.shape[-1:], rank=1)
def _log_prob(self, x):
concentration = tf.convert_to_tensor(self.concentration)
return (self._log_unnormalized_prob(x, concentration=concentration) -
self._log_normalization(concentration=concentration))
def _log_unnormalized_prob(self, samples, concentration=None):
if concentration is None:
concentration = tf.convert_to_tensor(self.concentration)
inner_product = tf.reduce_sum(samples * self.mean_direction, axis=-1)
inner_product = tf.clip_by_value(inner_product, -1., 1.)
return tf.math.xlog1py(concentration, inner_product)
def _log_normalization(self, concentration=None, mean_direction=None):
"""Computes the log-normalizer of the distribution."""
if concentration is None:
concentration = tf.convert_to_tensor(self.concentration)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[-1], self.dtype)
concentration1 = concentration + (event_size - 1.) / 2.
concentration0 = (event_size - 1.) / 2.
return ((concentration1 + concentration0) * np.log(2.) +
concentration0 * np.log(np.pi) +
tfp_math.log_gamma_difference(concentration0, concentration1))
def _sample_control_dependencies(self, samples):
"""Check samples for proper shape and whether samples are unit vectors."""
inner_sample_dim = samples.shape[-1]
event_size = self.event_shape[-1]
shape_msg = ('Samples must have innermost dimension matching that of '
'`self.mean_direction`.')
if event_size is not None and inner_sample_dim is not None:
if event_size != inner_sample_dim:
raise ValueError(shape_msg)
assertions = []
if not self.validate_args:
return assertions
assertions.append(assert_util.assert_near(
tf.cast(1., dtype=self.dtype),
tf.linalg.norm(samples, axis=-1),
message='Samples must be unit length.'))
assertions.append(assert_util.assert_equal(
tf.shape(samples)[-1:],
self.event_shape_tensor(),
message=shape_msg))
return assertions
def _mean(self):
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[0], dtype=self.dtype)
return (concentration / (
event_size - 1. + concentration))[..., tf.newaxis] * mean_direction
def _sample_n(self, n, seed=None):
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
event_size_int = self._event_shape_tensor(
mean_direction=mean_direction)[0]
event_size = tf.cast(event_size_int, dtype=self.dtype)
beta_seed, uniform_seed = samplers.split_seed(seed, salt='power_spherical')
broadcasted_concentration = tf.broadcast_to(
concentration, self._batch_shape_tensor(
mean_direction=mean_direction, concentration=concentration))
beta = beta_lib.Beta(
(event_size - 1.) / 2. + broadcasted_concentration,
(event_size - 1.) / 2.)
beta_samples = beta.sample(n, seed=beta_seed)
u_shape = tf.concat([[n], self._batch_shape_tensor(
mean_direction=mean_direction, concentration=concentration)], axis=0)
spherical_samples = _uniform_unit_norm(
        dimension=event_size_int - 1,
shape=u_shape,
dtype=self.dtype,
seed=uniform_seed)
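    # Map the Beta draw to t in [-1, 1]: t is the coordinate along the pole
    # e1 = (1, 0, ..., 0) for a sample concentrated around e1.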
t = 2. * beta_samples - 1.
y = tf.concat([
t[..., tf.newaxis],
tf.math.sqrt(1. - tf.math.square(t))[
..., tf.newaxis] * spherical_samples], axis=-1)
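    # Reflect samples from around the pole e1 to around `mean_direction`
    # using the Householder transform along normalize(e1 - mean_direction).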
modified_mean = tf.concat(
[(1. - mean_direction[..., 0])[..., tf.newaxis],
-mean_direction[..., 1:]], axis=-1)
modified_mean = tf.math.l2_normalize(modified_mean, axis=-1)
householder_transform = tf.linalg.LinearOperatorHouseholder(
modified_mean)
return householder_transform.matvec(y)
def _entropy(self):
concentration = tf.convert_to_tensor(self.concentration)
mean_direction = tf.convert_to_tensor(self.mean_direction)
event_size = tf.cast(self._event_shape_tensor(
mean_direction=mean_direction)[-1], self.dtype)
concentration1 = concentration + (event_size - 1.) / 2.
concentration0 = (event_size - 1.) / 2.
entropy = (self._log_normalization(
concentration=concentration, mean_direction=mean_direction) -
concentration * (
np.log(2.) + tf.math.digamma(concentration1) -
tf.math.digamma(concentration1 + concentration0)))
return tf.broadcast_to(
entropy, self._batch_shape_tensor(
mean_direction=mean_direction, concentration=concentration))
def _default_event_space_bijector(self):
# TODO(b/145620027) Finalize choice of bijector.
return chain_bijector.Chain([
invert_bijector.Invert(
square_bijector.Square(validate_args=self.validate_args),
validate_args=self.validate_args),
softmax_centered_bijector.SoftmaxCentered(
validate_args=self.validate_args)
], validate_args=self.validate_args)
def _parameter_control_dependencies(self, is_init):
if not self.validate_args:
return []
mean_direction = tf.convert_to_tensor(self.mean_direction)
concentration = tf.convert_to_tensor(self.concentration)
assertions = []
if is_init != tensor_util.is_ref(self._mean_direction):
assertions.append(
assert_util.assert_greater(
tf.shape(mean_direction)[-1],
1,
message='`mean_direction` must be a vector of at least size 2.'))
assertions.append(
assert_util.assert_near(
tf.cast(1., self.dtype),
tf.linalg.norm(mean_direction, axis=-1),
message='`mean_direction` must be unit-length'))
if is_init != tensor_util.is_ref(self._concentration):
assertions.append(
assert_util.assert_non_negative(
concentration, message='`concentration` must be non-negative'))
return assertions
@kullback_leibler.RegisterKL(PowerSpherical, spherical_uniform.SphericalUniform)
def _kl_power_uniform_spherical(a, b, name=None):
"""Calculate the batched KL divergence KL(a || b).
Args:
a: instance of a PowerSpherical distribution object.
b: instance of a SphericalUniform distribution object.
name: (optional) Name to use for created operations.
default is "kl_power_uniform_spherical".
Returns:
Batchwise KL(a || b)
Raises:
ValueError: If the two distributions are over spheres of different
dimensions.
#### References
[1] Nicola de Cao, Wilker Aziz. The Power Spherical distribution.
https://arxiv.org/abs/2006.04437.
"""
with tf.name_scope(name or 'kl_power_uniform_spherical'):
msg = (
        'Cannot compute the KL divergence between a `PowerSpherical` and '
'`SphericalUniform` of different dimensions.')
deps = []
if a.event_shape[-1] is not None:
if a.event_shape[-1] != b.dimension:
raise ValueError(
            (msg + ' Got {} vs. {}').format(a.event_shape[-1], b.dimension))
elif a.validate_args or b.validate_args:
deps += [assert_util.assert_equal(
a.event_shape_tensor()[-1], b.dimension, message=msg)]
with tf.control_dependencies(deps):
return b.entropy() - a.entropy()
| [
"[email protected]"
]
| |
a14bdfeb905147643da75e344e16bda463ba04de | 09c39de5aad7b283cfac2f09a2b93e43086846d2 | /Unit 10 Advanced Topics in Python/02 Introduction to Bitwise Operators/The Bitwise Operators/6-Slide to the Left! Slide to the Right!.py | 4be1e2a75a05f98a77cf829b748bf1d8d2ca9dd2 | [
"MIT"
]
| permissive | lpython2006e/python-samples | b4e84080259faf75b41fb2fd4fb9d2fbc9f857aa | b94ba67ce0d7798ecf796dadae206aa75da58301 | refs/heads/master | 2023-01-21T13:16:13.295163 | 2020-11-29T11:01:50 | 2020-11-29T11:01:50 | 278,653,779 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 164 | py | shift_right = 0b1100
shift_left = 0b1
# Your code here!
shift_right = shift_right >> 2
shift_left = shift_left << 2
print(bin(shift_right))
print(bin(shift_left))
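# Expected output: 0b11 then 0b100 (0b1100 >> 2 == 3, 0b1 << 2 == 4).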
| [
"[email protected]"
]
| |
1e28fd5b3d89457860ba8f712835fb26b59c0ce9 | fb4e41a40d82427e3948549653cdf0405e6dba2b | /app/main/routes.py | 275d16634e7603b9970b08849ae8afe2c331cd8a | []
| no_license | Axeh99/Axeh99s-Website | 5afeef75943722a7c3e67554da57daeb01c70f7a | 7704fbaae7aa26bb1c208fbc18bb6679ea9e3215 | refs/heads/master | 2022-12-10T21:37:16.296575 | 2020-09-01T16:12:27 | 2020-09-01T16:12:27 | 283,016,745 | 1 | 0 | null | 2021-03-04T20:03:07 | 2020-07-27T21:07:14 | HTML | UTF-8 | Python | false | false | 455 | py | from flask import Blueprint, render_template, request
from app.models import Post
main = Blueprint("main", __name__)
@main.route("/")
@main.route("/home")
def home():
page = request.args.get("page", 1, type=int)
posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)
return render_template("home.html", posts=posts)
@main.route("/about")
def about():
return render_template("about.html", title="About")
| [
"[email protected]"
]
| |
07b01473d13eb88ada3a05a4badbc11557620cc4 | b0a274023658af5202b34772836a17876e2429c4 | /ballet/eng/ts.py | 901571acacbded4a2a29188533e0c7399998ae6d | [
"MIT"
]
| permissive | pvk-developer/ballet | a785a89c5fabf4eac74a732dd81c600b5a4a3761 | 1b720790aff072d1b004c7df0f70860bf4947204 | refs/heads/master | 2021-02-12T17:04:00.317030 | 2020-03-02T05:20:30 | 2020-03-02T05:20:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | from sklearn.pipeline import FeatureUnion
from ballet.eng.base import GroupedFunctionTransformer
__all__ = ['SingleLagger', 'make_multi_lagger']
class SingleLagger(GroupedFunctionTransformer):
"""Transformer that applies a lag operator to each group
Args:
lag (int): lag to apply
groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
"""
def __init__(self, lag, groupby_kwargs=None):
super().__init__(lambda x: x.shift(lag), groupby_kwargs=groupby_kwargs)
def make_multi_lagger(lags, groupby_kwargs=None):
"""Return a union of transformers that apply different lags
Args:
lags (Collection[int]): collection of lags to apply
groupby_kwargs (dict): keyword arguments to pd.DataFrame.groupby
"""
laggers = [SingleLagger(l, groupby_kwargs=groupby_kwargs) for l in lags]
feature_union = FeatureUnion([
(repr(lagger), lagger) for lagger in laggers
])
return feature_union
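# Illustrative usage sketch (not part of the original module); assumes a
# panel-style DataFrame with an 'entity' grouping column:
#
#   import pandas as pd
#   df = pd.DataFrame({'entity': ['a', 'a', 'a', 'b', 'b'],
#                      'value': [1, 2, 3, 4, 5]})
#   lagger = make_multi_lagger([1, 2], groupby_kwargs={'by': 'entity'})
#   X_lagged = lagger.fit_transform(df)  # lag-1 and lag-2 values per entity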
| [
"[email protected]"
]
| |
ffd5a7219b0761c0dad26c2d1d21ddc368b93c5b | f4b79529109fbb4055f334d0d9c7c96cb0710447 | /colour/models/rgb/ictcp.py | cc0bd41ea03c9d84fa00b772de1f6d7e5c2eeadd | [
"BSD-3-Clause"
]
| permissive | trevorandersen/colour | 167381b3d03e506a270a8d2a519a164808995437 | 02b595b26313c4b4f55adc41d599f90c4c9edbcd | refs/heads/develop | 2021-07-15T04:48:19.585586 | 2021-01-23T23:51:44 | 2021-01-23T23:51:44 | 230,421,054 | 0 | 0 | BSD-3-Clause | 2019-12-28T12:54:20 | 2019-12-27T10:10:30 | null | UTF-8 | Python | false | false | 17,487 | py | # -*- coding: utf-8 -*-
"""
:math:`IC_TC_P` Colour Encoding
===============================
Defines the :math:`IC_TC_P` colour encoding related transformations:
- :func:`colour.RGB_to_ICtCp`
- :func:`colour.ICtCp_to_RGB`
- :func:`colour.XYZ_to_ICtCp`
- :func:`colour.ICtCp_to_XYZ`
References
----------
- :cite:`Dolby2016a` : Dolby. (2016). WHAT IS ICtCp? - INTRODUCTION.
https://www.dolby.com/us/en/technologies/dolby-vision/ICtCp-white-paper.pdf
- :cite:`InternationalTelecommunicationUnion2018` : International
Telecommunication Union. (2018). Recommendation ITU-R BT.2100-2 - Image
parameter values for high dynamic range television for use in production
and international programme exchange.
https://www.itu.int/dms_pubrec/itu-r/rec/bt/\
R-REC-BT.2100-2-201807-I!!PDF-E.pdf
- :cite:`Lu2016c` : Lu, T., Pu, F., Yin, P., Chen, T., Husak, W., Pytlarz,
J., Atkins, R., Froehlich, J., & Su, G.-M. (2016). ITP Colour Space and Its
Compression Performance for High Dynamic Range and Wide Colour Gamut Video
Distribution. ZTE Communications, 14(1), 32-38.
"""
import numpy as np
from colour.colorimetry import CCS_ILLUMINANTS
from colour.models.rgb import RGB_COLOURSPACES, RGB_to_XYZ, XYZ_to_RGB
from colour.models.rgb.transfer_functions import (eotf_inverse_ST2084,
eotf_ST2084)
from colour.utilities import (domain_range_scale, vector_dot, from_range_1,
to_domain_1)
__author__ = 'Colour Developers'
__copyright__ = 'Copyright (C) 2013-2021 - Colour Developers'
__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'
__maintainer__ = 'Colour Developers'
__email__ = '[email protected]'
__status__ = 'Production'
__all__ = [
'MATRIX_ICTCP_RGB_TO_LMS', 'MATRIX_ICTCP_LMS_TO_RGB',
'MATRIX_ICTCP_LMS_P_TO_ICTCP', 'MATRIX_ICTCP_ICTCP_TO_LMS_P',
'MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2',
'MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2', 'RGB_to_ICtCp', 'ICtCp_to_RGB',
'XYZ_to_ICtCp', 'ICtCp_to_XYZ'
]
MATRIX_ICTCP_RGB_TO_LMS = np.array([
[1688, 2146, 262],
[683, 2951, 462],
[99, 309, 3688],
]) / 4096
"""
*ITU-R BT.2020* colourspace to normalised cone responses matrix.
MATRIX_ICTCP_RGB_TO_LMS : array_like, (3, 3)
"""
MATRIX_ICTCP_LMS_TO_RGB = np.linalg.inv(MATRIX_ICTCP_RGB_TO_LMS)
"""
:math:`IC_TC_P` colourspace normalised cone responses to *ITU-R BT.2020*
colourspace matrix.
MATRIX_ICTCP_LMS_TO_RGB : array_like, (3, 3)
"""
MATRIX_ICTCP_LMS_P_TO_ICTCP = np.array([
[2048, 2048, 0],
[6610, -13613, 7003],
[17933, -17390, -543],
]) / 4096
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix.
MATRIX_ICTCP_LMS_P_TO_ICTCP : array_like, (3, 3)
"""
MATRIX_ICTCP_ICTCP_TO_LMS_P = np.linalg.inv(MATRIX_ICTCP_LMS_P_TO_ICTCP)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix.
MATRIX_ICTCP_ICTCP_TO_LMS_P : array_like, (3, 3)
"""
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2 = np.array([
[2048, 2048, 0],
[3625, -7465, 3840],
[9500, -9212, -288],
]) / 4096
"""
:math:`LMS_p` *SMPTE ST 2084:2014* encoded normalised cone responses to
:math:`IC_TC_P` colour encoding matrix as given in *ITU-R BT.2100-2*.
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2 : array_like, (3, 3)
"""
MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2 = np.linalg.inv(
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2)
"""
:math:`IC_TC_P` colour encoding to :math:`LMS_p` *SMPTE ST 2084:2014* encoded
normalised cone responses matrix as given in *ITU-R BT.2100-2*.
MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2 : array_like, (3, 3)
"""
def RGB_to_ICtCp(RGB, method='Dolby 2016', L_p=10000):
"""
Converts from *ITU-R BT.2020* colourspace to :math:`IC_TC_P` colour
encoding.
Parameters
----------
RGB : array_like
*ITU-R BT.2020* colourspace array.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
:math:`IC_TC_P` colour encoding array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations. The effective domain of *SMPTE ST 2084:2014*
inverse electro-optical transfer function (EOTF / EOCF) is
[0.0001, 10000].
- The *ITU-R BT.2100-2 HLG* method uses a different :math:`LMS_p` encoded
normalised cone responses to :math:`IC_TC_P` matrix.
- The *ITU-R BT.2100-2 PQ* method is an alias for the *Dolby 2016*
method.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``RGB`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> RGB = np.array([0.45620519, 0.03081071, 0.04091952])
>>> RGB_to_ICtCp(RGB) # doctest: +ELLIPSIS
array([ 0.0735136..., 0.0047525..., 0.0935159...])
>>> RGB_to_ICtCp(RGB, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.0735136..., 0.0026085..., 0.0495414...])
"""
RGB = to_domain_1(RGB)
    is_dolby_method = method.lower() in ('dolby 2016', 'itu-r bt.2100-2 pq')
LMS = vector_dot(MATRIX_ICTCP_RGB_TO_LMS, RGB)
with domain_range_scale('ignore'):
LMS_p = eotf_inverse_ST2084(LMS, L_p)
ICtCp = (vector_dot(MATRIX_ICTCP_LMS_P_TO_ICTCP, LMS_p)
if is_dolby_method else vector_dot(
MATRIX_ICTCP_LMS_P_TO_ICTCP_HLG_BT2100_2, LMS_p))
return from_range_1(ICtCp)
def ICtCp_to_RGB(ICtCp, method='Dolby 2016', L_p=10000):
"""
Converts from :math:`IC_TC_P` colour encoding to *ITU-R BT.2020*
colourspace.
Parameters
----------
ICtCp : array_like
:math:`IC_TC_P` colour encoding array.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
*ITU-R BT.2020* colourspace array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations.
- The *ITU-R BT.2100-2 HLG* method uses a different :math:`IC_TC_P` to
:math:`LMS_p` encoded normalised cone responses matrix.
- The *ITU-R BT.2100-2 PQ* method is an alias for the *Dolby 2016*
method.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``RGB`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> ICtCp = np.array([0.07351364, 0.00475253, 0.09351596])
>>> ICtCp_to_RGB(ICtCp) # doctest: +ELLIPSIS
array([ 0.4562052..., 0.0308107..., 0.0409195...])
>>> ICtCp = np.array([0.07351364, 0.00260851, 0.04954147])
>>> ICtCp_to_RGB(ICtCp, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.4562051..., 0.0308107..., 0.0409195...])
"""
ICtCp = to_domain_1(ICtCp)
    is_dolby_method = method.lower() in ('dolby 2016', 'itu-r bt.2100-2 pq')
LMS_p = (vector_dot(MATRIX_ICTCP_ICTCP_TO_LMS_P, ICtCp)
if is_dolby_method else vector_dot(
MATRIX_ICTCP_ICTCP_TO_LMS_P_HLG_BT2100_2, ICtCp))
with domain_range_scale('ignore'):
LMS = eotf_ST2084(LMS_p, L_p)
RGB = vector_dot(MATRIX_ICTCP_LMS_TO_RGB, LMS)
return from_range_1(RGB)
def XYZ_to_ICtCp(XYZ,
illuminant=CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer']['D65'],
chromatic_adaptation_transform='CAT02',
method='Dolby 2016',
L_p=10000):
"""
Converts from *CIE XYZ* tristimulus values to :math:`IC_TC_P` colour
encoding.
Parameters
----------
XYZ : array_like
*CIE XYZ* tristimulus values.
illuminant : array_like, optional
Source illuminant chromaticity coordinates.
chromatic_adaptation_transform : unicode, optional
**{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02 Brill 2008',
'Bianco 2010', 'Bianco PC 2010'}**,
*Chromatic adaptation* transform.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
:math:`IC_TC_P` colour encoding array.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations. The effective domain of *SMPTE ST 2084:2014*
inverse electro-optical transfer function (EOTF / EOCF) is
[0.0001, 10000].
- The *ITU-R BT.2100-2 HLG* method uses a different :math:`LMS_p` encoded
normalised cone responses to :math:`IC_TC_P` matrix.
- The *ITU-R BT.2100-2 PQ* method is an alias for the *Dolby 2016*
method.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``XYZ`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> XYZ = np.array([0.20654008, 0.12197225, 0.05136952])
>>> XYZ_to_ICtCp(XYZ) # doctest: +ELLIPSIS
array([ 0.0685809..., -0.0028384..., 0.0602098...])
>>> XYZ_to_ICtCp(XYZ, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.0685809..., -0.0015547..., 0.0318973...])
"""
BT2020 = RGB_COLOURSPACES['ITU-R BT.2020']
RGB = XYZ_to_RGB(
XYZ,
illuminant,
BT2020.whitepoint,
BT2020.matrix_XYZ_to_RGB,
chromatic_adaptation_transform,
)
return RGB_to_ICtCp(RGB, method, L_p)
def ICtCp_to_XYZ(ICtCp,
illuminant=CCS_ILLUMINANTS[
'CIE 1931 2 Degree Standard Observer']['D65'],
chromatic_adaptation_transform='CAT02',
method='Dolby 2016',
L_p=10000):
"""
Converts from :math:`IC_TC_P` colour encoding to *CIE XYZ* tristimulus
values.
Parameters
----------
ICtCp : array_like
:math:`IC_TC_P` colour encoding array.
illuminant : array_like, optional
Source illuminant chromaticity coordinates.
chromatic_adaptation_transform : unicode, optional
**{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02 Brill 2008',
'Bianco 2010', 'Bianco PC 2010'}**,
*Chromatic adaptation* transform.
method : unicode, optional
**{'Dolby 2016', 'ITU-R BT.2100-2 HLG', 'ITU-R BT.2100-2 PQ'}**,
Computation method.
L_p : numeric, optional
Display peak luminance :math:`cd/m^2` for *SMPTE ST 2084:2014*
non-linear encoding. This parameter should stay at its default
:math:`10000 cd/m^2` value for practical applications. It is exposed so
that the definition can be used as a fitting function.
Returns
-------
ndarray
*CIE XYZ* tristimulus values.
Warnings
--------
The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function.
Notes
-----
- The underlying *SMPTE ST 2084:2014* transfer function is an absolute
transfer function, thus the domain and range values for the *Reference*
and *1* scales are only indicative that the data is not affected by
scale transformations.
- The *ITU-R BT.2100-2 HLG* method uses a different :math:`IC_TC_P` to
:math:`LMS_p` encoded normalised cone responses matrix.
- The *ITU-R BT.2100-2 PQ* method is an alias for the *Dolby 2016*
method.
+------------+-----------------------+------------------+
| **Domain** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``ICtCp`` | ``I`` : [0, 1] | ``I`` : [0, 1] |
| | | |
| | ``CT`` : [-1, 1] | ``CT`` : [-1, 1] |
| | | |
| | ``CP`` : [-1, 1] | ``CP`` : [-1, 1] |
+------------+-----------------------+------------------+
+------------+-----------------------+------------------+
| **Range** | **Scale - Reference** | **Scale - 1** |
+============+=======================+==================+
| ``XYZ`` | ``UN`` | ``UN`` |
+------------+-----------------------+------------------+
References
----------
:cite:`Dolby2016a`, :cite:`Lu2016c`
Examples
--------
>>> ICtCp = np.array([0.06858097, -0.00283842, 0.06020983])
>>> ICtCp_to_XYZ(ICtCp) # doctest: +ELLIPSIS
array([ 0.2065400..., 0.1219722..., 0.0513695...])
>>> ICtCp = np.array([0.06858097, -0.00155479, 0.03189734])
>>> ICtCp_to_XYZ(ICtCp, method='ITU-R BT.2100-2 HLG') # doctest: +ELLIPSIS
array([ 0.2065401..., 0.1219722..., 0.0513695...])
"""
RGB = ICtCp_to_RGB(ICtCp, method, L_p)
BT2020 = RGB_COLOURSPACES['ITU-R BT.2020']
XYZ = RGB_to_XYZ(
RGB,
BT2020.whitepoint,
illuminant,
BT2020.matrix_RGB_to_XYZ,
chromatic_adaptation_transform,
)
return XYZ
| [
"[email protected]"
]
| |
3850bc892d13df29ec269ea05acd45528302e442 | a14dd601cde67f67d0ba38dfd1362f7c0109cef1 | /recursion/leetcode/word-break/count.py | 56dba130f8e99755805eb6a3116f554bd9cee700 | []
| no_license | Meaha7/dsa | d5ea1615f05dae32671af1f1c112f0c759056473 | fa80219ff8a6f4429fcf104310f4169d007af712 | refs/heads/main | 2023-09-03T18:52:41.950294 | 2021-11-05T09:14:42 | 2021-11-05T09:14:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 892 | py | # T=n²+2ⁿ+w,S=2ⁿnw
def x(s, words):
def dfs(s):
if not s:
return 1
count = 0
for word in words:
if s.startswith(word):
count += dfs(s[len(word):])
return count
return dfs(s)
# T=n²+2ⁿ+w,S=2ⁿnw
def y(s, words):
words = set(words)
def dfs(s):
if not s:
return 1
count = 0
for i in range(len(s)):
prefix, suffix = s[:i + 1], s[i + 1:]
if prefix in words:
count += dfs(suffix)
return count
return dfs(s)
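# Illustrative addition (not in the original file): the same count, memoized
# on the suffix start index so each suffix is solved only once instead of the
# exponential recomputation done by x and y above. T=n³+w,S=n+w
from functools import lru_cache
def z(s, words):
    words = set(words)
    @lru_cache(maxsize=None)
    def dfs(i):
        # Number of ways to segment the suffix s[i:].
        if i == len(s):
            return 1
        return sum(dfs(j) for j in range(i + 1, len(s) + 1) if s[i:j] in words)
    return dfs(0)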
for s, words in [
('nikhil', ['nikhil']),
('catsanddog', ['cat', 'cats', 'and', 'sand', 'dog']),
('pineapplepenapple', ['apple', 'pen', 'applepen', 'pine', 'pineapple']),
('catsandog', ['cats', 'dog', 'sand', 'and', 'cat']),
]:
print(x(s, words), end=' ')
print(y(s, words))
| [
"[email protected]"
]
| |
30bdce0bca57d062eace71dc5e23763d2ce913df | de3413c3af4ac0a76d817a7e624d8d2e08379003 | /svm/svm_author_id.py | 4581a12bca152e5b98a1c9173154dcebc7ef9243 | []
| no_license | victorlifan/ud120-projects_mechine_learning_ex | 8aded0e48955e1970ab501567317e660fdce97e9 | 39834cf8607dd448db332937953bf0a8a7303832 | refs/heads/master | 2022-07-10T15:03:04.212606 | 2020-04-14T02:02:22 | 2020-04-14T02:02:22 | 250,891,814 | 0 | 0 | null | 2022-06-22T01:35:58 | 2020-03-28T20:56:13 | Jupyter Notebook | UTF-8 | Python | false | false | 1,319 | py | #!/usr/bin/python
"""
This is the code to accompany the Lesson 2 (SVM) mini-project.
Use a SVM to identify emails from the Enron corpus by their authors:
Sara has label 0
Chris has label 1
"""
import sys
from time import time
sys.path.append("../tools/")
from email_preprocess import preprocess
### features_train and features_test are the features for the training
### and testing datasets, respectively
### labels_train and labels_test are the corresponding item labels
features_train, features_test, labels_train, labels_test = preprocess()
### limit training data to improve training time
#features_train = features_train[:len(features_train)//100]
#labels_train = labels_train[:len(labels_train)//100]
#########################################################
### your code goes here ###
from sklearn.svm import SVC
#clf= SVC(kernel='linear')
clf = SVC(kernel = 'rbf', C= 10000)
t0= time()
clf.fit(features_train,labels_train)
print('training time:', round(time()-t0, 3),'s')
t0= time()
pre = clf.predict(features_test)
print("there are {} emails are predicted to be in 'Chris' and {} in 'Sara'".format(sum(pre), pre.shape[0]-sum(pre)))
print('prediction time:', round(time()-t0,3),'s')
print(clf.score(features_test,labels_test))
#########################################################
| [
"[email protected]"
]
| |
a3dca879c750c6f8833c50913a50f39b7afd6b2c | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02873/s049606753.py | 6b59a11dc7ecdf3b53103e6b4936eef9860697ed | []
| no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | s=input()
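# Problem (inferred from the computation below): given a string of '<' and
# '>', assign non-negative integers so every comparison holds, minimising
# their sum. The string is split into runs of identical characters; each
# ascending/descending run pair forms a peak, costing the triangular sum over
# the longer slope (including the peak) plus the triangular sum over the
# shorter slope excluding the peak: m*(m+1)/2 + n*(n-1)/2 with m >= n.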
sl=len(s)
a=[]
count=1
for i in range(sl-1):
if s[i+1]==s[i]:
count+=1
else:
a.append(count)
count=1
a.append(count)
ans=0
al=len(a)
if s[0]=="<":
for i in range(0,al-1,2):
m,n=max(a[i],a[i+1]),min(a[i],a[i+1])
ans+=(m*(m+1)+n*(n-1))/2
if al%2==1:
ans+=a[-1]*(a[-1]+1)/2
elif s[0]==">":
ans+=a[0]*(a[0]+1)/2
for i in range(1,al-1,2):
m,n=max(a[i],a[i+1]),min(a[i],a[i+1])
ans+=(m*(m+1)+n*(n-1))/2
if al%2==0:
ans+=a[-1]*(a[-1]+1)/2
print(int(ans)) | [
"[email protected]"
]
| |
f7f7789d0731133f29e78799dc2f7183155bde34 | 8881a4927d893e1e755c0488f76ba7941b379f26 | /emp_mgmt_sys/poll/migrations/0002_choice.py | f4b08e4cb314699a645b68f87618ecbf13704cf9 | []
| no_license | SatishNitk/Django | 6bb839fcf2bc7d70413e3d56ac98124a7a96a5de | d9260c032322a34410d783c39a8f13e8f63b8be4 | refs/heads/master | 2020-05-24T23:01:35.767388 | 2019-07-06T13:56:50 | 2019-07-06T13:56:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | # Generated by Django 2.0.1 on 2019-05-26 13:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('poll', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Choice',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('text', models.TextField(blank=True, null=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
                ('question', models.ForeignKey(on_delete=models.CASCADE, to='poll.Question')),
],
),
]
| [
"[email protected]"
]
| |
439a7935a465973070be43a439890fa28f94e6da | 42b9bafc3c757543328d93fb60269ad4255aae17 | /env/lib/python3.7/site-packages/thefuck/rules/git_stash_pop.py | 0e143ffd813b34c0326ae221f2e00e2d36363417 | [
"MIT"
]
| permissive | mejeng/kasir | 4fe66d1828e72b64d770426d71185cdd3c54127e | cc6f9158b61c0cb45078ddf798af9588c8771311 | refs/heads/master | 2020-09-25T03:36:10.144439 | 2019-11-30T07:59:23 | 2019-11-30T07:59:23 | 225,908,795 | 2 | 0 | MIT | 2019-12-04T16:21:15 | 2019-12-04T16:21:15 | null | UTF-8 | Python | false | false | 485 | py | from thefuck.shells import shell
from thefuck.specific.git import git_support
@git_support
def match(command):
return ('stash' in command.script
and 'pop' in command.script
and 'Your local changes to the following files would be overwritten by merge' in command.output)
@git_support
def get_new_command(command):
return shell.and_('git add --update', 'git stash pop', 'git reset .')
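# Example (illustrative): a failing `git stash pop` is replaced with
# `git add --update && git stash pop && git reset .` in shells where
# shell.and_ joins commands with '&&'.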
# make it come before the other applicable rules
priority = 900
| [
"[email protected]"
]
| |
fdac36dc04d4acfc4251c57b971885680494e3e0 | 2b502aae9bc33bac6c4b28d1e702591f2cbed690 | /terrascript/dme/d.py | 77bc3b53e47c32d22d41ca41ca5f8499317007b5 | [
"Python-2.0",
"BSD-2-Clause"
]
| permissive | LeeroyC710/python-terrascript | 4c8fbe032e9b7dd8844d962f888c28f87a26ff77 | b8f3c3549b149c124e3e48e0cea0396332ad1a1d | refs/heads/master | 2020-12-28T03:58:04.502969 | 2020-01-19T21:46:52 | 2020-01-19T21:46:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | # terrascript/dme/d.py
import terrascript
| [
"[email protected]"
]
| |
c0ad9538c01db6eed246cc86917332e8c4e02e0d | ad59fb12042bfd3f5c43eca057d0f747f9e148cf | /Se2iP/usr/lib/enigma2/python/Plugins/Extensions/IPTVPlayer/tsiplayer/addons/resources/hosters/vk.py | 3edd8b9d6e4cf32052f7ca686c076546d5635d52 | []
| no_license | lexlong2007/eePlugins | d62b787100a7069ad5713a47c5688008063b45ec | 167b262fe36901a2d3a2fae6d0f85e2307b3eff7 | refs/heads/master | 2022-03-09T05:37:37.567937 | 2022-02-27T01:44:25 | 2022-02-27T01:44:25 | 253,012,126 | 0 | 0 | null | 2020-04-04T14:03:29 | 2020-04-04T14:03:29 | null | UTF-8 | Python | false | false | 3,404 | py | #-*- coding: utf-8 -*-
# https://github.com/Kodi-vStream/venom-xbmc-addons
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.handler.requestHandler import cRequestHandler
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.parser import cParser
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.hosters.hoster import iHoster
from Plugins.Extensions.IPTVPlayer.tsiplayer.addons.resources.lib.comaddon import xbmcgui
class cHoster(iHoster):
def __init__(self):
self.__sDisplayName = 'Vk'
self.__sFileName = self.__sDisplayName
self.__sHD = ''
def getDisplayName(self):
return self.__sDisplayName
def setDisplayName(self, sDisplayName):
self.__sDisplayName = sDisplayName + ' [COLOR skyblue]' + self.__sDisplayName + '[/COLOR] [COLOR khaki]' + self.__sHD + '[/COLOR]'
def setFileName(self, sFileName):
self.__sFileName = sFileName
def getFileName(self):
return self.__sFileName
def setHD(self, sHD):
if 'hd' in sHD:
self.__sHD = 'HD'
else:
self.__sHD = ''
def getHD(self):
return self.__sHD
def getPluginIdentifier(self):
return 'vk'
def isDownloadable(self):
return True
def isJDownloaderable(self):
return True
def getPattern(self):
return ''
def __getIdFromUrl(self):
sPattern = "?([^<]+)"
oParser = cParser()
aResult = oParser.parse(self.__sUrl, sPattern)
if (aResult[0] == True):
return aResult[1][0]
return ''
def __modifyUrl(self, sUrl):
if (sUrl.startswith('http://')):
oRequestHandler = cRequestHandler(sUrl)
oRequestHandler.request()
sRealUrl = oRequestHandler.getRealUrl()
self.__sUrl = sRealUrl
return self.__getIdFromUrl()
return sUrl
def __getKey(self):
oRequestHandler = cRequestHandler(self.__sUrl)
sHtmlContent = oRequestHandler.request()
sPattern = 'fkzd="(.+?)";'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
aResult = aResult[1][0].replace('.', '%2E')
return aResult
return ''
def setUrl(self, sUrl):
self.__sUrl = sUrl
def checkUrl(self, sUrl):
return True
def getUrl(self):
return self.__sUrl
def getMediaLink(self):
return self.__getMediaLinkForGuest()
def __getMediaLinkForGuest(self):
url=[]
qua=[]
oRequest = cRequestHandler(self.__sUrl)
sHtmlContent = oRequest.request()
sPattern = '"url.+?":"(.+?)\.(\d+).mp4'
oParser = cParser()
aResult = oParser.parse(sHtmlContent, sPattern)
if (aResult[0] == True):
for aEntry in aResult[1]:
url.append(aEntry[0])
qua.append(str(aEntry[1]))
        if not url:  # no stream found in the page
            return False, False
        dialog2 = xbmcgui.Dialog()
        ret = dialog2.select('Select Quality', qua)
        if ret < 0:  # quality dialog cancelled by the user
            return False, False
        #sUrl = url[ret] + '.' + qua[ret] + '.mp4'
        api_call = ('%s.%s.mp4') % (url[ret], qua[ret])
if api_call:
return True, api_call
return False, False
| [
"[email protected]"
]
| |
1b1c9a876ce91e05fab954a1120d2c8366fe8202 | ec291572e354d0718a42929b84f831363cdbeb4b | /djlib/cron_utils.py | 995ed62361da660010f623b0385101d5e64505b0 | []
| no_license | Ishayahu/Mizva-CMS | 4aaffe01093ca807a5cf2fdec45a3e5213938940 | 574cd5363132ea19772221c4a4b27415dbf17814 | refs/heads/master | 2021-01-10T20:26:13.876744 | 2013-11-26T10:47:02 | 2013-11-26T10:47:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,970 | py | # -*- coding:utf-8 -*-
# coding=<utf8>
import datetime
def decronize(fstring):
minute,hour,day,month,wday = fstring.split('\t')
    def get_interval(fstr, lo, hi):
        # lo/hi - bounds of the field (renamed from min/max to avoid
        # shadowing the builtins):
        # minute
        # * or 0-59
        # hour
        # * or 0-23
        # day of month
        # * or 1-31
        # month
        # *, 1-12 or a month name (names are not parsed here)
        # day of week
        # *, 0-7 or a day name (Sunday is both 0 and 7)
        hi += 1
        # any value
        if fstr[0] == '*':
            # a bare * - the full range, e.g. 0 to 59 for minutes
            if len(fstr) == 1:
                return list(range(lo, hi))
            # * with a step, e.g. */2 - every two hours
            elif len(fstr) > 1:
                return list(range(lo, hi, int(fstr[2:])))
        # a comma-separated list of values
        elif ',' in fstr:
            return list(map(int, fstr.split(',')))
        # a range
        elif '-' in fstr:
            # a range with a step, e.g. 2-15/2
            if '/' in fstr:
                interval = int(fstr.split('/')[1])
                start, end = list(map(int, fstr.split('/')[0].split('-')))
                # cron ranges are inclusive of the end value
                return list(range(start, end + 1, interval))
            # a plain range, e.g. 2-15
            else:
                start, end = list(map(int, fstr.split('-')))
                return list(range(start, end + 1))
        else:
            return (int(fstr),)
minute = get_interval(minute,0,59)
hour = get_interval(hour,0,23)
month = get_interval(month,1,12)
if (day !='*' and wday !='*') or (day =='*' and wday =='*'):
day = get_interval(day,1,31)
wday = get_interval(wday,0,6)
elif day=='*' and wday != '*':
day = list()
wday = get_interval(wday,0,6)
elif day != '*' and wday=='*':
day = get_interval(day,1,31)
wday = list()
return {'minute':minute,'hour':hour,'day':day,'month':month,'wday':wday}
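# Illustrative sketch (not in the original module): fields are tab-separated,
# e.g. every 15 minutes on weekdays:
#
#   decronize('*/15\t*\t*\t*\t1-5')['minute']  # -> [0, 15, 30, 45]
#   decronize('*/15\t*\t*\t*\t1-5')['wday']    # -> [1, 2, 3, 4, 5]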
def crontab_to_russian(fstr):
    # The output is intentionally Russian; in English the template reads:
    # "At {hour} hours {minute} minutes, every {day} day of the month or
    # every {wday} day of the week, in months {month}".
    result = u'В {hour[0]} часов {minute[0]} минут каждый {day} день месяца или каждый {wday} день недели в месяцах {month}'.format(**decronize(fstr))
return result
def generate_next_reminder(ranges, stop_date):
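    # Walks the time fields from least to most significant (minute, hour,
    # day/weekday, month), bumping each to its next allowed value and carrying
    # overflow into the next field, odometer-style; the year rolls over when
    # the month wraps. Note: datetime.weekday() is Monday=0 while crontab
    # weekdays use Sunday=0, so callers mixing the two conventions must
    # normalise 'wday' themselves.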
minute = datetime.datetime.now().minute
hour = datetime.datetime.now().hour
day = datetime.datetime.now().day
month = datetime.datetime.now().month
wday = datetime.datetime.now().weekday()
year = datetime.datetime.now().year
crit_dict = {'month':month,'day':day,'hour':hour,'minute':minute,'wday':wday}
crit_max = {'month':13,'day':32,'hour':24,'minute':60,'wday':7}
crit_min = {'month':1,'day':1,'hour':0,'minute':0,'wday':0}
to_next = False
for criteria in ('minute','hour','day','month'):
if criteria != 'day':
# if criteria == 'month':
# print crit_dict
# print to_next
if to_next:
crit_dict[criteria] += 1
to_next = False
if crit_dict[criteria] == crit_max[criteria]:
crit_dict[criteria] = crit_min[criteria]
to_next = True
while True: #crit_dict[criteria] <= crit_max[criteria]:
if crit_dict[criteria] in ranges[criteria]:
break
crit_dict[criteria] +=1
if crit_dict[criteria] >= crit_max[criteria]:
crit_dict[criteria] = crit_min[criteria]
to_next = True
else:
if to_next:
#print 'here'
crit_dict['day'] += 1
crit_dict['wday'] += 1
if crit_dict['wday'] == 7:
crit_dict['wday'] = 1
to_next = False
while True: # crit_dict['day'] <= crit_max['day'] and crit_dict['wday'] <= crit_max['wday']:
#print crit_dict
if crit_dict['day'] in ranges['day'] or crit_dict['wday'] in ranges['wday']:
break
crit_dict['day'] += 1
crit_dict['wday'] += 1
if crit_dict['day'] >= crit_max['day']:
crit_dict['day'] = crit_min['day']
to_next = True
if crit_dict['wday'] >= crit_max['wday']:
crit_dict['wday'] = crit_min['wday']
# to_next = True
if to_next:
year += 1
next_reminder = datetime.datetime(year,crit_dict['month'],crit_dict['day'],crit_dict['hour'],crit_dict['minute'])
# return crit_dict['minute'],crit_dict['hour'],crit_dict['day'],crit_dict['month'],crit_dict['wday']
if stop_date and next_reminder > stop_date:
return False
return next_reminder | [
"[email protected]"
]
| |
594ef675881837a25b2f2cde141c227cf3093caf | 25f47c750a150727826cbf873a0ac50eb9e97305 | /tests/client/osio_rest_client_test.py | 2a50e09091210e18a41090adf8b1b4493f71d266 | [
"MIT"
]
| permissive | tonywaite/scs_host_rpi | 7d06a41c525202663f73133fbfaada4384e97693 | 5bd09bb9b4410cb47782e2bfab0dd2bbab365b3a | refs/heads/master | 2022-12-05T02:51:23.882694 | 2020-08-19T14:26:53 | 2020-08-19T14:26:53 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | #!/usr/bin/env python3
"""
Created on 9 Nov 2016
@author: Bruno Beloff ([email protected])
"""
from scs_core.client.http_client import HTTPClient
from scs_core.osio.client.rest_client import RESTClient
# --------------------------------------------------------------------------------------------------------------------
api_key = "43308b72-ad41-4555-b075-b4245c1971db"
path = "/v1/orgs/south-coast-science-dev/topics"
# --------------------------------------------------------------------------------------------------------------------
rest_client = RESTClient(HTTPClient(False), api_key)
rest_client.connect()
print(rest_client)
data = rest_client.get(path)
print(data)
| [
"[email protected]"
]
| |
ec816035a8401397a7e472bf6a04e7fad8f29e2d | 7159e9970ce2cc58482416392d1b489087b913c1 | /tests/many_to_one/tests.py | 283baae433149312c9430e7d0beca46dbba63880 | [
"BSD-3-Clause"
]
| permissive | ufgstores/django19 | d4a53f12aa6d89405e2332d0c53a29447e6f8650 | 644716493ecac37271610de0e9cf97fc1fe46f10 | refs/heads/master | 2020-09-20T13:54:04.608149 | 2019-11-28T14:07:07 | 2019-11-28T14:29:02 | 224,502,004 | 0 | 0 | BSD-3-Clause | 2019-11-28T14:08:25 | 2019-11-27T19:23:16 | Python | UTF-8 | Python | false | false | 30,333 | py | import datetime
from copy import deepcopy
from django.core.exceptions import FieldError, MultipleObjectsReturned
from django.db import models, transaction
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import (
Article, Category, Child, First, Parent, Record, Relation, Reporter,
School, Student, Third, ToFieldChild,
)
class ManyToOneTests(TestCase):
def setUp(self):
# Create a few Reporters.
self.r = Reporter(first_name='John', last_name='Smith', email='[email protected]')
self.r.save()
self.r2 = Reporter(first_name='Paul', last_name='Jones', email='[email protected]')
self.r2.save()
# Create an Article.
self.a = Article(id=None, headline="This is a test",
pub_date=datetime.date(2005, 7, 27), reporter=self.r)
self.a.save()
def test_get(self):
# Article objects have access to their related Reporter objects.
r = self.a.reporter
self.assertEqual(r.id, self.r.id)
# These are strings instead of unicode strings because that's what was used in
# the creation of this reporter (and we haven't refreshed the data from the
# database, which always returns unicode strings).
self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
def test_create(self):
# You can also instantiate an Article by passing the Reporter's ID
# instead of a Reporter object.
a3 = Article(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
a3.save()
self.assertEqual(a3.reporter.id, self.r.id)
# Similarly, the reporter ID can be a string.
a4 = Article(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
a4.save()
self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
def test_add(self):
# Create an Article via the Reporter object.
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.assertEqual(repr(new_article), "<Article: John's second story>")
self.assertEqual(new_article.reporter.id, self.r.id)
# Create a new article, and add it to the article set.
new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
msg = "<Article: Paul's story> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
self.r.article_set.add(new_article2)
self.r.article_set.add(new_article2, bulk=False)
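        # With bulk=False the related manager sets the FK and calls save() on
        # each object instead of issuing a bulk UPDATE, so the unsaved
        # instance is accepted here.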
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# Add the same article to a different article set - check that it moves.
self.r2.article_set.add(new_article2)
self.assertEqual(new_article2.reporter.id, self.r2.id)
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Adding an object of the wrong type raises TypeError.
with transaction.atomic():
with six.assertRaisesRegex(self, TypeError,
"'Article' instance expected, got <Reporter.*"):
self.r.article_set.add(self.r2)
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_set(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again.
self.r2.article_set.set([new_article, new_article2])
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Funny case - because the ForeignKey cannot be null,
# existing members of the set must remain.
self.r.article_set.set([new_article])
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
def test_assign(self):
new_article = self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
new_article2 = self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Assign the article to the reporter directly using the descriptor.
new_article2.reporter = self.r
new_article2.save()
self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
self.assertEqual(new_article2.reporter.id, self.r.id)
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), [])
# Set the article back again using set descriptor.
self.r2.article_set = [new_article, new_article2]
self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
self.assertQuerysetEqual(self.r2.article_set.all(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
])
# Funny case - assignment notation can only go so far; because the
# ForeignKey cannot be null, existing members of the set must remain.
self.r.article_set = [new_article]
self.assertQuerysetEqual(self.r.article_set.all(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
# Reporter cannot be null - there should not be a clear or remove method
self.assertFalse(hasattr(self.r2.article_set, 'remove'))
self.assertFalse(hasattr(self.r2.article_set, 'clear'))
def test_selects(self):
self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
# Reporter objects have access to their related Article objects.
self.assertQuerysetEqual(self.r.article_set.all(), [
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
["<Article: This is a test>"])
self.assertEqual(self.r.article_set.count(), 2)
self.assertEqual(self.r2.article_set.count(), 1)
# Get articles by id
self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
["<Article: This is a test>"])
self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
["<Article: This is a test>"])
# Query on an article property
self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
["<Article: This is a test>"])
# The API automatically follows relationships as far as you need.
# Use double underscores to separate relationships.
# This works as many levels deep as you want. There's no limit.
# Find all Articles for any Reporter whose first name is "John".
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Check that implied __exact also works
self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Query twice over the related field.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith'),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# The underlying query only makes one join when a related table is referenced twice.
queryset = Article.objects.filter(reporter__first_name__exact='John',
reporter__last_name__exact='Smith')
self.assertNumQueries(1, list, queryset)
self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
# The automatically joined table has a predictable name.
self.assertQuerysetEqual(
Article.objects.filter(reporter__first_name__exact='John').extra(
where=["many_to_one_reporter.last_name='Smith'"]),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
self.assertQuerysetEqual(
(Article.objects
.filter(reporter__first_name__exact='John')
.extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
# Find all Articles for a Reporter.
# Use direct ID check, pk check, and object comparison
self.assertQuerysetEqual(
Article.objects.filter(reporter__id__exact=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__pk=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r.id),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter=self.r),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(
Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
[
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: This is a test>",
])
# You can also use a queryset instead of a literal list of instances.
# The queryset must be reduced to a list of values using values(),
# then converted into a query
self.assertQuerysetEqual(
Article.objects.filter(
reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
).distinct(),
[
"<Article: John's second story>",
"<Article: This is a test>",
])
def test_reverse_selects(self):
a3 = Article.objects.create(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
# Reporters can be queried
self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
["<Reporter: John Smith>"])
# Reporters can query in opposite direction of ForeignKey definition
self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T'),
["<Reporter: John Smith>", "<Reporter: John Smith>"],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct(),
["<Reporter: John Smith>"])
# Counting in the opposite direction works in conjunction with distinct()
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').count(), 2)
self.assertEqual(
Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
# Queries can go round in circles.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John'),
[
"<Reporter: John Smith>",
"<Reporter: John Smith>",
"<Reporter: John Smith>",
],
ordered=False
)
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
["<Reporter: John Smith>"])
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
["<Reporter: John Smith>"])
# Check that implied __exact also works.
self.assertQuerysetEqual(
Reporter.objects.filter(article__reporter=self.r).distinct(),
["<Reporter: John Smith>"])
# It's possible to use values() calls across many-to-one relations.
# (Note, too, that we clear the ordering here so as not to drag the
# 'headline' field into the columns being used to determine uniqueness)
d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
self.assertEqual([d],
list(Article.objects.filter(reporter=self.r).distinct().order_by()
.values('reporter__first_name', 'reporter__last_name')))
def test_select_related(self):
# Check that Article.objects.select_related().dates() works properly when
# there are multiple Articles with the same date but different foreign-key
# objects (Reporters).
r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='[email protected]')
r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='[email protected]')
Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
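        # dates() should return each distinct date only once, even though two
        # articles (by different reporters) share a pub_date.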
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
[
datetime.date(1980, 4, 23),
datetime.date(2005, 7, 27),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
[
datetime.date(1980, 4, 1),
datetime.date(2005, 7, 1),
])
self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
[
datetime.date(1980, 1, 1),
datetime.date(2005, 1, 1),
])

    def test_delete(self):
self.r.article_set.create(headline="John's second story",
pub_date=datetime.date(2005, 7, 29))
self.r2.article_set.create(headline="Paul's story",
pub_date=datetime.date(2006, 1, 17))
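        # reporter_id accepts the raw primary key, as either an int or a string.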
Article.objects.create(id=None, headline="Third article",
pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
Article.objects.create(id=None, headline="Fourth article",
pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # If you delete a reporter, their articles are deleted as well.
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Paul's story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
[
"<Reporter: John Smith>",
"<Reporter: Paul Jones>",
])
self.r2.delete()
self.assertQuerysetEqual(Article.objects.all(),
[
"<Article: Fourth article>",
"<Article: John's second story>",
"<Article: Third article>",
"<Article: This is a test>",
])
self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
["<Reporter: John Smith>"])
# You can delete using a JOIN in the query.
Reporter.objects.filter(article__headline__startswith='This').delete()
self.assertQuerysetEqual(Reporter.objects.all(), [])
self.assertQuerysetEqual(Article.objects.all(), [])

    def test_explicit_fk(self):
# Create a new Article with get_or_create using an explicit value
# for a ForeignKey.
a2, created = Article.objects.get_or_create(id=None,
headline="John's second test",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r.id)
self.assertTrue(created)
self.assertEqual(a2.reporter.id, self.r.id)
# You can specify filters containing the explicit FK value.
self.assertQuerysetEqual(
Article.objects.filter(reporter_id__exact=self.r.id),
[
"<Article: John's second test>",
"<Article: This is a test>",
])
# Create an Article by Paul for the same date.
a3 = Article.objects.create(id=None, headline="Paul's commentary",
pub_date=datetime.date(2011, 5, 7),
reporter_id=self.r2.id)
self.assertEqual(a3.reporter.id, self.r2.id)
# Get should respect explicit foreign keys as well.
self.assertRaises(MultipleObjectsReturned,
Article.objects.get, reporter_id=self.r.id)
self.assertEqual(repr(a3),
repr(Article.objects.get(reporter_id=self.r2.id,
pub_date=datetime.date(2011, 5, 7))))

    def test_deepcopy_and_circular_references(self):
        # Regression for #12876 -- Model methods that include queries that
        # recurse don't cause recursion depth problems under deepcopy.
self.r.cached_query = Article.objects.filter(reporter=self.r)
self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")

    def test_manager_class_caching(self):
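        # The reverse-relation manager class is generated dynamically, so make
        # sure it is created once and cached rather than rebuilt on each access.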
r1 = Reporter.objects.create(first_name='Mike')
r2 = Reporter.objects.create(first_name='John')
        # Repeated access on the same instance yields the same manager class.
self.assertIs(r1.article_set.__class__, r1.article_set.__class__)
        # The manager class is also shared across instances.
self.assertIs(r1.article_set.__class__, r2.article_set.__class__)

    def test_create_relation_with_ugettext_lazy(self):
reporter = Reporter.objects.create(first_name='John',
last_name='Smith',
email='[email protected]')
lazy = ugettext_lazy('test')
reporter.article_set.create(headline=lazy,
pub_date=datetime.date(2011, 6, 10))
notlazy = six.text_type(lazy)
article = reporter.article_set.get()
self.assertEqual(article.headline, notlazy)

    def test_values_list_exception(self):
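        # A bad field name in values_list() should raise FieldError with a
        # message listing the valid field choices.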
expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
self.assertRaisesMessage(FieldError,
expected_message % ', '.join(sorted(f.name for f in Reporter._meta.get_fields())),
Article.objects.values_list,
'reporter__notafield')
self.assertRaisesMessage(
FieldError,
expected_message % ', '.join(['EXTRA'] + sorted(f.name for f in Article._meta.get_fields())),
Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
'notafield'
)

    def test_fk_assignment_and_related_object_cache(self):
# Tests of ForeignKey assignment and the related-object cache (see #6886).
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
# Look up the object again so that we get a "fresh" object.
c = Child.objects.get(name="Child")
p = c.parent
        # Accessing the related object again returns exactly the same object.
self.assertIs(c.parent, p)
# But if we kill the cache, we get a new object.
del c._parent_cache
self.assertIsNot(c.parent, p)
# Assigning a new object results in that object getting cached immediately.
p2 = Parent.objects.create(name="Parent 2")
c.parent = p2
self.assertIs(c.parent, p2)
# Assigning None succeeds if field is null=True.
p.bestchild = None
self.assertIsNone(p.bestchild)
# bestchild should still be None after saving.
p.save()
self.assertIsNone(p.bestchild)
# bestchild should still be None after fetching the object again.
p = Parent.objects.get(name="Parent")
self.assertIsNone(p.bestchild)
# Assigning None fails: Child.parent is null=False.
self.assertRaises(ValueError, setattr, c, "parent", None)
        # You also can't assign an object of the wrong type here.
self.assertRaises(ValueError, setattr, c, "parent", First(id=1, second=1))
# Nor can you explicitly assign None to Child.parent during object
# creation (regression for #9649).
self.assertRaises(ValueError, Child, name='xyzzy', parent=None)
self.assertRaises(ValueError, Child.objects.create, name='xyzzy', parent=None)
# Creation using keyword argument should cache the related object.
p = Parent.objects.get(name="Parent")
c = Child(parent=p)
self.assertIs(c.parent, p)
# Creation using keyword argument and unsaved related instance (#8070).
p = Parent()
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
Child.objects.create(parent=p)
msg = "save() prohibited to prevent data loss due to unsaved related object 'parent'."
with self.assertRaisesMessage(ValueError, msg):
ToFieldChild.objects.create(parent=p)
# Creation using attname keyword argument and an id will cause the
# related object to be fetched.
p = Parent.objects.get(name="Parent")
c = Child(parent_id=p.id)
self.assertIsNot(c.parent, p)
self.assertEqual(c.parent, p)

    def test_multiple_foreignkeys(self):
# Test of multiple ForeignKeys to the same model (bug #7125).
c1 = Category.objects.create(name='First')
c2 = Category.objects.create(name='Second')
c3 = Category.objects.create(name='Third')
r1 = Record.objects.create(category=c1)
r2 = Record.objects.create(category=c1)
r3 = Record.objects.create(category=c2)
r4 = Record.objects.create(category=c2)
r5 = Record.objects.create(category=c3)
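        # Relation has two ForeignKeys to Record (left and right); create
        # links both within and across categories.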
Relation.objects.create(left=r1, right=r2)
Relation.objects.create(left=r3, right=r4)
Relation.objects.create(left=r1, right=r3)
Relation.objects.create(left=r5, right=r2)
Relation.objects.create(left=r3, right=r2)
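        # Each ForeignKey gets its own join, so left and right can be
        # filtered independently.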
q1 = Relation.objects.filter(left__category__name__in=['First'], right__category__name__in=['Second'])
self.assertQuerysetEqual(q1, ["<Relation: First - Second>"])
q2 = Category.objects.filter(record__left_set__right__category__name='Second').order_by('name')
self.assertQuerysetEqual(q2, ["<Category: First>", "<Category: Second>"])
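        # A ForeignKey only accepts instances of the related model; a Child
        # is not a valid parent for another Child.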
p = Parent.objects.create(name="Parent")
c = Child.objects.create(name="Child", parent=p)
self.assertRaises(ValueError, Child.objects.create, name="Grandchild", parent=c)

    def test_fk_instantiation_outside_model(self):
# Regression for #12190 -- Should be able to instantiate a FK outside
# of a model, and interrogate its related field.
cat = models.ForeignKey(Category, models.CASCADE)
self.assertEqual('id', cat.remote_field.get_related_field().name)

    def test_relation_unsaved(self):
        # Test that the <field>_set manager does not join on null-valued fields (#17541)
Third.objects.create(name='Third 1')
Third.objects.create(name='Third 2')
th = Third(name="testing")
        # The object isn't saved, and thus the relation field is null -- we
        # won't even execute a query in this case.
with self.assertNumQueries(0):
self.assertEqual(th.child_set.count(), 0)
th.save()
        # Now the model is saved, so we will need to execute a query.
with self.assertNumQueries(1):
self.assertEqual(th.child_set.count(), 0)

    def test_related_object(self):
public_school = School.objects.create(is_public=True)
public_student = Student.objects.create(school=public_school)
private_school = School.objects.create(is_public=False)
private_student = Student.objects.create(school=private_school)
# Only one school is available via all() due to the custom default manager.
self.assertQuerysetEqual(
School.objects.all(),
["<School: School object>"]
)
self.assertEqual(public_student.school, public_school)
        # Make sure the base manager is used so that a student can still
        # access its related school even if the default manager doesn't
        # normally allow it.
self.assertEqual(private_student.school, private_school)
# If the manager is marked "use_for_related_fields", it'll get used instead
# of the "bare" queryset. Usually you'd define this as a property on the class,
# but this approximates that in a way that's easier in tests.
School.objects.use_for_related_fields = True
try:
private_student = Student.objects.get(pk=private_student.pk)
self.assertRaises(School.DoesNotExist, lambda: private_student.school)
finally:
School.objects.use_for_related_fields = False

    def test_hasattr_related_object(self):
# The exception raised on attribute access when a related object
# doesn't exist should be an instance of a subclass of `AttributeError`
# refs #21563
self.assertFalse(hasattr(Article(), 'reporter'))
| [
"[email protected]"
]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.