Dataset schema (29 columns; the pipe-delimited rows below follow this order):

| Column | Dtype | Min | Max |
| --- | --- | --- | --- |
| blob_id | string (length) | 40 | 40 |
| directory_id | string (length) | 40 | 40 |
| path | string (length) | 3 | 616 |
| content_id | string (length) | 40 | 40 |
| detected_licenses | list (length) | 0 | 112 |
| license_type | string (2 classes) |  |  |
| repo_name | string (length) | 5 | 115 |
| snapshot_id | string (length) | 40 | 40 |
| revision_id | string (length) | 40 | 40 |
| branch_name | string (777 classes) |  |  |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 | 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 | 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 | 2023-09-06 01:08:06 |
| github_id | int64 (nullable) | 4.92k | 681M |
| star_events_count | int64 | 0 | 209k |
| fork_events_count | int64 | 0 | 110k |
| gha_license_id | string (22 classes) |  |  |
| gha_event_created_at | timestamp[us] (nullable) | 2012-06-04 01:52:49 | 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us] (nullable) | 2008-05-22 07:58:19 | 2023-08-21 12:35:19 |
| gha_language | string (149 classes) |  |  |
| src_encoding | string (26 classes) |  |  |
| language | string (1 class) |  |  |
| is_vendor | bool (2 classes) |  |  |
| is_generated | bool (2 classes) |  |  |
| length_bytes | int64 | 3 | 10.2M |
| extension | string (188 classes) |  |  |
| content | string (length) | 3 | 10.2M |
| authors | list (length) | 1 | 1 |
| author_id | string (length) | 1 | 132 |

blob_id | directory_id | path | content_id | detected_licenses | license_type | repo_name | snapshot_id | revision_id | branch_name | visit_date | revision_date | committer_date | github_id | star_events_count | fork_events_count | gha_license_id | gha_event_created_at | gha_created_at | gha_language | src_encoding | language | is_vendor | is_generated | length_bytes | extension | content | authors | author_id
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
781b6ed80d74e389945eae3cb38ae76e1db490f6 | 2a6f1afa7678e5d76efe01b1474eda59d442ae0f | /venv/Lib/site-packages/jesse/store/state_candles.py | ba7f6dbdeeebd1a5b42bae0273167d64478e98c3 | []
| no_license | cagridincel/CagriTrade | 6b50c785efc3eb43487724be59511a5850a92145 | 86839e6604eb18850f6410acf5f6993da59b74ec | refs/heads/master | 2023-03-03T09:16:29.965177 | 2021-02-16T13:01:18 | 2021-02-16T13:01:18 | 338,672,106 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,029 | py | import numpy as np
import jesse.helpers as jh
import jesse.services.selectors as selectors
from jesse.config import config
from jesse.enums import timeframes
from jesse.exceptions import RouteNotFound
from jesse.libs import DynamicNumpyArray
from jesse.models import store_candle_into_db
from jesse.services.candle import generate_candle_from_one_minutes
class CandlesState:
def __init__(self):
self.storage = {}
self.is_initiated = False
def get_storage(self, exchange, symbol, timeframe):
key = jh.key(exchange, symbol, timeframe)
try:
return self.storage[key]
except KeyError:
raise RouteNotFound(
"Bellow route is required but missing in your routes:\n('{}', '{}', '{}')".format(
exchange, symbol, timeframe
)
)
def init_storage(self, bucket_size=1000):
for c in config['app']['considering_candles']:
exchange, symbol = c[0], c[1]
# initiate the '1m' timeframes
key = jh.key(exchange, symbol, timeframes.MINUTE_1)
self.storage[key] = DynamicNumpyArray((bucket_size, 6))
for timeframe in config['app']['considering_timeframes']:
key = jh.key(exchange, symbol, timeframe)
# ex: 1440 / 60 + 1 (reserve one for forming candle)
total_bigger_timeframe = int((bucket_size / jh.timeframe_to_one_minutes(timeframe)) + 1)
self.storage[key] = DynamicNumpyArray((total_bigger_timeframe, 6))
def add_candle(
self,
candle: np.ndarray,
exchange: str,
symbol: str,
timeframe: str,
with_execution=True,
with_generation=True,
is_forming_candle=False
):
if jh.is_collecting_data():
# make sure it's a complete (and not a forming) candle
if jh.now_to_timestamp() >= (candle[0] + 60000):
store_candle_into_db(exchange, symbol, candle)
return
arr: DynamicNumpyArray = self.get_storage(exchange, symbol, timeframe)
if jh.is_live():
self.update_position(exchange, symbol, candle)
# initial
if len(arr) == 0:
arr.append(candle)
# if it's new, add
elif candle[0] > arr[-1][0]:
# in paper mode, check to see if the new candle causes any active orders to be executed
if with_execution and jh.is_paper_trading():
self.simulate_order_execution(exchange, symbol, timeframe, candle)
arr.append(candle)
# generate other timeframes
if with_generation and timeframe == '1m':
self.generate_bigger_timeframes(candle, exchange, symbol, with_execution, is_forming_candle)
# if it's the last candle again, update
elif candle[0] == arr[-1][0]:
# in paper mode, check to see if the new candle causes any active orders to get executed
if with_execution and jh.is_paper_trading():
self.simulate_order_execution(exchange, symbol, timeframe, candle)
arr[-1] = candle
# regenerate other timeframes
if with_generation and timeframe == '1m':
self.generate_bigger_timeframes(candle, exchange, symbol, with_execution, is_forming_candle)
# past candles will be ignored (dropped)
elif candle[0] < arr[-1][0]:
return
@staticmethod
def update_position(exchange: str, symbol: str, candle: np.ndarray):
# get position object
p = selectors.get_position(exchange, symbol)
# for extra_route candles, p == None, hence no further action is required
if p is None:
return
# update position.current_price
p.current_price = jh.round_price_for_live_mode(candle[2], candle[2])
def generate_bigger_timeframes(self, candle: np.ndarray, exchange: str, symbol: str, with_execution: bool,
is_forming_candle: bool):
if not jh.is_live():
return
for timeframe in config['app']['considering_timeframes']:
# skip '1m'
if timeframe == '1m':
continue
last_candle = self.get_current_candle(exchange, symbol, timeframe)
generate_from_count = int((candle[0] - last_candle[0]) / 60_000)
required_for_complete_candle = jh.timeframe_to_one_minutes(timeframe)
short_candles = self.get_candles(exchange, symbol, '1m')[-1 - generate_from_count:]
if generate_from_count == (required_for_complete_candle - 1) and not is_forming_candle:
is_forming_candle = False
else:
is_forming_candle = True
# update latest candle
generated_candle = generate_candle_from_one_minutes(
timeframe,
short_candles,
True
)
self.add_candle(generated_candle, exchange, symbol, timeframe, with_execution, with_generation=False,
is_forming_candle=is_forming_candle)
def simulate_order_execution(self, exchange, symbol, timeframe, new_candle):
previous_candle = self.get_current_candle(exchange, symbol, timeframe)
orders = selectors.get_orders(exchange, symbol)
if previous_candle[2] == new_candle[2]:
return
for o in orders:
# skip inactive orders
if not o.is_active:
continue
if ((o.price >= previous_candle[2]) and (o.price <= new_candle[2])) or (
(o.price <= previous_candle[2]) and (o.price >= new_candle[2])):
o.execute()
def batch_add_candle(self, candles, exchange, symbol, timeframe, with_generation=True):
for c in candles:
self.add_candle(c, exchange, symbol, timeframe, with_execution=False, with_generation=with_generation)
def forming_estimation(self, exchange, symbol, timeframe):
long_key = jh.key(exchange, symbol, timeframe)
short_key = jh.key(exchange, symbol, '1m')
required_1m_to_complete_count = jh.timeframe_to_one_minutes(timeframe)
current_1m_count = len(self.get_storage(exchange, symbol, '1m'))
dif = current_1m_count % required_1m_to_complete_count
return dif, long_key, short_key
# # # # # # # # #
# # # # # getters
# # # # # # # # #
def get_candles(self, exchange, symbol, timeframe) -> np.ndarray:
# no need to worry for forming candles when timeframe == 1m
if timeframe == '1m':
arr: DynamicNumpyArray = self.get_storage(exchange, symbol, '1m')
if len(arr) == 0:
return np.zeros((0, 6))
else:
return arr[:]
# other timeframes
dif, long_key, short_key = self.forming_estimation(exchange, symbol, timeframe)
long_count = len(self.get_storage(exchange, symbol, timeframe))
short_count = len(self.get_storage(exchange, symbol, '1m'))
if dif == 0 and long_count == 0:
return np.zeros((0, 6))
# complete candle
if dif == 0 or self.storage[long_key][:long_count][-1][0] == self.storage[short_key][short_count - dif][0]:
return self.storage[long_key][:long_count]
# generate forming
else:
return np.concatenate(
(
self.storage[long_key][:long_count],
np.array(
(
generate_candle_from_one_minutes(
timeframe,
self.storage[short_key][short_count - dif:short_count],
True
),
)
)
), axis=0
)
def get_current_candle(self, exchange, symbol, timeframe) -> np.ndarray:
# no need to worry for forming candles when timeframe == 1m
if timeframe == '1m':
arr: DynamicNumpyArray = self.get_storage(exchange, symbol, '1m')
if len(arr) == 0:
return np.zeros((0, 6))
else:
return arr[-1]
# other timeframes
dif, long_key, short_key = self.forming_estimation(exchange, symbol, timeframe)
long_count = len(self.get_storage(exchange, symbol, timeframe))
short_count = len(self.get_storage(exchange, symbol, '1m'))
# complete candle
if dif == 0:
if long_count == 0:
return np.zeros((0, 6))
else:
return self.storage[long_key][-1]
# generate forming
else:
return generate_candle_from_one_minutes(
timeframe, self.storage[short_key][short_count - dif:short_count],
True
)
| [
"[email protected]"
]
| |
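The file above leans on `generate_candle_from_one_minutes` without showing it. Below is a minimal sketch of what such an aggregation does; the `[timestamp, open, close, high, low, volume]` column order is an assumption inferred from `update_position()`, which reads `candle[2]` as the current price, so the real jesse helper may differ in detail.

```python
import numpy as np

def aggregate_one_minute_candles(candles: np.ndarray) -> np.ndarray:
    # candles: (N, 6) array of 1m rows in the assumed column order
    # [timestamp, open, close, high, low, volume]
    return np.array([
        candles[0, 0],        # window timestamp = first 1m timestamp
        candles[0, 1],        # open of the first 1m candle
        candles[-1, 2],       # close of the last 1m candle
        candles[:, 3].max(),  # highest high across the window
        candles[:, 4].min(),  # lowest low across the window
        candles[:, 5].sum(),  # total traded volume
    ])
```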
00440d45b3060c0984ce7edc9e4ac40220ccd1bb | 5ff8cefa68d52d2427bb3d35320cd8bd0d072968 | /Python/function.py | b37cd2866485f3b2690cac066f765b069d08c364 | []
| no_license | gsudarshan1990/PythonSampleProjects | a65a111454f8dc551f1cd29901cead0798ad6dc3 | 3c1a5174c5f966b0eed2828221add76ec0d019d5 | refs/heads/master | 2020-05-09T16:02:37.743568 | 2019-07-14T06:22:55 | 2019-07-14T06:22:55 | 181,255,262 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 796 | py |
def say_hi():
print('Hi! Welcome to python functions')
def program_to_add_two_numbers():
data1=input('Enter the first value')
data2=input('Enter the second value')
data3=int(data1)+int(data2)
print(data3)
def say_hi_from_command_line(name):
print('Hello {} how are you'.format(name))
def say_hi_from_command_line(name='sudarshan'):  # note: this redefinition shadows the version above
print('Hello {} how are you'.format(name))
def say_hi_multiple_arguments(firstname,lastname):
print('Hello {} {}. How are you'.format(firstname,lastname))
say_hi_multiple_arguments('sudarshan','Govindarajan')
def Check_even_or_odd(number):
"""This program is for determining whether a number is even or odd"""
if number%2 == 0:
return 'EVEN'
else:
return 'ODD'
print(Check_even_or_odd(8))
| [
"[email protected]"
]
| |
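A short, hypothetical driver for the tutorial functions above, with the interactive pieces skipped so it runs unattended. Because the second `say_hi_from_command_line` definition shadows the first, only the defaulted version is callable.

```python
say_hi()
say_hi_from_command_line()                    # falls back to the default name
say_hi_from_command_line('Ada')
say_hi_multiple_arguments('Ada', 'Lovelace')
print(Check_even_or_odd(7))                   # -> 'ODD'
```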
98646c1825664b848ab78bddca0a52fea3d4f37a | 446b36ebe2eae156fbac7dcf1c50d467bd8bda93 | /artellapipe/__version__.py | 059e18e19b275b625118ed020ba630246c2024ef | [
"MIT"
]
| permissive | ArtellaPipe/artellapipe | cd08f29d4ac8ca0eb304f944864632c1f98f81cb | 3400f6a55f124f639143fe01c559059eaba23b22 | refs/heads/master | 2023-04-06T13:08:34.445823 | 2021-01-29T22:52:23 | 2021-01-29T22:52:23 | 197,077,090 | 8 | 0 | MIT | 2023-03-21T22:35:32 | 2019-07-15T21:48:38 | Python | UTF-8 | Python | false | false | 508 | py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Version module for artellapipe
"""
from __future__ import print_function, division, absolute_import
__author__ = "Tomas Poveda"
__license__ = "MIT"
__maintainer__ = "Tomas Poveda"
__email__ = "[email protected]"
__version__ = None
def get_version():
global __version__
if __version__:
return __version__
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
return __version__
| [
"[email protected]"
]
| |
4742e8ffbe6d84c6a4f32e83697808989bfb9c8c | cd80ef21610010b534430cc9de5c47cddc8b351c | /tests/test_utils/test_load_raw_schema.py | d2e58a61b20637fa70e8e744949bf61779f4f7af | [
"CC0-1.0"
]
| permissive | lushacao/biolinkml | 7124437b0fc6cd2fb8e84fa50e5e187513693fb0 | a492ec8e0d5dc464407b25a70d363674131155bf | refs/heads/master | 2020-05-26T15:13:31.416209 | 2019-04-09T14:53:55 | 2019-04-09T14:53:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,482 | py | import os
import unittest
from typing import Callable
from jsonasobj import as_json, loads, as_dict
from biolinkml import METAMODEL_URI
from biolinkml.meta import SchemaDefinition
from biolinkml.utils.rawloader import load_raw_schema
from biolinkml.utils.schemaloader import SchemaLoader
from tests.test_utils import datadir
class RawLoaderTestCase(unittest.TestCase):
def _verify_schema1_content(self, schema: SchemaDefinition, source_file,
addl_checks: Callable[[SchemaDefinition], None]=None) -> None:
expected = loads(f"""{{
"name": "{source_file}",
"id": "http://example.org/{source_file}",
"title": "Load Raw Schema Test",
"metamodel_version": "0.5.0",
"source_file": "{source_file}.yaml",
"source_file_date": "Mon Dec 31 11:25:38 2018",
"source_file_size": 76,
"generation_date": "2018-12-31 11:50"
}}""")
schema.source_file = os.path.basename(schema.source_file)
if addl_checks:
addl_checks(schema)
self.assertTrue(isinstance(schema.metamodel_version, str))
expected.metamodel_version = schema.metamodel_version
self.assertTrue(isinstance(schema.source_file_date, str))
expected.source_file_date = schema.source_file_date
self.assertTrue(isinstance(schema.source_file_size, int))
expected.source_file_size = schema.source_file_size
self.assertTrue(isinstance(schema.generation_date, str))
expected.generation_date = schema.generation_date
self.assertEqual(expected, loads(as_json(schema)))
def test_load_raw_file(self):
""" Test loading a data file """
self._verify_schema1_content(load_raw_schema(os.path.join(datadir, 'schema1.yaml')), 'schema1')
# Verify that we can't pass source_file parameters when we've got a directory name
with self.assertRaises(AssertionError):
load_raw_schema(os.path.join(datadir, 'schema1.yaml'), source_file_size=117)
def test_explicit_name(self):
""" Test the named schema option """
self._verify_schema1_content(load_raw_schema(os.path.join(datadir, 'schema2.yaml')), 'schema2')
def test_multi_schemas(self):
""" Test multiple schemas in the same file """
def check_types(s: SchemaDefinition) -> None:
self.assertEqual({
'integer': {'base': 'int',
'from_schema': 'http://example.org/schema5',
'name': 'integer'},
'string': {'base': 'str',
'from_schema': 'http://example.org/schema4',
'name': 'string'}},
{k: as_dict(loads(as_json(v))) for k, v in s.types.items()})
s.types = None
self._verify_schema1_content(load_raw_schema(os.path.join(datadir, 'schema4.yaml')), 'schema4', check_types)
def test_base_dir(self):
""" Test the base directory option """
self._verify_schema1_content(load_raw_schema('schema1.yaml', base_dir=datadir), 'schema1')
def test_schema_id(self):
""" Test loading a schema with just an id """
self._verify_schema1_content(load_raw_schema('schema3.yaml', base_dir=datadir), 'schema3')
def test_name_from_sourcefile(self):
""" Test no identifier at all """
with self.assertRaises(ValueError):
load_raw_schema(os.path.join(datadir, 'schema5.yaml'))
def test_load_text(self):
""" Test loading straight text """
with open(os.path.join(datadir, 'schema1.yaml')) as f:
self._verify_schema1_content(load_raw_schema(f.read(), 'schema1.yaml', "Mon Dec 31 11:25:38 2018", 76),
'schema1')
def test_representation_errors(self):
""" Test misformed schema elements """
fn = os.path.join(datadir, 'typeerror1.yaml')
with self.assertRaises(ValueError):
SchemaLoader(fn)
fn = os.path.join(datadir, 'typeerror2.yaml')
with self.assertRaises(ValueError):
SchemaLoader(fn)
fn = os.path.join(datadir, 'typeerror3.yaml')
with self.assertRaises(ValueError):
SchemaLoader(fn)
fn = os.path.join(datadir, 'typeerror4.yaml')
with self.assertRaises(ValueError):
SchemaLoader(fn)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
b8a5a42c89dedd95e2f1cfec27a8724602834c02 | 0fb7f29ca82e0588064dfee096d6bbefb5d86771 | /amqp_client/apps.py | 5d2cd1c458f8cfa424424acbf8c0e36e80710f72 | []
| no_license | dschien/minimal_django_celery | f4b9b2d46a068d12db5e65b37ff97145f00b26e1 | 1dd6d4d8afb056207a26f5a84b48fd98deaca5c4 | refs/heads/master | 2021-01-17T17:35:34.674049 | 2016-05-30T05:13:20 | 2016-05-30T05:13:20 | 59,981,483 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class AmqpClientConfig(AppConfig):
name = 'amqp_client'
| [
"[email protected]"
]
| |
1d46722876803893a9031d256552fb3ff8871627 | b92417413ec5b05ca25695de55934ce7072a0f0a | /test/test_v1_source_control_user.py | 272e12fdb5ccbc057c74caf86bff1ed08f2c5e16 | [
"Apache-2.0"
]
| permissive | detiber/lib_openshift | be1f0f1b3eec62c9bbf50a3fcea61303a870c112 | efea21ce6f67e3d48885c03ae22978c576c0b87d | refs/heads/master | 2021-01-18T04:12:00.820052 | 2016-10-04T03:20:43 | 2016-10-04T03:20:43 | 63,102,761 | 0 | 0 | null | 2016-07-11T21:15:36 | 2016-07-11T21:15:36 | null | UTF-8 | Python | false | false | 1,320 | py | # coding: utf-8
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_source_control_user import V1SourceControlUser
class TestV1SourceControlUser(unittest.TestCase):
""" V1SourceControlUser unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1SourceControlUser(self):
"""
Test V1SourceControlUser
"""
model = lib_openshift.models.v1_source_control_user.V1SourceControlUser()
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
15ed6ca2de5e81a647689b26bfa84a582b393614 | 8a699595e7f156b1ade42f6042900b3331831fbf | /src/transformers/models/funnel/__init__.py | b9c6b9608d3787ef46211639297da693cde0c8c9 | [
"Apache-2.0"
]
| permissive | stas00/transformers | ab654371a387c5883fc882dd0286177875d6d3b4 | 7c5d79912a21880ce13d77881940458e90d98917 | refs/heads/master | 2023-02-16T00:22:41.298155 | 2022-04-08T20:55:42 | 2022-04-08T20:55:42 | 278,214,696 | 6 | 0 | Apache-2.0 | 2022-01-28T18:39:00 | 2020-07-08T23:24:49 | Python | UTF-8 | Python | false | false | 3,496 | py | # flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ...utils import _LazyModule, is_tf_available, is_tokenizers_available, is_torch_available
_import_structure = {
"configuration_funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig"],
"convert_funnel_original_tf_checkpoint_to_pytorch": [],
"tokenization_funnel": ["FunnelTokenizer"],
}
if is_tokenizers_available():
_import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
if is_torch_available():
_import_structure["modeling_funnel"] = [
"FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"FunnelBaseModel",
"FunnelForMaskedLM",
"FunnelForMultipleChoice",
"FunnelForPreTraining",
"FunnelForQuestionAnswering",
"FunnelForSequenceClassification",
"FunnelForTokenClassification",
"FunnelModel",
"FunnelPreTrainedModel",
"load_tf_weights_in_funnel",
]
if is_tf_available():
_import_structure["modeling_tf_funnel"] = [
"TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFFunnelBaseModel",
"TFFunnelForMaskedLM",
"TFFunnelForMultipleChoice",
"TFFunnelForPreTraining",
"TFFunnelForQuestionAnswering",
"TFFunnelForSequenceClassification",
"TFFunnelForTokenClassification",
"TFFunnelModel",
"TFFunnelPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
if is_tokenizers_available():
from .tokenization_funnel_fast import FunnelTokenizerFast
if is_torch_available():
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
if is_tf_available():
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| [
"[email protected]"
]
| |
5441121600dc115f36870bfe9ba621150d9a807b | be0c6e2071945edcb47ee4f3fadc1f4629a2c6aa | /grandapp/migrations/0159_gwascata.py | bd3973db6cc1f9639c5721276b1eec0a9fcd0592 | []
| no_license | QuackenbushLab/grand | 9719a395e6a30951c3ffdef1eccdb5e422da737c | f23031d1f240550d25c2842b4af0aae08c653bae | refs/heads/master | 2023-08-10T09:58:58.381264 | 2023-07-25T18:23:26 | 2023-07-25T18:23:26 | 201,113,575 | 5 | 2 | null | 2022-06-24T19:11:29 | 2019-08-07T19:18:58 | JavaScript | UTF-8 | Python | false | false | 622 | py | # Generated by Django 3.0.2 on 2021-04-17 03:35
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('grandapp', '0158_gobpbygene'),
]
operations = [
migrations.CreateModel(
name='Gwascata',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('term', models.CharField(max_length=400)),
('genelist', models.CharField(max_length=3000)),
('idd', models.IntegerField()),
],
),
]
| [
"[email protected]"
]
| |
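For orientation, this is roughly the `models.py` definition that `makemigrations` would have generated the migration above from. The app's actual models file is not part of this dump, so treat this as an inferred sketch rather than the repository's code.

```python
from django.db import models

class Gwascata(models.Model):
    term = models.CharField(max_length=400)      # GWAS catalog term
    genelist = models.CharField(max_length=3000)  # associated gene list
    idd = models.IntegerField()                   # numeric identifier
```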
0b66e9d9f992078632fda7ba4aab15fea059fe55 | ecf1ce6f8b592f76c7b7c253608c1264ae0676a3 | /days/day033/movie_logbook.py | b380e0535a073660df262734541a8f403ae4d0b4 | []
| permissive | alex-vegan/100daysofcode-with-python-course | 94e99880a50ac412e398ad209ed53796f253641f | b6c12316abe18274b7963371b8f0ed2fd549ef07 | refs/heads/master | 2021-07-20T23:05:59.721661 | 2019-01-21T16:18:25 | 2019-01-21T16:18:25 | 150,115,516 | 0 | 0 | MIT | 2018-09-24T14:28:16 | 2018-09-24T14:28:15 | null | UTF-8 | Python | false | false | 3,017 | py | from typing import List
import requests
import collections
import os
from urllib.request import urlretrieve
from requests.exceptions import ProxyError, ConnectionError, HTTPError, SSLError
import logbook
import sys
FILE = 'movie_logbook.log'
resp_logger = logbook.Logger('Resp')
url_path = 'http://movie_service.talkpython.fm/api/search/'
Movie = collections.namedtuple('Movie', 'imdb_code, title, director, keywords, '
'duration, genres, rating, year, imdb_score')
def main(file_name=None):
level = logbook.TRACE
if file_name:
logbook.TimedRotatingFileHandler(file_name, level=level).push_application()
else:
logbook.StreamHandler(sys.stdout, level=level).push_application()
mode = 'stdout' if not file_name else 'file ' + file_name
msg = f'Logging started. level: {level} mode: {mode}'
logger = logbook.Logger('Startup')
logger.notice(msg)
try:
keyword = input('Keyword of title search: ')
if not keyword or not keyword.strip():
raise ValueError('Must specify a search term.')
url = os.path.join(url_path, keyword)
response = requests.get(url)
response.raise_for_status()
if response.status_code == 200:
print(f"OK!")
resp_logger.notice(f'Connection correctly made. '
f'Status code: {response.status_code}')
results = response.json()
movies = []
for r in results.get('hits'):
movies.append(Movie(**r))
print(f'There are {len(movies)} movies found.')
resp_logger.trace(f'For keyword << {keyword} >> has been found {len(movies)} movies')
for m in movies:
print(f"{m.title} with code {m.imdb_code} has score {m.imdb_score}")
except ProxyError:
error_msg = (f"Could not connect to proxy. "
f"Check your proxy settings.")
print(f"ERROR: " + error_msg)
resp_logger.warn(error_msg)
except ConnectionError:
error_msg = (f"Could not find server. "
f"Check your network connection.")
print(f"ERROR: " + error_msg)
resp_logger.warn(error_msg)
except HTTPError:
error_msg = (f"Could not open the HTTP page. "
f"Error number {response.status_code} "
f"Reason: {response.reason}")
print("ERROR: " + error_msg)
resp_logger.warn(error_msg)
except SSLError:
error_msg = (f"Could not open the HTTPS page. "
f"Check firewall settings and SSL certificates.")
print(f"ERROR: " + error_msg)
resp_logger.warn(error_msg)
except ValueError:
print(f"ERROR: You must specify a search term.")
resp_logger.trace(f'Search term has not been specified')
except Exception as x:
print(f"Oh that didn't work!: {x}")
resp_logger.error(f'!!! System Fatality Crash !!!')
if __name__ == '__main__':
main(FILE)
| [
"[email protected]"
]
| |
116ad72261ac13f32e56b96af2857a706496f8f3 | 4eeb40dcc265caf4a2b84bc90a28d481930d6a8a | /wristwatchesproj/genericviews/urls.py | f85318b0a8fa4ada057cdb8357d59b5e5c64d300 | []
| no_license | mprasu/Sample-Projects | eb7fc46e81b09d7c97c238047e3c93b6fff3fb8d | 7363baf630900ab2babb4af2afe77911d8a548b2 | refs/heads/master | 2020-04-16T06:43:16.345750 | 2019-01-12T07:07:34 | 2019-01-12T07:07:34 | 165,358,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | from django.conf.urls import url
from django.urls import path
from genericviews import views
#app_name = 'genericviews'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^(?P<pk>[0-9]+)/$', views.DetailsView.as_view(), name='detail'),
url(r'^makeentry$', views.MakeEntry.as_view(), name='makeentry'),
url(r'^/(?P<pk>[0-9]+)/delete/$',views.DeleteView.as_view(),name='item-delete'),
] | [
"[email protected]"
]
| |
56f1079c44dd79e3fc140abdf1cdfe15a672ec6e | 33a917d58f796de42d3f90c7ab0a0353066c8a4b | /constants.py | 20f9cd9dd32e0f3ec6ecab79f4d3222233c424e0 | []
| no_license | evanthebouncy/tangram | d214cec6e164e9b21d83cf2fcacf753d8ca837e5 | c8a7c13bcad7c9b4c96a6b296db707ddcf32241b | refs/heads/master | 2021-04-26T23:06:12.465094 | 2018-07-18T13:46:00 | 2018-07-18T13:46:00 | 123,930,799 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 435 | py | # all the constant globals shared across codes
OPS = ['P', 'H', 'V']
n_hidden = 40
# n_hidden = 120 # good embedding bad decomposition
large_hidden = 10 * n_hidden
L = 6
SHAPE_TYPES = ['1', '2', '3', '4', '5']
# SHAPES = ['1', '1', '2', '3', '4', '5', '5']
SHAPES = ['1', '2', '3']
# SHAPES = ['1', '2', '3', '4', '5']
ORIENTATIONS = [1, 2, 3, 4]
SXO = [(s,o) for s in SHAPE_TYPES for o in ORIENTATIONS]
ACTIONS = ['H', 'V'] + SXO
| [
"[email protected]"
]
| |
c15f1497d32ffbd1dd767da2c379c75b43300541 | eec4e9956b360998e34e9a6efd23744b3b0cdf9c | /my_plugins/python-mode/submodules/rope/ropetest/refactor/change_signature_test.py | 944ffb5482f6c2c0cb2289aac309f8143b510830 | [
"MIT",
"LGPL-3.0-only",
"LGPL-2.0-or-later",
"GPL-1.0-or-later",
"GPL-2.0-only"
]
| permissive | dragon7-fc/vimrc | 7f89f8730090e81c8cb78cdc471437e35defd0c1 | d5968c222023bfdbd68b4f047f6e407e978cc82f | refs/heads/master | 2021-06-22T03:35:34.474482 | 2021-01-29T00:34:20 | 2021-01-29T00:34:20 | 182,471,836 | 0 | 0 | MIT | 2019-04-21T01:37:18 | 2019-04-21T01:37:17 | null | UTF-8 | Python | false | false | 22,123 | py | try:
import unittest2 as unittest
except ImportError:
import unittest
import rope.base.exceptions
from rope.refactor import change_signature
from ropetest import testutils
class ChangeSignatureTest(unittest.TestCase):
def setUp(self):
super(ChangeSignatureTest, self).setUp()
self.project = testutils.sample_project()
self.pycore = self.project.pycore
self.mod = testutils.create_module(self.project, 'mod')
def tearDown(self):
testutils.remove_project(self.project)
super(ChangeSignatureTest, self).tearDown()
def test_normalizing_parameters_for_trivial_case(self):
code = 'def a_func():\n pass\na_func()'
self.mod.write(code)
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals(code, self.mod.read())
def test_normalizing_parameters_for_trivial_case2(self):
code = 'def a_func(param):\n pass\na_func(2)'
self.mod.write(code)
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals(code, self.mod.read())
def test_normalizing_parameters_for_unneeded_keyword(self):
self.mod.write('def a_func(param):\n pass\na_func(param=1)')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('def a_func(param):\n pass\na_func(1)',
self.mod.read())
def test_normalizing_parameters_for_unneeded_keyword_for_methods(self):
code = 'class A(object):\n' \
' def a_func(self, param):\n' \
' pass\n' \
'a_var = A()\n' \
'a_var.a_func(param=1)\n'
self.mod.write(code)
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
expected = 'class A(object):\n' \
' def a_func(self, param):\n' \
' pass\n' \
'a_var = A()\n' \
'a_var.a_func(1)\n'
self.assertEquals(expected, self.mod.read())
def test_normalizing_parameters_for_unsorted_keyword(self):
self.mod.write('def a_func(p1, p2):\n pass\na_func(p2=2, p1=1)')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('def a_func(p1, p2):\n pass\na_func(1, 2)',
self.mod.read())
def test_raising_exceptions_for_non_functions(self):
self.mod.write('a_var = 10')
with self.assertRaises(rope.base.exceptions.RefactoringError):
change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_var') + 1)
def test_normalizing_parameters_for_args_parameter(self):
self.mod.write('def a_func(*arg):\n pass\na_func(1, 2)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('def a_func(*arg):\n pass\na_func(1, 2)\n',
self.mod.read())
def test_normalizing_parameters_for_args_parameter_and_keywords(self):
self.mod.write(
'def a_func(param, *args):\n pass\na_func(*[1, 2, 3])\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('def a_func(param, *args):\n pass\n'
'a_func(*[1, 2, 3])\n', self.mod.read())
def test_normalizing_functions_from_other_modules(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write('def a_func(param):\n pass\n')
self.mod.write('import mod1\nmod1.a_func(param=1)\n')
signature = change_signature.ChangeSignature(
self.project, mod1, mod1.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('import mod1\nmod1.a_func(1)\n', self.mod.read())
def test_normalizing_parameters_for_keyword_parameters(self):
self.mod.write('def a_func(p1, **kwds):\n pass\n'
'a_func(p2=2, p1=1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('def a_func(p1, **kwds):\n pass\n'
'a_func(1, p2=2)\n', self.mod.read())
def test_removing_arguments(self):
self.mod.write('def a_func(p1):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(0)]))
self.assertEquals('def a_func():\n pass\na_func()\n',
self.mod.read())
def test_removing_arguments_with_multiple_args(self):
self.mod.write('def a_func(p1, p2):\n pass\na_func(1, 2)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(0)]))
self.assertEquals('def a_func(p2):\n pass\na_func(2)\n',
self.mod.read())
def test_removing_arguments_passed_as_keywords(self):
self.mod.write('def a_func(p1):\n pass\na_func(p1=1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(0)]))
self.assertEquals('def a_func():\n pass\na_func()\n',
self.mod.read())
def test_removing_arguments_with_defaults(self):
self.mod.write('def a_func(p1=1):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(0)]))
self.assertEquals('def a_func():\n pass\na_func()\n',
self.mod.read())
def test_removing_arguments_star_args(self):
self.mod.write('def a_func(p1, *args):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(1)]))
self.assertEquals('def a_func(p1):\n pass\na_func(1)\n',
self.mod.read())
def test_removing_keyword_arg(self):
self.mod.write('def a_func(p1, **kwds):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(1)]))
self.assertEquals('def a_func(p1):\n pass\na_func(1)\n',
self.mod.read())
def test_removing_keyword_arg2(self):
self.mod.write('def a_func(p1, *args, **kwds):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(2)]))
self.assertEquals('def a_func(p1, *args):\n pass\na_func(1)\n',
self.mod.read())
# XXX: What to do here for star args?
@unittest.skip("How to deal with start args?")
def xxx_test_removing_arguments_star_args2(self):
self.mod.write('def a_func(p1, *args):\n pass\n'
'a_func(2, 3, p1=1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(1)]))
self.assertEquals('def a_func(p1):\n pass\na_func(p1=1)\n',
self.mod.read())
# XXX: What to do here for star args?
def xxx_test_removing_arguments_star_args3(self):
self.mod.write('def a_func(p1, *args):\n pass\n'
'a_func(*[1, 2, 3])\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(1)]))
self.assertEquals('def a_func(p1):\n pass\na_func(*[1, 2, 3])\n',
self.mod.read())
def test_adding_arguments_for_normal_args_changing_definition(self):
self.mod.write('def a_func():\n pass\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentAdder(0, 'p1')]))
self.assertEquals('def a_func(p1):\n pass\n', self.mod.read())
def test_adding_arguments_for_normal_args_with_defaults(self):
self.mod.write('def a_func():\n pass\na_func()\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
adder = change_signature.ArgumentAdder(0, 'p1', 'None')
self.project.do(signature.get_changes([adder]))
self.assertEquals('def a_func(p1=None):\n pass\na_func()\n',
self.mod.read())
def test_adding_arguments_for_normal_args_changing_calls(self):
self.mod.write('def a_func():\n pass\na_func()\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
adder = change_signature.ArgumentAdder(0, 'p1', 'None', '1')
self.project.do(signature.get_changes([adder]))
self.assertEquals('def a_func(p1=None):\n pass\na_func(1)\n',
self.mod.read())
def test_adding_arguments_for_norm_args_chang_calls_with_kwords(self):
self.mod.write('def a_func(p1=0):\n pass\na_func()\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
adder = change_signature.ArgumentAdder(1, 'p2', '0', '1')
self.project.do(signature.get_changes([adder]))
self.assertEquals('def a_func(p1=0, p2=0):\n pass\na_func(p2=1)\n',
self.mod.read())
def test_adding_arguments_for_norm_args_chang_calls_with_no_value(self):
self.mod.write('def a_func(p2=0):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
adder = change_signature.ArgumentAdder(0, 'p1', '0', None)
self.project.do(signature.get_changes([adder]))
self.assertEquals('def a_func(p1=0, p2=0):\n pass\na_func(p2=1)\n',
self.mod.read())
def test_adding_duplicate_parameter_and_raising_exceptions(self):
self.mod.write('def a_func(p1):\n pass\n')
with self.assertRaises(rope.base.exceptions.RefactoringError):
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentAdder(1, 'p1')]))
def test_inlining_default_arguments(self):
self.mod.write('def a_func(p1=0):\n pass\na_func()\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentDefaultInliner(0)]))
self.assertEquals('def a_func(p1=0):\n pass\n'
'a_func(0)\n', self.mod.read())
def test_inlining_default_arguments2(self):
self.mod.write('def a_func(p1=0):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentDefaultInliner(0)]))
self.assertEquals('def a_func(p1=0):\n pass\n'
'a_func(1)\n', self.mod.read())
def test_preserving_args_and_keywords_order(self):
self.mod.write('def a_func(*args, **kwds):\n pass\n'
'a_func(3, 1, 2, a=1, c=3, b=2)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentNormalizer()]))
self.assertEquals('def a_func(*args, **kwds):\n pass\n'
'a_func(3, 1, 2, a=1, c=3, b=2)\n', self.mod.read())
def test_change_order_for_only_one_parameter(self):
self.mod.write('def a_func(p1):\n pass\na_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentReorderer([0])]))
self.assertEquals('def a_func(p1):\n pass\na_func(1)\n',
self.mod.read())
def test_change_order_for_two_parameter(self):
self.mod.write('def a_func(p1, p2):\n pass\na_func(1, 2)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentReorderer([1, 0])]))
self.assertEquals('def a_func(p2, p1):\n pass\na_func(2, 1)\n',
self.mod.read())
def test_reordering_multi_line_function_headers(self):
self.mod.write('def a_func(p1,\n p2):\n pass\na_func(1, 2)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentReorderer([1, 0])]))
self.assertEquals('def a_func(p2, p1):\n pass\na_func(2, 1)\n',
self.mod.read())
def test_changing_order_with_static_params(self):
self.mod.write('def a_func(p1, p2=0, p3=0):\n pass\na_func(1, 2)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentReorderer([0, 2, 1])]))
self.assertEquals('def a_func(p1, p3=0, p2=0):\n pass\n'
'a_func(1, p2=2)\n', self.mod.read())
def test_doing_multiple_changes(self):
changers = []
self.mod.write('def a_func(p1):\n pass\na_func(1)\n')
changers.append(change_signature.ArgumentRemover(0))
changers.append(change_signature.ArgumentAdder(0, 'p2', None, None))
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
signature.get_changes(changers).do()
self.assertEquals('def a_func(p2):\n pass\na_func()\n',
self.mod.read())
def test_doing_multiple_changes2(self):
changers = []
self.mod.write('def a_func(p1, p2):\n pass\na_func(p2=2)\n')
changers.append(change_signature.ArgumentAdder(2, 'p3', None, '3'))
changers.append(change_signature.ArgumentReorderer([1, 0, 2]))
changers.append(change_signature.ArgumentRemover(1))
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
signature.get_changes(changers).do()
self.assertEquals('def a_func(p2, p3):\n pass\na_func(2, 3)\n',
self.mod.read())
def test_changing_signature_in_subclasses(self):
self.mod.write(
'class A(object):\n def a_method(self):\n pass\n'
'class B(A):\n def a_method(self):\n pass\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_method') + 1)
signature.get_changes([change_signature.ArgumentAdder(1, 'p1')],
in_hierarchy=True).do()
self.assertEquals(
'class A(object):\n def a_method(self, p1):\n pass\n'
'class B(A):\n def a_method(self, p1):\n pass\n',
self.mod.read())
def test_differentiating_class_accesses_from_instance_accesses(self):
self.mod.write(
'class A(object):\n def a_func(self, param):\n pass\n'
'a_var = A()\nA.a_func(a_var, param=1)')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('a_func') + 1)
self.project.do(signature.get_changes(
[change_signature.ArgumentRemover(1)]))
self.assertEquals(
'class A(object):\n def a_func(self):\n pass\n'
'a_var = A()\nA.a_func(a_var)', self.mod.read())
def test_changing_signature_for_constructors(self):
self.mod.write(
'class C(object):\n def __init__(self, p):\n pass\n'
'c = C(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('C') + 1)
signature.get_changes([change_signature.ArgumentRemover(1)]).do()
self.assertEquals(
'class C(object):\n def __init__(self):\n pass\n'
'c = C()\n',
self.mod.read())
def test_changing_signature_for_constructors2(self):
self.mod.write(
'class C(object):\n def __init__(self, p):\n pass\n'
'c = C(1)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('__init__') + 1)
signature.get_changes([change_signature.ArgumentRemover(1)]).do()
self.assertEquals(
'class C(object):\n def __init__(self):\n pass\n'
'c = C()\n',
self.mod.read())
def test_changing_signature_for_constructors_when_using_super(self):
self.mod.write(
'class A(object):\n def __init__(self, p):\n pass\n'
'class B(A):\n '
'def __init__(self, p):\n super(B, self).__init__(p)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().index('__init__') + 1)
signature.get_changes([change_signature.ArgumentRemover(1)]).do()
self.assertEquals(
'class A(object):\n def __init__(self):\n pass\n'
'class B(A):\n '
'def __init__(self, p):\n super(B, self).__init__()\n',
self.mod.read())
def test_redordering_arguments_reported_by_mft(self):
self.mod.write('def f(a, b, c):\n pass\nf(1, 2, 3)\n')
signature = change_signature.ChangeSignature(
self.project, self.mod, self.mod.read().rindex('f'))
signature.get_changes(
[change_signature.ArgumentReorderer([1, 2, 0])]).do()
self.assertEquals('def f(b, c, a):\n pass\nf(2, 3, 1)\n',
self.mod.read())
def test_resources_parameter(self):
mod1 = testutils.create_module(self.project, 'mod1')
mod1.write('def a_func(param):\n pass\n')
self.mod.write('import mod1\nmod1.a_func(1)\n')
signature = change_signature.ChangeSignature(
self.project, mod1, mod1.read().index('a_func') + 1)
signature.get_changes([change_signature.ArgumentRemover(0)],
resources=[mod1]).do()
self.assertEquals('import mod1\nmod1.a_func(1)\n', self.mod.read())
self.assertEquals('def a_func():\n pass\n', mod1.read())
def test_reordering_and_automatic_defaults(self):
code = 'def f(p1, p2=2):\n' \
' pass\n' \
'f(1, 2)\n'
self.mod.write(code)
signature = change_signature.ChangeSignature(
self.project, self.mod, code.index('f('))
reorder = change_signature.ArgumentReorderer([1, 0], autodef='1')
signature.get_changes([reorder]).do()
expected = 'def f(p2=2, p1=1):\n' \
' pass\n' \
'f(2, 1)\n'
self.assertEquals(expected, self.mod.read())
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
]
| |
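Outside the test harness, the same refactoring is driven through rope's public project API. A minimal sketch, assuming a project rooted in the current directory containing a `mod.py` that defines `a_func(p1)`; the paths and offset are illustrative, not taken from the test suite.

```python
from rope.base.project import Project
from rope.refactor import change_signature

project = Project('.')                    # assumed project root
mod = project.get_resource('mod.py')      # assumed module path
offset = mod.read().index('a_func') + 1   # cursor inside the function name
signature = change_signature.ChangeSignature(project, mod, offset)
# drop the first parameter and rewrite all call sites
project.do(signature.get_changes([change_signature.ArgumentRemover(0)]))
project.close()
```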
71c3e117bf80800cdf39ffb2bccf323f46bcca2c | 981ecc9cf59dd6f839c3e40d26601efb1d073558 | /src/face_recognition/youtube_dl/extractor/ard.py | 2d5599456688eba9756e28c2ffe9dbae48decb2c | [
"MIT"
]
| permissive | lodemo/CATANA | 469e0684b816f09ac74f186552b463cc77db369e | a349f460772511ccbb16429b40bfb50f774d45d4 | refs/heads/master | 2023-03-30T04:07:12.070332 | 2021-02-03T21:47:32 | 2021-02-03T21:47:32 | 102,767,095 | 12 | 6 | MIT | 2023-03-24T21:55:24 | 2017-09-07T17:36:45 | Jupyter Notebook | UTF-8 | Python | false | false | 11,725 | py | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from .generic import GenericIE
from ..utils import (
determine_ext,
ExtractorError,
qualities,
int_or_none,
parse_duration,
unified_strdate,
xpath_text,
update_url_query,
)
from ..compat import compat_etree_fromstring
class ARDMediathekIE(InfoExtractor):
IE_NAME = 'ARD:mediathek'
_VALID_URL = r'^https?://(?:(?:www\.)?ardmediathek\.de|mediathek\.(?:daserste|rbb-online)\.de)/(?:.*/)(?P<video_id>[0-9]+|[^0-9][^/\?]+)[^/\?]*(?:\?.*)?'
_TESTS = [{
'url': 'http://www.ardmediathek.de/tv/Dokumentation-und-Reportage/Ich-liebe-das-Leben-trotzdem/rbb-Fernsehen/Video?documentId=29582122&bcastId=3822114',
'info_dict': {
'id': '29582122',
'ext': 'mp4',
'title': 'Ich liebe das Leben trotzdem',
'description': 'md5:45e4c225c72b27993314b31a84a5261c',
'duration': 4557,
},
'params': {
# m3u8 download
'skip_download': True,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://www.ardmediathek.de/tv/Tatort/Tatort-Scheinwelten-H%C3%B6rfassung-Video/Das-Erste/Video?documentId=29522730&bcastId=602916',
'md5': 'f4d98b10759ac06c0072bbcd1f0b9e3e',
'info_dict': {
'id': '29522730',
'ext': 'mp4',
'title': 'Tatort: Scheinwelten - Hörfassung (Video tgl. ab 20 Uhr)',
'description': 'md5:196392e79876d0ac94c94e8cdb2875f1',
'duration': 5252,
},
'skip': 'HTTP Error 404: Not Found',
}, {
# audio
'url': 'http://www.ardmediathek.de/tv/WDR-H%C3%B6rspiel-Speicher/Tod-eines-Fu%C3%9Fballers/WDR-3/Audio-Podcast?documentId=28488308&bcastId=23074086',
'md5': '219d94d8980b4f538c7fcb0865eb7f2c',
'info_dict': {
'id': '28488308',
'ext': 'mp3',
'title': 'Tod eines Fußballers',
'description': 'md5:f6e39f3461f0e1f54bfa48c8875c86ef',
'duration': 3240,
},
'skip': 'HTTP Error 404: Not Found',
}, {
'url': 'http://mediathek.daserste.de/sendungen_a-z/328454_anne-will/22429276_vertrauen-ist-gut-spionieren-ist-besser-geht',
'only_matching': True,
}, {
# audio
'url': 'http://mediathek.rbb-online.de/radio/Hörspiel/Vor-dem-Fest/kulturradio/Audio?documentId=30796318&topRessort=radio&bcastId=9839158',
'md5': '4e8f00631aac0395fee17368ac0e9867',
'info_dict': {
'id': '30796318',
'ext': 'mp3',
'title': 'Vor dem Fest',
'description': 'md5:c0c1c8048514deaed2a73b3a60eecacb',
'duration': 3287,
},
'skip': 'Video is no longer available',
}]
def _extract_media_info(self, media_info_url, webpage, video_id):
media_info = self._download_json(
media_info_url, video_id, 'Downloading media JSON')
formats = self._extract_formats(media_info, video_id)
if not formats:
if '"fsk"' in webpage:
raise ExtractorError(
'This video is only available after 20:00', expected=True)
elif media_info.get('_geoblocked'):
raise ExtractorError('This video is not available due to geo restriction', expected=True)
self._sort_formats(formats)
duration = int_or_none(media_info.get('_duration'))
thumbnail = media_info.get('_previewImage')
subtitles = {}
subtitle_url = media_info.get('_subtitleUrl')
if subtitle_url:
subtitles['de'] = [{
'ext': 'ttml',
'url': subtitle_url,
}]
return {
'id': video_id,
'duration': duration,
'thumbnail': thumbnail,
'formats': formats,
'subtitles': subtitles,
}
def _extract_formats(self, media_info, video_id):
type_ = media_info.get('_type')
media_array = media_info.get('_mediaArray', [])
formats = []
for num, media in enumerate(media_array):
for stream in media.get('_mediaStreamArray', []):
stream_urls = stream.get('_stream')
if not stream_urls:
continue
if not isinstance(stream_urls, list):
stream_urls = [stream_urls]
quality = stream.get('_quality')
server = stream.get('_server')
for stream_url in stream_urls:
ext = determine_ext(stream_url)
if quality != 'auto' and ext in ('f4m', 'm3u8'):
continue
if ext == 'f4m':
formats.extend(self._extract_f4m_formats(
update_url_query(stream_url, {
'hdcore': '3.1.1',
'plugin': 'aasp-3.1.1.69.124'
}),
video_id, f4m_id='hds', fatal=False))
elif ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
stream_url, video_id, 'mp4', m3u8_id='hls', fatal=False))
else:
if server and server.startswith('rtmp'):
f = {
'url': server,
'play_path': stream_url,
'format_id': 'a%s-rtmp-%s' % (num, quality),
}
elif stream_url.startswith('http'):
f = {
'url': stream_url,
'format_id': 'a%s-%s-%s' % (num, ext, quality)
}
else:
continue
m = re.search(r'_(?P<width>\d+)x(?P<height>\d+)\.mp4$', stream_url)
if m:
f.update({
'width': int(m.group('width')),
'height': int(m.group('height')),
})
if type_ == 'audio':
f['vcodec'] = 'none'
formats.append(f)
return formats
def _real_extract(self, url):
# determine video id from url
m = re.match(self._VALID_URL, url)
numid = re.search(r'documentId=([0-9]+)', url)
if numid:
video_id = numid.group(1)
else:
video_id = m.group('video_id')
webpage = self._download_webpage(url, video_id)
ERRORS = (
('>Leider liegt eine Störung vor.', 'Video %s is unavailable'),
('>Der gewünschte Beitrag ist nicht mehr verfügbar.<',
'Video %s is no longer available'),
)
for pattern, message in ERRORS:
if pattern in webpage:
raise ExtractorError(message % video_id, expected=True)
if re.search(r'[\?&]rss($|[=&])', url):
doc = compat_etree_fromstring(webpage.encode('utf-8'))
if doc.tag == 'rss':
return GenericIE()._extract_rss(url, video_id, doc)
title = self._html_search_regex(
[r'<h1(?:\s+class="boxTopHeadline")?>(.*?)</h1>',
r'<meta name="dcterms.title" content="(.*?)"/>',
r'<h4 class="headline">(.*?)</h4>'],
webpage, 'title')
description = self._html_search_meta(
'dcterms.abstract', webpage, 'description', default=None)
if description is None:
description = self._html_search_meta(
'description', webpage, 'meta description')
# Thumbnail is sometimes not present.
# It is in the mobile version, but that seems to use a different URL
# structure altogether.
thumbnail = self._og_search_thumbnail(webpage, default=None)
media_streams = re.findall(r'''(?x)
mediaCollection\.addMediaStream\([0-9]+,\s*[0-9]+,\s*"[^"]*",\s*
"([^"]+)"''', webpage)
if media_streams:
QUALITIES = qualities(['lo', 'hi', 'hq'])
formats = []
for furl in set(media_streams):
if furl.endswith('.f4m'):
fid = 'f4m'
else:
fid_m = re.match(r'.*\.([^.]+)\.[^.]+$', furl)
fid = fid_m.group(1) if fid_m else None
formats.append({
'quality': QUALITIES(fid),
'format_id': fid,
'url': furl,
})
self._sort_formats(formats)
info = {
'formats': formats,
}
else: # request JSON file
info = self._extract_media_info(
'http://www.ardmediathek.de/play/media/%s' % video_id, webpage, video_id)
info.update({
'id': video_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
})
return info
class ARDIE(InfoExtractor):
_VALID_URL = r'(?P<mainurl>https?://(www\.)?daserste\.de/[^?#]+/videos/(?P<display_id>[^/?#]+)-(?P<id>[0-9]+))\.html'
_TEST = {
'url': 'http://www.daserste.de/information/reportage-dokumentation/dokus/videos/die-story-im-ersten-mission-unter-falscher-flagge-100.html',
'md5': 'd216c3a86493f9322545e045ddc3eb35',
'info_dict': {
'display_id': 'die-story-im-ersten-mission-unter-falscher-flagge',
'id': '100',
'ext': 'mp4',
'duration': 2600,
'title': 'Die Story im Ersten: Mission unter falscher Flagge',
'upload_date': '20140804',
'thumbnail': r're:^https?://.*\.jpg$',
},
'skip': 'HTTP Error 404: Not Found',
}
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
display_id = mobj.group('display_id')
player_url = mobj.group('mainurl') + '~playerXml.xml'
doc = self._download_xml(player_url, display_id)
video_node = doc.find('./video')
upload_date = unified_strdate(xpath_text(
video_node, './broadcastDate'))
thumbnail = xpath_text(video_node, './/teaserImage//variant/url')
formats = []
for a in video_node.findall('.//asset'):
f = {
'format_id': a.attrib['type'],
'width': int_or_none(a.find('./frameWidth').text),
'height': int_or_none(a.find('./frameHeight').text),
'vbr': int_or_none(a.find('./bitrateVideo').text),
'abr': int_or_none(a.find('./bitrateAudio').text),
'vcodec': a.find('./codecVideo').text,
'tbr': int_or_none(a.find('./totalBitrate').text),
}
if a.find('./serverPrefix').text:
f['url'] = a.find('./serverPrefix').text
f['playpath'] = a.find('./fileName').text
else:
f['url'] = a.find('./fileName').text
formats.append(f)
self._sort_formats(formats)
return {
'id': mobj.group('id'),
'formats': formats,
'display_id': display_id,
'title': video_node.find('./title').text,
'duration': parse_duration(video_node.find('./duration').text),
'upload_date': upload_date,
'thumbnail': thumbnail,
}
| [
"[email protected]"
]
| |
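Extractors like the one above are not normally instantiated directly; they are selected by URL through the `YoutubeDL` front end. A minimal sketch with a placeholder URL (the real page ID would come from daserste.de):

```python
import youtube_dl

with youtube_dl.YoutubeDL({'quiet': True}) as ydl:
    # download=False returns the extracted metadata dict without fetching media
    info = ydl.extract_info(
        'http://www.daserste.de/information/videos/example-100.html',
        download=False)
    print(info.get('title'), info.get('duration'))
```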
a06cf4d1ec1fe5c2ef51f4f2e617bb1fda1ec0c7 | 137ba8a70dfcf94dfe7aeef1599341ecc06ca48f | /student_result/2018/04_parsing/parsing_13.py | 485d391f72037db4058ecdbac9b7637466f7058f | []
| no_license | smtamh/oop_python_ex | e1d3a16ade54717d6cdf1759b6eba7b27cfc974e | bd58ee3bf13dad3de989d5fd92e503d5ff949dd9 | refs/heads/master | 2020-09-22T08:35:19.847656 | 2019-11-13T02:47:06 | 2019-11-13T02:47:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,971 | py | import bs4
import requests
# pip3 install requests
# pip3 install beautifulsoup4
def get_html(url):
"""웹사이트 주소를 입력받아 html tag를 읽어 반환"""
response = requests.get(url)
response.raise_for_status()
return response.text
# Return (title, rating, review text) triples for the given review page
def movie_review_page(page):
html = get_html(
'https://movie.naver.com/movie/point/af/list.nhn?&page='+str(page))
soup = bs4.BeautifulSoup(html, 'html.parser')
review_point = soup.select('div#old_content td.point')
review_title = soup.select('div#old_content td.title')
review = []
for i, j in zip(review_title, review_point):
        content = i.getText().strip().strip("신고").strip().split('\n')  # "신고" is the site's "report" link label
point = int(j.getText().strip())
review.append([content[0], point, content[1]])
return review
# Collect and return the reviews from page a through page b
def review_index(a, b):
review = []
for i in range(a, b+1):
review.extend(movie_review_page(i))
return review
# Print the reviews
def print_review(review, a, b):
print("%d page ~ %d page" % (a, b))
for r in review:
print("%s (%d) : %s" % (r[0], r[1], r[2]))
# Compute the average rating for the given movie title
def title_avg_point(review, title):
count = 0
sum = 0
for r in review:
if(r[0] == title):
count += 1
sum += r[1]
try:
return sum/count
except ZeroDivisionError:
print('<%s>영화의 리뷰가 없습니다' % (title))
a, b = map(int, input('Enter a page range separated by a space (e.g. 1 20): ').split())
review = review_index(a, b)
# Show every review from page a to page b
print('Fetching reviews.')
print_review(review, a, b)
# Look up the average rating for a chosen movie
title = input('\nEnter a movie title to check its average rating: ')
avg = title_avg_point(review, title)
if avg is not None:
    print("%.3f" % avg)
| [
"[email protected]"
]
| |
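To make the CSS-selector pattern in the scraper above concrete without hitting the live site, here is a self-contained illustration run against a tiny inline HTML snippet. The markup is a stand-in, not a capture of the real Naver page.

```python
import bs4

# Assumed markup mimicking the review table the scraper targets.
html = '''<div id="old_content"><table>
<tr><td class="title">A Movie
Great film!
신고</td><td class="point">9</td></tr>
</table></div>'''

soup = bs4.BeautifulSoup(html, 'html.parser')
for t, p in zip(soup.select('div#old_content td.title'),
                soup.select('div#old_content td.point')):
    lines = t.getText().strip().strip('신고').strip().split('\n')
    print(lines[0], int(p.getText()), lines[1])  # -> A Movie 9 Great film!
```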
a5c74481adc2762f626d94daf9deb43125532d4c | 162e2588156cb2c0039c926c5c442363d9f77b00 | /data_steward/cdr_cleaner/cleaning_rules/covid_ehr_vaccine_concept_suppression.py | 1601ac11a20c04fcd9a8cadea05debe08ac71228 | [
"MIT"
]
| permissive | nishanthpp93/curation | 38be687240b52decc25ffb7b655f25e9faa40e47 | ac9f38b2f4580ae806121dd929293159132c7d2a | refs/heads/develop | 2022-08-08T20:33:53.125216 | 2021-12-03T21:38:48 | 2021-12-03T21:38:48 | 155,608,471 | 1 | 0 | MIT | 2020-10-09T01:14:39 | 2018-10-31T18:54:34 | Python | UTF-8 | Python | false | false | 6,340 | py | """
Suppress COVID EHR vaccine concepts.
Original Issues: DC-1692
"""
# Python imports
import logging
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'covid_vaccine_concepts'
COVID_VACCINE_CONCEPT_QUERY = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
with covid_vacc as (
SELECT *
FROM `{{project_id}}.{{dataset_id}}.concept`
WHERE (
-- done by name and vocab --
REGEXP_CONTAINS(concept_name, r'(?i)(COVID)') AND
REGEXP_CONTAINS(concept_name, r'(?i)(VAC)') AND
vocabulary_id not in ('PPI')
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(207)|(208)|(210)|(211)|(212)')
and vocabulary_id = 'CVX'
) OR (
-- done by code and vocab --
REGEXP_CONTAINS(concept_code, r'(91300)|(91301)|(91302)|(91303)|(91304)')
and vocabulary_id = 'CPT4'
)
),
concepts_via_cr as (
select distinct c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_relationship`
on c.concept_id = concept_id_1
where concept_id_2 in (select concept_id from covid_vacc)
# and concept_id_1 not in (select concept_id from covid_vacc)
and (
relationship_id not in ('Subsumes', 'RxNorm dose form of', 'Dose form group of', 'RxNorm - SPL') OR
(relationship_id = 'RxNorm - SPL' and REGEXP_CONTAINS(concept_name, r'(?i)(COVID)'))
)
),
concepts_via_ca as (
select c.*
from `{{project_id}}.{{dataset_id}}.concept`as c
left join `{{project_id}}.{{dataset_id}}.concept_ancestor` as ca
on c.concept_id = ca.descendant_concept_id
where ca.ancestor_concept_id in (select concept_id from covid_vacc)
)
select distinct * from covid_vacc
union distinct
select distinct * from concepts_via_ca
union distinct
select distinct * from concepts_via_cr
""")
class CovidEHRVaccineConceptSuppression(AbstractBqLookupTableConceptSuppression
):
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
table_namer=None):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = "Suppress COVID EHR vaccine concepts."
super().__init__(
issue_numbers=['DC1692'],
description=desc,
affected_datasets=[cdr_consts.REGISTERED_TIER_DEID],
affected_tables=CDM_TABLES,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE,
table_namer=table_namer)
def create_suppression_lookup_table(self, client):
concept_suppression_lookup_query = COVID_VACCINE_CONCEPT_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
sandbox_id=self.sandbox_dataset_id,
concept_suppression_lookup_table=self.
concept_suppression_lookup_table)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def validate_rule(self, client, *args, **keyword_args):
"""
Validates the cleaning rule which deletes or updates the data from the tables
Method to run validation on cleaning rules that will be updating the values.
For example:
if your class updates all the datetime fields you should be implementing the
validation that checks if the date time values that needs to be updated no
longer exists in the table.
if your class deletes a subset of rows in the tables you should be implementing
the validation that checks if the count of final final row counts + deleted rows
should equals to initial row counts of the affected tables.
Raises RunTimeError if the validation fails.
"""
raise NotImplementedError("Please fix me.")
def setup_validation(self, client, *args, **keyword_args):
"""
Run required steps for validation setup
Method to run to setup validation on cleaning rules that will be updating or deleting the values.
For example:
if your class updates all the datetime fields you should be implementing the
logic to get the initial list of values which adhere to a condition we are looking for.
if your class deletes a subset of rows in the tables you should be implementing
the logic to get the row counts of the tables prior to applying cleaning rule
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(CovidEHRVaccineConceptSuppression,)])
| [
"[email protected]"
]
| |
a3da500227494dc20d9023e0ec3170314752b6aa | 6b2c9c056c2fc7a9c622355e9a00da00c22d626a | /venv/Lib/site-packages/pip/_internal/locations.py | 6bfbe43dc4ebb7de0c4a0bda1e64c2459cba740c | []
| no_license | LeoM666/GameKlanRedLion | 0a42f06add1ff396d375c2ace63d7112f41281f6 | 5cb22b8326bffcaf351180497b7c47b43e316621 | refs/heads/master | 2020-06-11T20:21:02.154926 | 2019-08-03T07:45:33 | 2019-08-03T07:45:33 | 194,073,919 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,934 | py | """Locations where we look for configs, install stuff, etc"""
from __future__ import absolute_import
import os
import os.path
import platform
import site
import sys
import sysconfig
from distutils import sysconfig as distutils_sysconfig
from distutils.command.install import SCHEME_KEYS # type: ignore
from pip._internal.utils import appdirs
from pip._internal.utils.compat import WINDOWS, expanduser
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import Any, Union, Dict, List, Optional
# Application Directories
USER_CACHE_DIR = appdirs.user_cache_dir("pip")
DELETE_MARKER_MESSAGE = '''\
This file is placed here by pip to indicate the source was put
here by pip.
Once this package is successfully installed this source code will be
deleted (unless you remove this file).
'''
PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt'
def write_delete_marker_file(directory):
# type: (str) -> None
"""
Write the pip delete marker file into this directory.
"""
filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME)
with open(filepath, 'w') as marker_fp:
marker_fp.write(DELETE_MARKER_MESSAGE)
def running_under_virtualenv():
# type: () -> bool
"""
Return True if we're running inside a virtualenv, False otherwise.
"""
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
return False
def virtualenv_no_global():
# type: () -> bool
"""
Return True if in a venv and no system site packages.
"""
# this mirrors the logic in virtualenv.py for locating the
# no-global-site-packages.txt file
site_mod_dir = os.path.dirname(os.path.abspath(site.__file__))
no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt')
if running_under_virtualenv() and os.path.isfile(no_global_file):
return True
else:
return False
if running_under_virtualenv():
src_prefix = os.path.join(sys.prefix, 'src')
else:
# FIXME: keep src in cwd for now (it is not a temporary folder)
try:
src_prefix = os.path.join(os.getcwd(), 'src')
except OSError:
# In case the current working directory has been renamed or deleted
sys.exit(
"The folder you are executing pip from can no longer be found."
)
# under macOS + virtualenv sys.prefix is not properly resolved
# it is something like /path/to/python/bin/..
# Note: using realpath due to tmp dirs on OSX being symlinks
src_prefix = os.path.abspath(src_prefix)
# FIXME doesn't account for venv linked to global site-packages
site_packages = sysconfig.get_path("purelib") # type: Optional[str]
# This is because of a bug in PyPy's sysconfig module, see
# https://bitbucket.org/pypy/pypy/issues/2506/sysconfig-returns-incorrect-paths
# for more information.
if platform.python_implementation().lower() == "pypy":
site_packages = distutils_sysconfig.get_python_lib()
try:
# Use getusersitepackages if this is present, as it ensures that the
# value is initialised properly.
user_site = site.getusersitepackages()
except AttributeError:
user_site = site.USER_SITE
user_dir = expanduser('~')
if WINDOWS:
bin_py = os.path.join(sys.prefix, 'Scripts')
bin_user = os.path.join(user_site, 'Scripts')
# buildout uses 'bin' on Windows too?
if not os.path.exists(bin_py):
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.ini'
legacy_storage_dir = os.path.join(user_dir, 'pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
else:
bin_py = os.path.join(sys.prefix, 'bin')
bin_user = os.path.join(user_site, 'bin')
config_basename = 'pip.conf'
legacy_storage_dir = os.path.join(user_dir, '.pip')
legacy_config_file = os.path.join(
legacy_storage_dir,
config_basename,
)
# Forcing to use /usr/local/bin for standard macOS framework installs
# Also log to ~/Library/Logs/ for use with the Console.app log viewer
if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/':
bin_py = '/usr/local/bin'
global_config_files = [
os.path.join(path, config_basename)
for path in appdirs.site_config_dirs('pip')
]
site_config_file = os.path.join(sys.prefix, config_basename)
new_config_file = os.path.join(appdirs.user_config_dir("pip"), config_basename)
def distutils_scheme(dist_name, user=False, home=None, root=None,
isolated=False, prefix=None):
# type:(str, bool, str, str, bool, str) -> dict
"""
Return a distutils install scheme
"""
from distutils.dist import Distribution
scheme = {}
if isolated:
extra_dist_args = {"script_args": ["--no-user-cfg"]}
else:
extra_dist_args = {}
dist_args = {'name': dist_name} # type: Dict[str, Union[str, List[str]]]
dist_args.update(extra_dist_args)
d = Distribution(dist_args)
# Ignoring, typeshed issue reported python/typeshed/issues/2567
d.parse_config_files()
# NOTE: Ignoring type since mypy can't find attributes on 'Command'
i = d.get_command_obj('install', create=True) # type: Any
assert i is not None
# NOTE: setting user or home has the side-effect of creating the home dir
# or user base for installations during finalize_options()
# ideally, we'd prefer a scheme class that has no side-effects.
assert not (user and prefix), "user={} prefix={}".format(user, prefix)
i.user = user or i.user
if user:
i.prefix = ""
i.prefix = prefix or i.prefix
i.home = home or i.home
i.root = root or i.root
i.finalize_options()
for key in SCHEME_KEYS:
scheme[key] = getattr(i, 'install_' + key)
# install_lib specified in setup.cfg should install *everything*
# into there (i.e. it takes precedence over both purelib and
# platlib). Note, i.install_lib is *always* set after
# finalize_options(); we only want to override here if the user
# has explicitly requested it hence going back to the config
# Ignoring, typeshed issue reported python/typeshed/issues/2567
if 'install_lib' in d.get_option_dict('install'): # type: ignore
scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib))
if running_under_virtualenv():
scheme['headers'] = os.path.join(
sys.prefix,
'include',
'site',
'python' + sys.version[:3],
dist_name,
)
if root is not None:
path_no_drive = os.path.splitdrive(
os.path.abspath(scheme["headers"]))[1]
scheme["headers"] = os.path.join(
root,
path_no_drive[1:],
)
return scheme
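# Illustrative use, not part of pip's public API: distutils_scheme('pip')
# returns a dict keyed by distutils' SCHEME_KEYS ('purelib', 'platlib',
# 'headers', 'scripts', 'data'), each mapped to an install path for the
# current interpreter.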
| [
"[email protected]"
]
| |
4cc0e90527df6b74090fa2055c2f1b2087e39b1c | 45da2549e8943738618ad1b774abeaa13ed463b0 | /cian_parser/management/commands/urls_parser_cian.py | f057e37c493dde6d53c5645a5f1fde7ad115c543 | []
| no_license | ShashkinRoman/cian_parser | 02ea484457123915e6c7d3def90f5bc76ae5893b | 8a4bb0b8bb2cf5fd1a059a568bd862bdeafa8bc1 | refs/heads/master | 2023-02-05T09:14:27.748810 | 2020-12-23T11:07:50 | 2020-12-23T11:07:50 | 291,051,494 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | from django.core.management.base import BaseCommand
from cian_parser.urls_parser import main as urls_main
class Command(BaseCommand):
help = 'urls parser'
def handle(self, *args, **options):
urls_main()
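# By Django convention the command name is taken from this module's filename,
# so this parser would be launched with: python manage.py urls_parser_cian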
| [
"[email protected]"
]
| |
c65c2414f43a95a3c537c9d9a57f181522594a14 | 236ed63fc380b10e43fd326e3f17f1ddc8f28b4e | /apps/goods/serializer.py | 7bd4b70073afa4e2aaea35e22f6fd6812815ba2e | []
| no_license | pylarva/restFrameworkShop | 53585caab80b82f82f6d693292ccf4fa8bf33810 | ddc88dc0ebbdd50927f9b4e6f1d8e4a65239cddb | refs/heads/master | 2020-03-24T23:22:13.780622 | 2018-08-01T10:29:35 | 2018-08-01T10:29:35 | 143,130,351 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,431 | py | # !/usr/bin/env python
# -*- coding:utf-8 -*-
from rest_framework import serializers
from .models import Goods, GoodsCategory
# 1) Serialize the data the clumsy way, declaring every field by hand
# class GoodsSerializer(serializers.Serializer):
# name = serializers.CharField(required=True, max_length=108)
# click_num = serializers.IntegerField(default=0)
# goods_front_image = serializers.ImageField()
#
# def create(self, validated_data):
# """
#         Create a database record from the data received from the frontend
# """
# return Goods.objects.create(**validated_data)
# 2) Use ModelSerializer to simplify serialization
# Nest the three-level goods category menu
class CategorySerializer3(serializers.ModelSerializer):
class Meta:
model = GoodsCategory
fields = "__all__"
class CategorySerializer2(serializers.ModelSerializer):
sub_cat = CategorySerializer3(many=True)
class Meta:
model = GoodsCategory
fields = "__all__"
class CategorySerializer(serializers.ModelSerializer):
    # Nest second-level categories inside the first level; many=True means there can be several
sub_cat = CategorySerializer2(many=True)
class Meta:
model = GoodsCategory
fields = "__all__"
class GoodsSerializer(serializers.ModelSerializer):
category = CategorySerializer()
class Meta:
model = Goods
# fields = ('name', 'click_num', 'code', 'linenos', 'language', 'style')
fields = "__all__"
| [
"[email protected]"
]
| |
3ade350b48eac09ac2875ce074001fd47ea18ca4 | 1ab7b3f2aa63de8488ce7c466a67d367771aa1f2 | /Ricardo_OS/Python_backend/venv/lib/python3.8/site-packages/pandas/tests/groupby/test_bin_groupby.py | f20eed4575e91ce541b6e4f2835eacbe462fdb96 | [
"MIT"
]
| permissive | icl-rocketry/Avionics | 9d39aeb11aba11115826fd73357b415026a7adad | 95b7a061eabd6f2b607fba79e007186030f02720 | refs/heads/master | 2022-07-30T07:54:10.642930 | 2022-07-10T12:19:10 | 2022-07-10T12:19:10 | 216,184,670 | 9 | 1 | MIT | 2022-06-27T10:17:06 | 2019-10-19T09:57:07 | C++ | UTF-8 | Python | false | false | 4,083 | py | import numpy as np
import pytest
from pandas._libs import groupby, lib, reduction as libreduction
from pandas.core.dtypes.common import ensure_int64
import pandas as pd
from pandas import Series, isna
import pandas._testing as tm
def test_series_grouper():
obj = Series(np.random.randn(10))
dummy = obj.iloc[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
grouper = libreduction.SeriesGrouper(obj, np.mean, labels, 2, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[3:6].mean(), obj[6:].mean()])
tm.assert_almost_equal(result, expected)
exp_counts = np.array([3, 4], dtype=np.int64)
tm.assert_almost_equal(counts, exp_counts)
def test_series_grouper_requires_nonempty_raises():
# GH#29500
obj = Series(np.random.randn(10))
dummy = obj.iloc[:0]
labels = np.array([-1, -1, -1, 0, 0, 0, 1, 1, 1, 1], dtype=np.int64)
with pytest.raises(ValueError, match="SeriesGrouper requires non-empty `series`"):
libreduction.SeriesGrouper(dummy, np.mean, labels, 2, dummy)
def test_series_bin_grouper():
obj = Series(np.random.randn(10))
dummy = obj[:0]
bins = np.array([3, 6])
grouper = libreduction.SeriesBinGrouper(obj, np.mean, bins, dummy)
result, counts = grouper.get_result()
expected = np.array([obj[:3].mean(), obj[3:6].mean(), obj[6:].mean()])
tm.assert_almost_equal(result, expected)
exp_counts = np.array([3, 3, 4], dtype=np.int64)
tm.assert_almost_equal(counts, exp_counts)
def assert_block_lengths(x):
assert len(x) == len(x._mgr.blocks[0].mgr_locs)
return 0
def cumsum_max(x):
x.cumsum().max()
return 0
@pytest.mark.parametrize("func", [cumsum_max, assert_block_lengths])
def test_mgr_locs_updated(func):
# https://github.com/pandas-dev/pandas/issues/31802
# Some operations may require creating new blocks, which requires
# valid mgr_locs
df = pd.DataFrame({"A": ["a", "a", "a"], "B": ["a", "b", "b"], "C": [1, 1, 1]})
result = df.groupby(["A", "B"]).agg(func)
expected = pd.DataFrame(
{"C": [0, 0]},
index=pd.MultiIndex.from_product([["a"], ["a", "b"]], names=["A", "B"]),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"binner,closed,expected",
[
(
np.array([0, 3, 6, 9], dtype=np.int64),
"left",
np.array([2, 5, 6], dtype=np.int64),
),
(
np.array([0, 3, 6, 9], dtype=np.int64),
"right",
np.array([3, 6, 6], dtype=np.int64),
),
(np.array([0, 3, 6], dtype=np.int64), "left", np.array([2, 5], dtype=np.int64)),
(
np.array([0, 3, 6], dtype=np.int64),
"right",
np.array([3, 6], dtype=np.int64),
),
],
)
def test_generate_bins(binner, closed, expected):
values = np.array([1, 2, 3, 4, 5, 6], dtype=np.int64)
result = lib.generate_bins_dt64(values, binner, closed=closed)
tm.assert_numpy_array_equal(result, expected)
def test_group_ohlc():
def _check(dtype):
obj = np.array(np.random.randn(20), dtype=dtype)
bins = np.array([6, 12, 20])
out = np.zeros((3, 4), dtype)
counts = np.zeros(len(out), dtype=np.int64)
labels = ensure_int64(np.repeat(np.arange(3), np.diff(np.r_[0, bins])))
func = getattr(groupby, f"group_ohlc_{dtype}")
func(out, counts, obj[:, None], labels)
def _ohlc(group):
if isna(group).all():
return np.repeat(np.nan, 4)
return [group[0], group.max(), group.min(), group[-1]]
expected = np.array([_ohlc(obj[:6]), _ohlc(obj[6:12]), _ohlc(obj[12:])])
tm.assert_almost_equal(out, expected)
tm.assert_numpy_array_equal(counts, np.array([6, 6, 8], dtype=np.int64))
obj[:6] = np.nan
func(out, counts, obj[:, None], labels)
expected[0] = np.nan
tm.assert_almost_equal(out, expected)
_check("float32")
_check("float64")
class TestMoments:
pass
| [
"[email protected]"
]
| |
c3e17c5290ac282d69fd29259472c75ac146da2c | 0ddcfcbfc3faa81c79e320c34c35a972dab86498 | /puzzles/minimum_score_of_a_path_between_two_cities.py | 8914e671e9bcec78b594033d27ea3e966a60aca7 | []
| no_license | IvanWoo/coding-interview-questions | 3311da45895ac4f3c394b22530079c79a9215a1c | 1312305b199b65a11804a000432ebe28d1fba87e | refs/heads/master | 2023-08-09T19:46:28.278111 | 2023-06-21T01:47:07 | 2023-06-21T01:47:07 | 135,307,912 | 0 | 0 | null | 2023-07-20T12:14:38 | 2018-05-29T14:24:43 | Python | UTF-8 | Python | false | false | 1,899 | py | # https://leetcode.com/problems/minimum-score-of-a-path-between-two-cities/description/
"""
You are given a positive integer n representing n cities numbered from 1 to n. You are also given a 2D array roads where roads[i] = [ai, bi, distancei] indicates that there is a bidirectional road between cities ai and bi with a distance equal to distancei. The cities graph is not necessarily connected.
The score of a path between two cities is defined as the minimum distance of a road in this path.
Return the minimum possible score of a path between cities 1 and n.
Note:
A path is a sequence of roads between two cities.
It is allowed for a path to contain the same road multiple times, and you can visit cities 1 and n multiple times along the path.
The test cases are generated such that there is at least one path between 1 and n.
Example 1:
Input: n = 4, roads = [[1,2,9],[2,3,6],[2,4,5],[1,4,7]]
Output: 5
Explanation: The path from city 1 to 4 with the minimum score is: 1 -> 2 -> 4. The score of this path is min(9,5) = 5.
It can be shown that no other path has less score.
Example 2:
Input: n = 4, roads = [[1,2,2],[1,3,4],[3,4,7]]
Output: 2
Explanation: The path from city 1 to 4 with the minimum score is: 1 -> 2 -> 1 -> 3 -> 4. The score of this path is min(2,2,4,7) = 2.
Constraints:
2 <= n <= 105
1 <= roads.length <= 105
roads[i].length == 3
1 <= ai, bi <= n
ai != bi
1 <= distancei <= 104
There are no repeated edges.
There is at least one path between 1 and n.
"""
from math import inf
from puzzles.union_find import UF
def min_score(n: int, roads: list[list[int]]) -> int:
min_score_map = [inf] * (n + 1)
uf = UF(n + 1)
for u, v, score in roads:
uf.union(u, v)
min_score_map[u] = min(min_score_map[u], score)
min_score_map[v] = min(min_score_map[v], score)
return min((min_score_map[i] for i in range(1, n + 1) if uf.connected(i, 1)))
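# Minimal sanity check built from the two examples in the problem statement
# above (assumes puzzles.union_find.UF exposes the union()/connected() API
# used in min_score):
if __name__ == "__main__":
    assert min_score(4, [[1, 2, 9], [2, 3, 6], [2, 4, 5], [1, 4, 7]]) == 5
    assert min_score(4, [[1, 2, 2], [1, 3, 4], [3, 4, 7]]) == 2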
| [
"[email protected]"
]
| |
1b7f698663463ef8f33f245786a62d037968f89d | c8847d4117204f1d26ad47488152234a64aefb0d | /hypergan/samplers/static_batch_sampler.py | 81931e59ada7b5a027cd5459aa7b45db299cc08f | [
"MIT"
]
| permissive | Solertis/HyperGAN | 851f27aa9ba2ef89b6d50f86987c6746c831502d | 1aceed20c9d9f67de8e3b290ee84f376d64228f0 | refs/heads/master | 2021-01-20T14:01:52.353117 | 2017-02-11T09:48:22 | 2017-02-11T09:48:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 759 | py |
from hypergan.util.ops import *
from hypergan.util.globals import *
from hypergan.samplers.common import *
#mask_noise = None
z = None
y = None
def sample(sample_file, sess, config):
global z, y
generator = get_tensor("g")[0]
y_t = get_tensor("y")
z_t = get_tensor("z")
x = np.linspace(0,1, 4)
    # Sample z and y only once and cache them in module globals, so every
    # call renders the same "static" batch of latents.
    if z is None:
        z = sess.run(z_t)
        y = sess.run(y_t)
g=tf.get_default_graph()
with g.as_default():
tf.set_random_seed(1)
sample = sess.run(generator, feed_dict={z_t: z, y_t: y})
#plot(self.config, sample, sample_file)
stacks = [np.hstack(sample[x*8:x*8+8]) for x in range(4)]
plot(config, np.vstack(stacks), sample_file)
return [{'image':sample_file, 'label':'grid'}]
| [
"[email protected]"
]
| |
572e9188596943b6c163e97b57ca50efcf4e012b | b4ca99fdb2e7f5da8eb559adf647a0bc69e2d7c5 | /StandardSpider/DataAnalyse/dbDataGet/PanasonicDevice_data.py | 645090e5610201dd4ef37d6ecb74a6395b7c3262 | []
| no_license | RoyalClown/MyPython | 4f6b68e0f5f883c187cf8253df0c5feeab3de8fd | baf13612d8d36e519ce54825c4b664597789128a | refs/heads/master | 2021-01-11T13:29:30.332280 | 2017-05-31T02:56:07 | 2017-05-31T02:56:07 | 81,513,648 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,577 | py | from DataAnalyse.dbDataGet.ST_data import UtilDataAnalyse
from DataAnalyse.valueProcessing.propertyValueModify import PropertyValueModify
class DataProcessing:
def go(self, task_id):
        # Merge the crawled data into the main tables
spcap_data = UtilDataAnalyse(task_id=task_id)
table_data = spcap_data.get_from_table()
b2c_brand_json = str(table_data['b2cBrand']).replace("'", "\"")
b2c_kind_json = str(table_data['b2cKind']).replace("'", "\"")
b2c_brid = table_data['b2cBrId']
b2c_kiid = table_data['b2cKiId']
b2c_kind_name = table_data['kindName']
base_properties = table_data['properties']
resource = table_data['resource']
url = table_data['url']
unit = 'PCS'
crawl_components = spcap_data.get_all_components()
for crawl_component in crawl_components:
# 爬取获得
crawl_component_id = crawl_component[0]
crawl_component_attach = crawl_component[2]
crawl_component_img = crawl_component[4]
crawl_component_url = crawl_component[13]
crawl_component_code = crawl_component[7]
cc_flag = crawl_component[17]
            # Given in advance
            # Check whether a component with the same brand and part number already exists
component = spcap_data.find_component(b2c_brid, crawl_component_code)
if component is None:
insert_or_update = 0
uuid = spcap_data.make_uuid(b2c_kiid)
cmp_version = 1
#
else:
insert_or_update = 1
uuid = component[28]
cmp_old_version = component[21]
if cmp_old_version is None:
cmp_version = 1
else:
try:
int(cmp_old_version)
cmp_version = cmp_old_version + 1
except:
cmp_version = 1
spcap_data.delete_old_component(uuid)
            # Save the component and get back its id
component_id = spcap_data.save_to_component(crawl_component_code, b2c_kiid, b2c_brid, uuid,
crawl_component_attach, crawl_component_img,
version=cmp_version)
            # Update the crawler table and return the uuid
#
crawl_base_properties = spcap_data.get_single_properties(crawl_component_id)
properties_json = []
cc_modify = 0
for base_property in base_properties:
                # Analyze and normalize the property value
property_value_modify = PropertyValueModify()
base_property_detno = base_property['detno']
try:
aim_property_name = base_property['name']
except:
aim_property_name = ''
                # Check whether there are multiple units
try:
base_property_unit_list = base_property['unit'].split(",")
if len(base_property_unit_list) == 1:
base_property_unit = base_property_unit_list[0]
except:
base_property_unit = ''
base_property_id = base_property['property']['id']
base_property_name = base_property['property']['labelCn']
try:
base_property_type = base_property['type']
except:
base_property_type = ''
                # Merge related parameters
tmp_pv_min = ''
tmp_pv_max = ''
tmp_voltage_min = ''
tmp_voltage_max = ''
tmp_voltage_typ = ''
tmp_communication0 = ''
tmp_communication2 = ''
tmp_communication1 = ''
tmp_sup_voltage_min = ''
tmp_sup_voltage_max = ''
for crawl_property in crawl_base_properties:
crawl_property_name = crawl_property[5]
crawl_property_value = crawl_property[7]
if not crawl_property_value:
continue
                    # Match against the target property name
if crawl_property_name.lower().replace(" ", "") in aim_property_name.lower().replace(" ", ""):
                        # Cases where parameters are merged
                        # Take the minimum value
# if crawl_property_name == "Supply Voltage (V) min":
# tmp_voltage_min = crawl_property_value
# if tmp_voltage_max != '':
# pv_min = tmp_voltage_min
# pv_max = tmp_voltage_max
# save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit
# pv_id = spcap_data.save_to_property(base_property_id, component_id,
# base_property_detno,
# "'" + save_value + "'", pv_max=pv_max,
# pv_min=pv_min,
# pv_unit="'" + base_property_unit + "'")
# property_json = spcap_data.get_property_json(base_property_detno, pv_id,
# base_property_id,
# base_property_name, save_value,
# min=pv_min, max=pv_max,
# unit=base_property_unit)
# properties_json.append(property_json)
# break
# else:
# continue
                        # # Take the typical value
# elif crawl_property_name == "Supply Voltage (V) typ":
# tmp_voltage_typ = crawl_property_value
# if tmp_voltage_max != '':
# pv_min = tmp_voltage_min.split(",")[0]
# pv_max = tmp_voltage_max.split(",")[0]
# pv_typ = tmp_voltage_typ
# save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit + "," + tmp_voltage_typ
# pv_id = spcap_data.save_to_property(base_property_id, component_id,
# base_property_detno,
# "'" + save_value + "'", pv_max=pv_max,
# pv_min=pv_min,
# pv_unit="'" + base_property_unit + "'")
# property_json = spcap_data.get_property_json(base_property_detno, pv_id,
# base_property_id,
# base_property_name, save_value,
# min=pv_min, max=pv_max,
# unit=base_property_unit)
# properties_json.append(property_json)
# break
# else:
# continue
                        # # Take the maximum value
# elif crawl_property_name == "Supply Voltage (V) max":
# tmp_voltage_max = crawl_property_value
# if tmp_voltage_min != '':
# pv_min = tmp_voltage_min.split(",")[0]
# pv_max = tmp_voltage_max.split(",")[0]
# save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit + "," + tmp_voltage_typ
# pv_id = spcap_data.save_to_property(base_property_id, component_id,
# base_property_detno,
# "'" + save_value + "'", pv_max=pv_max,
# pv_min=pv_min,
# pv_unit="'" + base_property_unit + "'")
# property_json = spcap_data.get_property_json(base_property_detno, pv_id,
# base_property_id,
# base_property_name, save_value,
# min=pv_min, max=pv_max,
# unit=base_property_unit)
# properties_json.append(property_json)
# break
# else:
# continue
if crawl_property_name == "Communication standards supported (ISO7816)":
tmp_communication0 = crawl_property_value
continue
                        # Take the typical value
elif crawl_property_name == "Communication standards supported (GSM) ":
tmp_communication1 = crawl_property_value
continue
                        # Take the maximum value
elif crawl_property_name == "Communication standards supported (EMV compatible)":
tmp_communication2 = crawl_property_value
pv_min = tmp_communication0
pv_max = tmp_communication1
save_value = pv_min + "," + pv_max + "," + tmp_communication2
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'",)
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,)
properties_json.append(property_json)
break
                        # Store as a range value
                        # Take the minimum value
if crawl_property_name == "Operating Temperature (°C) min" or crawl_property_name == "Operating Ambient Temperature (°C)min":
tmp_pv_min = crawl_property_value
if tmp_pv_max != '':
pv_min = tmp_pv_min.split(",")[0]
pv_max = tmp_pv_max.split(",")[0]
save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'", pv_max=pv_max,
pv_min=pv_min,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
min=pv_min, max=pv_max,
unit=base_property_unit)
properties_json.append(property_json)
break
else:
continue
                        # Take the maximum value
elif crawl_property_name == "Operating Temperature (°C) max" or crawl_property_name == "Operating Ambient Temperature (°C)max":
tmp_pv_max = crawl_property_value
if tmp_pv_max != '':
pv_min = tmp_pv_min.split(",")[0]
if not pv_min:
pv_min = "''"
pv_max = tmp_pv_max.split(",")[0]
save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'", pv_max=pv_max,
pv_min=pv_min,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
min=pv_min, max=pv_max,
unit=base_property_unit)
properties_json.append(property_json)
break
else:
continue
                        # Take the minimum value
if crawl_property_name == "Supplementary supply voltage (V) (VDDP) min":
tmp_sup_voltage_min = crawl_property_value
if tmp_sup_voltage_max != '':
pv_min = tmp_sup_voltage_min.split(",")[0]
pv_max = tmp_sup_voltage_max.split(",")[0]
save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'", pv_max=pv_max,
pv_min=pv_min,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
min=pv_min, max=pv_max,
unit=base_property_unit)
properties_json.append(property_json)
break
else:
continue
                        # Take the maximum value
elif crawl_property_name == "Supplementary supply voltage (V) (VDDP) max":
tmp_sup_voltage_max = crawl_property_value
if tmp_sup_voltage_max != '':
pv_min = tmp_sup_voltage_min.split(",")[0]
if not pv_min:
pv_min = "''"
pv_max = tmp_sup_voltage_max.split(",")[0]
save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'", pv_max=pv_max,
pv_min=pv_min,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
min=pv_min, max=pv_max,
unit=base_property_unit)
properties_json.append(property_json)
break
else:
continue
                # Store as a range value
                """ Different kinds of property values still need special handling here """
                # Type F (range) properties
if base_property_type == 'F':
                    # Try to parse the value into min and max
flag = property_value_modify.double_without_unit(crawl_property_value)
if flag:
pv_min, pv_max = flag.group(1), flag.group(2)
save_value = pv_min + base_property_unit + '~' + pv_max + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id, base_property_detno,
"'" + save_value + "'", pv_max=pv_max,
pv_min=pv_min,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
min=pv_min, max=pv_max,
unit=base_property_unit)
else:
                    # The value is numeric
try:
crawl_property_value1 = crawl_property_value.replace(base_property_unit, "").strip()
numberic = float(crawl_property_value1)
save_value = crawl_property_value + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'",
pv_numberic=crawl_property_value,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
numberic=crawl_property_value,
unit=base_property_unit)
except:
                        # The value is empty
if crawl_property_value == '' or crawl_property_value == '-':
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + crawl_property_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_flag=12)
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
crawl_property_value,
unit=base_property_unit)
                        # Cannot be handled automatically
else:
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + crawl_property_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_flag=11)
cc_modify = 1
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
crawl_property_value,
unit=base_property_unit)
properties_json.append(property_json)
break
if base_property_type == 'N':
                    # Try to convert the value to a number and store it in the numeric field
try:
crawl_property_value1 = crawl_property_value.replace(base_property_unit, "").strip()
numberic = float(crawl_property_value1)
save_value = crawl_property_value + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id, base_property_detno,
"'" + save_value + "'",
pv_numberic=crawl_property_value,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name, save_value,
numberic=crawl_property_value,
unit=base_property_unit)
except:
                        # Type N: a number followed by a unit
single_unit_flag = property_value_modify.single_with_unit(crawl_property_value)
if single_unit_flag:
str_numberic = single_unit_flag.group(1)
crawl_unit = single_unit_flag.group(3)
                            # More than one possible unit
if len(base_property_unit_list) > 1:
for rough_base_property_unit in base_property_unit_list:
if crawl_unit.lower() in rough_base_property_unit.lower():
base_property_unit = rough_base_property_unit
save_value = str_numberic + rough_base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'",
pv_numberic=str_numberic,
pv_unit="'" + rough_base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
save_value,
numberic=str_numberic,
unit=rough_base_property_unit)
else:
print("出现异常")
                        # Only one unit
else:
save_value = str_numberic + base_property_unit
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'",
pv_numberic=str_numberic,
pv_unit="'" + base_property_unit + "'")
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
save_value,
numberic=str_numberic,
unit=base_property_unit)
else:
                        # Type N range value
flag = property_value_modify.double_without_unit(crawl_property_value)
if flag:
if abs(float(flag.group(1))) == abs(float(flag.group(2))):
save_value = '+/-' + flag.group(2)
numberic = flag.group(2)
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + save_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_numberic=numberic)
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
save_value,
unit=base_property_unit,
numberic=numberic)
                            # Type N range value that cannot be handled
else:
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + crawl_property_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_flag=11)
cc_modify = 1
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
crawl_property_value,
unit=base_property_unit)
                        # Type N with malformed data
else:
                            # If the value is empty
if crawl_property_value == '' or crawl_property_value == '-':
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + crawl_property_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_flag=12)
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
crawl_property_value,
unit=base_property_unit)
                            # Cannot be handled automatically
else:
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + crawl_property_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_flag=11)
cc_modify = 1
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
crawl_property_value,
unit=base_property_unit)
properties_json.append(property_json)
break
else:
pv_id = spcap_data.save_to_property(base_property_id, component_id,
base_property_detno,
"'" + crawl_property_value + "'",
pv_unit="'" + base_property_unit + "'",
pv_flag=10)
property_json = spcap_data.get_property_json(base_property_detno, pv_id,
base_property_id,
base_property_name,
crawl_property_value,
unit=base_property_unit)
properties_json.append(property_json)
break
else:
pv_id = spcap_data.save_to_property(base_property_id, component_id, base_property_detno, 'null')
# property_json = spcap_data.get_property_json(base_property_detno, pv_id, base_property_id, base_property_name, '')
if not cc_flag:
cc_flag = insert_or_update
spcap_data.update_crawl_uuid(uuid, task_id, crawl_component_code, cc_flag=cc_flag,
cc_modify=cc_modify)
str_properties_json = str(properties_json).replace("'", "\"")
spcap_data.save_to_version(crawl_component_code, crawl_component_attach, crawl_component_img, unit, uuid,
str_properties_json, b2c_brand_json, b2c_kind_json, cmp_version)
spcap_data.conn.commit()
print("come on")
if __name__ == "__main__":
main = DataProcessing()
main.go(29) | [
"[email protected]"
]
| |
b191d1d7c71ffb57ff43d91a0d3c0ec6e2447d4d | d5cc0c9f8d94e9d020b3e50c0a125d2041dd3baa | /AttendifySite(Flask)/env/lib/python3.6/site-packages/turicreate/meta/decompiler/instructions.py | 2b179b8dbcbda593c89a9e2549d77066a4261f79 | [
"MIT"
]
| permissive | arnavgup/Attendify_iOS | be896579de4560cff36a4b163384d0eeabbb7dd9 | c2efc3273a7b99c09d918567718ac87d7f0179d8 | refs/heads/master | 2022-10-31T13:16:11.081902 | 2018-12-09T00:11:42 | 2018-12-09T00:11:42 | 158,432,022 | 3 | 2 | MIT | 2022-10-10T10:53:53 | 2018-11-20T18:10:16 | Swift | UTF-8 | Python | false | false | 15,021 | py | # -*- coding: utf-8 -*-
# Copyright © 2017 Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can
# be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
'''
Created on Jul 14, 2011
@author: sean
'''
from __future__ import print_function as _
from __future__ import division as _
from __future__ import absolute_import as _
from ..decompiler.simple_instructions import SimpleInstructions
from ..decompiler.control_flow_instructions import CtrlFlowInstructions
import _ast
from ..asttools import print_ast
from ..utils import py3, py3op, py2op
function_ops = ['CALL_FUNCTION', 'CALL_FUNCTION_KW', 'CALL_FUNCTION_VAR', 'CALL_FUNCTION_VAR_KW']
def pop_doc(stmnts):
doc = pop_assignment(stmnts, '__doc__')
assert isinstance(doc, _ast.Str) or doc is None
return doc
def pop_assignment(stmnts, name):
for i in range(len(stmnts)):
stmnt = stmnts[i]
if isinstance(stmnt, _ast.Assign) and len(stmnt.targets) == 1 \
and isinstance(stmnt.targets[0], _ast.Name) \
and isinstance(stmnt.targets[0].ctx, _ast.Store):
if stmnt.targets[0].id == name:
stmnts.pop(i)
return stmnt.value
return None
def pop_return(stmnts):
ns = len(stmnts)
for i in range(ns - 1, -1, -1):
stmnt = stmnts[i]
if isinstance(stmnt, _ast.Return):
return stmnts.pop(i)
return None
def make_module(code):
from ..decompiler.disassemble import disassemble
instructions = Instructions(disassemble(code))
stmnts = instructions.stmnt()
doc = pop_doc(stmnts)
pop_return(stmnts)
# stmnt = ast.Stmt(stmnts, 0)
if doc is not None:
stmnts = [_ast.Expr(value=doc, lineno=doc.lineno, col_offset=0)] + stmnts
ast_obj = _ast.Module(body=stmnts, lineno=0, col_offset=0)
return ast_obj
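# Hypothetical usage sketch: round-trip a compiled source string back to an AST.
#   mod = make_module(compile("x = 1", "<string>", "exec"))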
@py2op
def make_function(code, defaults=None, lineno=0):
from ..decompiler.disassemble import disassemble
instructions = Instructions(disassemble(code))
stmnts = instructions.stmnt()
if code.co_flags & 2:
vararg = None
kwarg = None
varnames = list(code.co_varnames[:code.co_argcount])
co_locals = list(code.co_varnames[code.co_argcount:])
#have var args
if code.co_flags & 4:
vararg = co_locals.pop(0)
#have kw args
if code.co_flags & 8:
kwarg = co_locals.pop()
args = [_ast.Name(id=argname, ctx=_ast.Param(), lineno=lineno, col_offset=0) for argname in varnames]
args = _ast.arguments(args=args,
defaults=defaults if defaults else [],
kwarg=kwarg,
vararg=vararg,
lineno=lineno, col_offset=0
)
if code.co_name == '<lambda>':
if len(stmnts) == 2:
if isinstance(stmnts[0], _ast.If) and isinstance(stmnts[1], _ast.Return):
assert len(stmnts[0].body) == 1
assert isinstance(stmnts[0].body[0], _ast.Return)
stmnts = [_ast.Return(_ast.IfExp(stmnts[0].test, stmnts[0].body[0].value, stmnts[1].value))]
assert len(stmnts) == 1, stmnts
assert isinstance(stmnts[0], _ast.Return)
stmnt = stmnts[0].value
ast_obj = _ast.Lambda(args=args, body=stmnt, lineno=lineno, col_offset=0)
else:
if instructions.seen_yield:
return_ = stmnts[-1]
assert isinstance(return_, _ast.Return)
assert isinstance(return_.value, _ast.Name)
assert return_.value.id == 'None'
return_.value = None
ast_obj = _ast.FunctionDef(name=code.co_name, args=args, body=stmnts, decorator_list=[], lineno=lineno, col_offset=0)
return ast_obj
@make_function.py3op
def make_function(code, defaults=None, annotations=(), kw_defaults=(), lineno=0):
from ..decompiler.disassemble import disassemble
instructions = Instructions(disassemble(code))
stmnts = instructions.stmnt()
if code.co_flags & 2:
vararg = None
kwarg = None
varnames = list(code.co_varnames[:code.co_argcount])
kwonly_varnames = list(code.co_varnames[code.co_argcount:code.co_argcount + code.co_kwonlyargcount])
co_locals = list(code.co_varnames[code.co_argcount + code.co_kwonlyargcount:])
assert (len(kw_defaults) % 2) == 0
kw_defaults = list(kw_defaults)
kw_default_dict = {}
while kw_defaults:
name = kw_defaults.pop(0)
value = kw_defaults.pop(0)
kw_default_dict[name.s] = value
kw_defaults = []
for argname in kwonly_varnames:
kw_defaults.append(kw_default_dict.pop(argname))
#have var args
if code.co_flags & 4:
vararg = co_locals.pop(0)
#have kw args
if code.co_flags & 8:
kwarg = co_locals.pop()
args = []
annotation_names = [annotation.arg for annotation in annotations]
for argname in varnames:
if argname in annotation_names:
arg = [annotation for annotation in annotations if annotation.arg == argname][0]
else:
arg = _ast.arg(annotation=None, arg=argname, lineno=lineno, col_offset=0) #@UndefinedVariable
args.append(arg)
kwonlyargs = []
for argname in kwonly_varnames:
if argname in annotation_names:
arg = [annotation for annotation in annotations if annotation.arg == argname][0]
else:
arg = _ast.arg(annotation=None, arg=argname, lineno=lineno, col_offset=0) #@UndefinedVariable
kwonlyargs.append(arg)
if 'return' in annotation_names:
arg = [annotation for annotation in annotations if annotation.arg == 'return'][0]
returns = arg.annotation
else:
returns = None
if vararg in annotation_names:
arg = [annotation for annotation in annotations if annotation.arg == vararg][0]
varargannotation = arg.annotation
else:
varargannotation = None
if kwarg in annotation_names:
arg = [annotation for annotation in annotations if annotation.arg == kwarg][0]
kwargannotation = arg.annotation
else:
kwargannotation = None
args = _ast.arguments(args=args,
defaults=defaults if defaults else [],
kwarg=kwarg,
vararg=vararg,
kw_defaults=kw_defaults,
kwonlyargs=kwonlyargs,
kwargannotation=kwargannotation,
varargannotation=varargannotation,
lineno=lineno, col_offset=0
)
if code.co_name == '<lambda>':
if len(stmnts) == 2:
if isinstance(stmnts[0], _ast.If) and isinstance(stmnts[1], _ast.Return):
assert len(stmnts[0].body) == 1
assert isinstance(stmnts[0].body[0], _ast.Return)
stmnts = [_ast.Return(_ast.IfExp(stmnts[0].test, stmnts[0].body[0].value, stmnts[1].value))]
assert isinstance(stmnts[0], _ast.Return)
stmnt = stmnts[0].value
ast_obj = _ast.Lambda(args=args, body=stmnt, lineno=lineno, col_offset=0)
else:
if instructions.seen_yield:
return_ = stmnts[-1]
assert isinstance(return_, _ast.Return)
assert isinstance(return_.value, _ast.Name)
assert return_.value.id == 'None'
return_.value = None
ast_obj = _ast.FunctionDef(name=code.co_name, args=args,
body=stmnts, decorator_list=[],
returns=returns,
lineno=lineno, col_offset=0)
return ast_obj
class StackLogger(list):
def append(self, object):
print(' + ', end='')
print_ast(object, indent='', newline='')
print()
list.append(self, object)
def pop(self, *index):
value = list.pop(self, *index)
print(' + ', end='')
print_ast(value, indent='', newline='')
print()
return value
def bitrange(x, start, stop):
return ((1 << (stop - start)) - 1) & (x >> start)
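# For example, with the Python 3 MAKE_FUNCTION oparg layout used below,
# bitrange(0x0201, 0, 8) == 0x01 (count of positional defaults) and
# bitrange(0x0201, 8, 16) == 0x02 (count of keyword-only defaults).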
level = 0
class Instructions(CtrlFlowInstructions, SimpleInstructions):
def __init__(self, ilst, stack_items=None, jump_map=False):
self.ilst_processed = []
self.ilst = ilst[:]
self.orig_ilst = ilst
self.seen_yield = False
if jump_map:
self.jump_map = jump_map
else:
self.jump_map = {}
# self.ast_stack = StackLogger()
self.ast_stack = []
if stack_items:
self.ast_stack.extend(stack_items)
@classmethod
def decompile_block(cls, ilst, stack_items=None, jump_map=False):
return Instructions(ilst, stack_items=stack_items, jump_map=jump_map)
def stmnt(self):
while len(self.ilst):
instr = self.ilst.pop(0)
self.visit(instr)
return self.ast_stack
def visit(self, instr):
global level
name = instr.opname.replace('+', '_')
method = getattr(self, name, None)
if method is None:
raise AttributeError('can not handle instruction %r' % (str(instr)))
# print(' ' * level, "+ visit:", repr(instr))
# level += 1
method(instr)
# level -= 1
# print(' ' * level, "- stack:", self.ast_stack)
def make_block(self, to, inclusive=True, raise_=True):
# print("make_block", to,)
block = []
while len(self.ilst):
instr = self.ilst.pop(0)
block.append(instr)
# instr_i = self.jump_map.get(instr.i, instr.i)
instr_i = instr.i
if to == instr_i:
if not inclusive:
instr = block.pop()
self.ilst.insert(0, instr)
break
else:
if raise_:
# print(block)
raise IndexError("no instruction i=%s " % (to,))
return block
@py3op
def MAKE_FUNCTION(self, instr):
code = self.ast_stack.pop()
ndefaults = bitrange(instr.oparg, 0, 8)
nkwonly_defaults = bitrange(instr.oparg, 8, 16)
nannotations = bitrange(instr.oparg, 16, 32) - 1
annotations = []
for i in range(nannotations):
annotations.insert(0, self.ast_stack.pop())
kw_defaults = []
for i in range(nkwonly_defaults * 2):
kw_defaults.insert(0, self.ast_stack.pop())
defaults = []
for i in range(ndefaults):
defaults.insert(0, self.ast_stack.pop())
function = make_function(code, defaults, lineno=instr.lineno, annotations=annotations, kw_defaults=kw_defaults)
doc = code.co_consts[0] if code.co_consts else None
if isinstance(doc, str):
function.body.insert(0, _ast.Expr(value=_ast.Str(s=doc, lineno=instr.lineno, col_offset=0),
lineno=instr.lineno, col_offset=0))
self.ast_stack.append(function)
@MAKE_FUNCTION.py2op
def MAKE_FUNCTION(self, instr):
code = self.ast_stack.pop()
ndefaults = instr.oparg
defaults = []
for i in range(ndefaults):
defaults.insert(0, self.ast_stack.pop())
function = make_function(code, defaults, lineno=instr.lineno)
doc = code.co_consts[0] if code.co_consts else None
if isinstance(doc, str):
function.body.insert(0, _ast.Expr(value=_ast.Str(s=doc, lineno=instr.lineno, col_offset=0),
lineno=instr.lineno, col_offset=0))
self.ast_stack.append(function)
def LOAD_LOCALS(self, instr):
self.ast_stack.append('LOAD_LOCALS')
@py3op
def LOAD_BUILD_CLASS(self, instr):
class_body = []
body_instr = instr
while body_instr.opname not in function_ops:
body_instr = self.ilst.pop(0)
class_body.append(body_instr)
call_func = self.decompile_block(class_body, stack_items=[None]).stmnt()
assert len(call_func) == 1
call_func = call_func[0]
func_def = call_func.args[0]
code = func_def.body
name = call_func.args[1].s
bases = call_func.args[2:]
keywords = call_func.keywords
kwargs = call_func.kwargs
starargs = call_func.starargs
if isinstance(code[0], _ast.Expr):
_name = code.pop(1)
_doc = code.pop(1)
elif isinstance(code[0], _ast.Assign):
_name = code.pop(0)
else:
assert False
ret = code.pop(-1)
assert isinstance(ret, _ast.Return)
class_ = _ast.ClassDef(name=name, bases=bases, body=code, decorator_list=[],
kwargs=kwargs, keywords=keywords, starargs=starargs,
lineno=instr.lineno, col_offset=0,
)
self.ast_stack.append(class_)
@py2op
def BUILD_CLASS(self, instr):
call_func = self.ast_stack.pop()
assert isinstance(call_func, _ast.Call)
func = call_func.func
assert isinstance(func, _ast.FunctionDef)
code = func.body
pop_assignment(code, '__module__')
doc = pop_doc(code)
ret = code.pop()
assert isinstance(ret, _ast.Return) and ret.value == 'LOAD_LOCALS'
bases = self.ast_stack.pop()
assert isinstance(bases, _ast.Tuple)
bases = bases.elts
name = self.ast_stack.pop()
class_ = _ast.ClassDef(name=name, bases=bases, body=code, decorator_list=[],
lineno=instr.lineno, col_offset=0)
self.ast_stack.append(class_)
def LOAD_CLOSURE(self, instr):
self.ast_stack.append('CLOSURE')
def MAKE_CLOSURE(self, instr):
return self.MAKE_FUNCTION(instr)
| [
"[email protected]"
]
| |
c8d9704e46969f972fc3a2f5b971e4f09044ca2b | a904358b873d4881a9e61555bfc7a97169c696a8 | /src/djnydus/db/shards/routers.py | c8d5539eb60791895d7eacc5c92f2c1671386aed | [
"Apache-2.0"
]
| permissive | numan/nydus-django | 945e91df73af34941a2a281c2b5e2a706f87a3b5 | b652ea28d7f801f453a64e4ec78899e2acfc7606 | refs/heads/master | 2021-01-18T08:19:06.320484 | 2013-03-26T01:49:42 | 2013-03-26T01:49:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 142 | py | """
djnydus.shards.router
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
| [
"[email protected]"
]
| |
35b078652ae742c7fdce54d3d18a3085c0dfb8ae | 85671094e4f1c1f221ff078faea3ee9f93795b57 | /examples/dfp/v201311/contact_service/update_contacts.py | e80bc0cd172f861db7dc1944c336f0e6b5f71d55 | [
"Apache-2.0"
]
| permissive | jdilallo/jdilallo-test | 63631c96c8070c60ce7c07512aa51f370d8fbadf | 8fb9bf43e7c99d5cb198c5587897f8b2514ca4c0 | refs/heads/master | 2020-05-18T15:35:37.264949 | 2014-03-04T18:19:31 | 2014-03-04T18:19:31 | 14,376,457 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,282 | py | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates contact addresses.
To determine which contacts exist, run get_all_contacts.py.
Tags: ContactService.updateContacts
ContactService.getContactsByStatement
"""
__author__ = ('Nicholas Chen',
'Joseph DiLallo')
# Import appropriate classes from the client library.
from googleads import dfp
# Set the ID of the contact to update.
CONTACT_ID = 'INSERT_CONTACT_ID_HERE'
def main(client, contact_id):
# Initialize appropriate service.
contact_service = client.GetService('ContactService', version='v201311')
# Create statement object to select the single contact by ID.
values = [{
'key': 'id',
'value': {
'xsi_type': 'NumberValue',
'value': contact_id
}
}]
query = 'WHERE id = :id'
statement = dfp.FilterStatement(query, values, 1)
# Get contacts by statement.
response = contact_service.getContactsByStatement(
statement.ToStatement())
if 'results' in response:
updated_contacts = []
for contact in response['results']:
contact['address'] = '123 New Street, New York, NY, 10011'
updated_contacts.append(contact)
# Update the contact on the server.
contacts = contact_service.updateContacts(updated_contacts)
# Display results.
for contact in contacts:
print (('Contact with ID \'%s\', name \'%s\', and address \'%s\' '
'was updated.')
% (contact['id'], contact['name'], contact['address']))
else:
print 'No contacts found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, CONTACT_ID)
| [
"[email protected]"
]
| |
5cb2f0cea82e8f0c529c8cc80985dc43f49abcf5 | e23a4f57ce5474d468258e5e63b9e23fb6011188 | /070_oop/001_classes/_exercises/exercises/ITDN Python RUS/001_Vvedenie v OOP/05-__init__.py | 807756be14dc81c13918a5fd56093bd1ff2c31ee | []
| no_license | syurskyi/Python_Topics | 52851ecce000cb751a3b986408efe32f0b4c0835 | be331826b490b73f0a176e6abed86ef68ff2dd2b | refs/heads/master | 2023-06-08T19:29:16.214395 | 2023-05-29T17:09:11 | 2023-05-29T17:09:11 | 220,583,118 | 3 | 2 | null | 2023-02-16T03:08:10 | 2019-11-09T02:58:47 | Python | UTF-8 | Python | false | false | 897 | py | # -*- coding: utf-8 -*-
# An object's initial state should be set up in the special
# constructor method __init__, which is called automatically
# after an instance of the class is created. Its parameters
# are supplied when the object is created.
# A class describing a person
class Person:
    # Constructor
def __init__(self, name, age):
self.name = name
self.age = age
    # The method from the previous example
def print_info(self):
print(self.name, 'is', self.age)
# Create instances of the class
alex = Person('Alex', 18)
john = Person('John', 20)
# Call the print_info method
alex.print_info()
john.print_info() | [
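# Expected output:
# Alex is 18
# John is 20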
"[email protected]"
]
| |
6ca840de569e5ed1a4938ec9457690c6d2c6b763 | cb08220a43b2ba0808f4decdb674f9ccf06a60cc | /project/backend/config/urls.py | 2f7e3f754d061679631bf1c66f554fcf5092d10a | []
| no_license | Hagen013/django-boilerplate | 06979916cbf2edcd00bf5dbfbc5aef4ef55303dc | e6327717e338c4afebd255d754aefae76e7223a3 | refs/heads/master | 2020-03-25T03:19:01.485758 | 2018-08-08T20:25:07 | 2018-08-08T20:25:07 | 143,334,912 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 113 | py | from django.urls import path, include
urlpatterns = [
path("api/", include("api.urls", namespace="api")),
]
| [
"="
]
| = |
43c4a53b135ce37bc89f7010be67c1fa6fd12754 | 83762a4888dab11be81c3e7f6c4e4357c8079ae8 | /utils/testsuite/testsuite.py | e5f45737cd5701cdadae33479e21ec096a037590 | [
"MIT"
]
| permissive | alloy/hermes | 1dc62f278d3ab11ca8a21868452f87f148500cba | 817d558140a829bdd3094698e20ae476d171bb66 | refs/heads/master | 2023-02-12T08:08:49.896373 | 2020-04-23T18:43:29 | 2020-04-23T18:45:57 | 258,312,171 | 1 | 0 | MIT | 2020-04-23T19:40:28 | 2020-04-23T19:40:27 | null | UTF-8 | Python | false | false | 36,642 | py | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import enum
import os
import re
import subprocess
import sys
import tempfile
import textwrap
import time
from collections import namedtuple
from multiprocessing import Pool, Value
from os.path import basename, isdir, isfile, join, splitext
try:
from testsuite.testsuite_blacklist import (
BLACK_LIST,
PERMANENT_BLACK_LIST,
UNSUPPORTED_FEATURES,
PERMANENT_UNSUPPORTED_FEATURES,
)
import testsuite.esprima_test_runner as esprima
except ImportError:
# Hacky way to handle non-buck builds that call the file immediately.
from testsuite_blacklist import (
BLACK_LIST,
PERMANENT_BLACK_LIST,
UNSUPPORTED_FEATURES,
PERMANENT_UNSUPPORTED_FEATURES,
)
import esprima_test_runner as esprima
## This is a simple script that runs the hermes compiler on
## external test suites. The script expects to find the hermes compiler under
## ./bin/hermes. The script adds
## some basic test built-ins such as assertTrue, assertEquals that are used in
## the V8 test suite.
## How results are computed:
## If a test is blacklisted or contains unsupported ES6 features,
## it is skipped, and thus not executed at all.
## Result classes:
## Compile fail: The Hermes compiler failed when it should have succeeded,
## or vice versa.
## Compile timeout: The Hermes compiler timed out.
## Execute fail: Bytecode execution with the chosen backend
## failed when it should have succeeded, or vice versa.
## Execute timeout: Bytecode execution with the chosen backend timed out.
## Strictness:
## The default strictness mode is currently non-strict.
## For the test suites themselves:
## - test262: Require the test to pass in both strictness modes,
## generating source code for both modes automatically.
## - mjsunit: Run the tests in non-strict mode,
## because the test suite adds its own "use strict" directives.
## The content of this string is prepended to the test files and is used to
## provide the basic test built-ins.
test_builtins_content = """
// v8 test harness:
function internal_arraysEqual(a, b) {
if (a === b) return true;
if (a.length != b.length) return false;
for (var i = 0; i < a.length; ++i) { if (a[i] !== b[i]) return false; }
return true;
}
function builtin_nop(x) { return x; }
function builtin_false() { return false; }
var nopSentinel = {};
function v8pragma_HaveSameMap(obj1, obj2) {
// This function doesn't work for all tests, but works for many.
var keysAreSubset = function(lhs, rhs) {
for (var property in lhs) {
if (lhs[property] !== rhs[property]) {
return false;
}
}
return true;
}
return keysAreSubset(obj1, obj2) && keysAreSubset(obj2, obj1);
}
function v8pragma_FunctionSetPrototype(f, p) {
// Set f.prototype.
f.prototype = p;
}
function v8pragma_ClassOf(obj) {
// Turn "[object ClassName]" into just "ClassName".
return Object.prototype.toString.call(obj).slice(8, -1);
}
function v8pragma_Call(f, thisVal) {
return f.apply(thisVal, Array.prototype.slice.call(arguments, 2));
}
function v8pragma_StringCharFromCode(i) {
return String.fromCharCode(i);
}
function v8pragma_StringCharCodeAt(s, i) {
return s.charCodeAt(i);
}
// debug variable sometimes used in mjsunit.
// Implemented the same way JSC does.
var debug = function(s) {
print('-->', s);
};
// The idea here is that some pragmas are meaningless for our JS interpreter,
// but we don't want to throw out the whole test case. In those cases, just
// throw out the assertions in those test cases resulting from checking the
// results of those pragmas.
function v8pragma_NopSentinel() {
return nopSentinel;
}
// test262 requirements.
// Leave the unimplemented features unset in $262.
var $262 = {};
$262.global = this;
$262.evalScript = eval;
if (typeof HermesInternal === 'object') {
$262.detachArrayBuffer = HermesInternal.detachArrayBuffer;
}
// Browser functions:
var alert = print;
"""
# Colors for stdout.
@enum.unique
class Color(enum.Enum):
RESET = enum.auto()
RED = enum.auto()
GREEN = enum.auto()
def __str__(self):
if not sys.stdout.isatty():
return ""
return {
Color.RESET.value: "\033[0m",
Color.RED.value: "\033[31m",
Color.GREEN.value: "\033[32m",
}[self.value]
# These flags indicate the status of a job.
@enum.unique
class TestFlag(enum.Enum):
TEST_FAILED = enum.auto()
TEST_PASSED = enum.auto()
TEST_SKIPPED = enum.auto()
TEST_PERMANENTLY_SKIPPED = enum.auto()
TEST_UNEXPECTED_PASSED = enum.auto()
COMPILE_FAILED = enum.auto()
COMPILE_TIMEOUT = enum.auto()
EXECUTE_FAILED = enum.auto()
EXECUTE_TIMEOUT = enum.auto()
def __str__(self):
return {
TestFlag.TEST_FAILED.value: "TEST_FAILED",
TestFlag.TEST_PASSED.value: "TEST_PASSED",
TestFlag.TEST_SKIPPED.value: "TEST_SKIPPED",
TestFlag.TEST_PERMANENTLY_SKIPPED.value: "TEST_PERMANENTLY_SKIPPED",
TestFlag.TEST_UNEXPECTED_PASSED.value: "TEST_UNEXPECTED_PASSED",
TestFlag.COMPILE_FAILED.value: "COMPILE_FAILED",
TestFlag.COMPILE_TIMEOUT.value: "COMPILE_TIMEOUT",
TestFlag.EXECUTE_FAILED.value: "EXECUTE_FAILED",
TestFlag.EXECUTE_TIMEOUT.value: "EXECUTE_TIMEOUT",
}[self.value]
TIMEOUT_COMPILER = 40
TIMEOUT_VM = 200
includesMatcher = re.compile(r"includes:\s*\[(.*)\]")
# This matches a special case in which the includes looks like:
# includes:
# - foo.js
# This regex works only because the few cases which use this pattern
# only include one file.
specialIncludesMatcher = re.compile(
"includes:\n" r".*-\s*(.*\.js)" "\n", re.MULTILINE | re.DOTALL
)
def generateSource(content, strict, suite, flags):
"""
Generate the source code for a test case resulting from resolving pragmas in
the given file and adding a use-strict directive, if necessary.
Return a tuple: (source, includes)
"""
# The raw flag specifies that the source code shouldn't be modified.
if "raw" in flags:
return (content, [])
v8_pragmas = {
"%OptimizeObjectForAddingMultipleProperties": "builtin_nop",
"%ClearFunctionTypeFeedback": "builtin_nop",
"%OptimizeFunctionOnNextCall": "builtin_nop",
"%DeoptimizeFunction": "builtin_nop",
"%DeoptimizeNow": "builtin_nop",
"%_DeoptimizeNow": "builtin_nop",
"%NeverOptimizeFunction": "builtin_nop",
"%OptimizeOsr": "builtin_nop",
"%ClearFunctionTypeFeedback": "builtin_nop",
"%BaselineFunctionOnNextCall": "builtin_nop",
"%SetForceInlineFlag": "builtin_nop",
"%OptimizeObjectForAddingMultipleProperties": "builtin_nop",
"%ToFastProperties": "builtin_nop",
"%NormalizeElements": "builtin_nop",
"%ArrayBufferNeuter": "HermesInternal.detachArrayBuffer",
# ArrayBufferDetach is the more modern version of ArrayBufferNeuter.
"%ArrayBufferDetach": "HermesInternal.detachArrayBuffer",
"%RunMicrotasks": "builtin_nop",
"%SetAllocationTimeout": "builtin_nop",
"%UnblockConcurrentRecompilation": "builtin_nop",
"%DebugPrint": "builtin_nop",
"%HaveSameMap": "v8pragma_HaveSameMap",
"%HasFastDoubleElements": "v8pragma_NopSentinel",
"%HasFastSmiElements": "v8pragma_NopSentinel",
"%HasFastObjectElements": "v8pragma_NopSentinel",
"%HasFastHoleyElements": "v8pragma_NopSentinel",
"%HasFastProperties": "v8pragma_NopSentinel",
"%IsAsmWasmCode": "v8pragma_NopSentinel",
"%IsNotAsmWasmCode": "v8pragma_NopSentinel",
"%NotifyContextDisposed": "v8pragma_NopSentinel",
"%FunctionSetPrototype": "v8pragma_FunctionSetPrototype",
"%_ClassOf": "v8pragma_ClassOf",
"%_Call": "v8pragma_Call",
"%RunningInSimulator": "builtin_false",
"%IsConcurrentRecompilationSupported": "builtin_false",
"%_StringCharFromCode": "v8pragma_StringCharFromCode",
"%_StringCharCodeAt": "v8pragma_StringCharCodeAt",
}
for pragma, replacement in v8_pragmas.items():
content = content.replace(pragma, replacement)
source = ""
if strict:
source += "'use strict';\n"
includes = []
if suite:
if "test262" in suite:
match = includesMatcher.search(content)
includes = ["assert.js", "sta.js"]
if match:
includes += [i.strip() for i in match.group(1).split(",")]
match = specialIncludesMatcher.search(content)
if match:
includes.append(match.group(1))
for i in includes:
filepath = join(suite, "harness", i)
with open(filepath, "rb") as f:
source += f.read().decode("utf-8") + "\n"
if "mjsunit" in suite:
filepath = join(suite, "mjsunit.js")
with open(filepath, "rb") as f:
source += f.read().decode("utf-8") + "\n"
source += test_builtins_content
source += content
return (source, includes)
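# Illustration (an assumption, not part of the original file): for a
# strict-mode test262 test whose frontmatter contains
# `includes: [propertyHelper.js]`, generateSource returns source assembled as
# "'use strict';\n" + harness/assert.js + harness/sta.js +
# harness/propertyHelper.js + test_builtins_content + the pragma-substituted
# test body, with includes == ["assert.js", "sta.js", "propertyHelper.js"].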
evalMatcher = re.compile(r"\beval\s*\(")
indirectEvalMatcher = re.compile(r"\(.*,\s*eval\)\s*\(")
assignEvalMatcher = re.compile(r"=\s*eval\s*;")
withMatcher = re.compile(r"\bwith\s*\(")
constMatcher = re.compile(r"\bconst\b")
negativeMatcher = re.compile(
r"""
/\*---.*
negative:.*\n
\s*phase:\s*(\S+).*\n
\s*type:\s*(\S+).*\n
---\*/
""",
re.MULTILINE | re.DOTALL | re.VERBOSE,
)
negativeMatcher2 = re.compile(
r"""
/\*---.*
negative:.*\n
\s*type:\s*(\S+).*\n
\s*phase:\s*(\S+).*\n
---\*/
""",
re.MULTILINE | re.DOTALL | re.VERBOSE,
)
flagsMatcher = re.compile(r"\s*flags:\s*\[(.*)\]")
featuresMatcher = re.compile(r"\s*features:\s*\[(.*)\]")
# Alternate features syntax has "features:" and then bullet points using "-".
featuresMatcher2 = re.compile(r"\s*features:\s*\n(.*)\*\/", re.MULTILINE | re.DOTALL)
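# Illustration (an assumption, not part of the original file): a typical
# test262 frontmatter block that the matchers above are designed to parse:
#
#   /*---
#   negative:
#     phase: parse
#     type: SyntaxError
#   flags: [onlyStrict]
#   features: [Proxy, Symbol.species]
#   ---*/
#
# negativeMatcher captures ("parse", "SyntaxError"), flagsMatcher captures
# "onlyStrict", and featuresMatcher captures "Proxy, Symbol.species".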
def getSuite(filename):
suite = None
# Try all possible test suites to see which one we're in.
for s in ["test262", "mjsunit", "CVEs", "esprima"]:
if (s + "/") in filename:
suite = filename[: filename.find(s) + len(s)]
break
return suite
verbose = False
def printVerbose(s):
global verbose
if verbose:
print(s)
istty = sys.stdout.isatty()
completed = Value("i", 0)
ttyWidth = os.get_terminal_size().columns if istty else 0
def showStatus(filename):
global completed, istty, verbose, count
if istty and not verbose and count > 0:
with completed.get_lock():
record = ("\r{:" + str(ttyWidth) + "s}\n").format("Testing " + filename)
status = "{:06.2f}% ({:d} / {:d})".format(
100.0 * completed.value / count, completed.value, count
)
sys.stdout.write(record + status)
sys.stdout.flush()
completed.value += 1
else:
print("Testing " + filename)
es6_args = ["-Xes6-proxy", "-Xes6-symbol"]
extra_run_args = ["-Xhermes-internal-test-methods"]
extra_compile_flags = ["-fno-static-builtins"]
def fileInBlacklist(filename):
for blName in BLACK_LIST + PERMANENT_BLACK_LIST:
if isinstance(blName, str):
if blName in filename:
return True
else:
# Assume it's a regex if it's not a string.
if blName.search(filename):
return True
return False
def fileInPermanentBlacklist(filename):
for blName in PERMANENT_BLACK_LIST:
if blName in filename:
return True
return False
# should_run: bool, If the test should run
# skip_reason: str, Reason for skipping, if the test shouldn't be run.
# Empty if the test should be run (str)
# permanent: bool, If the test shouldn't be run, whether that condition is permanent
# flags: Set[str], The flags that were found for the file
# strict_modes: List[str], The strict modes that this file should be run with
TestContentParameters = namedtuple(
"TestContentFlags",
["should_run", "skip_reason", "permanent", "flags", "strict_modes"],
)
def testShouldRun(filename, content):
suite = getSuite(filename)
# Determine flags and strict modes before deciding to skip a test case.
flags = set()
strictModes = []
if not suite:
strictModes = [False]
else:
if "test262" in suite:
match = flagsMatcher.search(content)
if match:
flags = {flag.strip() for flag in match.group(1).split(",")}
if "onlyStrict" in flags:
strictModes = [True]
elif "noStrict" in flags or "raw" in flags:
strictModes = [False]
else:
strictModes = [True, False]
else:
strictModes = [True, False]
elif "mjsunit" in suite:
strictModes = [False]
elif "CVEs" in suite:
strictModes = [False]
else:
raise Exception("Unknown suite")
# Now find if this test case should be skipped.
if "async" in flags:
# We don't support async operations.
return TestContentParameters(
False, "Skipping test with async", False, flags, strictModes
)
if "module" in flags:
# We don't support module code.
return TestContentParameters(
False, "Skipping test with modules", False, flags, strictModes
)
# Be picky about which tests to run unless we are running the CVEs suite
runAll = "CVEs" in suite
if not runAll:
# Skip tests that use 'eval'.
if evalMatcher.search(content):
return TestContentParameters(
False, "Skipping test with eval()", True, flags, strictModes
)
# Skip tests that use indirect 'eval' that look like (1, eval)(...).
if indirectEvalMatcher.search(content):
return TestContentParameters(
False, "Skipping test with indirect eval()", True, flags, strictModes
)
# Skip tests that use indirect 'eval' by assigning a variable to eval.
if assignEvalMatcher.search(content):
return TestContentParameters(
False, "Skipping test with alias to eval()", True, flags, strictModes
)
# Skip tests that use 'with'.
if withMatcher.search(content):
return TestContentParameters(
False, "Skipping test with with()", True, flags, strictModes
)
if constMatcher.search(content):
return TestContentParameters(
False, "Skipping test with 'const'", False, flags, strictModes
)
if suite and "test262" in suite:
# Skip unsupported features.
match = featuresMatcher.search(content)
match2 = featuresMatcher2.search(content)
features = set()
if match:
features.update(feature.strip() for feature in match.group(1).split(","))
if match2:
features.update(
feature.strip(" \t\n\r-") for feature in match2.group(1).split("\n")
)
features.discard("")
for f in features:
if f in UNSUPPORTED_FEATURES + PERMANENT_UNSUPPORTED_FEATURES:
return TestContentParameters(
False,
"Skipping unsupported feature: " + f,
f in PERMANENT_UNSUPPORTED_FEATURES,
flags,
strictModes,
)
return TestContentParameters(True, "", False, flags, strictModes)
ESPRIMA_TEST_STATUS_MAP = {
esprima.TestStatus.TEST_PASSED: TestFlag.TEST_PASSED,
esprima.TestStatus.TEST_FAILED: TestFlag.COMPILE_FAILED,
esprima.TestStatus.TEST_SKIPPED: TestFlag.TEST_SKIPPED,
esprima.TestStatus.TEST_TIMEOUT: TestFlag.COMPILE_TIMEOUT,
}
def runTest(filename, test_blacklist, keep_tmp, binary_path, hvm, esprima_runner):
"""
Runs a single js test pointed by filename
"""
baseFileName = basename(filename)
suite = getSuite(filename)
blacklisted = fileInBlacklist(filename)
skippedType = (
TestFlag.TEST_PERMANENTLY_SKIPPED
if fileInPermanentBlacklist(filename)
else TestFlag.TEST_SKIPPED
)
if blacklisted and not test_blacklist:
printVerbose(
"Skipping test in blacklist{}: {}".format(
" (permanently)"
if skippedType is TestFlag.TEST_PERMANENTLY_SKIPPED
else "",
filename,
)
)
return (skippedType, "", 0)
showStatus(filename)
if "esprima" in suite:
hermes_path = os.path.join(binary_path, "hermes")
test_res = esprima_runner.run_test(filename, hermes_path)
return (
ESPRIMA_TEST_STATUS_MAP[test_res[0]],
"" if test_res[0] == esprima.TestStatus.TEST_PASSED else test_res[1],
0,
)
content = open(filename, "rb").read().decode("utf-8")
shouldRun, skipReason, permanent, flags, strictModes = testShouldRun(
filename, content
)
if not shouldRun:
skippedType = (
TestFlag.TEST_SKIPPED
if not permanent
else TestFlag.TEST_PERMANENTLY_SKIPPED
)
if not test_blacklist:
printVerbose(
skipReason
+ "{}: ".format(" (permanently)" if permanent else "")
+ filename
)
return (skippedType, "", 0)
# Check if the test is expected to fail, and how.
negativePhase = ""
m = negativeMatcher.search(content)
if m:
negativePhase = m.group(1)
else:
m = negativeMatcher2.search(content)
if m:
negativePhase = m.group(2)
# Report the max duration of any successful run for the variants of a test.
# Unsuccessful runs are ignored for simplicity.
max_duration = 0
for strictEnabled in strictModes:
temp = tempfile.NamedTemporaryFile(
prefix=splitext(baseFileName)[0] + "-", suffix=".js", delete=False
)
source, includes = generateSource(content, strictEnabled, suite, flags)
source = source.encode("utf-8")
if "testIntl.js" in includes:
# No support for multiple Intl constructors in that file.
return (TestFlag.TEST_SKIPPED, "", 0)
temp.write(source)
temp.close()
printVerbose("\n==============")
printVerbose("Strict Mode: {}".format(str(strictEnabled)))
printVerbose("Temp js file name: " + temp.name)
errString = ""
binfile = tempfile.NamedTemporaryFile(
prefix=splitext(baseFileName)[0] + "-", suffix=".hbc", delete=False
)
binfile.close()
for optEnabled in (True, False):
printVerbose("\nRunning with Hermes...")
printVerbose("Optimization: {}".format(str(optEnabled)))
run_vm = True
start = time.time()
# Compile to bytecode with Hermes.
try:
printVerbose("Compiling: {} to {}".format(filename, binfile.name))
args = (
[
os.path.join(binary_path, "hermes"),
temp.name,
"-hermes-parser",
"-emit-binary",
"-out",
binfile.name,
]
+ es6_args
+ extra_compile_flags
)
if optEnabled:
args.append("-O")
else:
args.append("-O0")
if strictEnabled:
args.append("-strict")
else:
args.append("-non-strict")
subprocess.check_output(
args, timeout=TIMEOUT_COMPILER, stderr=subprocess.STDOUT
)
if negativePhase == "early" or negativePhase == "parse":
run_vm = False
printVerbose(
"FAIL: Compilation failure expected on {} with Hermes".format(
baseFileName
)
)
# If the test was in the blacklist, it was possible a
# compiler failure was expected. Else, it is unexpected and
# will return a failure.
return (
(skippedType, "", 0)
if blacklisted
else (TestFlag.COMPILE_FAILED, "", 0)
)
except subprocess.CalledProcessError as e:
run_vm = False
if negativePhase != "early" and negativePhase != "parse":
printVerbose(
"FAIL: Compilation failed on {} with Hermes".format(
baseFileName
)
)
errString = e.output.decode("utf-8").strip()
printVerbose(textwrap.indent(errString, "\t"))
return (
(skippedType, "", 0)
if blacklisted
else (TestFlag.COMPILE_FAILED, errString, 0)
)
printVerbose("PASS: Hermes correctly failed to compile")
except subprocess.TimeoutExpired:
printVerbose("FAIL: Compilation timed out on {}".format(baseFileName))
return (
(skippedType, "", 0)
if blacklisted
else (TestFlag.COMPILE_TIMEOUT, "", 0)
)
# If the compilation succeeded, run the bytecode with the specified VM.
if run_vm:
try:
printVerbose("Running with HBC VM: {}".format(filename))
# Run the hermes vm.
args = (
[os.path.join(binary_path, hvm), binfile.name]
+ es6_args
+ extra_run_args
)
env = {"LC_ALL": "en_US.UTF-8"}
if sys.platform == "linux":
env["ICU_DATA"] = binary_path
subprocess.check_output(
args, timeout=TIMEOUT_VM, stderr=subprocess.STDOUT, env=env
)
if negativePhase == "runtime":
printVerbose("FAIL: Expected execution to throw")
return (
(skippedType, "", 0)
if blacklisted
else (TestFlag.EXECUTE_FAILED, "", 0)
)
else:
printVerbose("PASS: Execution completed successfully")
except subprocess.CalledProcessError as e:
if negativePhase != "runtime":
printVerbose(
"FAIL: Execution of {} threw unexpected error".format(
filename
)
)
printVerbose("Return code: {}".format(e.returncode))
if e.output:
printVerbose("Output:")
errString = e.output.decode("utf-8").strip()
printVerbose(textwrap.indent(errString, "\t"))
else:
printVerbose("No output received from process")
return (
(skippedType, "", 0)
if blacklisted
else (TestFlag.EXECUTE_FAILED, errString, 0)
)
else:
printVerbose(
"PASS: Execution of binary threw an error as expected"
)
except subprocess.TimeoutExpired:
printVerbose("FAIL: Execution of binary timed out")
return (
(skippedType, "", 0)
if blacklisted
else (TestFlag.EXECUTE_TIMEOUT, "", 0)
)
max_duration = max(max_duration, time.time() - start)
if not keep_tmp:
os.unlink(temp.name)
os.unlink(binfile.name)
if blacklisted:
# If the test was blacklisted, but it passed successfully, consider that
# an error case.
printVerbose("FAIL: A blacklisted test completed successfully")
return (TestFlag.TEST_UNEXPECTED_PASSED, "", max_duration)
else:
printVerbose("PASS: Test completed successfully")
return (TestFlag.TEST_PASSED, "", max_duration)
def makeCalls(params, onlyfiles, rangeLeft, rangeRight):
global count
# Store all test parameters in calls[].
calls = []
count = -1
for f in onlyfiles:
count += 1
if count < rangeLeft or count > rangeRight:
continue
calls.append((f,) + params)
return calls
def calcParams(params):
return (params[0], runTest(*params))
def testLoop(calls, jobs, fail_fast, num_slowest_tests):
results = []
# Histogram for results from the Hermes compiler.
resultsHist = {
TestFlag.COMPILE_FAILED: 0,
TestFlag.COMPILE_TIMEOUT: 0,
TestFlag.EXECUTE_FAILED: 0,
TestFlag.EXECUTE_TIMEOUT: 0,
TestFlag.TEST_PASSED: 0,
TestFlag.TEST_SKIPPED: 0,
TestFlag.TEST_PERMANENTLY_SKIPPED: 0,
TestFlag.TEST_UNEXPECTED_PASSED: 0,
}
slowest_tests = [("", 0)] * num_slowest_tests
with Pool(processes=jobs) as pool:
for res in pool.imap_unordered(calcParams, calls, 1):
testname = res[0]
results.append(res)
(hermesStatus, errString, duration) = res[1]
resultsHist[hermesStatus] += 1
insert_pos = len(slowest_tests)
for i, (_, other_duration) in reversed(list(enumerate(slowest_tests))):
if duration < other_duration:
break
else:
insert_pos = i
if insert_pos < len(slowest_tests):
# If this was one of the slowest tests, push it into the list
# and drop the bottom of the list.
slowest_tests = (
slowest_tests[:insert_pos]
+ [(testname, duration)]
+ slowest_tests[insert_pos:-1]
)
if (
fail_fast
and hermesStatus != TestFlag.TEST_PASSED
and hermesStatus != TestFlag.TEST_SKIPPED
and hermesStatus != TestFlag.TEST_PERMANENTLY_SKIPPED
):
break
# Filter out missing test names in case there were fewer tests run than the top slowest tests.
slowest_tests = [
(testName, duration) for testName, duration in slowest_tests if testName
]
return results, resultsHist, slowest_tests
def get_arg_parser():
parser = argparse.ArgumentParser(description="Run javascript tests with Hermes.")
parser.add_argument(
"paths",
type=str,
nargs="+",
help="Paths to test suite, can be either dir or file name",
)
parser.add_argument(
"-c",
"--chunk",
dest="chunk",
default=-1,
type=int,
help="Chunk ID (0, 1, 2), to only process 1/3 of all tests",
)
parser.add_argument(
"-f",
"--fast-fail",
dest="fail_fast",
action="store_true",
help="Exit script immediately when a test failed.",
)
parser.add_argument(
"-k",
"--keep-tmp",
dest="keep_tmp",
action="store_true",
help="Keep temporary files of successful tests.",
)
parser.add_argument(
"--test-blacklist",
dest="test_blacklist",
action="store_true",
help="Also test if tests in the blacklist fail",
)
parser.add_argument(
"-a",
"--show-all",
dest="show_all",
action="store_true",
help="show results of successful tests.",
)
parser.add_argument(
"--hvm-filename",
dest="hvm_filename",
default="hvm",
help="Filename for hvm binary (e.g., hvm-lean)",
)
parser.add_argument(
"-j",
"--jobs",
dest="jobs",
default=None,
type=int,
help="Number of jobs to run simultaneously. By default "
+ "equal to the number of CPUs.",
)
parser.add_argument(
"-m",
"--match",
dest="match",
default=None,
type=str,
help="Optional. Substring that the test filename must "
"contain in order to run.",
)
parser.add_argument(
"-s",
"--source",
dest="source",
action="store_true",
help="Instead of running any tests, print the source of "
"the matched test case (use -m/--match) to standard "
"output, including any generated use-strict "
"directives or stubbed pragmas. (You could then "
"pipe this to hermes.)",
)
parser.add_argument(
"--num-slowest-tests",
dest="num_slowest_tests",
type=int,
default=10,
help="Print the top N tests that take the longest time to execute on "
"average, where N is the option value",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
default=False,
action="store_true",
help="Show intermediate output",
)
return parser
def run(
paths,
chunk,
fail_fast,
binary_path,
hvm,
jobs,
is_verbose,
match,
source,
test_blacklist,
num_slowest_tests,
keep_tmp,
show_all,
):
global count
global verbose
verbose = is_verbose
onlyfiles = []
for path in paths:
if isdir(path):
for root, _dirnames, filenames in os.walk(path):
for filename in filenames:
onlyfiles.append(os.path.join(root, filename))
elif isfile(path):
onlyfiles.append(path)
else:
print("Invalid path: " + path)
sys.exit(1)
onlyfiles = [f for f in onlyfiles if f.endswith(".js") if not match or match in f]
# Generates the source for the single provided file,
# without an extra "use strict" directive prepended to the file.
# Handles [noStrict] and [onlyStrict] flags.
if source:
if len(onlyfiles) != 1:
print("Need exactly one file matched by the -m/--match option.")
print("Got these files: " + ", ".join(onlyfiles))
sys.exit(1)
with open(onlyfiles[0], "rb") as f:
content = f.read().decode("utf-8")
match = flagsMatcher.search(content)
flags = set()
if match:
flags = {flag.strip() for flag in match.group(1).split(",")}
strict = False
if "noStrict" in flags or "raw" in flags:
strict = False
if "onlyStrict" in flags:
strict = True
print(generateSource(content, strict, getSuite(onlyfiles[0]), flags)[0])
sys.exit(0)
rangeLeft = 0
rangeRight = len(onlyfiles) - 1
if chunk != -1:
if chunk == 0:
rangeRight = rangeRight // 3 - 1
elif chunk == 1:
rangeLeft = rangeRight // 3
rangeRight = rangeRight - rangeLeft
elif chunk == 2:
rangeLeft = rangeRight - rangeRight // 3 + 1
else:
print("Invalid chunk ID")
sys.exit(1)
if not os.path.isfile(join(binary_path, "hermes")):
print("{} not found.".format(join(binary_path, "hermes")))
sys.exit(1)
if not os.path.isfile(join(binary_path, hvm)):
print("{} not found.".format(join(binary_path, hvm)))
sys.exit(1)
esprima_runner = esprima.EsprimaTestRunner(verbose)
calls = makeCalls(
(test_blacklist, keep_tmp, binary_path, hvm, esprima_runner),
onlyfiles,
rangeLeft,
rangeRight,
)
results, resultsHist, slowest_tests = testLoop(
calls, jobs, fail_fast, num_slowest_tests
)
# Sort the results for easier reading of failed tests.
results.sort(key=lambda f: f[1][0].value)
if results:
print("")
for testName, (hermesStatus, errString, _) in results:
if show_all or (
(hermesStatus != TestFlag.TEST_PASSED)
and (hermesStatus != TestFlag.TEST_SKIPPED)
and (hermesStatus != TestFlag.TEST_PERMANENTLY_SKIPPED)
):
print("{} {}".format(str(hermesStatus), testName))
if errString:
print("{}".format(textwrap.indent(errString, "\t")))
if slowest_tests:
print()
print("Top {:d} slowest tests".format(len(slowest_tests)))
maxNameWidth = 0
maxNumWidth = 0
for testName, duration in slowest_tests:
maxNameWidth = max(maxNameWidth, len(testName))
maxNumWidth = max(maxNumWidth, len("{:.3f}".format(duration)))
for testName, duration in slowest_tests:
print(
"{:<{testNameWidth}} {:>{durationWidth}.3f}".format(
testName,
duration,
# Add 3 just in case it's right at the borderline
testNameWidth=maxNameWidth + 3,
durationWidth=maxNumWidth,
)
)
print()
total = sum(resultsHist.values())
failed = (
resultsHist[TestFlag.COMPILE_FAILED]
+ resultsHist[TestFlag.COMPILE_TIMEOUT]
+ resultsHist[TestFlag.EXECUTE_FAILED]
+ resultsHist[TestFlag.EXECUTE_TIMEOUT]
+ resultsHist[TestFlag.TEST_UNEXPECTED_PASSED]
)
eligible = (
sum(resultsHist.values())
- resultsHist[TestFlag.TEST_SKIPPED]
- resultsHist[TestFlag.TEST_PERMANENTLY_SKIPPED]
)
if eligible > 0:
passRate = "{0:.2%}".format(resultsHist[TestFlag.TEST_PASSED] / eligible)
else:
passRate = "--"
if (eligible - resultsHist[TestFlag.TEST_PASSED]) > 0:
resultStr = "{}FAIL{}".format(Color.RED, Color.RESET)
else:
resultStr = "{}PASS{}".format(Color.GREEN, Color.RESET)
# Turn off formatting so that the table looks nice in source code.
# fmt: off
print("-----------------------------------")
print("| Results | {} |".format(resultStr))
print("|----------------------+----------|")
print("| Total | {:>8} |".format(total))
print("| Pass | {:>8} |".format(resultsHist[TestFlag.TEST_PASSED]))
print("| Fail | {:>8} |".format(failed))
print("| Skipped | {:>8} |".format(resultsHist[TestFlag.TEST_SKIPPED]))
print("| Permanently Skipped | {:>8} |".format(resultsHist[TestFlag.TEST_PERMANENTLY_SKIPPED]))
print("| Pass Rate | {:>8} |".format(passRate))
print("-----------------------------------")
print("| Failures | |")
print("|----------------------+----------|")
print("| Compile fail | {:>8} |".format(resultsHist[TestFlag.COMPILE_FAILED]))
print("| Compile timeout | {:>8} |".format(resultsHist[TestFlag.COMPILE_TIMEOUT]))
print("| Execute fail | {:>8} |".format(resultsHist[TestFlag.EXECUTE_FAILED]))
print("| Execute timeout | {:>8} |".format(resultsHist[TestFlag.EXECUTE_TIMEOUT]))
if test_blacklist:
print("| Blacklisted passes | {:>8} |".format(resultsHist[TestFlag.TEST_UNEXPECTED_PASSED]))
print("-----------------------------------")
# fmt: on
return (eligible - resultsHist[TestFlag.TEST_PASSED]) > 0
| [
"[email protected]"
]
| |
1dfcaba96653566123575405ff2189cdaf3be51b | 9a9fb43d866dc8fd829211d2b47328ef1f5ed428 | /PI_ROS_WORKSPACES/ros_catkin_ws/devel_isolated/cpp_common/_setup_util.py | af03a4b3b774f1e4d408fd6ce5423881f5aa5d34 | []
| no_license | droter/auto_mow | 326df42a54676079cac61fe63c40d5d04beb049b | 3742cb2ef78bc06d2771ac4c679e5110909774f8 | refs/heads/master | 2022-05-19T20:18:33.409777 | 2020-04-29T00:42:24 | 2020-04-29T00:42:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,898 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
'''This file generates shell code for the setup.SHELL scripts to set environment variables'''
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'arm-linux-gnueabihf')],
'PATH': 'bin',
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'arm-linux-gnueabihf', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
'''
Generate shell code to reset environment variables
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
'''
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
'''
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
'''
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
'''
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
'''
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
'''
Generate shell code to prepend environment variables
for the all workspaces.
'''
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted([key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH']):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
def _prefix_env_variable(environ, name, paths, subfolders):
'''
Return the prefix to prepend to the environment variable NAME, adding any path in NEW_PATHS_STR without creating duplicate or empty items.
'''
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
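# Hypothetical example (paths and values below are assumptions, not from the
# original): on a POSIX system with environ == {"PATH": "/usr/bin"},
# paths == ["/opt/ws"] and subfolders == "bin", and assuming /opt/ws/bin
# exists on disk, _prefix_env_variable(environ, "PATH", ["/opt/ws"], "bin")
# returns "/opt/ws/bin:" -- the trailing os.pathsep is appended because the
# variable already has entries, so the caller can prepend without duplicates.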
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
'''
Generate shell code with found environment hooks
for the all workspaces.
'''
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
# environment at generation time
CMAKE_PREFIX_PATH = '/home/pi/ros_catkin_ws/devel_isolated/class_loader;/home/pi/ros_catkin_ws/devel_isolated/cmake_modules;/home/pi/ros_catkin_ws/devel_isolated/genpy;/home/pi/ros_catkin_ws/devel_isolated/gennodejs;/home/pi/ros_catkin_ws/devel_isolated/genlisp;/home/pi/ros_catkin_ws/devel_isolated/geneus;/home/pi/ros_catkin_ws/devel_isolated/gencpp;/home/pi/ros_catkin_ws/devel_isolated/genmsg;/home/pi/ros_catkin_ws/devel_isolated/catkin;/home/pi/rosbots_catkin_ws/devel;/home/pi/ros_catkin_ws/build/opt/ros/kinetic'.split(';')
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
| [
"[email protected]"
]
| |
ed33ebf9caedf48ca19abe67d46daec2605f4000 | 3d57578801ffdcfeb09c6b3551a1611b9b28b55d | /cosmeticsyou/accounts/migrations/0003_auto_20180316_2301.py | f8b52532db847607a49cc367e6aea0ba8c4f4d66 | []
| no_license | Wishez/cosmeticsyou | a97f01054c40a9305f7b0274f59812278d3ac593 | c44c177fd3ea52a25003916be9eb49cbeabcbdea | refs/heads/master | 2021-01-18T03:45:11.902558 | 2018-03-16T21:23:24 | 2018-03-16T21:23:24 | 85,789,284 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,791 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-03-16 20:01
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0002_auto_20180316_2201'),
]
operations = [
migrations.AlterField(
model_name='refferalconsultant',
name='num_apartment',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=999, null=True, verbose_name='Квартира'),
),
migrations.AlterField(
model_name='refferalconsultant',
name='num_home',
field=models.CharField(blank=True, max_length=5, null=True, verbose_name='Дом'),
),
migrations.AlterField(
model_name='refferalconsultant',
name='passport_data',
field=models.CharField(blank=True, max_length=26, null=True, verbose_name='Серия и номер паспорта'),
),
migrations.AlterField(
model_name='refferalconsultant',
name='region',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Почтовый Индекс'),
),
migrations.AlterField(
model_name='refferalconsultant',
name='street',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Улица'),
),
migrations.AlterField(
model_name='refferalconsultanttablerelations',
name='num_apartment',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=999, null=True, verbose_name='Квартира'),
),
migrations.AlterField(
model_name='refferalconsultanttablerelations',
name='num_home',
field=models.CharField(blank=True, max_length=5, null=True, verbose_name='Дом'),
),
migrations.AlterField(
model_name='refferalconsultanttablerelations',
name='passport_data',
field=models.CharField(blank=True, max_length=26, null=True, verbose_name='Серия и номер паспорта'),
),
migrations.AlterField(
model_name='refferalconsultanttablerelations',
name='region',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Почтовый Индекс'),
),
migrations.AlterField(
model_name='refferalconsultanttablerelations',
name='street',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Улица'),
),
migrations.AlterField(
model_name='user',
name='num_apartment',
field=models.DecimalField(blank=True, decimal_places=1, max_digits=999, null=True, verbose_name='Квартира'),
),
migrations.AlterField(
model_name='user',
name='num_home',
field=models.CharField(blank=True, max_length=5, null=True, verbose_name='Дом'),
),
migrations.AlterField(
model_name='user',
name='passport_data',
field=models.CharField(blank=True, max_length=26, null=True, verbose_name='Серия и номер паспорта'),
),
migrations.AlterField(
model_name='user',
name='region',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Почтовый Индекс'),
),
migrations.AlterField(
model_name='user',
name='street',
field=models.CharField(blank=True, max_length=50, null=True, verbose_name='Улица'),
),
]
| [
"[email protected]"
]
| |
59553b07726a1959d3ea724fc29aed34185217ec | 439c87c48c6c2c812d1faca73cbf1b632e9403dc | /DAYS/Day4/Remove_Empty_List.py | 17625cb9e9a3ed34eaa82a6e3d2c8f455ce5c064 | [
"MIT"
]
| permissive | saubhagyav/100_Days_Code_Challenge | 14ca8db68e09c7ac7741f164fea8b62cb36bf2c0 | bde41126b9342eacc488c79d01dc4b76a3651c93 | refs/heads/main | 2023-08-05T03:12:18.918079 | 2021-09-12T12:20:41 | 2021-09-12T12:20:41 | 389,375,066 | 2 | 2 | null | 2021-07-25T15:06:18 | 2021-07-25T15:06:17 | null | UTF-8 | Python | false | false | 351 | py | # Approach 1:
def Remove_Empty_List(Given_List):
Result = [ele for ele in Given_List if ele != []]
return Result
Given_List = [5, 6, [], 7, 8, 9, [], 12, [], 4,[]]
print(Remove_Empty_List(Given_List))
# Approach 2:
Given_List = [5, 6, [], 7, 8, 9, [], 12, [], 4, []]
result = list(filter(None, Given_List))
print(result)
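# Caveat (added for clarity, not part of the original): the two approaches
# are not equivalent. filter(None, ...) drops every falsy element, not just
# empty lists -- 0, '' and None are removed too -- while Approach 1 only
# removes []. A quick check with mixed data:
Mixed_List = [0, '', None, [], 5]
print(list(filter(None, Mixed_List)))   # [5]
print(Remove_Empty_List(Mixed_List))    # [0, '', None, 5]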
| [
"[email protected]"
]
| |
3971a5173cfd3587c142695cad852f22cb9cf9bd | 060660439d4a54dfa74368c03968bee684d74930 | /planscore/website/__init__.py | 886e658812f37ac2edad725ac38e5a73f0f32e4d | []
| no_license | dheerajchand/PlanScore | d0829e22dd1bfd20bbec58d900c4fdfaed8a0ebc | 39b8a173f3a7f9b97db8d138e9e757bb23a0b204 | refs/heads/master | 2020-12-02T18:17:23.747720 | 2017-06-11T04:20:54 | 2017-06-11T04:20:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,393 | py | import flask, os, urllib.parse, markdown
from .. import data, score
MODELS_BASEDIR = os.path.join(os.path.dirname(__file__), 'models')
app = flask.Flask(__name__)
app.config['PLANSCORE_S3_BUCKET'] = os.environ.get('S3_BUCKET', 'planscore')
app.config['PLANSCORE_API_BASE'] = os.environ.get('API_BASE', 'https://api.planscore.org/')
def get_data_url_pattern(bucket):
return 'https://{}.s3.amazonaws.com/{}'.format(bucket, data.UPLOAD_INDEX_KEY)
def get_geom_url_pattern(bucket):
return 'https://{}.s3.amazonaws.com/{}'.format(bucket, data.UPLOAD_GEOMETRY_KEY)
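# Hypothetical example (the bucket name and the key constant's value are
# assumptions, not from the original): with bucket "planscore-dev",
# get_data_url_pattern("planscore-dev") returns
# "https://planscore-dev.s3.amazonaws.com/" + data.UPLOAD_INDEX_KEY, i.e. a
# direct S3 URL for the upload index object.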
@app.route('/')
def get_index():
return flask.render_template('index.html')
@app.route('/upload.html')
def get_upload():
planscore_api_base = flask.current_app.config['PLANSCORE_API_BASE']
upload_fields_url = urllib.parse.urljoin(planscore_api_base, 'upload')
return flask.render_template('upload.html', upload_fields_url=upload_fields_url)
@app.route('/plan.html')
def get_plan():
data_url_pattern = get_data_url_pattern(flask.current_app.config['PLANSCORE_S3_BUCKET'])
geom_url_pattern = get_geom_url_pattern(flask.current_app.config['PLANSCORE_S3_BUCKET'])
return flask.render_template('plan.html', fields=score.FIELD_NAMES,
data_url_pattern=data_url_pattern, geom_url_pattern=geom_url_pattern)
@app.route('/models/')
def get_models():
model_names = list()
for (base, _, files) in os.walk(MODELS_BASEDIR):
if 'README.md' in files:
model_names.append(os.path.relpath(base, MODELS_BASEDIR))
return flask.render_template('models.html', models=model_names)
@app.route('/models/<name>/')
def get_model(name):
model_basedir = os.path.join(MODELS_BASEDIR, name)
with open(os.path.join(model_basedir, 'README.md')) as file:
model_readme = markdown.markdown(file.read())
model_files = list()
for (base, _, files) in os.walk(model_basedir):
model_files.extend([
os.path.relpath(os.path.join(base, file), model_basedir)
for file in files if file != 'README.md'])
return flask.render_template('model.html', name=name,
readme=model_readme, files=model_files)
@app.route('/models/<name>/<path:path>')
def get_model_file(name, path):
dirname, filename = os.path.split(os.path.join(MODELS_BASEDIR, name, path))
return flask.send_from_directory(dirname, filename)
| [
"[email protected]"
]
| |
6cc768c2c0b18a6842f42aa80378fb57bbb8607e | b7a8d04f9fd88d66ef6d8b83a449105ae31698a4 | /setup.py | 090be41ad536ceb7af61d21bd576ec733d48b86c | []
| no_license | jjkas/eels-analysis-old | d5ce3dbb55ed84921abfcb2476243b6783ab5d52 | 98a2fc8e394060d53f982427dd953b31d56a90fa | refs/heads/master | 2020-09-13T12:39:58.384098 | 2020-01-08T00:13:44 | 2020-01-08T00:13:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | # -*- coding: utf-8 -*-
"""
To upload to PyPI, PyPI test, or a local server:
python setup.py bdist_wheel upload -r <server_identifier>
"""
import setuptools
import os
setuptools.setup(
name="nionswift-eels-analysis",
version="0.4.4",
author="Nion Software",
author_email="[email protected]",
description="Library and UI for doing EELS analysis with Nion Swift.",
long_description=open("README.rst").read(),
url="https://github.com/nion-software/eels-analysis",
packages=["nion.eels_analysis", "nion.eels_analysis.test", "nionswift_plugin.nion_eels_analysis", "nionswift_plugin.nion_eels_analysis.test"],
package_data={"nion.eels_analysis": ["resources/*"]},
install_requires=["nionswift>=0.14.0"],
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Programming Language :: Python :: 3.6",
],
include_package_data=True,
test_suite="nion.eels_analysis.test",
python_requires='~=3.6',
)
| [
"[email protected]"
]
| |
b5bcc647a60de463a5d2a205fe7a95114861f91d | 7bd3c35070d40724ab21e83b4d3f5ba39e455818 | /signup/sign_up/views.py | f540eb11011cd77dde5d49812f013f0b1351bbb0 | []
| no_license | shatishdesai202/Django-Project-Practice | 9433004de6fd72dd0cd56cb4ff7770ecded6a054 | f66ee507fcf959d148627c1c2f5d587b10adc996 | refs/heads/main | 2023-03-12T17:14:15.823285 | 2021-03-07T05:32:07 | 2021-03-07T05:32:07 | 345,263,312 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from django.shortcuts import render
from django.contrib.auth.forms import UserCreationForm
from .forms import SignupForm
# Create your views here.
def sign(request):
if request.method == "POST":
form = SignupForm(request.POST)
if form.is_valid():
form.save()
form = SignupForm()
else:
form = SignupForm()
context = {'form':form}
return render(request, 'base.html', context)
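# Design note (an assumption, not part of the original): re-instantiating
# SignupForm after a successful save clears the fields for the next request.
# A common alternative is the POST/redirect/GET pattern, sketched below with
# a hypothetical URL name 'sign':
#   from django.shortcuts import redirect
#   if form.is_valid():
#       form.save()
#       return redirect('sign')  # avoids duplicate submits on refresh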
| [
"[email protected]"
]
| |
7d7d6b9f2cd5e12cf77ce8b751aa6f9de55971ba | 508321d683975b2339e5292202f3b7a51bfbe22d | /Userset.vim/ftplugin/python/CompletePack/PySide2/QtWidgets/QListWidgetItem.py | c80e9c4f15b5eb988fd6863dcbe0a2da4b62da51 | []
| no_license | cundesi/vimSetSa | 4947d97bcfe89e27fd2727423112bb37aac402e2 | 0d3f9e5724b471ab21aa1199cc3b4676e30f8aab | refs/heads/master | 2020-03-28T05:54:44.721896 | 2018-08-31T07:23:41 | 2018-08-31T07:23:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,389 | py | # encoding: utf-8
# module PySide2.QtWidgets
# from C:\Program Files\Autodesk\Maya2017\Python\lib\site-packages\PySide2\QtWidgets.pyd
# by generator 1.145
# no doc
# imports
import PySide2.QtCore as __PySide2_QtCore
import PySide2.QtGui as __PySide2_QtGui
import Shiboken as __Shiboken
class QListWidgetItem(__Shiboken.Object):
# no doc
def background(self, *args, **kwargs): # real signature unknown
pass
def backgroundColor(self, *args, **kwargs): # real signature unknown
pass
def checkState(self, *args, **kwargs): # real signature unknown
pass
def clone(self, *args, **kwargs): # real signature unknown
pass
def data(self, *args, **kwargs): # real signature unknown
pass
def flags(self, *args, **kwargs): # real signature unknown
pass
def font(self, *args, **kwargs): # real signature unknown
pass
def foreground(self, *args, **kwargs): # real signature unknown
pass
def icon(self, *args, **kwargs): # real signature unknown
pass
def isHidden(self, *args, **kwargs): # real signature unknown
pass
def isSelected(self, *args, **kwargs): # real signature unknown
pass
def listWidget(self, *args, **kwargs): # real signature unknown
pass
def read(self, *args, **kwargs): # real signature unknown
pass
def setBackground(self, *args, **kwargs): # real signature unknown
pass
def setBackgroundColor(self, *args, **kwargs): # real signature unknown
pass
def setCheckState(self, *args, **kwargs): # real signature unknown
pass
def setData(self, *args, **kwargs): # real signature unknown
pass
def setFlags(self, *args, **kwargs): # real signature unknown
pass
def setFont(self, *args, **kwargs): # real signature unknown
pass
def setForeground(self, *args, **kwargs): # real signature unknown
pass
def setHidden(self, *args, **kwargs): # real signature unknown
pass
def setIcon(self, *args, **kwargs): # real signature unknown
pass
def setSelected(self, *args, **kwargs): # real signature unknown
pass
def setSizeHint(self, *args, **kwargs): # real signature unknown
pass
def setStatusTip(self, *args, **kwargs): # real signature unknown
pass
def setText(self, *args, **kwargs): # real signature unknown
pass
def setTextAlignment(self, *args, **kwargs): # real signature unknown
pass
def setTextColor(self, *args, **kwargs): # real signature unknown
pass
def setToolTip(self, *args, **kwargs): # real signature unknown
pass
def setWhatsThis(self, *args, **kwargs): # real signature unknown
pass
def sizeHint(self, *args, **kwargs): # real signature unknown
pass
def statusTip(self, *args, **kwargs): # real signature unknown
pass
def text(self, *args, **kwargs): # real signature unknown
pass
def textAlignment(self, *args, **kwargs): # real signature unknown
pass
def textColor(self, *args, **kwargs): # real signature unknown
pass
def toolTip(self, *args, **kwargs): # real signature unknown
pass
def type(self, *args, **kwargs): # real signature unknown
pass
def whatsThis(self, *args, **kwargs): # real signature unknown
pass
def write(self, *args, **kwargs): # real signature unknown
pass
def __eq__(self, y): # real signature unknown; restored from __doc__
""" x.__eq__(y) <==> x==y """
pass
def __ge__(self, y): # real signature unknown; restored from __doc__
""" x.__ge__(y) <==> x>=y """
pass
def __gt__(self, y): # real signature unknown; restored from __doc__
""" x.__gt__(y) <==> x>y """
pass
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __le__(self, y): # real signature unknown; restored from __doc__
""" x.__le__(y) <==> x<=y """
pass
def __lshift__(self, y): # real signature unknown; restored from __doc__
""" x.__lshift__(y) <==> x<<y """
pass
def __lt__(self, y): # real signature unknown; restored from __doc__
""" x.__lt__(y) <==> x<y """
pass
@staticmethod # known case of __new__
def __new__(S, *more): # real signature unknown; restored from __doc__
""" T.__new__(S, ...) -> a new object with type S, a subtype of T """
pass
def __ne__(self, y): # real signature unknown; restored from __doc__
""" x.__ne__(y) <==> x!=y """
pass
def __rlshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rlshift__(y) <==> y<<x """
pass
def __rrshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rrshift__(y) <==> y>>x """
pass
def __rshift__(self, y): # real signature unknown; restored from __doc__
""" x.__rshift__(y) <==> x>>y """
pass
ItemType = None # (!) real value is ''
Type = None # (!) real value is ''
UserType = None # (!) real value is ''
| [
"[email protected]"
]
| |
0c22e89f87f706b7c281d9efdf6c8fb932fb7278 | 7259dbcc9e32502945d362caa43d4ad380cd04ea | /Login_Pingtai_Code/Login_Pingtai_Code/spiders/zujuan.py | 5df1c61f584726695543507746195569516801d9 | [
"MIT"
]
| permissive | Doraying1230/Python-Study | daa143c133262f4305624d180b38205afe241163 | 8dccfa2108002d18251053147ccf36551d90c22b | refs/heads/master | 2020-03-29T13:46:13.061373 | 2018-07-26T15:19:32 | 2018-07-26T15:19:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,032 | py | '''
Created on Oct 27, 2017
@author: deppon
'''
from scrapy.spiders import CrawlSpider
import scrapy
from scrapy.http import Request,FormRequest
class ZuJuanSpider(CrawlSpider):
name = "ZuJuanSpider"
account = '13653978879'
pwd = '123456'
headers = {
'User-Agent':'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
'Host':'passport.zujuan.com',
'Origin':'http://passport.zujuan.com',
'X - Requested - With': 'XMLHttpRequest',
'Connection': 'keep - alive'
}
def __init__(self, *a, **kw):
super(ZuJuanSpider, self).__init__(*a, **kw)
self.meta = eval(kw['meta'])
def start_requests(self):
"""第一次请求一下登录页面,设置开启cookie使其得到cookie,设置回调函数"""
return [Request(url=self.meta['login_url'],meta={'cookiejar':1},callback=self.parse,headers=self.headers)]
def parse(self, response):
_csrf = self.extract_from_xpath(response, "//input[@name='_csrf']/@value")
print('_csrf ===',_csrf)
formdata={'_csrf': _csrf,
'LoginForm[username]': self.account,
'LoginForm[password]': self.pwd,
'LoginForm[rememberMe]':'0'}
        # Response cookies
        Cookie1 = response.headers.getlist('Set-Cookie')  # inspect the response cookies, i.e. what the backend wrote to the browser on the first visit to the login page
        print("Response cookies ====", Cookie1)
        print('Logging in')
        """Second, log in with a form POST request that carries the cookie, the browser user agent and the user credentials, so the cookie gets authorized."""
return [FormRequest.from_response(response,
                                          url= self.meta['login_post_url'], # the actual POST address
meta={'cookiejar':response.meta['cookiejar']},
formdata=formdata,
callback=self.after_login
)]
def after_login(self,response):
yield scrapy.Request(url=self.meta['login_sucess_url'],callback=self.get_json_data,meta=response.meta)
def get_json_data(self,response):
        # Request cookies
        Cookie2 = response.request.headers.getlist('Cookie')
        print("Cookies after successful login =====", Cookie2)
        a = response.body.decode("utf-8")
        print("Response body after login ====", a)
def extract_from_xpath(self, response, xpath, return_first=True, return_selector=False, embedded_content=False):
if return_selector:
return response.xpath(xpath)
else:
if return_first:
if embedded_content:
return response.xpath(xpath).xpath('string(.)').extract()
return response.xpath(xpath).extract_first()
return response.xpath(xpath).extract()
| [
"[email protected]"
]
| |
112a8a4a05140b04fe14ae7faff078f55f0b9100 | 3eda7828febd06dc5173db03a5c9191a60f44c65 | /boyue_index.py | cbe30449aa4b9c9a7da9dec85ede29e53906042d | []
| no_license | xzk-seu/Auto_home_spider | 2dd95fdc35177b1ab5050d8efbd811a51328a570 | d2016cc2de6d214097210e50755819ee5e4ea342 | refs/heads/master | 2020-04-11T06:40:13.405381 | 2019-10-10T08:53:04 | 2019-10-10T08:53:04 | 161,587,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 971 | py | import json
import os
from multiprocessing import Pool
from get_index import get_index
def write_result(page):
path = os.path.join(os.getcwd(), 'index', 'boyue')
if not os.path.exists(path):
os.makedirs(path)
file_path = os.path.join(path, str(page)+'.json')
if os.path.exists(file_path) and os.path.getsize(file_path) != 0:
print('file: %d is existing!' % page)
return
url = 'https://club.autohome.com.cn/bbs/forum-c-3788-%d.html'
temp = get_index(url % page)
with open(file_path, 'w') as fw:
json.dump(temp, fw)
print('PAGE: %d done!' % page)
def safe_write(page):
try:
write_result(page)
except Exception as e:
print('in safe_wire: %s' % e)
def get_boyue_index(page_limit):
pool = Pool(8)
for i in range(1, page_limit + 1):
pool.apply_async(safe_write, args=(i,))
pool.close()
pool.join()
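    # Design note (an assumption, not part of the original): Pool.apply_async
    # discards exceptions raised in workers unless each AsyncResult is
    # inspected, which is why safe_write wraps write_result in try/except and
    # prints the error itself instead of letting it vanish silently.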
if __name__ == '__main__':
get_boyue_index(1000)
| [
"[email protected]"
]
| |
0988de8091599db64228d0877e95f48dc311de48 | 45dd427ec7450d2fac6fe2454f54a130b509b634 | /lecture_10/direct_needle_patch.py | 851a9e8f3135d0940606f49e288d151b46cfc7cf | []
| no_license | weka511/smac | 702fe183e3e73889ec663bc1d75bcac07ebb94b5 | 0b257092ff68058fda1d152d5ea8050feeab6fe2 | refs/heads/master | 2022-07-02T14:24:26.370766 | 2022-06-13T00:07:36 | 2022-06-13T00:07:36 | 33,011,960 | 22 | 8 | null | null | null | null | UTF-8 | Python | false | false | 460 | py | from random import uniform
from math import sqrt
a = 0.6
b = 1.0
n_hits = 0
n_steps = 1000000
for n in range(n_steps):
x_center = uniform(0.0, b * 0.5)
while True:
dx = uniform(0.0, 1.0)
dy = uniform(0.0, 1.0)
rad = sqrt(dx ** 2 + dy ** 2)
if rad <= 1.0: break
x_tip = x_center - a * 0.5 * dx / rad
if x_tip < 0.0: n_hits += 1
print (a * 2.0 * n_steps / float(n_hits) / b)
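# Buffon's needle: a needle of length a dropped on parallel lines spaced b apart
# crosses a line with probability 2*a / (pi*b), so the printed value
# a * 2 * n_steps / (n_hits * b) is a Monte Carlo estimate of pi.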
| [
"[email protected]"
]
| |
f8f9fb74ddf71f3055a2aa88c2b4744aad2d2cfa | 8cc30a27835e205a3476783106ca1605a6a85c48 | /amy/autoemails/tests/test_admin_cancel.py | b31f65598da6d8bf05f56b27fdfde600d4560103 | [
"MIT"
]
| permissive | gaybro8777/amy | d968edc78bbd3f63f3353450334721628dbbc0f4 | 3cf99aed58a0f0acf83d2645a30d8408208ccea9 | refs/heads/develop | 2023-03-07T22:08:28.692700 | 2021-02-23T18:06:06 | 2021-02-23T18:06:06 | 341,930,505 | 0 | 0 | MIT | 2021-02-24T17:22:08 | 2021-02-24T14:40:43 | null | UTF-8 | Python | false | false | 7,731 | py | from datetime import timedelta
from django.test import TestCase
from django.urls import reverse
from rq import Queue
from rq.exceptions import NoSuchJobError
from autoemails import admin
from autoemails.models import EmailTemplate, Trigger, RQJob
from autoemails.job import Job
from autoemails.tests.base import FakeRedisTestCaseMixin, dummy_job
from workshops.tests.base import SuperuserMixin
class TestAdminJobCancel(SuperuserMixin, FakeRedisTestCaseMixin, TestCase):
def setUp(self):
super().setUp()
self._setUpSuperuser() # creates self.admin
# save scheduler and connection data
self._saved_scheduler = admin.scheduler
# overwrite
admin.scheduler = self.scheduler
# fake RQJob
self.email = EmailTemplate.objects.create(slug="test-1")
self.trigger = Trigger.objects.create(action="new-instructor",
template=self.email)
self.rqjob = RQJob.objects.create(job_id="fake-id",
trigger=self.trigger)
def tearDown(self):
super().tearDown()
# bring back saved scheduler
admin.scheduler = self._saved_scheduler
def test_view_doesnt_allow_GET(self):
# log admin user
self._logSuperuserIn()
url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
rv = self.client.get(url)
self.assertEqual(rv.status_code, 405) # Method not allowed
def test_view_access_by_anonymous(self):
url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
rv = self.client.post(url)
self.assertEqual(rv.status_code, 302)
# cannot check by assertRedirect because there's additional `?next`
# parameter
self.assertTrue(rv.url.startswith(reverse('login')))
def test_view_access_by_admin(self):
# log admin user
self._logSuperuserIn()
# try accessing the view again
url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
rv = self.client.post(url)
self.assertEqual(rv.status_code, 302)
self.assertRedirects(rv, reverse('admin:autoemails_rqjob_preview',
args=[self.rqjob.pk]))
def test_no_such_job(self):
# log admin user
self._logSuperuserIn()
with self.assertRaises(NoSuchJobError):
Job.fetch(self.rqjob.job_id, connection=self.scheduler.connection)
url = reverse('admin:autoemails_rqjob_cancel', args=[self.rqjob.pk])
rv = self.client.post(url, follow=True)
self.assertIn(
'The corresponding job in Redis was probably already executed',
rv.content.decode('utf-8'),
)
def test_job_executed(self):
"""Ensure executed job is discovered."""
# log admin user
self._logSuperuserIn()
# enqueue and then create an RQJob
job = self.queue.enqueue(dummy_job)
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
Job.fetch(job.id, connection=self.scheduler.connection) # no error
with self.connection.pipeline() as pipe:
pipe.watch(self.scheduler.scheduled_jobs_key)
# no jobs in scheduler
self.assertIsNone(
pipe.zscore(
self.scheduler.scheduled_jobs_key, job.id
)
)
url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
rv = self.client.post(url, follow=True)
self.assertIn(
'Job has unknown status or was already executed.',
rv.content.decode('utf-8'),
)
def test_enqueued_job_cancelled(self):
"""Ensure enqueued job is successfully cancelled."""
# log admin user
self._logSuperuserIn()
# enqueue a job to run in future
job = self.scheduler.enqueue_in(
timedelta(minutes=5),
dummy_job,
)
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
# fetch job data
job = Job.fetch(rqjob.job_id, connection=self.scheduler.connection)
# `None` status is characteristic to scheduler-queued jobs.
# Jobs added to the queue without scheduler will have different
# status.
self.assertEqual(job.get_status(), None)
# the job is in scheduler's queue
with self.connection.pipeline() as pipe:
pipe.watch(self.scheduler.scheduled_jobs_key)
# job in scheduler
self.assertIsNotNone(
pipe.zscore(
self.scheduler.scheduled_jobs_key, job.id
)
)
# cancel the job
url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
rv = self.client.post(url, follow=True)
self.assertIn(
f'The job {rqjob.job_id} was cancelled.',
rv.content.decode('utf-8'),
)
# the job is no longer in scheduler's queue
with self.connection.pipeline() as pipe:
pipe.watch(self.scheduler.scheduled_jobs_key)
# job in scheduler
self.assertIsNone(
pipe.zscore(
self.scheduler.scheduled_jobs_key, job.id
)
)
# job status updated
rqjob.refresh_from_db()
self.assertEqual(rqjob.status, "cancelled")
# job data still available
Job.fetch(rqjob.job_id, connection=self.scheduler.connection)
# ...but nothing is scheduled
self.assertEqual(self.scheduler.count(), 0)
def test_running_job_cancelled(self):
"""Ensure running job is not cancelled."""
# Create an asynchronous queue.
# The name `separate_queue` used here is to ensure the queue isn't
# used anywhere else.
queue = Queue('separate_queue', connection=self.connection)
# log admin user
self._logSuperuserIn()
# add job to the queue
job = queue.enqueue(dummy_job)
self.assertEqual(job.get_status(), 'queued')
# log the job in our system as RQJob
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
# force the job status to be "started"
job.set_status('started')
self.assertTrue(job.is_started)
url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
rv = self.client.post(url, follow=True)
self.assertIn(
f'Job {rqjob.job_id} has started and cannot be cancelled.',
rv.content.decode('utf-8'),
)
def test_other_status_job(self):
"""Ensure jobs with other statuses are handled."""
# Create an asynchronous queue.
# The name `separate_queue` used here is to ensure the queue isn't
# used anywhere else.
queue = Queue('separate_queue', connection=self.connection)
# log admin user
self._logSuperuserIn()
# add job to the queue
job = queue.enqueue(dummy_job)
self.assertEqual(job.get_status(), 'queued')
# log the job in our system as RQJob
rqjob = RQJob.objects.create(job_id=job.id, trigger=self.trigger)
# force the job status to be "deferred" (could be something else,
# except for "started" and "queued")
job.set_status('deferred')
self.assertTrue(job.is_deferred)
url = reverse('admin:autoemails_rqjob_cancel', args=[rqjob.pk])
rv = self.client.post(url, follow=True)
self.assertIn(
'Job has unknown status or was already executed.',
rv.content.decode('utf-8'),
)
| [
"[email protected]"
]
| |
f4d666432c4c4b022a452ca50ccd90fffad423ab | df3b60c38d22497f3169375491a278255209615b | /mqtt/cloud/testmqtt.py | facb8807f7c49de5770a9876ed3a8536ccfccb9b | []
| no_license | juanengml/Sistema_de_Monitoramento_de_Salas_GPIOT-UTFPR-TD | 2c3c8d67ce8aa555eb07233ba52411cd1314c488 | 23f20e865910f48b0074a35f95ebfae5e6cbbd92 | refs/heads/master | 2022-12-24T01:21:58.551642 | 2020-01-09T21:30:19 | 2020-01-09T21:30:19 | 149,667,511 | 0 | 2 | null | 2022-12-18T14:40:02 | 2018-09-20T20:35:32 | HTML | UTF-8 | Python | false | false | 1,578 | py | import paho.mqtt.client as mqtt
#import RPi.GPIO as GPIO
import json
THINGSBOARD_HOST = 'YOUR_THINGSBOARD_IP_OR_HOSTNAME'
ACCESS_TOKEN = 'RASPBERRY_PI_DEMO_TOKEN'
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, rc, *extra_params):
print('Connected with result code ' + str(rc))
# Subscribing to receive RPC requests
client.subscribe('v1/devices/me/rpc/request/+')
# Sending current GPIO status
client.publish('v1/devices/me/attributes', JSON, 1)
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
print 'Topic: ' + msg.topic + '\nMessage: ' + str(msg.payload)
# Decode JSON request
data = json.loads(msg.payload)
# Check request method
if data['method'] == 'getGpioStatus':
# Reply with GPIO status
client.publish(msg.topic.replace('request', 'response'), JSON, 1)
elif data['method'] == 'setGpioStatus':
# Update GPIO status and reply
client.publish(msg.topic.replace('request', 'response'), JSON, 1)
client.publish('v1/devices/me/attributes', JSON, 1)
client = mqtt.Client()
# Register connect callback
client.on_connect = on_connect
# Registed publish message callback
client.on_message = on_message
# Set access token
client.username_pw_set(ACCESS_TOKEN)
# Connect to ThingsBoard using default MQTT port and 60 seconds keepalive interval
client.connect(THINGSBOARD_HOST, 1883, 60)
try:
    client.loop_forever()
except KeyboardInterrupt:
    # GPIO.cleanup() belongs here once RPi.GPIO is imported (its import is commented out above)
    pass
| [
"[email protected]"
]
| |
60dc64a27d8279c8669ee5555da915651affded0 | e9dd4ab2ffd84fa6e5c3c5b097aa2b088860e1ec | /btre/urls.py | a566737c4b59c0c84b2813dbcd0ef15ad330cf38 | []
| no_license | AkshayVKumar/btre | f8434195080a597f6d3346c3103574f0d8b26de8 | 4276e710d850ae0f552cf2b1312015a196e3b8f4 | refs/heads/main | 2023-03-29T04:22:49.478403 | 2021-04-01T18:51:16 | 2021-04-01T18:51:16 | 353,791,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,178 | py | """btre URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf import settings
from django.conf.urls.static import static
import pages
import listings
import realtors
import accounts
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('pages.urls')),
path('listings/',include("listings.urls")),
path('realtors/',include("realtors.urls")),
path('accounts/',include("accounts.urls")),
path('contacts/',include("contacts.urls"))
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT)
| [
"[email protected]"
]
| |
2579021a8df011296303f7a5a2c966d2a68c05af | a7efc71f80fcf7085d5357c6e41e37a1518413eb | /src/sentimental_analysis/preprocess.py | 2bdfa6f4463419aa94d74d35abf4b3e0fb436ca1 | [
"MIT"
]
| permissive | stormsinbrewing/Real_Time_Social_Media_Mining | 398ad382567d0d1b39bf2a479cf52933f36009b0 | 86b16f763d1f57c1a1f1a26808d3b36bfa364358 | refs/heads/master | 2023-04-23T07:46:01.690005 | 2021-05-03T03:44:18 | 2021-05-03T03:44:18 | 234,296,979 | 25 | 11 | MIT | 2023-03-24T23:56:55 | 2020-01-16T10:39:34 | HTML | UTF-8 | Python | false | false | 3,708 | py | import re
import sys
from utils import write_status
from nltk.stem.porter import PorterStemmer
def preprocess_word(word):
# Remove punctuation
word = word.strip('\'"?!,.():;')
# Convert more than 2 letter repetitions to 2 letter
# funnnnny --> funny
word = re.sub(r'(.)\1+', r'\1\1', word)
# Remove - & '
word = re.sub(r'(-|\')', '', word)
return word
def is_valid_word(word):
# Check if word begins with an alphabet
return (re.search(r'^[a-zA-Z][a-z0-9A-Z\._]*$', word) is not None)
def handle_emojis(tweet):
# Smile -- :), : ), :-), (:, ( :, (-:, :')
tweet = re.sub(r'(:\s?\)|:-\)|\(\s?:|\(-:|:\'\))', ' EMO_POS ', tweet)
# Laugh -- :D, : D, :-D, xD, x-D, XD, X-D
tweet = re.sub(r'(:\s?D|:-D|x-?D|X-?D)', ' EMO_POS ', tweet)
# Love -- <3, :*
tweet = re.sub(r'(<3|:\*)', ' EMO_POS ', tweet)
# Wink -- ;-), ;), ;-D, ;D, (;, (-;
tweet = re.sub(r'(;-?\)|;-?D|\(-?;)', ' EMO_POS ', tweet)
# Sad -- :-(, : (, :(, ):, )-:
tweet = re.sub(r'(:\s?\(|:-\(|\)\s?:|\)-:)', ' EMO_NEG ', tweet)
# Cry -- :,(, :'(, :"(
tweet = re.sub(r'(:,\(|:\'\(|:"\()', ' EMO_NEG ', tweet)
return tweet
def preprocess_tweet(tweet):
processed_tweet = []
# Convert to lower case
tweet = tweet.lower()
# Replaces URLs with the word URL
tweet = re.sub(r'((www\.[\S]+)|(https?://[\S]+))', ' URL ', tweet)
# Replace @handle with the word USER_MENTION
tweet = re.sub(r'@[\S]+', 'USER_MENTION', tweet)
# Replaces #hashtag with hashtag
tweet = re.sub(r'#(\S+)', r' \1 ', tweet)
# Remove RT (retweet)
tweet = re.sub(r'\brt\b', '', tweet)
# Replace 2+ dots with space
tweet = re.sub(r'\.{2,}', ' ', tweet)
# Strip space, " and ' from tweet
tweet = tweet.strip(' "\'')
# Replace emojis with either EMO_POS or EMO_NEG
tweet = handle_emojis(tweet)
# Replace multiple spaces with a single space
tweet = re.sub(r'\s+', ' ', tweet)
words = tweet.split()
for word in words:
word = preprocess_word(word)
if is_valid_word(word):
if use_stemmer:
word = str(porter_stemmer.stem(word))
processed_tweet.append(word)
return ' '.join(processed_tweet)
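# Hand-traced example (illustrative input, not from the dataset):
#   preprocess_tweet("Loved it :) http://t.co/x #great")
#   -> "loved it EMO_POS URL great"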
def preprocess_csv(csv_file_name, processed_file_name, test_file=False):
save_to_file = open(processed_file_name, 'w')
with open(csv_file_name, 'r') as csv:
lines = csv.readlines()
total = len(lines)
for i, line in enumerate(lines):
tweet_id = line[:line.find(',')]
if not test_file:
line = line[1 + line.find(','):]
positive = int(line[:line.find(',')])
line = line[1 + line.find(','):]
tweet = line
processed_tweet = preprocess_tweet(tweet)
if not test_file:
save_to_file.write('%s,%d,%s\n' %
(tweet_id, positive, processed_tweet))
else:
save_to_file.write('%s,%s\n' %
(tweet_id, processed_tweet))
write_status(i + 1, total)
save_to_file.close()
    print('\nSaved processed tweets to: %s' % processed_file_name)
return processed_file_name
if __name__ == '__main__':
if len(sys.argv) != 2:
        print('Usage: python preprocess.py <raw-CSV>')
        exit()
use_stemmer = False
csv_file_name = sys.argv[1]
processed_file_name = sys.argv[1][:-4] + '-processed.csv'
if use_stemmer:
porter_stemmer = PorterStemmer()
processed_file_name = sys.argv[1][:-4] + '-processed-stemmed.csv'
preprocess_csv(csv_file_name, processed_file_name, test_file=False)
| [
"[email protected]"
]
| |
7364facf97bbe797e4688c8529979c572f738f7e | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /src/Python27Packages/PCC/PCC/SRC_regress.py | c0cdbdc0e11180870ab974a14325a7d483abc881 | [
"LicenseRef-scancode-other-permissive"
]
| permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 788 | py | from numpy import *
def SRC_regress(X, Y, otpt, N):
# Add a constant term.
X = insert(X,0,1,1) #insert a column of 1's in the 0th column, axis 1
    # Find the least-squares solution (the MATLAB backslash operator in the original code).
# b is the vector of regression coefficients.
r2=[]
b=zeros((X.shape[1],otpt))
r=zeros((X.shape[1],otpt))
for p in range(otpt):
b[:,p], resid = linalg.lstsq(X, Y[:, p])[:2]
r2.append((1 - resid / (Y[:, p].size * Y[:, p].var()))[0])
r[:, p] = b[:, p] * asarray((std(X,0).T / std(Y[:, p]) ).T)
# [b(:,p),~,~,~,stats] = regress(Y(:,p),X)
# r(:,p) = b(:,p).*std(X)'/std(Y(:,p))
# stat(p)=stats(1)
# "Standardize" the regression coefficients.
# Remove the constant term.
return r[1:], r2
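# Minimal usage sketch (synthetic data, not from the original repo): SRC_regress
# returns standardized regression coefficients (one column per output) and R^2 values.
if __name__ == '__main__':
    X = random.rand(50, 3)
    Y = X.dot(array([[1.0], [2.0], [0.5]])) + 0.1 * random.randn(50, 1)
    src, r2 = SRC_regress(X, Y, 1, 50)
    print(src)   # standardized coefficients for the 3 inputs
    print(r2)    # coefficient of determination per output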
| [
"[email protected]"
]
| |
ab7e8c8da4c8e78c4b5a9b13b5e3a0c286628d78 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/80/usersdata/160/48865/submittedfiles/moedas.py | fbd54a4f21eb65c2842ba5b47bd06575b228b31a | []
| no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 439 | py | # -*- coding: utf-8 -*-
from __future__ import division
a=int(input('Enter the available coin values:'))
b=int(input('Enter the available coin values:'))
c=int(input('Enter the bill value:'))
# Search for non-negative qa, qb with qa*a + qb*b == c, trying the largest qa first
cont=0
qa=c//a
qb=0
while qa>=0:
troca=c-qa*a
if troca%b==0:
qb=troca//b
cont=cont+1
break
else:
qa=qa-1
if cont>0:
    print(qa)
    print(qb)
else:
    print('N')
"[email protected]"
]
| |
71c35b3ac6e90b946930f337822763a56891fa6d | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/Scaleform/genConsts/DAMAGE_LOG_SHELL_BG_TYPES.py | fbb14d7600d8dbd920244235784e18d9819b1d2f | []
| no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 588 | py | # 2017.08.29 21:48:42 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/Scaleform/genConsts/DAMAGE_LOG_SHELL_BG_TYPES.py
"""
This file was generated using the wgpygen.
Please, don't edit this file manually.
"""
class DAMAGE_LOG_SHELL_BG_TYPES(object):
GOLD = 'gold'
WHITE = 'white'
EMPTY = 'empty'
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\Scaleform\genConsts\DAMAGE_LOG_SHELL_BG_TYPES.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:48:42 Střední Evropa (letní čas)
| [
"[email protected]"
]
| |
d508b2795f4abce28a1b17b23539bd2b0a9ca0fe | 0c9fc8fa54d2cd7d55ce6485383e10232ff2bc13 | /Packs/CentrifyVault/Integrations/CentrifyVault/CentrifyVault.py | d805ffcdb6f85d608d71e9750381e6e92efcb5e3 | [
"MIT"
]
| permissive | epartington/content | 95211fac55952d6d8bd1f7b021b0b4da44035a63 | a897c9fff2539f75165e0787432fa7a28989bce1 | refs/heads/master | 2023-08-03T18:40:45.812751 | 2023-05-19T08:05:48 | 2023-05-19T08:05:48 | 128,481,558 | 0 | 0 | MIT | 2022-05-24T20:45:55 | 2018-04-06T23:59:05 | Python | UTF-8 | Python | false | false | 33,211 | py | import re
import time
import demistomock as demisto # noqa: F401
# IMPORTS
from CommonServerPython import * # noqa: F401
# Disable insecure warnings
import urllib3
urllib3.disable_warnings()
"""PARAMETERS"""
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
def __init__(self, token_retrieval_url, data, app_id, use_ssl, proxy):
headers = {'X-CENTRIFY-NATIVE-CLIENT': 'true'}
super().__init__(base_url=token_retrieval_url, headers=headers, verify=use_ssl, proxy=proxy)
self.payload = data
self.app_id = app_id
def http_request(self, *args, headers=None, **kwargs):
"""
Overrides Base client request function, retrieves and adds to headers access token before sending the request.
Returns:
requests.Response: The http response
"""
bearer_token = "Bearer " + str(self.authenticate_oauth())
default_headers = {
'content-type': 'application/json',
'Authorization': bearer_token,
'X-CENTRIFY-NATIVE-CLIENT': 'true'
}
if headers:
default_headers.update(headers)
return super()._http_request(*args, headers=default_headers, **kwargs) # type: ignore[misc]
def authenticate_oauth(self):
"""
Login using the credentials and store the cookie
"""
integration_context = demisto.getIntegrationContext()
bearer_token = integration_context.get('bearer_token')
valid_until = integration_context.get('valid_until')
time_now = int(time.time())
if bearer_token and valid_until:
if time_now < valid_until:
# Bearer Token is still valid - did not expire yet
return bearer_token
response = self.get_token_request()
bearer_token = response.get('access_token')
t = time.time()
expiration_time = t + 1800
integration_context = {
'bearer_token': bearer_token,
'valid_until': expiration_time # Assuming the expiration time is 30 minutes
}
demisto.setIntegrationContext(integration_context)
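        # Illustrative cached shape (assumption): {'bearer_token': '<opaque token>', 'valid_until': 1699999999.0}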
return bearer_token
def get_token_request(self):
"""
Sends token request
:rtype ``str``
:return: bearer token
"""
urlSuffix = '/oauth2/token/' + self.app_id
fullUrl = f'{self._base_url}{urlSuffix}'
body = self.payload
headers = {
'X-CENTRIFY-NATIVE-CLIENT': 'true'
}
token_response = self._http_request(method='POST', full_url=fullUrl,
url_suffix='', data=body, headers=headers)
if not token_response:
err_msg = 'Authorization Error: User has no authorization to create a token.' \
' Please make sure you entered the credentials correctly.'
raise Exception(err_msg)
return token_response
def request_secret_set_id(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_set_details(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_delete_set(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_create_set(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_fetch_folderids(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_fetch_secret_folder_id(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_delete_folder(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_create_folder(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_fetch_secret(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_fetch_secretids_set(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_fetch_secretids_folder(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_delete_secret(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_add_secret_set(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
def request_create_secret(self, url_suffix, data):
return self.http_request(method="POST", url_suffix=url_suffix, json_data=data)
"""Demisto Output Entry"""
def create_entry(title, data):
md = tableToMarkdown(title, data, ['FolderName', 'SecretName', 'SecretText', 'SecretType', 'SecretDescription'])\
if data else 'No result were found'
if data:
ec = {'Centrify.Secrets(val.SecretName && val.SecretName == obj.SecretName && val.FolderName &&'
' val.FolderName == obj.FolderName)': data}
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': data,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'No secrets were found'
def test_module(client: Client):
"""test function
Args:
client:
Returns:
ok if successful
"""
try:
client.authenticate_oauth()
except Exception as e:
raise DemistoException(
f"Test failed. Please check your parameters. \n {e}")
return 'ok'
"""Fetches the Centrify set id for the setname provided"""
def fetch_secret_set_id(client: Client, setName):
urlSuffix = '/Collection/GetObjectCollectionsAndFilters'
payload = {"ObjectType": "DataVault", "CollectionType": "ManualBucket"}
centrify_setid_response = client.request_secret_set_id(url_suffix=urlSuffix, data=payload)
for set_item in centrify_setid_response.get('Result').get('Results'):
if set_item.get('Row').get('Name') == setName:
return set_item.get('Row').get('ID')
return "set name not found"
"""Fetches the secret id's list for the setid provided"""
def fetch_secretids_set(client: Client, set_id, secret_ids_list):
urlSuffix = '/Collection/GetMembers'
payload = {"ID": set_id}
centrify_secretids_response = client.request_fetch_secretids_set(url_suffix=urlSuffix, data=payload)
for secret_id_item in centrify_secretids_response.get("Result"):
secret_ids_list.append(secret_id_item['Key'])
return secret_ids_list
"""Fetches the Centrify folder id for the foldername provided"""
def fetch_secret_folder_id(client: Client, folderName):
urlSuffix = '/ServerManage/GetSecretFolder'
payload = {"Name": folderName}
centrify_folderid_response = client.request_fetch_secret_folder_id(url_suffix=urlSuffix, data=payload)
return centrify_folderid_response.get('Result').get('Results')[0].get('Row').get('ID')
"""Fetches the secret id's list recurrsively for the folderid provided"""
def fetch_secretids_folder(client: Client, folder_id, secret_ids_list, recursive):
urlSuffix = '/ServerManage/GetSecretsAndFolders'
payload = {"Parent": folder_id}
centrify_secretids_response = client.request_fetch_secretids_folder(url_suffix=urlSuffix, data=payload)
secret_ids_count = centrify_secretids_response.get('Result').get('FullCount')
for secret_id_item in range(secret_ids_count):
if centrify_secretids_response.get('Result').get('Results')[secret_id_item].get('Row').get('Type') == 'Text':
secret_ids_list.append(centrify_secretids_response.get('Result').get('Results')[secret_id_item].get('Row').get('ID'))
else:
if recursive:
sub_folder_id = centrify_secretids_response.get('Result').get('Results')[secret_id_item].get('Row').get('ID')
fetch_secretids_folder(client, sub_folder_id, secret_ids_list, recursive)
else:
pass
return secret_ids_list
"""Fetches details of all the sets in Centrify Vault"""
def fetch_set_details(client: Client, set_details_list):
urlSuffix = '/Collection/GetObjectCollectionsAndFilters'
payload = {"ObjectType": "DataVault", "CollectionType": "ManualBucket"}
centrify_setdetails_response = client.request_set_details(url_suffix=urlSuffix, data=payload)
centrify_setdetails_response = centrify_setdetails_response.get('Result').get('Results')
for set_item in centrify_setdetails_response:
if 'Description' not in set_item['Row']:
set_description = ""
else:
set_description = set_item['Row']['Description']
set_details_list.append({'SetName': set_item['Row']['Name'], 'SetID': set_item['Row']['ID'],
'SetDescription': set_description})
return set_details_list
"""Fetches the centrify secret details for the secret response received through the fetch_secret() method"""
def centrify_secret_details(centrify_secret):
CENTRIFY_VAULT = {}
CENTRIFY_VAULT['FolderName'] = centrify_secret.get('Result').get('ParentPath')
CENTRIFY_VAULT['SecretName'] = centrify_secret.get('Result').get('SecretName')
CENTRIFY_VAULT['SecretID'] = centrify_secret.get('Result').get('_RowKey')
CENTRIFY_VAULT['SecretText'] = centrify_secret.get('Result').get('SecretText')
CENTRIFY_VAULT['SecretType'] = centrify_secret.get('Result').get('Type')
if 'Description' in centrify_secret.get('Result'):
CENTRIFY_VAULT['SecretDescription'] = centrify_secret.get('Result').get('Description')
else:
CENTRIFY_VAULT['SecretDescription'] = ''
return CENTRIFY_VAULT
"""Fetches the centrify secret details for the secret id and name(optional) provided"""
def fetch_secret(client: Client, secret_id, secret_name, regex_match):
urlSuffix = '/ServerManage/RetrieveDataVaultItemContents'
payload = {"ID": secret_id}
centrify_secret_response = client.request_fetch_secret(url_suffix=urlSuffix, data=payload)
if secret_name:
if regex_match:
if re.search(secret_name, centrify_secret_response.get('Result').get('SecretName'), re.IGNORECASE):
CENTRIFY_VAULT = centrify_secret_details(centrify_secret_response)
else:
return None
else:
if secret_name == centrify_secret_response.get('Result').get('SecretName'):
CENTRIFY_VAULT = centrify_secret_details(centrify_secret_response)
else:
return None
else:
CENTRIFY_VAULT = centrify_secret_details(centrify_secret_response)
return CENTRIFY_VAULT
"""Fetches details of all folders in list recurrsively"""
def fetch_folderids(client: Client, folder_id, folders_list):
urlSuffix = '/ServerManage/GetSecretsAndFolders'
payload = {"Parent": folder_id}
centrify_folderids_response = client.request_fetch_folderids(url_suffix=urlSuffix, data=payload)
folder_ids_count = centrify_folderids_response.get('Result').get('FullCount')
for folder_id_item in range(folder_ids_count):
if centrify_folderids_response.get('Result').get('Results')[folder_id_item].get('Row').get('Type') == 'Folder':
folder_res = centrify_folderids_response.get('Result').get('Results')[folder_id_item].get('Row')
if folder_res.get('ParentPath'):
folder_directory = folder_res.get('ParentPath') + "\\" + folder_res.get('Name')
else:
folder_directory = folder_res.get('Name')
folders_list.append({"FolderName": folder_res.get('Name'), "FolderID": folder_res.get('ID'),
"ParentFolder": folder_res.get('ParentPath'),
"FolderDescription": folder_res.get('Description'), "FolderDirectory": folder_directory})
sub_folder_id = folder_res.get('ID')
fetch_folderids(client, sub_folder_id, folders_list)
else:
pass
return folders_list
"""Creates a centrify folder for the foldername, description and parent foldername(optional) provided"""
def create_folder(client: Client, folderName, description, parent_id):
urlSuffix = '/ServerManage/AddSecretsFolder'
payload = {"Name": folderName, "Description": description, "Parent": parent_id}
centrify_folder_response = client.request_create_folder(url_suffix=urlSuffix, data=payload)
if centrify_folder_response.get('success') is True:
return "Folder Created", centrify_folder_response.get('Result')
else:
return centrify_folder_response.get("MessageID"), "No Folder ID"
"""Creates a centrify set for the setname provided"""
def create_set(client: Client, setName, description):
urlSuffix = '/Collection/CreateManualCollection'
payload = {"ObjectType": "DataVault", "Name": setName, "Description": description}
centrify_set_response = client.request_create_set(url_suffix=urlSuffix, data=payload)
if centrify_set_response.get('success') is True:
return "Set Created", centrify_set_response.get('Result')
else:
return centrify_set_response.get("Message"), "No Set ID"
"""Creates a centrify secret in the folder for the provided foldername, secrettext, secrettype"""
def create_secret(client: Client, folderId, secret_name, secret_text, secret_type, secret_description):
urlSuffix = '/ServerManage/AddSecret'
payload = {"SecretName": secret_name, "SecretText": secret_text, "Type": secret_type,
"FolderId": folderId, "Description": secret_description}
centrify_secret_response = client.request_create_secret(url_suffix=urlSuffix, data=payload)
if centrify_secret_response.get('success') is True:
return "Secret Created", centrify_secret_response.get('Result')
else:
return centrify_secret_response.get("MessageID"), "No Secret ID"
"""Adds a secret to the set for the provided setid, secretid"""
def add_secret_set(client: Client, setId, secretId):
urlSuffix = '/Collection/UpdateMembersCollection'
payload = {"id": setId, "add": [{"MemberType": "Row", "Table": "DataVault", "Key": secretId}]}
add_secretset_response = client.request_add_secret_set(url_suffix=urlSuffix, data=payload)
if add_secretset_response.get('success') is True:
return "Secret added to the set"
else:
return "Failed to add secret to the set"
"""deletes a folder from the vault for the provided folderid"""
def delete_folder(client: Client, folderId):
urlSuffix = '/ServerManage/DeleteSecretsFolder'
payload = {"ID": folderId}
delete_folder_response = client.request_delete_folder(url_suffix=urlSuffix, data=payload)
if delete_folder_response.get('success') is True:
return "Folder Deleted"
else:
return "Failed to delete the folder"
"""deletes a set from the vault for the provided setid"""
def delete_set(client: Client, setId):
urlSuffix = '/Collection/DeleteCollection'
payload = {"ID": setId}
delete_set_response = client.request_delete_set(url_suffix=urlSuffix, data=payload)
if delete_set_response.get('success') is True:
return "Set Deleted"
else:
return "Failed to delete the Set"
"""deletes a secret the vault for the provided secretid"""
def delete_secret(client: Client, secretId):
urlSuffix = '/ServerManage/DeleteSecret'
payload = {"ID": secretId}
delete_secret_response = client.request_delete_secret(url_suffix=urlSuffix, data=payload)
if delete_secret_response.get('success') is True:
return "Secret Deleted"
else:
return "Failed to delete the Secret"
def fetch_secrets(args: dict, client: Client):
try:
holder_type = args.get('holderType')
secret_name = args.get('secretName')
secret_ids_list: list = []
if holder_type == 'Set':
set_name = args.get('holderName')
setId = fetch_secret_set_id(client, set_name)
if setId == 'set name not found':
return_error("Set name not found. Please provide a valid set name")
else:
secret_ids_list = fetch_secretids_set(client, setId, secret_ids_list)
elif holder_type == 'Folder':
folder_name = args.get('holderName')
if folder_name:
folder_id = fetch_secret_folder_id(client, folder_name)
secret_ids_list = fetch_secretids_folder(client, folder_id, secret_ids_list, True)
else:
folder_id = ""
secret_ids_list = fetch_secretids_folder(client, folder_id, secret_ids_list, True)
else:
folder_id = ""
secret_ids_list = fetch_secretids_folder(client, folder_id, secret_ids_list, True)
secret_list = list()
for secret_id in secret_ids_list:
secret_list.append(fetch_secret(client, secret_id, secret_name, True))
secret_list = list(filter(None, secret_list))
return create_entry('Secrets in the Folder/Set', secret_list)
except Exception as e:
return_error("Wrong inputs: Please enter valid foldername/secretname/setname: ", e)
def fetch_secret_by_id(args: dict, client: Client):
try:
secret_id = args.get('secretId')
secret_list: list = []
secret_list.append(fetch_secret(client, secret_id, None, None))
return create_entry('Secrets through the Secret ID', secret_list)
except Exception as e:
return_error("Wrong inputs: ", e)
def create_secret_folder(args: dict, client: Client):
try:
folder_name = args.get('folderName')
parent_folder_name = args.get('parentFolderName')
folder_description = args.get('folderDescription')
if not folder_description:
folder_description = ""
if parent_folder_name:
parent_folder_id = fetch_secret_folder_id(client, parent_folder_name)
else:
parent_folder_id = ""
status, folder_id = create_folder(client, folder_name, folder_description, parent_folder_id)
if status == "Folder Created":
CENTRIFY_VAULT = {}
CENTRIFY_VAULT['FolderName'] = folder_name
CENTRIFY_VAULT['FolderID'] = folder_id
CENTRIFY_VAULT['ParentFolderName'] = parent_folder_name
if folder_description:
CENTRIFY_VAULT['FolderDescription'] = folder_description
else:
CENTRIFY_VAULT['FolderDescription'] = ''
fcreate = [CENTRIFY_VAULT]
md = tableToMarkdown(status, fcreate, ['FolderName', 'FolderID', 'ParentFolderName', 'FolderDescription'])\
if fcreate else 'No result were found'
ec = {'Centrify.Folder(val.FolderName && val.FolderName == obj.FolderName && val.ParentFolderName &&'
' val.ParentFolderName == obj.ParentFolderName)': fcreate}
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': fcreate,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'No result were found: ' + status
except Exception as e:
return_error("Wrong inputs: ", e)
def create_vault_secret(args: dict, client: Client):
try:
holder_type = args.get('holderType')
secret_name = args.get('secretName')
secret_text = args.get('secretText')
secret_type = args.get('secretType')
secret_description = args.get('secretDescription')
if not secret_description:
secret_description = ""
folder_id = ""
if holder_type == 'Folder':
folder_name = args.get('holderName')
folder_id = fetch_secret_folder_id(client, folder_name)
else:
setId_list = list()
set_name_list = list()
if ';' in str(args.get('holderName')):
set_name_list = str(args.get('holderName')).split(';')
for set_item in set_name_list:
set_name = set_item
set_id = fetch_secret_set_id(client, set_name)
setId_list.append(set_id)
else:
set_name = str(args.get('holderName'))
set_name_list.append(set_name)
setId_list.append(fetch_secret_set_id(client, set_name))
if 'set name not found' in setId_list:
return_error("Set name not found. Please provide a valid set name")
status, secret_id = create_secret(client, folder_id, secret_name, secret_text, secret_type, secret_description)
if status == "Secret Created":
CENTRIFY_VAULT = {}
CENTRIFY_VAULT['holderType'] = holder_type
if holder_type == 'Folder':
CENTRIFY_VAULT['FolderName'] = folder_name
CENTRIFY_VAULT['FolderID'] = folder_id
else:
CENTRIFY_VAULT['SetName'] = set_name_list
CENTRIFY_VAULT['SetID'] = setId_list
for set_id in setId_list:
add_secret_set(client, set_id, secret_id)
CENTRIFY_VAULT['SecretName'] = secret_name
CENTRIFY_VAULT['SecretID'] = secret_id
CENTRIFY_VAULT['SecretType'] = secret_type
CENTRIFY_VAULT['SecretDescription'] = secret_description
screate = [CENTRIFY_VAULT]
md = tableToMarkdown(status, screate,
['SecretName', 'FolderName', 'SetName', 'SecretType', 'SecretID', 'SecretDescription'])\
if screate else 'No result were found'
ec = {'Centrify.Secrets(val.SecretID && val.SecretID == obj.SecretID)': screate}
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': screate,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'No result were found: ' + status
except Exception as e:
return_error("Wrong inputs. Please provide valid foldername/setname ", e)
def create_vault_set(args: dict, client: Client):
try:
set_name = args.get('setName')
set_description = args.get('setDescription')
if not set_description:
set_description = ""
status, set_id = create_set(client, set_name, set_description)
if status == "Set Created":
CENTRIFY_VAULT = {}
CENTRIFY_VAULT['SetName'] = set_name
CENTRIFY_VAULT['SetID'] = set_id
if set_description:
CENTRIFY_VAULT['SetDescription'] = set_description
else:
CENTRIFY_VAULT['SetDescription'] = ''
set_create = [CENTRIFY_VAULT]
md = tableToMarkdown(status, set_create, ['SetName', 'SetID', 'SetDescription']) \
if set_create else 'No result were found'
ec = {'Centrify.Set(val.SetID && val.SetID == obj.SetID)': set_create}
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': set_create,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'No result were found: ' + status
except Exception as e:
return_error(e)
def fetch_vault_folders(args: dict, client: Client):
try:
folders_list: list = []
folders_list = fetch_folderids(client, "", folders_list)
if folders_list:
md = tableToMarkdown("List of all folders", folders_list,
['FolderName', 'FolderID', 'ParentFolder', 'FolderDescription',
'FolderDirectory']) if folders_list else 'No result were found'
ec = {'Centrify.Folder(val.FolderID && val.FolderID == obj.FolderID)': folders_list}
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': folders_list,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'No result were found: No folders found'
except Exception as e:
return_error(e)
def fetch_vault_set(args: dict, client: Client):
try:
set_details_list: list = []
set_details_list = fetch_set_details(client, set_details_list)
if set_details_list:
md = tableToMarkdown("List of all sets", set_details_list, ['SetName', 'SetID', 'SetDescription'])\
if set_details_list else 'No result were found'
ec = {'Centrify.Set(val.SetID && val.SetID == obj.SetID)': set_details_list}
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': set_details_list,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md,
'EntryContext': ec
}
else:
return 'No result were found: No sets found'
except Exception as e:
return_error(e)
def add_vault_secret_set(args: dict, client: Client):
try:
set_name = args.get('setName')
secret_id = args.get('secretId')
set_id = fetch_secret_set_id(client, set_name)
if set_id == "set name not found":
return_error("Set name not found. Please provide a valid set name")
else:
status = add_secret_set(client, set_id, secret_id)
return status
except Exception as e:
return_error(e)
"""New code started"""
def delete_vault_secret(args: dict, client: Client):
try:
folder_name = args.get('folderName')
secret_name = args.get('secretName')
recursive_delete = args.get('recursiveDelete')
regex_match = args.get('matchPartOfSecret')
if regex_match == "Yes":
regex_match = True
else:
regex_match = False
if folder_name:
folder_id = fetch_secret_folder_id(client, folder_name)
else:
folder_id = ""
secret_ids_list: list = []
if recursive_delete == "Yes":
recursive_delete = True
else:
recursive_delete = False
secret_ids_list = fetch_secretids_folder(client, folder_id, secret_ids_list, recursive_delete)
delete_secret_id_list: list = []
for secret_id in secret_ids_list:
secret_item = fetch_secret(client, secret_id, secret_name, regex_match)
if secret_item:
delete_secret(client, secret_item.get('SecretID'))
delete_secret_id_list.append(secret_item)
if delete_secret_id_list:
md = tableToMarkdown("List of Secrets deleted", delete_secret_id_list, ['SecretName', 'SecretID', 'FolderName'])\
if delete_secret_id_list else 'No secrets were deleted'
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': delete_secret_id_list,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md
}
else:
return 'No result were found: No secrets were deleted'
except Exception as e:
return_error("Please enter a valid secretname/foldername: ", e)
def delete_vault_secretid(args: dict, client: Client):
try:
secret_id = args.get('secretId')
delete_secret_id_list = list()
delete_secret_id_list.append(fetch_secret(client, secret_id, None, None))
delete_secret(client, secret_id)
if delete_secret_id_list:
md = tableToMarkdown("Secrets deleted", delete_secret_id_list, ['SecretName', 'SecretID', 'FolderName'])\
if delete_secret_id_list else 'No secrets were deleted'
return {
'ContentsFormat': formats['json'],
'Type': entryTypes['note'],
'Contents': delete_secret_id_list,
'ReadableContentsFormat': formats['markdown'],
'HumanReadable': md
}
else:
return 'No result were found: No secrets were deleted'
except Exception as e:
return_error("Please enter a valid secretname/foldername: ", e)
def delete_vault_folder(args: dict, client: Client):
try:
folder_name = args.get('folderName')
parent_name = args.get('parentFolderName')
if parent_name:
folder_name = parent_name + "/" + folder_name
folder_id = fetch_secret_folder_id(client, folder_name)
delete_status = delete_folder(client, folder_id)
if delete_status == "Folder Deleted":
return str(folder_name) + " : " + str(delete_status)
else:
return 'No result were found: No folders found to be deleted'
except Exception as e:
return_error("Please enter a valid foldername: ", e)
def delete_vault_set(args: dict, client: Client):
try:
set_name = args.get('setName')
set_id = fetch_secret_set_id(client, set_name)
if set_id == "set name not found":
return 'No result were found: Please enter a valid setname'
else:
delete_status = delete_set(client, set_id)
if delete_status == "Set Deleted":
return str(set_name) + " : " + str(delete_status)
else:
return 'No result were found: No sets found to be deleted'
except Exception as e:
return_error("Please enter a valid setname: ", e)
def main():
"""
PARSE AND VALIDATE INTEGRATION PARAMS
"""
tenant_url = demisto.params().get('tenantUrl', '').rstrip('/')
client_id = demisto.params().get('clientId')
client_secret = demisto.params().get('clientSecret')
scope = demisto.params().get('scope')
app_id = demisto.params().get('appId')
verify_certificate = demisto.params()['insecure'] is False
proxy = demisto.params().get('proxy', False)
payload = {'grant_type': 'client_credentials', 'client_id': client_id, 'client_secret': client_secret, 'scope': scope}
try:
client = Client(
tenant_url,
payload,
app_id,
verify_certificate,
proxy)
command = demisto.command()
args = demisto.args()
LOG(f'Command being called is {command}.')
if command == 'test-module':
result = test_module(client)
elif demisto.command() == 'centrify-retrieve-secrets':
result = fetch_secrets(args, client)
elif demisto.command() == 'centrify-retrieve-secret-by-secretid':
result = fetch_secret_by_id(args, client)
elif demisto.command() == 'centrify-create-secretfolder':
result = create_secret_folder(args, client)
elif demisto.command() == 'centrify-create-secret':
result = create_vault_secret(args, client)
elif demisto.command() == 'centrify-create-set':
result = create_vault_set(args, client)
elif demisto.command() == 'centrify-retrieve-folders':
result = fetch_vault_folders(args, client)
elif demisto.command() == 'centrify-delete-folder':
result = delete_vault_folder(args, client)
elif demisto.command() == 'centrify-delete-secret':
result = delete_vault_secret(args, client)
elif demisto.command() == 'centrify-delete-secret-by-secretid':
result = delete_vault_secretid(args, client)
elif demisto.command() == 'centrify-add-secret-to-set':
result = add_vault_secret_set(args, client)
elif command == 'centrify-retrieve-sets':
result = fetch_vault_set(args, client)
elif demisto.command() == 'centrify-delete-set':
result = delete_vault_set(args, client)
demisto.results(result)
# Log exceptions
except Exception as e:
return_error(f'Failed to execute {demisto.command()} command. Error: {str(e)}')
if __name__ in ('__main__', 'builtins'):
main()
| [
"[email protected]"
]
| |
e5ae2857aa612fac99f501a0c3d3e03e657b5170 | 48e124e97cc776feb0ad6d17b9ef1dfa24e2e474 | /sdk/python/pulumi_azure_native/apimanagement/v20210801/get_subscription.py | ee1f34043a24f2ee42bd142ed298046f34bc485b | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | bpkgoud/pulumi-azure-native | 0817502630062efbc35134410c4a784b61a4736d | a3215fe1b87fba69294f248017b1591767c2b96c | refs/heads/master | 2023-08-29T22:39:49.984212 | 2021-11-15T12:43:41 | 2021-11-15T12:43:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,418 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
__all__ = [
'GetSubscriptionResult',
'AwaitableGetSubscriptionResult',
'get_subscription',
'get_subscription_output',
]
@pulumi.output_type
class GetSubscriptionResult:
"""
Subscription details.
"""
def __init__(__self__, allow_tracing=None, created_date=None, display_name=None, end_date=None, expiration_date=None, id=None, name=None, notification_date=None, owner_id=None, primary_key=None, scope=None, secondary_key=None, start_date=None, state=None, state_comment=None, type=None):
if allow_tracing and not isinstance(allow_tracing, bool):
raise TypeError("Expected argument 'allow_tracing' to be a bool")
pulumi.set(__self__, "allow_tracing", allow_tracing)
if created_date and not isinstance(created_date, str):
raise TypeError("Expected argument 'created_date' to be a str")
pulumi.set(__self__, "created_date", created_date)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if end_date and not isinstance(end_date, str):
raise TypeError("Expected argument 'end_date' to be a str")
pulumi.set(__self__, "end_date", end_date)
if expiration_date and not isinstance(expiration_date, str):
raise TypeError("Expected argument 'expiration_date' to be a str")
pulumi.set(__self__, "expiration_date", expiration_date)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if notification_date and not isinstance(notification_date, str):
raise TypeError("Expected argument 'notification_date' to be a str")
pulumi.set(__self__, "notification_date", notification_date)
if owner_id and not isinstance(owner_id, str):
raise TypeError("Expected argument 'owner_id' to be a str")
pulumi.set(__self__, "owner_id", owner_id)
if primary_key and not isinstance(primary_key, str):
raise TypeError("Expected argument 'primary_key' to be a str")
pulumi.set(__self__, "primary_key", primary_key)
if scope and not isinstance(scope, str):
raise TypeError("Expected argument 'scope' to be a str")
pulumi.set(__self__, "scope", scope)
if secondary_key and not isinstance(secondary_key, str):
raise TypeError("Expected argument 'secondary_key' to be a str")
pulumi.set(__self__, "secondary_key", secondary_key)
if start_date and not isinstance(start_date, str):
raise TypeError("Expected argument 'start_date' to be a str")
pulumi.set(__self__, "start_date", start_date)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if state_comment and not isinstance(state_comment, str):
raise TypeError("Expected argument 'state_comment' to be a str")
pulumi.set(__self__, "state_comment", state_comment)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="allowTracing")
def allow_tracing(self) -> Optional[bool]:
"""
Determines whether tracing is enabled
"""
return pulumi.get(self, "allow_tracing")
@property
@pulumi.getter(name="createdDate")
def created_date(self) -> str:
"""
Subscription creation date. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "created_date")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
The name of the subscription, or null if the subscription has no name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter(name="endDate")
def end_date(self) -> Optional[str]:
"""
Date when subscription was cancelled or expired. The setting is for audit purposes only and the subscription is not automatically cancelled. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "end_date")
@property
@pulumi.getter(name="expirationDate")
def expiration_date(self) -> Optional[str]:
"""
Subscription expiration date. The setting is for audit purposes only and the subscription is not automatically expired. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "expiration_date")
@property
@pulumi.getter
def id(self) -> str:
"""
Fully qualified resource ID for the resource. Ex - /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="notificationDate")
def notification_date(self) -> Optional[str]:
"""
Upcoming subscription expiration notification date. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "notification_date")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> Optional[str]:
"""
The user resource identifier of the subscription owner. The value is a valid relative URL in the format of /users/{userId} where {userId} is a user identifier.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="primaryKey")
def primary_key(self) -> Optional[str]:
"""
Subscription primary key. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
"""
return pulumi.get(self, "primary_key")
@property
@pulumi.getter
def scope(self) -> str:
"""
Scope like /products/{productId} or /apis or /apis/{apiId}.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter(name="secondaryKey")
def secondary_key(self) -> Optional[str]:
"""
Subscription secondary key. This property will not be filled on 'GET' operations! Use '/listSecrets' POST request to get the value.
"""
return pulumi.get(self, "secondary_key")
@property
@pulumi.getter(name="startDate")
def start_date(self) -> Optional[str]:
"""
Subscription activation date. The setting is for audit purposes only and the subscription is not automatically activated. The subscription lifecycle can be managed by using the `state` property. The date conforms to the following format: `yyyy-MM-ddTHH:mm:ssZ` as specified by the ISO 8601 standard.
"""
return pulumi.get(self, "start_date")
@property
@pulumi.getter
def state(self) -> str:
"""
Subscription state. Possible states are * active – the subscription is active, * suspended – the subscription is blocked, and the subscriber cannot call any APIs of the product, * submitted – the subscription request has been made by the developer, but has not yet been approved or rejected, * rejected – the subscription request has been denied by an administrator, * cancelled – the subscription has been cancelled by the developer or administrator, * expired – the subscription reached its expiration date and was deactivated.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter(name="stateComment")
def state_comment(self) -> Optional[str]:
"""
Optional subscription comment added by an administrator when the state is changed to the 'rejected'.
"""
return pulumi.get(self, "state_comment")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
class AwaitableGetSubscriptionResult(GetSubscriptionResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetSubscriptionResult(
allow_tracing=self.allow_tracing,
created_date=self.created_date,
display_name=self.display_name,
end_date=self.end_date,
expiration_date=self.expiration_date,
id=self.id,
name=self.name,
notification_date=self.notification_date,
owner_id=self.owner_id,
primary_key=self.primary_key,
scope=self.scope,
secondary_key=self.secondary_key,
start_date=self.start_date,
state=self.state,
state_comment=self.state_comment,
type=self.type)
def get_subscription(resource_group_name: Optional[str] = None,
service_name: Optional[str] = None,
sid: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetSubscriptionResult:
"""
Subscription details.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
:param str sid: Subscription entity Identifier. The entity represents the association between a user and a product in API Management.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['serviceName'] = service_name
__args__['sid'] = sid
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:apimanagement/v20210801:getSubscription', __args__, opts=opts, typ=GetSubscriptionResult).value
return AwaitableGetSubscriptionResult(
allow_tracing=__ret__.allow_tracing,
created_date=__ret__.created_date,
display_name=__ret__.display_name,
end_date=__ret__.end_date,
expiration_date=__ret__.expiration_date,
id=__ret__.id,
name=__ret__.name,
notification_date=__ret__.notification_date,
owner_id=__ret__.owner_id,
primary_key=__ret__.primary_key,
scope=__ret__.scope,
secondary_key=__ret__.secondary_key,
start_date=__ret__.start_date,
state=__ret__.state,
state_comment=__ret__.state_comment,
type=__ret__.type)
@_utilities.lift_output_func(get_subscription)
def get_subscription_output(resource_group_name: Optional[pulumi.Input[str]] = None,
service_name: Optional[pulumi.Input[str]] = None,
sid: Optional[pulumi.Input[str]] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetSubscriptionResult]:
"""
Subscription details.
:param str resource_group_name: The name of the resource group.
:param str service_name: The name of the API Management service.
:param str sid: Subscription entity Identifier. The entity represents the association between a user and a product in API Management.
"""
...
| [
"[email protected]"
]
| |
a588d3e28d3956602608435b88c2bdcd3d980823 | 4dbd12da17cc45a5482afc8cea02051e798731a9 | /venv/Scripts/django-admin.py | 9c4c65edbd574a17c31f9ba77b2beca874b75b41 | []
| no_license | tsicroxe/django_projects | 71b9bec6d834f53fde892606799b4bc96ba45a91 | c11036c78d120e5ffa51055e2999dbe05b0d36eb | refs/heads/master | 2021-01-11T07:03:53.045558 | 2016-12-07T20:46:05 | 2016-12-07T20:46:05 | 71,937,705 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | #!c:\users\bani_\desktop\codingdojo\djangoprojects\django_test\venv\scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
]
| |
8ecd5eea7039d1b701170a32d86388d3b651d501 | 68ee9027d4f780e1e5248a661ccf08427ff8d106 | /extra/unused/aggregate_icesat.py | aa9940239df97213a6bb5453b7674695ac207a66 | [
"MIT"
]
| permissive | whyjz/CARST | 87fb9a6a62d39fd742bb140bddcb95a2c15a144c | 4fc48374f159e197fa5a9dbf8a867b0a8e0aad3b | refs/heads/master | 2023-05-26T20:27:38.105623 | 2023-04-16T06:34:44 | 2023-04-16T06:34:44 | 58,771,687 | 17 | 4 | MIT | 2021-03-10T01:26:04 | 2016-05-13T20:54:42 | Python | UTF-8 | Python | false | false | 4,152 | py | #!/usr/bin/python
# aggregate_icesat.py
# Author: Andrew Kenneth Melkonian
# All rights reserved
def aggregate_icesat(icesat_path, input_dem_xyz_txt_dir, input_dem_xyz_txt_identifier, output_label):
import os;
assert os.path.exists(icesat_path), "\n***** ERROR: " + icesat_path + " does not exist, exiting...\n";
assert os.path.exists(input_dem_xyz_txt_dir), "\n***** ERROR: " + input_dem_xyz_txt_dir + " does not exist, exiting...\n";
max_elev = "1520";
min_elev = "-100";
interval = 120.;
icesat_unc = "0.5";
coords = {};
xy = "";
import re;
infile = open(icesat_path, "r");
for line in infile:
elements = line.split();
if len(elements) > 2 and elements[2].find("NaN") < 0:
x = elements[0].strip();
y = elements[1].strip();
x = x[ : re.search("0*$",x).start(0)];
y = y[ : re.search("0*$",y).start(0)];
if float(elements[5]) > float(max_elev):
continue;
elif float(elements[5]) <= float(min_elev):
continue;
xy = x + " " + y;
if xy not in coords:
coords[xy] = "";
coords[xy] = coords[xy] + xy + " " + elements[2].strip() + " " + elements[3].strip() + " " + elements[4].strip() + "\n";
infile.close();
contents = os.listdir(input_dem_xyz_txt_dir);
input_dem_xyz_txt_names = [item for item in contents if re.search(".*" + input_dem_xyz_txt_identifier + "\.txt$", item)];
for item in input_dem_xyz_txt_names:
if re.search(icesat_path[icesat_path.rfind("/") + 1 : ], input_dem_xyz_txt_dir + "/" + item):
continue;
infile = open(input_dem_xyz_txt_dir + "/" + item, "r");
for line in infile:
elements = line.split();
if len(elements) > 2 and elements[2].find("NaN") < 0:
x = elements[0].strip();
y = elements[1].strip();
x = x[ : re.search("0*$",x).start(0)];
y = y[ : re.search("0*$",y).start(0)];
if float(elements[2]) > float(max_elev):
continue;
elif float(elements[2]) <= float(min_elev):
continue;
xy = x + " " + y;
if xy not in coords:
continue;
# coords[xy] = "";
coords[xy] = coords[xy] + xy + " " + elements[2].strip() + " " + elements[3].strip() + " " + elements[4].strip() + "\n";
infile.close();
# import math;
# import subprocess;
# x_ref, y_ref = xy.split();
# infile = open(icesat_path, "r");
# for line in infile:
# if line.find("# @D") > -1:
# elements = line.split("|");
# date = elements[0];
# x = elements[3];
# y = elements[4];
# h_ell = elements[5];
# new_x = str(float(math.ceil((float(x) - float(x_ref)) / interval)) * interval + float(x_ref));
# new_y = str(float(math.ceil((float(y) - float(y_ref)) / interval)) * interval + float(y_ref));
# xy = new_x + " " + new_y;
# year = date[4:8];
# month = date[8:10];
# day = date[10:12];
# hour = "12";
# minute = "00";
# second = "00";
# cmd = "\ndate +\"%s\" -d \"" + year + "-" + month + "-" + day + " " + hour + ":" + minute + ":" + second + "\"\n";
# pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
# secs = pipe.read().strip();
# pipe.close();
# cmd = "\ndate +\"%s\" -d \"" + year + "-01-01 00:00:00\"\n";
# pipe = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
# year_secs = pipe.read().strip();
# pipe.close();
# date = str(float(year) + (float(secs) - float(year_secs)) / (24.0 * 60.0 * 60.0 * 365.25));
# if xy not in coords:
# coords[xy] = "";
# coords[xy] = coords[xy] + xy + " " + h_ell + " " + date + " " + icesat_unc + "\n";
# infile.close();
outfile = open(output_label + ".txt", "w");
for xy in coords:
outfile.write(coords[xy]);
outfile.write(">\n");
outfile.close();
return;
if __name__ == "__main__":
import os;
import sys;
assert len(sys.argv) > 4, "\n***** ERROR: aggregate_icesat.py requires 4 arguments, " + str(len(sys.argv) - 1) + " given\n";
assert os.path.exists(sys.argv[1]), "\n***** ERROR: " + sys.argv[1] + " does not exist\n";
assert os.path.exists(sys.argv[2]), "\n***** ERROR: " + sys.argv[2] + " does not exist\n";
aggregate_icesat(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]);
exit();
| [
"[email protected]"
]
| |
cb82f5cc168695fdf732dccb1eb23dab8368ac8f | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_340/ch18_2020_03_24_20_35_15_928242.py | 929b2954cd5a46a52474fe5ed86685430753dd03 | []
| no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | def verifica_idade(idade):
    if idade < 18:
        return "Not allowed"
    elif idade >= 18 and idade < 21:
        return "Allowed in BRAZIL"
    else:
        return "Allowed in the USA and BRAZIL" | [
"[email protected]"
]
| |
b87d7438f6510080b17e0f5f71038725daf202f8 | 3917a54838a67dafd6b17aa2cba51144f3f242d0 | /demo/amrparsing/vw_pred_to_amr.py | 7d0fff434814f3cfacb9ab73e8309d77623f372c | []
| no_license | raosudha89/vowpal_wabbit | 29e25533113a33a39b64ccacbbef5452e41590a8 | 03e973838e022149d802ec3f5e2817dcbc9019d5 | refs/heads/master | 2021-01-21T13:29:27.941872 | 2016-06-03T01:56:25 | 2016-06-03T01:56:25 | 43,918,094 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,055 | py | import sys, os
import networkx as nx
import pdb
import pickle as p
NULL_CONCEPT = 1
def update_graph(amr_nx_graph, concept_nx_graph, nx_parent, parent, count):
for child in concept_nx_graph.successors(parent):
child_idx = 'n'+str(count)
count += 1
if "\"" in concept_nx_graph.node[child]['instance']:
amr_nx_graph.add_node(child_idx, instance=concept_nx_graph.node[child]['instance'], postprocess=True)
else:
amr_nx_graph.add_node(child_idx, instance=concept_nx_graph.node[child]['instance'], postprocess=False)
amr_nx_graph.add_edge(nx_parent, child_idx, relation=concept_nx_graph[parent][child][0]['relation'])
amr_nx_graph, count = update_graph(amr_nx_graph, concept_nx_graph, child_idx, child, count)
return amr_nx_graph, count
def post_process(amr_nx_graph, concept_graph_fragment_dict):
all_nodes = amr_nx_graph.nodes()
count = 1
for node in all_nodes:
node_concept = amr_nx_graph.node[node]['instance']
if node_concept in concept_graph_fragment_dict.keys():
concept_nx_graph = concept_graph_fragment_dict[node_concept]
concept_nx_graph_root = nx.topological_sort(concept_nx_graph)[0]
amr_nx_graph.node[node]['instance'] = concept_nx_graph.node[concept_nx_graph_root]['instance']
amr_nx_graph.node[node]['postprocess'] = False
amr_nx_graph, count = update_graph(amr_nx_graph, concept_nx_graph, node, concept_nx_graph_root, count)
else:
parts = node_concept.split('_')
if len(parts) >= 3 and parts[1] == 'name':
#new_reverse_map_dict[each_node] = x[0]
amr_nx_graph.node[node]['instance'] = parts[0]
name_node_idx = 'n'+str(count)
count += 1
amr_nx_graph.add_node(name_node_idx, instance='name', postprocess=False)
amr_nx_graph.add_edge(node, name_node_idx, relation='name')
subcount = 1
for child in parts[2:]:
child_idx = 'n'+str(count)
count += 1
amr_nx_graph.add_node(child_idx, instance=child, postprocess=True)
amr_nx_graph.add_edge(name_node_idx, child_idx, relation='op'+str(subcount))
subcount += 1
elif len(parts) > 1:
amr_nx_graph.node[node]['instance'] = parts[0]
for part in parts[1:]:
name_node_idx = 'n'+str(count)
count += 1
amr_nx_graph.add_node(name_node_idx, instance=part, postprocess=False)
amr_nx_graph.add_edge(node, name_node_idx, relation='op1') #TODO: get the k-best set of relation from preprocessing
'''
elif len(parts) == 4 and parts[0] == 'date-entity':
amr_nx_graph.node[node]['instance'] = parts[0]
if parts[1] != 'X':
child_idx = 'n'+str(count)
count += 1
amr_nx_graph.add_node(child_idx, instance=parts[1], postprocess=True)
amr_nx_graph.add_edge(node, child_idx, relation='year')
if parts[2] != 'X':
child_idx = 'n'+str(count)
count += 1
amr_nx_graph.add_node(child_idx, instance=parts[2], postprocess=True)
amr_nx_graph.add_edge(node, child_idx, relation='month')
if parts[3] != 'X':
child_idx = 'n'+str(count)
count += 1
amr_nx_graph.add_node(child_idx, instance=parts[3], postprocess=True)
amr_nx_graph.add_edge(node, child_idx, relation='date')
'''
return amr_nx_graph
def to_nx_graph(all_heads, all_tags, all_concepts, concepts_dict, relations_dict):
#print all_heads
#print all_tags
#print all_concepts
amr_roots = []
amr_nx_graph = nx.MultiDiGraph()
for idx in range(1, len(all_concepts)):
concept = all_concepts[idx]
if concept == NULL_CONCEPT:
continue
amr_nx_graph.add_node(idx, instance=concepts_dict[concept], postprocess=False)
if 0 in all_heads[idx]: #this is the root
amr_roots.append(idx)
continue #so don't add any edge
for i, parent in enumerate(all_heads[idx]):
amr_nx_graph.add_edge(parent, idx, relation=relations_dict[all_tags[idx][i]])
return amr_nx_graph, amr_roots
shortname_dict = {}
def get_amr_string(root, amr_nx_graph, tab_levels=1):
amr_string = ""
#print amr_nx_graph.successors(root)
global shortname_dict
for child in amr_nx_graph.successors(root):
if not child in shortname_dict.keys():
size = len(shortname_dict.keys())
child_amr_string = get_amr_string(child, amr_nx_graph, tab_levels+1)
shortname_dict[child] = "c"+str(size)
amr_string += "\t"*tab_levels + "\t:{0} ".format(amr_nx_graph[root][child][0]['relation']) + child_amr_string
else:
amr_string += "\t"*tab_levels + ":{0} {1}\n".format(amr_nx_graph[root][child][0]['relation'], shortname_dict[child])
if not root in shortname_dict.keys():
size = len(shortname_dict.keys())
shortname_dict[root] = "c"+str(size)
if amr_nx_graph.node[root]['postprocess'] == True: #postprocessed node so don't add shortname
amr_string = "{0} \n".format(amr_nx_graph.node[root]['instance'].replace("/", ""))
else:
amr_string = "({0} / {1}\n".format(shortname_dict[root], amr_nx_graph.node[root]['instance'].replace("/", "")) + amr_string + ")"
else:
amr_string = "{0}".format(amr_nx_graph.node[root]['instance'].replace("/", ""))
return amr_string
def print_nx_graph(amr_nx_graph, amr_roots, output_amr_file):
#print amr_nx_graph.nodes()
#print amr_nx_graph.edges()
#print amr_root
#pdb.set_trace()
if not amr_roots: #Only null concepts predicted
amr_nx_graph.add_node(0, instance='multi-sentence', parents=None, postprocess=False)
output_amr_file.write(get_amr_string(0, amr_nx_graph))
elif len(amr_roots) > 1:
amr_nx_graph.add_node(0, instance='multi-sentence', parents=None, postprocess=False)
for i, amr_root in enumerate(amr_roots):
amr_nx_graph.add_edge(0, amr_root, relation='snt'+str(i+1))
output_amr_file.write(get_amr_string(0, amr_nx_graph))
else:
output_amr_file.write(get_amr_string(amr_roots[0], amr_nx_graph))
output_amr_file.write("\n")
output_amr_file.write("\n")
if __name__ == "__main__":
    if len(sys.argv) < 6:
print "usage: vw_pred_to_amr.py <data.pred> <all_concepts.p> <all_relations.p> <concept_graph_fragment_dict.p> <output_amr_file>"
sys.exit(0)
vw_pred_file = open(sys.argv[1], 'r')
concepts_dict = p.load(open(sys.argv[2], 'rb'))
relations_dict = p.load(open(sys.argv[3], 'rb'))
concept_graph_fragment_dict = p.load(open(sys.argv[4], 'rb'))
output_amr_file = open(sys.argv[5], 'w')
all_heads = [[0]]
all_tags = [[0]]
all_concepts = [0]
global shortname_dict
for line in vw_pred_file.readlines():
line = line.strip("\n")
values = line.split(':')
if not values[0].isdigit() or not values[1].isdigit() or not line:
if all_heads:
amr_nx_graph, amr_roots = to_nx_graph(all_heads, all_tags, all_concepts, concepts_dict, relations_dict)
amr_nx_graph = post_process(amr_nx_graph, concept_graph_fragment_dict)
print_nx_graph(amr_nx_graph, amr_roots, output_amr_file)
all_heads = [[0]]
all_tags = [[0]]
all_concepts = [0]
shortname_dict = {}
            amr_roots = []
else:
values = [int(v.strip()) for v in values]
heads = [values[0]]
tags = [values[1]]
concept = values[2]
for i in range(3, len(values), 2):
heads.append(values[i])
tags.append(values[i+1])
all_heads.append(heads)
all_tags.append(tags)
all_concepts.append(concept)
| [
"[email protected]"
]
| |
af68864d45238c5bae0ebc79100da4dc57b48022 | 63e2bed7329c79bf67279f9071194c9cba88a82c | /TopGear/Python-L2/MathNew/subtraction.py | bafbc388ade9a59b1b9c3287b36e545f4296c30d | []
| no_license | jsthomason/LearningPython | 12422b969dbef89578ed326852dd65f65ab77496 | 2f71223250b6a198f2736bcb1b8681c51aa12c03 | refs/heads/master | 2021-01-21T01:05:46.208994 | 2019-06-27T13:40:37 | 2019-06-27T13:40:37 | 63,447,703 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 59 | py | """
Module subtraction
"""
def sub(x,y):
return x - y
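# Hedged usage example:
#
#     >>> sub(7, 3)
#     4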
| [
"[email protected]"
]
| |
b2bdf24d06fb5e52cb0d6323da17410e8fa73697 | 9cd0f22b1b335c8dd59addcbe3032141e76af8b5 | /8.Libraries and Functions/Unicodedata – Unicode Database/unicodedata.name(chr[, default]).py | 0049071b9b9c67f3b03cbd540c19830d774cb039 | []
| no_license | Swapnil2095/Python | 06e6249d8f50767fce8fcdddbfd0aefc23d3deae | d11270391c4d3c30f59266a5bc3216dad8fc81b7 | refs/heads/master | 2022-02-28T08:12:27.266950 | 2019-09-24T15:48:20 | 2019-09-24T15:48:20 | 103,416,564 | 0 | 2 | null | 2019-10-24T11:16:40 | 2017-09-13T15:22:54 | HTML | UTF-8 | Python | false | false | 112 | py | import unicodedata
print (unicodedata.name(u'/'))
print (unicodedata.name(u'|'))
print(unicodedata.name(u':'))
| [
"[email protected]"
]
| |
959957ffa9f4cf7e4062352f73c90a7ff06c7e0d | 1796043fc26c958b8fc45d9c058e382473c4f3af | /Fabio 01 Parte 02/f1_p2_q11_media_tresnumeros.py | 91b7d9e889f37d6676a565d66ccb3e47c0964256 | []
| no_license | Lucakurotaki/ifpi-ads-algoritmos2020 | a69adec27dbb10aceab1bc7038a0b56a760f99d1 | 34d5fedd5825a85404cf9340e42be618981679c1 | refs/heads/master | 2021-01-11T07:03:53.045558 | 2022-02-19T18:48:36 | 2022-02-19T18:48:36 | 246,585,463 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | # Input
num1 = int(input("Enter the first number: "))
num2 = int(input("Enter the second number: "))
num3 = int(input("Enter the third number: "))
# Processing
media = (num1+num2+num3)/3
# Output
print("The arithmetic mean of the numbers {}, {} and {} is: {}".format(num1,num2,num3,media))
| [
"[email protected]"
]
| |
e10541d0e1cc3cb3b76d033be5c42c2e03b341c9 | 64ada708c3ee39c624a223fa4881ce3689041606 | /Appendix/maze_game.py | 194f7ce6a1e7dfd28431a0db34a8a95eaf28369b | []
| no_license | kimcaptin/PythonGame_1 | 1173cf3ac356d29b1cb254b1607bd4528e0a28cc | af32318bf1e6ea73aa00fc4c72d07e1a5d7c5300 | refs/heads/main | 2023-01-04T05:46:02.782910 | 2020-10-28T06:53:30 | 2020-10-28T06:53:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,421 | py | import tkinter
import tkinter.messagebox
idx = 0
tmr = 0
stage = 1
ix = 0
iy = 0
key = 0
def key_down(e):
global key
key = e.keysym
def key_up(e):
global key
key = 0
maze = [[], [], [], [], [], [], [], []]
def stage_data():
global ix, iy
    global maze # a global declaration is needed because the whole list gets reassigned
if stage == 1:
ix = 1
iy = 1
        # 0: open path, 1: painted path, 9: wall
maze = [
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 0, 9, 0, 0, 0, 9, 0, 0, 9],
[9, 0, 9, 0, 9, 0, 9, 0, 0, 9],
[9, 0, 9, 0, 9, 0, 9, 0, 9, 9],
[9, 0, 9, 0, 9, 0, 9, 0, 0, 9],
[9, 0, 9, 0, 9, 0, 9, 9, 0, 9],
[9, 0, 0, 0, 9, 0, 0, 0, 0, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
]
if stage == 2:
ix = 8
iy = 6
maze = [
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 0, 0, 0, 0, 0, 9, 0, 9],
[9, 0, 0, 9, 9, 0, 0, 9, 0, 9],
[9, 0, 0, 9, 9, 0, 0, 9, 0, 9],
[9, 9, 9, 9, 9, 0, 0, 9, 0, 9],
[9, 9, 9, 9, 9, 0, 0, 0, 0, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
]
if stage == 3:
ix = 3
iy = 3
maze = [
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 9, 9, 0, 0, 0, 0, 9, 9, 9],
[9, 9, 0, 0, 0, 0, 0, 0, 9, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 9, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 9, 9],
[9, 9, 0, 0, 0, 0, 0, 9, 9, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
]
if stage == 4:
ix = 4
iy = 3
maze = [
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 0, 0, 9, 0, 0, 0, 0, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 0, 9, 0, 0, 0, 9, 0, 9],
[9, 0, 0, 0, 0, 0, 0, 9, 0, 0],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
]
if stage == 5:
ix = 1
iy = 6
maze = [
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 9, 0, 0, 0, 0, 0, 0, 9],
[9, 0, 0, 0, 0, 0, 9, 9, 0, 9],
[9, 0, 0, 0, 0, 9, 9, 9, 0, 9],
[9, 0, 0, 9, 0, 0, 0, 0, 0, 9],
[9, 0, 0, 0, 0, 0, 0, 0, 0, 9],
[9, 9, 9, 9, 9, 9, 9, 9, 9, 9]
]
maze[iy][ix] = 1
def draw_bg():
for y in range(8):
for x in range(10):
gx = 80 * x
gy = 80 * y
if maze[y][x] == 0:
cvs.create_rectangle(gx, gy, gx + 80, gy + 80, fill="white", width=0, tag="BG")
if maze[y][x] == 9:
cvs.create_image(gx + 40, gy + 40, image=wall, tag="BG")
cvs.create_text(120, 40, text="STAGE " + str(stage), fill="white", font=("Times New Roman", 30, "bold"), tag="BG")
gx = 80 * ix
gy = 80 * iy
cvs.create_rectangle(gx, gy, gx + 80, gy + 80, fill="pink", width=0, tag="BG")
cvs.create_image(gx + 60, gy + 20, image=pen, tag="PEN")
def erase_bg():
cvs.delete("BG")
cvs.delete("PEN")
def move_pen():
global idx, tmr, ix, iy, key
bx = ix
by = iy
if key == "Left" and maze[iy][ix - 1] == 0:
ix = ix - 1
if key == "Right" and maze[iy][ix + 1] == 0:
ix = ix + 1
if key == "Up" and maze[iy - 1][ix] == 0:
iy = iy - 1
if key == "Down" and maze[iy + 1][ix] == 0:
iy = iy + 1
if ix != bx or iy != by:
maze[iy][ix] = 2
gx = 80 * ix
gy = 80 * iy
cvs.create_rectangle(gx, gy, gx + 80, gy + 80, fill="pink", width=0, tag="BG")
cvs.delete("PEN")
cvs.create_image(gx + 60, gy + 20, image=pen, tag="PEN")
if key == "g" or key == "G" or key == "Shift_L":
key = 0
        ret = tkinter.messagebox.askyesno("Give up", "Try again?")
# root.focus_force() # for Mac
if ret == True:
stage_data()
erase_bg()
draw_bg()
def count_tile():
cnt = 0
for y in range(8):
for x in range(10):
if maze[y][x] == 0:
cnt = cnt + 1
return cnt
def game_main():
global idx, tmr, stage
    if idx == 0: # initialization
stage_data()
draw_bg()
idx = 1
    if idx == 1: # pen movement and clear check
move_pen()
if count_tile() == 0:
txt = "STAGE CLEAR"
if stage == 5:
txt = "ALL STAGE CLEAR!"
cvs.create_text(400, 320, text=txt, fill="white", font=("Times New Roman", 40, "bold"), tag="BG")
idx = 2
tmr = 0
    if idx == 2: # stage cleared
tmr = tmr + 1
if tmr == 30:
if stage < 5:
stage = stage + 1
stage_data()
erase_bg()
draw_bg()
idx = 1
root.after(200, game_main)
root = tkinter.Tk()
root.title("One-Stroke Maze Painting Game")
root.resizable(False, False)
root.bind("<KeyPress>", key_down)
root.bind("<KeyRelease>", key_up)
cvs = tkinter.Canvas(root, width=800, height=640)
cvs.pack()
pen = tkinter.PhotoImage(file="pen.png")
wall = tkinter.PhotoImage(file="wall.png")
game_main()
root.mainloop()
| [
"[email protected]"
]
| |
1a84f5a49bcf0eab3c43b3ba1db9666ebd94af29 | 59fbeea017110472a788218db3c6459e9130c7fe | /rotate-list/rotate-list.py | be8a63c32a60f946bd7d6f0d6a3529f0a007cea7 | []
| no_license | niufenjujuexianhua/Leetcode | 82b55d9382bc9f63f4d9da9431194e20a4d299f1 | 542c99e038d21429853515f62af51a77deaa4d9c | refs/heads/master | 2022-04-27T16:55:00.035969 | 2022-03-10T01:10:04 | 2022-03-10T01:10:04 | 79,742,663 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 834 | py | # Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution(object):
def rotateRight(self, head, k):
"""
:type head: ListNode
:type k: int
:rtype: ListNode
"""
if not head or not head.next:
return head
        d = ListNode()
d.next = head
cur = head
cnt = 0
while cur:
cnt += 1
cur = cur.next
k = k % cnt
if k == 0:
return head
s = f = d
for _ in range(k):
f = f.next
while f and f.next:
s, f = s.next, f.next
newhead = s.next
s.next = None
f.next = d.next
return newhead | [
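# A quick local check of the rotation logic. LeetCode normally supplies the
# ListNode class, so a minimal stand-in is assumed here purely for the demo:
if __name__ == "__main__":
    class ListNode(object):
        def __init__(self, val=0, next=None):
            self.val = val
            self.next = next
    head = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))
    node = Solution().rotateRight(head, 2)
    values = []
    while node:
        values.append(node.val)
        node = node.next
    print(values)  # expected output: [4, 5, 1, 2, 3]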
"[email protected]"
]
| |
73585cbc1b80617c5e8e1b4b75573ae0b261b5a9 | aaa22c7aa8d8c6fb2a9d489252d72387c914cfac | /orders/migrations/0001_initial.py | 7091656e024adbcbb40069c9f3359ffdc35672db | []
| no_license | MohamedHany2002/online-shop | dccd55fef192cb94b57a5eca126a85c38c71c0fa | e8db42c17ea6b1cb0b08e6ff0e2e367ce9a118be | refs/heads/master | 2022-09-04T16:24:40.912664 | 2020-05-12T14:53:43 | 2020-05-12T14:53:43 | 263,360,402 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # Generated by Django 3.0.2 on 2020-04-11 14:29
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('cart', '0009_cart_user'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_id', models.CharField(blank=True, max_length=120)),
('shipping_total', models.DecimalField(decimal_places=2, default=5.99, max_digits=10)),
('order_total', models.DecimalField(decimal_places=2, default=0.0, max_digits=10)),
('status', models.CharField(choices=[('created', 'Created'), ('paid', 'Paid'), ('shipped', 'Shipped'), ('refunded', 'Refunded')], max_length=120)),
('cart', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='cart.cart')),
],
),
]
| [
"[email protected]"
]
| |
92616aa2a4472acc2e3614862941dd9f05ea1934 | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/throughput/final1/job11.py | 0e1c795e3445c10eb7015ace6a5a0516b3146e91 | [
"MIT"
]
| permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,614 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 64
args_lr = 0.008
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_final1/' + job_name + '*'
total_epochs = 8
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_final1/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [
"[email protected]"
]
| |
0787e5ef5b14556ad5998e2475bd8a2883204da7 | 08ddce92744c78432b69409d197ad1393ca685aa | /api/novel_list_paihangbang.py | 48719b3f009c55127001016d87a147d695a2814d | []
| no_license | baliguan163/PythonDemo | 71255eb21850134b4b6afb2eeed948cc34326e7a | c4fe1b6ea36bec2c531244ef95c809e17b64b727 | refs/heads/master | 2021-01-02T08:13:18.809740 | 2019-05-19T16:28:16 | 2019-05-19T16:28:16 | 98,963,901 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,256 | py | __author__ = 'IBM'
#coding:utf-8
import requests
import time
from bs4 import BeautifulSoup
# Function that fetches a web page
def get_html(url):
try:
r = requests.get(url, timeout=30)
r.raise_for_status()
r.encoding = 'utf-8'
return r.text
except:
return " ERROR "
def get_content(url):
'''
    Crawl the ranking list for each novel category and write it to a file in order;
    each record is the novel name + novel link.
    Save the content to a list and return a list filled with the URL links.
'''
url_list = []
html = get_html(url)
soup = BeautifulSoup(html, 'lxml')
    # Because of the page layout, history-type and completed-type novels are not in the same div
category_list = soup.find_all('div', class_='index_toplist mright mbottom')
    # history-type and completed-type novels
history_finished_list = soup.find_all('div', class_='index_toplist mbottom')
for cate in category_list:
name = cate.find('div', class_='toptab').span.string
with open('novel_list.csv', 'a+') as f:
            f.write("\nNovel category: {} \n".format(name))
        print('------------------- Novel category 1:', name, '-------------------')
        # We locate the overall ranking list directly via its style attribute
        general_list = cate.find(style='display: block;')
        # Find all the novel names; they are all contained in li tags
        book_list = general_list.find_all('li')
        # Loop over every novel's name and link
        for book in book_list:
            link = 'http://www.qu.la/' + book.a['href']
            title = book.a['title']
            # Save every novel's URL address in a list variable
            url_list.append(link)
            # Open in append mode here to avoid truncating the file
            with open('novel_list.csv', 'a') as f:
                f.write("Novel: {:<} \t Link: {:<} \n".format(title, link))
            print('Novel:', title, ' Link:', link)
for cate in history_finished_list:
name = cate.find('div', class_='toptab').span.string
with open('novel_list.csv', 'a') as f:
            f.write("\nNovel category: {} \n".format(name))
        print('------------------- Novel category 2:', name, '-------------------')
general_list = cate.find(style='display: block;')
book_list = general_list.find_all('li')
for book in book_list:
link = 'http://www.qu.la/' + book.a['href']
title = book.a['title']
url_list.append(link)
with open('novel_list.csv', 'a') as f:
                f.write("Novel: {:<} \t Link: {:<} \n".format(title, link))
            print('Novel:', title, ' Link:', link)
return url_list
def get_txt_url(url):
'''
    Get the URL of every chapter of the novel, and create the novel's output file
'''
url_list = []
html = get_html(url)
soup = BeautifulSoup(html, 'lxml')
lista = soup.find_all('dd')
txt_name = soup.find('h1').text.strip()
with open('novel/{}.txt'.format(txt_name), "a+", encoding='utf-8') as f:
        f.write('Novel title: {} \n'.format(txt_name))
for url in lista:
url_list.append('http://www.qu.la/' + url.a['href'])
return url_list, txt_name
def get_one_txt(url, txt_name):
'''
    Fetch the text of each chapter of the novel
    and write it to a local file
'''
    #print('Downloading novel:', txt_name, ' ', url)
html = get_html(url).replace('<br/>', '\n')
soup = BeautifulSoup(html, 'lxml')
try:
txt = soup.find('div', id='content').text.replace('chaptererror();', '')
title = soup.find('title').text
with open('novel/{}.txt'.format(txt_name), "a",encoding='utf-8') as f:
f.write(title + '\n\n')
f.write(txt)
        print('Current novel: {}  chapter: {} downloaded'.format(txt_name, title))
    except:
        print('something went wrong')
url = 'http://www.qu.la/paihangbang/'
if __name__ == "__main__":
url_list = get_content(url)
for url in url_list:
one_novel_url_list = get_txt_url(url)
#print('one_novel_url_list:',one_novel_url_list)
for url in one_novel_url_list[0]:
get_one_txt(url,one_novel_url_list[1])
| [
"[email protected]"
]
| |
17abdcd68276fa7e209abb00e5e0f0fd4af4c524 | 9680c27718346be69cf7695dba674e7a0ec662ca | /Numpy/Numpy Arange Function - Creating NumPy Arrays.py | d5629978efe1c2f1af3df9713de2c0e6dd231f95 | []
| no_license | Md-Monirul-Islam/Python-code | 5a2cdbe7cd3dae94aa63298b5b0ef7e0e31cd298 | df98f37dd9d21784a65c8bb0e46d47a646259110 | refs/heads/main | 2023-01-19T05:15:04.963904 | 2020-11-19T06:10:09 | 2020-11-19T06:10:09 | 314,145,135 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | import numpy as np
#print(help(np.arange))
print(np.arange(1,10))
print(np.arange(3.))
print(np.arange(1,20,2))
print(np.arange(20, dtype= "complex"))
print(np.arange(1,10,2,dtype= "float")) | [
"[email protected]"
]
| |
894fe800d424952f5cfadd3b5b2dc93ad384697c | 72ab559d5ce5f02b5ba9b48fa5e51ec69eca34a7 | /jaraco/net/http/cookies.py | 3de1e785e8b34cfe29d4aa08ad1ca3fb324db1ee | [
"MIT"
]
| permissive | jaraco/jaraco.net | a961137b51314faa0c8dda04f71328ca587e9f36 | d2218af49459d38e447c9d977e06f29452f88ff9 | refs/heads/main | 2023-08-31T16:49:35.366220 | 2023-08-06T23:44:43 | 2023-08-06T23:44:43 | 53,204,002 | 0 | 1 | MIT | 2022-12-06T02:52:01 | 2016-03-05T14:01:24 | Python | UTF-8 | Python | false | false | 2,464 | py | import pathlib
import collections
import http.cookiejar
import contextlib
import jsonpickle
class Shelf(collections.abc.MutableMapping):
"""
Similar to Python's shelve.Shelf, implements a persistent
dictionary using jsonpickle.
>>> fn = getfixture('tmp_path') / 'shelf.json'
>>> shelf = Shelf(fn)
>>> shelf['foo'] = 'bar'
>>> copy = Shelf(fn)
>>> copy['foo']
'bar'
>>> shelf['bar'] = 'baz'
>>> Shelf(fn)['bar']
'baz'
"""
def __init__(self, filename):
self.filename = pathlib.Path(filename)
self.store = dict()
with contextlib.suppress(Exception):
self._load()
def _load(self):
self.store = jsonpickle.decode(self.filename.read_text(encoding='utf-8'))
def _save(self):
self.filename.write_text(jsonpickle.encode(self.store), encoding='utf-8')
def __getitem__(self, *args, **kwargs):
return self.store.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
self.store.__setitem__(*args, **kwargs)
self._save()
def __delitem__(self, *args, **kwargs):
self.store.__delitem__(*args, **kwargs)
self._save()
def __iter__(self):
return self.store.__iter__()
def __len__(self):
return self.store.__len__()
class ShelvedCookieJar(http.cookiejar.CookieJar):
"""
Cookie jar backed by a shelf.
Automatically persists cookies to disk.
"""
def __init__(self, shelf: Shelf, **kwargs):
super().__init__(**kwargs)
self._cookies = self.shelf = shelf
@classmethod
def create(cls, root: pathlib.Path = pathlib.Path(), name='cookies.json', **kwargs):
return cls(Shelf(root / name), **kwargs)
def set_cookie(self, cookie):
with self._cookies_lock:
self.shelf.setdefault(cookie.domain, {}).setdefault(cookie.path, {})[
cookie.name
] = cookie
self.shelf._save()
def clear(self, domain=None, path=None, name=None):
super().clear(domain, path, name)
if path is not None or name is not None:
self.shelf._save()
def get(self, name, default=None):
matches = (
cookie.value
for domain in self.shelf
for path in self.shelf[domain]
for cookie in self.shelf[domain][path].values()
if cookie.name == name
)
return next(matches, default)
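# A minimal persistence sketch (hypothetical path; uses only the standard library):
#
#     import urllib.request
#     jar = ShelvedCookieJar.create(pathlib.Path('/tmp'), name='cookies.json')
#     opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
#     opener.open('https://example.com')  # cookies received are saved to /tmp/cookies.json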
| [
"[email protected]"
]
| |
f09015ff8ac994c61b10c2fb321256ff3d7e0692 | 3c01d7928029e74a19d646f5a40b3bf099b281a7 | /typeshed/stubs/freezegun/freezegun/api.pyi | df10e569ae1dd5611b057fda44e39fd4cd45791a | [
"MIT"
]
| permissive | arpancodes/protectsql | f3ced238c103fca72615902a9cb719c44ee2b5ba | 6392bb7a86d1f62b86faf98943a302f7ea3fce4c | refs/heads/main | 2023-08-07T16:33:57.496144 | 2021-09-24T19:44:51 | 2021-09-24T19:44:51 | 409,894,807 | 0 | 1 | MIT | 2021-09-24T19:44:52 | 2021-09-24T08:46:02 | Python | UTF-8 | Python | false | false | 2,243 | pyi | from collections.abc import Awaitable, Callable, Iterator, Sequence
from datetime import date, datetime, timedelta
from numbers import Real
from typing import Any, Type, TypeVar, Union, overload
_T = TypeVar("_T")
_Freezable = Union[str, datetime, date, timedelta]
class TickingDateTimeFactory(object):
def __init__(self, time_to_freeze: datetime, start: datetime) -> None: ...
def __call__(self) -> datetime: ...
class FrozenDateTimeFactory(object):
def __init__(self, time_to_freeze: datetime) -> None: ...
def __call__(self) -> datetime: ...
def tick(self, delta: float | Real | timedelta = ...) -> None: ...
def move_to(self, target_datetime: _Freezable | None) -> None: ...
class StepTickTimeFactory(object):
def __init__(self, time_to_freeze: datetime, step_width: float) -> None: ...
def __call__(self) -> datetime: ...
def tick(self, delta: timedelta | None = ...) -> None: ...
def update_step_width(self, step_width: float) -> None: ...
def move_to(self, target_datetime: _Freezable | None) -> None: ...
class _freeze_time:
def __init__(
self,
time_to_freeze_str: _Freezable | None,
tz_offset: float,
ignore: Sequence[str],
tick: bool,
as_arg: bool,
auto_tick_seconds: float,
) -> None: ...
@overload
def __call__(self, func: Type[_T]) -> Type[_T]: ...
@overload
def __call__(self, func: Callable[..., Awaitable[_T]]) -> Callable[..., Awaitable[_T]]: ...
@overload
def __call__(self, func: Callable[..., _T]) -> Callable[..., _T]: ...
def __enter__(self) -> Any: ...
def __exit__(self, *args: Any) -> None: ...
def start(self) -> Any: ...
def stop(self) -> None: ...
def decorate_class(self, klass: Type[_T]) -> _T: ...
def decorate_coroutine(self, coroutine: _T) -> _T: ...
def decorate_callable(self, func: Callable[..., _T]) -> Callable[..., _T]: ...
def freeze_time(
time_to_freeze: _Freezable | Callable[..., _Freezable] | Iterator[_Freezable] | None = ...,
tz_offset: float | None = ...,
ignore: Sequence[str] | None = ...,
tick: bool | None = ...,
as_arg: bool | None = ...,
auto_tick_seconds: float | None = ...,
) -> _freeze_time: ...
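# A short illustration of the stubbed API, mirroring freezegun's documented usage:
#
#     from freezegun import freeze_time
#     import datetime
#     with freeze_time("2012-01-14"):
#         assert datetime.datetime.now() == datetime.datetime(2012, 1, 14)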
| [
"[email protected]"
]
| |
ca192c64fa93c17fa7c3b701c680a16935c6d89e | a86bca3e88fc3012bc9805c74c2e752262370326 | /AI/tab_text_dataset.py | d61515e1e5c05bde943591f38dac98c22c8167a7 | [
"MIT"
]
| permissive | osamhack2021/AI_NoYoutube_60Duo | 4921f7c838776305d8dc00d6ceb04b2190565916 | c1e34b7b506b43c9be6c39da3211fac49bfbcd14 | refs/heads/main | 2023-08-11T19:24:45.560000 | 2021-10-13T15:00:38 | 2021-10-13T15:00:38 | 405,925,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 735 | py | # pip install pypiwin32
import win32gui
import time
import csv
file_name = 'tab_text_dataset.csv'
browser_list = [' - Chrome', ' - Internet Explorer']
window = ''
while True:
current_window = win32gui.GetWindowText(win32gui.GetForegroundWindow())
if window != current_window:
window = current_window
print(window)
for browser in browser_list:
if browser in window:
window = window.replace(browser,'')
with open(file_name, 'a', newline='') as f:
wr = csv.writer(f, lineterminator='\n')
wr.writerow([window])
f.close()
window = window + browser
time.sleep(1) | [
"[email protected]"
]
| |
348594c84f7e498712d4b049c30591da6b52c02f | 2b912b088683e2d4d1fa51ebf61c4e53c5058847 | /.PyCharmCE2017.1/system/python_stubs/-1247971765/nis.py | d99a03de2f9f3e21069295858335ebf44134f40a | []
| no_license | ChiefKeith/pycharmprojects | 1e1da8288d85a84a03678d2cae09df38ddb2f179 | 67ddcc81c289eebcfd0241d1435b28cd22a1b9e0 | refs/heads/master | 2021-07-13T00:52:19.415429 | 2017-10-08T23:04:39 | 2017-10-08T23:04:39 | 106,216,016 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,736 | py | # encoding: utf-8
# module nis
# from /usr/lib/python3.4/lib-dynload/nis.cpython-34m-arm-linux-gnueabihf.so
# by generator 1.145
""" This module contains functions for accessing NIS maps. """
# no imports
# functions
def cat(map, domain=None): # real signature unknown; restored from __doc__
"""
cat(map, domain = defaultdomain)
Returns the entire map as a dictionary. Optionally domain can be
specified but it defaults to the system default domain.
"""
pass
def get_default_domain(): # real signature unknown; restored from __doc__
"""
get_default_domain() -> str
Corresponds to the C library yp_get_default_domain() call, returning
the default NIS domain.
"""
return ""
def maps(domain=None): # real signature unknown; restored from __doc__
"""
maps(domain = defaultdomain)
Returns an array of all available NIS maps within a domain. If domain
is not specified it defaults to the system default domain.
"""
pass
def match(key, map, domain=None): # real signature unknown; restored from __doc__
"""
match(key, map, domain = defaultdomain)
Corresponds to the C library yp_match() call, returning the value of
key in the given map. Optionally domain can be specified but it
defaults to the system default domain.
"""
pass
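# Hedged illustration (these calls only succeed on a host with NIS configured):
#
#     domain = get_default_domain()
#     entry = match('someuser', 'passwd.byname', domain)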
# classes
class error(Exception):
# no doc
def __init__(self, *args, **kwargs): # real signature unknown
pass
__weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
"""list of weak references to the object (if defined)"""
# variables with complex values
__loader__ = None # (!) real value is ''
__spec__ = None # (!) real value is ''
| [
"[email protected]"
]
| |
48f0170bf6fbdde92cbc13bff5b74a79d5d3677b | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Quantization/trend_LinearTrend/cycle_5/ar_12/test_artificial_1024_Quantization_LinearTrend_5_12_0.py | b85ea2797b9c7c86df0eee3743a87b299676b7b0 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "LinearTrend", cycle_length = 5, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12); | [
"[email protected]"
]
| |
80ae8eed0fd27a1c218770252d327ce12836c056 | b812afe2b6e810881f5b0c66e5fe49b88adcd816 | /unsupervised_learning/0x03-hyperparameter_tuning/1-gp.py | 818d56e43ff1c8d6189cc8a3ac4d605d7fa0d856 | []
| no_license | AhmedOmi/holbertonschool-machine_learning | 6b44b1957b6cee291d6dabd19a5bbe535c83881f | f887cfd48bb44bc4ac440e27014c82390994f04d | refs/heads/master | 2023-07-28T07:45:09.886422 | 2021-09-13T13:05:14 | 2021-09-13T13:05:14 | 317,320,504 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 975 | py | #!/usr/bin/env python3
"""Predict mean and standard deviation of points in Gaussian Process"""
import numpy as np
class GaussianProcess:
"""Hold state and data of a gaussian process"""
def __init__(self, X_init, Y_init, l=1, sigma_f=1):
self.X = X_init
self.Y = Y_init
self.l = l
self.sigma_f = sigma_f
self.K = self.kernel(X_init, X_init)
def kernel(self, X1, X2):
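        # k(x1, x2) = sigma_f^2 * exp(-(x1 - x2)^2 / (2 * l^2)), evaluated pairwise via broadcasting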
"""Radial Basis Function Kernel"""
return pow(self.sigma_f, 2) * np.exp(pow(X1 - X2.T, 2) /
-2 / pow(self.l, 2))
def predict(self, X_s):
"""Predict mean and standard deviation of points in Gaussian Process"""
K_s = self.kernel(X_s, self.X)
K_i = np.linalg.inv(self.K)
mu = np.matmul(np.matmul(K_s, K_i), self.Y)[:, 0]
K_s2 = self.kernel(X_s, X_s)
sigma = K_s2 - np.matmul(np.matmul(K_s, K_i), K_s.T)
return mu, np.diagonal(sigma)
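# A minimal usage sketch (sample data assumed; not part of the original task):
if __name__ == "__main__":
    np.random.seed(0)
    X_init = np.random.uniform(-np.pi, 2 * np.pi, (2, 1))
    Y_init = np.sin(X_init)
    gp = GaussianProcess(X_init, Y_init, l=0.6, sigma_f=2)
    X_s = np.linspace(-np.pi, 2 * np.pi, 10).reshape(-1, 1)
    mu, sigma = gp.predict(X_s)
    print(mu.shape, sigma.shape)  # -> (10,) (10,)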
| [
"[email protected]"
]
| |
3943c96057143acaa2de8e328f572962c5b864dc | 4007632edd395d243bca022418848a2ff54409c8 | /123.py | 739ac163e85499ece4b6557edddc23705945ca8a | []
| no_license | 549982170/python_learning | d80a9403cbe2eb8304aba50ff373b2b67df095e2 | 2c3f73718e0a6d9d4923a2e0f22ff2d4230357e9 | refs/heads/master | 2021-06-22T04:32:06.286691 | 2020-12-10T03:29:56 | 2020-12-10T03:29:56 | 101,596,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 380 | py | #!/usr/bin/env python
# encoding: utf-8
import time
from selenium import webdriver
# create a new Firefox session
driver = webdriver.Firefox()
#time.sleep(3)
#driver.maximize_window()
# navigate to the application home page
driver.get("http://moxian.com/")
driver.find_elements_by_xpath("/html/body/div[1]/div[1]/div/p/a[1]")
time.sleep(3)
driver.close()
| [
"[email protected]"
]
| |
80a38c34283873686e582d5788cbeeadaf9a19d8 | 2e3d63726c1d05b73b9cc22e5bcbead30246a8dc | /facepad_app/migrations/0005_auto_20160321_2211.py | a3c0a96ba9d6a736b8075f2498310ca66564ec30 | []
| no_license | rolycg/tiny_social_network | 041f6e4ab503bb82eca4cf1efb436d3b5250343a | e7ec45d053d291d53bd9d58bbb882b4b3edb6355 | refs/heads/master | 2021-01-10T04:27:16.344700 | 2016-03-23T18:19:49 | 2016-03-23T18:19:49 | 54,581,800 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 512 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-03-22 04:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('facepad_app', '0004_auto_20160321_1853'),
]
operations = [
migrations.AlterField(
model_name='simpleuser',
name='avatar',
field=models.ImageField(blank=True, null=True, upload_to='avatars/', verbose_name='Avatar'),
),
]
| [
"[email protected]"
]
| |
b5c6b72e3cdb5fcf8c1a97044664e4ffdb313025 | 1e177ebdcb470f738c058606ac0f86a36085f661 | /Pico/MicroPython/mqtt/oneWire01.py | 023100ff032111861ebea42335dfc18ee346c6f9 | []
| no_license | robingreig/raspi-git | 5cbdd295c1048a0571aa2c2f8576438269439f07 | 7373bf94557d7a88c8f343362ba64f9cd19c8ce7 | refs/heads/master | 2023-08-31T03:16:17.286700 | 2023-08-26T11:54:23 | 2023-08-26T11:54:23 | 16,873,881 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,461 | py | import time
import network
import machine
from machine import Pin
from onewire import OneWire
from ds18x20 import DS18X20
import binascii
ds = DS18X20(OneWire(Pin(16)))
roms = ds.scan()
sensor = (0x28,0xff,0xa1,0x58,0x74,0x16,0x04,0x24)
while True:
try:
# convert temp in DS18B20
ds.convert_temp()
        # have to wait at least 750 ms after convert
time.sleep_ms(1000)
# read temp from the sensor
temp1 = ds.read_temp(sensor)
print('sensor temp1 = ',temp1)
time.sleep(2)
# format the value to 2 decimal places
temp1 = "%3.2f" % temp1
print('Formatted temp1 = ',temp1)
time.sleep(2)
# roms is ds.scan()
for rom in roms:
print('rom = ',rom)
# convert from bytearray to bytes
str1 = bytes(rom)
print('Type of str1 = ',(type(str1)))
print('str1 = ',str1)
# convert from bytes to hex string
str2 = binascii.hexlify(rom)
print('Type of str2 = ',(type(str2)))
print('str2 = ',str2)
# remove the b'
str3 = str2.decode()
print('Type of str3 = ',(type(str3)))
print('str3 = ',str3)
# Read the temp from the sensor
temp2 = (ds.read_temp(rom))
print('temp2 = ',temp2)
temp2 = "%3.2f" % temp2
print('Formatted temp2 = ',temp2)
time.sleep(2)
pass
except:
print('Jumped out of Try loop')
break
| [
"[email protected]"
]
| |
195be52232edb0af4b24300004a91908e4f653e4 | c291ba4506a8998df8d7f384c911f6a0a1294001 | /bai__83+84/BaiTapRenLuyenXuLyList.py | 6cf3194271e88b97837cd3eb94d1f5f96c1eff96 | []
| no_license | thanh-falis/Python | f70804ea4a3c127dcb7738d4e7c6ddb4c5a0a9d4 | fa9f98d18e0de66caade7c355aa6084f2d61aab3 | refs/heads/main | 2023-08-18T17:34:29.851365 | 2021-09-30T12:09:30 | 2021-09-30T12:09:30 | 398,952,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | """
Write a program that can:
- Initialize a list
- Append an element to the list
- Read K and count how many times K appears in the list
- Sum the prime numbers in the list
- Sort the list
- Clear the list
"""
from random import randrange
print("List processing program")
n = int(input("Enter the number of elements: "))
lst = [0] * n
for i in range(n):
    lst[i] = randrange(-100, 100)
print("Randomly generated list:")
print(lst)
print("Enter a new number to append:")
value = int(input())
lst.append(value)
print(lst)
print("Which number do you want to count?")
k = int(input())
cnt = lst.count(k)
print(k, "appears in the list:", cnt, "times")
def CheckPrime(n):
d = 0
for i in range(1, n + 1):
if n % i == 0:
d += 1
return d == 2
cnt = 0
snt = 0
for j in lst:
if CheckPrime(j):
cnt += 1
snt += j
print("There are", cnt, "prime numbers in the list")
print("Sum =", snt)
lst.sort()
print("List after sorting")
print(lst)
lst.clear()
print("List after clearing")
print(lst)
| [
"thanhelma2020|@gmail.com"
]
| thanhelma2020|@gmail.com |
b407d70bfb53c9202600513cdcd07aed79217a2b | ee89c84c5b2f48d447b7005299b409d61cc4d807 | /venv/Lib/site-packages/humanfriendly/tests.py | 6a09c7a14029dbcc058837d595bf7ba9900f16e6 | [
"BSD-3-Clause",
"Apache-2.0"
]
| permissive | songweiwei/rasa_run | 342075cc645725a042acf273ab6508c5da55cbee | 0cfc0a280b9efea344bacf5f2df5800c32d0b3a8 | refs/heads/master | 2023-05-31T03:23:26.490925 | 2020-04-22T07:56:07 | 2020-04-22T07:56:07 | 257,218,895 | 2 | 2 | null | 2023-05-22T23:23:45 | 2020-04-20T08:31:42 | Python | UTF-8 | Python | false | false | 65,883 | py | #!/usr/bin/env python
# vim: fileencoding=utf-8 :
# Tests for the `humanfriendly' package.
#
# Author: Peter Odding <[email protected]>
# Last Change: March 6, 2020
# URL: https://humanfriendly.readthedocs.io
"""Test suite for the `humanfriendly` package."""
# Standard library modules.
import datetime
import math
import os
import random
import re
import subprocess
import sys
import time
import types
import unittest
import warnings
# Modules included in our package.
from humanfriendly import (
InvalidDate,
InvalidLength,
InvalidSize,
InvalidTimespan,
Timer,
coerce_boolean,
coerce_pattern,
format_length,
format_number,
format_path,
format_size,
format_timespan,
parse_date,
parse_length,
parse_path,
parse_size,
parse_timespan,
prompts,
round_number,
)
from humanfriendly.cli import main
from humanfriendly.compat import StringIO
from humanfriendly.decorators import cached
from humanfriendly.deprecation import DeprecationProxy, define_aliases, deprecated_args, get_aliases
from humanfriendly.prompts import (
TooManyInvalidReplies,
prompt_for_confirmation,
prompt_for_choice,
prompt_for_input,
)
from humanfriendly.sphinx import (
deprecation_note_callback,
man_role,
pypi_role,
setup,
special_methods_callback,
usage_message_callback,
)
from humanfriendly.tables import (
format_pretty_table,
format_robust_table,
format_rst_table,
format_smart_table,
)
from humanfriendly.terminal import (
ANSI_CSI,
ANSI_ERASE_LINE,
ANSI_HIDE_CURSOR,
ANSI_RESET,
ANSI_SGR,
ANSI_SHOW_CURSOR,
ansi_strip,
ansi_style,
ansi_width,
ansi_wrap,
clean_terminal_output,
connected_to_terminal,
find_terminal_size,
get_pager_command,
message,
output,
show_pager,
terminal_supports_colors,
warning,
)
from humanfriendly.terminal.html import html_to_ansi
from humanfriendly.terminal.spinners import AutomaticSpinner, Spinner
from humanfriendly.testing import (
CallableTimedOut,
CaptureOutput,
MockedProgram,
PatchedAttribute,
PatchedItem,
TemporaryDirectory,
TestCase,
retry,
run_cli,
skip_on_raise,
touch,
)
from humanfriendly.text import (
compact,
compact_empty_lines,
concatenate,
dedent,
generate_slug,
pluralize,
random_string,
trim_empty_lines,
)
from humanfriendly.usage import (
find_meta_variables,
format_usage,
parse_usage,
render_usage,
)
# Test dependencies.
from mock import MagicMock
class HumanFriendlyTestCase(TestCase):
"""Container for the `humanfriendly` test suite."""
def test_capture_output(self):
"""Test the CaptureOutput class."""
with CaptureOutput() as capturer:
sys.stdout.write("Something for stdout.\n")
sys.stderr.write("And for stderr.\n")
assert capturer.stdout.get_lines() == ["Something for stdout."]
assert capturer.stderr.get_lines() == ["And for stderr."]
def test_skip_on_raise(self):
"""Test the skip_on_raise() decorator."""
def test_fn():
raise NotImplementedError()
decorator_fn = skip_on_raise(NotImplementedError)
decorated_fn = decorator_fn(test_fn)
self.assertRaises(NotImplementedError, test_fn)
self.assertRaises(unittest.SkipTest, decorated_fn)
def test_retry_raise(self):
"""Test :func:`~humanfriendly.testing.retry()` based on assertion errors."""
# Define a helper function that will raise an assertion error on the
# first call and return a string on the second call.
def success_helper():
if not hasattr(success_helper, 'was_called'):
setattr(success_helper, 'was_called', True)
assert False
else:
return 'yes'
assert retry(success_helper) == 'yes'
# Define a helper function that always raises an assertion error.
def failure_helper():
assert False
with self.assertRaises(AssertionError):
retry(failure_helper, timeout=1)
def test_retry_return(self):
"""Test :func:`~humanfriendly.testing.retry()` based on return values."""
# Define a helper function that will return False on the first call and
# return a number on the second call.
def success_helper():
if not hasattr(success_helper, 'was_called'):
# On the first call we return False.
setattr(success_helper, 'was_called', True)
return False
else:
# On the second call we return a number.
return 42
assert retry(success_helper) == 42
with self.assertRaises(CallableTimedOut):
retry(lambda: False, timeout=1)
def test_mocked_program(self):
"""Test :class:`humanfriendly.testing.MockedProgram`."""
name = random_string()
script = dedent('''
# This goes to stdout.
tr a-z A-Z
# This goes to stderr.
echo Fake warning >&2
''')
with MockedProgram(name=name, returncode=42, script=script) as directory:
assert os.path.isdir(directory)
assert os.path.isfile(os.path.join(directory, name))
program = subprocess.Popen(name, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = program.communicate(input=b'hello world\n')
assert program.returncode == 42
assert stdout == b'HELLO WORLD\n'
assert stderr == b'Fake warning\n'
def test_temporary_directory(self):
"""Test :class:`humanfriendly.testing.TemporaryDirectory`."""
with TemporaryDirectory() as directory:
assert os.path.isdir(directory)
temporary_file = os.path.join(directory, 'some-file')
with open(temporary_file, 'w') as handle:
handle.write("Hello world!")
assert not os.path.exists(temporary_file)
assert not os.path.exists(directory)
def test_touch(self):
"""Test :func:`humanfriendly.testing.touch()`."""
with TemporaryDirectory() as directory:
# Create a file in the temporary directory.
filename = os.path.join(directory, random_string())
assert not os.path.isfile(filename)
touch(filename)
assert os.path.isfile(filename)
# Create a file in a subdirectory.
filename = os.path.join(directory, random_string(), random_string())
assert not os.path.isfile(filename)
touch(filename)
assert os.path.isfile(filename)
def test_patch_attribute(self):
"""Test :class:`humanfriendly.testing.PatchedAttribute`."""
class Subject(object):
my_attribute = 42
instance = Subject()
assert instance.my_attribute == 42
with PatchedAttribute(instance, 'my_attribute', 13) as return_value:
assert return_value is instance
assert instance.my_attribute == 13
assert instance.my_attribute == 42
def test_patch_item(self):
"""Test :class:`humanfriendly.testing.PatchedItem`."""
instance = dict(my_item=True)
assert instance['my_item'] is True
with PatchedItem(instance, 'my_item', False) as return_value:
assert return_value is instance
assert instance['my_item'] is False
assert instance['my_item'] is True
def test_run_cli_intercepts_exit(self):
"""Test that run_cli() intercepts SystemExit."""
returncode, output = run_cli(lambda: sys.exit(42))
self.assertEqual(returncode, 42)
def test_run_cli_intercepts_error(self):
"""Test that run_cli() intercepts exceptions."""
returncode, output = run_cli(self.run_cli_raise_other)
self.assertEqual(returncode, 1)
def run_cli_raise_other(self):
"""run_cli() sample that raises an exception."""
raise ValueError()
def test_run_cli_intercepts_output(self):
"""Test that run_cli() intercepts output."""
expected_output = random_string() + "\n"
returncode, output = run_cli(lambda: sys.stdout.write(expected_output))
self.assertEqual(returncode, 0)
self.assertEqual(output, expected_output)
def test_caching_decorator(self):
"""Test the caching decorator."""
# Confirm that the caching decorator works.
a = cached(lambda: random.random())
b = cached(lambda: random.random())
assert a() == a()
assert b() == b()
# Confirm that functions have their own cache.
assert a() != b()
def test_compact(self):
"""Test :func:`humanfriendly.text.compact()`."""
assert compact(' a \n\n b ') == 'a b'
assert compact('''
%s template notation
''', 'Simple') == 'Simple template notation'
assert compact('''
More {type} template notation
''', type='readable') == 'More readable template notation'
def test_compact_empty_lines(self):
"""Test :func:`humanfriendly.text.compact_empty_lines()`."""
# Simple strings pass through untouched.
assert compact_empty_lines('foo') == 'foo'
# Horizontal whitespace remains untouched.
assert compact_empty_lines('\tfoo') == '\tfoo'
# Line breaks should be preserved.
assert compact_empty_lines('foo\nbar') == 'foo\nbar'
# Vertical whitespace should be preserved.
assert compact_empty_lines('foo\n\nbar') == 'foo\n\nbar'
# Vertical whitespace should be compressed.
assert compact_empty_lines('foo\n\n\nbar') == 'foo\n\nbar'
assert compact_empty_lines('foo\n\n\n\nbar') == 'foo\n\nbar'
assert compact_empty_lines('foo\n\n\n\n\nbar') == 'foo\n\nbar'
def test_dedent(self):
"""Test :func:`humanfriendly.text.dedent()`."""
assert dedent('\n line 1\n line 2\n\n') == 'line 1\n line 2\n'
assert dedent('''
Dedented, %s text
''', 'interpolated') == 'Dedented, interpolated text\n'
assert dedent('''
Dedented, {op} text
''', op='formatted') == 'Dedented, formatted text\n'
def test_pluralization(self):
"""Test :func:`humanfriendly.text.pluralize()`."""
assert pluralize(1, 'word') == '1 word'
assert pluralize(2, 'word') == '2 words'
assert pluralize(1, 'box', 'boxes') == '1 box'
assert pluralize(2, 'box', 'boxes') == '2 boxes'
def test_generate_slug(self):
"""Test :func:`humanfriendly.text.generate_slug()`."""
# Test the basic functionality.
self.assertEqual('some-random-text', generate_slug('Some Random Text!'))
# Test that previous output doesn't change.
self.assertEqual('some-random-text', generate_slug('some-random-text'))
# Test that inputs which can't be converted to a slug raise an exception.
with self.assertRaises(ValueError):
generate_slug(' ')
with self.assertRaises(ValueError):
generate_slug('-')
def test_boolean_coercion(self):
"""Test :func:`humanfriendly.coerce_boolean()`."""
for value in [True, 'TRUE', 'True', 'true', 'on', 'yes', '1']:
self.assertEqual(True, coerce_boolean(value))
for value in [False, 'FALSE', 'False', 'false', 'off', 'no', '0']:
self.assertEqual(False, coerce_boolean(value))
with self.assertRaises(ValueError):
coerce_boolean('not a boolean')
def test_pattern_coercion(self):
"""Test :func:`humanfriendly.coerce_pattern()`."""
empty_pattern = re.compile('')
# Make sure strings are converted to compiled regular expressions.
assert isinstance(coerce_pattern('foobar'), type(empty_pattern))
# Make sure compiled regular expressions pass through untouched.
assert empty_pattern is coerce_pattern(empty_pattern)
# Make sure flags are respected.
pattern = coerce_pattern('foobar', re.IGNORECASE)
assert pattern.match('FOOBAR')
# Make sure invalid values raise the expected exception.
with self.assertRaises(ValueError):
coerce_pattern([])
def test_format_timespan(self):
"""Test :func:`humanfriendly.format_timespan()`."""
minute = 60
hour = minute * 60
day = hour * 24
week = day * 7
year = week * 52
assert '1 nanosecond' == format_timespan(0.000000001, detailed=True)
assert '500 nanoseconds' == format_timespan(0.0000005, detailed=True)
assert '1 microsecond' == format_timespan(0.000001, detailed=True)
assert '500 microseconds' == format_timespan(0.0005, detailed=True)
assert '1 millisecond' == format_timespan(0.001, detailed=True)
assert '500 milliseconds' == format_timespan(0.5, detailed=True)
assert '0.5 seconds' == format_timespan(0.5, detailed=False)
assert '0 seconds' == format_timespan(0)
assert '0.54 seconds' == format_timespan(0.54321)
assert '1 second' == format_timespan(1)
assert '3.14 seconds' == format_timespan(math.pi)
assert '1 minute' == format_timespan(minute)
assert '1 minute and 20 seconds' == format_timespan(80)
assert '2 minutes' == format_timespan(minute * 2)
assert '1 hour' == format_timespan(hour)
assert '2 hours' == format_timespan(hour * 2)
assert '1 day' == format_timespan(day)
assert '2 days' == format_timespan(day * 2)
assert '1 week' == format_timespan(week)
assert '2 weeks' == format_timespan(week * 2)
assert '1 year' == format_timespan(year)
assert '2 years' == format_timespan(year * 2)
assert '6 years, 5 weeks, 4 days, 3 hours, 2 minutes and 500 milliseconds' == \
format_timespan(year * 6 + week * 5 + day * 4 + hour * 3 + minute * 2 + 0.5, detailed=True)
assert '1 year, 2 weeks and 3 days' == \
format_timespan(year + week * 2 + day * 3 + hour * 12)
# Make sure milliseconds are never shown separately when detailed=False.
# https://github.com/xolox/python-humanfriendly/issues/10
assert '1 minute, 1 second and 100 milliseconds' == format_timespan(61.10, detailed=True)
        assert '1 minute and 1.1 seconds' == format_timespan(61.10, detailed=False)
# Test for loss of precision as reported in issue 11:
# https://github.com/xolox/python-humanfriendly/issues/11
assert '1 minute and 0.3 seconds' == format_timespan(60.300)
assert '5 minutes and 0.3 seconds' == format_timespan(300.300)
assert '1 second and 15 milliseconds' == format_timespan(1.015, detailed=True)
assert '10 seconds and 15 milliseconds' == format_timespan(10.015, detailed=True)
assert '1 microsecond and 50 nanoseconds' == format_timespan(0.00000105, detailed=True)
# Test the datetime.timedelta support:
# https://github.com/xolox/python-humanfriendly/issues/27
now = datetime.datetime.now()
then = now - datetime.timedelta(hours=23)
assert '23 hours' == format_timespan(now - then)
def test_parse_timespan(self):
"""Test :func:`humanfriendly.parse_timespan()`."""
self.assertEqual(0, parse_timespan('0'))
self.assertEqual(0, parse_timespan('0s'))
self.assertEqual(0.000000001, parse_timespan('1ns'))
self.assertEqual(0.000000051, parse_timespan('51ns'))
self.assertEqual(0.000001, parse_timespan('1us'))
self.assertEqual(0.000052, parse_timespan('52us'))
self.assertEqual(0.001, parse_timespan('1ms'))
self.assertEqual(0.001, parse_timespan('1 millisecond'))
self.assertEqual(0.5, parse_timespan('500 milliseconds'))
self.assertEqual(0.5, parse_timespan('0.5 seconds'))
self.assertEqual(5, parse_timespan('5s'))
self.assertEqual(5, parse_timespan('5 seconds'))
self.assertEqual(60 * 2, parse_timespan('2m'))
self.assertEqual(60 * 2, parse_timespan('2 minutes'))
self.assertEqual(60 * 3, parse_timespan('3 min'))
self.assertEqual(60 * 3, parse_timespan('3 mins'))
self.assertEqual(60 * 60 * 3, parse_timespan('3 h'))
self.assertEqual(60 * 60 * 3, parse_timespan('3 hours'))
self.assertEqual(60 * 60 * 24 * 4, parse_timespan('4d'))
self.assertEqual(60 * 60 * 24 * 4, parse_timespan('4 days'))
self.assertEqual(60 * 60 * 24 * 7 * 5, parse_timespan('5 w'))
self.assertEqual(60 * 60 * 24 * 7 * 5, parse_timespan('5 weeks'))
with self.assertRaises(InvalidTimespan):
parse_timespan('1z')
def test_parse_date(self):
"""Test :func:`humanfriendly.parse_date()`."""
self.assertEqual((2013, 6, 17, 0, 0, 0), parse_date('2013-06-17'))
self.assertEqual((2013, 6, 17, 2, 47, 42), parse_date('2013-06-17 02:47:42'))
self.assertEqual((2016, 11, 30, 0, 47, 17), parse_date(u'2016-11-30 00:47:17'))
with self.assertRaises(InvalidDate):
parse_date('2013-06-XY')
def test_format_size(self):
"""Test :func:`humanfriendly.format_size()`."""
self.assertEqual('0 bytes', format_size(0))
self.assertEqual('1 byte', format_size(1))
self.assertEqual('42 bytes', format_size(42))
self.assertEqual('1 KB', format_size(1000 ** 1))
self.assertEqual('1 MB', format_size(1000 ** 2))
self.assertEqual('1 GB', format_size(1000 ** 3))
self.assertEqual('1 TB', format_size(1000 ** 4))
self.assertEqual('1 PB', format_size(1000 ** 5))
self.assertEqual('1 EB', format_size(1000 ** 6))
self.assertEqual('1 ZB', format_size(1000 ** 7))
self.assertEqual('1 YB', format_size(1000 ** 8))
self.assertEqual('1 KiB', format_size(1024 ** 1, binary=True))
self.assertEqual('1 MiB', format_size(1024 ** 2, binary=True))
self.assertEqual('1 GiB', format_size(1024 ** 3, binary=True))
self.assertEqual('1 TiB', format_size(1024 ** 4, binary=True))
self.assertEqual('1 PiB', format_size(1024 ** 5, binary=True))
self.assertEqual('1 EiB', format_size(1024 ** 6, binary=True))
self.assertEqual('1 ZiB', format_size(1024 ** 7, binary=True))
self.assertEqual('1 YiB', format_size(1024 ** 8, binary=True))
self.assertEqual('45 KB', format_size(1000 * 45))
self.assertEqual('2.9 TB', format_size(1000 ** 4 * 2.9))
def test_parse_size(self):
"""Test :func:`humanfriendly.parse_size()`."""
self.assertEqual(0, parse_size('0B'))
self.assertEqual(42, parse_size('42'))
self.assertEqual(42, parse_size('42B'))
self.assertEqual(1000, parse_size('1k'))
self.assertEqual(1024, parse_size('1k', binary=True))
self.assertEqual(1000, parse_size('1 KB'))
self.assertEqual(1000, parse_size('1 kilobyte'))
self.assertEqual(1024, parse_size('1 kilobyte', binary=True))
self.assertEqual(1000 ** 2 * 69, parse_size('69 MB'))
self.assertEqual(1000 ** 3, parse_size('1 GB'))
self.assertEqual(1000 ** 4, parse_size('1 TB'))
self.assertEqual(1000 ** 5, parse_size('1 PB'))
self.assertEqual(1000 ** 6, parse_size('1 EB'))
self.assertEqual(1000 ** 7, parse_size('1 ZB'))
self.assertEqual(1000 ** 8, parse_size('1 YB'))
self.assertEqual(1000 ** 3 * 1.5, parse_size('1.5 GB'))
self.assertEqual(1024 ** 8 * 1.5, parse_size('1.5 YiB'))
with self.assertRaises(InvalidSize):
parse_size('1q')
with self.assertRaises(InvalidSize):
parse_size('a')
def test_format_length(self):
"""Test :func:`humanfriendly.format_length()`."""
self.assertEqual('0 metres', format_length(0))
self.assertEqual('1 metre', format_length(1))
self.assertEqual('42 metres', format_length(42))
self.assertEqual('1 km', format_length(1 * 1000))
self.assertEqual('15.3 cm', format_length(0.153))
self.assertEqual('1 cm', format_length(1e-02))
self.assertEqual('1 mm', format_length(1e-03))
self.assertEqual('1 nm', format_length(1e-09))
def test_parse_length(self):
"""Test :func:`humanfriendly.parse_length()`."""
self.assertEqual(0, parse_length('0m'))
self.assertEqual(42, parse_length('42'))
self.assertEqual(1.5, parse_length('1.5'))
self.assertEqual(42, parse_length('42m'))
self.assertEqual(1000, parse_length('1km'))
self.assertEqual(0.153, parse_length('15.3 cm'))
self.assertEqual(1e-02, parse_length('1cm'))
self.assertEqual(1e-03, parse_length('1mm'))
self.assertEqual(1e-09, parse_length('1nm'))
with self.assertRaises(InvalidLength):
parse_length('1z')
with self.assertRaises(InvalidLength):
parse_length('a')
def test_format_number(self):
"""Test :func:`humanfriendly.format_number()`."""
self.assertEqual('1', format_number(1))
self.assertEqual('1.5', format_number(1.5))
self.assertEqual('1.56', format_number(1.56789))
self.assertEqual('1.567', format_number(1.56789, 3))
self.assertEqual('1,000', format_number(1000))
self.assertEqual('1,000', format_number(1000.12, 0))
self.assertEqual('1,000,000', format_number(1000000))
self.assertEqual('1,000,000.42', format_number(1000000.42))
def test_round_number(self):
"""Test :func:`humanfriendly.round_number()`."""
self.assertEqual('1', round_number(1))
self.assertEqual('1', round_number(1.0))
self.assertEqual('1.00', round_number(1, keep_width=True))
self.assertEqual('3.14', round_number(3.141592653589793))
def test_format_path(self):
"""Test :func:`humanfriendly.format_path()`."""
friendly_path = os.path.join('~', '.vimrc')
absolute_path = os.path.join(os.environ['HOME'], '.vimrc')
self.assertEqual(friendly_path, format_path(absolute_path))
def test_parse_path(self):
"""Test :func:`humanfriendly.parse_path()`."""
friendly_path = os.path.join('~', '.vimrc')
absolute_path = os.path.join(os.environ['HOME'], '.vimrc')
self.assertEqual(absolute_path, parse_path(friendly_path))
def test_pretty_tables(self):
"""Test :func:`humanfriendly.tables.format_pretty_table()`."""
# The simplest case possible :-).
data = [['Just one column']]
assert format_pretty_table(data) == dedent("""
-------------------
| Just one column |
-------------------
""").strip()
# A bit more complex: two rows, three columns, varying widths.
data = [['One', 'Two', 'Three'], ['1', '2', '3']]
assert format_pretty_table(data) == dedent("""
---------------------
| One | Two | Three |
| 1 | 2 | 3 |
---------------------
""").strip()
# A table including column names.
column_names = ['One', 'Two', 'Three']
data = [['1', '2', '3'], ['a', 'b', 'c']]
assert ansi_strip(format_pretty_table(data, column_names)) == dedent("""
---------------------
| One | Two | Three |
---------------------
| 1 | 2 | 3 |
| a | b | c |
---------------------
""").strip()
# A table that contains a column with only numeric data (will be right aligned).
column_names = ['Just a label', 'Important numbers']
data = [['Row one', '15'], ['Row two', '300']]
assert ansi_strip(format_pretty_table(data, column_names)) == dedent("""
------------------------------------
| Just a label | Important numbers |
------------------------------------
| Row one | 15 |
| Row two | 300 |
------------------------------------
""").strip()
def test_robust_tables(self):
"""Test :func:`humanfriendly.tables.format_robust_table()`."""
column_names = ['One', 'Two', 'Three']
data = [['1', '2', '3'], ['a', 'b', 'c']]
assert ansi_strip(format_robust_table(data, column_names)) == dedent("""
--------
One: 1
Two: 2
Three: 3
--------
One: a
Two: b
Three: c
--------
""").strip()
column_names = ['One', 'Two', 'Three']
data = [['1', '2', '3'], ['a', 'b', 'Here comes a\nmulti line column!']]
assert ansi_strip(format_robust_table(data, column_names)) == dedent("""
------------------
One: 1
Two: 2
Three: 3
------------------
One: a
Two: b
Three:
Here comes a
multi line column!
------------------
""").strip()
def test_smart_tables(self):
"""Test :func:`humanfriendly.tables.format_smart_table()`."""
column_names = ['One', 'Two', 'Three']
data = [['1', '2', '3'], ['a', 'b', 'c']]
assert ansi_strip(format_smart_table(data, column_names)) == dedent("""
---------------------
| One | Two | Three |
---------------------
| 1 | 2 | 3 |
| a | b | c |
---------------------
""").strip()
column_names = ['One', 'Two', 'Three']
data = [['1', '2', '3'], ['a', 'b', 'Here comes a\nmulti line column!']]
assert ansi_strip(format_smart_table(data, column_names)) == dedent("""
------------------
One: 1
Two: 2
Three: 3
------------------
One: a
Two: b
Three:
Here comes a
multi line column!
------------------
""").strip()
def test_rst_tables(self):
"""Test :func:`humanfriendly.tables.format_rst_table()`."""
# Generate a table with column names.
column_names = ['One', 'Two', 'Three']
data = [['1', '2', '3'], ['a', 'b', 'c']]
self.assertEqual(
format_rst_table(data, column_names),
dedent("""
=== === =====
One Two Three
=== === =====
1 2 3
a b c
=== === =====
""").rstrip(),
)
# Generate a table without column names.
data = [['1', '2', '3'], ['a', 'b', 'c']]
self.assertEqual(
format_rst_table(data),
dedent("""
= = =
1 2 3
a b c
= = =
""").rstrip(),
)
def test_concatenate(self):
"""Test :func:`humanfriendly.text.concatenate()`."""
assert concatenate([]) == ''
assert concatenate(['one']) == 'one'
assert concatenate(['one', 'two']) == 'one and two'
assert concatenate(['one', 'two', 'three']) == 'one, two and three'
def test_split(self):
"""Test :func:`humanfriendly.text.split()`."""
from humanfriendly.text import split
self.assertEqual(split(''), [])
self.assertEqual(split('foo'), ['foo'])
self.assertEqual(split('foo, bar'), ['foo', 'bar'])
self.assertEqual(split('foo, bar, baz'), ['foo', 'bar', 'baz'])
self.assertEqual(split('foo,bar,baz'), ['foo', 'bar', 'baz'])
def test_timer(self):
"""Test :func:`humanfriendly.Timer`."""
for seconds, text in ((1, '1 second'),
(2, '2 seconds'),
(60, '1 minute'),
(60 * 2, '2 minutes'),
(60 * 60, '1 hour'),
(60 * 60 * 2, '2 hours'),
(60 * 60 * 24, '1 day'),
(60 * 60 * 24 * 2, '2 days'),
(60 * 60 * 24 * 7, '1 week'),
(60 * 60 * 24 * 7 * 2, '2 weeks')):
t = Timer(time.time() - seconds)
self.assertEqual(round_number(t.elapsed_time, keep_width=True), '%i.00' % seconds)
self.assertEqual(str(t), text)
# Test rounding to seconds.
t = Timer(time.time() - 2.2)
self.assertEqual(t.rounded, '2 seconds')
# Test automatic timer.
automatic_timer = Timer()
time.sleep(1)
# XXX The following normalize_timestamp(ndigits=0) calls are intended
# to compensate for unreliable clock sources in virtual machines
# like those encountered on Travis CI, see also:
# https://travis-ci.org/xolox/python-humanfriendly/jobs/323944263
self.assertEqual(normalize_timestamp(automatic_timer.elapsed_time, 0), '1.00')
# Test resumable timer.
resumable_timer = Timer(resumable=True)
for i in range(2):
with resumable_timer:
time.sleep(1)
self.assertEqual(normalize_timestamp(resumable_timer.elapsed_time, 0), '2.00')
# Make sure Timer.__enter__() returns the timer object.
with Timer(resumable=True) as timer:
assert timer is not None
def test_spinner(self):
"""Test :func:`humanfriendly.Spinner`."""
stream = StringIO()
spinner = Spinner(label='test spinner', total=4, stream=stream, interactive=True)
for progress in [1, 2, 3, 4]:
spinner.step(progress=progress)
time.sleep(0.2)
spinner.clear()
output = stream.getvalue()
output = (output.replace(ANSI_SHOW_CURSOR, '')
.replace(ANSI_HIDE_CURSOR, ''))
lines = [line for line in output.split(ANSI_ERASE_LINE) if line]
self.assertTrue(len(lines) > 0)
self.assertTrue(all('test spinner' in l for l in lines))
self.assertTrue(all('%' in l for l in lines))
self.assertEqual(sorted(set(lines)), sorted(lines))
def test_automatic_spinner(self):
"""
Test :func:`humanfriendly.AutomaticSpinner`.
There's not a lot to test about the :class:`.AutomaticSpinner` class,
but by at least running it here we are assured that the code functions
on all supported Python versions. :class:`.AutomaticSpinner` is built
on top of the :class:`.Spinner` class so at least we also have the
tests for the :class:`.Spinner` class to back us up.
"""
with AutomaticSpinner(label='test spinner'):
time.sleep(1)
def test_prompt_for_choice(self):
"""Test :func:`humanfriendly.prompts.prompt_for_choice()`."""
# Choice selection without any options should raise an exception.
with self.assertRaises(ValueError):
prompt_for_choice([])
# If there's only one option no prompt should be rendered so we expect
# the following code to not raise an EOFError exception (despite
# connecting standard input to /dev/null).
with open(os.devnull) as handle:
with PatchedAttribute(sys, 'stdin', handle):
only_option = 'only one option (shortcut)'
assert prompt_for_choice([only_option]) == only_option
# Choice selection by full string match.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: 'foo'):
assert prompt_for_choice(['foo', 'bar']) == 'foo'
# Choice selection by substring input.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: 'f'):
assert prompt_for_choice(['foo', 'bar']) == 'foo'
# Choice selection by number.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: '2'):
assert prompt_for_choice(['foo', 'bar']) == 'bar'
# Choice selection by going with the default.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
assert prompt_for_choice(['foo', 'bar'], default='bar') == 'bar'
# Invalid substrings are refused.
replies = ['', 'q', 'z']
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: replies.pop(0)):
assert prompt_for_choice(['foo', 'bar', 'baz']) == 'baz'
# Choice selection by substring input requires an unambiguous substring match.
replies = ['a', 'q']
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: replies.pop(0)):
assert prompt_for_choice(['foo', 'bar', 'baz', 'qux']) == 'qux'
# Invalid numbers are refused.
replies = ['42', '2']
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: replies.pop(0)):
assert prompt_for_choice(['foo', 'bar', 'baz']) == 'bar'
# Test that interactive prompts eventually give up on invalid replies.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
with self.assertRaises(TooManyInvalidReplies):
prompt_for_choice(['a', 'b', 'c'])
def test_prompt_for_confirmation(self):
"""Test :func:`humanfriendly.prompts.prompt_for_confirmation()`."""
# Test some (more or less) reasonable replies that indicate agreement.
for reply in 'yes', 'Yes', 'YES', 'y', 'Y':
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: reply):
assert prompt_for_confirmation("Are you sure?") is True
# Test some (more or less) reasonable replies that indicate disagreement.
for reply in 'no', 'No', 'NO', 'n', 'N':
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: reply):
assert prompt_for_confirmation("Are you sure?") is False
# Test that empty replies select the default choice.
for default_choice in True, False:
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
assert prompt_for_confirmation("Are you sure?", default=default_choice) is default_choice
# Test that a warning is shown when no input nor a default is given.
replies = ['', 'y']
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: replies.pop(0)):
with CaptureOutput(merged=True) as capturer:
assert prompt_for_confirmation("Are you sure?") is True
assert "there's no default choice" in capturer.get_text()
# Test that the default reply is shown in uppercase.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: 'y'):
for default_value, expected_text in (True, 'Y/n'), (False, 'y/N'), (None, 'y/n'):
with CaptureOutput(merged=True) as capturer:
assert prompt_for_confirmation("Are you sure?", default=default_value) is True
assert expected_text in capturer.get_text()
# Test that interactive prompts eventually give up on invalid replies.
with PatchedAttribute(prompts, 'interactive_prompt', lambda p: ''):
with self.assertRaises(TooManyInvalidReplies):
prompt_for_confirmation("Are you sure?")
def test_prompt_for_input(self):
"""Test :func:`humanfriendly.prompts.prompt_for_input()`."""
with open(os.devnull) as handle:
with PatchedAttribute(sys, 'stdin', handle):
# If standard input isn't connected to a terminal the default value should be returned.
default_value = "To seek the holy grail!"
assert prompt_for_input("What is your quest?", default=default_value) == default_value
# If standard input isn't connected to a terminal and no default value
# is given the EOFError exception should be propagated to the caller.
with self.assertRaises(EOFError):
prompt_for_input("What is your favorite color?")
def test_cli(self):
"""Test the command line interface."""
# Test that the usage message is printed by default.
returncode, output = run_cli(main)
assert 'Usage:' in output
# Test that the usage message can be requested explicitly.
returncode, output = run_cli(main, '--help')
assert 'Usage:' in output
# Test handling of invalid command line options.
returncode, output = run_cli(main, '--unsupported-option')
assert returncode != 0
# Test `humanfriendly --format-number'.
returncode, output = run_cli(main, '--format-number=1234567')
assert output.strip() == '1,234,567'
# Test `humanfriendly --format-size'.
random_byte_count = random.randint(1024, 1024 * 1024)
returncode, output = run_cli(main, '--format-size=%i' % random_byte_count)
assert output.strip() == format_size(random_byte_count)
# Test `humanfriendly --format-size --binary'.
random_byte_count = random.randint(1024, 1024 * 1024)
returncode, output = run_cli(main, '--format-size=%i' % random_byte_count, '--binary')
assert output.strip() == format_size(random_byte_count, binary=True)
# Test `humanfriendly --format-length'.
random_len = random.randint(1024, 1024 * 1024)
returncode, output = run_cli(main, '--format-length=%i' % random_len)
assert output.strip() == format_length(random_len)
random_len = float(random_len) / 12345.6
returncode, output = run_cli(main, '--format-length=%f' % random_len)
assert output.strip() == format_length(random_len)
# Test `humanfriendly --format-table'.
returncode, output = run_cli(main, '--format-table', '--delimiter=\t', input='1\t2\t3\n4\t5\t6\n7\t8\t9')
assert output.strip() == dedent('''
-------------
| 1 | 2 | 3 |
| 4 | 5 | 6 |
| 7 | 8 | 9 |
-------------
''').strip()
# Test `humanfriendly --format-timespan'.
random_timespan = random.randint(5, 600)
returncode, output = run_cli(main, '--format-timespan=%i' % random_timespan)
assert output.strip() == format_timespan(random_timespan)
# Test `humanfriendly --parse-size'.
returncode, output = run_cli(main, '--parse-size=5 KB')
assert int(output) == parse_size('5 KB')
# Test `humanfriendly --parse-size'.
returncode, output = run_cli(main, '--parse-size=5 YiB')
assert int(output) == parse_size('5 YB', binary=True)
# Test `humanfriendly --parse-length'.
returncode, output = run_cli(main, '--parse-length=5 km')
assert int(output) == parse_length('5 km')
returncode, output = run_cli(main, '--parse-length=1.05 km')
assert float(output) == parse_length('1.05 km')
# Test `humanfriendly --run-command'.
returncode, output = run_cli(main, '--run-command', 'bash', '-c', 'sleep 2 && exit 42')
assert returncode == 42
# Test `humanfriendly --demo'. The purpose of this test is
# to ensure that the demo runs successfully on all versions
# of Python and outputs the expected sections (recognized by
# their headings) without triggering exceptions. This was
# written as a regression test after issue #28 was reported:
# https://github.com/xolox/python-humanfriendly/issues/28
returncode, output = run_cli(main, '--demo')
assert returncode == 0
lines = [ansi_strip(l) for l in output.splitlines()]
assert "Text styles:" in lines
assert "Foreground colors:" in lines
assert "Background colors:" in lines
assert "256 color mode (standard colors):" in lines
assert "256 color mode (high-intensity colors):" in lines
assert "256 color mode (216 colors):" in lines
assert "256 color mode (gray scale colors):" in lines
def test_ansi_style(self):
"""Test :func:`humanfriendly.terminal.ansi_style()`."""
assert ansi_style(bold=True) == '%s1%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(faint=True) == '%s2%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(italic=True) == '%s3%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(underline=True) == '%s4%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(inverse=True) == '%s7%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(strike_through=True) == '%s9%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(color='blue') == '%s34%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(background='blue') == '%s44%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(color='blue', bright=True) == '%s94%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(color=214) == '%s38;5;214%s' % (ANSI_CSI, ANSI_SGR)
        assert ansi_style(background=214) == '%s48;5;214%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(color=(0, 0, 0)) == '%s38;2;0;0;0%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(color=(255, 255, 255)) == '%s38;2;255;255;255%s' % (ANSI_CSI, ANSI_SGR)
assert ansi_style(background=(50, 100, 150)) == '%s48;2;50;100;150%s' % (ANSI_CSI, ANSI_SGR)
with self.assertRaises(ValueError):
ansi_style(color='unknown')
def test_ansi_width(self):
"""Test :func:`humanfriendly.terminal.ansi_width()`."""
text = "Whatever"
# Make sure ansi_width() works as expected on strings without ANSI escape sequences.
assert len(text) == ansi_width(text)
# Wrap a text in ANSI escape sequences and make sure ansi_width() treats it as expected.
wrapped = ansi_wrap(text, bold=True)
# Make sure ansi_wrap() changed the text.
assert wrapped != text
# Make sure ansi_wrap() added additional bytes.
assert len(wrapped) > len(text)
# Make sure the result of ansi_width() stays the same.
assert len(text) == ansi_width(wrapped)
def test_ansi_wrap(self):
"""Test :func:`humanfriendly.terminal.ansi_wrap()`."""
text = "Whatever"
# Make sure ansi_wrap() does nothing when no keyword arguments are given.
assert text == ansi_wrap(text)
# Make sure ansi_wrap() starts the text with the CSI sequence.
assert ansi_wrap(text, bold=True).startswith(ANSI_CSI)
# Make sure ansi_wrap() ends the text by resetting the ANSI styles.
assert ansi_wrap(text, bold=True).endswith(ANSI_RESET)
def test_html_to_ansi(self):
"""Test the :func:`humanfriendly.terminal.html_to_ansi()` function."""
assert html_to_ansi("Just some plain text") == "Just some plain text"
# Hyperlinks.
assert html_to_ansi('<a href="https://python.org">python.org</a>') == \
'\x1b[0m\x1b[4;94mpython.org\x1b[0m (\x1b[0m\x1b[4;94mhttps://python.org\x1b[0m)'
# Make sure `mailto:' prefixes are stripped (they're not at all useful in a terminal).
assert html_to_ansi('<a href="mailto:[email protected]">[email protected]</a>') == \
'\x1b[0m\x1b[4;[email protected]\x1b[0m'
# Bold text.
assert html_to_ansi("Let's try <b>bold</b>") == "Let's try \x1b[0m\x1b[1mbold\x1b[0m"
assert html_to_ansi("Let's try <span style=\"font-weight: bold\">bold</span>") == \
"Let's try \x1b[0m\x1b[1mbold\x1b[0m"
# Italic text.
assert html_to_ansi("Let's try <i>italic</i>") == \
"Let's try \x1b[0m\x1b[3mitalic\x1b[0m"
assert html_to_ansi("Let's try <span style=\"font-style: italic\">italic</span>") == \
"Let's try \x1b[0m\x1b[3mitalic\x1b[0m"
# Underlined text.
assert html_to_ansi("Let's try <ins>underline</ins>") == \
"Let's try \x1b[0m\x1b[4munderline\x1b[0m"
assert html_to_ansi("Let's try <span style=\"text-decoration: underline\">underline</span>") == \
"Let's try \x1b[0m\x1b[4munderline\x1b[0m"
# Strike-through text.
assert html_to_ansi("Let's try <s>strike-through</s>") == \
"Let's try \x1b[0m\x1b[9mstrike-through\x1b[0m"
assert html_to_ansi("Let's try <span style=\"text-decoration: line-through\">strike-through</span>") == \
"Let's try \x1b[0m\x1b[9mstrike-through\x1b[0m"
# Pre-formatted text.
assert html_to_ansi("Let's try <code>pre-formatted</code>") == \
"Let's try \x1b[0m\x1b[33mpre-formatted\x1b[0m"
# Text colors (with a 6 digit hexadecimal color value).
assert html_to_ansi("Let's try <span style=\"color: #AABBCC\">text colors</s>") == \
"Let's try \x1b[0m\x1b[38;2;170;187;204mtext colors\x1b[0m"
# Background colors (with an rgb(N, N, N) expression).
assert html_to_ansi("Let's try <span style=\"background-color: rgb(50, 50, 50)\">background colors</s>") == \
"Let's try \x1b[0m\x1b[48;2;50;50;50mbackground colors\x1b[0m"
# Line breaks.
assert html_to_ansi("Let's try some<br>line<br>breaks") == \
"Let's try some\nline\nbreaks"
# Check that decimal entities are decoded.
assert html_to_ansi("&") == "&"
# Check that named entities are decoded.
assert html_to_ansi("&") == "&"
assert html_to_ansi(">") == ">"
assert html_to_ansi("<") == "<"
# Check that hexadecimal entities are decoded.
assert html_to_ansi("&") == "&"
# Check that the text callback is actually called.
def callback(text):
return text.replace(':wink:', ';-)')
assert ':wink:' not in html_to_ansi('<b>:wink:</b>', callback=callback)
# Check that the text callback doesn't process preformatted text.
assert ':wink:' in html_to_ansi('<code>:wink:</code>', callback=callback)
# Try a somewhat convoluted but nevertheless real life example from my
# personal chat archives that causes humanfriendly releases 4.15 and
# 4.15.1 to raise an exception.
assert html_to_ansi(u'''
Tweakers zit er idd nog steeds:<br><br>
peter@peter-work> curl -s <a href="tweakers.net">tweakers.net</a> | grep -i hosting<br>
<a href="<a href="http://www.true.nl/webhosting/">http://www.true.nl/webhosting/</a>"
rel="external" id="true" title="Hosting door True"></a><br>
Hosting door <a href="<a href="http://www.true.nl/vps/">http://www.true.nl/vps/</a>"
title="VPS hosting" rel="external">True</a>
''')
def test_generate_output(self):
"""Test the :func:`humanfriendly.terminal.output()` function."""
text = "Standard output generated by output()"
with CaptureOutput(merged=False) as capturer:
output(text)
self.assertEqual([text], capturer.stdout.get_lines())
self.assertEqual([], capturer.stderr.get_lines())
def test_generate_message(self):
"""Test the :func:`humanfriendly.terminal.message()` function."""
text = "Standard error generated by message()"
with CaptureOutput(merged=False) as capturer:
message(text)
self.assertEqual([], capturer.stdout.get_lines())
self.assertEqual([text], capturer.stderr.get_lines())
def test_generate_warning(self):
"""Test the :func:`humanfriendly.terminal.warning()` function."""
from capturer import CaptureOutput
text = "Standard error generated by warning()"
with CaptureOutput(merged=False) as capturer:
warning(text)
self.assertEqual([], capturer.stdout.get_lines())
self.assertEqual([ansi_wrap(text, color='red')], self.ignore_coverage_warning(capturer.stderr))
def ignore_coverage_warning(self, stream):
"""
Filter out coverage.py warning from standard error.
This is intended to remove the following line from the lines captured
on the standard error stream:
Coverage.py warning: No data was collected. (no-data-collected)
"""
return [line for line in stream.get_lines() if 'no-data-collected' not in line]
def test_clean_output(self):
"""Test :func:`humanfriendly.terminal.clean_terminal_output()`."""
# Simple output should pass through unharmed (single line).
assert clean_terminal_output('foo') == ['foo']
# Simple output should pass through unharmed (multiple lines).
assert clean_terminal_output('foo\nbar') == ['foo', 'bar']
# Carriage returns and preceding substrings are removed.
assert clean_terminal_output('foo\rbar\nbaz') == ['bar', 'baz']
# Carriage returns move the cursor to the start of the line without erasing text.
assert clean_terminal_output('aaa\rab') == ['aba']
# Backspace moves the cursor one position back without erasing text.
assert clean_terminal_output('aaa\b\bb') == ['aba']
# Trailing empty lines should be stripped.
assert clean_terminal_output('foo\nbar\nbaz\n\n\n') == ['foo', 'bar', 'baz']
def test_find_terminal_size(self):
"""Test :func:`humanfriendly.terminal.find_terminal_size()`."""
lines, columns = find_terminal_size()
# We really can't assert any minimum or maximum values here because it
# simply doesn't make any sense; it's impossible for me to anticipate
# on what environments this test suite will run in the future.
assert lines > 0
assert columns > 0
# The find_terminal_size_using_ioctl() function is the default
# implementation and it will likely work fine. This makes it hard to
# test the fall back code paths though. However there's an easy way to
# make find_terminal_size_using_ioctl() fail ...
saved_stdin = sys.stdin
saved_stdout = sys.stdout
saved_stderr = sys.stderr
try:
# What do you mean this is brute force?! ;-)
sys.stdin = StringIO()
sys.stdout = StringIO()
sys.stderr = StringIO()
# Now find_terminal_size_using_ioctl() should fail even though
# find_terminal_size_using_stty() might work fine.
lines, columns = find_terminal_size()
assert lines > 0
assert columns > 0
# There's also an ugly way to make `stty size' fail: The
# subprocess.Popen class uses os.execvp() underneath, so if we
# clear the $PATH it will break.
saved_path = os.environ['PATH']
try:
os.environ['PATH'] = ''
# Now find_terminal_size_using_stty() should fail.
lines, columns = find_terminal_size()
assert lines > 0
assert columns > 0
finally:
os.environ['PATH'] = saved_path
finally:
sys.stdin = saved_stdin
sys.stdout = saved_stdout
sys.stderr = saved_stderr
def test_terminal_capabilities(self):
"""Test the functions that check for terminal capabilities."""
from capturer import CaptureOutput
for test_stream in connected_to_terminal, terminal_supports_colors:
# This test suite should be able to run interactively as well as
# non-interactively, so we can't expect or demand that standard streams
# will always be connected to a terminal. Fortunately Capturer enables
# us to fake it :-).
for stream in sys.stdout, sys.stderr:
with CaptureOutput():
assert test_stream(stream)
# Test something that we know can never be a terminal.
with open(os.devnull) as handle:
assert not test_stream(handle)
# Verify that objects without isatty() don't raise an exception.
assert not test_stream(object())
def test_show_pager(self):
"""Test :func:`humanfriendly.terminal.show_pager()`."""
original_pager = os.environ.get('PAGER', None)
try:
# We specifically avoid `less' because it would become awkward to
# run the test suite in an interactive terminal :-).
os.environ['PAGER'] = 'cat'
# Generate a significant amount of random text spread over multiple
# lines that we expect to be reported literally on the terminal.
random_text = "\n".join(random_string(25) for i in range(50))
# Run the pager command and validate the output.
with CaptureOutput() as capturer:
show_pager(random_text)
assert random_text in capturer.get_text()
finally:
if original_pager is not None:
# Restore the original $PAGER value.
os.environ['PAGER'] = original_pager
else:
# Clear the custom $PAGER value.
os.environ.pop('PAGER')
def test_get_pager_command(self):
"""Test :func:`humanfriendly.terminal.get_pager_command()`."""
# Make sure --RAW-CONTROL-CHARS isn't used when it's not needed.
assert '--RAW-CONTROL-CHARS' not in get_pager_command("Usage message")
# Make sure --RAW-CONTROL-CHARS is used when it's needed.
assert '--RAW-CONTROL-CHARS' in get_pager_command(ansi_wrap("Usage message", bold=True))
# Make sure that less-specific options are only used when valid.
options_specific_to_less = ['--no-init', '--quit-if-one-screen']
for pager in 'cat', 'less':
original_pager = os.environ.get('PAGER', None)
try:
# Set $PAGER to `cat' or `less'.
os.environ['PAGER'] = pager
# Get the pager command line.
command_line = get_pager_command()
# Check for less-specific options.
if pager == 'less':
assert all(opt in command_line for opt in options_specific_to_less)
else:
assert not any(opt in command_line for opt in options_specific_to_less)
finally:
if original_pager is not None:
# Restore the original $PAGER value.
os.environ['PAGER'] = original_pager
else:
# Clear the custom $PAGER value.
os.environ.pop('PAGER')
def test_find_meta_variables(self):
"""Test :func:`humanfriendly.usage.find_meta_variables()`."""
assert sorted(find_meta_variables("""
Here's one example: --format-number=VALUE
Here's another example: --format-size=BYTES
A final example: --format-timespan=SECONDS
This line doesn't contain a META variable.
""")) == sorted(['VALUE', 'BYTES', 'SECONDS'])
def test_parse_usage_simple(self):
"""Test :func:`humanfriendly.usage.parse_usage()` (a simple case)."""
introduction, options = self.preprocess_parse_result("""
Usage: my-fancy-app [OPTIONS]
Boring description.
Supported options:
-h, --help
Show this message and exit.
""")
# The following fragments are (expected to be) part of the introduction.
assert "Usage: my-fancy-app [OPTIONS]" in introduction
assert "Boring description." in introduction
assert "Supported options:" in introduction
# The following fragments are (expected to be) part of the documented options.
assert "-h, --help" in options
assert "Show this message and exit." in options
def test_parse_usage_tricky(self):
"""Test :func:`humanfriendly.usage.parse_usage()` (a tricky case)."""
introduction, options = self.preprocess_parse_result("""
Usage: my-fancy-app [OPTIONS]
Here's the introduction to my-fancy-app. Some of the lines in the
introduction start with a command line option just to confuse the
parsing algorithm :-)
For example
--an-awesome-option
is still part of the introduction.
Supported options:
-a, --an-awesome-option
Explanation why this is an awesome option.
-b, --a-boring-option
Explanation why this is a boring option.
""")
# The following fragments are (expected to be) part of the introduction.
assert "Usage: my-fancy-app [OPTIONS]" in introduction
assert any('still part of the introduction' in p for p in introduction)
assert "Supported options:" in introduction
# The following fragments are (expected to be) part of the documented options.
assert "-a, --an-awesome-option" in options
assert "Explanation why this is an awesome option." in options
assert "-b, --a-boring-option" in options
assert "Explanation why this is a boring option." in options
def test_parse_usage_commas(self):
"""Test :func:`humanfriendly.usage.parse_usage()` against option labels containing commas."""
introduction, options = self.preprocess_parse_result("""
Usage: my-fancy-app [OPTIONS]
Some introduction goes here.
Supported options:
-f, --first-option
Explanation of first option.
-s, --second-option=WITH,COMMA
This should be a separate option's description.
""")
# The following fragments are (expected to be) part of the introduction.
assert "Usage: my-fancy-app [OPTIONS]" in introduction
assert "Some introduction goes here." in introduction
assert "Supported options:" in introduction
# The following fragments are (expected to be) part of the documented options.
assert "-f, --first-option" in options
assert "Explanation of first option." in options
assert "-s, --second-option=WITH,COMMA" in options
assert "This should be a separate option's description." in options
def preprocess_parse_result(self, text):
"""Ignore leading/trailing whitespace in usage parsing tests."""
return tuple([p.strip() for p in r] for r in parse_usage(dedent(text)))
def test_format_usage(self):
"""Test :func:`humanfriendly.usage.format_usage()`."""
# Test that options are highlighted.
usage_text = "Just one --option"
formatted_text = format_usage(usage_text)
assert len(formatted_text) > len(usage_text)
assert formatted_text.startswith("Just one ")
# Test that the "Usage: ..." line is highlighted.
usage_text = "Usage: humanfriendly [OPTIONS]"
formatted_text = format_usage(usage_text)
assert len(formatted_text) > len(usage_text)
assert usage_text in formatted_text
assert not formatted_text.startswith(usage_text)
# Test that meta variables aren't erroneously highlighted.
usage_text = (
"--valid-option=VALID_METAVAR\n"
"VALID_METAVAR is bogus\n"
"INVALID_METAVAR should not be highlighted\n"
)
formatted_text = format_usage(usage_text)
formatted_lines = formatted_text.splitlines()
# Make sure the meta variable in the second line is highlighted.
assert ANSI_CSI in formatted_lines[1]
# Make sure the meta variable in the third line isn't highlighted.
assert ANSI_CSI not in formatted_lines[2]
def test_render_usage(self):
"""Test :func:`humanfriendly.usage.render_usage()`."""
assert render_usage("Usage: some-command WITH ARGS") == "**Usage:** `some-command WITH ARGS`"
assert render_usage("Supported options:") == "**Supported options:**"
assert 'code-block' in render_usage(dedent("""
Here comes a shell command:
$ echo test
test
"""))
assert all(token in render_usage(dedent("""
Supported options:
-n, --dry-run
Don't change anything.
""")) for token in ('`-n`', '`--dry-run`'))
def test_deprecated_args(self):
"""Test the deprecated_args() decorator function."""
@deprecated_args('foo', 'bar')
def test_function(**options):
assert options['foo'] == 'foo'
assert options.get('bar') in (None, 'bar')
return 42
fake_fn = MagicMock()
with PatchedAttribute(warnings, 'warn', fake_fn):
assert test_function('foo', 'bar') == 42
with self.assertRaises(TypeError):
test_function('foo', 'bar', 'baz')
            assert fake_fn.called
def test_alias_proxy_deprecation_warning(self):
"""Test that the DeprecationProxy class emits deprecation warnings."""
fake_fn = MagicMock()
with PatchedAttribute(warnings, 'warn', fake_fn):
module = sys.modules[__name__]
aliases = dict(concatenate='humanfriendly.text.concatenate')
proxy = DeprecationProxy(module, aliases)
assert proxy.concatenate == concatenate
            assert fake_fn.called
def test_alias_proxy_sphinx_compensation(self):
"""Test that the DeprecationProxy class emits deprecation warnings."""
with PatchedItem(sys.modules, 'sphinx', types.ModuleType('sphinx')):
define_aliases(__name__, concatenate='humanfriendly.text.concatenate')
assert "concatenate" in dir(sys.modules[__name__])
assert "concatenate" in get_aliases(__name__)
def test_alias_proxy_sphinx_integration(self):
"""Test that aliases can be injected into generated documentation."""
module = sys.modules[__name__]
define_aliases(__name__, concatenate='humanfriendly.text.concatenate')
lines = module.__doc__.splitlines()
deprecation_note_callback(app=None, what=None, name=None, obj=module, options=None, lines=lines)
# Check that something was injected.
assert "\n".join(lines) != module.__doc__
def test_sphinx_customizations(self):
"""Test the :mod:`humanfriendly.sphinx` module."""
class FakeApp(object):
def __init__(self):
self.callbacks = {}
self.roles = {}
def __documented_special_method__(self):
"""Documented unofficial special method."""
pass
def __undocumented_special_method__(self):
# Intentionally not documented :-).
pass
def add_role(self, name, callback):
self.roles[name] = callback
def connect(self, event, callback):
self.callbacks.setdefault(event, []).append(callback)
def bogus_usage(self):
"""Usage: This is not supposed to be reformatted!"""
pass
# Test event callback registration.
fake_app = FakeApp()
setup(fake_app)
assert man_role == fake_app.roles['man']
assert pypi_role == fake_app.roles['pypi']
assert deprecation_note_callback in fake_app.callbacks['autodoc-process-docstring']
assert special_methods_callback in fake_app.callbacks['autodoc-skip-member']
assert usage_message_callback in fake_app.callbacks['autodoc-process-docstring']
# Test that `special methods' which are documented aren't skipped.
assert special_methods_callback(
app=None, what=None, name=None,
obj=FakeApp.__documented_special_method__,
skip=True, options=None,
) is False
# Test that `special methods' which are undocumented are skipped.
assert special_methods_callback(
app=None, what=None, name=None,
obj=FakeApp.__undocumented_special_method__,
skip=True, options=None,
) is True
        # Test formatting of usage messages.
from humanfriendly import cli, sphinx
# We expect the docstring in the `cli' module to be reformatted
# (because it contains a usage message in the expected format).
assert self.docstring_is_reformatted(cli)
# We don't expect the docstring in the `sphinx' module to be
# reformatted (because it doesn't contain a usage message).
assert not self.docstring_is_reformatted(sphinx)
# We don't expect the docstring of the following *method* to be
# reformatted because only *module* docstrings should be reformatted.
assert not self.docstring_is_reformatted(fake_app.bogus_usage)
def docstring_is_reformatted(self, entity):
"""Check whether :func:`.usage_message_callback()` reformats a module's docstring."""
lines = trim_empty_lines(entity.__doc__).splitlines()
saved_lines = list(lines)
usage_message_callback(
app=None, what=None, name=None,
obj=entity, options=None, lines=lines,
)
return lines != saved_lines
def normalize_timestamp(value, ndigits=1):
"""
Round timestamps to the given number of digits.
This helps to make the test suite less sensitive to timing issues caused by
multitasking, processor scheduling, etc.
"""
return '%.2f' % round(float(value), ndigits=ndigits)
# ---------------------------------------------------------------------------
# File: /aggregator.py (repo: TippyFlitsUK/FarmXero, MIT license)
# ---------------------------------------------------------------------------
# This aggregates the Table data from the scrapers and creates journals
# Also writes data to a file for accurate record keeping.
import json
import datetime
import time
import os
import sys
import argparse
import FilfoxScraper
import Addresses
import coingeckoScraper
import xeroAccounts as xa
import data_folders
try:
from xero_python.accounting import ManualJournal, ManualJournalLine
except ImportError:
    print('You need to activate the virtual environment first; run the following:')
    print('source venv/bin/activate')
    sys.exit(1)
# from xero_python.accounting import AccountingApi, ManualJournal, ManualJournalLine
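# Unit note: the 10**-18 factor below converts attoFIL (Filecoin's on-chain
# base unit) to whole FIL; the "nanoFil" in the name is a misnomer kept for
# the existing call sites.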
def nanoFilToFil(nanoFil):
return nanoFil*(10**-18)
def getJournalForDay(day, printJnl=True, archive=data_folders.JOURNAL_ARCHIVE):
walletAddress = Addresses.minerAddress
startDate = day
endDate = day + datetime.timedelta(days=1)
# Generate the miner wallet table
table = FilfoxScraper.getMessageTableForDateRange(startDate, endDate, walletAddress)
# Append transactions from the other wallets
for w in Addresses.wallets:
wTable = FilfoxScraper.getMessageTableForDateRange(startDate, endDate, w)
table += wTable
msgFn = data_folders.MESSAGE_ARCHIVE + 'msgs_' + startDate.strftime('%Y-%m-%d') + '.csv'
FilfoxScraper.writeTableToCSV(msgFn, table)
blocksWon = FilfoxScraper.getBlocksTableForDateRange(startDate, endDate, walletAddress)
blockFn = data_folders.BLOCKS_ARCHIVE + 'blocks_' + startDate.strftime('%Y-%m-%d') + '.csv'
FilfoxScraper.writeBlockTableToCSV(blockFn, blocksWon)
transfers = 0
collat = 0
minerFee = 0
burnFee = 0
slash = 0
numTransactions = 0
blockRewards = 0
numBlocksWon = 0
for r in table:
transfers = transfers + r['transfer']
collat = collat + r['collateral']
minerFee = minerFee + r['miner-fee']
burnFee = burnFee + r['burn-fee']
slash = slash + r['slash']
numTransactions = numTransactions + 1
for b in blocksWon:
blockRewards = blockRewards + int(b['win'])
numBlocksWon = numBlocksWon + 1
nCollat = "Collat: " + str(nanoFilToFil(collat)) + " FIL"
nMinerFee = "Miner Fee: " + str(nanoFilToFil(minerFee)) + " FIL"
nBurnFee = "Burn Fee: " + str(nanoFilToFil(burnFee)) + " FIL"
nSlash = "Slash: " + str(nanoFilToFil(slash)) + " FIL"
nTransfers = "Transfers: " + str(nanoFilToFil(transfers)) + " FIL"
nBlockRewards = "Block Rewards: " + str(nanoFilToFil(blockRewards)) + " FIL (" + str(numBlocksWon)+") blocks won"
nMinerBalance = "Miner Balance: " #+ str(nanoFilToFil(minerBalance)) + "FIL"
exchRate = coingeckoScraper.getFilecoinNZDPriceOnDay(day)
collatNzd = round(nanoFilToFil(collat) * exchRate, 2)
minerFeeNzd = round(nanoFilToFil(minerFee) * exchRate, 2)
burnFeeNzd = round(nanoFilToFil(burnFee) * exchRate, 2)
slashNzd = round(nanoFilToFil(slash) * exchRate, 2)
transfersNzd = -round(nanoFilToFil(transfers) * exchRate, 2)#positive transfers (into miner) come from credits therefore -ve
blockRewardsNzd = -round(nanoFilToFil(blockRewards) * exchRate, 2)#Rewards are credits therefore are -ve
minerBalanceNzd = -(transfersNzd + collatNzd + minerFeeNzd + burnFeeNzd + slashNzd + blockRewardsNzd)
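    # Balancing line: a manual journal must net to zero, so the miner balance
    # entry takes the negated sum of all the other lines.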
jnlNarration = 'Filfox data for the day ' + startDate.strftime('%d-%m-%Y') #+ ' to ' + endDate.strftime('%d-%m-%Y')
jnlLinesAll = [
ManualJournalLine(line_amount=collatNzd, account_code=xa.COLLAT, description=nCollat),
ManualJournalLine(line_amount=minerFeeNzd, account_code=xa.MINER_FEE, description=nMinerFee),
ManualJournalLine(line_amount=burnFeeNzd, account_code=xa.BURN_FEE, description=nBurnFee),
ManualJournalLine(line_amount=slashNzd, account_code=xa.SLASH, description=nSlash),
ManualJournalLine(line_amount=transfersNzd, account_code=xa.TRANSFERS, description=nTransfers),
ManualJournalLine(line_amount=blockRewardsNzd, account_code=xa.BLOCK_REWARDS, description=nBlockRewards),
ManualJournalLine(line_amount=minerBalanceNzd, account_code=xa.MINER_BALANCE, description=nMinerBalance)
]
jnlLines = []
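    # Keep only lines of at least one cent; effectively-zero lines are dropped
    # before the journal is built.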
for l in jnlLinesAll:
if(abs(l.line_amount) >= 0.01):
jnlLines.append(l)
mj = ManualJournal(narration=jnlNarration, journal_lines=jnlLines, date=startDate)
if(archive != 'none'):
ARCHIVE_HEADER = 'date, narration, \
collat, Miner Fee, Burn Fee, Slash, Transfers, Block rewards, \
Blocks won, exch rate, \
NZD collat, NZD Miner Fee, NZD Burn Fee, NZD Slash, NZD Transfers, NZD Block rewards, NZD Balance\n'
if(os.path.exists(archive) == False):
with open(archive, 'w+') as f:
f.write(ARCHIVE_HEADER)
csvLine = startDate.strftime('%d-%m-%Y')+','+str(jnlNarration)+','+\
str(collat)+','+str(minerFee)+','+str(burnFee)+','+str(slash)+','+str(transfers)+','+str(blockRewards)+','+\
str(numBlocksWon)+','+str(exchRate)+','+\
str(collatNzd)+','+str(minerFeeNzd)+','+str(burnFeeNzd)+','+str(slashNzd)+','+str(transfersNzd)+','+str(blockRewardsNzd)+','+str(minerBalanceNzd)+'\n'
with open(archive, 'a') as f:
f.write(csvLine)
if(printJnl):
print(jnlNarration)
print('Dr collat (601)' + str(collatNzd)) # collat is represented within miner balance
print('Dr miner fee (311)' + str(minerFeeNzd))
print('Dr burn fee (312)' + str(burnFeeNzd))
print('Dr slash (319)' + str(slashNzd))
print('Dr/cr transfers (990)' + str(transfersNzd)) #These are transferred out of info.farm accounts for now
print(' Cr block rewards (200)' + str(blockRewardsNzd))
print(' Cr minerbalance (601) ' + str(minerBalanceNzd))
print('values in NZD')
print('blocks won: ' + str(numBlocksWon))
return mj
if __name__ == '__main__':
#print('you ran the aggregator stand alone: warning no journals posted to Xero')
p = argparse.ArgumentParser(description='Python Aggregator')
p.add_argument('-d', '--day', help='Day you want in format yyyy-mm-dd', required=True)
p.add_argument('-p', '--print', help='Print the journal to std out', required=False, default=True)
p.add_argument('-a', '--archive',
help='Path for CSV output (default '+data_folders.JOURNAL_ARCHIVE+') or "none" for no archive',
required=False, default=data_folders.JOURNAL_ARCHIVE)
args = p.parse_args()
day = datetime.datetime.strptime(args.day, "%Y-%m-%d")
getJournalForDay(day, args.print, args.archive)
# getJournalForDay(datetime.date(2020,11,1))
# ---------------------------------------------------------------------------
# File: /protected/api/urls.py (repo: Ryanb58/cas-app, no license listed)
# ---------------------------------------------------------------------------
"""URL patterns for the protected API app."""
from django.conf.urls import url, include
from api.views import Me
urlpatterns = [
url('^me/$', Me.as_view()),
]
# ---------------------------------------------------------------------------
# File: /lis/specimen/lab_aliquot_list/admin/main.py
# (repo: botswana-harvard/lis, no license listed)
# ---------------------------------------------------------------------------
from django.contrib import admin
from ..models import AliquotCondition, AliquotType
class AliquotTypeAdmin(admin.ModelAdmin):
list_display = ('alpha_code', 'numeric_code', 'name', 'created', 'modified')
ordering = ['name']
admin.site.register(AliquotType, AliquotTypeAdmin)
class AliquotConditionAdmin(admin.ModelAdmin):
list_display = ('display_index', 'name', 'short_name', 'field_name', 'created', 'modified')
ordering = ['display_index']
admin.site.register(AliquotCondition, AliquotConditionAdmin)
# ---------------------------------------------------------------------------
# File: /src/fava/core/watcher.py (repo: dallaslu/fava, MIT license)
# ---------------------------------------------------------------------------
"""A simple file and folder watcher."""
import os
from typing import Iterable
from typing import List
class Watcher:
"""A simple file and folder watcher.
For folders, only checks mtime of the folder and all subdirectories.
So a file change won't be noticed, but only new/deleted files.
"""
__slots__ = ["_files", "_folders", "_last_checked"]
def __init__(self) -> None:
self._files: List[str] = []
self._folders: List[str] = []
self._last_checked = 0
def update(self, files: Iterable[str], folders: Iterable[str]) -> None:
"""Update the folders/files to watch.
Args:
files: A list of file paths.
folders: A list of paths to folders.
"""
self._files = list(files)
self._folders = list(folders)
self.check()
def check(self) -> bool:
"""Check for changes.
Returns:
`True` if there was a file change in one of the files or folders,
`False` otherwise.
"""
latest_mtime = 0
for path in self._files:
try:
mtime = os.stat(path).st_mtime_ns
except FileNotFoundError:
return True
if mtime > latest_mtime:
latest_mtime = mtime
for path in self._folders:
for dirpath, _, _ in os.walk(path):
mtime = os.stat(dirpath).st_mtime_ns
if mtime > latest_mtime:
latest_mtime = mtime
changed = bool(latest_mtime != self._last_checked)
self._last_checked = latest_mtime
return changed
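
# Minimal usage sketch (hypothetical file names and reaction, not part of the
# module itself):
#
#     watcher = Watcher()
#     watcher.update(files=["ledger.beancount"], folders=["documents"])
#     ...
#     if watcher.check():
#         pass  # e.g. trigger a reload of the watched files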
# ---------------------------------------------------------------------------
# File: /mitmproxy/tools/web/webaddons.py
# (repo: tinycarrot/mitmproxy, MIT license)
# ---------------------------------------------------------------------------
class WebAddon:
def load(self, loader):
loader.add_option(
"web_open_browser", bool, True,
"Start a browser."
)
loader.add_option(
"web_debug", bool, False,
"Enable mitmweb debugging."
)
loader.add_option(
"web_port", int, 8081,
"Web UI port."
)
loader.add_option(
"web_iface", str, "127.0.0.1",
"Web UI interface."
)
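# Once this addon is loaded, the options registered above are read back
# through mitmproxy's addon context, e.g. (sketch):
#
#     from mitmproxy import ctx
#     port = ctx.options.web_port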
| [
"[email protected]"
]
| |
bfb0da6d46807c7ad21b0e0d2e50682075561a3f | 07bd6d166bfe69f62559d51476ac724c380f932b | /devel/lib/python2.7/dist-packages/webots_ros/msg/_StringStamped.py | 4ab6e7193960537058f1be891960c38d08d48a87 | []
| no_license | Dangko/webots_differential_car | 0efa45e1d729a14839e6e318da64c7f8398edd17 | 188fe93c2fb8d2e681b617df78b93dcdf52e09a9 | refs/heads/master | 2023-06-02T16:40:58.472884 | 2021-06-14T09:19:58 | 2021-06-14T09:19:58 | 376,771,194 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,773 | py | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from webots_ros/StringStamped.msg. Do not edit."""
import codecs
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import std_msgs.msg
class StringStamped(genpy.Message):
_md5sum = "c99a9440709e4d4a9716d55b8270d5e7"
_type = "webots_ros/StringStamped"
_has_header = True # flag to mark the presence of a Header object
_full_text = """Header header
string data
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
string frame_id
"""
__slots__ = ['header','data']
_slot_types = ['std_msgs/Header','string']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,data
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(StringStamped, self).__init__(*args, **kwds)
# message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.data is None:
self.data = ''
else:
self.header = std_msgs.msg.Header()
self.data = ''
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8', 'rosmsg')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_get_struct_3I().pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
_x = self.data
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
buff.write(struct.Struct('<I%ss'%length).pack(length, _x))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
codecs.lookup_error("rosmsg").msg_type = self._type
try:
if self.header is None:
self.header = std_msgs.msg.Header()
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _get_struct_3I().unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8', 'rosmsg')
else:
self.header.frame_id = str[start:end]
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.data = str[start:end].decode('utf-8', 'rosmsg')
else:
self.data = str[start:end]
return self
except struct.error as e:
raise genpy.DeserializationError(e) # most likely buffer underfill
_struct_I = genpy.struct_I
def _get_struct_I():
global _struct_I
return _struct_I
_struct_3I = None
def _get_struct_3I():
global _struct_3I
if _struct_3I is None:
_struct_3I = struct.Struct("<3I")
return _struct_3I
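# Illustrative round-trip (assumes a ROS environment providing std_msgs):
#
#     import io
#     msg = StringStamped(data='hello')
#     buf = io.BytesIO()
#     msg.serialize(buf)
#     copy = StringStamped().deserialize(buf.getvalue())
#     assert copy.data == 'hello'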
| [
"[email protected]"
]
| |
273ce68382911346102b3d60d58097e4e67c9a88 | 548c26cc8e68c3116cecaf7e5cd9aadca7608318 | /feeds/migrations/0022_auto__chg_field_featuremapping_sku_type.py | ad3ea1223a9ca8f44c2ac18fd76c2e3e23aa9c99 | []
| no_license | Morphnus-IT-Solutions/riba | b69ecebf110b91b699947b904873e9870385e481 | 90ff42dfe9c693265998d3182b0d672667de5123 | refs/heads/master | 2021-01-13T02:18:42.248642 | 2012-09-06T18:20:26 | 2012-09-06T18:20:26 | 4,067,896 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 34,138 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'FeatureMapping.sku_type'
db.alter_column('feeds_featuremapping', 'sku_type', self.gf('django.db.models.fields.CharField')(max_length=50, null=True, blank=True))
def backwards(self, orm):
# Changing field 'FeatureMapping.sku_type'
db.alter_column('feeds_featuremapping', 'sku_type', self.gf('django.db.models.fields.CharField')(max_length=30, null=True, blank=True))
models = {
'accounts.account': {
'Meta': {'object_name': 'Account'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Client']"}),
'code': ('django.db.models.fields.CharField', [], {'max_length': '200', 'blank': 'True'}),
'confirmed_order_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '500'}),
'confirmed_order_helpline': ('django.db.models.fields.CharField', [], {'default': "'0-922-222-1947'", 'max_length': '25'}),
'customer_support_no': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'dni': ('django.db.models.fields.CharField', [], {'max_length': '5', 'blank': 'True'}),
'greeting_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'greeting_title': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_exclusive': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pending_order_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '500'}),
'pending_order_helpline': ('django.db.models.fields.CharField', [], {'default': "'0-922-222-1947'", 'max_length': '25'}),
'pg_return_url': ('django.db.models.fields.URLField', [], {'default': "'http://www.chaupaati.in'", 'max_length': '200', 'blank': 'True'}),
'primary_email': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'primary_phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'returns_policy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'secondary_email': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'secondary_phone': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'share_product_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '500'}),
'shipping_policy': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'signature': ('django.db.models.fields.TextField', [], {}),
'sms_mask': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'tos': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'Channel'", 'max_length': '100'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'accounts.client': {
'Meta': {'object_name': 'Client'},
'clientdomain_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'confirmed_order_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '500'}),
'confirmed_order_helpline': ('django.db.models.fields.CharField', [], {'default': "'0-922-222-1947'", 'max_length': '25'}),
'feedback_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_pricelist': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'noreply_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '200'}),
'pending_order_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '500'}),
'pending_order_helpline': ('django.db.models.fields.CharField', [], {'default': "'0-922-222-1947'", 'max_length': '25'}),
'promotions_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '200'}),
'sale_pricelist': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'share_product_email': ('django.db.models.fields.CharField', [], {'default': "'<Chaupaati Bazaar> [email protected]'", 'max_length': '500'}),
'signature': ('django.db.models.fields.TextField', [], {}),
'sms_mask': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'catalog.availability': {
'Meta': {'object_name': 'Availability'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'})
},
'catalog.brand': {
'Meta': {'object_name': 'Brand'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'catalog.product': {
'Meta': {'object_name': 'Product'},
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Brand']"}),
'cart_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']"}),
'confirmed_order_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'inr'", 'max_length': '3'}),
'description': ('tinymce.models.HTMLField', [], {}),
'ext_large_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'ext_medium_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'ext_small_image_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'has_images': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'modified_on': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'pending_order_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'product_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.ProductType']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '150', 'db_index': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'active'", 'max_length': '15', 'db_index': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'normal'", 'max_length': '10'}),
'video_embed': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'view_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'catalog.sellerratechart': {
'Meta': {'object_name': 'SellerRateChart'},
'article_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '100', 'blank': 'True'}),
'availability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Availability']"}),
'cashback_amount': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'cod_available_at': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'cod_available_at'", 'null': 'True', 'to': "orm['catalog.Availability']"}),
'cod_charge': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '6', 'decimal_places': '2', 'blank': 'True'}),
'condition': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '5', 'db_index': 'True'}),
'detailed_desc': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'external_product_id': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'external_product_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'gift_desc': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'gift_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'home_deliverable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_cod_available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_fmemi_available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_prefered': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_so_available': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'key_feature': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'max_shipping': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'min_shipping': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'offer_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'otc': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'payment_charges_paid_by': ('django.db.models.fields.CharField', [], {'default': "'chaupaati'", 'max_length': '15'}),
'payment_collection_charges': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'seller': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'products_offered'", 'to': "orm['accounts.Account']"}),
'ship_local_only': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'shipping_charges': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'shipping_duration': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'shipping_paid_by': ('django.db.models.fields.CharField', [], {'default': "'vendor'", 'max_length': '15'}),
'shipping_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2'}),
'short_desc': ('tinymce.models.HTMLField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'stock_status': ('django.db.models.fields.CharField', [], {'default': "'instock'", 'max_length': '100'}),
'transfer_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2'}),
'visibility_status': ('django.db.models.fields.CharField', [], {'default': "'always_visible'", 'max_length': '100', 'db_index': 'True'}),
'warranty': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'whats_in_the_box': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'categories.category': {
'Meta': {'object_name': 'Category'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Client']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'ext_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'google_conversion_label': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'moderate': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']", 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'db_index': 'True'}),
'store': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Store']", 'null': 'True', 'blank': 'True'}),
'tagline': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'categories.feature': {
'Meta': {'object_name': 'Feature'},
'allow_multiple_select': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.FeatureGroup']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index_for_presence': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'max': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'min': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'product_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.ProductType']", 'null': 'True', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Unit']", 'null': 'True', 'blank': 'True'}),
'use_as_key_features': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'use_for_icons': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'})
},
'categories.featurechoice': {
'Meta': {'unique_together': "(('name', 'feature'),)", 'object_name': 'FeatureChoice'},
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Feature']"}),
'icon': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'categories.featuregroup': {
'Meta': {'unique_together': "(('product_type', 'name'),)", 'object_name': 'FeatureGroup'},
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'hide_unavailable_features': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'product_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.ProductType']", 'null': 'True', 'blank': 'True'}),
'sort_order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
},
'categories.producttype': {
'Meta': {'unique_together': "(('type', 'client'),)", 'object_name': 'ProductType'},
'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Client']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'categories.store': {
'Meta': {'object_name': 'Store'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'})
},
'categories.unit': {
'Meta': {'object_name': 'Unit'},
'base': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Unit']", 'null': 'True', 'blank': 'True'}),
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inverse_multipler': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'multiplier': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '12', 'decimal_places': '2', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '150', 'db_index': 'True'})
},
'feeds.apiresponse': {
'Meta': {'object_name': 'APIResponse'},
'client': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'login': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'order_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'post': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'response': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'session_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '50', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'})
},
'feeds.availabilitymap': {
'Meta': {'object_name': 'AvailabilityMap'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'applies_to': ('django.db.models.fields.CharField', [], {'default': "'account'", 'max_length': '25', 'db_index': 'True'}),
'availability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Availability']"}),
'brand': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sku': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'blank': 'True'})
},
'feeds.brandblacklist': {
'Meta': {'unique_together': "(('brand', 'account'),)", 'object_name': 'BrandBlackList'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'brand': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'feeds.brandmapping': {
'Meta': {'unique_together': "(('brand', 'account'),)", 'object_name': 'BrandMapping'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'brand': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Brand']"})
},
'feeds.categoryblacklist': {
'Meta': {'unique_together': "(('category', 'account'),)", 'object_name': 'CategoryBlackList'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'feeds.categorymapping': {
'Meta': {'unique_together': "(('category', 'account'),)", 'object_name': 'CategoryMapping'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mapped_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']"})
},
'feeds.extpricelist': {
'Meta': {'object_name': 'ExtPricelist'},
'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['accounts.Account']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'offer_price': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'priceList': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['pricing.PriceList']"}),
'rate_chart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.SellerRateChart']"})
},
'feeds.featuremapping': {
'Meta': {'object_name': 'FeatureMapping'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
'action': ('django.db.models.fields.CharField', [], {'default': "'add'", 'max_length': '10'}),
'bool': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'brand_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feeds.BrandMapping']", 'null': 'True', 'blank': 'True'}),
'category_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feeds.CategoryMapping']", 'null': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'feature': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Feature']"}),
'feature_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sku_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'skuinfo': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feeds.SKUInfo']", 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '22', 'decimal_places': '2', 'blank': 'True'})
},
'feeds.featureselectedchoice': {
'Meta': {'unique_together': "(('choice', 'feature_mapping'),)", 'object_name': 'FeatureSelectedChoice'},
'choice': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.FeatureChoice']"}),
'feature_mapping': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feeds.FeatureMapping']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'feeds.skublacklist': {
'Meta': {'unique_together': "(('sku', 'account'),)", 'object_name': 'SKUBlackList'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
'feeds.skuinfo': {
'Meta': {'unique_together': "(('sku', 'account'),)", 'object_name': 'SKUInfo'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'brand': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Brand']", 'null': 'True', 'blank': 'True'}),
'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.Category']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']", 'null': 'True', 'blank': 'True'}),
'sku': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
'feeds.skutypeproducttypemapping': {
'Meta': {'object_name': 'SkuTypeProductTypeMapping'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['categories.ProductType']"}),
'sku_type': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
},
'feeds.subscriptionsync': {
'Meta': {'object_name': 'SubscriptionSync'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'ext_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'feeds.syncevent': {
'Meta': {'object_name': 'SyncEvent'},
'account': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
'adds': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'deletes': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'edits': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'ended_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'found': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'new_masters': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'stack_trace': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'running'", 'max_length': '25'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'unavailable': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'feeds.synceventproductmapping': {
'Meta': {'object_name': 'SyncEventProductMapping'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.Product']"}),
'sku': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'db_index': 'True', 'blank': 'True'}),
'sync_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feeds.SyncEvent']"})
},
'feeds.synceventratechartmapping': {
'Meta': {'object_name': 'SyncEventRateChartMapping'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '25', 'db_index': 'True'}),
'change_log': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'rate_chart': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['catalog.SellerRateChart']"}),
'sku': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'db_index': 'True', 'blank': 'True'}),
'sync_event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['feeds.SyncEvent']"})
},
'pricing.pricelist': {
'Meta': {'object_name': 'PriceList'},
'currency': ('django.db.models.fields.CharField', [], {'default': "'inr'", 'max_length': '3'}),
'exchange_rate': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '4', 'decimal_places': '2'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_price_label': ('django.db.models.fields.CharField', [], {'default': "'List Price'", 'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'offer_price_label': ('django.db.models.fields.CharField', [], {'default': "'Offer Price'", 'max_length': '100'})
}
}
complete_apps = ['feeds']
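# Applied (or reverted) through South's management command, e.g.:
#
#     python manage.py migrate feeds 0022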
| [
"[email protected]"
]
| |
1feb58bb4dc7ff07d48036a6486d2d0671724a17 | 8eab8ab725c2132bb8d090cdb2d23a5f71945249 | /virt/Lib/site-packages/numpy/tests/test_reloading.py | 8d8c8aa34be8cc90d783139224604c4f9c6f955f | [
"GPL-3.0-only",
"BSD-3-Clause-Open-MPI",
"GPL-3.0-or-later",
"GCC-exception-3.1",
"BSD-3-Clause",
"MIT"
]
| permissive | JoaoSevergnini/metalpy | 6c88a413a82bc25edd9308b8490a76fae8dd76ca | c2d0098a309b6ce8c756ff840bfb53fb291747b6 | refs/heads/main | 2023-04-18T17:25:26.474485 | 2022-09-18T20:44:45 | 2022-09-18T20:44:45 | 474,773,752 | 3 | 1 | MIT | 2022-11-03T20:07:50 | 2022-03-27T22:21:01 | Python | UTF-8 | Python | false | false | 2,244 | py | from numpy.testing import assert_raises, assert_warns, assert_, assert_equal
from numpy.compat import pickle
import sys
import subprocess
import textwrap
from importlib import reload
def test_numpy_reloading():
# gh-7844. Also check that relevant globals retain their identity.
import numpy as np
import numpy._globals
_NoValue = np._NoValue
VisibleDeprecationWarning = np.VisibleDeprecationWarning
ModuleDeprecationWarning = np.ModuleDeprecationWarning
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
assert_raises(RuntimeError, reload, numpy._globals)
with assert_warns(UserWarning):
reload(np)
assert_(_NoValue is np._NoValue)
assert_(ModuleDeprecationWarning is np.ModuleDeprecationWarning)
assert_(VisibleDeprecationWarning is np.VisibleDeprecationWarning)
def test_novalue():
import numpy as np
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
assert_equal(repr(np._NoValue), '<no value>')
assert_(pickle.loads(pickle.dumps(np._NoValue,
protocol=proto)) is np._NoValue)
def test_full_reimport():
"""At the time of writing this, it is *not* truly supported, but
apparently enough users rely on it, for it to be an annoying change
when it started failing previously.
"""
# Test within a new process, to ensure that we do not mess with the
# global state during the test run (could lead to cryptic test failures).
# This is generally unsafe, especially, since we also reload the C-modules.
code = textwrap.dedent(r"""
import sys
from pytest import warns
import numpy as np
for k in list(sys.modules.keys()):
if "numpy" in k:
del sys.modules[k]
with warns(UserWarning):
import numpy as np
""")
p = subprocess.run([sys.executable, '-c', code], capture_output=True)
if p.returncode:
raise AssertionError(
f"Non-zero return code: {p.returncode!r}\n\n{p.stderr.decode()}"
)
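# These tests are collected by NumPy's pytest-based suite and can also be
# run directly, e.g.:
#
#     pytest numpy/tests/test_reloading.py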
| [
"[email protected]"
]
| |
24a3b7de73fba71d6d59d3be753483a2c737d4ef | 76029924b4bad545111f393a707b24eadfc85277 | /lab2/turtle_7.py | e38a4c03450b679d6be2a25e7a4d761d88567e2c | []
| no_license | python-practice-b02-927/Syrovatkin-Stepan | e69067895d7cfd3d1c15ba0b9f40600895023e8f | c05baa4cc2ff16af45bf031d30597234781fa75b | refs/heads/master | 2020-07-23T11:34:43.988759 | 2019-11-12T17:03:52 | 2019-11-12T17:03:52 | 207,544,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | import turtle
turtle.shape('turtle')
fi = 20   # turn angle per step, in degrees
n = 1     # initial step length
k = 500   # number of steps
for i in range(k):
    turtle.forward(n)
    turtle.left(fi)
    n += 0.25  # grow the step so the path spirals outward
turtle.exitonclick()  # keep the window open until it is clicked
| [
"[email protected]"
]
| |
287c9bb18313fe4a10e9970681fa5be809d31ad2 | 109a2b213d0c2e4798aa419d47682e2c28ab98f4 | /archimedean_spiral.py | bd12b17761b26bda454b80ed3b93505a05f32bb8 | []
| no_license | browlm13/cartesian_coordinates_to_single_number | d165d0a3638e7177d9b839de5da7df721bc18ad0 | 779abe0c960dab0ec045c6fa08d2b2930b079c16 | refs/heads/master | 2021-01-24T11:20:34.084792 | 2018-02-27T05:47:57 | 2018-02-27T05:47:57 | 123,077,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 300 | py | #!/usr/bin/env python
"""
Cartesian coordinates <-> time parameter conversion using an Archimedean spiral.
"""
import math
import sys
def archimedean_spiral_to_cartesean(t, a=sys.float_info.min):
    # Assumed spiral form r = a * t: the point reached after sweeping angle t.
    return (a * t * math.cos(t), a * t * math.sin(t))
def archimedean_spiral_from_cartesean(cartesean_pair, a=sys.float_info.min):
    # Inverse for points lying on that spiral: r = a * t, hence t = r / a.
    x, y = cartesean_pair
    return math.hypot(x, y) / a
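# Round-trip sketch under the assumed r = a * t form:
#
#     t = 12.5
#     point = archimedean_spiral_to_cartesean(t, a=1.0)
#     assert abs(archimedean_spiral_from_cartesean(point, a=1.0) - t) < 1e-9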
| [
"[email protected]"
]
| |
52c895be2c125d0a78aff00b4134616b48103225 | 98dae6deaf31bcacc078eeb1bdbdb8bd3ac3784f | /dace/frontend/common/op_impl.py | 061b8e72849ba031189e21924aab3c8e25525f3e | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
]
| permissive | cpenny42/dace | da9b241ea0808f1798645ab917e1484c45a3a748 | 2c7814b4f02a6870bb25ae08113c0cc3791e1178 | refs/heads/master | 2020-06-24T09:06:23.091624 | 2019-05-10T11:11:14 | 2019-05-10T11:11:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73,114 | py | ''' DaCe SDFG linear algebra operation library. '''
import copy
import dace
import dace.sdfg as sd
import dace.subsets as sbs
from dace import symbolic
import typing
import numpy as np
State = dace.sdfg.SDFGState
Shape = typing.List[typing.Union[int, dace.symbol]]
Index = typing.List[typing.Union[int, str, dace.symbol]]
Node = dace.graph.nodes.Node
DNode = dace.graph.nodes.AccessNode
# TODO: Most of the external operations here emit Z (complex double) ops, fix
def _to_blastype(dtype):
""" Returns a BLAS character that corresponds to the input type.
Used in MKL/CUBLAS calls. """
if dtype == np.float16:
return 'H'
elif dtype == np.float32:
return 'S'
elif dtype == np.float64:
return 'D'
elif dtype == np.complex64:
return 'C'
elif dtype == np.complex128:
return 'Z'
else:
raise TypeError(
'Type %s not supported in BLAS operations' % dtype.__name__)
def _to_cudatype(dtype):
""" Returns a CUDA typename that corresponds to the input type.
Used in CUBLAS calls. """
if dtype == np.float16:
return '__half'
elif dtype == np.float32:
return 'float'
elif dtype == np.float64:
return 'double'
elif dtype == np.complex64:
return 'cuComplex'
elif dtype == np.complex128:
return 'cuDoubleComplex'
else:
raise TypeError(
            'Type %s not supported in CUDA operations' % dtype.__name__)
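# For example, _to_blastype(np.float64) == 'D' and
# _to_cudatype(np.complex64) == 'cuComplex'.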
# TODO: Refactor to use GPUTransformLocalStorage?
def gpu_transform_tasklet(sdfg, graph, tasklet_node):
""" Transforms a tasklet to run on the GPU. Adapted from
`GPUTransformLocalStorage`.
@see: dace.transformation.dataflow.GPUTransformLocalStorage
"""
cnode = tasklet_node
exit_nodes = [tasklet_node]
gpu_storage_types = [
dace.types.StorageType.GPU_Global, dace.types.StorageType.GPU_Shared,
dace.types.StorageType.GPU_Stack
]
#######################################################
# Add GPU copies of CPU arrays (i.e., not already on GPU)
# First, understand which arrays to clone
all_out_edges = []
for enode in exit_nodes:
all_out_edges.extend(list(graph.out_edges(enode)))
in_arrays_to_clone = set()
out_arrays_to_clone = set()
for e in graph.in_edges(cnode):
data_node = sd.find_input_arraynode(graph, e)
if data_node.desc(sdfg).storage not in gpu_storage_types:
in_arrays_to_clone.add((data_node, e.data))
for e in all_out_edges:
data_node = sd.find_output_arraynode(graph, e)
if data_node.desc(sdfg).storage not in gpu_storage_types:
out_arrays_to_clone.add((data_node, e.data))
# Second, create a GPU clone of each array
# TODO: Overapproximate union of memlets
cloned_arrays = {}
in_cloned_arraynodes = {}
out_cloned_arraynodes = {}
for array_node, memlet in in_arrays_to_clone:
array = array_node.desc(sdfg)
cloned_name = 'gpu_' + array_node.data
for i, r in enumerate(memlet.bounding_box_size()):
size = symbolic.overapproximate(r)
try:
if int(size) == 1:
suffix = []
for c in str(memlet.subset[i][0]):
if c.isalpha() or c.isdigit() or c == '_':
suffix.append(c)
elif c == '+':
suffix.append('p')
elif c == '-':
suffix.append('m')
elif c == '*':
suffix.append('t')
elif c == '/':
suffix.append('d')
cloned_name += '_' + ''.join(suffix)
except:
continue
if cloned_name in sdfg.arrays.keys():
cloned_array = sdfg.arrays[cloned_name]
elif array_node.data in cloned_arrays:
cloned_array = cloned_arrays[array_node.data]
else:
full_shape = []
for r in memlet.bounding_box_size():
size = symbolic.overapproximate(r)
try:
full_shape.append(int(size))
except:
full_shape.append(size)
actual_dims = [
idx for idx, r in enumerate(full_shape)
if not (isinstance(r, int) and r == 1)
]
if len(actual_dims) == 0: # abort
actual_dims = [len(full_shape) - 1]
if isinstance(array, dace.data.Scalar):
cloned_array = sdfg.add_array(
name=cloned_name,
shape=[1],
dtype=array.dtype,
transient=True,
storage=dace.types.StorageType.GPU_Global)
else:
cloned_array = sdfg.add_array(
name=cloned_name,
shape=[full_shape[d] for d in actual_dims],
dtype=array.dtype,
materialize_func=array.materialize_func,
transient=True,
storage=dace.types.StorageType.GPU_Global,
allow_conflicts=array.allow_conflicts,
access_order=tuple(
[array.access_order[d] for d in actual_dims]),
strides=[array.strides[d] for d in actual_dims],
offset=[array.offset[d] for d in actual_dims])
cloned_arrays[array_node.data] = cloned_name
cloned_node = type(array_node)(cloned_name)
in_cloned_arraynodes[array_node.data] = cloned_node
for array_node, memlet in out_arrays_to_clone:
array = array_node.desc(sdfg)
cloned_name = 'gpu_' + array_node.data
for i, r in enumerate(memlet.bounding_box_size()):
size = symbolic.overapproximate(r)
try:
if int(size) == 1:
suffix = []
for c in str(memlet.subset[i][0]):
if c.isalpha() or c.isdigit() or c == '_':
suffix.append(c)
elif c == '+':
suffix.append('p')
elif c == '-':
suffix.append('m')
elif c == '*':
suffix.append('t')
elif c == '/':
suffix.append('d')
cloned_name += '_' + ''.join(suffix)
except:
continue
if cloned_name in sdfg.arrays.keys():
cloned_array = sdfg.arrays[cloned_name]
elif array_node.data in cloned_arrays:
cloned_array = cloned_arrays[array_node.data]
else:
full_shape = []
for r in memlet.bounding_box_size():
size = symbolic.overapproximate(r)
try:
full_shape.append(int(size))
except:
full_shape.append(size)
actual_dims = [
idx for idx, r in enumerate(full_shape)
if not (isinstance(r, int) and r == 1)
]
if len(actual_dims) == 0: # abort
actual_dims = [len(full_shape) - 1]
if isinstance(array, dace.data.Scalar):
cloned_array = sdfg.add_array(
name=cloned_name,
shape=[1],
dtype=array.dtype,
transient=True,
storage=dace.types.StorageType.GPU_Global)
else:
cloned_array = sdfg.add_array(
name=cloned_name,
shape=[full_shape[d] for d in actual_dims],
dtype=array.dtype,
materialize_func=array.materialize_func,
transient=True,
storage=dace.types.StorageType.GPU_Global,
allow_conflicts=array.allow_conflicts,
access_order=tuple(
[array.access_order[d] for d in actual_dims]),
strides=[array.strides[d] for d in actual_dims],
offset=[array.offset[d] for d in actual_dims])
cloned_arrays[array_node.data] = cloned_name
cloned_node = type(array_node)(cloned_name)
cloned_node.setzero = True
out_cloned_arraynodes[array_node.data] = cloned_node
# Third, connect the cloned arrays to the originals
for array_name, node in in_cloned_arraynodes.items():
graph.add_node(node)
is_scalar = isinstance(sdfg.arrays[array_name], dace.data.Scalar)
for edge in graph.in_edges(cnode):
if edge.data.data == array_name:
graph.remove_edge(edge)
newmemlet = copy.deepcopy(edge.data)
newmemlet.data = node.data
if is_scalar:
newmemlet.subset = sbs.Indices([0])
else:
offset = []
lost_dims = []
lost_ranges = []
newsubset = [None] * len(edge.data.subset)
for ind, r in enumerate(edge.data.subset):
offset.append(r[0])
if isinstance(edge.data.subset[ind], tuple):
begin = edge.data.subset[ind][0] - r[0]
end = edge.data.subset[ind][1] - r[0]
step = edge.data.subset[ind][2]
if begin == end:
lost_dims.append(ind)
lost_ranges.append((begin, end, step))
else:
newsubset[ind] = (begin, end, step)
else:
newsubset[ind] -= r[0]
if len(lost_dims) == len(edge.data.subset):
newmemlet.subset = type(
edge.data.subset)([lost_ranges[-1]])
else:
newmemlet.subset = type(edge.data.subset)(
[r for r in newsubset if r is not None])
graph.add_edge(node, edge.src_conn, edge.dst, edge.dst_conn,
newmemlet)
edge.data.other_subset = newmemlet.subset
graph.add_edge(edge.src, None, node, None, edge.data)
for array_name, node in out_cloned_arraynodes.items():
graph.add_node(node)
is_scalar = isinstance(sdfg.arrays[array_name], dace.data.Scalar)
for edge in all_out_edges:
if edge.data.data == array_name:
graph.remove_edge(edge)
newmemlet = copy.deepcopy(edge.data)
newmemlet.data = node.data
if is_scalar:
newmemlet.subset = sbs.Indices([0])
else:
offset = []
lost_dims = []
lost_ranges = []
newsubset = [None] * len(edge.data.subset)
for ind, r in enumerate(edge.data.subset):
offset.append(r[0])
if isinstance(edge.data.subset[ind], tuple):
begin = edge.data.subset[ind][0] - r[0]
end = edge.data.subset[ind][1] - r[0]
step = edge.data.subset[ind][2]
if begin == end:
lost_dims.append(ind)
lost_ranges.append((begin, end, step))
else:
newsubset[ind] = (begin, end, step)
else:
newsubset[ind] -= r[0]
if len(lost_dims) == len(edge.data.subset):
newmemlet.subset = type(
edge.data.subset)([lost_ranges[-1]])
else:
newmemlet.subset = type(edge.data.subset)(
[r for r in newsubset if r is not None])
graph.add_edge(edge.src, edge.src_conn, node, edge.dst_conn,
newmemlet)
edge.data.data = node.data
edge.data.other_subset = edge.data.subset
edge.data.subset = newmemlet.subset
graph.add_edge(node, None, edge.dst, None, edge.data)
class ValidationError(Exception):
""" An exception raised when inputs are not validated in SDFG library
calls. """
def __init__(self, message):
super().__init__(message)
def validate_matrix_multiplication(
A_shape: Shape,
B_shape: Shape,
C_shape: Shape,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None
) -> ((str, str, str), (str, str, str), (str, str, str), (str, str, str)):
""" Validates a matrix multiplication operation, based on the shapes and
indices of the arrays involved. Returns the ranges of the maps and
memlets at all levels as strings.
"""
# Validate input
if len(A_shape) < 2:
raise ValidationError(
'Array A has less than 2 dimensions: {}'.format(A_shape))
A_mm_shape = A_shape[-2:]
if len(B_shape) < 2:
raise ValidationError(
'Array B has less than 2 dimensions: {}'.format(B_shape))
B_mm_shape = B_shape[-2:]
if A_mm_shape[-1] != B_mm_shape[0]:
raise ValidationError(
'N-dimension mismatch between arrays A and B: {} != {}'.format(
A_mm_shape[-1], B_mm_shape[0]))
# Dimension sizes and ranges
M = A_mm_shape[0]
N = A_mm_shape[-1]
K = B_mm_shape[-1]
M_range = '0:{}'.format(M)
N_range = '0:{}'.format(N)
K_range = '0:{}'.format(K)
# Validate slices and set input array access ranges
A_outer_range = '{}, {}'.format(M_range, N_range)
A_middle_range = '{}, ik'.format(M_range)
A_inner_range = 'ii, ik'
if len(A_shape) > 2:
if A_index is None or len(A_index) != len(A_shape) - 2:
raise ValidationError(
'Invalid slice {} for array A with dimensions {}'.format(
A_index, A_shape))
A_index = [str(idx) for idx in A_index]
A_outer_range = '{}, {}'.format(', '.join(A_index), A_outer_range)
A_middle_range = '{}, {}'.format(', '.join(A_index), A_middle_range)
A_inner_range = '{}, {}'.format(', '.join(A_index), A_inner_range)
B_outer_range = '{}, {}'.format(N_range, K_range)
B_middle_range = 'ik, {}'.format(K_range)
B_inner_range = 'ik, ij'
if len(B_shape) > 2:
if B_index is None or len(B_index) != len(B_shape) - 2:
raise ValidationError(
'Invalid slice {} for array B with dimensions {}'.format(
B_index, B_shape))
B_index = [str(idx) for idx in B_index]
B_outer_range = '{}, {}'.format(', '.join(B_index), B_outer_range)
B_middle_range = '{}, {}'.format(', '.join(B_index), B_middle_range)
B_inner_range = '{}, {}'.format(', '.join(B_index), B_inner_range)
# Validate output
C_mm_shape = [M, K]
if len(C_shape) < 2:
raise ValidationError(
'Array C has less than 2 dimensions: {}'.format(C_shape))
if list(C_shape[-2:]) != C_mm_shape:
raise ValidationError(
'Shape mismatch in array C: expected {}, but got {}'.format(
C_mm_shape, C_shape[-2:]))
C_outer_range = '{}, {}'.format(M_range, K_range)
C_middle_range = '{}, {}'.format(M_range, K_range)
C_inner_range = 'ii, ij'
if len(C_shape) > 2:
if C_index is None or len(C_index) != len(C_shape) - 2:
raise ValidationError(
'Invalid slice {} for array C with dimensions {}'.format(
C_index, C_shape))
C_index = [str(idx) for idx in C_index]
C_outer_range = '{}, {}'.format(', '.join(C_index), C_outer_range)
C_middle_range = '{}, {}'.format(', '.join(C_index), C_middle_range)
C_inner_range = '{}, {}'.format(', '.join(C_index), C_inner_range)
return ((M_range, N_range, K_range), (A_outer_range, A_middle_range,
A_inner_range),
(B_outer_range, B_middle_range,
B_inner_range), (C_outer_range, C_middle_range, C_inner_range))
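# Illustration (not part of the library API): a plain 2-D multiply
# C[4, 3] = A[4, 5] @ B[5, 3] validates to the range strings used below:
#
#     (m_r, n_r, k_r), a_r, b_r, c_r = validate_matrix_multiplication(
#         [4, 5], [5, 3], [4, 3])
#     # m_r == '0:4', n_r == '0:5', k_r == '0:3'
#     # a_r == ('0:4, 0:5', '0:4, ik', 'ii, ik')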
def matrix_multiplication(state: State,
A_src: Node,
A_node: DNode,
B_src: Node,
B_node: DNode,
C_dst: Node,
C_node: DNode,
accumulate: bool = False,
interchange: bool = True,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
label: str = None):
""" Adds a matrix multiplication operation to an existing SDFG state.
@param A_src: The source node from which the memlet of matrix A is
connected.
@param A_node: The Access Node for matrix A.
@param B_src: The source node from which the memlet of matrix B is
connected.
@param B_node: The Access Node for matrix B.
@param C_dst: The destination node to which the memlet of matrix C is
connected.
@param C_node: The Access Node for matrix C.
@param accumulate: Whether to accumulate to C or store to it.
@param interchange: If True, interchanges the multiplication maps for
performance (in some cases).
@param A_index: Slice of matrix A to use for multiplication.
@param B_index: Slice of matrix B to use for multiplication.
@param C_index: Slice of matrix C to use for multiplication.
@param label: Optional label for the maps and tasklet.
"""
# Validate input
sdfg = state.parent
map_ranges, A_ranges, B_ranges, C_ranges = validate_matrix_multiplication(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape,
C_node.desc(sdfg).shape, A_index, B_index, C_index)
# Extract ranges
M_range, N_range, K_range = map_ranges
A_outer_range, A_middle_range, A_inner_range = A_ranges
B_outer_range, B_middle_range, B_inner_range = B_ranges
C_outer_range, C_middle_range, C_inner_range = C_ranges
# Set label
if label is None:
label = state.label
# Create maps/tasklet
k_entry, k_exit = state.add_map(
name=label + '_' + 'k_map',
ndrange=dict(ik=N_range),
schedule=dace.types.ScheduleType.Sequential)
k_entry.in_connectors = {'IN_1', 'IN_2'}
k_entry.out_connectors = {'OUT_1', 'OUT_2'}
k_exit.in_connectors = {'IN_1'}
k_exit.out_connectors = {'OUT_1'}
ij_entry, ij_exit = state.add_map(
name=label + '_' + 'ij_map', ndrange=dict(ii=M_range, ij=K_range))
tasklet = state.add_tasklet(
name=label + '_' + 'tasklet',
inputs={'a', 'b'},
outputs={'c'},
code='c = a * b')
ij_entry.in_connectors = {'IN_1', 'IN_2'}
ij_entry.out_connectors = {'OUT_1', 'OUT_2'}
ij_exit.in_connectors = {'IN_1'}
ij_exit.out_connectors = {'OUT_1'}
# Add edges
if interchange:
state.add_edge(A_src, None, k_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_src, None, k_entry, 'IN_2',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(k_entry, 'OUT_1', ij_entry, 'IN_1',
dace.Memlet.simple(A_node, A_middle_range))
state.add_edge(k_entry, 'OUT_2', ij_entry, 'IN_2',
dace.Memlet.simple(B_node, B_middle_range))
state.add_edge(ij_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
state.add_edge(ij_entry, 'OUT_2', tasklet, 'b',
dace.Memlet.simple(B_node, B_inner_range))
wcr = 0
if accumulate:
wcr = None
state.add_edge(
tasklet, 'c', ij_exit, 'IN_1',
dace.Memlet.simple(
C_node,
C_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=wcr,
wcr_conflict=False))
state.add_edge(ij_exit, 'OUT_1', k_exit, 'IN_1',
dace.Memlet.simple(C_node, C_middle_range))
state.add_edge(k_exit, 'OUT_1', C_dst, None,
dace.Memlet.simple(C_node, C_outer_range))
else:
state.add_edge(A_src, None, ij_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_src, None, ij_entry, 'IN_2',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(ij_entry, 'OUT_1', k_entry, 'IN_1',
dace.Memlet.simple(A_node, A_middle_range))
state.add_edge(ij_entry, 'OUT_2', k_entry, 'IN_2',
dace.Memlet.simple(B_node, B_middle_range))
state.add_edge(k_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
state.add_edge(k_entry, 'OUT_2', tasklet, 'b',
dace.Memlet.simple(B_node, B_inner_range))
wcr = 0
if accumulate:
wcr = None
state.add_edge(
tasklet, 'c', k_exit, 'IN_1',
dace.Memlet.simple(
C_node,
C_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=wcr,
wcr_conflict=False))
state.add_edge(k_exit, 'OUT_1', ij_exit, 'IN_1',
dace.Memlet.simple(C_node, C_middle_range))
state.add_edge(ij_exit, 'OUT_1', C_dst, None,
dace.Memlet.simple(C_node, C_outer_range))
def matrix_transpose_cublas(state: State,
A_src: Node,
A_node: DNode,
B_dst: Node,
B_node: DNode,
alpha: str = 'const_pone',
label: str = None):
""" Adds a matrix transposition operation to an existing SDFG state,
using CUBLAS as the implementation.
@param A_src: The source node from which the memlet of matrix A is
connected.
@param A_node: The Access Node for matrix A.
@param B_dst: The destination node to which the memlet of matrix B is
connected.
@param B_node: The Access Node for matrix B.
@param alpha: Multiplier for input matrix.
@param label: Optional label for the tasklet.
"""
sdfg = state.parent
# Validate inputs
A = A_node.desc(sdfg)
B = B_node.desc(sdfg)
if len(A.shape) != 2 or len(B.shape) != 2:
raise ValidationError('Only matrices are supported for CUBLAS '
'transpose')
if A.shape[0] != B.shape[1] or A.shape[1] != B.shape[0]:
raise ValidationError('Shape mismatch for transpose')
if A.dtype.type != B.dtype.type:
raise ValidationError('Type mismatch for transpose')
    # Set label
    if label is None:
        label = state.label
    # Create tasklet
tasklet = state.add_tasklet(
name=label + '_' + 'tasklet',
inputs={'a'},
outputs={'b'},
# cuBLAS is column-major, so we switch the arguments
code='''
cublasSetStream(handle, __dace_current_stream);
cublasStatus_t status = cublas{btype}geam(
handle,
CUBLAS_OP_T, CUBLAS_OP_N,
{cols}, {rows},
{alpha},
({cutype}*)a, {astride},
const_zero,
({cutype}*)b, {bstride},
({cutype}*)b, {bstride}
);
'''.format(
btype=_to_blastype(A.dtype.type),
cutype=_to_cudatype(A.dtype.type),
rows=A.shape[1],
cols=A.shape[0],
astride=A.strides[1],
bstride=B.strides[1],
alpha=alpha),
language=dace.types.Language.CPP)
state.add_edge(A_src, None, tasklet, 'a',
dace.Memlet.simple(A_node, '0:%s,0:%s' % A.shape))
state.add_edge(tasklet, 'b', B_dst, None,
dace.Memlet.simple(B_node, '0:%s,0:%s' % B.shape))
gpu_transform_tasklet(sdfg, state, tasklet)
def matrix_multiplication_cublas(state: State,
A_src: Node,
A_node: DNode,
B_src: Node,
B_node: DNode,
C_dst: Node,
C_node: DNode,
accumulate: bool = False,
interchange: bool = True,
alpha: str = 'const_pone',
beta: str = 'const_zero',
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
label: str = None):
""" Adds a matrix multiplication operation to an existing SDFG state,
using CUBLAS as the implementation.
@param A_src: The source node from which the memlet of matrix A is
connected.
@param A_node: The Access Node for matrix A.
@param B_src: The source node from which the memlet of matrix B is
connected.
@param B_node: The Access Node for matrix B.
@param C_dst: The destination node to which the memlet of matrix C is
connected.
@param C_node: The Access Node for matrix C.
@param accumulate: Whether to accumulate to C or store to it.
@param interchange: If True, interchanges the multiplication maps for
performance (in some cases).
@param alpha: Alpha value for GEMM.
@param beta: Beta value for GEMM.
@param A_index: Slice of matrix A to use for multiplication.
@param B_index: Slice of matrix B to use for multiplication.
@param C_index: Slice of matrix C to use for multiplication.
@param label: Optional label for the maps and tasklet.
"""
# Validate input
sdfg = state.parent
map_ranges, A_ranges, B_ranges, C_ranges = validate_matrix_multiplication(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape,
C_node.desc(sdfg).shape, A_index, B_index, C_index)
# Extract ranges
M_range, N_range, K_range = map_ranges
A_outer_range, A_middle_range, A_inner_range = A_ranges
B_outer_range, B_middle_range, B_inner_range = B_ranges
C_outer_range, C_middle_range, C_inner_range = C_ranges
# Set label
if label is None:
label = state.label
# Create tasklet
tasklet = state.add_tasklet(
name=label + '_' + 'tasklet',
inputs={'a', 'b'},
outputs={'c'},
code='''
//cuDoubleComplex alpha = make_cuDoubleComplex(1, 0);
//cuDoubleComplex beta = make_cuDoubleComplex(0, 0);
cublasSetStream(handle, __dace_current_stream);
cublasStatus_t status = cublasZgemm(
handle,
CUBLAS_OP_N, CUBLAS_OP_N,
bsize, bsize, bsize,
            {alpha},
            (cuDoubleComplex*)b, bsize,
            (cuDoubleComplex*)a, bsize,
            {beta},
            (cuDoubleComplex*)c, bsize
        );
        '''.format(
            alpha=alpha,
            beta=beta),  # cuBLAS is column-major, so we switch the arguments
language=dace.types.Language.CPP)
state.add_edge(A_src, None, tasklet, 'a',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_src, None, tasklet, 'b',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(tasklet, 'c', C_dst, None,
dace.Memlet.simple(C_node, C_outer_range))
gpu_transform_tasklet(sdfg, state, tasklet)
def matrix_multiplication_cublas_v2(state: State,
A_src: Node,
A_node: DNode,
B_src: Node,
B_node: DNode,
C_src: Node,
C_src_node: DNode,
C_dst: Node,
C_dst_node: DNode,
accumulate: bool = False,
interchange: bool = True,
alpha: str = 'const_pone',
beta: str = 'const_zero',
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
label: str = None):
""" Adds a matrix multiplication operation to an existing SDFG state,
using CUBLAS as the implementation, and providing a separate source
and destination nodes for the output matrix.
@param A_src: The source node from which the memlet of matrix A is
connected.
@param A_node: The Access Node for matrix A.
@param B_src: The source node from which the memlet of matrix B is
connected.
@param B_node: The Access Node for matrix B.
@param C_src: The node from which the memlet of matrix C is
connected into the multiplication.
@param C_src_node: The input Access Node for matrix C.
@param C_dst: The node to which the memlet of matrix C is
connected out of the multiplication.
@param C_dst_node: The output Access Node for matrix C.
@param accumulate: Whether to accumulate to C or store to it.
@param interchange: If True, interchanges the multiplication maps for
performance (in some cases).
@param alpha: Alpha value for GEMM.
@param beta: Beta value for GEMM.
@param A_index: Slice of matrix A to use for multiplication.
@param B_index: Slice of matrix B to use for multiplication.
@param C_index: Slice of matrix C to use for multiplication.
@param label: Optional label for the maps and tasklet.
"""
# Validate input
sdfg = state.parent
map_ranges, A_ranges, B_ranges, C_ranges = validate_matrix_multiplication(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape,
C_src_node.desc(sdfg).shape, A_index, B_index, C_index)
# Extract ranges
M_range, N_range, K_range = map_ranges
A_outer_range, A_middle_range, A_inner_range = A_ranges
B_outer_range, B_middle_range, B_inner_range = B_ranges
C_outer_range, C_middle_range, C_inner_range = C_ranges
# Set label
if label is None:
label = state.label
# Create tasklet
tasklet = state.add_tasklet(
name=label + '_' + 'tasklet',
inputs={'a', 'b', 'cin'},
outputs={'c'},
code='''
//cuDoubleComplex alpha = make_cuDoubleComplex(1, 0);
//cuDoubleComplex beta = make_cuDoubleComplex(0, 0);
cublasSetStream(handle, __dace_current_stream);
cublasStatus_t status = cublasZgemm(
handle,
CUBLAS_OP_N, CUBLAS_OP_N,
bsize, bsize, bsize,
{alpha},
(cuDoubleComplex*)b, bsize,
(cuDoubleComplex*)a, bsize,
{beta},
(cuDoubleComplex*)c, bsize
);
'''.format(
alpha=alpha,
beta=beta), # cuBLAS is column-major, so we switch the arguments
language=dace.types.Language.CPP)
state.add_edge(A_src, None, tasklet, 'a',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_src, None, tasklet, 'b',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(C_src, None, tasklet, 'cin',
dace.Memlet.simple(C_src_node, C_outer_range))
state.add_edge(tasklet, 'c', C_dst, None,
dace.Memlet.simple(C_dst_node, C_outer_range))
gpu_transform_tasklet(sdfg, state, tasklet)
def matrix_multiplication_mkl(state: State,
A_src: Node,
A_node: DNode,
B_src: Node,
B_node: DNode,
C_dst: Node,
C_node: DNode,
accumulate: bool = False,
interchange: bool = True,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
label: str = None):
""" Adds a matrix multiplication operation to an existing SDFG state,
using MKL as the implementation.
@param A_src: The source node from which the memlet of matrix A is
connected.
@param A_node: The Access Node for matrix A.
@param B_src: The source node from which the memlet of matrix B is
connected.
@param B_node: The Access Node for matrix B.
@param C_dst: The destination node to which the memlet of matrix C is
connected.
@param C_node: The Access Node for matrix C.
@param accumulate: Whether to accumulate to C or store to it.
@param interchange: If True, interchanges the multiplication maps for
performance (in some cases).
@param A_index: Slice of matrix A to use for multiplication.
@param B_index: Slice of matrix B to use for multiplication.
@param C_index: Slice of matrix C to use for multiplication.
@param label: Optional label for the maps and tasklet.
"""
# Validate input
sdfg = state.parent
map_ranges, A_ranges, B_ranges, C_ranges = validate_matrix_multiplication(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape,
C_node.desc(sdfg).shape, A_index, B_index, C_index)
# Extract ranges
M = A_node.desc(sdfg).shape[-2]
N = A_node.desc(sdfg).shape[-1]
K = B_node.desc(sdfg).shape[-1]
M_range, N_range, K_range = map_ranges
A_outer_range, A_middle_range, A_inner_range = A_ranges
B_outer_range, B_middle_range, B_inner_range = B_ranges
C_outer_range, C_middle_range, C_inner_range = C_ranges
# Set label
if label is None:
label = state.label
# Create tasklet
tasklet = state.add_tasklet(
name=label + '_' + 'tasklet',
inputs={'a', 'b'},
outputs={'c'},
code='''
std::complex<double> alpha(1, 0);
std::complex<double> beta(0, 0);
char opa = 'N';
char opb = 'N';
zgemm(
&opa, &opb,
&{m}, &{n}, &{k},
(MKL_Complex16*)&alpha,
(MKL_Complex16*)a, &{m},
(MKL_Complex16*)b, &{n},
(MKL_Complex16*)&beta,
(MKL_Complex16*)c, &{m}
);
'''.format(m=M, n=N, k=K),
language=dace.types.Language.CPP)
state.add_edge(A_src, None, tasklet, 'a',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_src, None, tasklet, 'b',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(tasklet, 'c', C_dst, None,
dace.Memlet.simple(C_node, C_outer_range))
def matrix_multiplication_s(A_label: str,
A_shape: Shape,
A_type: dace.types.typeclass,
B_label: str,
B_shape: Shape,
B_type: dace.types.typeclass,
create_C: bool = True,
C_label: str = None,
C_shape: Shape = None,
C_type: dace.types.typeclass = None,
is_A_transient: bool = False,
is_B_transient: bool = False,
is_C_transient: bool = False,
accumulate: bool = False,
interchange: bool = True,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
label: str = None) -> State:
""" Creates a new state with a matrix multiplication operation. """
# Set output attributes
if create_C:
if C_label is None:
C_label = A_label + B_label
if C_type is None:
C_type = A_type
C_shape = [A_shape[-2], B_shape[-1]]
else:
if C_shape is None:
raise ValidationError(
'Array C is not transient, but its shape is not set')
# Validate input
map_ranges, A_ranges, B_ranges, C_ranges = validate_matrix_multiplication(
A_shape, B_shape, C_shape, A_index, B_index, C_index)
# Extract ranges
M_range, N_range, K_range = map_ranges
A_outer_range, A_middle_range, A_inner_range = A_ranges
B_outer_range, B_middle_range, B_inner_range = B_ranges
C_outer_range, C_middle_range, C_inner_range = C_ranges
# Set label
if label is None:
label = A_label + B_label
# Create state
state = State(label=label)
# Create data nodes
A_node = state.add_array(
A_label, A_shape, A_type, transient=is_A_transient)
B_node = state.add_array(
B_label, B_shape, B_type, transient=is_B_transient)
C_node = state.add_array(
C_label, C_shape, C_type, transient=is_C_transient or create_C)
# Create maps/tasklet
k_entry, k_exit = state.add_map(
name=label + '_' + 'k_map',
ndrange=dict(ik=N_range),
schedule=dace.types.ScheduleType.Sequential)
k_entry.in_connectors = {'IN_1', 'IN_2'}
k_entry.out_connectors = {'OUT_1', 'OUT_2'}
k_exit.in_connectors = {'IN_1'}
k_exit.out_connectors = {'OUT_1'}
ij_entry, ij_exit = state.add_map(
name=label + '_' + 'ij_map', ndrange=dict(ii=M_range, ij=K_range))
tasklet = state.add_tasklet(
name=label + '_' + 'tasklet',
inputs={'a', 'b'},
outputs={'c'},
code='c = a * b')
ij_entry.in_connectors = {'IN_1', 'IN_2'}
ij_entry.out_connectors = {'OUT_1', 'OUT_2'}
ij_exit.in_connectors = {'IN_1'}
ij_exit.out_connectors = {'OUT_1'}
# Add edges
if interchange:
state.add_edge(A_node, None, k_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_node, None, k_entry, 'IN_2',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(k_entry, 'OUT_1', ij_entry, 'IN_1',
dace.Memlet.simple(A_node, A_middle_range))
state.add_edge(k_entry, 'OUT_2', ij_entry, 'IN_2',
dace.Memlet.simple(B_node, B_middle_range))
state.add_edge(ij_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
state.add_edge(ij_entry, 'OUT_2', tasklet, 'b',
dace.Memlet.simple(B_node, B_inner_range))
wcr = 0
if accumulate:
wcr = None
state.add_edge(
tasklet, 'c', ij_exit, 'IN_1',
dace.Memlet.simple(
C_node,
C_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=wcr,
wcr_conflict=False))
state.add_edge(ij_exit, 'OUT_1', k_exit, 'IN_1',
dace.Memlet.simple(C_node, C_middle_range))
state.add_edge(k_exit, 'OUT_1', C_node, None,
dace.Memlet.simple(C_node, C_outer_range))
else:
state.add_edge(A_node, None, ij_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_node, None, ij_entry, 'IN_2',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(ij_entry, 'OUT_1', k_entry, 'IN_1',
dace.Memlet.simple(A_node, A_middle_range))
state.add_edge(ij_entry, 'OUT_2', k_entry, 'IN_2',
dace.Memlet.simple(B_node, B_middle_range))
state.add_edge(k_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
state.add_edge(k_entry, 'OUT_2', tasklet, 'b',
dace.Memlet.simple(B_node, B_inner_range))
wcr = 0
if accumulate:
wcr = None
state.add_edge(
tasklet, 'c', k_exit, 'IN_1',
dace.Memlet.simple(
C_node,
C_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=wcr,
wcr_conflict=False))
state.add_edge(k_exit, 'OUT_1', ij_exit, 'IN_1',
dace.Memlet.simple(C_node, C_middle_range))
state.add_edge(ij_exit, 'OUT_1', C_node, None,
dace.Memlet.simple(C_node, C_outer_range))
return state
def validate_scalar_array_multiplication(
alpha_shape: Shape,
A_shape: Shape,
B_shape: Shape,
alpha_index: Index = None,
A_index: Index = None,
B_index: Index = None
) -> (typing.Dict[str, str], (str, str), (str, str), (str, str)):
""" Validates a scalar-array multiplication operation, based on the shapes
and indices of the arrays involved. Returns the ranges of the maps and
memlets at all levels as strings. """
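    # Illustrative example: alpha_shape=[1], A_shape=[4, 5], B_shape=[4, 5]
    # yields map_ranges={'i0': '0:4', 'i1': '0:5'}, alpha ranges ('0', '0'),
    # and A/B (outer, inner) ranges of ('0:4, 0:5', 'i0, i1').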
# Validate data
if alpha_shape != [1]:
if alpha_index is None or len(alpha_shape) != len(alpha_index):
raise ValidationError(
'Slice of alpha is not a scalar: {}, {}'.format(
alpha_shape, alpha_index))
if A_index is not None:
true_A_shape = A_shape[len(A_index):]
else:
true_A_shape = A_shape
if B_index is not None:
true_B_shape = B_shape[len(B_index):]
else:
true_B_shape = B_shape
if true_A_shape != true_B_shape:
raise ValidationError('Dimension mismatch between arrays A and B: '
'{}({}) != {}({})'.format(
true_A_shape, A_shape, true_B_shape,
B_shape))
# Map ranges
map_ranges = dict()
for i, dim in enumerate(true_A_shape):
map_ranges['i{}'.format(i)] = '0:{}'.format(dim)
# Memlet ranges
alpha_outer_range = '0'
alpha_inner_range = '0'
if alpha_index is not None:
alpha_index = [str(idx) for idx in alpha_index]
alpha_outer_range = ', '.join(alpha_index)
alpha_inner_range = ', '.join(alpha_index)
A_outer_range = ', '.join(map_ranges.values())
A_inner_range = ', '.join(map_ranges.keys())
if A_index is not None:
A_index = [str(idx) for idx in A_index]
A_outer_range = '{}, {}'.format(', '.join(A_index), A_outer_range)
A_inner_range = '{}, {}'.format(', '.join(A_index), A_inner_range)
B_outer_range = ', '.join(map_ranges.values())
B_inner_range = ', '.join(map_ranges.keys())
if B_index is not None:
B_index = [str(idx) for idx in B_index]
B_outer_range = '{}, {}'.format(', '.join(B_index), B_outer_range)
B_inner_range = '{}, {}'.format(', '.join(B_index), B_inner_range)
return (map_ranges, (alpha_outer_range, alpha_inner_range),
(A_outer_range, A_inner_range), (B_outer_range, B_inner_range))
def scalar_array_multiplication(state: State,
alpha_src: Node,
alpha_node: DNode,
A_src: Node,
A_node: DNode,
B_dst: Node,
B_node: DNode,
accumulate: bool = False,
wcr_conflict: bool = False,
alpha_index: Index = None,
A_index: Index = None,
B_index: Index = None,
label: str = None):
""" Adds a scalar-array multiplication operation to an exisiting state. """
# Validate data
sdfg = state.parent
alpha_shape = [1]
if hasattr(alpha_node, 'shape'):
alpha_shape = alpha_node.shape
ranges = validate_scalar_array_multiplication(
alpha_shape,
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape, alpha_index, A_index, B_index)
map_ranges, alpha_ranges, A_ranges, B_ranges = ranges
alpha_outer_range, alpha_inner_range = alpha_ranges
    A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
# Set label
if label is None:
label = state.label
# Create map/tasklet
map_entry, map_exit = state.add_map(
name=label + '_map', ndrange=map_ranges)
map_entry.in_connectors = {'IN_1', 'IN_2'}
map_entry.out_connectors = {'OUT_1', 'OUT_2'}
map_exit.in_connectors = {'IN_1'}
map_exit.out_connectors = {'OUT_1'}
tasklet = state.add_tasklet(
name=label + '_tasklet',
inputs={'scalar', 'a'},
outputs={'b'},
code='b = scalar * a')
# Add edges
state.add_edge(alpha_src, None, map_entry, 'IN_1',
dace.Memlet.simple(alpha_node, alpha_outer_range))
state.add_edge(A_src, None, map_entry, 'IN_2',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(map_exit, 'OUT_1', B_dst, None,
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(map_entry, 'OUT_1', tasklet, 'scalar',
dace.Memlet.simple(alpha_node, alpha_inner_range))
state.add_edge(map_entry, 'OUT_2', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
if accumulate:
state.add_edge(
tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(
B_node,
B_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=None,
wcr_conflict=wcr_conflict))
else:
state.add_edge(tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(B_node, B_inner_range))
def scalar_array_multiplication_s(alpha_label: str,
alpha_shape: Shape,
alpha_type: dace.types.typeclass,
A_label: str,
A_shape: Shape,
A_type: dace.types.typeclass,
create_B: bool = True,
B_label: str = None,
B_shape: Shape = None,
B_type: dace.types.typeclass = None,
is_alpha_transient: bool = False,
is_A_transient: bool = False,
is_B_transient: bool = False,
accumulate: bool = False,
wcr_conflict: bool = False,
alpha_index: Index = None,
A_index: Index = None,
B_index: Index = None,
label: str = None) -> State:
""" Creates a new state with a scalar-array multiplication operation. """
# Set output attributes
if create_B:
if B_label is None:
B_label = alpha_label + A_label
if B_type is None:
B_type = A_type
B_shape = A_shape
else:
if B_shape is None:
raise ValidationError(
'Array B is not transient, but its shape is not set')
# Validate data
ranges = validate_scalar_array_multiplication(
alpha_shape, A_shape, B_shape, alpha_index, A_index, B_index)
map_ranges, alpha_ranges, A_ranges, B_ranges = ranges
alpha_outer_range, alpha_inner_range = alpha_ranges
    A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
# Set label
if label is None:
label = alpha_label + A_label
# Create state
state = State(label=label)
# Create data nodes
alpha_node = state.add_array(
alpha_label, alpha_shape, alpha_type, transient=is_alpha_transient)
A_node = state.add_array(
A_label, A_shape, A_type, transient=is_A_transient)
B_node = state.add_array(
B_label, B_shape, B_type, transient=is_B_transient or create_B)
# Create map/tasklet
map_entry, map_exit = state.add_map(
name=label + '_map', ndrange=map_ranges)
map_entry.in_connectors = {'IN_1', 'IN_2'}
map_entry.out_connectors = {'OUT_1', 'OUT_2'}
map_exit.in_connectors = {'IN_1'}
map_exit.out_connectors = {'OUT_1'}
tasklet = state.add_tasklet(
name=label + '_tasklet',
inputs={'scalar', 'a'},
outputs={'b'},
code='b = scalar * a')
# Add edges
state.add_edge(alpha_node, None, map_entry, 'IN_1',
dace.Memlet.simple(alpha_node, alpha_outer_range))
state.add_edge(A_node, None, map_entry, 'IN_2',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(map_exit, 'OUT_1', B_node, None,
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(map_entry, 'OUT_1', tasklet, 'scalar',
dace.Memlet.simple(alpha_node, alpha_inner_range))
state.add_edge(map_entry, 'OUT_2', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
if accumulate:
state.add_edge(
tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(
B_node,
B_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=None,
wcr_conflict=wcr_conflict))
else:
state.add_edge(tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(B_node, B_inner_range))
return state
def constant_array_multiplication(state: State,
constant,
A_src: Node,
A_node: DNode,
B_dst: Node,
B_node: DNode,
accumulate: bool = False,
A_index: Index = None,
B_index: Index = None,
label: str = None):
""" Adds a scalar-array multiplication operation to an exisiting state. """
# Validate data
# ranges = validate_scalar_array_multiplication(
# [1], A_node.shape, B_node.shape,
# None, A_index, B_index
# )
sdfg = state.parent
ranges = validate_scalar_array_multiplication([1],
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape,
None, A_index, B_index)
map_ranges, _, A_ranges, B_ranges = ranges
A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
# Set label
if label is None:
label = state.label
# Create map/tasklet
map_entry, map_exit = state.add_map(
name=label + '_map', ndrange=map_ranges)
map_entry.in_connectors = {'IN_1'}
map_entry.out_connectors = {'OUT_1'}
map_exit.in_connectors = {'IN_1'}
map_exit.out_connectors = {'OUT_1'}
tasklet = state.add_tasklet(
name=label + '_tasklet',
inputs={'a'},
outputs={'b'},
code='b = {} * a'.format(constant))
# Add edges
state.add_edge(A_src, None, map_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(map_exit, 'OUT_1', B_dst, None,
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(map_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
if accumulate:
state.add_edge(
tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(
B_node,
B_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=None,
wcr_conflict=False))
else:
state.add_edge(tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(B_node, B_inner_range))
def unary_array_op(state: State,
A_src: Node,
A_node: DNode,
B_dst: Node,
B_node: DNode,
code: str,
lang=dace.types.Language.Python,
accumulate: bool = False,
A_index: Index = None,
B_index: Index = None,
label: str = None):
""" Adds a unary array operation to an exisiting state. """
# Validate data
sdfg = state.parent
ranges = validate_scalar_array_multiplication([1],
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape,
None, A_index, B_index)
map_ranges, _, A_ranges, B_ranges = ranges
A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
# Set label
if label is None:
label = state.label
# Create map/tasklet
map_entry, map_exit = state.add_map(
name=label + '_map', ndrange=map_ranges)
map_entry.in_connectors = {'IN_1'}
map_entry.out_connectors = {'OUT_1'}
map_exit.in_connectors = {'IN_1'}
map_exit.out_connectors = {'OUT_1'}
tasklet = state.add_tasklet(
name=label + '_tasklet',
inputs={'a'},
outputs={'b'},
code=code,
language=lang)
# Add edges
state.add_edge(A_src, None, map_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(map_exit, 'OUT_1', B_dst, None,
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(map_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
if accumulate:
state.add_edge(
tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(
B_node,
B_inner_range,
wcr_str='lambda x, y: x + y',
wcr_identity=None,
wcr_conflict=False))
else:
state.add_edge(tasklet, 'b', map_exit, 'IN_1',
dace.Memlet.simple(B_node, B_inner_range))
def validate_matrix_transpose(
A_shape: Shape,
B_shape: Shape,
A_index: Index = None,
B_index: Index = None
) -> (typing.Dict[str, str], (str, str), (str, str)):
""" Validates a matrix transpose operation, based on the shapes and indices
of the arrays involved. Returns the ranges of the maps and memlets at
all levels as strings. """
# Validate data
if len(A_shape) < 2:
raise ValidationError(
'Array A has less than 2 dimensions: {}'.format(A_shape))
A_tr_shape = A_shape[-2:]
if len(B_shape) < 2:
raise ValidationError(
'Array B has less than 2 dimensions: {}'.format(B_shape))
B_tr_shape = B_shape[-2:]
if A_tr_shape[0] != B_tr_shape[-1] or A_tr_shape[-1] != B_tr_shape[0]:
raise ValidationError(
'Dimension mismatch between arrays A and B: {} != {}'.format(
A_tr_shape, B_tr_shape))
# Map ranges
map_ranges = dict(
ii='0:{}'.format(A_tr_shape[0]), ij='0:{}'.format(A_tr_shape[-1]))
# Validate slices and set array access ranges
A_outer_range = '0:{}, 0:{}'.format(A_tr_shape[0], A_tr_shape[-1])
A_inner_range = 'ii, ij'
if len(A_shape) > 2:
if A_index is None or len(A_index) != len(A_shape) - 2:
raise ValidationError(
'Invalid slice {} for array A with dimensions {}'.format(
A_index, A_shape))
A_index = [str(idx) for idx in A_index]
A_outer_range = '{}, {}'.format(', '.join(A_index), A_outer_range)
A_inner_range = '{}, {}'.format(', '.join(A_index), A_inner_range)
B_outer_range = '0:{}, 0:{}'.format(A_tr_shape[-1], A_tr_shape[0])
B_inner_range = 'ij, ii'
if len(B_shape) > 2:
if B_index is None or len(B_index) != len(B_shape) - 2:
raise ValidationError(
'Invalid slice {} for array B with dimensions {}'.format(
B_index, B_shape))
B_index = [str(idx) for idx in B_index]
B_outer_range = '{}, {}'.format(', '.join(B_index), B_outer_range)
B_inner_range = '{}, {}'.format(', '.join(B_index), B_inner_range)
return (map_ranges, (A_outer_range, A_inner_range), (B_outer_range,
B_inner_range))
def matrix_transpose(state: State,
A_src: Node,
A_node: DNode,
B_dst: Node,
B_node: DNode,
A_index: Index = None,
B_index: Index = None,
code: str = None,
lang=dace.types.Language.Python,
label: str = None):
""" Adds a matrix transpose operation to an existing state. """
# Validate data
sdfg = state.parent
map_ranges, A_ranges, B_ranges = validate_matrix_transpose(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape, A_index, B_index)
A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
# Set label
if label is None:
label = state.label
# Create map/tasklet
if code is None:
code = 'b = a'
_, map_entry, map_exit = state.add_mapped_tasklet(
name=label,
map_ranges=map_ranges,
inputs=dict(a=dace.Memlet.simple(A_node, A_inner_range)),
outputs=dict(b=dace.Memlet.simple(B_node, B_inner_range)),
code=code,
language=lang)
# Add edges
state.add_nedge(A_src, map_entry, dace.Memlet.simple(
A_node, A_outer_range))
state.add_nedge(map_exit, B_dst, dace.Memlet.simple(B_node, B_outer_range))
return state
def matrix_transpose_double(state: State,
A_src: Node,
A_node: DNode,
B_dst: Node,
B_node: DNode,
C_dst: Node,
C_node: DNode,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
code: str = None,
lang=dace.types.Language.Python,
label: str = None):
""" Adds a matrix transpose operation, which transposes to two different
matrices, to an existing state. """
# Validate data
sdfg = state.parent
map_ranges, A_ranges, B_ranges = validate_matrix_transpose(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape, A_index, B_index)
A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
_, _, C_ranges = validate_matrix_transpose(
A_node.desc(sdfg).shape,
C_node.desc(sdfg).shape, A_index, C_index)
C_outer_range, C_inner_range = C_ranges
# Set label
if label is None:
label = state.label
# Create map/tasklet
if code is None:
code = '''
b = a
c = a
'''
_, map_entry, map_exit = state.add_mapped_tasklet(
name=label,
map_ranges=map_ranges,
inputs=dict(a=dace.Memlet.simple(A_node, A_inner_range)),
outputs=dict(
b=dace.Memlet.simple(B_node, B_inner_range),
c=dace.Memlet.simple(C_node, C_inner_range),
),
code=code,
language=lang)
# Add edges
state.add_nedge(A_src, map_entry, dace.Memlet.simple(
A_node, A_outer_range))
state.add_nedge(map_exit, B_dst, dace.Memlet.simple(B_node, B_outer_range))
state.add_nedge(map_exit, C_dst, dace.Memlet.simple(C_node, C_outer_range))
return state
def matrix_transpose_s(A_label: str,
A_shape: Shape,
A_type: dace.types.typeclass,
create_B: bool = True,
B_label: str = None,
B_shape: Shape = None,
B_type: dace.types.typeclass = None,
is_alpha_transient: bool = False,
is_A_transient: bool = False,
is_B_transient: bool = False,
A_index: Index = None,
B_index: Index = None,
label: str = None) -> State:
""" Creates a new state with a matrix transpose operation. """
# Set output attributes
if create_B:
if B_label is None:
B_label = A_label + '^T'
if B_type is None:
B_type = A_type
            B_shape = list(reversed(A_shape))
else:
if B_shape is None:
raise ValidationError(
'Array B is not transient, but its shape is not set')
# Validate data
map_ranges, A_ranges, B_ranges = validate_matrix_transpose(
A_shape, B_shape, A_index, B_index)
A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
# Set label
if label is None:
label = A_label + '^T'
# Create state
state = State(label=label)
# Create datanodes
A_node = state.add_array(
A_label, A_shape, A_type, transient=is_A_transient)
B_node = state.add_array(
B_label, B_shape, B_type, transient=is_B_transient or create_B)
# Create map/tasklet
_, map_entry, map_exit = state.add_mapped_tasklet(
name=label,
map_ranges=map_ranges,
inputs=dict(a=dace.Memlet.simple(A_node, A_inner_range)),
outputs=dict(b=dace.Memlet.simple(B_node, B_inner_range)),
code='b = a')
# Add edges
state.add_nedge(A_node, map_entry, dace.Memlet.simple(
A_node, A_outer_range))
state.add_nedge(map_exit, B_node, dace.Memlet.simple(
B_node, B_outer_range))
return state
def validate_matrix_pointwise_op(
A_shape: Shape,
B_shape: Shape,
C_shape: Shape,
reduce: bool = False,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None
) -> (typing.Dict[str, str], (str, str), (str, str), (str, str)):
""" Validates a point-wise matrix operation. """
# Validate data
if A_index is not None:
true_A_shape = A_shape[len(A_index):]
else:
true_A_shape = A_shape
if B_index is not None:
true_B_shape = B_shape[len(B_index):]
else:
true_B_shape = B_shape
if true_A_shape != true_B_shape:
raise ValidationError('Dimension mismatch between arrays A and B: '
'{}({}) != {}({})'.format(
true_A_shape, A_shape, true_B_shape,
B_shape))
if reduce:
if C_index is None or len(C_shape) != len(C_index):
raise ValidationError(
'Point-wise matrix operation result cannot be reduced: '
'{}({})'.format(C_shape, C_index))
else:
if C_index is not None:
true_C_shape = C_shape[len(C_index):]
else:
true_C_shape = C_shape
        if true_A_shape != true_C_shape:
raise ValidationError('Dimension mismatch between arrays A and C: '
'{}({}) != {}({})'.format(
true_A_shape, A_shape, true_C_shape,
C_shape))
# Map ranges
map_ranges = dict()
for i, dim in enumerate(true_A_shape):
map_ranges['i{}'.format(i)] = '0:{}'.format(dim)
# Memlet ranges
A_outer_range = ', '.join(map_ranges.values())
A_inner_range = ', '.join(map_ranges.keys())
if A_index is not None:
A_index = [str(idx) for idx in A_index]
A_outer_range = '{}, {}'.format(', '.join(A_index), A_outer_range)
A_inner_range = '{}, {}'.format(', '.join(A_index), A_inner_range)
B_outer_range = ', '.join(map_ranges.values())
B_inner_range = ', '.join(map_ranges.keys())
if B_index is not None:
B_index = [str(idx) for idx in B_index]
B_outer_range = '{}, {}'.format(', '.join(B_index), B_outer_range)
B_inner_range = '{}, {}'.format(', '.join(B_index), B_inner_range)
if reduce:
C_index = [str(idx) for idx in C_index]
C_outer_range = ', '.join(C_index)
C_inner_range = ', '.join(C_index)
else:
C_outer_range = ', '.join(map_ranges.values())
C_inner_range = ', '.join(map_ranges.keys())
if C_index is not None:
C_index = [str(idx) for idx in C_index]
C_outer_range = '{}, {}'.format(', '.join(C_index), C_outer_range)
C_inner_range = '{}, {}'.format(', '.join(C_index), C_inner_range)
return (map_ranges, (A_outer_range, A_inner_range),
(B_outer_range, B_inner_range), (C_outer_range, C_inner_range))
def matrix_pointwise_op(state: State,
A_src: Node,
A_node: DNode,
B_src: Node,
B_node: DNode,
C_dst: Node,
C_node: DNode,
op: str,
reduce: bool = False,
reduce_op: str = None,
accumulate: bool = False,
A_index: Index = None,
B_index: Index = None,
C_index: Index = None,
label: str = None):
""" Adds a matrix point-wise operation to an existing state. """
# Validate data
sdfg = state.parent
C_shape = None
if reduce and not hasattr(C_node.desc(sdfg), 'shape'):
C_shape = [1]
else:
C_shape = C_node.desc(sdfg).shape
map_ranges, A_ranges, B_ranges, C_ranges = validate_matrix_pointwise_op(
A_node.desc(sdfg).shape,
B_node.desc(sdfg).shape, C_shape, reduce, A_index, B_index, C_index)
A_outer_range, A_inner_range = A_ranges
B_outer_range, B_inner_range = B_ranges
C_outer_range, C_inner_range = C_ranges
# Set label
if label is None:
label = state.label
# Create map/tasklet
if reduce:
schedule = dace.types.ScheduleType.Sequential
else:
schedule = dace.types.ScheduleType.Default
map_entry, map_exit = state.add_map(
name=label + '_map', ndrange=map_ranges, schedule=schedule)
map_entry.in_connectors = {'IN_1', 'IN_2'}
map_entry.out_connectors = {'OUT_1', 'OUT_2'}
map_exit.in_connectors = {'IN_1'}
map_exit.out_connectors = {'OUT_1'}
tasklet = state.add_tasklet(
name=label + '_tasklet',
inputs={'a', 'b'},
outputs={'c'},
code='c = a ' + op + ' b')
# Add edges
state.add_edge(A_src, None, map_entry, 'IN_1',
dace.Memlet.simple(A_node, A_outer_range))
state.add_edge(B_src, None, map_entry, 'IN_2',
dace.Memlet.simple(B_node, B_outer_range))
state.add_edge(map_exit, 'OUT_1', C_dst, None,
dace.Memlet.simple(C_node, C_outer_range))
state.add_edge(map_entry, 'OUT_1', tasklet, 'a',
dace.Memlet.simple(A_node, A_inner_range))
state.add_edge(map_entry, 'OUT_2', tasklet, 'b',
dace.Memlet.simple(B_node, B_inner_range))
if reduce:
wcr = 0
if accumulate:
wcr = None
state.add_edge(
tasklet, 'c', map_exit, 'IN_1',
dace.Memlet.simple(
C_node,
C_inner_range,
wcr_str='lambda x, y: x ' + reduce_op + ' y',
wcr_identity=wcr,
wcr_conflict=False))
else:
state.add_edge(tasklet, 'c', map_exit, 'IN_1',
dace.Memlet.simple(C_node, C_inner_range))
def csr2dense_cusparse(state: State, val: DNode, rowptr: DNode, colind: DNode,
dense: DNode):
""" Adds a CSR->Dense data layout transformation to a state, using
CUSPARSE for the implementation. """
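    # cusparseZcsr2dense fills the dense matrix in column-major order, so the
    # result is first written to a transient T and then transposed into the
    # row-major `dense` array by the matrix_transpose call at the end.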
sdfg = state.parent
dense_array = dense.desc(sdfg)
d_shape = dense_array.shape
d_dtype = dense_array.dtype
T = state.add_transient(dense.data + 'T', d_shape, d_dtype)
tasklet = state.add_tasklet(
name=dense.data + '_csr2dense',
inputs={'val', 'rowptr', 'colind'},
outputs={'dense'},
code='''
cusparseSetStream(sparse_handle, __dace_current_stream);
cusparseZcsr2dense(
sparse_handle,
{m}, {n},
sparse_mat_descr,
(cuDoubleComplex*)val,
rowptr,
colind,
(cuDoubleComplex*)dense,
{m}
);
'''.format(m=str(d_shape[0]), n=str(d_shape[1])),
language=dace.types.Language.CPP)
state.add_edge(val, None, tasklet, 'val',
dace.Memlet.from_array(val.data, val.desc(sdfg)))
state.add_edge(rowptr, None, tasklet, 'rowptr',
dace.Memlet.from_array(rowptr.data, rowptr.desc(sdfg)))
state.add_edge(colind, None, tasklet, 'colind',
dace.Memlet.from_array(colind.data, colind.desc(sdfg)))
state.add_edge(tasklet, 'dense', T, None,
dace.Memlet.from_array(T.data, T.desc(sdfg)))
gpu_transform_tasklet(sdfg, state, tasklet)
matrix_transpose(state, T, T, dense, dense, label=T.data)
def matrix_inversion_cusolver(state, arg, mat_inv, mat_index, label):
""" Adds a matrix inverse operation to a state, using CUSOLVER
for the implementation. """
sdfg = state.parent
m_shape = mat_inv.desc(sdfg).shape
inv_range = '0 : {sz}, 0 : {sz}'.format(sz=m_shape[-1])
if mat_index is not None:
index = [str(idx) for idx in mat_index]
inv_range = '{}, {}'.format(', '.join(index), inv_range)
inv_task = state.add_tasklet(
name=label,
inputs={'a'},
outputs={'b'},
code='''
cusolverDnSetStream(solver_handle, __dace_current_stream);
int new_lwork = 0;
cusolverDnZgetrf_bufferSize(
solver_handle,
{n}, {n},
(cuDoubleComplex*)a,
{n},
&new_lwork
);
//cudaDeviceSynchronize();
if (new_lwork > lwork) {{
lwork = new_lwork;
cudaFree(dwork);
cudaMalloc<cuDoubleComplex>(&dwork, sizeof(cuDoubleComplex) * lwork);
}}
cusolverDnZgetrf(
solver_handle,
{n}, {n},
(cuDoubleComplex*)a,
{n},
dwork, ipiv, info
);
//cudaDeviceSynchronize();
cudaMemcpyAsync(b, dev_I, sizeof(cuDoubleComplex) * {n} * {n}, cudaMemcpyDeviceToDevice, __dace_current_stream);
cusolverDnZgetrs(
solver_handle,
CUBLAS_OP_N,
{n},
{n}, /* nrhs */
(cuDoubleComplex*)a,
{n},
ipiv,
(cuDoubleComplex*)b,
{n},
info
);
//cudaDeviceSynchronize();
'''.format(n=m_shape[-1]),
language=dace.types.Language.CPP)
state.add_edge(arg, None, inv_task, 'a',
dace.Memlet.from_array(arg.data, arg.desc(sdfg)))
state.add_edge(inv_task, 'b', mat_inv, None,
dace.Memlet.simple(mat_inv, inv_range))
gpu_transform_tasklet(sdfg, state, inv_task)
# === File: /.history/집합_20200628155700.py (repo: sangha0719/py-practice) ===
# Sets (집합)
# No duplicates allowed, no ordering
my_set = {1, 2, 3, 3, 3}
print(my_set)
# === File: /python/frequency.py (repo: aslamup/huffman-coding) ===
def frequency(str):
freqs = {}
for ch in str:
        freqs[ch] = freqs.get(ch, 0) + 1
return freqs
print frequency('aaabccdeeeeeffg')
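# Cross-check with the standard library (illustrative; Python 2 print
# statement to match the script above):
from collections import Counter
print Counter('aaabccdeeeeeffg')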
# === File: /Model/SqlAlchemy/Weixin/WeixinModel.py (repo: 825477418/XX, MIT) ===
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/1/3 10:25
# @Email : [email protected]
# @Des :
# @File : WeixinModel
# @Software: PyCharm
from sqlalchemy import Column, Date, String
from sqlalchemy.dialects.mysql import INTEGER, TINYINT
from sqlalchemy.ext.declarative import declarative_base
import XX.Model.SqlAlchemy.BaseModel as BaseModel
Base = declarative_base()
metadata = Base.metadata
class WeixinModel(Base, BaseModel.BaseModel):
__tablename__ = 'weixin'
id = Column(INTEGER(11), primary_key=True)
wx_id = Column(INTEGER(11))
biz = Column(String(32), unique=True)
name = Column(String(255))
gh_id = Column(INTEGER(11))
weixin_id = Column(INTEGER(11))
head_img = Column(String(255))
head_img_circle = Column(String(255))
intro = Column(String(255))
no1 = Column(INTEGER(11))
no2 = Column(String(255))
no3 = Column(INTEGER(11))
no4 = Column(INTEGER(11))
no5 = Column(INTEGER(11))
is_del = Column(TINYINT(1))
update_Ts = Column(INTEGER(11))
create_ts = Column(INTEGER(11))
def __init__(self, *arg, **kw):
self.biz = kw.get("biz", None)
self.create_ts = kw.get("create_ts", None)
self.gh_id = kw.get("gh_id", None)
self.head_img = kw.get("head_img", None)
self.head_img_circle = kw.get("head_img_circle", None)
self.id = kw.get("id", None)
self.intro = kw.get("intro", None)
self.is_del = kw.get("is_del", None)
self.metadata = kw.get("metadata", None)
self.name = kw.get("name", None)
self.no1 = kw.get("no1", None)
self.no2 = kw.get("no2", None)
self.no3 = kw.get("no3", None)
self.no4 = kw.get("no4", None)
self.no5 = kw.get("no5", None)
self.update_Ts = kw.get("update_Ts", None)
self.weixin_id = kw.get("weixin_id", None)
self.wx_id = kw.get("wx_id", None)
if __name__ == '__main__':
BaseModel.createInitFunction(WeixinModel)
# === File: /Lemon/Python_Base/Lesson5_20181105/homework3.py (repo: HesterXu/Home) ===
# -*- coding: utf-8 -*-
# @Time : 2018/11/6 11:12
# @Author : Hester Xu
# @Email : [email protected]
# @File : homework3.py
# @Software: PyCharm
'''
A set of user login credentials is stored in a dictionary named login_info,
formatted as follows: login_info = {"admin": "root", "user_1": "123456"}
Keys are usernames and values are passwords. Write a program such that:
1) It implements a login flow: username/password pairs live in the dictionary,
   and a user logs in by entering a correct username and password.
2) It first asks for a username; while the username is empty or unknown,
   it keeps prompting for a valid one.
3) Once the username is correct, it prompts for the password; if the password
   does not match that username, it reports the error and asks again.
4) If the password is entered incorrectly more than three times, the program stops.
5) On every wrong password, it tells the user how many attempts remain.
6) When both username and password are correct, it reports a successful login.
'''
login_info={"admin":"root","user_1":"123456"}
username = input("请输入用户名:")
while username not in login_info.keys() or False:
username = input("请输入正确的用户名:")
pwd = input("请输入密码:")
def fac(pwd):
i = 3
while i <= 3:
if pwd in login_info[username]:
print("登录成功")
break
elif i == 0:
break
print("密码错误,还有{}次机会".format(i))
pwd = input("请重新输入密码:")
i -= 1
if pwd in login_info[username]:
print("登录成功")
else:
fac(pwd)
# === File: /Python_codes/p02866/s920983279.py (repo: Aasthaengg/IBMdataset) ===
import sys, re
from collections import deque, defaultdict, Counter
from math import ceil, sqrt, hypot, factorial, pi, sin, cos, tan, asin, acos, atan, radians, degrees, log2, gcd
from itertools import accumulate, permutations, combinations, combinations_with_replacement, product, groupby
from operator import itemgetter, mul
from copy import deepcopy
from string import ascii_lowercase, ascii_uppercase, digits
from bisect import bisect, bisect_left, insort, insort_left
from heapq import heappush, heappop
from functools import reduce
def input(): return sys.stdin.readline().strip()
def INT(): return int(input())
def MAP(): return map(int, input().split())
def LIST(): return list(map(int, input().split()))
def ZIP(n): return zip(*(MAP() for _ in range(n)))
sys.setrecursionlimit(10 ** 9)
INF = float('inf')
#mod = 10 ** 9 + 7
mod = 998244353
from decimal import *
#import numpy as np
#decimal.getcontext().prec = 10
N = INT()
D = LIST()
d = Counter(D)
if D[0] != 0 or 2 <= d[0]:
print(0)
exit()
ans = 1
for i in range(1, max(d.keys())+1):
ans = ans * pow(d[i-1], d[i], mod) % mod
print(ans)
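# Reasoning: a valid tree needs exactly one vertex (the root, vertex 1) at
# distance 0, and each vertex at distance i may pick any of the d[i-1]
# vertices at distance i-1 as its parent, giving prod_i d[i-1]**d[i]
# labeled trees modulo 998244353.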
# === File: /Learning Records and Essays/python/program.py (repo: chirsxjh/My-project) ===
'''
def ADD(x, list=[]):
list.append(x)
return list
list1 = ADD(10)
list2 = ADD(123, [])
list3 = ADD('a')
print "list1 = %s" %list1
print "list2 = %s" %list2
print "list3 = %s" %list3
'''
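# Note on the commented-out demo above: a mutable default argument like
# list=[] is created once, at function definition time, so list1 and list3
# end up sharing one underlying list ([10, 'a']) while list2, which was
# given a fresh [] explicitly, stays separate ([123]).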
a = ['天','干','地','址']
a.reverse()
print(a)
b = ''
for i in a:
b = b + str(i)
print(b)
# === File: /src/629A.py (repo: viing937/codeforces, MIT) ===
n = int(input())
c = [list(input()) for i in range(n)]
ans = 0
for i in range(n):
t = c[i].count('C')
ans += t*(t-1)//2
c = list(zip(*c))
for i in range(n):
t = c[i].count('C')
ans += t*(t-1)//2
print(ans)
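# Each row with t 'C' cells contributes t*(t-1)//2 = C(t, 2) same-row pairs;
# transposing the grid and repeating the count adds the same-column pairs.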
# === File: /resources/Onyx-1.0.511/py/onyx/signalprocessing/vocalsource.py (repo: eternity668/speechAD, Apache-2.0) ===
###########################################################################
#
# File: vocalsource.py (directory: py/onyx/signalprocessing)
# Date: 2008-07-21 Mon 18:01:50
# Author: Hugh Secker-Walker
# Description: Toying around with one-poles for a vocal source
#
# This file is part of Onyx http://onyxtools.sourceforge.net
#
# Copyright 2007 - 2009 The Johns Hopkins University
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
###########################################################################
"""
Use of coincident one-pole filters to generate reasonable, reversed,
glottal pulse waveforms.
"""
def make_one_pole(alpha):
def one_pole(alpha):
one_minus_alpha = 1 - alpha
y_n = 0
while True:
x = yield y_n
y_n = alpha * y_n + one_minus_alpha * x
send = one_pole(alpha).send
send(None)
return send
def chain(seq):
seq = tuple(seq)
def gen():
x = None
while True:
x = yield x
for h in seq:
x = h(x)
send = gen().send
send(None)
return send
def test():
"""
>>> op = chain(make_one_pole(0.8) for i in xrange(2))
>>> for x in (1,) + 30 * (0,): print ' ', ' ' * int(1000 * op(x)), '*'
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
>>> for x in (1,) + 25 * (0,) + (.25,) + 30 * (0,): print ' ', ' ' * int(1000 * op(x)), '*'
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
>>> op = chain(make_one_pole(0.6) for i in xrange(3))
>>> for x in (1,) + 20 * (0,): print ' ', ' ' * int(1000 * op(x)), '*'
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
*
"""
if __name__ == '__main__':
from onyx import onyx_mainstartup
onyx_mainstartup()
# === File: /Programmers/파이썬을파이썬답게/introduction.py (repo: arara90/AlgorithmAndDataStructure) ===
# Bad example => reads like C or Java
def Badsolution(mylist):
    answer = []
for i in mylist:
answer.append(len(i))
return answer
def solution(mylist):
return list(map(len, mylist))
print(solution([[1,2], [1], [1,2,3]]))
# === File: /SelectionProc/asgi.py (repo: nayneshrathod/SelectionProces) ===
"""
ASGI config for SelectionProc project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'SelectionProc.settings')
application = get_asgi_application()
# === File: /src/backend/partaj/core/migrations/0039_generate_initial_topic_materialzed_paths.py (repo: MTES-MCT/partaj, MIT) ===
# Generated by Django 3.0.5 on 2021-01-11 16:29
from django.db import migrations
def forwards(apps, schema_editor):
"""
As we add explicitly non-nullable Materialized Paths for Topics in 0038, we need to generate
initial values for them once as the app migrates along.
    This replaces the insignificant "0000" default set in 0038 with real values.
"""
    # Important: this migration must be run with the Partaj code (especially the Topic model)
    # in the state it was in at the time of this commit.
    # We cannot use the regular `Topic = apps.get_model("core", "Topic")` to get the correct
    # version of the model, as we need the custom manager for Topic, which is not available
    # from `apps.get_model`.
from partaj.core.models.unit import Topic
Topic.objects.build_materialized_paths()
def backwards(apps, schema_editor):
"""
As topic Materialized Path fields are added with insignificant values in migration 0038,
we can just ignore them here as they should be removed in a migration that goes to 0037,
and it would break Partaj to remove them and stay at 0038.
"""
pass
class Migration(migrations.Migration):
dependencies = [
("core", "0038_add_materialized_path_to_topics"),
]
operations = [
migrations.RunPython(forwards, reverse_code=backwards),
]
# === File: /.history/real_estate/settings_20201111113422.py (repo: David-Uk/django-real-estate) ===
"""
Django settings for real_estate project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '1r6gx$qnirqznxr*b^+81t&(s@bwfcwa14zy1+10k=jyn=*tae'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'pserver',
'pages',
'listings',
'realtor',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'real_estate.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'real_estate.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'real_estate/static')
]
# === File: /venv/Scripts/django-admin.py (repo: Md-Jahid-Hasan/recipe_api) ===
#!F:\New folder (2)\Projects\Django\recepi_api\venv\Scripts\python.exe
# When the django-admin.py deprecation ends, remove this script.
import warnings
from django.core import management
try:
from django.utils.deprecation import RemovedInDjango40Warning
except ImportError:
raise ImportError(
'django-admin.py was deprecated in Django 3.1 and removed in Django '
'4.0. Please manually remove this script from your virtual environment '
'and use django-admin instead.'
)
if __name__ == "__main__":
warnings.warn(
'django-admin.py is deprecated in favor of django-admin.',
RemovedInDjango40Warning,
)
management.execute_from_command_line()
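# Editor's note (assumed shell session, not from this repository): invoking
# the shim still works but emits the deprecation warning first, e.g.
#
#   $ python django-admin.py check
#   RemovedInDjango40Warning: django-admin.py is deprecated in favor of django-admin.
#   ...
#
# The supported equivalent is simply:
#
#   $ django-admin check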
| [
"[email protected]"
]
| |
ca84e03841878e26147430f70953307c781e0d13 | 9c73dd3043f7db7c9ec76d560484e99ad134fdb6 | /students/douglas_klos/lesson01/assignment/pytests/test_integration.py | ec086b6cc4992e206ac1bb6bd5d2d8197cbecf22 | []
| no_license | UWPCE-PythonCert-ClassRepos/py220-online-201904-V2 | 546b316025b680ca28d24b523663095398616b13 | ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56 | refs/heads/master | 2022-12-10T03:14:25.514630 | 2019-06-11T02:14:17 | 2019-06-11T02:14:17 | 179,139,181 | 1 | 19 | null | 2022-12-08T01:43:38 | 2019-04-02T18:49:10 | Python | UTF-8 | Python | false | false | 6,476 | py | #pylint: disable=W0201
"""Pytest cases for integration testing"""
# Douglas Klos
# April 5th, 2019
# Python 220, Lesson 01
# test_integration.py
from inventory_management.inventory_class import Inventory
from inventory_management.furniture_class import Furniture
from inventory_management.electric_appliances_class import ElectricAppliances
# Feels like integration tests are just rebuilding main
# with predefined values and no user input.
# Validates that the modules work together though I suppose.
# Also 80 width linting does not improve readability
# in a language that is all about readability.
class TestClass():
"""Integration tests for inventory_management
Attributes:
item_chair (dict) : dictionary for chair item
item_microwave (dict) : dictionary for microwave electric appliance
item_sofa (dict) : dictionary for sofa furniture
full_inventory (dict) : dictionary database of above items
inventory_string (str) : string containing all data in full_inventory
"""
def setup_method(self):
"""Initialize before each test method"""
self.item_chair = {}
self.item_microwave = {}
self.item_sofa = {}
self.full_inventory = {}
self.inventory_string = ''
self.item_chair['product_code'] = 100
self.item_chair['description'] = 'Chair'
self.item_chair['market_price'] = 111
self.item_chair['rental_price'] = 11
self.full_inventory[self.item_chair['product_code']] = \
Inventory(**self.item_chair).return_as_dictionary()
self.item_microwave['product_code'] = 200
self.item_microwave['description'] = 'Microwave'
self.item_microwave['market_price'] = 222
self.item_microwave['rental_price'] = 22
self.item_microwave['brand'] = 'Samsung'
self.item_microwave['voltage'] = 230
self.full_inventory[self.item_microwave['product_code']] = \
ElectricAppliances(**self.item_microwave).return_as_dictionary()
self.item_sofa['product_code'] = 300
self.item_sofa['description'] = 'Sofa'
self.item_sofa['market_price'] = 333
self.item_sofa['rental_price'] = 33
self.item_sofa['material'] = 'Leather'
self.item_sofa['size'] = 'XL'
self.full_inventory[self.item_sofa['product_code']] = \
Furniture(**self.item_sofa).return_as_dictionary()
for item_code in self.full_inventory:
for value in self.full_inventory[item_code].values():
self.inventory_string += f'{value}'
def test_integration_chair(self):
"""Integration test for chair inventory
Verifies that all chair related data is present.
"""
assert str(self.item_chair['product_code']) in self.inventory_string
assert str(self.item_chair['description']) in self.inventory_string
assert str(self.item_chair['market_price']) in self.inventory_string
assert str(self.item_chair['rental_price']) in self.inventory_string
def test_integration_microwave(self):
"""Integration test for microwave electrical applicance
Verifies that all microwave related data is present.
"""
assert str(self.item_microwave['product_code']) in self.inventory_string
assert str(self.item_microwave['description']) in self.inventory_string
assert str(self.item_microwave['market_price']) in self.inventory_string
        assert str(self.item_microwave['rental_price']) in self.inventory_string
assert str(self.item_microwave['brand']) in self.inventory_string
assert str(self.item_microwave['voltage']) in self.inventory_string
def test_integration_sofa(self):
"""Integration test for sofa furniture
Verifies that all sofa related data is present.
"""
assert str(self.item_sofa['product_code']) in self.inventory_string
assert str(self.item_sofa['description']) in self.inventory_string
assert str(self.item_sofa['market_price']) in self.inventory_string
assert str(self.item_sofa['rental_price']) in self.inventory_string
assert str(self.item_sofa['material']) in self.inventory_string
assert str(self.item_sofa['size']) in self.inventory_string
def test_full_string(self):
"""Integration test
        We build up a string of all the values in the database,
        then go through each expected value and remove it.
        If nothing is left at the end, the test passes.
"""
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['product_code']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['description']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['market_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_chair['rental_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['product_code']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['description']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['market_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['rental_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['brand']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_microwave['voltage']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['product_code']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['description']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['market_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['rental_price']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['material']), '')
self.inventory_string = self.inventory_string.replace(
str(self.item_sofa['size']), '')
assert self.inventory_string == ''
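# --- Editor's sketch (not part of the original test module) ---
# The repeated str.replace() calls in test_full_string above can be
# collapsed into a loop over the item dictionaries. A standalone helper
# illustrating the idea (hypothetical; the original suite does not define it):
def _strip_values(inventory_string, *items):
    """Remove the string form of every item value; illustration only."""
    for item in items:
        for value in item.values():
            inventory_string = inventory_string.replace(str(value), '')
    return inventory_string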
| [
"[email protected]"
]
| |
b0e25979e15ea5efd7427c6038b6c11cb3178ac7 | dc025df4a433b82c96fa7a4e064f46ecc948d1a2 | /subsets.py | db716e80dbae2dc466bda23b9e555097d492879d | []
| no_license | bingh0616/algorithms | c9d3babd6cbf3aefd40fa28a3c839c7201f1028c | 3b16c72d9361c4bb063e4b2789db695f1e0149bf | refs/heads/master | 2021-01-18T14:10:45.125905 | 2015-11-19T06:03:48 | 2015-11-19T06:03:48 | 35,512,148 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 681 | py | # problem description: https://leetcode.com/problems/subsets/
class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def subsets(self, nums):
nums.sort()
return self.helper(nums)
def helper(self, nums):
if not nums: return [[]]
res = []
for r in self.helper(nums[1:]):
res.append(r)
res.append([nums[0]]+r)
return res
# Iterative alternative. Note: reusing the name means this second
# definition shadows the recursive Solution above when the file is run.
class Solution:
# @param {integer[]} nums
# @return {integer[][]}
def subsets(self, nums):
nums.sort()
res = [[]]
for n in nums:
res = res + [r + [n] for r in res]
return res
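# Quick sanity check (editor's addition, not part of the original snippet);
# it exercises the iterative version, since the second class definition wins:
if __name__ == '__main__':
    print(Solution().subsets([1, 2]))  # [[], [1], [2], [1, 2]]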
| [
"[email protected]"
]
| |
0e9cd66bbd0054c01e59c474b682f77a6ffa0e06 | 70f5f279e051360310f95be895320d8fa6cd8d93 | /extraPackages/matplotlib-3.0.2/examples/text_labels_and_annotations/text_fontdict.py | ad6fa8cc972b272be0283fbb0c838ad95c23a5c4 | [
"BSD-3-Clause"
]
| permissive | spacetime314/python3_ios | 4b16ab3e81c31213b3db1e1eb00230621b0a7dc8 | e149f1bc2e50046c8810f83dae7739a8dea939ee | refs/heads/master | 2020-05-09T20:39:14.980041 | 2019-04-08T15:07:53 | 2019-04-08T15:07:53 | 181,415,024 | 2 | 0 | BSD-3-Clause | 2019-04-15T05:00:14 | 2019-04-15T05:00:12 | null | UTF-8 | Python | false | false | 865 | py | """
=======================================================
Controlling style of text and labels using a dictionary
=======================================================
This example shows how to share parameters across many text objects and labels
by creating a dictionary of options passed across several functions.
"""
import numpy as np
import matplotlib.pyplot as plt
font = {'family': 'serif',
'color': 'darkred',
'weight': 'normal',
'size': 16,
}
x = np.linspace(0.0, 5.0, 100)
y = np.cos(2*np.pi*x) * np.exp(-x)
plt.plot(x, y, 'k')
plt.title('Damped exponential decay', fontdict=font)
plt.text(2, 0.65, r'$\cos(2 \pi t) \exp(-t)$', fontdict=font)
plt.xlabel('time (s)', fontdict=font)
plt.ylabel('voltage (mV)', fontdict=font)
# Tweak spacing to prevent clipping of ylabel
plt.subplots_adjust(left=0.15)
plt.show()
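# Editor's addition (illustrative, not part of the original gallery example):
# because the options live in one dictionary, a variant style is a single
# dict() merge away and can be reused the same way.
variant_font = dict(font, size=10, style='italic')
plt.figure()
plt.text(0.5, 0.5, 'same family and color, smaller and italic',
         fontdict=variant_font)
plt.show()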
| [
"[email protected]"
]
| |
b9b7099438fdf0d48829c0a48561dbbc3874bb41 | 865bd0c84d06b53a39943dd6d71857e9cfc6d385 | /200-number-of-islands/number-of-islands.py | 5fc729f5608542946c98442cec6944f6321aa2a1 | []
| no_license | ANDYsGUITAR/leetcode | 1fd107946f4df50cadb9bd7189b9f7b7128dc9f1 | cbca35396738f1fb750f58424b00b9f10232e574 | refs/heads/master | 2020-04-01T18:24:01.072127 | 2019-04-04T08:38:44 | 2019-04-04T08:38:44 | 153,473,780 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,227 | py | # Given a 2d grid map of '1's (land) and '0's (water), count the number of islands. An island is surrounded by water and is formed by connecting adjacent lands horizontally or vertically. You may assume all four edges of the grid are all surrounded by water.
#
# Example 1:
#
#
# Input:
# 11110
# 11010
# 11000
# 00000
#
# Output: 1
#
#
# Example 2:
#
#
# Input:
# 11000
# 11000
# 00100
# 00011
#
# Output: 3
#
class Solution:
def numIslands(self, grid):
"""
:type grid: List[List[str]]
:rtype: int
"""
if len(grid) == 0:
return 0
n = len(grid)
m = len(grid[0])
result = 0
for i in range(n):
for j in range(m):
if grid[i][j] == '1':
self.DFSisland(grid,i,j)
result += 1
return result
    def DFSisland(self, grid, i, j):
        # Flood-fill: sink the current land cell, then recurse into the
        # four neighbours so each island is only counted once.
        if i < 0 or j < 0 or i >= len(grid) or j >= len(grid[0]):
            return
        if grid[i][j] == '1':
            grid[i][j] = '0'  # mark visited with the same string type as the input
            self.DFSisland(grid, i + 1, j)
            self.DFSisland(grid, i - 1, j)
            self.DFSisland(grid, i, j + 1)
            self.DFSisland(grid, i, j - 1)
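# Quick check against Example 2 from the problem statement above
# (editor's addition; the grid literal mirrors the example input):
if __name__ == '__main__':
    grid = [list("11000"),
            list("11000"),
            list("00100"),
            list("00011")]
    print(Solution().numIslands(grid))  # expected: 3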
| [
"[email protected]"
]
| |
80640c531be3ce486871f2c0e1d8a4fe3315e162 | 297efd4afeb46c0b56d9a975d76665caef213acc | /src/core/migrations/0123_auto_20191204_1711.py | 81bd185dc755bfa58df30b1e75a3d95810624907 | [
"MIT"
]
| permissive | metabolism-of-cities/metabolism-of-cities-platform-v3 | 67716c3daae86a0fe527c18aef26ce29e069cbcc | c754d3b1b401906a21640b8eacb6b724a448b31c | refs/heads/master | 2022-12-06T22:56:22.207853 | 2020-08-25T09:53:51 | 2020-08-25T09:53:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,218 | py | # Generated by Django 2.2.7 on 2019-12-04 17:11
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
dependencies = [
('core', '0122_reference_cityloops_comments_import'),
]
operations = [
migrations.CreateModel(
name='MethodCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('description', tinymce.models.HTMLField(blank=True, null=True, verbose_name='description')),
],
),
migrations.AddField(
model_name='method',
name='method_class',
field=models.CharField(blank=True, choices=[('3', 'Relation in UM systems'), ('2', 'Flows of substances'), ('1', 'Environmental impacts')], max_length=1, null=True),
),
migrations.AddField(
model_name='method',
name='category',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='core.MethodCategory'),
),
]
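# Editor's note (assumed commands, standard Django tooling; not part of the
# original file): the migration is applied or rolled back with manage.py:
#
#   python manage.py migrate core 0123   # apply up to this migration
#   python manage.py migrate core 0122   # revert to the previous state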
| [
"[email protected]"
]
|