max_stars_repo_path (stringlengths 4-245) | max_stars_repo_name (stringlengths 7-115) | max_stars_count (int64 101-368k) | id (stringlengths 2-8) | content (stringlengths 6-1.03M) |
---|---|---|---|---|
openspeech/modules/add_normalization.py | CanYouImagine/openspeech | 207 | 12778839 | <filename>openspeech/modules/add_normalization.py
# MIT License
#
# Copyright (c) 2021 <NAME> and <NAME> and <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import torch.nn as nn
class AddNorm(nn.Module):
"""
Add & Normalization layer proposed in "Attention Is All You Need".
The Transformer employs a residual connection around each of the two sub-layers
(Multi-Head Attention & Feed-Forward), followed by layer normalization.
"""
def __init__(self, sublayer: nn.Module, d_model: int = 512) -> None:
super(AddNorm, self).__init__()
self.sublayer = sublayer
self.layer_norm = nn.LayerNorm(d_model)
def forward(self, *args):
residual = args[0]
outputs = self.sublayer(*args)
if isinstance(outputs, tuple):
return self.layer_norm(outputs[0] + residual), outputs[1]
return self.layer_norm(outputs + residual)
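# A minimal usage sketch (not part of the original module), assuming torch is
# installed; the layer sizes and tensor shapes below are illustrative only.
if __name__ == "__main__":
    import torch

    d_model = 512
    feed_forward = nn.Sequential(
        nn.Linear(d_model, 2048), nn.ReLU(), nn.Linear(2048, d_model)
    )
    add_norm = AddNorm(feed_forward, d_model=d_model)
    inputs = torch.randn(4, 10, d_model)  # (batch, time, d_model)
    print(add_norm(inputs).shape)  # torch.Size([4, 10, 512])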
|
external/iotivity/iotivity_1.2-rel/build_common/iotivityconfig/compiler/default_configuration.py | SenthilKumarGS/TizenRT | 1,433 | 12778905 | # ------------------------------------------------------------------------
# Copyright 2015 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
from configuration import Configuration
# Default (very simple) compiler configuration
class DefaultConfiguration(Configuration):
def __init__(self, context):
Configuration.__init__(self, context)
# ------------------------------------------------------------
# Return test program to be used when checking for basic C99
# support.
# ------------------------------------------------------------
def _c99_test_program(self):
return """
// Some headers found in C99.
#include <stdbool.h>
#include <stdint.h>
int main()
{
struct foo
{
bool b; // C99 type
int i;
uint64_t q; // C99 type
};
// Designated initializer.
struct foo bar = { .b = false, .q = UINT64_MAX };
// Implicitly initialized field.
return bar.i != 0;
}
"""
# --------------------------------------------------------------
# Get list of flags that could potentially enable C99 support.
#
# The default configuration assumes that no flag is needed to
# enable C99 support.
# --------------------------------------------------------------
def _c99_flags(self):
return []
# ------------------------------------------------------------
# Return test program to be used when checking for basic C++11
# support.
# ------------------------------------------------------------
def _cxx11_test_program(self):
return """
int main()
{
int x = 3210;
auto f = [x](){
return x;
};
return f() != x;
}
"""
# --------------------------------------------------------------
# Get list of flags that could potentially enable C++11 support.
#
# The default configuration assumes that no flag is needed to
# enable C++11 support.
# --------------------------------------------------------------
def _cxx11_flags(self):
return []
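# Illustrative sketch (not part of the original file): a compiler-specific
# configuration would override these hooks to return candidate flags to try,
# e.g. for a GCC-like compiler. The class and flag values below are
# hypothetical examples, not the project's actual gcc/clang modules.
#
# class HypotheticalGccConfiguration(DefaultConfiguration):
#     def _c99_flags(self):
#         return ['-std=c99', '-std=gnu99']
#
#     def _cxx11_flags(self):
#         return ['-std=c++11', '-std=c++0x']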
|
tests/test_util.py | magicalyak/blinkpy | 272 | 12778913 | <gh_stars>100-1000
"""Test various api functions."""
import unittest
from unittest import mock
import time
from blinkpy.helpers.util import json_load, Throttle, time_to_seconds, gen_uid
class TestUtil(unittest.TestCase):
"""Test the helpers/util module."""
def setUp(self):
"""Initialize the blink module."""
def tearDown(self):
"""Tear down blink module."""
def test_throttle(self):
"""Test the throttle decorator."""
calls = []
@Throttle(seconds=5)
def test_throttle():
calls.append(1)
now = int(time.time())
now_plus_four = now + 4
now_plus_six = now + 6
test_throttle()
self.assertEqual(1, len(calls))
# Call again, still shouldn't fire
test_throttle()
self.assertEqual(1, len(calls))
# Call with force
test_throttle(force=True)
self.assertEqual(2, len(calls))
# Call again without force, still shouldn't fire
test_throttle()
self.assertEqual(2, len(calls))
# Fake time as 4 seconds from now
with mock.patch("time.time", return_value=now_plus_four):
test_throttle()
self.assertEqual(2, len(calls))
# Fake time as 6 seconds from now
with mock.patch("time.time", return_value=now_plus_six):
test_throttle()
self.assertEqual(3, len(calls))
def test_throttle_per_instance(self):
"""Test that throttle is done once per instance of class."""
class Tester:
"""A tester class for throttling."""
def test(self):
"""Test the throttle."""
return True
tester = Tester()
throttled = Throttle(seconds=1)(tester.test)
self.assertEqual(throttled(), True)
self.assertEqual(throttled(), None)
def test_throttle_multiple_objects(self):
"""Test that function is throttled even if called by multiple objects."""
@Throttle(seconds=5)
def test_throttle_method():
return True
class Tester:
"""A tester class for throttling."""
def test(self):
"""Test function for throttle."""
return test_throttle_method()
tester1 = Tester()
tester2 = Tester()
self.assertEqual(tester1.test(), True)
self.assertEqual(tester2.test(), None)
def test_throttle_on_two_methods(self):
"""Test that throttle works for multiple methods."""
class Tester:
"""A tester class for throttling."""
@Throttle(seconds=3)
def test1(self):
"""Test function for throttle."""
return True
@Throttle(seconds=5)
def test2(self):
"""Test function for throttle."""
return True
tester = Tester()
now = time.time()
now_plus_4 = now + 4
now_plus_6 = now + 6
self.assertEqual(tester.test1(), True)
self.assertEqual(tester.test2(), True)
self.assertEqual(tester.test1(), None)
self.assertEqual(tester.test2(), None)
with mock.patch("time.time", return_value=now_plus_4):
self.assertEqual(tester.test1(), True)
self.assertEqual(tester.test2(), None)
with mock.patch("time.time", return_value=now_plus_6):
self.assertEqual(tester.test1(), None)
self.assertEqual(tester.test2(), True)
def test_time_to_seconds(self):
"""Test time to seconds conversion."""
correct_time = "1970-01-01T00:00:05+00:00"
wrong_time = "1/1/1970 00:00:03"
self.assertEqual(time_to_seconds(correct_time), 5)
self.assertFalse(time_to_seconds(wrong_time))
def test_json_load_bad_data(self):
"""Check that bad file is handled."""
self.assertEqual(json_load("fake.file"), None)
with mock.patch("builtins.open", mock.mock_open(read_data="")):
self.assertEqual(json_load("fake.file"), None)
def test_gen_uid(self):
"""Test gen_uid formatting."""
val1 = gen_uid(8)
val2 = gen_uid(8, uid_format=True)
self.assertEqual(len(val1), 16)
self.assertTrue(val2.startswith("BlinkCamera_"))
val2_cut = val2.split("_")
val2_split = val2_cut[1].split("-")
self.assertEqual(len(val2_split[0]), 8)
self.assertEqual(len(val2_split[1]), 4)
self.assertEqual(len(val2_split[2]), 4)
self.assertEqual(len(val2_split[3]), 4)
self.assertEqual(len(val2_split[4]), 12)
|
Packs/Imperva_WAF/Integrations/ImpervaWAF/ImpervaWAF.py | diCagri/content | 799 | 12778944 | import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import json
import requests
import traceback
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' CONSTANTS '''
INTEGRATION_CONTEXT_NAME = 'ImpervaWAF'
class Client(BaseClient):
"""
Client will implement the service API, and should not contain any Demisto logic.
Should only do requests and return data.
"""
session_id = ''
def do_request(self, method, url_suffix, json_data=None):
if not self.session_id:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406), resp_type='response')
if res.status_code == 401:
self.login()
res = self._http_request(method, f'SecureSphere/api/v1/{url_suffix}', json_data=json_data,
headers={'Cookie': self.session_id}, ok_codes=(200, 401, 406),
resp_type='response')
if res.text:
res = res.json()
else:
res = {}
extract_errors(res)
return res
def login(self):
res = self._http_request('POST', 'SecureSphere/api/v1/auth/session', auth=self._auth)
extract_errors(res)
self.session_id = res.get('session-id')
def get_ip_group_entities(self, group_name, table_name):
raw_res = self.do_request('GET', f'conf/ipGroups/{group_name}')
entries = []
for entry in raw_res.get('entries'):
entries.append({'Type': entry.get('type'),
'IpAddressFrom': entry.get('ipAddressFrom'),
'IpAddressTo': entry.get('ipAddressTo'),
'NetworkAddress': entry.get('networkAddress'),
'CidrMask': entry.get('cidrMask')})
human_readable = tableToMarkdown(table_name, entries, removeNull=True,
headers=['Type', 'IpAddressFrom', 'IpAddressTo', 'NetworkAddress', 'CidrMask'])
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)':
{'Name': group_name, 'Entries': entries}}
return human_readable, entry_context, raw_res
def get_custom_policy_outputs(self, policy_name, table_name):
raw_res = self.do_request('GET', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
policy = {'Name': policy_name,
'Enabled': raw_res.get('enabled'),
'OneAlertPerSession': raw_res.get('oneAlertPerSession'),
'DisplayResponsePage': raw_res.get('displayResponsePage'),
'Severity': raw_res.get('severity'),
'Action': raw_res.get('action'),
'FollowedAction': raw_res.get('followedAction'),
'ApplyTo': raw_res.get('applyTo'),
'MatchCriteria': raw_res.get('matchCriteria')}
hr_policy = policy.copy()
del hr_policy['MatchCriteria']
del hr_policy['ApplyTo']
human_readable = tableToMarkdown(table_name, hr_policy, removeNull=True)
if raw_res.get('applyTo'):
human_readable += '\n\n' + tableToMarkdown('Services to apply the policy to', raw_res.get('applyTo'),
removeNull=True)
for match in raw_res.get('matchCriteria', []):
tmp_match = match.copy()
operation = match['operation']
match_type = match['type']
# generate human readable for sourceIpAddresses type
if match_type == 'sourceIpAddresses':
if tmp_match.get('userDefined'):
for i, element in enumerate(tmp_match['userDefined']):
tmp_match['userDefined'][i] = {'IP Address': tmp_match['userDefined'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Source IP addresses:',
tmp_match['userDefined'], removeNull=True)
if tmp_match.get('ipGroups'):
for i, element in enumerate(tmp_match['ipGroups']):
tmp_match['ipGroups'][i] = {'Group name': tmp_match['ipGroups'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n IP Groups:',
tmp_match['ipGroups'], removeNull=True)
# generate human readable for sourceGeolocation type
elif match_type == 'sourceGeolocation':
if tmp_match.get('values'):
for i, element in enumerate(tmp_match['values']):
tmp_match['values'][i] = {'Country name': tmp_match['values'][i]}
human_readable += '\n\n' + tableToMarkdown(f'Match operation: {operation}\n Countries to match:',
tmp_match['values'], removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policy}
return human_readable, entry_context, raw_res
def extract_errors(res):
if not isinstance(res, list) and res.get('errors'):
error_message = ''
for err in res['errors']:
error_message += f'error-code: {err.get("error-code")}, description: {err.get("description")}\n'
raise Exception(error_message)
def generate_policy_data_body(args):
severity = args.get('severity')
action = args.get('action')
followed_action = args.get('followed-action')
body = {}
if args.get('enabled'):
body['enabled'] = args['enabled'] == 'True'
if args.get('one-alert-per-session'):
body['oneAlertPerSession'] = args['one-alert-per-session'] == 'True'
if args.get('display-response-page'):
body['displayResponsePage'] = args['display-response-page'] == 'True'
if severity:
body['severity'] = severity
if action:
body['action'] = action
if followed_action:
body['followedAction'] = followed_action
return body
def generate_match_criteria(body, args):
geo_location_criteria_operation = args.get('geo-location-criteria-operation')
ip_addresses_criteria_operation = args.get('ip-addresses-criteria-operation')
ip_groups = args.get('ip-groups', '')
ip_addresses = args.get('ip-addresses', '')
country_names = args.get('country-names', '')
match_criteria = []
if geo_location_criteria_operation:
if not country_names:
raise Exception('country-names argument is empty')
geo_location_match_item = {'type': 'sourceGeolocation',
'operation': geo_location_criteria_operation,
'values': country_names.split(',')}
match_criteria.append(geo_location_match_item)
if ip_addresses_criteria_operation:
if not ip_groups and not ip_addresses:
raise Exception('ip-groups and ip-addresses arguments are empty, please fill at least one of them')
ip_addresses_match_item = {'type': 'sourceIpAddresses',
'operation': ip_addresses_criteria_operation}
if ip_groups:
ip_addresses_match_item['ipGroups'] = ip_groups.split(',')
if ip_addresses:
ip_addresses_match_item['userDefined'] = ip_addresses.split(',')
match_criteria.append(ip_addresses_match_item)
body['matchCriteria'] = match_criteria
return body
def generate_ip_groups_entries(args):
entry_type = args.get('entry-type')
ip_from = args.get('ip-address-from')
ip_to = args.get('ip-address-to')
network_address = args.get('network-address')
cidr_mask = args.get('cidr-mask')
operation = args.get('operation')
json_entries = args.get('json-entries')
if not json_entries:
entry = {}
if entry_type == 'single':
entry['ipAddressFrom'] = ip_from
elif entry_type == 'range':
entry['ipAddressFrom'] = ip_from
entry['ipAddressTo'] = ip_to
elif entry_type == 'network':
entry['networkAddress'] = network_address
entry['cidrMask'] = cidr_mask
else:
raise Exception('entry-type argument is invalid')
entry['type'] = entry_type
entry['operation'] = operation
body = {'entries': [entry]}
else:
try:
json_entries = json.loads(json_entries)
except Exception:
raise Exception(f'Failed to parse json-entries as JSON data, received object:\n{json_entries}')
body = {'entries': json_entries}
return body
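# Example (argument values are illustrative): args of
#   {'entry-type': 'range', 'ip-address-from': '10.0.0.1',
#    'ip-address-to': '10.0.0.50', 'operation': 'add'}
# yield the request body
#   {'entries': [{'ipAddressFrom': '10.0.0.1', 'ipAddressTo': '10.0.0.50',
#                 'type': 'range', 'operation': 'add'}]}
# Alternatively, json-entries may carry a ready-made JSON list of entries.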
@logger
def test_module(client, args):
raw_res = client.do_request('GET', 'conf/sites')
if raw_res.get('sites'):
demisto.results('ok')
@logger
def ip_group_list_command(client, args):
raw_res = client.do_request('GET', 'conf/ipGroups')
groups = []
if raw_res.get('names'):
groups = raw_res['names']
for i, element in enumerate(groups):
groups[i] = {'Name': groups[i]}
human_readable = tableToMarkdown('IP groups', groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.IpGroup(val.Name===obj.Name)': groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_list_entries_command(client, args):
group_name = args.get('ip-group-name')
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'IP group entries for {group_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def ip_group_remove_entries_command(client, args):
group_name = args.get('ip-group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}/clear')
return_outputs(f'The IP group {group_name} is now empty', {}, raw_res)
@logger
def sites_list_command(client, args):
raw_res = client.do_request('GET', 'conf/sites')
sites = [{'Name': site} for site in raw_res.get('sites', [])]
human_readable = tableToMarkdown('All sites in the system', sites, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.Site(val.Name===obj.Name)': sites}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_groups_list_command(client, args):
site = args.get('site-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}')
server_groups = []
if raw_res.get('server-groups'):
server_groups = raw_res['server-groups']
for i, element in enumerate(server_groups):
server_groups[i] = {'Name': server_groups[i], 'SiteName': site}
human_readable = tableToMarkdown(f'Server groups in {site}', server_groups, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.ServerGroup(val.Name===obj.Name)': server_groups}
return_outputs(human_readable, entry_context, raw_res)
@logger
def server_group_policies_list_command(client, args):
site = args.get('site-name')
server_group = args.get('server-group-name')
raw_res = client.do_request('GET', f'conf/serverGroups/{site}/{server_group}/securityPolicies')
policies = []
for policy in raw_res:
policies.append({'System': policy.get('system'),
'PolicyName': policy.get('policy-name'),
'PolicyType': policy.get('policy-type'),
'ServerGroup': server_group,
'SiteName': site})
human_readable = tableToMarkdown(f'Policies for {server_group}', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.SecurityPolicy(val.PolicyName===obj.PolicyName)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def custom_policy_list_command(client, args):
raw_res = client.do_request('GET', 'conf/policies/security/webServiceCustomPolicies')
policies = []
if raw_res.get('customWebPolicies'):
policies = raw_res['customWebPolicies']
for i, element in enumerate(policies):
policies[i] = {'Name': policies[i]}
human_readable = tableToMarkdown('Custom web policies', policies, removeNull=True)
entry_context = {f'{INTEGRATION_CONTEXT_NAME}.CustomWebPolicy(val.Name===obj.Name)': policies}
return_outputs(human_readable, entry_context, raw_res)
@logger
def get_custom_policy_command(client, args):
policy_name = args.get('policy-name')
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy data for {policy_name}')
return_outputs(human_readable, entry_context, raw_res)
@logger
def create_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('POST', f'conf/ipGroups/{group_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_ip_group_command(client, args):
group_name = args.get('group-name')
body = generate_ip_groups_entries(args)
client.do_request('PUT', f'conf/ipGroups/{group_name}/data', json_data=body)
human_readable, entry_context, raw_res = \
client.get_ip_group_entities(group_name, f'Group {group_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_ip_group_command(client, args):
group_name = args.get('group-name')
raw_res = client.do_request('DELETE', f'conf/ipGroups/{group_name}')
return_outputs(f'Group {group_name} deleted successfully', {}, raw_res)
@logger
def create_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply')
web_service = args.get('web-service-name-to-apply')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise Exception(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
body['applyTo'] = [{'siteName': site, 'serverGroupName': server_group, 'webServiceName': web_service}]
client.do_request('POST', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} created successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def update_custom_policy_command(client, args):
policy_name = args.get('policy-name')
site = args.get('site-name-to-apply')
server_group = args.get('server-group-name-to-apply', '')
web_service = args.get('web-service-name-to-apply', '')
apply_operation = args.get('apply-operation', '')
match_criteria_json = args.get('match-criteria-json')
body = generate_policy_data_body(args)
if match_criteria_json and not isinstance(match_criteria_json, dict):
try:
match_criteria_json = json.loads(match_criteria_json)
except Exception:
raise DemistoException(f'Failed to parse match-criteria-json as JSON data,'
f' received object:\n{match_criteria_json}')
body['matchCriteria'] = match_criteria_json
else:
body = generate_match_criteria(body, args)
if apply_operation:
body['applyTo'] = [{'operation': apply_operation, 'siteName': site, 'serverGroupName': server_group,
'webServiceName': web_service}]
client.do_request('PUT', f'conf/policies/security/webServiceCustomPolicies/{policy_name}', json_data=body)
human_readable, entry_context, raw_res = \
client.get_custom_policy_outputs(policy_name, f'Policy {policy_name} updated successfully')
return_outputs(human_readable, entry_context, raw_res)
@logger
def delete_custom_policy_command(client, args):
policy_name = args.get('policy-name')
raw_res = client.do_request('DELETE', f'conf/policies/security/webServiceCustomPolicies/{policy_name}')
return_outputs(f'Policy {policy_name} deleted successfully', {}, raw_res)
def main():
params = demisto.params()
# get the service API url
base_url = params.get('url')
verify_certificate = not params.get('insecure', False)
proxy = params.get('proxy', False)
credentials = params.get('credentials')
username = credentials['identifier'] if credentials else ''
password = credentials['password'] if credentials else ''
LOG(f'Command being called is {demisto.command()}')
try:
client = Client(
base_url=base_url,
verify=verify_certificate,
auth=(username, password),
proxy=proxy)
command = demisto.command()
args = demisto.args()
commands = {'test-module': test_module,
'imperva-waf-ip-group-list': ip_group_list_command,
'imperva-waf-ip-group-list-entries': ip_group_list_entries_command,
'imperva-waf-ip-group-remove-entries': ip_group_remove_entries_command,
'imperva-waf-sites-list': sites_list_command,
'imperva-waf-server-group-list': server_groups_list_command,
'imperva-waf-server-group-list-policies': server_group_policies_list_command,
'imperva-waf-web-service-custom-policy-list': custom_policy_list_command,
'imperva-waf-web-service-custom-policy-get': get_custom_policy_command,
'imperva-waf-ip-group-create': create_ip_group_command,
'imperva-waf-ip-group-update-entries': update_ip_group_command,
'imperva-waf-ip-group-delete': delete_ip_group_command,
'imperva-waf-web-service-custom-policy-create': create_custom_policy_command,
'imperva-waf-web-service-custom-policy-update': update_custom_policy_command,
'imperva-waf-web-service-custom-policy-delete': delete_custom_policy_command,
}
if command in commands:
commands[command](client, args)
else:
raise NotImplementedError(f'Command "{command}" is not implemented.')
# Log exceptions
except Exception as e:
return_error(f'Unexpected error: {str(e)}', error=traceback.format_exc())
if __name__ in ('__main__', '__builtin__', 'builtins'):
main()
|
example/trade/post_batch_create_order.py | bailzx5522/huobi_Python | 611 | 12778947 | <gh_stars>100-1000
import time
from huobi.client.trade import TradeClient
from huobi.constant import *
from huobi.utils import *
trade_client = TradeClient(api_key=g_api_key, secret_key=g_secret_key)
client_order_id_header = str(int(time.time()))
symbol_eosusdt = "eosusdt"
client_order_id_eos_01 = client_order_id_header + symbol_eosusdt + "01"
client_order_id_eos_02 = client_order_id_header + symbol_eosusdt + "02"
client_order_id_eos_03 = client_order_id_header + symbol_eosusdt + "03"
buy_limit_eos_01 = {
"account_id":g_account_id,
"symbol":symbol_eosusdt,
"order_type":OrderType.BUY_LIMIT,
"source":OrderSource.API,
"amount":50,
"price": 0.12,
"client_order_id" : client_order_id_eos_01
}
buy_limit_eos_02 = {
"account_id":g_account_id,
"symbol":symbol_eosusdt,
"order_type":OrderType.BUY_LIMIT,
"source": OrderSource.API,
"amount":7,
"price": 0.80,
"client_order_id" : client_order_id_eos_02
}
buy_limit_eos_03 = {
"account_id":g_account_id,
"symbol":symbol_eosusdt,
"order_type":OrderType.BUY_LIMIT,
"source": OrderSource.API,
"amount":20,
"price": 0.252,
"client_order_id" : client_order_id_eos_03
}
order_config_list = [
buy_limit_eos_01,
buy_limit_eos_02,
buy_limit_eos_03
]
create_result = trade_client.batch_create_order(order_config_list=order_config_list)
LogInfo.output_list(create_result)
order_id_list = []
if create_result and len(create_result):
for item in create_result:
order_id_list.append(item.order_id)
result = trade_client.cancel_orders(symbol_eosusdt, order_id_list)
result.print_object()
|
evosax/experimental/decodings/random.py | RobertTLange/evosax | 102 | 12778956 | import jax
import chex
from typing import Union, Optional
from .decoder import Decoder
from ...utils import ParameterReshaper
class RandomDecoder(Decoder):
def __init__(
self,
num_encoding_dims: int,
placeholder_params: Union[chex.ArrayTree, chex.Array],
rng: chex.PRNGKey = jax.random.PRNGKey(0),
rademacher: bool = False,
identity: bool = False,
n_devices: Optional[int] = None,
):
super().__init__(
num_encoding_dims, placeholder_params, identity, n_devices
)
self.rademacher = rademacher
# Instantiate base reshaper class
self.base_reshaper = ParameterReshaper(
placeholder_params, identity, n_devices
)
# Sample a random matrix - Gaussian or Rademacher (+1/-1)
if not self.rademacher:
self.project_matrix = jax.random.normal(
rng, (self.num_encoding_dims, self.base_reshaper.total_params)
)
else:
self.project_matrix = jax.random.rademacher(
rng, (self.num_encoding_dims, self.base_reshaper.total_params)
)
def reshape(self, x: chex.Array) -> chex.ArrayTree:
"""Perform reshaping for random projection case."""
# 1. Project parameters to raw dimensionality using pre-sampled matrix
project_x = (
x @ self.project_matrix
) # (popsize, num_enc_dim) x (num_enc_dim, num_dims)
# 2. Reshape using base reshaper class
x_reshaped = self.base_reshaper.reshape(project_x)
return x_reshaped
def reshape_single(self, x: chex.Array) -> chex.ArrayTree:
"""Reshape a single flat vector using random projection matrix."""
x_re = x.reshape(1, self.num_encoding_dims)
# 1. Project parameters to raw dimensionality using pre-sampled matrix
project_x = (x_re @ self.project_matrix).squeeze()
# 2. Reshape using base reshaper class
x_reshaped = self.base_reshaper.reshape_single(project_x)
return x_reshaped
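# Shape sketch (illustrative, not part of the original module): with
# num_encoding_dims=16 and a placeholder_params pytree whose flattened size
# (base_reshaper.total_params) is 1024, project_matrix has shape (16, 1024).
# reshape() maps a population batch of shape (popsize, 16) to (popsize, 1024)
# before handing it to the base ParameterReshaper, and reshape_single() does
# the same for a single flat vector of shape (16,).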
|
splashgen/components/CTAButton.py | ndejong/splashgen | 246 | 12778993 | from splashgen import Component
class CTAButton(Component):
def __init__(self, link: str, text: str) -> None:
self.link = link
self.text = text
def render(self) -> str:
return f'<a href="{self.link}" class="btn btn-primary btn-lg px-4">{self.text}</a>'
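# Example usage (link and text are illustrative):
#   CTAButton(link="https://example.com/signup", text="Get started").render()
# returns:
#   '<a href="https://example.com/signup" class="btn btn-primary btn-lg px-4">Get started</a>'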
|
myia/operations/macro_embed.py | strint/myia | 222 | 12779031 | """Implementation of the 'embed' operation."""
from ..lib import Constant, SymbolicKeyInstance, macro, sensitivity_transform
@macro
async def embed(info, x):
"""Return a constant that embeds the identity of the input node."""
typ = sensitivity_transform(await x.get())
key = SymbolicKeyInstance(x.node, typ)
return Constant(key)
__operation_defaults__ = {
"name": "embed",
"registered_name": "embed",
"mapping": embed,
"python_implementation": None,
}
|
scripts/tldr_analyze_nuggets.py | allenai/scitldr | 628 | 12779035 | """
Some analysis of informational content of TLDR-Auth and TLDR-PR
"""
import os
import csv
from collections import Counter, defaultdict
INFILE = 'tldr_analyze_nuggets/tldr_auth_pr_gold_nuggets_2020-03-31.csv'
# Q1: How many nuggets do TLDRs contain?
# A: Interesting, both author and PR have nearly identical distributions:
# From most to least common: 3 nuggets -> 2 nuggets -> 4 nuggets -> 1 nugget -> ...
# Auth proportions: (34%) (26%) (18%) (11%)
# PR proportions: (32%) (30%) (26%) ( 9%)
author_num_nuggets_to_count = {i: 0 for i in range(0,7)}
pr_num_nuggets_to_count = {i: 0 for i in range(0,7)}
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
num_nuggets = sum(map(int, [row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif']]))
if row['auth_pr'] == 'auth_gold':
author_num_nuggets_to_count[num_nuggets] += 1
if row['auth_pr'] == 'pr_gold':
pr_num_nuggets_to_count[num_nuggets] += 1
print({k: f'{100*v/76:.2f}' for k, v in author_num_nuggets_to_count.items()})
print({k: f'{100*v/76:.2f}' for k, v in pr_num_nuggets_to_count.items()})
# Q2: What are the most common TLDR templates?
# A: Interesting, the top 2 templates (total 42 occurrences) are same between Authors and PRs.
# a) (area_field_topic, mode_of_contrib, details_descrip)
# b) (area_field_topic, mode_of_contrib)
# After that, next 3 starts deviating a bit, but still with the same base:
# authors = (area_field_topic, mode_of_contrib, results_findings)
# (area_field_topic, problem_motivation, mode_of_contrib)
# (area_field_topic, mode_of_contrib, details_descrip, value_signif)
# pr = (area_field_topic, problem_motivation, mode_of_contrib, details_descrip)
# = (area_field_topic, details_descrip)
# = (area_field_topic, mode_of_contrib, results_findings) # same as top 3rd in Auth
author_template_to_count = Counter()
pr_template_to_count = Counter()
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
template = (row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif'])
if row['auth_pr'] == 'auth_gold':
author_template_to_count[template] += 1
if row['auth_pr'] == 'pr_gold':
pr_template_to_count[template] += 1
print(author_template_to_count.most_common())
print(pr_template_to_count.most_common())
# Q3: How often do 'area_field_topic' and 'mode_of_contrib' co-occur?
# n_auth = 48/76 = 63%
# n_pr = 54/76 = 71%
n_auth = 0
n_pr = 0
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
if row['area_field_topic'] == '1' and row['mode_of_contrib'] == '1':
if row['auth_pr'] == 'auth_gold':
n_auth += 1
if row['auth_pr'] == 'pr_gold':
n_pr += 1
# Q4: Find examples with exactly the same nuggets but different styles
#
# H1-IBSgMz
# B16yEqkCZ
# SySpa-Z0Z
# rJegl2C9K7
# HJWpQCa7z
# rkgpCoRctm
# rkxkHnA5tX
# B1e9csRcFm
# r1kj4ACp-
# Hk91SGWR-
# r1GaAjRcF7
# SkGMOi05FQ
#
pid_to_templates = defaultdict(set)
with open(INFILE) as f_in:
reader = csv.DictReader(f_in)
for row in reader:
template = (row['area_field_topic'], row['problem_motivation'], row['mode_of_contrib'], row['details_descrip'], row['results_findings'], row['value_signif'])
pid_to_templates[row['paper_id']].add(template)
for pid, templates in pid_to_templates.items():
if len(templates) == 1:
print(pid)
|
SubredditBirthdays/sb.py | voussoir/redd | 444 | 12779036 | import argparse
import bot3
import datetime
import praw3 as praw
import random
import sqlite3
import string
import subprocess
import sys
import time
import tkinter
import traceback
import types
from voussoirkit import betterhelp
from voussoirkit import mutables
from voussoirkit import operatornotify
from voussoirkit import pipeable
from voussoirkit import sqlhelpers
from voussoirkit import vlogging
log = vlogging.getLogger(__name__, 'sb')
USERAGENT = '''
/u/GoldenSights SubredditBirthdays data collection:
Gathering the creation dates of subreddits for visualization.
More at https://github.com/voussoir/reddit/tree/master/SubredditBirthdays
'''.replace('\n', ' ').strip()
LOWERBOUND_STR = '2qh0j'
LOWERBOUND_INT = 4594339
FORMAT_MEMBER = '{idstr:>5s}, {human}, {nsfw}, {name:<25s} {subscribers:>10,}'
FORMAT_MESSAGE_NEW = 'New: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers}'
FORMAT_MESSAGE_UPDATE = 'Upd: {idstr:>5s} : {human} : {nsfw} : {name} : {subscribers} ({subscriber_diff})'
RANKS_UP_TO = 20000
# For the files sorted by subscriber count, display ranks up to this many.
GOODCHARS = string.ascii_letters + string.digits + '_'
DB_INIT = '''
BEGIN;
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS subreddits(
idint INT,
idstr TEXT,
created INT,
human TEXT,
name TEXT,
nsfw INT,
subscribers INT,
jumble INT,
subreddit_type INT,
submission_type INT,
last_scanned INT
);
CREATE INDEX IF NOT EXISTS index_subreddits_idstr ON subreddits(idstr);
CREATE INDEX IF NOT EXISTS index_subreddits_name ON subreddits(name);
CREATE INDEX IF NOT EXISTS index_subreddits_created ON subreddits(created);
CREATE INDEX IF NOT EXISTS index_subreddits_subscribers ON subreddits(subscribers);
--CREATE INDEX IF NOT EXISTS index_subreddits_idint ON subreddits(idint);
--CREATE INDEX IF NOT EXISTS index_subreddits_last_scanned ON subreddits(last_scanned);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS suspicious(
idint INT,
idstr TEXT,
name TEXT,
subscribers INT,
noticed INT
);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS popular(
idstr TEXT,
last_seen INT
);
CREATE INDEX IF NOT EXISTS index_popular_idstr on popular(idstr);
CREATE INDEX IF NOT EXISTS index_popular_last_seen on popular(last_seen);
--------------------------------------------------------------------------------
CREATE TABLE IF NOT EXISTS jumble(
idstr TEXT,
last_seen INT
);
CREATE INDEX IF NOT EXISTS index_jumble_idstr on jumble(idstr);
CREATE INDEX IF NOT EXISTS index_jumble_last_seen on jumble(last_seen);
--------------------------------------------------------------------------------
COMMIT;
'''
sql = sqlite3.connect('D:\\git\\reddit\\subredditbirthdays\\sb.db')
sqlhelpers.executescript(conn=sql, script=DB_INIT)
cur = sql.cursor()
# These numbers are used for interpreting the tuples that come from SELECT
SQL_SUBREDDIT_COLUMNS = [
'idint',
'idstr',
'created',
'human',
'name',
'nsfw',
'subscribers',
'subreddit_type',
'submission_type',
'last_scanned',
]
SQL_SUSPICIOUS_COLUMNS = [
'idint',
'idstr',
'name',
'subscribers',
'noticed',
]
SQL_SUBREDDIT = {key: index for (index, key) in enumerate(SQL_SUBREDDIT_COLUMNS)}
noinfolist = []
monthnumbers = {
'Jan': '01',
'Feb': '02',
'Mar': '03',
'Apr': '04',
'May': '05',
'Jun': '06',
'Jul': '07',
'Aug': '08',
'Sep': '09',
'Oct': '10',
'Nov': '11',
'Dec': '12',
}
SUBREDDIT_TYPE = {
'public': 0,
'restricted': 1,
'private': 2,
'archived': 3,
None: 4,
'employees_only': 5,
'gold_restricted': 6,
'gold_only': 7,
'user': 8,
}
SUBMISSION_TYPE = {
'any': 0,
'link': 1,
'self': 2,
None: 3,
}
SUBREDDIT_TYPE_REVERSE = {v: k for (k, v) in SUBREDDIT_TYPE.items()}
SUBMISSION_TYPE_REVERSE = {v: k for (k, v) in SUBMISSION_TYPE.items()}
SUBMISSION_OBJ = praw.objects.Submission
SUBREDDIT_OBJ = praw.objects.Subreddit
COMMENT_OBJ = praw.objects.Comment
r = None
def login():
global r
print('Logging in.')
r = praw.Reddit(USERAGENT)
bot3.login(r)
def base36encode(number, alphabet='0123456789abcdefghijklmnopqrstuvwxyz'):
'''Converts an integer to a base36 string.'''
if not isinstance(number, int):
raise TypeError('number must be an integer')
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(alphabet):
return sign + alphabet[number]
while number != 0:
number, i = divmod(number, len(alphabet))
base36 = alphabet[i] + base36
return sign + base36
def base36decode(number):
return int(number, 36)
def b36(i):
if type(i) == int:
return base36encode(i)
if type(i) == str:
return base36decode(i)
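# Example: b36() round-trips between the two LOWERBOUND constants defined above:
#   b36(LOWERBOUND_STR) == LOWERBOUND_INT   # b36('2qh0j') -> 4594339
#   b36(LOWERBOUND_INT) == LOWERBOUND_STR   # b36(4594339) -> '2qh0j'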
def chunklist(inputlist, chunksize):
if len(inputlist) < chunksize:
return [inputlist]
else:
outputlist = []
while len(inputlist) > 0:
outputlist.append(inputlist[:chunksize])
inputlist = inputlist[chunksize:]
return outputlist
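# Example: chunklist(['a', 'b', 'c', 'd', 'e'], 2) -> [['a', 'b'], ['c', 'd'], ['e']]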
def completesweep(sleepy=0, orderby='subscribers desc', query=None):
cur = sql.cursor()
if query is None:
if orderby is None:
cur.execute('SELECT idstr FROM subreddits WHERE created > 0')
else:
cur.execute('SELECT idstr FROM subreddits WHERE created > 0 ORDER BY %s' % orderby)
elif query == 'restricted':
cur.execute('SELECT idstr FROM subreddits WHERE created > 0 AND subreddit_type != 0 ORDER BY subscribers DESC')
else:
cur.execute(query)
try:
while True:
hundred = (cur.fetchone() for x in range(100))
hundred = (row for row in hundred if row is not None)
hundred = [idstr for (idstr,) in hundred]
if len(hundred) == 0:
break
for retry in range(20):
try:
processmega(hundred, commit=False)
break
except Exception:
traceback.print_exc()
time.sleep(sleepy)
except KeyboardInterrupt:
pass
except Exception:
traceback.print_exc()
sql.commit()
def fetchgenerator(cur):
while True:
fetch = cur.fetchone()
if fetch is None:
break
yield fetch
def get_jumble_subreddits():
cur.execute('SELECT idstr FROM jumble')
fetch = [x[0] for x in cur.fetchall()]
fetch = ['\'%s\'' % x for x in fetch]
fetch = '(' + ','.join(fetch) + ')'
query = 'SELECT * FROM subreddits WHERE idstr IN %s' % fetch
cur.execute(query)
subreddits = cur.fetchall()
#subreddits = []
#for subreddit in fetch:
# cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [subreddit])
# subreddits.append(cur.fetchone())
return subreddits
def get_newest_sub():
brandnewest = list(r.get_new_subreddits(limit=1))[0]
return brandnewest.id
def get_now():
return datetime.datetime.now(datetime.timezone.utc).timestamp()
def humanize(timestamp):
day = datetime.datetime.utcfromtimestamp(timestamp)
human = datetime.datetime.strftime(day, "%b %d %Y %H:%M:%S UTC")
return human
def modernize(limit=None):
cur.execute('SELECT * FROM subreddits ORDER BY created DESC LIMIT 1')
finalitem = cur.fetchone()
print('Current final item:')
print(finalitem[SQL_SUBREDDIT['idstr']], finalitem[SQL_SUBREDDIT['human']], finalitem[SQL_SUBREDDIT['name']])
finalid = finalitem[SQL_SUBREDDIT['idint']]
print('Newest item:')
newestid = get_newest_sub()
print(newestid)
newestid = b36(newestid)
if limit is not None:
newestid = min(newestid, finalid+limit-1)
modernlist = [b36(x) for x in range(finalid, newestid+1)]
if len(modernlist) > 0:
processmega(modernlist, commit=False)
sql.commit()
def modernize_forever(limit=10000):
while True:
try:
modernize(limit=limit)
except Exception:
log.warning(traceback.format_exc())
time.sleep(300)
def modsfromid(subid):
if 't5_' not in subid:
subid = 't5_' + subid
subreddit = r.get_info(thing_id=subid)
mods = list(subreddit.get_moderators())
for m in mods:
print(m)
return mods
def normalize_subreddit_object(thing):
'''
Given a string, Subreddit, Submission, or Comment object, return
a Subreddit object.
'''
if isinstance(thing, SUBREDDIT_OBJ):
return thing
if isinstance(thing, str):
return r.get_subreddit(thing)
if isinstance(thing, (SUBMISSION_OBJ, COMMENT_OBJ)):
return thing.subreddit
raise ValueError('Dont know how to normalize', type(thing))
def process(
subreddit,
commit=True,
):
'''
Retrieve the API info for the subreddit and save it to the database.
subreddit:
The subreddit(s) to process. Can be an individual or a list of
strings or Subreddit, Submission, or Comment objects.
'''
subreddits = []
processed_subreddits = []
if isinstance(subreddit, (tuple, list, set, types.GeneratorType)):
subreddits = iter(subreddit)
else:
subreddits = [subreddit]
for subreddit in subreddits:
subreddit = normalize_subreddit_object(subreddit)
processed_subreddits.append(subreddit)
created = subreddit.created_utc
created_human = humanize(subreddit.created_utc)
idstr = subreddit.id
is_nsfw = int(subreddit.over18 or 0)
name = subreddit.display_name
subscribers = subreddit.subscribers or 0
subreddit_type = SUBREDDIT_TYPE[subreddit.subreddit_type]
submission_type = SUBMISSION_TYPE[subreddit.submission_type]
now = int(get_now())
cur.execute('SELECT * FROM subreddits WHERE idstr == ?', [idstr])
f = cur.fetchone()
if f is None:
message = FORMAT_MESSAGE_NEW.format(
idstr=idstr,
human=created_human,
nsfw=is_nsfw,
name=name,
subscribers=subscribers,
)
print(message)
data = {
'idint': b36(idstr),
'idstr': idstr,
'created': created,
'human': created_human,
'nsfw': is_nsfw,
'name': name,
'subscribers': subscribers,
'subreddit_type': subreddit_type,
'submission_type': submission_type,
'last_scanned': now,
}
(qmarks, bindings) = sqlhelpers.insert_filler(SQL_SUBREDDIT_COLUMNS, data)
query = 'INSERT INTO subreddits VALUES(%s)' % qmarks
cur.execute(query, bindings)
else:
old_subscribers = f[SQL_SUBREDDIT['subscribers']]
subscriber_diff = subscribers - old_subscribers
if subscribers == 0 and old_subscribers > 2 and subreddit_type != SUBREDDIT_TYPE['private']:
print('SUSPICIOUS %s' % name)
data = {
'idint': b36(idstr),
'idstr': idstr,
'name': name,
'subscribers': old_subscribers,
'noticed': int(get_now()),
}
(qmarks, bindings) = sqlhelpers.insert_filler(SQL_SUSPICIOUS_COLUMNS, data)
query = 'INSERT INTO suspicious VALUES(%s)' % qmarks
cur.execute(query, bindings)
message = FORMAT_MESSAGE_UPDATE.format(
idstr=idstr,
human=created_human,
nsfw=is_nsfw,
name=name,
subscribers=subscribers,
subscriber_diff=subscriber_diff
)
print(message)
data = {
'idstr': idstr,
'subscribers': subscribers,
'subreddit_type': subreddit_type,
'submission_type': submission_type,
'last_scanned': now,
}
(query, bindings) = sqlhelpers.update_filler(data, where_key='idstr')
query = 'UPDATE subreddits %s' % query
cur.execute(query, bindings)
#cur.execute('''
# UPDATE subreddits SET
# subscribers = @subscribers,
# subreddit_type = @subreddit_type,
# submission_type = @submission_type,
# last_scanned = @last_scanned
# WHERE idstr == @idstr
# ''', data)
processed_subreddits.append(subreddit)
if commit:
sql.commit()
return processed_subreddits
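# Illustrative usage (requires a logged-in praw session; the subreddit names
# and IDs below are examples only):
#   login()
#   process('redditdev')                      # a display name string
#   process(r.get_info(thing_id='t5_2qh0j'))  # or an already-fetched object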
def process_input():
while True:
x = input('p> ')
try:
process(x)
except KeyboardInterrupt:
break
except Exception:
traceback.print_exc()
def processmega(srinput, isrealname=False, chunksize=100, docrash=False, commit=True):
'''
`srinput` can be a list of subreddit IDs or fullnames, or display names
if `isrealname` is also True.
isrealname:
Interpret `srinput` as a list of actual subreddit names, not IDs.
chunksize:
The number of fullnames to get from api/info at once.
docrash:
If False, ignore HTTPExceptions and keep moving forward.
'''
global noinfolist
if type(srinput) == str:
srinput = srinput.replace(' ', '')
srinput = srinput.split(',')
if isrealname:
for subname in srinput:
process(subname)
return
processed_subreddits = []
remaining = len(srinput)
for x in range(len(srinput)):
if 't5_' not in srinput[x]:
srinput[x] = 't5_' + srinput[x]
srinput = chunklist(srinput, chunksize)
for subset in srinput:
try:
print(subset[0] + ' - ' + subset[-1], remaining)
subreddits = r.get_info(thing_id=subset)
try:
for sub in subreddits:
processed_subreddits.extend(process(sub, commit=commit))
except TypeError:
traceback.print_exc()
noinfolist = subset[:]
if len(noinfolist) == 1:
print('Received no info. See variable `noinfolist`')
else:
#for item in noinfolist:
# processmega([item])
pass
remaining -= len(subset)
except praw.errors.HTTPException as e:
traceback.print_exc()
print(vars(e))
if docrash:
raise
return processed_subreddits
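# Illustrative usage (IDs and names below are examples only):
#   processmega(['2qh0j', '2qh0k'])                         # base36 IDs / fullnames
#   processmega('redditdev,learnpython', isrealname=True)   # real display names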
def processrand(count, doublecheck=False, sleepy=0):
'''
Gets random IDs between a known lower bound and the newest collection, and
passes them into processmega().
count:
How many you want
doublecheck:
Should it reroll duplicates before running
sleepy:
Used to sleep longer than the required 2 seconds
'''
lower = LOWERBOUND_INT
cur.execute('SELECT * FROM subreddits ORDER BY idstr DESC LIMIT 1')
upper = cur.fetchone()[SQL_SUBREDDIT['idstr']]
print('<' + b36(lower) + ',', upper + '>', end=', ')
upper = b36(upper)
totalpossible = upper - lower
print(totalpossible, 'possible')
rands = set()
for x in range(count):
rand = random.randint(lower, upper)
rand = b36(rand)
if doublecheck:
while rand in rands:
rand = random.randint(lower, upper)
rand = b36(rand)
rands.add(rand)
processmega(rands)
def show():
file_all_time = open('show\\all-time.txt', 'w')
file_all_name = open('show\\all-name.txt', 'w')
file_all_subscribers = open('show\\all-subscribers.txt', 'w')
file_dirty_time = open('show\\dirty-time.txt', 'w')
file_dirty_name = open('show\\dirty-name.txt', 'w')
file_dirty_subscribers = open('show\\dirty-subscribers.txt', 'w')
file_jumble_sfw = open('show\\jumble.txt', 'w')
file_jumble_nsfw = open('show\\jumble-nsfw.txt', 'w')
file_duplicates = open('show\\duplicates.txt', 'w')
file_missing = open('show\\missing.txt', 'w')
file_stats = open('show\\statistics.txt', 'w')
file_readme = open('README.md', 'r')
cur.execute('SELECT COUNT(idstr) FROM subreddits WHERE created != 0')
itemcount_valid = cur.fetchone()[0]
itemcount_nsfw = 0
name_lengths = {}
print(itemcount_valid, 'subreddits')
print('Writing time files.')
cur.execute('SELECT * FROM subreddits WHERE created !=0 ORDER BY created ASC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
print(itemf, file=file_all_time)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_time)
itemcount_nsfw += 1
file_all_time.close()
file_dirty_time.close()
print('Writing name files and duplicates.')
previousitem = None
inprogress = False
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY LOWER(name) ASC')
for item in fetchgenerator(cur):
if previousitem is not None and item[SQL_SUBREDDIT['name']] == previousitem[SQL_SUBREDDIT['name']]:
print(memberformat(previousitem), file=file_duplicates)
inprogress = True
elif inprogress:
print(memberformat(previousitem), file=file_duplicates)
inprogress = False
previousitem = item
name_length = len(item[SQL_SUBREDDIT['name']])
name_lengths[name_length] = name_lengths.get(name_length, 0) + 1
itemf = memberformat(item)
print(itemf, file=file_all_name)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
print(itemf, file=file_dirty_name)
file_duplicates.close()
file_all_name.close()
file_dirty_name.close()
name_lengths = {'%02d'%k: v for (k, v) in name_lengths.items()}
print('Writing subscriber files.')
ranks = {'all': 1, 'nsfw': 1}
def write_with_rank(itemf, ranktype, filehandle):
index = ranks[ranktype]
if index <= RANKS_UP_TO:
itemf += '{:>9,}'.format(index)
print(itemf, file=filehandle)
ranks[ranktype] += 1
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY subscribers DESC')
for item in fetchgenerator(cur):
itemf = memberformat(item)
write_with_rank(itemf, 'all', file_all_subscribers)
if int(item[SQL_SUBREDDIT['nsfw']]) == 1:
write_with_rank(itemf, 'nsfw', file_dirty_subscribers)
file_all_subscribers.close()
file_dirty_subscribers.close()
print('Writing jumble.')
for item in get_jumble_subreddits():
itemf = memberformat(item)
if int(item[SQL_SUBREDDIT['nsfw']]) == 0:
print(itemf, file=file_jumble_sfw)
else:
print(itemf, file=file_jumble_nsfw)
file_jumble_sfw.close()
file_jumble_nsfw.close()
print('Writing missing.')
cur.execute('SELECT * FROM subreddits WHERE created == 0 ORDER BY idstr ASC')
for item in fetchgenerator(cur):
print(item[SQL_SUBREDDIT['idstr']], file=file_missing)
file_missing.close()
print('Writing statistics.')
headline = 'Collected {0:,} subreddits\n'.format(itemcount_valid)
statisticoutput = headline + '\n\n'
statisticoutput += ' SFW: {0:,}\n'.format(itemcount_valid - itemcount_nsfw)
statisticoutput += 'NSFW: {0:,}\n\n\n'.format(itemcount_nsfw)
statisticoutput += 'Subreddit type:\n'
subreddit_types = list(SUBREDDIT_TYPE_REVERSE.keys())
subreddit_types.sort()
subreddit_types = [SUBREDDIT_TYPE_REVERSE[k] for k in subreddit_types]
for subreddit_type in subreddit_types:
index = SUBREDDIT_TYPE[subreddit_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND subreddit_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(subreddit_type), count)
statisticoutput += '\n'
statisticoutput += 'Submission type (None means approved submitters only or inaccessible):\n'
submission_types = list(SUBMISSION_TYPE_REVERSE.keys())
submission_types.sort()
submission_types = [SUBMISSION_TYPE_REVERSE[k] for k in submission_types]
for submission_type in submission_types:
index = SUBMISSION_TYPE[submission_type]
cur.execute('SELECT COUNT(*) FROM subreddits WHERE created != 0 AND submission_type == ?', [index])
count = cur.fetchone()[0]
statisticoutput += '{:>16s}: {:,}\n'.format(str(submission_type), count)
statisticoutput += '\n\n'
cur.execute('SELECT * FROM subreddits WHERE created != 0 ORDER BY created DESC limit 20000')
last20k = cur.fetchall()
timediff = last20k[0][SQL_SUBREDDIT['created']] - last20k[-1][SQL_SUBREDDIT['created']]
statisticoutput += 'Over the last 20,000 subreddits:\n'
statisticoutput += '%.2f subs are created each hour\n' % (20000 / (timediff/3600))
statisticoutput += '%.2f subs are created each day\n\n\n' % (20000 / (timediff/86400))
################################
# Breakdown by time period
# hour of day, day of week, day of month, month of year, month-year, year
def datetimedict(statsdict, strf):
statsdict[strf] = statsdict.get(strf, 0) + 1
hoddict = {}
dowdict = {}
domdict = {}
moydict = {}
myrdict = {}
yerdict = {}
print(' performing time breakdown')
cur.execute('SELECT * FROM subreddits WHERE created != 0')
for item in fetchgenerator(cur):
dt = datetime.datetime.utcfromtimestamp(item[SQL_SUBREDDIT['created']])
datetimedict(hoddict, dt.strftime('%H')) # 01
datetimedict(dowdict, dt.strftime('%A')) # Monday
datetimedict(domdict, dt.strftime('%d')) # 01
datetimedict(moydict, dt.strftime('%B')) # January
datetimedict(myrdict, dt.strftime('%b%Y')) # Jan2015
datetimedict(yerdict, dt.strftime('%Y')) # 2015
print(' forming columns')
plotnum = 0
mapping = [
{'label': 'hour of day', 'specialsort': None, 'dict': hoddict},
{'label': 'day of week', 'specialsort': 'day', 'dict': dowdict},
{'label': 'day of month', 'specialsort': None, 'dict': domdict},
{'label': 'month of year', 'specialsort': 'month', 'dict': moydict},
{'label': 'year', 'specialsort': None, 'dict': yerdict},
{'label': 'month-year', 'specialsort': 'monthyear', 'dict': myrdict},
{'label': 'name length', 'specialsort': None, 'dict': name_lengths},
]
for (index, collection) in enumerate(mapping):
d = collection['dict']
dkeys_primary = list(d.keys())
dkeys_primary.sort(key=d.get)
dkeys_secondary = specialsort(dkeys_primary, collection['specialsort'])
dvals = [d[x] for x in dkeys_secondary]
statisticoutput += collection['label'] + '\n'
for (keyindex, key) in enumerate(dkeys_primary):
val = d[key]
val = '{0:,}'.format(val)
spacer = 34 - (len(key) + len(val))
spacer = '.' * spacer
statisticoutput += key + spacer + val
statisticoutput += ' ' * 8
key = dkeys_secondary[keyindex]
val = d[key]
val = '{0:,}'.format(val)
spacer = 34 - (len(key) + len(val))
spacer = '.' * spacer
statisticoutput += key + spacer + val
statisticoutput += '\n'
statisticoutput += '\n'
if d is name_lengths:
upperlabel = 'Name Lengths'
else:
upperlabel = 'Subreddits created - %s' % collection['label']
plotbars(
filename=upperlabel,
upperlabel=upperlabel,
inputdata=[dkeys_secondary, dvals],
colormid='#43443a',
forcezero=True,
)
plotnum += 1
if d is myrdict:
# In addition to the total month graph, plot the last 15 months
plotbars(
filename=upperlabel + ' short',
upperlabel=upperlabel + ' short',
inputdata=[dkeys_secondary[-15:], dvals[-15:]],
colorbg='#272822',
colorfg='#000',
colormid='#43443a',
forcezero=True,
)
plotnum += 1
#
# Breakdown by time period
################################
print(statisticoutput, file=file_stats)
file_stats.close()
print('Updating Readme')
readmelines = file_readme.readlines()
file_readme.close()
readmelines[3] = '#####' + headline
readmelines[5] = '#####[Today\'s jumble](http://reddit.com/r/%s)\n' % jumble(nsfw=False)
file_readme = open('README.md', 'w')
file_readme.write(''.join(readmelines))
file_readme.close()
time.sleep(2)
subprocess.call('PNGCREATOR.bat', shell=True, cwd='spooky')
print()
def memberformat(member):
member = FORMAT_MEMBER.format(
idstr=member[SQL_SUBREDDIT['idstr']],
human=member[SQL_SUBREDDIT['human']],
nsfw=member[SQL_SUBREDDIT['nsfw']],
name=member[SQL_SUBREDDIT['name']],
subscribers=member[SQL_SUBREDDIT['subscribers']],
)
return member
def dictadding(targetdict, item):
if item not in targetdict:
targetdict[item] = 1
else:
targetdict[item] = targetdict[item] + 1
return targetdict
def specialsort(inlist, mode=None):
if mode == 'month':
return [
'January',
'February',
'March', 'April',
'May',
'June',
'July',
'August',
'September',
'October',
'November',
'December'
]
if mode == 'day':
return [
'Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday'
]
if mode == 'monthyear':
td = {}
for item in inlist:
nitem = item
nitem = item.replace(item[:3], monthnumbers[item[:3]])
nitem = nitem[3:] + nitem[:3]
td[item] = nitem
tdkeys = list(td.keys())
#print(td)
tdkeys.sort(key=td.get)
#print(tdkeys)
return tdkeys
if mode is None:
return sorted(inlist)
def search(
query='',
casesense=False,
filterout=[],
subscribers=0,
nsfwmode=2,
doreturn=False,
sort=None,
):
'''
Search for a subreddit by name
*str query = The search query
"query" = results where "query" is in the name
"*query" = results where "query" is at the end of the name
"query*" = results where "query" is at the beginning of the name
"*query*" = results where "query" is in the middle of the name
bool casesense = is the search case sensitive
list filterout = [list, of, words] to omit from search. Follows casesense
int subscribers = minimum number of subscribers
int nsfwmode =
0 - Clean only
1 - Dirty only
2 - All
int sort = The integer representing the sql column to sort by. Defaults
to no sort.
'''
querys = ''.join([c for c in query if c in GOODCHARS])
queryx = '%%{term}%%'.format(term=querys)
if '!' in query:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ?', [querys])
return cur.fetchone()
if nsfwmode in [0, 1]:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ? AND nsfw=?', [queryx, subscribers, nsfwmode])
else:
cur.execute('SELECT * FROM subreddits WHERE name LIKE ? AND subscribers > ?', [queryx, subscribers])
results = []
if casesense is False:
querys = querys.lower()
filterout = [x.lower() for x in filterout]
if '*' in query:
positional = True
front = query[-1] == '*'
back = query[0] == '*'
if front and back:
mid = True
front = False
back = False
else:
mid = False
else:
positional = False
lenq = len(querys)
for item in fetchgenerator(cur):
name = item[SQL_SUBREDDIT['name']]
if casesense is False:
name = name.lower()
if querys not in name:
#print('%s not in %s' % (querys, name))
continue
if (positional and front) and (name[:lenq] != querys):
#print('%s not front %s (%s)' % (querys, name, name[:lenq]))
continue
if (positional and back) and (name[-lenq:] != querys):
#print('%s not back %s (%s)' % (querys, name, name[-lenq:]))
continue
if (positional and mid) and (querys not in name[1:-1]):
#print('%s not mid %s (%s)' % (querys, name, name[1:-1]))
continue
if any(filters in name for filters in filterout):
#print('%s not filter %s' % (querys, name))
continue
results.append(item)
if len(results) == 0:
if doreturn:
return []
else:
return
if sort is not None:
is_numeric = isinstance(results[0][sort], int)
if is_numeric:
results.sort(key=lambda x: x[sort], reverse=True)
else:
results.sort(key=lambda x: x[sort].lower())
if doreturn is True:
return results
else:
for item in results:
print(item)
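# Illustrative examples of the wildcard syntax documented above (results
# depend on the contents of the local database):
#   search('ask*', subscribers=10000, sort=SQL_SUBREDDIT['subscribers'])  # names starting with "ask"
#   search('*bot', nsfwmode=0, doreturn=True)                             # clean names ending in "bot"
#   search('*game*', filterout=['videogames'])                            # "game" strictly inside the name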
def findwrong():
cur.execute('SELECT * FROM subreddits WHERE name != ?', ['?'])
fetch = cur.fetchall()
fetch.sort(key=lambda x: x[SQL_SUBREDDIT['idstr']])
#sorted by ID
fetch = fetch[25:]
pos = 0
wrongs = []
while pos < len(fetch)-5:
if fetch[pos][1] > fetch[pos+1][1]:
wrongs.append(str(fetch[pos-1]))
wrongs.append(str(fetch[pos]))
wrongs.append(str(fetch[pos+1]) + "\n")
pos += 1
for wrong in wrongs:
print(wrong)
def processjumble(count, nsfw=False):
for x in range(count):
sub = r.get_random_subreddit(nsfw=nsfw)
process(sub, commit=False)
last_seen = int(get_now())
cur.execute('SELECT * FROM jumble WHERE idstr == ?', [sub.id])
if cur.fetchone() is None:
cur.execute('INSERT INTO jumble VALUES(?, ?)', [sub.id, last_seen])
else:
cur.execute(
'UPDATE jumble SET last_seen = ? WHERE idstr == ?',
                [last_seen, sub.id]
)
sql.commit()
def processpopular(count, sort='hot'):
subreddit = r.get_subreddit('popular')
if sort == 'hot':
submissions = subreddit.get_hot(limit=count)
elif sort == 'new':
submissions = subreddit.get_new(limit=count)
else:
raise ValueError(sort)
submissions = list(submissions)
subreddit_ids = list({submission.subreddit_id for submission in submissions})
subreddits = processmega(subreddit_ids, commit=False)
last_seen = int(get_now())
for subreddit in subreddits:
cur.execute('SELECT * FROM popular WHERE idstr == ?', [subreddit.id])
if cur.fetchone() is None:
cur.execute('INSERT INTO popular VALUES(?, ?)', [subreddit.id, last_seen])
else:
cur.execute(
'UPDATE popular SET last_seen = ? WHERE idstr == ?',
[last_seen, subreddit.id]
)
sql.commit()
def jumble(count=20, nsfw=False):
subreddits = get_jumble_subreddits()
if nsfw is not None:
subreddits = [x for x in subreddits if x[SQL_SUBREDDIT['nsfw']] == int(bool(nsfw))]
random.shuffle(subreddits)
subreddits = subreddits[:count]
subreddits = [f[:-1] for f in subreddits]
jumble_string = [x[SQL_SUBREDDIT['name']] for x in subreddits]
jumble_string = '+'.join(jumble_string)
output = [jumble_string, subreddits]
return jumble_string
def rounded(x, rounding=100):
return int(round(x/rounding)) * rounding
def plotbars(
filename,
inputdata,
upperlabel='Subreddits created',
colorbg="#fff",
colorfg="#000",
colormid="#888",
forcezero=False,
):
'''
Create postscript vectors of data
filename = Name of the file without extension
inputdata = A list of two lists. First list has the x axis labels, second list
    has the y axis data. x label 14 corresponds to y datum 14, etc.
'''
print(' Printing', filename)
    t = tkinter.Tk()
canvas = tkinter.Canvas(t, width=3840, height=2160, bg=colorbg)
canvas.pack()
#Y axis
canvas.create_line(430, 250, 430, 1755, width=10, fill=colorfg)
#X axis
canvas.create_line(430, 1750, 3590, 1750, width=10, fill=colorfg)
dkeys = inputdata[0]
dvals = inputdata[1]
entrycount = len(dkeys)
availablespace = 3140
    availableheight = 1490
entrywidth = availablespace / entrycount
#print(dkeys, dvals, "Width:", entrywidth)
smallest = min(dvals)
bottom = int(smallest*0.75) - 5
bottom = 0 if bottom < 8 else rounded(bottom, 10)
if forcezero:
bottom = 0
largest = max(dvals)
top = int(largest + (largest / 5))
top = rounded(top, 10)
print(bottom, top)
span = top - bottom
perpixel = span / availableheight
curx = 445
cury = 1735
labelx = 420
labely = 255
#canvas.create_text(labelx, labely, text=str(top), font=("Consolas", 72), anchor="e")
labelspan = 130
canvas.create_text(175, 100, text=upperlabel, font=("Consolas", 72), anchor="w", fill=colorfg)
for x in range(12):
value = int(top -((labely - 245) * perpixel))
value = rounded(value, 10)
value = '{0:,}'.format(value)
canvas.create_text(labelx, labely, text=value, font=("Consolas", 72), anchor="e", fill=colorfg)
canvas.create_line(430, labely, 3590, labely, width=2, fill=colormid)
labely += labelspan
for entrypos in range(entrycount):
entry = dkeys[entrypos]
entryvalue = dvals[entrypos]
entryx0 = curx + 10
entryx1 = entryx0 + (entrywidth-10)
curx += entrywidth
entryy0 = cury
entryy1 = entryvalue - bottom
entryy1 = entryy1/perpixel
#entryy1 -= bottom
#entryy1 /= perpixel
entryy1 = entryy0 - entryy1
#print(perpixel, entryy1)
#print(entry, entryx0,entryy0, entryx1, entryy1)
canvas.create_rectangle(entryx0, entryy0, entryx1, entryy1, fill=colorfg, outline=colorfg)
font0x = entryx0 + (entrywidth / 2)
font0y = entryy1 - 5
font1y = 1760
entryvalue = round(entryvalue)
fontsize0 = len(str(entryvalue))
fontsize0 = round(entrywidth / fontsize0) + 3
fontsize0 = 100 if fontsize0 > 100 else fontsize0
fontsize1 = len(str(entry))
fontsize1 = round(1.5 * entrywidth / fontsize1) + 5
fontsize1 = 60 if fontsize1 > 60 else fontsize1
canvas.create_text(font0x, font0y, text=entryvalue, font=("Consolas", fontsize0), anchor="s", fill=colorfg)
canvas.create_text(font0x, font1y, text=entry, font=("Consolas", fontsize1), anchor="n", fill=colorfg)
canvas.update()
print(' Done')
canvas.postscript(file=f'spooky\\{filename}.ps', width=3840, height=2160)
t.geometry("1x1+1+1")
t.update()
t.destroy()
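# Illustrative call to plotbars() (hedged sketch; like the code above it assumes a
# display available to tkinter and a local "spooky" directory for the postscript
# output; the labels and counts here are made up):
#
#     labels = ['2013', '2014', '2015']
#     counts = [1200, 3400, 5600]
#     plotbars('subs_per_year', [labels, counts], upperlabel='Subreddits created')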
def _idle():
while True:
try:
modernize()
processpopular(100, 'new')
processjumble(30, nsfw=False)
processjumble(30, nsfw=True)
print('Great job!')
except Exception:
traceback.print_exc()
time.sleep(180)
# Command line #####################################################################################
DOCSTRING = '''
Subreddit Birthdays
===================
{modernize_forever}
{modernize_once}
'''
SUB_DOCSTRINGS = dict(
modernize_forever='''
modernize_forever:
Gather new subreddits forever.
''',
modernize_once='''
modernize_once:
Gather new subreddits once.
''',
)
DOCSTRING = betterhelp.add_previews(DOCSTRING, SUB_DOCSTRINGS)
NOTIFY_EVERY_LINE = mutables.Boolean(False)
@pipeable.ctrlc_return1
def modernize_once_argparse(args):
login()
modernize(limit=args.limit)
return 0
@pipeable.ctrlc_return1
def modernize_forever_argparse(args):
login()
NOTIFY_EVERY_LINE.set(True)
modernize_forever()
return 0
@operatornotify.main_decorator(subject='sb', notify_every_line=NOTIFY_EVERY_LINE)
@vlogging.main_decorator
def main(argv):
parser = argparse.ArgumentParser(description=DOCSTRING)
subparsers = parser.add_subparsers()
p_modernize_once = subparsers.add_parser('modernize_once', aliases=['modernize-once'])
p_modernize_once.add_argument('--limit', default=None)
p_modernize_once.set_defaults(func=modernize_once_argparse)
p_modernize_forever = subparsers.add_parser('modernize_forever', aliases=['modernize-forever'])
p_modernize_forever.set_defaults(func=modernize_forever_argparse)
return betterhelp.subparser_main(argv, parser, DOCSTRING, SUB_DOCSTRINGS)
if __name__ == '__main__':
raise SystemExit(main(sys.argv[1:]))
|
src/promnesia/__init__.py | halhenke/promnesia | 1,327 | 12779090 | from pathlib import Path
from .common import PathIsh, Visit, Source, last, Loc, Results, DbVisit, Context, Res
# add deprecation warning so eventually this may converted to a namespace package?
import warnings
warnings.warn("DEPRECATED! Please import directly from 'promnesia.common', e.g. 'from promnesia.common import Visit, Source, Results'", DeprecationWarning)
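# Hedged migration example: an import such as `from promnesia import Visit, Source`
# should become `from promnesia.common import Visit, Source`.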
|
examples/fantom/rename.py | pkuksa/FILER_giggle | 210 | 12779108 | import sys
if len(sys.argv) != 4:
sys.stderr.write('usage:\t' + \
sys.argv[0] + \
' <name2library file>' + \
' <expression count matrix file>' + \
' <out dir>\n')
sys.exit(1)
name2library_file=sys.argv[1]
expression_count_matrix_file=sys.argv[2]
out_dir=sys.argv[3]
files={}
names = {}
for l in open(name2library_file, 'r'):
A = l.rstrip().split('\t')
names[A[1]] = A[0]
header = []
for l in open(expression_count_matrix_file, 'r'):
A = l.rstrip().split()
if A[0] == 'Id':
header = A[1:]
print len(header)
0/1
else:
i = 0
for a in A[1:]:
if a != '0':
if names[header[i]] not in files:
files[names[header[i]]] = \
open(out_dir + \
'/' + \
names[header[i]] + \
'.bed',
'w')
files[names[header[i]]].write( \
A[0].replace(':','\t').replace('-','\t') + \
'\t' + a + '\n')
i+=1
|
plots/lasium_paper/plot_accuracy_based_on_gan.py | siavash-khodadadeh/MetaLearning-TF2.0 | 102 | 12779122 | <gh_stars>100-1000
import cv2
import matplotlib.pyplot as plt
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
import numpy as np
def plot_img(img_name, location, index, zoom=0.1):
plt.scatter(index, accs[epochs.index(index)] + 1, color='#0D7377', linewidths=0.5, marker='v')
plt.plot((index, location[0]), (accs[epochs.index(index)], location[1]), '--', color='#0D7377', alpha=1)
img = plt.imread(f'./gan_images/{img_name}.png')
img = cv2.resize(img, (350, 350))
# img = img[50:-50, 50:-50, :]
ax = plt.gca()
im = OffsetImage(img, zoom=zoom)
ab = AnnotationBbox(im, location, xycoords='data', frameon=True, pad=0.2)
ax.add_artist(ab)
ax.update_datalim(np.column_stack(list(location)))
ax.autoscale()
return ab
def smooth_data(accs, weight):
last = accs[0]
for i in range(1, len(accs)):
accs[i] = last * weight + (1 - weight) * accs[i]
last = accs[i]
return accs
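# Hedged note: smooth_data() above is an exponential moving average. For example,
# smooth_data([50, 70, 80], 0.7) keeps 50, then 0.7*50 + 0.3*70 = 56.0, then
# 0.7*56 + 0.3*80 = 63.2.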
epochs = [0, 10, 20, 30, 40, 50, 100, 150, 200, 300, 400, 500]
accs = [51.95, 67.50, 71.26, 77.34, 77.67, 77.35, 78.14, 79.99, 78.21, 77.94, 80.51, 76.49]
accs = smooth_data(accs, 0.7)
accs_ci = [0.66, 0.71, 0.68, 0.62, 0.63, 0.64, 0.63, 0.60, 0.63, 0.64, 0.60, 0.67]
training_from_scratch = [51.64] * len(accs)
bottom = [acc - ci for acc, ci in zip(accs, accs_ci)]
top = [acc + ci for acc, ci in zip(accs, accs_ci)]
plt.plot(epochs, accs, color='b', label='LASIUM-N')
plt.plot(epochs, bottom, '--', color='#32E0C4', alpha=0.2)
plt.plot(epochs, top, '--', color='#32E0C4', alpha=0.2)
plt.plot(epochs, training_from_scratch, '--', color='r', alpha=0.5, label='baseline')
plt.fill_between(epochs, bottom, top, color='#32E0C4', alpha=.1)
plt.xticks([10, 30, 50, 100, 200, 300, 400, 500])
plt.xlabel('# GAN training epochs', fontsize=14)
plt.yticks([40, 50, 60, 70, 80, 100])
plt.ylabel('Accuracy (%)', fontsize=14)
# plt images
plot_img('00_4', location=(10, 85), index=0)
plot_img('10_4', location=(40, 90), index=10)
plot_img('30_4', location=(70, 85), index=30)
plot_img('50_4', location=(100, 90), index=50)
plot_img('100_4', location=(130, 85), index=100)
plot_img('200_4', location=(190, 90), index=200)
plot_img('300_4', location=(300, 85), index=300)
plot_img('400_4', location=(400, 90), index=400)
plot_img('500_4', location=(500, 85), index=500)
plt.scatter(
0, accs[epochs.index(0)] + 1, color='#0D7377', linewidths=0.5, marker='v', label='Generated image at epoch'
)
plt.subplots_adjust(bottom=0.1, top=0.9, right=0.98, left=0.1)
plt.legend(loc='best')
# plt.show()
plt.savefig('./outputs/accuracy_based_on_gan.pdf', dpi=300)
|
WebMirror/management/rss_parser_funcs/feed_parse_extractKitchennovelCom.py | fake-name/ReadableWebProxy | 193 | 12779136 | def extractKitchennovelCom(item):
'''
Parser for 'kitchennovel.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
tagmap = [
('Strange World Alchemist Chef', 'Strange World Alchemist Chef', 'translated'),
('Imperial Chef Rookie', 'Imperial Chef Rookie', 'translated'),
('Daddy Fantasy World', 'Daddy Fantasy World', 'translated'),
('Here Comes the Lady Chef', 'Here Comes the Lady Chef', 'translated'),
('Different World Okonomiyaki Chain Store', 'Different World Okonomiyaki Chain Store', 'translated'),
('Strange World Little Cooking Saint', 'Strange World Little Cooking Saint', 'translated'),
('Fine Food Broadcastor', 'Fine Food Broadcaster', 'translated'),
('Kitchen Xiuzhen', 'Kitchen Xiuzhen', 'translated'),
('Reborn - Super Chef', 'Reborn - Super Chef', 'translated'),
('The Taming of the Black Bellied Scholar', 'The Taming of the Black Bellied Scholar', 'translated'),
('The Feast', 'The Feast', 'translated'),
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False |
week7/lecture13/test2.py | nobodysshadow/edX_MITx_6.00.1x | 622 | 12779153 | <filename>week7/lecture13/test2.py
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 12 07:17:17 2016
@author: ericgrimson
"""
#import numpy as np
import pylab as plt
mySamples = []
myLinear = []
myQuadratic = []
myCubic = []
myExponential = []
for i in range(0, 30):
mySamples.append(i)
myLinear.append(i)
myQuadratic.append(i**2)
myCubic.append(i**3)
myExponential.append(1.5**i)
# first trial
plt.plot(mySamples, myLinear)
plt.plot(mySamples, myQuadratic)
plt.plot(mySamples, myCubic)
plt.plot(mySamples, myExponential)
# second trial
#plt.figure('lin')
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.plot(mySamples, myExponential)
# third trial
#plt.figure('lin')
#plt.xlabel('sample points')
#plt.ylabel('linear function')
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.plot(mySamples, myExponential)
#plt.figure('quad')
#plt.ylabel('quadratic function')
# fourth trial
#plt.figure('lin')
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.plot(mySamples, myExponential)
#plt.figure('lin')
#plt.title('Linear')
#plt.figure('quad')
#plt.title('Quadratic')
#plt.figure('cube')
#plt.title('Cubic')
#plt.figure('expo')
#plt.title('Exponential')
# fifth trial
#plt.figure('lin')
#plt.clf()
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.clf()
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube')
#plt.clf()
#plt.plot(mySamples, myCubic)
#plt.figure('expo')
#plt.clf()
#plt.plot(mySamples, myExponential)
#plt.figure('lin')
#plt.title('Linear')
#plt.figure('quad')
#plt.title('Quadratic')
#plt.figure('cube')
#plt.title('Cubic')
#plt.figure('expo')
#plt.title('Exponential')
# sixth trial
#plt.figure('lin')
#plt.clf()
#plt.ylim(0,1000)
#plt.plot(mySamples, myLinear)
#plt.figure('quad')
#plt.clf()
#plt.ylim(0,1000)
#plt.plot(mySamples, myQuadratic)
#plt.figure('lin')
#plt.title('Linear')
#plt.figure('quad')
#plt.title('Quadratic')
# seventh trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear)
#plt.plot(mySamples, myQuadratic)
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic)
#plt.plot(mySamples, myExponential)
#plt.figure('lin quad')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.title('Cubic vs. Exponential')
# eighth trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear, label = 'linear')
#plt.plot(mySamples, myQuadratic, label = 'quadratic')
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic, label = 'cubic')
#plt.plot(mySamples, myExponential, label = 'exponential')
#plt.legend()
#plt.title('Cubic vs. Exponential')
# ninth trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear, 'b-', label = 'linear')
#plt.plot(mySamples, myQuadratic,'ro', label = 'quadratic')
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g^', label = 'cubic')
#plt.plot(mySamples, myExponential, 'r--',label = 'exponential')
#plt.legend()
#plt.title('Cubic vs. Exponential')
# tenth trial
#plt.figure('lin quad')
#plt.clf()
#plt.plot(mySamples, myLinear, 'b-', label = 'linear', linewidth = 2.0)
#plt.plot(mySamples, myQuadratic,'r', label = 'quadratic', linewidth = 3.0)
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 4.0)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 5.0)
#plt.legend()
#plt.title('Cubic vs. Exponential')
# eleventh trial
#plt.figure('lin quad')
#plt.clf()
#plt.subplot(211)
#plt.ylim(0, 900)
#plt.plot(mySamples, myLinear, 'b-', label = 'linear', linewidth = 2.0)
#plt.subplot(212)
#plt.ylim(0, 900)
#plt.plot(mySamples, myQuadratic,'r', label = 'quadratic', linewidth = 3.0)
#plt.legend(loc = 'upper left')
#plt.title('Linear vs. Quadratic')
#plt.figure('cube exp')
#plt.clf()
#plt.subplot(121)
#plt.ylim(0, 140000)
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 4.0)
#plt.subplot(122)
#plt.ylim(0, 140000)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 5.0)
#plt.legend()
#plt.title('Cubic vs. Exponential')
# twelfth trial
#plt.figure('cube exp log')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 2.0)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 4.0)
#plt.yscale('log')
#plt.legend()
#plt.title('Cubic vs. Exponential')
#plt.figure('cube exp linear')
#plt.clf()
#plt.plot(mySamples, myCubic, 'g--', label = 'cubic', linewidth = 2.0)
#plt.plot(mySamples, myExponential, 'r',label = 'exponential', linewidth = 4.0)
#plt.legend()
#plt.title('Cubic vs. Exponential')
|
atx/record/scene_detector.py | jamjven/ATX | 1,132 | 12779179 | #-*- encoding: utf-8 -*-
import os
import cv2
import yaml
import numpy as np
from collections import defaultdict
def find_match(img, tmpl, rect=None, mask=None):
if rect is not None:
h, w = img.shape[:2]
x, y, x1, y1 = rect
if x1 > w or y1 > h:
return 0, None
img = img[y:y1, x:x1, :]
if mask is not None:
img = img.copy()
img[mask!=0] = 0
tmpl = tmpl.copy()
tmpl[mask!=0] = 0
s_bgr = cv2.split(tmpl) # Blue Green Red
i_bgr = cv2.split(img)
weight = (0.3, 0.3, 0.4)
resbgr = [0, 0, 0]
for i in range(3): # bgr
resbgr[i] = cv2.matchTemplate(i_bgr[i], s_bgr[i], cv2.TM_CCOEFF_NORMED)
match = resbgr[0]*weight[0] + resbgr[1]*weight[1] + resbgr[2]*weight[2]
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(match)
confidence = max_val
x, y = max_loc
h, w = tmpl.shape[:2]
if rect is None:
rect = (x, y, x+w, y+h)
# cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0) ,2)
# cv2.imshow('test', img)
# cv2.waitKey(20)
return confidence, rect
def get_mask(img1, img2, thresh=20):
if img1.shape != img2.shape:
return
diff = cv2.absdiff(img1, img2)
diff = np.mean(diff, axis=2)
diff[diff<=thresh] = 0
diff[diff>thresh] = 255
mask = np.dstack([diff]*3)
return mask
def get_match_confidence(img1, img2, mask=None):
if img1.shape != img2.shape:
return False
## first try, using absdiff
# diff = cv2.absdiff(img1, img2)
# h, w, d = diff.shape
# total = h*w*d
# num = (diff<20).sum()
# print 'is_match', total, num
# return num > total*0.90
if mask is not None:
img1 = img1.copy()
img1[mask!=0] = 0
img2 = img2.copy()
img2[mask!=0] = 0
## using match
match = cv2.matchTemplate(img1, img2, cv2.TM_CCOEFF_NORMED)
_, confidence, _, _ = cv2.minMaxLoc(match)
# print confidence
return confidence
class SceneDetector(object):
'''detect game scene from screen image'''
def __init__(self, scene_directory):
self.scene_touches = {}
self.scene_directory = scene_directory
self.build_tree(scene_directory)
def build_tree(self, directory):
'''build scene tree from images'''
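        # Hedged illustration of the expected directory layout (file names here are
        # hypothetical): each PNG name encodes a path in the scene tree, with '-'
        # separating the levels, while '<key>_mask.png' files and config.yml hold
        # per-scene diff masks and match rectangles:
        #
        #     scenes/
        #         config.yml
        #         main.png            -> node 'main'
        #         main-shop.png       -> child 'shop' under 'main'
        #         main-shop_mask.png  -> diff mask for 'main-shop'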
confile = os.path.join(directory, 'config.yml')
conf = {}
if os.path.exists(confile):
conf = yaml.load(open(confile).read())
class node(defaultdict):
name = ''
parent = None
tmpl = None
rect = None
mask = None
def __str__(self):
obj = self
names = []
while obj.parent is not None:
names.append(obj.name)
obj = obj.parent
return '-'.join(names[::-1])
def tree():
return node(tree)
root = tree()
for s in os.listdir(directory):
if not s.endswith('.png') or s.endswith('_mask.png'):
continue
obj = root
for i in s[:-4].split('-'):
obj[i].name = i
obj[i].parent = obj
obj = obj[i]
obj.tmpl = cv2.imread(os.path.join(directory, s))
obj.rect = conf.get(s[:-4], {}).get('rect')
maskimg = conf.get(s[:-4], {}).get('mask')
if maskimg is not None:
maskimg = os.path.join(directory, maskimg)
if os.path.exists(maskimg):
obj.mask = cv2.imread(maskimg)
self.tree = root
self.current_scene = []
self.confile = confile
self.conf = conf
def match_child(self, img, node):
c, s, r = (0, None, None)
for scene in node.itervalues():
if scene.tmpl is None:
continue
print str(scene), scene.rect, img.shape
confidence, rect = find_match(img, scene.tmpl, scene.rect, scene.mask)
# print scene.name, confidence, rect
if confidence > c:
c, s, r = (confidence, scene, rect)
if c > 0.95:
key = str(s)
if key not in self.conf:
self.conf[key] = {}
changed = False
if c > self.conf[key].get('confidence', 0):
s.rect = r
self.conf[key]['confidence'] = c
self.conf[key]['rect'] = list(r)
changed = True
if changed or s.mask is None:
x, y, x1, y1 = r
s.mask = get_mask(img[y:y1, x:x1, :], s.tmpl, 20)
maskimg = os.path.join(self.scene_directory, '%s_mask.png' % key)
cv2.imwrite(maskimg, s.mask)
self.conf[key]['mask'] = maskimg
changed = True
if changed:
self.save_config()
return c, s, r
def save_config(self):
print 'save config', self.conf
with open(self.confile, 'w') as f:
yaml.dump(self.conf, f)
def detect(self, img):
# check current scene path
# print 'checking current scene'
if self.current_scene:
for i in range(len(self.current_scene)):
s, r = self.current_scene[i]
x, y, x1, y1 = r
c = get_match_confidence(img[y:y1, x:x1, :], s.tmpl, s.mask)
if c < 0.75:
break
else:
# print 'current scene ok'
s = self.current_scene[-1][0]
if len(s.values()) == 0:
return s
self.current_scene = self.current_scene[:i]
# top scene has changed
if not self.current_scene:
c, s, r = self.match_child(img, self.tree)
if c < 0.75:
return
self.current_scene = [(s, r)]
s = self.current_scene[-1][0]
while True:
c, s, r = self.match_child(img, s)
if c < 0.75:
break
self.current_scene.append((s, r))
return s |
Giveme5W1H/extractor/extractors/action_extractor.py | bkrrr/Giveme5W | 410 | 12779214 | <reponame>bkrrr/Giveme5W
import re
from nltk.tree import ParentedTree
from Giveme5W1H.extractor.candidate import Candidate
from Giveme5W1H.extractor.extractors.abs_extractor import AbsExtractor
class ActionExtractor(AbsExtractor):
"""
The ActionExtractor tries to extract the main actor and his action.
"""
def __init__(self, weights: (float, float, float) = (0.9, 0.095, 0.005), minimal_length_of_tokens: int = 3):
self._minimal_length_of_tokens = minimal_length_of_tokens
# weights used in the candidate evaluation:
# (position, frequency, named entity)
self.weights = weights
def _extract_candidates(self, document):
"""
        Extracts possible agent/action pairs from a given document.
        Candidates are chosen if they belong to a coref-chain and are part of an NP-VP-NP pattern
:param document: The Document to be analyzed.
:type document: Document
:return: A List of Tuples containing all agents, actions and their position in the document.
"""
# retrieve results from preprocessing
corefs = document.get_corefs()
trees = document.get_trees()
candidates = []
for cluster in corefs:
for mention in corefs[cluster]:
# Check if mention is the subject of the sentence by matching the NP-VP-NP pattern.
#
# "One common way of defining the subject of a sentence S in English is as the noun phrase that is the
# child of S and the sibling of VP" (http://www.nltk.org/book/ch08.html)
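                # Hedged illustration: in a parse of "The committee approved the
                # budget", the NP "The committee" is a child of S and a sibling of
                # the VP "approved the budget", so that NP/VP pair is kept as an
                # agent/action candidate.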
for pattern in self._evaluate_tree(trees[mention['sentNum'] - 1]):
np_string = ''.join([p[0]['nlpToken']['originalText'] for p in pattern[0]])
if re.sub(r'\s+', '', mention['text']) in np_string:
candidate_object = Candidate()
candidate_object.set_sentence_index(pattern[2])
candidate_object.set_raw([pattern[0], pattern[1], cluster, mention['id']])
candidates.append(candidate_object)
document.set_candidates(self.get_id(), candidates)
def _evaluate_tree(self, sentence_root):
"""
        Examines the passed syntactic tree to determine if it matches an NP-VP-NP pattern
This is executed per sentence
:param sentence_root: A tree to be analyzed
:type sentence_root: ParentedTree
:return: A list of Tuples containing the agent and the action described in the sentence.
"""
candidates = []
for subtree in sentence_root.subtrees():
if subtree.label() == 'NP' and subtree.parent().label() == 'S':
# Skip NPs containing a VP
if any(list(subtree.subtrees(filter=lambda t: t.label() == 'VP'))):
continue
# check siblings for VP
sibling = subtree.right_sibling()
while sibling is not None:
if sibling.label() == 'VP':
# this gives a tuple to find the way from sentence to leaf
# tree_position = subtree.leaf_treeposition(0)
entry = [subtree.pos(), self.cut_what(sibling, self._minimal_length_of_tokens).pos(),
sentence_root.stanfordCoreNLPResult['index']]
candidates.append(entry)
break
sibling = sibling.right_sibling()
return candidates
def _evaluate_candidates(self, document):
"""
Calculate a confidence score based on number of mentions, position in text and entailment of named entities
for extracted candidates.
:param document: The parsed document
:type document: Document
:param candidates: Extracted candidates to evaluate.
        :type candidates: [([(String, String)], [(String, String)])]
:return: A list of evaluated and ranked candidates
"""
ranked_candidates = []
doc_len = document.get_len()
doc_ner = document.get_ner()
doc_coref = document.get_corefs()
if any(doc_coref.values()):
# get length of longest coref chain for normalization
max_len = len(max(doc_coref.values(), key=len))
else:
max_len = 1
for candidate in document.get_candidates(self.get_id()):
candidateParts = candidate.get_raw()
verb = candidateParts[1][0][0]['nlpToken']['originalText'].lower()
            # VPs beginning with say/said often contain no relevant action and are therefore skipped.
if verb.startswith('say') or verb.startswith('said'):
continue
coref_chain = doc_coref[candidateParts[2]]
# first parameter used for ranking is the number of mentions, we use the length of the coref chain
score = (len(coref_chain) / max_len) * self.weights[1]
representative = None
contains_ne = False
mention_type = ''
for mention in coref_chain:
if mention['id'] == candidateParts[3]:
mention_type = mention['type']
if mention['sentNum'] < doc_len:
# The position (sentence number) is another important parameter for scoring.
# This is inspired by the inverted pyramid.
score += ((doc_len - mention['sentNum'] + 1) / doc_len) * self.weights[0]
if mention['isRepresentativeMention']:
# The representative name for this chain has been found.
tmp = document._sentences[mention['sentNum'] - 1]['tokens'][mention['headIndex'] - 1]
representative = ((tmp['originalText'], tmp), tmp['pos'])
try:
                        # this doesn't work if some special characters are present
if representative[-1][1] == 'POS':
representative = representative[:-1]
except IndexError:
pass
if not contains_ne:
# If the current mention doesn't contain a named entity, check the other members of the chain
for token in doc_ner[mention['sentNum'] - 1][mention['headIndex'] - 1:mention['endIndex'] - 1]:
if token[1] in ['PERSON', 'ORGANIZATION', 'LOCATION']:
contains_ne = True
break
if contains_ne:
# the last important parameter is the entailment of a named entity
score += self.weights[2]
if score > 0:
# normalize the scoring
score /= sum(self.weights)
if mention_type == 'PRONOMINAL':
# use representing mention if the agent is only a pronoun
rp_format_fix = [(({'nlpToken': representative[0][1]}, representative[0][1]['pos']))]
ranked_candidates.append((rp_format_fix, candidateParts[1], score, candidate.get_sentence_index()))
else:
ranked_candidates.append((candidateParts[0], candidateParts[1], score, candidate.get_sentence_index()))
# split results
who = [(c[0], c[2], c[3]) for c in ranked_candidates]
what = [(c[1], c[2], c[3]) for c in ranked_candidates]
# Transform who to object oriented list
o_who = self._filterAndConvertToObjectOrientedList(who)
# Filter by text
o_who_clean = self._filter_candidate_dublicates(o_who)
document.set_answer('who', o_who_clean)
        # Transform what to object oriented list
o_what = self._filterAndConvertToObjectOrientedList(what)
# Filter by text
o_what_clean = self._filter_candidate_dublicates(o_what)
document.set_answer('what', o_what_clean)
def _filterAndConvertToObjectOrientedList(self, list):
max = 0
candidates = self._filter_duplicates(list)
for candidate in candidates:
if candidate.get_score() > max:
max = candidate.get_score()
# normalize
for candidate in candidates:
score = candidate.get_score()
candidate.set_score(score / max)
# sort
candidates.sort(key=lambda x: x.get_score(), reverse=True)
return candidates
def cut_what(self, tree, min_length=0, length=0):
"""
        This function is used to shorten verb phrases; it recursively traverses the parse tree depth first.
:param tree: Tree to cut
:type tree: ParentedTree
:param min_length: Desired minimal length of tokens
:type min_length: Integer
:param length: Number of tokens already included by the upper level function
:type length: Integer
:return: A subtree
"""
if type(tree[0]) is not ParentedTree:
# we found a leaf
return ParentedTree(tree.label(), [tree[0]])
else:
children = []
for sub in tree:
child = self.cut_what(sub, min_length, length)
length += len(child.leaves())
children.append(child)
if sub.label() == 'NP':
sibling = sub.right_sibling()
if length < min_length and sibling is not None and sibling.label() == 'PP':
children.append(sibling.copy(deep=True))
break
return ParentedTree(tree.label(), children)
|
src/mcedit2/util/settings.py | elcarrion06/mcedit2 | 673 | 12779215 | <gh_stars>100-1000
"""
settings
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import json
import os
from PySide import QtCore
import logging
from mcedit2.util import directories
log = logging.getLogger(__name__)
_settings = None
def Settings():
global _settings
if _settings is None:
_settings = MCESettings()
return _settings
class MCESettingsOption(QtCore.QObject):
def __init__(self, settings, key, valueType=None, default=None, *args, **kwargs):
super(MCESettingsOption, self).__init__(*args, **kwargs)
self.settings = settings
self.key = key
self.valueType = valueType
self.default = default
def value(self, default=None):
if default is None:
default = self.default
if self.valueType == "json":
value = self.settings.jsonValue(self.key, default)
else:
value = self.settings.value(self.key, default)
if self.valueType is bool:
if isinstance(value, basestring):
value = value.lower() == "true"
elif self.valueType:
value = self.valueType(value)
return value
def setValue(self, value):
if self.valueType == "json":
return self.settings.setJsonValue(self.key, value)
else:
return self.settings.setValue(self.key, value)
valueChanged = QtCore.Signal(object)
def jsonValue(self, default=None):
return self.settings.jsonValue(self.key, default)
def setJsonValue(self, value):
return self.settings.setJsonValue(self.key, value)
def connectAndCall(self, callback):
"""
Connect `callback` to this option's `valueChanged` signal, then call it with the value of this option.
        :param callback: Callable invoked with the option's value
        :type callback: callable
        :return: None
        :rtype: None
"""
self.valueChanged.connect(callback)
callback(self.value())
class MCESettingsNamespace(object):
def __init__(self, rootSettings, prefix):
self.rootSettings = rootSettings
if not prefix.endswith("/"):
prefix = prefix + "/"
self.prefix = prefix
def getOption(self, key, type=None, default=None):
"""
Parameters
----------
key: str
type: bool | int | float | str
default: Any
Returns
-------
option: MCESettingsOption
"""
return self.rootSettings.getOption(self.prefix + key, type, default)
class MCESettings(QtCore.QSettings):
def __init__(self, *args, **kwargs):
"""
Subclass of QSettings. Adds a `getOption` method which returns an individual option as its own object. Adds
one signal for each setting, emitted when its value is changed. Also provides json encoded methods to work
around a bug in PySide.
QSettings, under PySide, does not reliably infer that a settings value should be read as a QStringList.
jsonValue and setJsonValue methods are provided that will automatically encode/decode the given value to or from json
:rtype: MCESettings
"""
dataDir = directories.getUserFilesDirectory()
iniPath = os.path.join(dataDir, "mcedit2.ini")
log.info("Loading app settings from %s", iniPath)
super(MCESettings, self).__init__(iniPath, QtCore.QSettings.IniFormat, *args,
**kwargs)
self.options = {}
#= defaultdict(lambda: QtCore.Signal(object))
def getNamespace(self, prefix):
"""
Return an MCESettingsNamespace object which can be used to access settings whose keys are all prefixed by
the given prefix
        :param prefix: Key prefix shared by all settings in the namespace
        :type prefix: str
        :return: Namespace object for accessing the prefixed settings
        :rtype: MCESettingsNamespace
"""
return MCESettingsNamespace(self, prefix)
def getSignal(self, key):
"""
Returns a signal to be triggered when the setting `key` is changed.
The signal handler receives one argument: the setting's new value.
:param key: Settings key
:type key: str
        :rtype: QtCore.Signal
"""
return self.getOption(key).valueChanged
def emitSignal(self, key, val):
option = self.options.get(key)
if option:
option.valueChanged.emit(val)
def setValue(self, key, val):
old = self.value(key)
if old != val:
log.info("Setting %r changed to (%.40r)(...) (was (%.40r)(...))", key, val, old)
super(MCESettings, self).setValue(key, val)
self.emitSignal(key, val)
def jsonValue(self, key, default=None):
value = self.value(key, None)
if value is not None:
try:
return json.loads(value)
except ValueError as e: # No JSON object could be decoded
log.error("Failed to decode setting %s: %s", key, e)
return default
else:
return default
def setJsonValue(self, key, value):
self.setValue(key, json.dumps(value))
def getOption(self, key, type=None, default=None):
"""
Return an object that represents the setting at 'key'. The object may be used to get and set the value and
get the value's valueChanged signal. Among other uses, the object's setValue attribute may be connected to the
valueChanged signal of an input field.
        :param key: Settings key
        :type key: str
        :return: Option object for the given key
        :rtype: MCESettingsOption
"""
option = self.options.get(key)
if option:
return option
option = MCESettingsOption(self, key, type, default)
self.options[key] = option
return option
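# Illustrative usage (hedged sketch, not part of the original module; the key and
# namespace names and the callback below are hypothetical):
#
#     settings = Settings()
#     opt = settings.getOption('MainWindow/showGrid', bool, True)
#     opt.connectAndCall(on_show_grid_changed)  # called now and on every change
#     opt.setValue(False)                       # emits valueChanged with False
#
#     ns = settings.getNamespace('BlockView')
#     scale = ns.getOption('scale', float, 1.0)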
|
scanners/zap-advanced/scanner/tests/test_zap_spider_http.py | kevin-yen/secureCodeBox | 488 | 12779241 | #!/usr/bin/env python
# SPDX-FileCopyrightText: 2021 iteratec GmbH
#
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock, Mock
from unittest import TestCase
from zapclient.configuration import ZapConfiguration
class ZapSpiderHttpTests(TestCase):
@pytest.mark.unit
def test_has_spider_configurations(self):
config = ZapConfiguration("./tests/mocks/context-with-overlay/", "https://www.secureCodeBox.io/")
self.assertIsNone(config.get_active_spider_config)
config = ZapConfiguration("./tests/mocks/scan-full-bodgeit-docker/", "http://bodgeit:8080/")
self.assertIsNotNone(config.get_active_spider_config)
|
asv_oggm_plugin.py | skachuck/oggm | 156 | 12779262 | <reponame>skachuck/oggm
import subprocess
import requests
import tempfile
import os
import logging
from asv.plugins.conda import _find_conda, Conda
from asv.console import log
from asv import util
logging.getLogger("requests").setLevel(logging.WARNING)
OGGM_CONDA_ENV_URL = ("https://raw.githubusercontent.com/OGGM/"
"OGGM-dependency-list/master/Linux-64/{0}")
OGGM_CONDA_ENVS = {
"36": "oggmdev-1.2.0.202002022248_20200202_py36.yml",
"37": "oggmdev-1.2.0.202002022248_20200202_py37.yml",
}
class OggmVirtualenv(Conda):
tool_name = "oggm_conda"
def _setup(self):
log.info("Creating oggm conda environment for {0}".format(self.name))
env_file = tempfile.NamedTemporaryFile(mode="w", delete=False,
suffix=".yml")
try:
pyver = str(self._python).replace(".", "")[:2]
oggm_env = OGGM_CONDA_ENVS[pyver]
req = requests.get(OGGM_CONDA_ENV_URL.format(oggm_env))
req.raise_for_status()
for line in req.text.splitlines():
if line.startswith("prefix:"):
continue
elif line.startswith("name:"):
env_file.write("name: {0}\n".format(self.name))
else:
env_file.write(line + "\n")
env_file.close()
self._conda_channels = ["conda-forge", "defaults"]
self._conda_environment_file = env_file.name
return super()._setup()
except Exception as exc:
if os.path.isfile(env_file.name):
with open(env_file.name, "r") as f:
text = f.read()
log.info("oggm conda env create failed: in {} with:\n{}"
.format(self._path, text))
raise
finally:
os.unlink(env_file.name)
|
dockerfiles/examples/read-bytes-seed/scale-job.py | kaydoh/scale | 121 | 12779313 | <gh_stars>100-1000
import argparse
import datetime
import json
import logging
import sys
import os
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
level=logging.DEBUG, stream=sys.stdout)
def run_algorithm(bytes_total, input_file, out_dir):
"""Read the indicated number of bytes from input file and store in output directory
    :param bytes_total: number of bytes to copy from the input file
    :param input_file: absolute path to the input file
    :param out_dir: absolute path to the output directory
    :return: absolute path to the copied output file
"""
bytes_read = 0
chunk_size = 512
logging.info('Reading %s bytes from %s and storing at %s.' % (bytes_total, input_file, out_dir))
base_path = os.path.join(out_dir, 'output_file')
start = datetime.datetime.utcnow().isoformat()
os.makedirs(base_path)
output_file = os.path.join(base_path, os.path.basename(input_file))
logging.info('Data being stored in %s' % output_file)
with open(input_file, 'rb') as infile:
with open(output_file, 'wb') as outfile:
while bytes_read <= bytes_total:
if bytes_read + chunk_size > bytes_total:
chunk_size = bytes_total - bytes_read
chunk = infile.read(chunk_size)
# Break if EOF is encountered
if not chunk: break
outfile.write(chunk)
bytes_read += chunk_size
logging.info('Copy complete')
end = datetime.datetime.utcnow().isoformat()
# Output metadata file for testing capture
metadata = {
'type': 'Feature',
'geometry': None,
'properties':
{
'dataStarted': start + 'Z',
'dataEnded': end + 'Z'
}
}
metadata_file = output_file + '.metadata.json'
with open(metadata_file, 'w') as outfile:
json.dump(metadata, outfile)
logging.info('Metadata written to %s' % metadata_file)
return output_file
if __name__ == '__main__':
for key in os.environ.keys():
print "%30s %s" % (key,os.environ[key])
parser = argparse.ArgumentParser(description='Copy x number of bytes from input file to output file.')
parser.add_argument('bytes_total', type=int, help='number of bytes to copy from input to output file')
parser.add_argument('input_file', help='absolute path to input file')
parser.add_argument('output_dir', help='absolute output directory path')
args = parser.parse_args()
logging.debug('Bytes to copy: {}'.format(args.bytes_total))
logging.debug('Input file: {}'.format(args.input_file))
logging.debug('Output directory: {}'.format(args.output_dir))
output_file = run_algorithm(args.bytes_total, args.input_file, args.output_dir)
# Write an output manifest for testing JSON property capture
with open(os.path.join(args.output_dir, 'seed.outputs.json'), 'w') as output_json:
input_size = os.path.getsize(args.input_file)
contents = {'INPUT_FILE_NAME': args.input_file, 'INPUT_SIZE': input_size}
json.dump(contents, output_json)
sys.exit(0)
|
examples/notebooks/test_notebooks.py | mjc87/SHTOOLS | 251 | 12779326 | #!/usr/bin/env python3
"""
This script will run all jupyter notebooks in order to test for errors.
"""
import sys
import os
import nbformat
from nbconvert.preprocessors import ExecutePreprocessor
if os.path.dirname(sys.argv[0]) != '':
os.chdir(os.path.dirname(sys.argv[0]))
notebooks = ('grids-and-coefficients.ipynb',
'localized-spectral-analysis.ipynb',
'gravity-and-magnetic-fields.ipynb',
'plotting-maps.ipynb',
'low-level-spherical-harmonic-analyses.ipynb',
'advanced-localized-spectral-analysis.ipynb',
'advanced-shcoeffs-and-shgrid-usage.ipynb',
'spherical-harmonic-normalizations.ipynb',
'advanced-shwindow-usage.ipynb',
'3d-plots.ipynb')
if sys.version_info.major == 3:
kname = 'python3'
else:
    raise RuntimeError('Python version {:d} not supported.'.format(sys.version_info.major))
for i in range(len(notebooks)):
with open(notebooks[i]) as f:
nb = nbformat.read(f, as_version=4)
ep = ExecutePreprocessor(timeout=240, kernel_name=kname)
print('Processing file {:s}'.format(notebooks[i]))
ep.preprocess(nb, {'metadata': {'path': '.'}})
|
networking-calico/networking_calico/timestamp.py | mikestephen/calico | 3,973 | 12779383 | # -*- coding: utf-8 -*-
# Copyright (c) 2018 Tigera, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
ZERO = datetime.timedelta(0)
class UTC(datetime.tzinfo):
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
utc = UTC()
def timestamp_now():
utc_now = datetime.datetime.now(utc)
return utc_now.strftime('%Y-%m-%dT%H:%M:%SZ')
# e.g. 2015-05-19T20:32:12Z
|
PseudoGenerator.py | yxn-coder/Inf-Net | 273 | 12779427 | # -*- coding: utf-8 -*-
"""Preview
Code for 'Inf-Net: Automatic COVID-19 Lung Infection Segmentation from CT Scans'
submit to Transactions on Medical Imaging, 2020.
First Version: Created on 2020-05-13 (@author: <NAME>)
"""
# ---- base lib -----
import os
import argparse
from datetime import datetime
import cv2
import numpy as np
import random
import shutil
from scipy import misc
# ---- torch lib ----
import torch
from torch.autograd import Variable
import torch.nn.functional as F
# ---- custom lib ----
# NOTES: Here we nly provide Res2Net, you can also replace it with other backbones
from Code.model_lung_infection.InfNet_Res2Net import Inf_Net as Network
from Code.utils.dataloader_LungInf import get_loader, test_dataset
from Code.utils.utils import clip_gradient, adjust_lr, AvgMeter
from Code.utils.format_conversion import binary2edge
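# Hedged note on joint_loss() below: `weit` weights each pixel by how much the
# local average of the mask differs from the mask itself, which emphasizes region
# boundaries. The loss is therefore a boundary-weighted BCE term plus a
# boundary-weighted IoU term, averaged over the batch (the same structure-loss
# idea used in related segmentation work such as F3Net/PraNet).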
def joint_loss(pred, mask):
weit = 1 + 5*torch.abs(F.avg_pool2d(mask, kernel_size=31, stride=1, padding=15) - mask)
    wbce = F.binary_cross_entropy_with_logits(pred, mask, reduction='none')
wbce = (weit*wbce).sum(dim=(2, 3)) / weit.sum(dim=(2, 3))
pred = torch.sigmoid(pred)
inter = ((pred * mask)*weit).sum(dim=(2, 3))
union = ((pred + mask)*weit).sum(dim=(2, 3))
wiou = 1 - (inter + 1)/(union - inter+1)
return (wbce + wiou).mean()
def trainer(train_loader, model, optimizer, epoch, opt, total_step):
model.train()
# ---- multi-scale training ----
    size_rates = [0.75, 1, 1.25]  # replace with your desired training scales
loss_record1, loss_record2, loss_record3, loss_record4, loss_record5 = AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter(), AvgMeter()
for i, pack in enumerate(train_loader, start=1):
for rate in size_rates:
optimizer.zero_grad()
# ---- data prepare ----
images, gts, edges = pack
images = Variable(images).cuda()
gts = Variable(gts).cuda()
edges = Variable(edges).cuda()
# ---- rescale ----
trainsize = int(round(opt.trainsize*rate/32)*32)
if rate != 1:
images = F.upsample(images, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
gts = F.upsample(gts, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
edges = F.upsample(edges, size=(trainsize, trainsize), mode='bilinear', align_corners=True)
# ---- forward ----
lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2, lateral_edge = model(images)
# ---- loss function ----
loss5 = joint_loss(lateral_map_5, gts)
loss4 = joint_loss(lateral_map_4, gts)
loss3 = joint_loss(lateral_map_3, gts)
loss2 = joint_loss(lateral_map_2, gts)
loss1 = torch.nn.BCEWithLogitsLoss()(lateral_edge, edges)
loss = loss1 + loss2 + loss3 + loss4 + loss5
# ---- backward ----
loss.backward()
clip_gradient(optimizer, opt.clip)
optimizer.step()
# ---- recording loss ----
if rate == 1:
loss_record1.update(loss1.data, opt.batchsize)
loss_record2.update(loss2.data, opt.batchsize)
loss_record3.update(loss3.data, opt.batchsize)
loss_record4.update(loss4.data, opt.batchsize)
loss_record5.update(loss5.data, opt.batchsize)
# ---- train visualization ----
if i % 5 == 0 or i == total_step:
print('{} Epoch [{:03d}/{:03d}], Step [{:04d}/{:04d}], [lateral-edge: {:.4f}, '
'lateral-2: {:.4f}, lateral-3: {:0.4f}, lateral-4: {:0.4f}, lateral-5: {:0.4f}]'.
format(datetime.now(), epoch, opt.epoch, i, total_step, loss_record1.show(),
loss_record2.show(), loss_record3.show(), loss_record4.show(), loss_record5.show()))
# ---- save model_lung_infection ----
save_path = 'Snapshots/{}/'.format(opt.train_save)
os.makedirs(save_path, exist_ok=True)
if (epoch+1) % 10 == 0:
torch.save(model.state_dict(), save_path + 'Semi-Inf-Net-%d.pth' % (epoch+1))
print('[Saving Snapshot:]', save_path + 'Semi-Inf-Net-%d.pth' % (epoch+1))
def train_module(_train_path, _train_save, _resume_snapshot):
parser = argparse.ArgumentParser()
parser.add_argument('--epoch', type=int, default=10, help='epoch number')
parser.add_argument('--lr', type=float, default=3e-4, help='learning rate')
parser.add_argument('--batchsize', type=int, default=16, help='training batch size')
parser.add_argument('--trainsize', type=int, default=352, help='training dataset size')
parser.add_argument('--clip', type=float, default=0.5, help='gradient clipping margin')
parser.add_argument('--decay_rate', type=float, default=0.1, help='decay rate of learning rate')
parser.add_argument('--decay_epoch', type=int, default=50, help='every n epochs decay learning rate')
parser.add_argument('--train_path', type=str, default=_train_path)
parser.add_argument('--train_save', type=str, default=_train_save)
parser.add_argument('--resume_snapshot', type=str, default=_resume_snapshot)
opt = parser.parse_args()
# ---- build models ----
torch.cuda.set_device(0)
model = Network(channel=32, n_class=1).cuda()
model.load_state_dict(torch.load(opt.resume_snapshot))
params = model.parameters()
optimizer = torch.optim.Adam(params, opt.lr)
image_root = '{}/Imgs/'.format(opt.train_path)
gt_root = '{}/GT/'.format(opt.train_path)
edge_root = '{}/Edge/'.format(opt.train_path)
train_loader = get_loader(image_root, gt_root, edge_root, batchsize=opt.batchsize, trainsize=opt.trainsize)
total_step = len(train_loader)
print("#"*20, "Start Training", "#"*20)
for epoch in range(1, opt.epoch):
adjust_lr(optimizer, opt.lr, epoch, opt.decay_rate, opt.decay_epoch)
trainer(train_loader=train_loader, model=model, optimizer=optimizer,
epoch=epoch, opt=opt, total_step=total_step)
def inference_module(_data_path, _save_path, _pth_path):
model = Network(channel=32, n_class=1)
model.load_state_dict(torch.load(_pth_path))
model.cuda()
model.eval()
os.makedirs(_save_path, exist_ok=True)
# FIXME
image_root = '{}/'.format(_data_path)
# gt_root = '{}/mask/'.format(data_path)
test_loader = test_dataset(image_root, image_root, 352)
for i in range(test_loader.size):
image, name = test_loader.load_data()
#gt = np.asarray(gt, np.float32)
#gt /= (gt.max() + 1e-8)
image = image.cuda()
lateral_map_5, lateral_map_4, lateral_map_3, lateral_map_2, lateral_edge = model(image)
res = lateral_map_2 # final segmentation
#res = F.upsample(res, size=gt.shape, mode='bilinear', align_corners=False)
res = res.sigmoid().data.cpu().numpy().squeeze()
res = (res - res.min()) / (res.max() - res.min() + 1e-8)
misc.imsave(_save_path + '/' + name, res)
def movefiles(_src_dir, _dst_dir):
os.makedirs(_dst_dir, exist_ok=True)
for file_name in os.listdir(_src_dir):
shutil.copyfile(os.path.join(_src_dir, file_name),
os.path.join(_dst_dir, file_name))
if __name__ == '__main__':
slices = './Dataset/TrainingSet/LungInfection-Train/Pseudo-label/DataPrepare'
slices_dir = slices + '/Imgs_split'
slices_pred_seg_dir = slices + '/pred_seg_split'
slices_pred_edge_dir = slices + '/pred_edge_split'
# NOTES: Hybrid-label = Doctor-label + Pseudo-label
semi = './Dataset/TrainingSet/LungInfection-Train/Pseudo-label/DataPrepare/Hybrid-label'
semi_img = semi + '/Imgs'
semi_mask = semi + '/GT'
semi_edge = semi + '/Edge'
if (not os.path.exists(semi_img)) or (len(os.listdir(semi_img)) != 50):
shutil.copytree('Dataset/TrainingSet/LungInfection-Train/Doctor-label/Imgs',
semi_img)
shutil.copytree('Dataset/TrainingSet/LungInfection-Train/Doctor-label/GT',
semi_mask)
shutil.copytree('Dataset/TrainingSet/LungInfection-Train/Doctor-label/Edge',
semi_edge)
print('Copy done')
else:
print('Check done')
slices_lst = os.listdir(slices_dir)
random.shuffle(slices_lst)
print("#" * 20, "\nStart Training (Inf-Net)\nThis code is written for 'Inf-Net: Automatic COVID-19 Lung "
"Infection Segmentation from CT Scans', 2020, arXiv.\n"
"----\nPlease cite the paper if you use this code and dataset. "
"And any questions feel free to contact me "
"via E-mail (<EMAIL>)\n----\n", "#" * 20)
for i, split_name in enumerate(slices_lst):
print('\n[INFO] {} ({}/320)'.format(split_name, i))
# ---- inference ----
test_aux_dir = os.path.join(slices_dir, split_name)
test_aux_save_dir = os.path.join(slices_pred_seg_dir, split_name)
if i == 0:
snapshot_dir = './Snapshots/save_weights/Inf-Net/Inf-Net-100.pth'
else:
snapshot_dir = './Snapshots/semi_training/Semi-Inf-Net_{}/Semi-Inf-Net-10.pth'.format(i-1)
inference_module(_data_path=test_aux_dir, _save_path=test_aux_save_dir, _pth_path=snapshot_dir)
os.makedirs(os.path.join(slices_pred_edge_dir, split_name), exist_ok=True)
for pred_name in os.listdir(test_aux_save_dir):
edge_tmp = binary2edge(os.path.join(test_aux_save_dir, pred_name))
cv2.imwrite(os.path.join(slices_pred_edge_dir, split_name, pred_name), edge_tmp)
# ---- move generation ----
movefiles(test_aux_dir, semi_img)
movefiles(test_aux_save_dir, semi_mask)
movefiles(os.path.join(slices_pred_edge_dir, split_name), semi_edge)
# ---- training ----
train_module(_train_path=semi,
_train_save='semi_training/Semi-Inf-Net_{}'.format(i),
_resume_snapshot=snapshot_dir)
# move img/pseudo-label into `./Dataset/TrainingSet/LungInfection-Train/Pseudo-label`
shutil.copytree(semi_img, './Dataset/TrainingSet/LungInfection-Train/Pseudo-label/Imgs')
shutil.copytree(semi_mask, './Dataset/TrainingSet/LungInfection-Train/Pseudo-label/GT')
shutil.copytree(semi_edge, 'Dataset/TrainingSet/LungInfection-Train/Pseudo-label/Edge')
print('Pseudo Label Generated!')
|
tests/wrappers.py | blacksph3re/garage | 1,500 | 12779452 | """Test environment wrapper."""
import gym
class AutoStopEnv(gym.Wrapper):
"""Environment wrapper that stops episode at step max_episode_length."""
def __init__(self, env=None, env_name='', max_episode_length=100):
"""Create an AutoStepEnv.
Args:
env (gym.Env): Environment to be wrapped.
env_name (str): Name of the environment.
max_episode_length (int): Maximum length of the episode.
"""
if env_name:
super().__init__(gym.make(env_name))
else:
super().__init__(env)
self._episode_step = 0
self._max_episode_length = max_episode_length
def step(self, action):
"""Step the wrapped environment.
Args:
action (np.ndarray): the action.
Returns:
np.ndarray: Next observation
float: Reward
bool: Termination signal
dict: Environment information
"""
self._episode_step += 1
next_obs, reward, done, info = self.env.step(action)
if self._episode_step == self._max_episode_length:
done = True
self._episode_step = 0
return next_obs, reward, done, info
def reset(self, **kwargs):
"""Reset the wrapped environment.
Args:
**kwargs: Keyword arguments.
Returns:
np.ndarray: Initial observation.
"""
return self.env.reset(**kwargs)
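# Illustrative usage (hedged sketch; assumes a registered gym environment id):
#
#     env = AutoStopEnv(env_name='CartPole-v1', max_episode_length=50)
#     obs = env.reset()
#     done = False
#     while not done:
#         obs, reward, done, info = env.step(env.action_space.sample())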
|
kmip/tests/integration/conftest.py | ondrap/PyKMIP | 179 | 12779471 | <reponame>ondrap/PyKMIP<filename>kmip/tests/integration/conftest.py<gh_stars>100-1000
# Copyright (c) 2015 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pytest
from kmip.services import kmip_client
from kmip.pie import client as pclient
def pytest_addoption(parser):
parser.addoption(
"--config",
action="store",
default="client",
help="Config file section name for client configuration settings")
@pytest.fixture(scope="class")
def client(request):
config = request.config.getoption("--config")
client = kmip_client.KMIPProxy(config=config)
client.open()
def finalize():
client.close()
request.addfinalizer(finalize)
request.cls.client = client
@pytest.fixture(scope="class")
def simple(request):
config = request.config.getoption("--config")
client = pclient.ProxyKmipClient(config=config)
client.open()
def finalize():
client.close()
request.addfinalizer(finalize)
request.cls.client = client
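# Illustrative invocation (hedged; the section name must match an entry in your
# local PyKMIP client configuration file):
#
#     pytest kmip/tests/integration/ --config client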
|
libtrellis/examples/graph.py | Keno/prjtrellis | 256 | 12779485 | #!/usr/bin/env python3
"""
Testing the routing graph generator
"""
import pytrellis
import sys
pytrellis.load_database("../../database")
chip = pytrellis.Chip("LFE5U-45F")
rg = chip.get_routing_graph()
tile = rg.tiles[pytrellis.Location(9, 71)]
for wire in tile.wires:
print("Wire {}:".format(rg.to_str(wire.key())))
for dh in wire.data().downhill:
arc = rg.tiles[dh.loc].arcs[dh.id]
print(" --> R{}C{}_{}".format(arc.sink.loc.y, arc.sink.loc.x, rg.to_str(arc.sink.id)))
for bdh in wire.data().belsDownhill:
print(" ->| R{}C{}_{}.{}".format(bdh.bel.loc.y, bdh.bel.loc.x, rg.to_str(bdh.bel.id), rg.to_str(bdh.pin)))
print()
for uh in wire.data().uphill:
arc = rg.tiles[uh.loc].arcs[uh.id]
print(" <-- R{}C{}_{}".format(arc.source.loc.y, arc.source.loc.x, rg.to_str(arc.source.id)))
for buh in wire.data().belsUphill:
print(" <-| R{}C{}_{}.{}".format(buh.bel.loc.y, buh.bel.loc.x, rg.to_str(buh.bel.id), rg.to_str(buh.pin)))
print()
|
python/ql/test/query-tests/Summary/my_file.py | timoles/codeql | 4,036 | 12779507 | """
module level docstring
is not included
"""
# this line is not code
# `tty` was chosen for stability over python versions (so we don't get diffrent results
# on different computers, that has different versions of Python).
#
# According to https://github.com/python/cpython/tree/master/Lib (at 2021-04-23) `tty`
# was last changed in 2001, so chances of this being changed in the future are slim.
import tty
s = """
all these lines are code
"""
print(s)
def func():
"""
this string is a doc-string. Although the module-level docstring is not considered
code, this one apparently is ¯\_(ツ)_/¯
"""
pass
|
semseg/models/modules/common.py | Genevievekim/semantic-segmentation-1 | 196 | 12779536 | <reponame>Genevievekim/semantic-segmentation-1<filename>semseg/models/modules/common.py
import torch
from torch import nn, Tensor
class ConvModule(nn.Sequential):
def __init__(self, c1, c2, k, s=1, p=0, d=1, g=1):
super().__init__(
nn.Conv2d(c1, c2, k, s, p, d, g, bias=False),
nn.BatchNorm2d(c2),
nn.ReLU(True)
) |
src/main/resources/resource/AndroidSpeechRecognition/AndroidSpeechRecognition.py | holgerfriedrich/myrobotlab | 179 | 12779544 | #########################################
# AndroidSpeechRecognition.py
# more info @: http://myrobotlab.org/service/AndroidSpeechRecognition
#########################################
# start the service
androidspeechrecognition = Runtime.start("androidspeechrecognition","AndroidSpeechRecognition")
# start mouth
marySpeech = Runtime.start("marySpeech", "MarySpeech")
# shutdown microphone if robot speaking
androidspeechrecognition.attach(marySpeech)
# auto rearm microphone
androidspeechrecognition.setAutoListen(True)
androidspeechrecognition.addCommand("turn on the light", "python", "lightOn")
androidspeechrecognition.addCommand("turn off the light", "python", "lightOff")
def lightOn():
marySpeech.speakBlocking("light is on")
def lightOff():
marySpeech.speakBlocking("light is off")
|
src/garage/torch/modules/cnn_module.py | blacksph3re/garage | 1,500 | 12779546 | """CNN Module."""
import warnings
import akro
import numpy as np
import torch
from torch import nn
from garage import InOutSpec
from garage.torch import (expand_var, NonLinearity, output_height_2d,
output_width_2d)
# pytorch v1.6 issue, see https://github.com/pytorch/pytorch/issues/42305
# pylint: disable=abstract-method
class CNNModule(nn.Module):
"""Convolutional neural network (CNN) model in pytorch.
Args:
spec (garage.InOutSpec): Specification of inputs and outputs.
The input should be in 'NCHW' format: [batch_size, channel, height,
width]. Will print a warning if the channel size is not 1 or 3.
If output_space is specified, then a final linear layer will be
inserted to map to that dimensionality.
If output_space is None, it will be filled in with the computed
output space.
image_format (str): Either 'NCHW' or 'NHWC'. Should match the input
specification. Gym uses NHWC by default, but PyTorch uses NCHW by
default.
hidden_channels (tuple[int]): Number of output channels for CNN.
For example, (3, 32) means there are two convolutional layers.
The filter for the first conv layer outputs 3 channels
and the second one outputs 32 channels.
kernel_sizes (tuple[int]): Dimension of the conv filters.
For example, (3, 5) means there are two convolutional layers.
The filter for first layer is of dimension (3 x 3)
and the second one is of dimension (5 x 5).
strides (tuple[int]): The stride of the sliding window. For example,
(1, 2) means there are two convolutional layers. The stride of the
filter for first layer is 1 and that of the second layer is 2.
paddings (tuple[int]): Amount of zero-padding added to both sides of
the input of a conv layer.
padding_mode (str): The type of padding algorithm to use, i.e.
'constant', 'reflect', 'replicate' or 'circular' and
by default is 'zeros'.
hidden_nonlinearity (callable or torch.nn.Module):
Activation function for intermediate dense layer(s).
It should return a torch.Tensor. Set it to None to maintain a
linear activation.
hidden_b_init (callable): Initializer function for the bias
of intermediate dense layer(s). The function should return a
torch.Tensor.
max_pool (bool): Bool for using max-pooling or not.
pool_shape (tuple[int]): Dimension of the pooling layer(s). For
example, (2, 2) means that all pooling layers are of the same
shape (2, 2).
pool_stride (tuple[int]): The strides of the pooling layer(s). For
example, (2, 2) means that all the pooling layers have
strides (2, 2).
layer_normalization (bool): Bool for using layer normalization or not.
hidden_w_init (callable): Initializer function for the weight
of intermediate dense layer(s). The function should return a
torch.Tensor.
enable_cudnn_benchmarks (bool): Whether to enable cudnn benchmarks
            in `torch`. If enabled, the backend selects the CNN benchmark
algorithm with the best performance.
"""
def __init__(
self,
spec,
image_format,
hidden_channels,
*, # Many things after this are ints or tuples of ints.
kernel_sizes,
strides,
paddings=0,
padding_mode='zeros',
hidden_nonlinearity=nn.ReLU,
hidden_w_init=nn.init.xavier_uniform_,
hidden_b_init=nn.init.zeros_,
max_pool=False,
pool_shape=None,
pool_stride=1,
layer_normalization=False,
enable_cudnn_benchmarks=True):
super().__init__()
assert len(hidden_channels) > 0
# PyTorch forces us to use NCHW internally.
in_channels, height, width = _check_spec(spec, image_format)
self._format = image_format
kernel_sizes = expand_var('kernel_sizes', kernel_sizes,
len(hidden_channels), 'hidden_channels')
strides = expand_var('strides', strides, len(hidden_channels),
'hidden_channels')
paddings = expand_var('paddings', paddings, len(hidden_channels),
'hidden_channels')
pool_shape = expand_var('pool_shape', pool_shape, len(hidden_channels),
'hidden_channels')
pool_stride = expand_var('pool_stride', pool_stride,
len(hidden_channels), 'hidden_channels')
self._cnn_layers = nn.Sequential()
torch.backends.cudnn.benchmark = enable_cudnn_benchmarks
# In case there are no hidden channels, handle output case.
out_channels = in_channels
for i, out_channels in enumerate(hidden_channels):
conv_layer = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_sizes[i],
stride=strides[i],
padding=paddings[i],
padding_mode=padding_mode)
height = output_height_2d(conv_layer, height)
width = output_width_2d(conv_layer, width)
hidden_w_init(conv_layer.weight)
hidden_b_init(conv_layer.bias)
self._cnn_layers.add_module(f'conv_{i}', conv_layer)
if layer_normalization:
self._cnn_layers.add_module(
f'layer_norm_{i}',
nn.LayerNorm((out_channels, height, width)))
if hidden_nonlinearity:
self._cnn_layers.add_module(f'non_linearity_{i}',
NonLinearity(hidden_nonlinearity))
if max_pool:
pool = nn.MaxPool2d(kernel_size=pool_shape[i],
stride=pool_stride[i])
height = output_height_2d(pool, height)
width = output_width_2d(pool, width)
self._cnn_layers.add_module(f'max_pooling_{i}', pool)
in_channels = out_channels
output_dims = out_channels * height * width
if spec.output_space is None:
final_spec = InOutSpec(
spec.input_space,
akro.Box(low=-np.inf, high=np.inf, shape=(output_dims, )))
self._final_layer = None
else:
final_spec = spec
# Checked at start of __init__
self._final_layer = nn.Linear(output_dims,
spec.output_space.shape[0])
self.spec = final_spec
# pylint: disable=arguments-differ
def forward(self, x):
"""Forward method.
Args:
x (torch.Tensor): Input values. Should match image_format
                specified at construction (either NCHW or NHWC).
Returns:
List[torch.Tensor]: Output values
"""
# Transform single values into batch, if necessary.
if len(x.shape) == 3:
x = x.unsqueeze(0)
# This should be the single place in torch that image normalization
# happens
if isinstance(self.spec.input_space, akro.Image):
x = torch.div(x, 255.0)
assert len(x.shape) == 4
if self._format == 'NHWC':
# Convert to internal NCHW format
x = x.permute((0, 3, 1, 2))
for layer in self._cnn_layers:
x = layer(x)
if self._format == 'NHWC':
# Convert back to NHWC (just in case)
x = x.permute((0, 2, 3, 1))
# Remove non-batch dimensions
x = x.reshape(x.shape[0], -1)
# Apply final linearity, if it was requested.
if self._final_layer is not None:
x = self._final_layer(x)
return x
def _check_spec(spec, image_format):
"""Check that an InOutSpec is suitable for a CNNModule.
Args:
spec (garage.InOutSpec): Specification of inputs and outputs. The
input should be in 'NCHW' format: [batch_size, channel, height,
width]. Will print a warning if the channel size is not 1 or 3.
If output_space is specified, then a final linear layer will be
inserted to map to that dimensionality. If output_space is None,
it will be filled in with the computed output space.
image_format (str): Either 'NCHW' or 'NHWC'. Should match the input
specification. Gym uses NHWC by default, but PyTorch uses NCHW by
default.
Returns:
tuple[int, int, int]: The input channels, height, and width.
Raises:
ValueError: If spec isn't suitable for a CNNModule.
"""
# pylint: disable=no-else-raise
input_space = spec.input_space
output_space = spec.output_space
# Don't use isinstance, since akro.Space is guaranteed to inherit from
# gym.Space
if getattr(input_space, 'shape', None) is None:
raise ValueError(
f'input_space to CNNModule is {input_space}, but should be an '
f'akro.Box or akro.Image')
elif len(input_space.shape) != 3:
raise ValueError(
f'Input to CNNModule is {input_space}, but should have three '
f'dimensions.')
if (output_space is not None and not (hasattr(output_space, 'shape')
and len(output_space.shape) == 1)):
raise ValueError(
f'output_space to CNNModule is {output_space}, but should be '
f'an akro.Box with a single dimension or None')
if image_format == 'NCHW':
in_channels = spec.input_space.shape[0]
height = spec.input_space.shape[1]
width = spec.input_space.shape[2]
elif image_format == 'NHWC':
height = spec.input_space.shape[0]
width = spec.input_space.shape[1]
in_channels = spec.input_space.shape[2]
else:
raise ValueError(
f'image_format has value {image_format!r}, but must be either '
f"'NCHW' or 'NHWC'")
if in_channels not in (1, 3):
warnings.warn(
f'CNNModule input has {in_channels} channels, but '
f'1 or 3 channels are typical. Consider changing the CNN '
f'image_format.')
return in_channels, height, width
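# --- Editor's note: hedged usage sketch, not part of the original garage source. ---
# It shows one plausible way to construct and call CNNModule for 3-channel 32x32
# NCHW inputs; the channel/kernel/stride values and the InOutSpec keyword names
# below are assumptions inferred from the code above, not a documented recipe.
if __name__ == '__main__':
    _example_spec = InOutSpec(
        input_space=akro.Box(low=-np.inf, high=np.inf, shape=(3, 32, 32)),
        output_space=None)
    _example_cnn = CNNModule(spec=_example_spec,
                             image_format='NCHW',
                             hidden_channels=(32, 64),
                             kernel_sizes=(3, 3),
                             strides=(2, 2))
    # Forward a dummy batch of 4 images; the output is the flattened conv features.
    print(_example_cnn(torch.zeros((4, 3, 32, 32))).shape)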
|
tensornetwork/backends/tensorflow/tensorflow_tensornetwork_test.py | khanhgithead/TensorNetwork | 1,681 | 12779575 | """Tests for graphmode_tensornetwork."""
import numpy as np
import tensorflow as tf
from tensornetwork import (contract, connect, flatten_edges_between,
contract_between, Node)
import pytest
class GraphmodeTensorNetworkTest(tf.test.TestCase):
def test_basic_graphmode(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.ones(10), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
sess = tf.compat.v1.Session()
final_val = sess.run(final_tensor)
self.assertAllClose(final_val, 10.0)
def test_gradient_decent(self):
# pylint: disable=not-context-manager
with tf.compat.v1.Graph().as_default():
a = Node(tf.Variable(tf.ones(10)), backend="tensorflow")
b = Node(tf.ones(10), backend="tensorflow")
e = connect(a[0], b[0])
final_tensor = contract(e).get_tensor()
opt = tf.compat.v1.train.GradientDescentOptimizer(0.001)
train_op = opt.minimize(final_tensor)
sess = tf.compat.v1.Session()
sess.run(tf.compat.v1.global_variables_initializer())
self.assertAllClose(sess.run(final_tensor), 10.0)
sess.run(train_op)
self.assertLess(sess.run(final_tensor), 10.0)
def test_dynamic_network_sizes(self):
@tf.function
def f(x, n):
x_slice = x[:n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
e = connect(n1[0], n2[0])
return contract(e).get_tensor()
x = np.ones(10)
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 2.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 3.0)
@pytest.mark.skip(reason="Test fails due to probable bug in tensorflow 2.0.0")
def test_dynamic_network_sizes_contract_between(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract_between(n1, n2).get_tensor()
x = tf.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_standard(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
n2 = Node(x_slice, backend="tensorflow")
connect(n1[0], n2[0])
connect(n1[1], n2[1])
connect(n1[2], n2[2])
return contract(flatten_edges_between(n1, n2)).get_tensor()
x = np.ones((3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), 24.0)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), 36.0)
def test_dynamic_network_sizes_flatten_trace(self):
@tf.function
def f(x, n):
x_slice = x[..., :n]
n1 = Node(x_slice, backend="tensorflow")
connect(n1[0], n1[2])
connect(n1[1], n1[3])
return contract(flatten_edges_between(n1, n1)).get_tensor()
x = np.ones((3, 4, 3, 4, 5))
self.assertAllClose(f(x, tf.convert_to_tensor(2)), np.ones((2,)) * 12)
self.assertAllClose(f(x, tf.convert_to_tensor(3)), np.ones((3,)) * 12)
def test_batch_usage(self,):
def build_tensornetwork(tensors):
a = Node(tensors[0], backend="tensorflow")
b = Node(tensors[1], backend="tensorflow")
e = connect(a[0], b[0])
return contract(e).get_tensor()
tensors = [np.ones((5, 10)), np.ones((5, 10))]
result = tf.map_fn(build_tensornetwork, tensors, dtype=tf.float64)
np.testing.assert_allclose(result, np.ones(5) * 10)
if __name__ == '__main__':
tf.test.main()
|
runtests.py | prohfesor/tapiriik | 1,445 | 12779581 | <reponame>prohfesor/tapiriik
import tapiriik.database
tapiriik.database.db = tapiriik.database._connection["tapiriik_test"]
tapiriik.database.cachedb = tapiriik.database._connection["tapiriik_cache_test"]
from tapiriik.testing import *
import unittest
unittest.main()
tapiriik.database._connection.drop_database("tapiriik_test")
tapiriik.database._connection.drop_database("tapiriik_cache_test")
|
koku/providers/azure/client.py | rubik-ai/koku | 157 | 12779586 | #
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Azure Client Configuration."""
from azure.common.credentials import ServicePrincipalCredentials
from azure.mgmt.costmanagement import CostManagementClient
from azure.mgmt.resource import ResourceManagementClient
from azure.mgmt.storage import StorageManagementClient
from azure.storage.blob import BlobServiceClient
from msrestazure.azure_cloud import AZURE_CHINA_CLOUD
from msrestazure.azure_cloud import AZURE_GERMAN_CLOUD
from msrestazure.azure_cloud import AZURE_PUBLIC_CLOUD
from msrestazure.azure_cloud import AZURE_US_GOV_CLOUD
class AzureClientFactory:
"""Azure client factory.
This class holds the Azure credentials and can create Service Clients for
querying the Azure Service APIs.
Args:
subscription_id (str): Subscription ID
tenant_id (str): Tenant ID for your Azure Subscription
client_id (str): Service Principal Application ID
client_secret (str): Service Principal Password
cloud (str): Cloud selector, must be one of ['china', 'germany', 'public', 'usgov']
"""
def __init__(self, subscription_id, tenant_id, client_id, client_secret, cloud="public"):
"""Constructor."""
self._subscription_id = subscription_id
clouds = {
"china": AZURE_CHINA_CLOUD,
"germany": AZURE_GERMAN_CLOUD,
"public": AZURE_PUBLIC_CLOUD,
"usgov": AZURE_US_GOV_CLOUD,
}
self._credentials = ServicePrincipalCredentials(
            client_id=client_id, secret=client_secret, tenant=tenant_id, cloud_environment=clouds.get(cloud, AZURE_PUBLIC_CLOUD)
)
@property
def credentials(self):
"""Service Principal Credentials property."""
return self._credentials
@property
def cost_management_client(self):
"""Get cost management client with subscription and credentials."""
return CostManagementClient(self.credentials, self.subscription_id)
@property
def resource_client(self):
"""Return a resource client."""
return ResourceManagementClient(self.credentials, self.subscription_id)
@property
def storage_client(self):
"""Get storage client with subscription and credentials."""
return StorageManagementClient(self.credentials, self.subscription_id)
@property
def subscription_id(self):
"""Subscription ID property."""
return self._subscription_id
def cloud_storage_account(self, resource_group_name, storage_account_name):
"""Get a BlobServiceClient."""
storage_account_keys = self.storage_client.storage_accounts.list_keys(
resource_group_name, storage_account_name
)
        # Add a check that keys exist before taking the first value
key = storage_account_keys.keys[0]
connect_str = (
f"DefaultEndpointsProtocol=https;"
f"AccountName={storage_account_name};"
f"AccountKey={key.value};"
f"EndpointSuffix=core.windows.net"
)
return BlobServiceClient.from_connection_string(connect_str)
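# --- Editor's note: hedged usage sketch, not part of the original koku source. ---
# All identifiers below (subscription, tenant, client id/secret, resource group
# and storage account names) are hypothetical placeholders; constructing
# ServicePrincipalCredentials requires a real Azure service principal to run.
if __name__ == "__main__":
    factory = AzureClientFactory(
        subscription_id="00000000-0000-0000-0000-000000000000",
        tenant_id="11111111-1111-1111-1111-111111111111",
        client_id="22222222-2222-2222-2222-222222222222",
        client_secret="example-secret",  # placeholder only
        cloud="public",
    )
    # List storage accounts in the subscription, then open a blob service client.
    for account in factory.storage_client.storage_accounts.list():
        print(account.name)
    blob_service = factory.cloud_storage_account("example-rg", "examplestorage")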
|
high-availability-endpoint/python/region_lookup.py | fortunecookiezen/aws-health-tools | 825 | 12779599 | <filename>high-availability-endpoint/python/region_lookup.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import dns.resolver
class RegionLookupError(Exception):
"""Rasied when there was a problem when looking up the active region"""
pass
def active_region():
qname = 'global.health.amazonaws.com'
try:
answers = dns.resolver.resolve(qname, 'CNAME')
except Exception as e:
raise RegionLookupError('Failed to resolve {}'.format(qname), e)
if len(answers) != 1:
raise RegionLookupError('Failed to get a single answer when resolving {}'.format(qname))
name = str(answers[0].target) # e.g. health.us-east-1.amazonaws.com.
region_name = name.split('.')[1] # Region name is the 1st in split('.') -> ['health', 'us-east-1', 'amazonaws', 'com', '']
return region_name |
Z - Tool Box/x2john/ccache2john.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1,290 | 12779605 | #!/usr/bin/env python2
"""
This script extracts crackable hashes from krb5's credential cache files (e.g.
/tmp/krb5cc_1000).
NOTE: This attack technique only works against MS Active Directory servers.
This was tested with CentOS 7.4 client running krb5-1.15.1 software against a
Windows 2012 R2 Active Directory server.
Usage: python ccache2john.py ccache_file
Upstream: https://github.com/rvazarkar/KrbCredExport
Authors: <NAME> (main author), <NAME> (splitting support), and <NAME> (misc. glue)
Resources,
https://lapo.it/asn1js/
https://tools.ietf.org/html/rfc1510#section-5.8.1
https://github.com/CoreSecurity/impacket/tree/master/impacket/krb5
https://www.gnu.org/software/shishi/manual/html_node/The-Credential-Cache-Binary-File-Format.html
https://github.com/wireshark/wireshark/blob/master/epan/dissectors/asn1/kerberos/KerberosV5Spec2.asn
"""
import sys
import os.path
import time
import struct
import datetime
from pyasn1.codec.ber import decoder
# LB is a single byte representing the length of the rest of the section
# LT is a 3 byte structure consisting of the byte 82 followed by 2 bytes representing the length of the rest of the file
# header {
# uint16 tag
# uint16 taglen
# uint8[taglen] tagdata
# }
class Header:
def __init__(self):
self.tag = None
self.taglen = None
self.deltatime = DeltaTime()
def parsefile(self, f):
self.tag, self.taglen = struct.unpack(">HH", f.read(4))
self.deltatime.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">HH", self.tag, self.taglen)
r += self.deltatime.tostring()
return r
# deltatime {
# uint32 time_offset
# uint32 usec_offset
# }
class DeltaTime:
def __init__(self):
self.usec_offset = None
self.time_offset = None
def parsefile(self, f):
self.time_offset, self.usec_offset = struct.unpack(">LL", f.read(8))
def tostring(self):
r = ''
r += struct.pack(">LL", self.time_offset, self.usec_offset)
return r
# ccacheheader {
# uint16 version
# uint16 header_len
# header[] headers
# principal primary_principal
# }
class CCacheHeader:
def __init__(self):
self.version = None
self.header_length = None
self.header = Header()
def parsefile(self, f):
self.version, = struct.unpack(">H", f.read(2))
self.header_length, = struct.unpack(">H", f.read(2))
# self.header.parsefile(f) # this is perhaps buggy?
f.read(self.header_length)
def tostring(self):
r = ''
r += struct.pack(">HH", self.version, self.header_length)
r += self.header.tostring()
return r
# times {
# uint32 authtime
# uint32 starttime
# uint32 endtime
# uint32 renew_till
# }
class KerbTimes:
def __init__(self):
self.authtime = None
self.starttime = None
self.endtime = None
self.renew_till = None
def parsefile(self, f):
self.authtime, self.starttime, self.endtime, self.renew_till = struct.unpack(">IIII", f.read(16))
def tostring(self):
return struct.pack(">IIII", self.authtime, self.starttime, self.endtime, self.renew_till)
# counted_octet {
# uint32 length
# uint8[char] data
# }
class CountedOctet:
def __init__(self):
self.length = None
self.data = None
def parsefile(self, f):
self.length, = struct.unpack(">L", f.read(4))
self.data, = struct.unpack(">%ds" % self.length, f.read(self.length))
def tostring(self):
r = b''
r += struct.pack(">L", self.length)
r += struct.pack(">%ds" % self.length, self.data)
return r
# keyblock {
# uint16 keytype
# uint16 etype
# uint16 keylen
# uint8[keylen] key
# }
class Keyblock:
def __init__(self):
self.keytype = None
self.etype = None
self.keylen = None
self.key = None
def parsefile(self, f):
self.keytype, self.etype, self.keylen = struct.unpack(">HHH", f.read(6))
self.key, = struct.unpack(">%ds" % self.keylen, f.read(self.keylen))
def tostring(self):
r = ''
r += struct.pack(">HHH", self.keytype, self.etype, self.keylen)
r += struct.pack(">%ds" % self.keylen, self.key)
return r
# principal {
# uint32 name_type
# uint32 num_components
# counted_octet realm
# counted_octet[num_components] components
# }
class Principal:
def __init__(self):
self.name_type = None
self.num_components = None
self.realm = CountedOctet()
self.components = []
def parsefile(self, f):
self.name_type, self.num_components = struct.unpack(">LL", f.read(8))
self.realm.parsefile(f)
for i in range(0, self.num_components):
component = CountedOctet()
component.parsefile(f)
self.components.append(component.data)
def tostring(self):
r = ''
r += struct.pack(">LL", self.name_type, self.num_components)
r += self.realm.tostring()
for i in self.components:
r += struct.pack(">L", len(i))
r += i
return r
# address {
# uint16 address_type
# counted_octet address
# }
class Address:
def __init__(self):
self.address_type = None
self.address = CountedOctet()
def parsefile(self, f):
self.address_type, = struct.unpack(">H", f.read(2))
self.address.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">H", self.address_type)
r += self.address.tostring()
return r
# authdata {
# uint16 authtype
# counted_octet authdata
# }
class AuthData:
def __init__(self):
self.authtype = None
self.authdata = CountedOctet()
def parsefile(self, f):
self.authtype, = struct.unpack(">H", f.read(2))
self.authdata.parsefile(f)
def tostring(self):
r = ''
r += struct.pack(">H", self.authtype)
r += self.authdata.tostring()
return r
# credential {
# principal client
# principal server
# keyblock key
# times timedata
# uint8 skey
# uint32 tktFlags (Reverse Byte Order!)
# uint32 num_address
# address[num_address] addresses
# uint32 num_authdata
# authdata[num_authdata] auths
# counted_octet ticket_1
# counted_octet ticket_2 (nothing here in what I've seen)
# }
class Credential:
def __init__(self):
self.client = Principal()
self.server = Principal()
self.keyblock = Keyblock()
self.times = KerbTimes()
self.is_skey = None
self.tktFlags = None
self.num_address = None
self.address = []
self.num_authdata = None
self.authdata = []
self.ticket = CountedOctet()
self.secondticket = CountedOctet()
def parsefile(self, f):
self.client.parsefile(f)
self.server.parsefile(f)
self.keyblock.parsefile(f)
self.times.parsefile(f)
self.is_skey, = struct.unpack(">B", f.read(1))
self.tktFlags, = struct.unpack("<I", f.read(4))
self.num_address, = struct.unpack(">I", f.read(4))
for i in range(0, self.num_address):
self.address.append(Address().parsefile(f))
self.num_authdata, = struct.unpack(">I", f.read(4))
for i in range(0, self.num_authdata):
self.authdata.append(AuthData().parsefile(f))
self.ticket.parsefile(f)
self.secondticket.parsefile(f)
def tostring(self):
r = ''
r += self.client.tostring()
r += self.server.tostring()
r += self.keyblock.tostring()
r += self.times.tostring()
r += struct.pack(">B", self.is_skey)
r += struct.pack("<I", self.tktFlags)
r += struct.pack(">I", self.num_address)
for i in self.address:
r += i.tostring()
r += struct.pack(">I", self.num_authdata)
for i in self.authdata:
r += i.tostring()
r += self.ticket.tostring()
r += self.secondticket.tostring()
return r
# Prepend, shortened for convenience
def p(a, b):
return b + a
# Returns the length of s as a single byte
def clen(s):
return chr(len(s))
# key {
# 0xA0 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 key_type
# 0xA1 LB
# 0x03 LB
# keydata
# }
class Key:
def __init__(self):
self.key = None
self.keytype = None
def parsefile(self, f):
f.read(8)
self.keytype, = struct.unpack('>B', f.read(1))
f.read(3)
keylen, = struct.unpack('>B', f.read(1))
self.key, = struct.unpack(">%ds" % keylen, f.read(keylen))
def tostring(self):
r = ''
r += self.key
r = p(r, clen(r))
r = p(r, '\x04')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.keytype))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA0')
return r
# This section represents the primary principal realm. Corresponds to the domain name
# prealm {
# 0xA1 LB
# 0x1B LB
# Primary Principal Realm
# }
class PRealm:
def __init__(self):
self.principal_realm = None
def parsefile(self, f):
f.read(3)
length, = struct.unpack(">b", f.read(1))
self.principal_realm, = struct.unpack(">%ds" % length, f.read(length))
def tostring(self):
r = ''
r += self.principal_realm
r = p(r, clen(r))
r = p(r, '\x1B')
r = p(r, clen(r))
r = p(r, '\xA1')
return r
# This section represents the primary principal realm
# pname {
# 0xA2 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 name_type
# 0xA1 LB
# 0x30 LB
# 0x1B LB
# Primary Principal Name
# }
class PName:
def __init__(self):
self.principal_components = []
self.principal_name_type = None
def parsefile(self, f):
f.read(8)
self.principal_name_type, = struct.unpack(">B", f.read(1))
f.read(3)
rem_length, = struct.unpack(">B", f.read(1))
while (rem_length > 0):
f.read(1)
l, = struct.unpack(">B", f.read(1))
component, = struct.unpack("%ds" % l, f.read(l))
self.principal_components.append(component)
rem_length -= (2 + l)
def tostring(self):
r = ''
for s in self.principal_components:
r += '\x1B' + chr(len(s)) + s
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.principal_name_type))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA2')
return r
# This section details flags for the ticket
# tktflags {
# 0xA3 LB
# 0x03 LB
# 0x00 Always 0, apparently number of unused bytes. tktFlags is always a uint32
# uint32 Ticket Flags
# }
class TicketFlags:
def __init__(self):
self.ticket_flags = None
def parsefile(self, f):
f.read(5)
self.ticket_flags, = struct.unpack("I", f.read(4))
def tostring(self):
r = ''
r += struct.pack("I", self.ticket_flags)
r = p(r, '\x00')
r = p(r, clen(r))
r = p(r, '\x03')
r = p(r, clen(r))
r = p(r, '\xA3')
return r
# These sections contain the ticket timestamps. Note that the timestamps are in a consistent format, so length tags are always the same
# Timestamp format is YYYYmmddHHMMSSZ and must be UTC!
# 0xA5 is starttime, 0xA6 is endtime, 0xA7 is renew_till
# time {
# uint8 Identifier
# LB (Always 0x11)
# 0x18 LB (Always 0x0F)
# start_time
# }
class Time:
def __init__(self, identifier):
self.identifier = identifier
self.time = None
@staticmethod
def convert_to_unix(timestr):
epoch = datetime.datetime(1970, 1, 1)
t = datetime.datetime.strptime(timestr[:-1], '%Y%m%d%H%M%S')
td = t - epoch
return int((td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / 1e6)
@staticmethod
def convert_to_kerbtime(unixtime):
t = datetime.datetime.utcfromtimestamp(unixtime)
t = ''.join([t.strftime('%Y'), t.strftime('%m'), t.strftime('%d'),
t.strftime('%H'), t.strftime('%M'), t.strftime('%S'), 'Z'])
return t
def parsefile(self, f):
self.identifier, = struct.unpack(">B", f.read(1))
f.read(3)
strtime, = struct.unpack(">15s", f.read(15))
self.time = Time.convert_to_unix(strtime)
def tostring(self):
r = ''
r += struct.pack(">15s", Time.convert_to_kerbtime(self.time))
r = p(r, '\x11\x18\x0F')
r = p(r, chr(self.identifier))
return r
# This section represents the server realm (domain)
# srealm {
# 0xA8 LB
# 0x1B LB
# server_realm (domain name of server)
# }
class SRealm:
def __init__(self):
self.server_realm = None
def parsefile(self, f):
f.read(3)
length, = struct.unpack(">B", f.read(1))
self.server_realm, = struct.unpack(">%ds" % length, f.read(length))
def tostring(self):
r = ''
r += self.server_realm
r = p(r, clen(r))
r = p(r, '\x1B')
r = p(r, clen(r))
r = p(r, '\xA8')
return r
# This section represents the server name components
# sname {
# 0xA9 LB
# 0x30 LB
# 0xA0 0x03 0x02 0x01
# uint8 server_name_type
# 0xA1 LB
# 0x30 LB
# components[]
# }
#
# components {
# 0x1B
# uint8 Component Length
# Component
# }
class SName:
def __init__(self):
self.server_components = []
self.server_name_type = None
def parsefile(self, f):
f.read(8)
self.server_name_type, = struct.unpack(">B", f.read(1))
f.read(3)
rem_length, = struct.unpack(">B", f.read(1))
while rem_length > 0:
f.read(1)
l, = struct.unpack(">B", f.read(1))
component, = struct.unpack(">%ds" % l, f.read(l))
self.server_components.append(component)
rem_length -= (2 + l)
def tostring(self):
r = ''
for s in self.server_components:
r += '\x1B' + chr(len(s)) + s
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA1')
r = p(r, chr(self.server_name_type))
r = p(r, '\xA0\x03\x02\x01')
r = p(r, clen(r))
r = p(r, '\x30')
r = p(r, clen(r))
r = p(r, '\xA9')
return r
# header {
# 0x7D LT
# 0x30 LT
# 0xA0 LT
# 0x30 LT
# 0x30 LT
# }
class KrbCredInfo:
def __init__(self):
self.krbcredinfo = None
self.key = Key()
self.prealm = PRealm()
self.pname = PName()
self.flags = TicketFlags()
self.starttime = Time(165)
self.endtime = Time(166)
self.renew_till = Time(167)
self.srealm = SRealm()
self.sname = SName()
def parsefile(self, f):
f.read(20)
self.key.parsefile(f)
self.prealm.parsefile(f)
self.pname.parsefile(f)
self.flags.parsefile(f)
self.starttime.parsefile(f)
self.endtime.parsefile(f)
self.renew_till.parsefile(f)
self.srealm.parsefile(f)
self.sname.parsefile(f)
self.krbcredinfo = self.key.tostring() + self.prealm.tostring() + self.pname.tostring() + self.flags.tostring() + \
self.starttime.tostring() + self.endtime.tostring() + \
self.renew_till.tostring() + self.srealm.tostring() + \
self.sname.tostring()
def tostring(self):
r = self.krbcredinfo
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA0\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x7D\x82')
return r
def createkrbcrdinfo(self):
self.krbcredinfo = self.key.tostring() + self.prealm.tostring() + self.pname.tostring() + self.flags.tostring() + \
self.starttime.tostring() + self.endtime.tostring() + \
self.renew_till.tostring() + self.srealm.tostring() + \
self.sname.tostring()
# The encpart serves as a sort of header for the EncKrbCredPart
# encpart {
# 0xA0 0x03 0x02 0x01
# uint8 etype (Seems to always be 0 in my testing)
# 0xA2 LT
# 0x04 LT
# }
class EncPart:
def __init__(self):
self.krbcredinfo = KrbCredInfo()
self.etype = None
def parsefile(self, f):
f.read(4)
self.etype, = struct.unpack(">B", f.read(1))
f.read(8)
self.krbcredinfo.parsefile(f)
def tostring(self):
r = self.krbcredinfo.tostring()
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x04\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA2\x82')
r = p(r, chr(self.etype))
r = p(r, '\xA0\x03\x02\x01')
return r
# This section represents the tickets section of the overall KrbCred
# tickets {
# 0xA2 0x82
# uint16 ticket_length + 4
# 0x30 0x82
# uint16 ticket_length
# ticket
# 0xA3 LT
# 0x30 LT
# }
class TicketPart:
def __init__(self):
self.ticket = None
self.encpart = EncPart()
def parsefile(self, f):
f.read(6)
ticketlen, = struct.unpack(">H", f.read(2))
self.ticket, = struct.unpack(">%ds" % ticketlen, f.read(ticketlen))
f.read(8)
self.encpart.parsefile(f)
def tostring(self):
r = self.encpart.tostring()
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\xA3\x82')
r = p(r, self.ticket)
r = p(r, struct.pack(">H", len(self.ticket)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(self.ticket) + 4))
r = p(r, '\xA2\x82')
return r
# This is the header for the kerberos ticket, and the final section
# header {
# 0x76 LT
# 0x30 LT
# 0xA0 0x03 0x02 0x01
# uint8 pvno (Protocol Version, always 0x05)
# 0xA1 0x03 0x02 0x01
# uint8 msg-type (Always 0x16 for krbcred)
# }
class KrbCredHeader:
def __init__(self):
self.ticketpart = TicketPart()
def parsefile(self, f):
f.read(18)
self.ticketpart.parsefile(f)
def tostring(self):
r = self.ticketpart.tostring()
r = p(r, '\xA1\x03\x02\x01\x16')
r = p(r, '\xA0\x03\x02\x01\x05')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x30\x82')
r = p(r, struct.pack(">H", len(r)))
r = p(r, '\x76\x82')
return r
# borrowed from https://stackoverflow.com
def swap32(i):
return struct.unpack("<I", struct.pack(">I", i))[0]
# src/include/krb5/krb5.h
"""
#define TKT_FLG_FORWARDABLE 0x40000000
#define TKT_FLG_FORWARDED 0x20000000
#define TKT_FLG_PROXIABLE 0x10000000
#define TKT_FLG_PROXY 0x08000000
#define TKT_FLG_MAY_POSTDATE 0x04000000
#define TKT_FLG_POSTDATED 0x02000000
#define TKT_FLG_INVALID 0x01000000
#define TKT_FLG_RENEWABLE 0x00800000
#define TKT_FLG_PRE_AUTH 0x00200000
#define TKT_FLG_HW_AUTH 0x00100000
#define TKT_FLG_TRANSIT_POLICY_CHECKED 0x00080000
#define TKT_FLG_OK_AS_DELEGATE 0x00040000
#define TKT_FLG_ENC_PA_REP 0x00010000
#define TKT_FLG_ANONYMOUS 0x00008000
"""
TKT_FLG_INITIAL = 0x00400000
if __name__ == "__main__":
if len(sys.argv) < 2:
print("Usage: {0} <input credential cache file>".format(sys.argv[0]))
print("\nExample: {0} /tmp/krb5cc_1000".format(sys.argv[0]))
sys.exit(0)
with open(sys.argv[1], 'rb') as f:
fileid, = struct.unpack(">B", f.read(1))
if fileid == 0x5: # Credential Cache (ccache)
f.seek(0)
header = CCacheHeader()
primary_principal = Principal()
credential = Credential()
header.parsefile(f)
primary_principal.parsefile(f)
i = 0
sys.stderr.write("WARNING: Not all the hashes generated by this program are crackable. Please select the relevant hashes manually!\n")
time.sleep(2)
# Check if you've reached the end of the file. If not get the next credential
while(f.read(1) != ''):
f.seek(-1, 1)
credential.parsefile(f)
out = []
KrbCred = KrbCredHeader()
KrbCred.ticketpart.ticket = credential.ticket.data # extract hash from here!
try:
# this code is terrible!
etype = str(decoder.decode(credential.ticket.data)[0][3][0])
data = str(decoder.decode(credential.ticket.data)[0][3][2])
if etype != "23":
sys.stderr.write("Unsupported etype %s found. Such hashes can't be cracked it seems.\n" % etype)
continue
except:
continue
# print(credential.ticket.data.encode("hex"))
KrbCred.ticketpart.encpart.etype = credential.keyblock.etype
krbcredinfo = KrbCred.ticketpart.encpart.krbcredinfo
krbcredinfo.key.key = credential.keyblock.key
krbcredinfo.key.keytype = credential.keyblock.keytype
# print(credential.keyblock.keytype)
krbcredinfo.prealm.principal_realm = primary_principal.realm.data
# print(primary_principal.realm.data)
krbcredinfo.pname.principal_components = primary_principal.components
# print(primary_principal.components)
krbcredinfo.pname.principal_name_type = primary_principal.name_type
krbcredinfo.flags.ticket_flags = credential.tktFlags
tktFlags = swap32(credential.tktFlags)
if tktFlags & TKT_FLG_INITIAL:
continue
krbcredinfo.starttime.time = credential.times.starttime
krbcredinfo.endtime.time = credential.times.endtime
krbcredinfo.renew_till.time = credential.times.renew_till
krbcredinfo.srealm.server_realm = credential.server.realm.data
# print(credential.server.realm.data)
krbcredinfo.sname.server_components = credential.server.components
for c in credential.server.components: # dirty hack
if c not in ['krbtgt', 'krb5_ccache_conf_data', 'pa_type']:
out.append(c)
name = b"-".join(out[-2:])
krbcredinfo.sname.server_name_type = credential.server.name_type
krbcredinfo.createkrbcrdinfo()
sys.stdout.write("%s:$krb5tgs$%s$%s$%s\n" % (os.path.basename(name), etype, data[:16].encode("hex"), data[16:].encode("hex")))
"""
            # Write separate files for each ticket found. The postfix is just a number for now.
with open(sys.argv[2] + "_" + str(i), 'wb') as o:
o.write(KrbCred.tostring())
i = i + 1
"""
sys.exit(0)
elif fileid == 0x76: # untested code, don't use!
f.seek(0)
KrbCred = KrbCredHeader()
KrbCred.parsefile(f)
header = CCacheHeader()
primary_principal = Principal()
credential = Credential()
header.version = 0x504
header.header_length = 0xC
header.header.deltatime.time_offset = 4294967295
header.header.deltatime.usec_offset = 0
header.header.tag = 0x01
header.header.taglen = 0x08
KrbCredInfo_ = KrbCred.ticketpart.encpart.krbcredinfo
primary_principal.name_type = KrbCredInfo_.pname.principal_name_type
primary_principal.components = KrbCredInfo_.pname.principal_components
primary_principal.num_components = len(primary_principal.components)
            primary_principal.realm.data = KrbCredInfo_.prealm.principal_realm
            primary_principal.realm.length = len(primary_principal.realm.data)
            credential.client.name_type = KrbCredInfo_.pname.principal_name_type
            credential.client.components = KrbCredInfo_.pname.principal_components
            credential.client.num_components = len(credential.client.components)
            credential.client.realm.data = KrbCredInfo_.prealm.principal_realm
            credential.client.realm.length = len(credential.client.realm.data)
            credential.server.name_type = KrbCredInfo_.sname.server_name_type
            credential.server.components = KrbCredInfo_.sname.server_components
            credential.server.num_components = len(credential.server.components)
            credential.server.realm.data = KrbCredInfo_.srealm.server_realm
            credential.server.realm.length = len(credential.server.realm.data)
            credential.keyblock.etype = KrbCred.ticketpart.encpart.etype
            credential.keyblock.key = KrbCredInfo_.key.key
            credential.keyblock.keylen = len(credential.keyblock.key)
            credential.keyblock.keytype = KrbCredInfo_.key.keytype
            credential.times.authtime = KrbCredInfo_.starttime.time
            credential.times.starttime = KrbCredInfo_.starttime.time
            credential.times.endtime = KrbCredInfo_.endtime.time
            credential.times.renew_till = KrbCredInfo_.renew_till.time
            credential.is_skey = 0
            credential.tktFlags = KrbCredInfo_.flags.ticket_flags
credential.num_address = 0
credential.address = []
credential.num_authdata = 0
credential.authdata = []
credential.ticket.data = KrbCred.ticketpart.ticket
credential.ticket.length = len(credential.ticket.data)
credential.secondticket.length = 0
credential.secondticket.data = ''
with open(sys.argv[2], 'wb') as o:
o.write(header.tostring())
o.write(primary_principal.tostring())
o.write(credential.tostring())
sys.exit(0)
else:
print('Unknown File Type!')
sys.exit(0)
|
dual_encoder/keras_layers_test.py | garyxcheng/federated | 330 | 12779623 | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
import tensorflow as tf
from dual_encoder import keras_layers
l2_normalize_fn = lambda x: tf.keras.backend.l2_normalize(x, axis=-1)
class KerasLayersTest(absltest.TestCase):
def test_masked_average_3d(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = tf.constant([[True, True, True],
[False, False, True],
[True, False, False],
[False, False, False]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[1.3 / 3, 0.5 / 3],
[0.4, 0.1],
[0.9, 0.4],
[0.0, 0.0]
])
expected_mask = None
tf.debugging.assert_near(expected_average, output_average)
self.assertEqual(expected_mask, output_mask)
def test_masked_average_4d(self):
masked_average_layer = keras_layers.MaskedAverage(2)
inputs = tf.constant([
[[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
[[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]]],
])
mask = tf.constant([[[True, True, True], [True, False, True]],
[[False, False, True], [False, False, False]],
[[True, False, False], [True, True, True]],
[[False, False, False], [True, False, False]]])
output_average = masked_average_layer.call(inputs, mask=mask)
output_mask = masked_average_layer.compute_mask(inputs, mask=mask)
expected_average = tf.constant([
[[1.3 / 3, 0.5 / 3], [0.5, 0.45]],
[[0.4, 0.1], [0.0, 0.0]],
[[0.9, 0.4], [0.5, 1.3 / 3]],
[[0.0, 0.0], [0.6, 0.8]],
])
expected_mask = tf.constant([[True, True],
[True, False],
[True, True],
[False, True]])
tf.debugging.assert_near(expected_average, output_average)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_average_raises_error(self):
masked_average_layer = keras_layers.MaskedAverage(1)
inputs = tf.constant([
[[0.5, 0.3], [0.4, 0.1], [0.4, 0.1]],
[[0.6, 0.8], [0.5, 0.4], [0.4, 0.1]],
[[0.9, 0.4], [0.4, 0.1], [0.4, 0.1]],
])
mask = None
with self.assertRaises(ValueError):
masked_average_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_average_layer.compute_mask(inputs, mask=mask)
def test_masked_reshape(self):
masked_reshape_layer = keras_layers.MaskedReshape((4, 4, 2, 1), (4, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_unknown_batch_size(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = tf.constant(
[[True, False, True, True, True, False, False, False],
[True, False, True, True, True, True, False, True],
[False, True, True, False, True, True, True, True],
[False, True, True, True, True, False, False, True]])
output = masked_reshape_layer.call(inputs, mask=mask)
output_mask = masked_reshape_layer.compute_mask(inputs, mask=mask)
expected_output = tf.constant([
[[[1.0], [2.0]], [[0.5], [0.4]], [[0.4], [0.1]], [[0.0], [0.0]]],
[[[0.4], [0.1]], [[0.0], [0.0]], [[0.0], [0.0]], [[0.6], [0.8]]],
[[[0.9], [0.4]], [[0.5], [3.0]], [[0.9], [0.4]], [[0.5], [3.0]]],
[[[0.0], [0.0]], [[0.6], [0.8]], [[0.4], [0.1]], [[0.0], [0.0]]],
])
expected_mask = tf.constant(
[[[True, False], [True, True], [True, False], [False, False]],
[[True, False], [True, True], [True, True], [False, True]],
[[False, True], [True, False], [True, True], [True, True]],
[[False, True], [True, True], [True, False], [False, True]]])
tf.debugging.assert_near(expected_output, output)
tf.debugging.assert_equal(expected_mask, output_mask)
def test_masked_reshape_raises_error(self):
masked_reshape_layer = keras_layers.MaskedReshape((-1, 4, 2, 1), (-1, 4, 2))
inputs = tf.constant([
[[1.0], [2.0], [0.5], [0.4], [0.4], [0.1], [0.0], [0.0]],
[[0.4], [0.1], [0.0], [0.0], [0.0], [0.0], [0.6], [0.8]],
[[0.9], [0.4], [0.5], [3.0], [0.9], [0.4], [0.5], [3.0]],
[[0.0], [0.0], [0.6], [0.8], [0.4], [0.1], [0.0], [0.0]],
])
mask = None
with self.assertRaises(ValueError):
masked_reshape_layer.call(inputs, mask=mask)
with self.assertRaises(ValueError):
masked_reshape_layer.compute_mask(inputs, mask=mask)
def test_embedding_spreadout_regularizer_dot_product(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=0.0)
# Similarities without diagonal looks like:
# 0.0 2.0 0.1 0.3 0.0
# 2.0 0.0 1.2 1.2 2.0
# 0.1 1.2 0.0 0.1 0.2
# 0.3 1.2 0.1 0.0 0.2
# 0.0 2.0 0.2 0.2 0.0
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.47053161424
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=None,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.47053161424 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_cosine_similarity(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=0.0)
loss = regularizer(weights)
# L2 norm of above similarities.
expected_loss = 0.2890284
tf.debugging.assert_near(expected_loss, loss)
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.1,
normalization_fn=l2_normalize_fn,
l2_regularization=1.0)
l2_regularizer = tf.keras.regularizers.l2(1.0)
loss = regularizer(weights)
expected_loss = 0.2890284 + l2_regularizer(weights)
tf.debugging.assert_near(expected_loss, loss)
def test_embedding_spreadout_regularizer_no_spreadout(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.0)
loss = regularizer(weights)
expected_loss = 0.0
tf.debugging.assert_near(expected_loss, loss)
# Test that L2 normalization behaves normally.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=None,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
# Test that normalization_fn has no effect.
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = regularizer(weights)
l2_loss = l2_regularizer(weights)
tf.debugging.assert_near(l2_loss, loss)
def test_embedding_spreadout_regularizer_get_config(self):
weights = tf.constant(
[[1.0, 0.0, 0.0],
[2.0, 2.0, 2.0],
[0.1, 0.2, 0.3],
[0.3, 0.2, 0.1],
[0.0, 1.0, 0.0]])
regularizer = keras_layers.EmbeddingSpreadoutRegularizer(
spreadout_lambda=0.0,
normalization_fn=l2_normalize_fn,
l2_regularization=0.1)
config = regularizer.get_config()
expected_config = {
'spreadout_lambda': 0.0,
'normalization_fn': l2_normalize_fn,
'l2_regularization': 0.1
}
new_regularizer = (
keras_layers.EmbeddingSpreadoutRegularizer.from_config(config))
l2_regularizer = tf.keras.regularizers.l2(0.1)
loss = new_regularizer(weights)
l2_loss = l2_regularizer(weights)
self.assertEqual(config, expected_config)
tf.debugging.assert_near(l2_loss, loss)
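# --- Editor's note: hedged usage sketch, not part of the original test-suite. ---
# The tests above call the regularizer directly on a weight matrix; in training
# code it would typically be attached to an embedding layer, as sketched below.
# The vocabulary size and embedding dimension are arbitrary assumptions.
def _example_spreadout_embedding():
  return tf.keras.layers.Embedding(
      input_dim=1000,
      output_dim=64,
      embeddings_regularizer=keras_layers.EmbeddingSpreadoutRegularizer(
          spreadout_lambda=0.1,
          normalization_fn=l2_normalize_fn,
          l2_regularization=0.0))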
if __name__ == '__main__':
absltest.main()
|
pcbmode/utils/footprint.py | Hylian/pcbmode | 370 | 12779628 | #!/usr/bin/python
import os
import re
import json
from lxml import etree as et
import pcbmode.config as config
from . import messages as msg
# pcbmode modules
from . import svg
from . import utils
from . import place
import copy
from .style import Style
from .point import Point
from .shape import Shape
class Footprint():
"""
"""
def __init__(self, footprint):
self._footprint = footprint
self._shapes = {'conductor': {},
'pours': {},
'soldermask': {},
'silkscreen': {},
'assembly': {},
'solderpaste': {},
'drills': {}}
self._processPins()
self._processPours()
self._processShapes()
self._processAssemblyShapes()
def getShapes(self):
return self._shapes
def _processPins(self):
"""
Converts pins into 'shapes'
"""
pins = self._footprint.get('pins') or {}
for pin in pins:
pin_location = pins[pin]['layout']['location'] or [0, 0]
try:
pad_name = pins[pin]['layout']['pad']
except:
msg.error("Each defined 'pin' must have a 'pad' name that is defined in the 'pads' dection of the footprint.")
try:
pad_dict = self._footprint['pads'][pad_name]
except:
msg.error("There doesn't seem to be a pad definition for pad '%s'." % pad_name)
# Get the pin's rotation, if any
pin_rotate = pins[pin]['layout'].get('rotate') or 0
shapes = pad_dict.get('shapes') or []
for shape_dict in shapes:
shape_dict = shape_dict.copy()
# Which layer(s) to place the shape on
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
# Add the pin's location to the pad's location
shape_location = shape_dict.get('location') or [0, 0]
shape_dict['location'] = [shape_location[0] + pin_location[0],
shape_location[1] + pin_location[1]]
# Add the pin's rotation to the pad's rotation
shape_dict['rotate'] = (shape_dict.get('rotate') or 0) + pin_rotate
# Determine if and which label to show
show_name = pins[pin]['layout'].get('show-label') or True
if show_name == True:
pin_label = pins[pin]['layout'].get('label') or pin
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'conductor')
shape.setStyle(style)
try:
self._shapes['conductor'][layer].append(shape)
except:
self._shapes['conductor'][layer] = []
self._shapes['conductor'][layer].append(shape)
for stype in ['soldermask','solderpaste']:
# Get a custom shape specification if it exists
sdict_list = shape_dict.get(stype)
# Not defined; default
if sdict_list == None:
# Use default settings for shape based on
# the pad shape
sdict = shape_dict.copy()
# Which shape type is the pad?
shape_type = shape.getType()
# Apply modifier based on shape type
if shape_type == 'path':
sdict['scale'] = shape.getScale()*config.brd['distances'][stype]['path-scale']
elif shape_type in ['rect', 'rectangle']:
sdict['width'] += config.brd['distances'][stype]['rect-buffer']
sdict['height'] += config.brd['distances'][stype]['rect-buffer']
elif shape_type in ['circ', 'circle']:
sdict['diameter'] += config.brd['distances'][stype]['circle-buffer']
else:
pass
# Create shape based on new dictionary
sshape = Shape(sdict)
# Define style
sstyle = Style(sdict, stype)
# Apply style
sshape.setStyle(sstyle)
# Add shape to footprint's shape dictionary
#self._shapes[stype][layer].append(sshape)
try:
self._shapes[stype][layer].append(sshape)
except:
self._shapes[stype][layer] = []
self._shapes[stype][layer].append(sshape)
# Do not place shape
elif (sdict_list == {}) or (sdict_list == []):
pass
# Custom shape definition
else:
# If dict (as before support of multiple
# shapes) then append to a single element
# list
if type(sdict_list) is dict:
sdict_list = [sdict_list]
# Process list of shapes
for sdict_ in sdict_list:
sdict = sdict_.copy()
shape_loc = utils.toPoint(sdict.get('location') or [0, 0])
# Apply rotation
sdict['rotate'] = (sdict.get('rotate') or 0) + pin_rotate
# Rotate location
shape_loc.rotate(pin_rotate, Point())
sdict['location'] = [shape_loc.x + pin_location[0],
shape_loc.y + pin_location[1]]
# Create new shape
sshape = Shape(sdict)
# Create new style
sstyle = Style(sdict, stype)
# Apply style
sshape.setStyle(sstyle)
# Add shape to footprint's shape dictionary
#self._shapes[stype][layer].append(sshape)
try:
self._shapes[stype][layer].append(sshape)
except:
self._shapes[stype][layer] = []
self._shapes[stype][layer].append(sshape)
# Add pin label
if (pin_label != None):
shape.setLabel(pin_label)
drills = pad_dict.get('drills') or []
for drill_dict in drills:
drill_dict = drill_dict.copy()
drill_dict['type'] = drill_dict.get('type') or 'drill'
drill_location = drill_dict.get('location') or [0, 0]
drill_dict['location'] = [drill_location[0] + pin_location[0],
drill_location[1] + pin_location[1]]
shape = Shape(drill_dict)
style = Style(drill_dict, 'drills')
shape.setStyle(style)
try:
self._shapes['drills']['top'].append(shape)
except:
self._shapes['drills']['top'] = []
self._shapes['drills']['top'].append(shape)
def _processPours(self):
"""
"""
try:
shapes = self._footprint['layout']['pours']['shapes']
except:
return
for shape_dict in shapes:
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'conductor', 'pours')
shape.setStyle(style)
try:
self._shapes['pours'][layer].append(shape)
except:
self._shapes['pours'][layer] = []
self._shapes['pours'][layer].append(shape)
def _processShapes(self):
"""
"""
sheets = ['conductor', 'silkscreen', 'soldermask']
for sheet in sheets:
try:
shapes = self._footprint['layout'][sheet]['shapes']
except:
shapes = []
for shape_dict in shapes:
layers = utils.getExtendedLayerList(shape_dict.get('layers') or ['top'])
for layer in layers:
# Mirror the shape if it's text and on bottom later,
# but let explicit shape setting override
if layer == 'bottom':
if shape_dict['type'] == 'text':
shape_dict['mirror'] = shape_dict.get('mirror') or 'True'
shape = Shape(shape_dict)
style = Style(shape_dict, sheet)
shape.setStyle(style)
try:
self._shapes[sheet][layer].append(shape)
except:
self._shapes[sheet][layer] = []
self._shapes[sheet][layer].append(shape)
def _processAssemblyShapes(self):
"""
"""
try:
shapes = self._footprint['layout']['assembly']['shapes']
except:
return
for shape_dict in shapes:
layers = utils.getExtendedLayerList(shape_dict.get('layer') or ['top'])
for layer in layers:
shape = Shape(shape_dict)
style = Style(shape_dict, 'assembly')
shape.setStyle(style)
try:
self._shapes['assembly'][layer].append(shape)
except:
self._shapes['assembly'][layer] = []
self._shapes['assembly'][layer].append(shape)
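# --- Editor's note: hedged sketch, not part of the original pcbmode source. ---
# The expected structure of the 'footprint' dictionary is inferred from the
# processing code above; the key values below are illustrative only. Actually
# instantiating Footprint(example_footprint) additionally requires pcbmode's
# global config (config.brd and styles) to be loaded.
example_footprint = {
    'pins': {
        '1': {'layout': {'pad': 'smd', 'location': [-1.0, 0]}},
        '2': {'layout': {'pad': 'smd', 'location': [1.0, 0], 'rotate': 180}},
    },
    'pads': {
        'smd': {
            'shapes': [{'type': 'rect', 'width': 1.2, 'height': 1.2,
                        'layers': ['top']}],
        },
    },
    'layout': {
        'silkscreen': {'shapes': []},
        'assembly': {'shapes': []},
    },
}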
|
tests/test_examples.py | banesullivan/localtileserver | 105 | 12779642 | from localtileserver import examples
def test_get_blue_marble():
client = examples.get_blue_marble()
assert client.metadata()
def test_get_virtual_earth():
client = examples.get_virtual_earth()
assert client.metadata()
def test_get_arcgis():
client = examples.get_arcgis()
assert client.metadata()
def test_get_elevation():
client = examples.get_elevation()
assert client.metadata()
def test_get_bahamas():
client = examples.get_bahamas()
assert client.metadata()
def test_get_pine_gulch():
client = examples.get_pine_gulch()
assert client.metadata()
def test_get_landsat():
client = examples.get_landsat()
assert client.metadata()
def test_get_san_francisco():
client = examples.get_san_francisco()
assert client.metadata()
def test_get_oam2():
client = examples.get_oam2()
assert client.metadata()
def test_get_elevation_us():
client = examples.get_elevation_us()
assert client.metadata()
|
tools/clang/scripts/generate_compdb.py | zipated/src | 2,151 | 12779656 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Helper for generating compile DBs for clang tooling. On non-Windows platforms,
this is pretty straightforward. On Windows, the tool does a bit of extra work to
integrate the content of response files, force clang tooling to run in clang-cl
mode, etc.
"""
import argparse
import json
import os
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
tool_dir = os.path.abspath(os.path.join(script_dir, '../pylib'))
sys.path.insert(0, tool_dir)
from clang import compile_db
def main(argv):
parser = argparse.ArgumentParser()
parser.add_argument(
'-p',
required=True,
help='Path to build directory')
args = parser.parse_args()
print json.dumps(compile_db.GenerateWithNinja(args.p))
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
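# Example invocation (the output directory name is a hypothetical placeholder);
# the tool prints the compile DB as JSON on stdout, so redirect it to a file:
#   python generate_compdb.py -p out/Release > compile_commands.json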
|
utility_scripts/configureCMK.py | jjk-dev/aws-qnabot | 197 | 12779704 | <reponame>jjk-dev/aws-qnabot
import boto3
from botocore.config import Config
import argparse
import json
import base64
import sys
parser = argparse.ArgumentParser(description='Uses a specified CMK to encrypt QnABot Lambdas and Parameter Store settings')
parser.add_argument("region", help="AWS Region")
parser.add_argument("stack_arn", help="the arn of the QnABot CloudFormation Stack")
parser.add_argument("cmk_arn", help="the ARN of the Customer Master Key to use for encryption")
parser.add_argument("target_s3_bucket", help="the Name of the S3 bucket to use for server access logs")
args = type('', (), {})()
args = parser.parse_args()
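# Example invocation (every ARN, account id and bucket name below is a
# hypothetical placeholder); arguments are region, stack ARN, CMK ARN and
# the access-log bucket, in that order:
#   python configureCMK.py us-east-1 \
#       arn:aws:cloudformation:us-east-1:111122223333:stack/qnabot/00000000-0000-0000-0000-000000000000 \
#       arn:aws:kms:us-east-1:111122223333:key/00000000-0000-0000-0000-000000000000 \
#       example-access-logs-bucket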
client_config = Config(
region_name = args.region
)
lambda_client = boto3.client('lambda', config=client_config)
iam_client = boto3.client('iam', config=client_config)
role_paginator = iam_client.get_paginator('list_role_policies')
kms_client = boto3.client("kms", config=client_config)
cloudformation_client = boto3.client('cloudformation', config=client_config)
ssm_client = boto3.client('ssm', config=client_config)
s3_client = boto3.client('s3', config=client_config)
ddb_client = boto3.client('dynamodb', config=client_config)
sts_client = boto3.client('sts', config=client_config)
kinesis_client = boto3.client('firehose', config=client_config)
policy_name = "CMKPolicy4"
policy_document = {
"Version":"2012-10-17",
"Statement":[
{
"Effect":"Allow",
"Action":[
"kms:Decrypt",
"kms:Encrypt",
"kms:GenerateDataKey"
],
"Resource":args.cmk_arn
}
]
}
cmk_roles_logical_ids = [
'S3AccessRole',
'FirehoseESS3Role',
'AdminRole',
'ExportRole',
'ImportRole',
'ApiGatewayRole',
'ESCognitoRole',
'KibanaRole',
]
cmk_roles_physical_ids = []
def assign_role(role_name):
role_iterator = role_paginator.paginate(
RoleName=role_name,
PaginationConfig={
'MaxItems': 1000,
'PageSize': 1000
}
)
print(f"Updating role {role_name}...")
cmk_policy_exists = False
for role in role_iterator:
if policy_name in role["PolicyNames"]:
cmk_policy_exists = True
break
if not cmk_policy_exists:
iam_client.put_role_policy(RoleName=role_name, PolicyName = policy_name,PolicyDocument=json.dumps(policy_document))
def put_key_policy (stackname,roles):
response = kms_client.get_key_policy(KeyId = args.cmk_arn, PolicyName='default')
policy = response['Policy'].replace("\n","")
policy = json.loads(policy)
caller_identity = sts_client.get_caller_identity()
new_statement = []
for statement in policy["Statement"]:
if(statement["Sid"] != stackname):
new_statement.append(statement)
policy["Statement"] = new_statement
formatted_roles = []
for role in roles:
formatted_roles.append(f"arn:aws:iam::{caller_identity['Account']}:role/{role}")
policy["Statement"].append(
{
"Sid": stackname,
"Effect": "Allow",
"Principal": {
"AWS": formatted_roles
},
"Action": [
"kms:Encrypt",
"kms:Decrypt",
"kms:GenerateDataKey"
],
"Resource": "*"
}
)
print(f"Updating policy for key {args.cmk_arn}")
kms_client.put_key_policy(
KeyId = args.cmk_arn,
PolicyName = "default",
Policy = json.dumps(policy)
)
print(f"Policy for key {args.cmk_arn} updated.")
def process_stacks(stackname):
paginator = cloudformation_client.get_paginator('list_stack_resources')
response_iterator = paginator.paginate(
StackName=stackname,
PaginationConfig={
'MaxItems': 10000#,
}
)
for response in response_iterator:
lambda_resources = filter(lambda x: x["ResourceType"] == "AWS::Lambda::Function",response["StackResourceSummaries"])
for lambda_func in lambda_resources:
lambda_client.update_function_configuration(FunctionName=lambda_func["PhysicalResourceId"],KMSKeyArn=args.cmk_arn)
print(f"Updated function {lambda_func['PhysicalResourceId']} in stack {stackname}")
lambda_configuration = lambda_client.get_function_configuration(FunctionName=lambda_func["PhysicalResourceId"])
role_name = lambda_configuration["Role"].split("/")[-1]
assign_role(role_name)
ssm_parameters = filter(lambda x: x["ResourceType"] == "AWS::SSM::Parameter",response["StackResourceSummaries"])
for parameter in ssm_parameters:
parameter_name = parameter["PhysicalResourceId"]
parameter_response = ssm_client.get_parameter(
Name=parameter_name,
WithDecryption=True
)
parameter_value = parameter_response['Parameter']['Value']
            description = parameter_response['Parameter']["Description"] if "Description" in parameter_response['Parameter'] else ""
ssm_client.put_parameter(
Name=parameter_name,
Description=description,
Value=parameter_value,
Type='SecureString',
KeyId=args.cmk_arn,
Overwrite=True,
)
s3_buckets = filter(lambda x: x["ResourceType"] == "AWS::S3::Bucket",response["StackResourceSummaries"])
for bucket in s3_buckets:
s3_client.put_bucket_encryption(
Bucket=bucket["PhysicalResourceId"],
ServerSideEncryptionConfiguration={
'Rules': [
{
'ApplyServerSideEncryptionByDefault': {
'SSEAlgorithm': 'aws:kms',
'KMSMasterKeyID': args.cmk_arn
}
},
]
}
)
print(f"Encryption set for {bucket['PhysicalResourceId']}")
s3_client.put_bucket_logging(
Bucket=bucket["PhysicalResourceId"],
BucketLoggingStatus={
'LoggingEnabled': {
'TargetBucket': args.target_s3_bucket,
'TargetPrefix': bucket["PhysicalResourceId"] + '/'
}
}
)
print(f"Access Logs set for {bucket['PhysicalResourceId']}")
ddb_tables = filter(lambda x: x["ResourceType"] == "AWS::DynamoDB::Table",response["StackResourceSummaries"])
for table in ddb_tables:
table_description = ddb_client.describe_table(TableName = table["PhysicalResourceId"])
if('SSEDescription' not in table_description["Table"] or 'KMSMasterKeyArn' not in table_description["Table"]['SSEDescription'] or table_description["Table"]['SSEDescription']['KMSMasterKeyArn']!= args.cmk_arn ):
ddb_client.update_table(
TableName = table["PhysicalResourceId"],
SSESpecification ={
'Enabled': True,
'SSEType': 'KMS',
'KMSMasterKeyId': args.cmk_arn
}
)
kinesis_streams = filter(lambda x: x["ResourceType"] == "AWS::KinesisFirehose::DeliveryStream",response["StackResourceSummaries"])
for stream in kinesis_streams:
stream_response = kinesis_client.describe_delivery_stream(
DeliveryStreamName=stream["PhysicalResourceId"])
if('KeyType' not in stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']
or ( stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']['KeyType'] != "CUSTOMER_MANAGED_CMK"
and stream_response['DeliveryStreamDescription']['DeliveryStreamEncryptionConfiguration']['KeyARN'] != args.cmk_arn)):
kinesis_client.start_delivery_stream_encryption(
DeliveryStreamName=stream["PhysicalResourceId"],
DeliveryStreamEncryptionConfigurationInput={
'KeyARN': args.cmk_arn,
'KeyType': 'CUSTOMER_MANAGED_CMK'})
role_resources = filter(lambda x: 'LambdaRole' in x["LogicalResourceId"] or x["LogicalResourceId"] in cmk_roles_logical_ids , response["StackResourceSummaries"])
for role_resource in role_resources:
print(f"role_resource: {role_resource['PhysicalResourceId']}")
cmk_roles_physical_ids.append(role_resource["PhysicalResourceId"])
assign_role(role_resource["PhysicalResourceId"])
process_stacks(args.stack_arn)
paginator = cloudformation_client.get_paginator('list_stack_resources')
response_iterator = paginator.paginate(
StackName=args.stack_arn,
PaginationConfig={
'MaxItems': 10000,
}
)
for response in response_iterator:
stacks = filter(lambda x: x["ResourceType"] == "AWS::CloudFormation::Stack",response["StackResourceSummaries"])
for stack in stacks:
print(f"Processing stack {stack['PhysicalResourceId']}")
process_stacks(stack["PhysicalResourceId"])
put_key_policy(args.stack_arn,cmk_roles_physical_ids)
|
baiduocr.py | wangtonghe/hq-answer-assist | 119 | 12779725 | # coding=utf-8
from aip import AipOcr
import re
opt_aux_word = ['《', '》']
def get_file_content(file):
with open(file, 'rb') as fp:
return fp.read()
def image_to_str(name, client):
image = get_file_content(name)
text_result = client.basicGeneral(image)
print(text_result)
result = get_question_and_options(text_result)
return result
def init_baidu_ocr(baidu_ocr_config):
app_id, api_key, secret_key = baidu_ocr_config
client = AipOcr(app_id, api_key, secret_key)
return client
# {'words_result': [{'words': '11.代表作之一是《蒙娜丽莎的眼'},
# {'words': '泪》的歌手是?'}, {'words': '林志颖'},
# {'words': '林志炫'}, {'words': '林志玲'}],
# 'log_id': 916087026228727188, 'words_result_num': 5}
def get_question_and_options(text):
if 'error_code' in text:
        print('请确保百度OCR配置正确')  # "Please make sure the Baidu OCR configuration is correct"
exit(-1)
if text['words_result_num'] == 0:
return '', []
result_arr = text['words_result']
option_arr = []
question_str = ''
question_obj, options_obj = get_question(result_arr)
for question in question_obj:
word = question['words']
word = re.sub('^\d+\.*', '', word)
question_str += word
for option in options_obj:
word = option['words']
if word.startswith('《'):
word = word[1:]
if word.endswith('》'):
word = word[:-1]
print(word)
option_arr.append(word)
print(question_str)
print(option_arr)
return question_str, option_arr
# First split the question from the options at '?'; if there is no question mark, fall back to index-based splitting.
def get_question(result_arr):
result_num = len(result_arr)
index = -1
question_obj, options_obj = [], []
for i, result in enumerate(result_arr):
if '?' in result['words']:
index = i
break
if index > -1:
question_obj = result_arr[:index + 1]
options_obj = result_arr[index + 1:]
return question_obj, options_obj
else:
        # Empirically, 4 OCR lines mean a one-line question, 5 or 6 mean a two-line question, and 8 or more mean the answer-reveal screen.
if result_num <= 4:
question_obj = result_arr[:1]
options_obj = result_arr[1:]
elif result_num == 5:
question_obj = result_arr[:2]
options_obj = result_arr[2:]
elif result_num == 6: # 暂时
question_obj = result_arr[:2]
options_obj = result_arr[2:]
elif result_num == 7 or result_num == 8:
question_obj = result_arr[:3]
options_obj = result_arr[3:]
return question_obj, options_obj
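if __name__ == '__main__':
    # A hedged usage sketch: the credential tuple and the image path below are
    # placeholders, not real values.
    client = init_baidu_ocr(('your_app_id', 'your_api_key', 'your_secret_key'))
    question, options = image_to_str('screenshot.png', client)
    print(question, options)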
|
src/gat.py | simoneazeglio/DeepInf | 258 | 12779735 | <reponame>simoneazeglio/DeepInf
#!/usr/bin/env python
# encoding: utf-8
# File Name: gat.py
# Author: <NAME>
# Create Time: 2017/12/18 21:40
# TODO:
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from gat_layers import BatchMultiHeadGraphAttention
class BatchGAT(nn.Module):
def __init__(self, pretrained_emb, vertex_feature, use_vertex_feature,
n_units=[1433, 8, 7], n_heads=[8, 1],
dropout=0.1, attn_dropout=0.0, fine_tune=False,
instance_normalization=False):
super(BatchGAT, self).__init__()
self.n_layer = len(n_units) - 1
self.dropout = dropout
self.inst_norm = instance_normalization
if self.inst_norm:
self.norm = nn.InstanceNorm1d(pretrained_emb.size(1), momentum=0.0, affine=True)
# https://discuss.pytorch.org/t/can-we-use-pre-trained-word-embeddings-for-weight-initialization-in-nn-embedding/1222/2
self.embedding = nn.Embedding(pretrained_emb.size(0), pretrained_emb.size(1))
self.embedding.weight = nn.Parameter(pretrained_emb)
self.embedding.weight.requires_grad = fine_tune
n_units[0] += pretrained_emb.size(1)
self.use_vertex_feature = use_vertex_feature
if self.use_vertex_feature:
self.vertex_feature = nn.Embedding(vertex_feature.size(0), vertex_feature.size(1))
self.vertex_feature.weight = nn.Parameter(vertex_feature)
self.vertex_feature.weight.requires_grad = False
n_units[0] += vertex_feature.size(1)
self.layer_stack = nn.ModuleList()
for i in range(self.n_layer):
# consider multi head from last layer
f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]
self.layer_stack.append(
BatchMultiHeadGraphAttention(n_heads[i], f_in=f_in,
f_out=n_units[i + 1], attn_dropout=attn_dropout)
)
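    # Expected shapes (as read from the forward pass below): x is
    # (batch, n_vertices, feature_dim); vertices holds integer ids of shape
    # (batch, n_vertices) used to index the pretrained embedding; adj is a
    # (batch, n_vertices, n_vertices) adjacency tensor consumed by
    # BatchMultiHeadGraphAttention.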
def forward(self, x, vertices, adj):
emb = self.embedding(vertices)
if self.inst_norm:
emb = self.norm(emb.transpose(1, 2)).transpose(1, 2)
x = torch.cat((x, emb), dim=2)
if self.use_vertex_feature:
vfeature = self.vertex_feature(vertices)
x = torch.cat((x, vfeature), dim=2)
bs, n = adj.size()[:2]
for i, gat_layer in enumerate(self.layer_stack):
x = gat_layer(x, adj) # bs x n_head x n x f_out
if i + 1 == self.n_layer:
x = x.mean(dim=1)
else:
x = F.elu(x.transpose(1, 2).contiguous().view(bs, n, -1))
x = F.dropout(x, self.dropout, training=self.training)
return F.log_softmax(x, dim=-1)
|
models/pose/loss/pose_modules.py | raviv/torchcv | 308 | 12779791 | <reponame>raviv/torchcv
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: <NAME>(<EMAIL>)
# Loss function for Pose Estimation.
import torch
import torch.nn as nn
from torch.autograd import Variable
class OPMseLoss(nn.Module):
def __init__(self, configer):
super(OPMseLoss, self).__init__()
self.configer = configer
reduction = 'elementwise_mean'
if self.configer.exists('loss', 'params') and 'mse_reduction' in self.configer.get('loss', 'params'):
reduction = self.configer.get('loss', 'params')['mse_reduction']
self.mse_loss = nn.MSELoss(reduction=reduction)
def forward(self, inputs, *targets, mask=None, weights=None):
loss = 0.0
if isinstance(inputs, list):
if weights is not None:
for i in range(len(inputs)):
if mask is not None:
loss += weights[i] * self.mse_loss(inputs[i]*mask, targets)
else:
loss += weights[i] * self.mse_loss(inputs[i], targets)
else:
for i in range(len(inputs)):
if mask is not None:
loss += self.mse_loss(inputs[i]*mask, targets)
else:
loss += self.mse_loss(inputs[i], targets)
else:
if mask is not None:
loss = self.mse_loss(inputs*mask, targets)
else:
loss = self.mse_loss(inputs, targets)
if self.configer.get('mse_loss', 'reduction') == 'sum':
loss = loss / targets.size(0)
return loss
class PartLoss(nn.Module):
def __init__(self, configer):
super(PartLoss, self).__init__()
self.configer = configer
self.mse_loss = nn.MSELoss(size_average=False)
def forward(self, inputs, targets, mask=None):
inputs = inputs.view(inputs.size(0), -1, 6, inputs.size(2), inputs.size(3))
targets = targets.view(targets.size(0), -1, 6, targets.size(2), targets.size(3))
paf_loss = self.mse_loss(inputs[:, :, 0:2, :, :], targets[:, :, 0:2, :, :])
part_loss = self.mse_loss(inputs[:, :, 2:6, :, :], targets[:, :, 2:6, :, :])
loss = paf_loss + part_loss * 6.0
loss = loss / targets.size(0)
return loss
class CapsuleLoss(nn.Module):
def __init__(self, configer):
super(CapsuleLoss, self).__init__()
self.configer = configer
self.mse_loss = nn.MSELoss(reduction=self.configer.get('capsule_loss', 'reduction'))
def forward(self, inputs, targets, masks=None, is_focal=False):
preds = torch.sqrt((inputs**2).sum(dim=1, keepdim=False))
if masks is not None:
preds = preds * masks
if is_focal:
loss = self.mse_loss(preds, targets)
else:
diff = preds - targets
diff = diff ** 2
alpha = 2.0
weights = targets * alpha
weights = torch.exp(weights)
diff = weights * diff
loss = diff.mean()
return loss
class EmbeddingLoss(nn.Module):
def __init__(self, configer):
super(EmbeddingLoss, self).__init__()
self.configer = configer
self.num_keypoints = self.configer.get('data', 'num_keypoints')
self.l_vec = self.configer.get('capsule', 'l_vec')
self.mse_loss = nn.MSELoss(size_average=False)
def forward(self, inputs, tags, numH, sigma=0.1):
batch_size = inputs.size(0)
h_tag_means = [[Variable(torch.zeros(self.l_vec,), requires_grad=True).cuda()
for h in range(numH[b].numpy()[0])] for b in range(inputs.size()[0])]
for b in range(batch_size):
for n in range(numH[b].numpy()[0]):
valik = 0
for k in range(self.num_keypoints):
tag = inputs[b].masked_select(tags[b][k].eq(n+1).unsqueeze(0))
if tag.size() != torch.Size([]):
h_tag_means[b][n] += tag
valik = valik + 1
h_tag_means[b][n] = h_tag_means[b][n] / max(valik, 1)
loss_list = list()
for b in range(batch_size):
for n in range(numH[b].numpy()[0]):
for k in range(self.num_keypoints):
tag = inputs[b].masked_select(tags[b][k].eq(n+1).unsqueeze(0))
if tag.size() != torch.Size([]):
loss_list.append(self.mse_loss(tag, h_tag_means[b][n]))
for b in range(batch_size):
for n1 in range(numH[b].numpy()[0]):
for n2 in range(numH[b].numpy()[0]):
if n1 != n2:
loss_same = torch.exp(-self.mse_loss(h_tag_means[b][n1], h_tag_means[b][n2]) / sigma / sigma)
loss_list.append(loss_same)
if len(loss_list) == 0:
loss = 0.0
else:
loss = loss_list[0]
for i in range(len(loss_list)-1):
loss += loss_list[i+1]
return loss
|
tools/tensorflow/cnn/alexnet/unpickle.py | feifeibear/dlbench | 181 | 12779820 | <gh_stars>100-1000
import cPickle
import numpy as np
import tensorflow as tf
PATH = './cifar-10-batches-py'
TARGETPATH = '/home/comp/csshshi/tensorflow/cifar-10-batches-py'
TEST_FILES = ['test_batch']
FILES = ['data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4', 'data_batch_5']
TRAIN_COUNT = 50000
EVAL_COUNT = 10000
IMAGE_SIZE = 32
NUM_CLASSES = 10
unpickled = {}
def unpickle(file):
dict = unpickled.get(file)
if dict:
return dict
fo = open(file, 'rb')
dict = cPickle.load(fo)
fo.close()
unpickled[file] = dict
return dict
def get_next_batch(batch_size, step, is_test=False):
files = FILES
if is_test:
files = TEST_FILES
file_index = step % len(FILES)
filename = files[file_index]
filename = '%s/%s'%(PATH, filename)
dict = unpickle(filename)
data_index = step/len(files) * batch_size
images = dict['data'][data_index:data_index+batch_size]
labels = dict['labels'][data_index:data_index+batch_size]
reshaped_images = [np.reshape(image, (IMAGE_SIZE, IMAGE_SIZE, 3)) for image in images]
return reshaped_images, labels
|
workshop_sections/extras/lstm_text_classification/trainer/task.py | CharleyGuo/tensorflow-workshop | 691 | 12779822 | import model
import tensorflow as tf
import utils
def train(target,
num_param_servers,
is_chief,
lstm_size=64,
input_filenames=None,
sentence_length=128,
vocab_size=2**15,
learning_rate=0.01,
output_dir=None,
batch_size=1024,
embedding_size=128,
num_epochs=2):
graph = tf.Graph()
with graph.as_default():
sentences, scores = model.get_inputs(
input_filenames, batch_size, num_epochs, sentence_length)
with tf.device(tf.train.replica_device_setter()):
lstm = model.BasicRegressionLSTM(
sentences,
scores,
num_param_servers,
vocab_size,
learning_rate,
embedding_size,
lstm_size
)
tf.contrib.learn.train(
graph,
output_dir,
lstm.train_op,
lstm.loss,
global_step_tensor=lstm.global_step,
supervisor_is_chief=is_chief,
supervisor_master=target
)
if __name__ == "__main__":
parser = utils.base_parser()
parser.add_argument(
'--learning-rate',
type=float,
default=0.01
)
utils.dispatch(
train,
**parser.parse_args().__dict__
)
|
extractor/open163.py | pwh19920920/spiders | 390 | 12779839 | <filename>extractor/open163.py
# pylint: disable=W0123
import re
import requests
def get(url: str) -> dict:
"""
videos
"""
data = {}
data["videos"] = []
headers = {
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
}
re_url = r'mid:(.*?),.*?mp4SdUrlOrign:(.*?),.*?mp4HdUrlOrign:(.*?),.*?mp4ShdUrlOrign:(.*?),'
rep = requests.get(url, headers=headers, timeout=10)
items = re.findall(re_url, rep.text)
for item in items:
        # Iterate in reverse so the highest-quality URL is picked first
for video_url in item[::-1]: # type: str
# print(url)
if "http" in video_url:
video_url = eval(video_url).replace("\\u002F", "/")
data["videos"].append(video_url)
break
return data
if __name__ == "__main__":
url = "http://open.163.com/newview/movie/free?pid=M8LI1JCE6&mid=M8LI3BQ60"
print(get(url))
|
examples/apps/kinesis-analytics-process-kpl-record/aws_kinesis_agg/__init__.py | eugeniosu/serverless-application-model | 326 | 12779877 | #Kinesis Aggregation/Deaggregation Libraries for Python
#
#Copyright 2014, Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
#Licensed under the Amazon Software License (the "License").
#You may not use this file except in compliance with the License.
#A copy of the License is located at
#
# http://aws.amazon.com/asl/
#
#or in the "license" file accompanying this file. This file is distributed
#on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
#express or implied. See the License for the specific language governing
#permissions and limitations under the License.
import md5
#Message aggregation protocol-specific constants
#(https://github.com/awslabs/amazon-kinesis-producer/blob/master/aggregation-format.md)
MAGIC = '\xf3\x89\x9a\xc2'
DIGEST_SIZE = md5.digest_size
#Kinesis Limits
#(https://docs.aws.amazon.com/kinesis/latest/APIReference/API_PutRecord.html)
MAX_BYTES_PER_RECORD = 1024*1024 # 1 MB
|
geocoder/geolytica.py | termim/geocoder | 1,506 | 12779894 | #!/usr/bin/python
# coding: utf8
from __future__ import absolute_import
import logging
from geocoder.base import OneResult, MultipleResultsQuery
class GeolyticaResult(OneResult):
def __init__(self, json_content):
# create safe shortcuts
self._standard = json_content.get('standard', {})
# proceed with super.__init__
super(GeolyticaResult, self).__init__(json_content)
@property
def lat(self):
lat = self.raw.get('latt', '').strip()
if lat:
return float(lat)
@property
def lng(self):
lng = self.raw.get('longt', '').strip()
if lng:
return float(lng)
@property
def postal(self):
return self.raw.get('postal', '').strip()
@property
def housenumber(self):
return self._standard.get('stnumber', '').strip()
@property
def street(self):
return self._standard.get('staddress', '').strip()
@property
def city(self):
return self._standard.get('city', '').strip()
@property
def state(self):
return self._standard.get('prov', '').strip()
@property
def address(self):
        if self.housenumber:
            return u'{0} {1}, {2}'.format(self.housenumber, self.street, self.city)
        elif self.street and self.street != 'un-known':
            return u'{0}, {1}'.format(self.street, self.city)
        else:
            return self.city
class GeolyticaQuery(MultipleResultsQuery):
"""
Geocoder.ca
===========
A Canadian and US location geocoder.
API Reference
-------------
http://geocoder.ca/?api=1
"""
provider = 'geolytica'
method = 'geocode'
_URL = 'http://geocoder.ca'
_RESULT_CLASS = GeolyticaResult
_KEY_MANDATORY = False
def _build_params(self, location, provider_key, **kwargs):
params = {
'json': 1,
'locate': location,
'geoit': 'xml'
}
if 'strictmode' in kwargs:
params.update({'strictmode': kwargs.pop('strictmode')})
if 'strict' in kwargs:
params.update({'strict': kwargs.pop('strict')})
if 'auth' in kwargs:
params.update({'auth': kwargs.pop('auth')})
return params
def _adapt_results(self, json_response):
return [json_response]
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
g = GeolyticaQuery('1552 Payette dr., Ottawa')
g.debug()
|
lib/python/frugal/tests/transport/test_http_transport.py | ariasheets-wk/frugal | 144 | 12779997 | <gh_stars>100-1000
# Copyright 2017 Workiva
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from base64 import b64encode
from struct import pack_into
import unittest
from mock import Mock
from mock import patch
from thrift.transport.TTransport import TTransportException
from frugal.exceptions import TTransportExceptionType
from frugal.transport.http_transport import THttpTransport
@patch('frugal.transport.http_transport.requests')
class TestTHttpTransport(unittest.TestCase):
def test_request(self, mock_requests):
url = 'http://localhost:8080/frugal'
headers = {'foo': 'bar'}
resp = Mock(status_code=200)
response = b'response'
buff = bytearray(4)
pack_into('!I', buff, 0, len(response))
resp.content = b64encode(buff + response)
mock_requests.post.return_value = resp
def get_headers():
return {'baz': 'qux'}
tr = THttpTransport(url, headers=headers, get_headers=get_headers,
response_capacity=500)
tr.open()
self.assertTrue(tr.isOpen())
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'foo': 'bar', 'baz': 'qux', 'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport',
'x-frugal-payload-limit': '500'})
resp = tr.read(len(response))
self.assertEqual(response, resp)
tr.close()
self.assertTrue(tr.isOpen()) # open/close are no-ops
def test_request_timeout(self, mock_requests):
url = 'http://localhost:8080/frugal'
headers = {'foo': 'bar'}
resp = Mock(status_code=200)
response = b'response'
buff = bytearray(4)
pack_into('!I', buff, 0, len(response))
resp.content = b64encode(buff + response)
mock_requests.post.return_value = resp
def get_headers():
return {'baz': 'qux'}
tr = THttpTransport(url, headers=headers, get_headers=get_headers,
response_capacity=500)
tr.open()
self.assertTrue(tr.isOpen())
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.set_timeout(5000)
tr.write(data)
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=5,
headers={'foo': 'bar', 'baz': 'qux', 'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport',
'x-frugal-payload-limit': '500'})
resp = tr.read(len(response))
self.assertEqual(response, resp)
tr.close()
self.assertTrue(tr.isOpen()) # open/close are no-ops
def test_flush_no_body(self, mock_requests):
url = 'http://localhost:8080/frugal'
tr = THttpTransport(url)
tr.flush()
self.assertFalse(mock_requests.post.called)
def test_flush_bad_response(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=500)
mock_requests.post.return_value = resp
tr = THttpTransport(url)
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
with self.assertRaises(TTransportException):
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport'})
def test_flush_bad_oneway_response(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=200)
buff = bytearray(4)
pack_into('!I', buff, 0, 10)
resp.content = b64encode(buff)
mock_requests.post.return_value = resp
tr = THttpTransport(url)
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
with self.assertRaises(TTransportException):
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport'})
def test_flush_oneway(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=200)
buff = bytearray(4)
pack_into('!I', buff, 0, 0)
resp.content = b64encode(buff)
mock_requests.post.return_value = resp
tr = THttpTransport(url)
data = b'helloworld'
buff = bytearray(4)
pack_into('!I', buff, 0, len(data))
encoded_frame = b64encode(buff + data)
tr.write(data)
tr.flush()
mock_requests.post.assert_called_once_with(
url, data=encoded_frame, timeout=None,
headers={'Content-Length': '20',
'Content-Type': 'application/x-frugal',
'Content-Transfer-Encoding': 'base64',
'User-Agent': 'Python/TBaseHttpTransport'})
resp = tr.read(10)
self.assertEqual(b'', resp)
def test_write_limit_exceeded(self, mock_requests):
url = 'http://localhost:8080/frugal'
resp = Mock(status_code=200)
buff = bytearray(4)
pack_into('!I', buff, 0, 0)
resp.content = b64encode(buff)
mock_requests.post.return_value = resp
tr = THttpTransport(url, request_capacity=5)
data = b'helloworld'
with self.assertRaises(TTransportException) as cm:
tr.write(data)
self.assertEqual(TTransportExceptionType.REQUEST_TOO_LARGE,
cm.exception.type)
self.assertFalse(mock_requests.post.called)
|
alipay/aop/api/domain/InsCoverage.py | snowxmas/alipay-sdk-python-all | 213 | 12780018 | <filename>alipay/aop/api/domain/InsCoverage.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InsCoverage(object):
def __init__(self):
self._coverage_name = None
self._coverage_no = None
self._effect_end_time = None
self._effect_start_time = None
self._iop = None
self._iop_premium = None
self._premium = None
self._sum_insured = None
@property
def coverage_name(self):
return self._coverage_name
@coverage_name.setter
def coverage_name(self, value):
self._coverage_name = value
@property
def coverage_no(self):
return self._coverage_no
@coverage_no.setter
def coverage_no(self, value):
self._coverage_no = value
@property
def effect_end_time(self):
return self._effect_end_time
@effect_end_time.setter
def effect_end_time(self, value):
self._effect_end_time = value
@property
def effect_start_time(self):
return self._effect_start_time
@effect_start_time.setter
def effect_start_time(self, value):
self._effect_start_time = value
@property
def iop(self):
return self._iop
@iop.setter
def iop(self, value):
self._iop = value
@property
def iop_premium(self):
return self._iop_premium
@iop_premium.setter
def iop_premium(self, value):
self._iop_premium = value
@property
def premium(self):
return self._premium
@premium.setter
def premium(self, value):
self._premium = value
@property
def sum_insured(self):
return self._sum_insured
@sum_insured.setter
def sum_insured(self, value):
self._sum_insured = value
def to_alipay_dict(self):
params = dict()
if self.coverage_name:
if hasattr(self.coverage_name, 'to_alipay_dict'):
params['coverage_name'] = self.coverage_name.to_alipay_dict()
else:
params['coverage_name'] = self.coverage_name
if self.coverage_no:
if hasattr(self.coverage_no, 'to_alipay_dict'):
params['coverage_no'] = self.coverage_no.to_alipay_dict()
else:
params['coverage_no'] = self.coverage_no
if self.effect_end_time:
if hasattr(self.effect_end_time, 'to_alipay_dict'):
params['effect_end_time'] = self.effect_end_time.to_alipay_dict()
else:
params['effect_end_time'] = self.effect_end_time
if self.effect_start_time:
if hasattr(self.effect_start_time, 'to_alipay_dict'):
params['effect_start_time'] = self.effect_start_time.to_alipay_dict()
else:
params['effect_start_time'] = self.effect_start_time
if self.iop:
if hasattr(self.iop, 'to_alipay_dict'):
params['iop'] = self.iop.to_alipay_dict()
else:
params['iop'] = self.iop
if self.iop_premium:
if hasattr(self.iop_premium, 'to_alipay_dict'):
params['iop_premium'] = self.iop_premium.to_alipay_dict()
else:
params['iop_premium'] = self.iop_premium
if self.premium:
if hasattr(self.premium, 'to_alipay_dict'):
params['premium'] = self.premium.to_alipay_dict()
else:
params['premium'] = self.premium
if self.sum_insured:
if hasattr(self.sum_insured, 'to_alipay_dict'):
params['sum_insured'] = self.sum_insured.to_alipay_dict()
else:
params['sum_insured'] = self.sum_insured
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = InsCoverage()
if 'coverage_name' in d:
o.coverage_name = d['coverage_name']
if 'coverage_no' in d:
o.coverage_no = d['coverage_no']
if 'effect_end_time' in d:
o.effect_end_time = d['effect_end_time']
if 'effect_start_time' in d:
o.effect_start_time = d['effect_start_time']
if 'iop' in d:
o.iop = d['iop']
if 'iop_premium' in d:
o.iop_premium = d['iop_premium']
if 'premium' in d:
o.premium = d['premium']
if 'sum_insured' in d:
o.sum_insured = d['sum_insured']
return o
|
python/searching/interpolation.py | nikitanamdev/AlgoBook | 191 | 12780063 | <gh_stars>100-1000
# Interpolation search is an improved version of binary search for sorted arrays
# whose values are roughly uniformly distributed.
# Its average-case time complexity is O(log(log n)), compared to O(log n) for
# binary search; in the worst case it degrades to O(n).
# Python program to implement interpolation search follows.
# Variable naming:
"""
1) lys - our input array
2) val - the element we are searching for
3) index - the probable index of the search element. This is computed to be a higher value when val
is closer in value to the element at the end of the array (lys[high]), and lower when val
is closer in value to the element at the start of the array (lys[low])
4) low - the starting index of the array
5) high - the last index of the array"""
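# Worked example (hedged): searching for 6 in [1, 2, 3, 4, 5, 6, 7, 8] gives
#   index = 0 + int((7 - 0) / (8 - 1) * (6 - 1)) = 5, and lys[5] == 6,
# so the element is found with a single probe.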
def InterpolationSearch(lys, val):
low = 0
high = (len(lys) - 1)
while low <= high and val >= lys[low] and val <= lys[high]:
index = low + int(((float(high - low) / ( lys[high] - lys[low])) * ( val - lys[low])))
if lys[index] == val:
return index
if lys[index] < val:
low = index + 1
else:
high = index - 1
return -1
print(InterpolationSearch([1,2,3,4,5,6,7,8], 6)) |
Drake-Z/0015/0015.py | saurabh896/python-1 | 3,976 | 12780086 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Problem 0015: the plain-text file city.txt holds city information; its content
(curly braces included) looks like this:
{
    "1" : "上海",
    "2" : "北京",
    "3" : "成都"
}
Write the above content into the file city.xls.'''
__author__ = 'Drake-Z'
import json
from collections import OrderedDict
from openpyxl import Workbook
def txt_to_xlsx(filename):
file = open(filename, 'r', encoding = 'UTF-8')
    file_content = json.load(file)
    print(file_content)
    workbook = Workbook()
    worksheet = workbook.worksheets[0]
    for i in range(1, len(file_content)+1):
        worksheet.cell(row = i, column = 1).value = i
        worksheet.cell(row = i, column = 2).value = file_content[str(i)]
workbook.save(filename = 'city.xls')
if __name__ == '__main__':
txt_to_xlsx('city.txt') |
veles/ocl_blas.py | AkshayJainG/veles | 1,007 | 12780102 | # -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Apr 13, 2015
BLAS class to use with ocl backend.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from cuda4py.blas import CUBLAS_OP_N, CUBLAS_OP_T
import numpy
import opencl4py.blas as clblas
import os
import threading
import weakref
from zope.interface import implementer
from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit
from veles.config import root
from veles.dummy import DummyWorkflow
from veles.logger import Logger
from veles.numpy_ext import roundup
@implementer(IOpenCLUnit)
class Builder(AcceleratedUnit):
"""Dummy unit for building OpenCL kernels.
"""
def __init__(self, workflow, **kwargs):
super(Builder, self).__init__(workflow, **kwargs)
self.source = kwargs["source"]
self.defines = kwargs["defines"]
self.kernel_name = kwargs["kernel_name"]
self.cache_file_name = kwargs["cache_file_name"]
self.dtype = kwargs["dtype"]
@property
def kernel(self):
return self._kernel_
def ocl_init(self):
self.sources_[self.source] = {}
self.build_program(self.defines, self.cache_file_name, self.dtype)
self.assign_kernel(self.kernel_name)
def ocl_run(self):
pass
class OCLBLAS(Logger):
"""Class with BLAS functionality similar to CUBLAS.
It uses CLBLAS when available or custom kernels otherwise.
"""
@staticmethod
def attach_to_device(device):
if device.blas is None:
device.blas = OCLBLAS(device)
def __init__(self, device):
super(OCLBLAS, self).__init__()
self._lock_ = threading.Lock()
self._device = weakref.ref(device)
self.kernels = {}
self._const_i = numpy.zeros(3, dtype=numpy.uint64)
try:
if (root.common.engine.ocl.clBLAS is not True or
root.common.engine.precision_level > 0):
raise ValueError()
if "CLBLAS_STORAGE_PATH" not in os.environ:
found = False
for dirnme in root.common.engine.device_dirs:
for path, _, files in os.walk(dirnme):
for f in files:
if f.endswith(".kdb"):
found = True
os.environ["CLBLAS_STORAGE_PATH"] = path
break
if found:
break
if found:
break
self.blas = clblas.CLBLAS()
self._sgemm = self.clblas_sgemm
self._dgemm = self.clblas_dgemm
self.debug("Using clBLAS for matrix multiplication")
except (OSError, RuntimeError, ValueError):
self._sgemm = self.veles_gemm
self._dgemm = self.veles_gemm
self.debug("Using Veles OpenCL kernels for matrix multiplication")
@property
def device(self):
return self._device()
@staticmethod
def gemm(dtype):
if dtype == numpy.float32:
return OCLBLAS.sgemm
if dtype == numpy.float64:
return OCLBLAS.dgemm
raise ValueError("Invalid dtype %s" % dtype)
def sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._sgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._dgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
        Matrices are assumed to be tightly packed and stored like in CUBLAS.
Single precision (float) version.
"""
self.blas.sgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
        Matrices are assumed to be tightly packed and stored like in CUBLAS.
Double precision (double) version.
"""
self.blas.dgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using custom kernel.
        Matrices are assumed to be tightly packed and stored like in CUBLAS.
"""
with self._lock_:
self._veles_gemm(transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC)
def _veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC):
dtype = alpha.dtype
key = (transA, transB, rowsCountA, columnCountB, commonSideLength,
dtype)
krn_info = self.kernels.get(key)
if krn_info is None:
block_size, vector_opt = self.device.device_info.get_kernel_bs_vo(
kernel="matrix_multiplication", dtype=dtype)
defines = {
"BLOCK_SIZE": block_size,
"VECTOR_OPT": int(bool(vector_opt)),
"B_WIDTH": rowsCountA,
"A_WIDTH": columnCountB,
"AB_COMMON": commonSideLength
}
if transB == CUBLAS_OP_T:
defines["A_COL"] = 1
else:
assert transB == CUBLAS_OP_N
if transA == CUBLAS_OP_N:
defines["B_COL"] = 1
else:
assert transA == CUBLAS_OP_T
global_size = (roundup(rowsCountA, block_size),
roundup(columnCountB, block_size))
local_size = (block_size, block_size)
w = DummyWorkflow()
builder = Builder(
w, source="gemm", defines=defines, kernel_name="gemm",
cache_file_name=(
"veles_gemm_%s" % "_".join(str(x) for x in key)),
dtype=dtype)
builder.initialize(self.device)
krn_info = (builder.kernel, global_size, local_size)
self.kernels[key] = krn_info
del builder
del w
# Set the constants and execute the kernel
krn = krn_info[0]
self._const_i[0:3] = offsetA, offsetB, offsetC
# Our kernel stores output in row-major order, so swap A and B
krn.set_args(B, A, C, alpha, beta, self._const_i[1:2],
self._const_i[0:1], self._const_i[2:3])
global_size = krn_info[1]
local_size = krn_info[2]
self.device.queue_.execute_kernel(krn, global_size, local_size,
need_event=False)
|
codigo_das_aulas/aula_17/exemplo_09.py | VeirichR/curso-python-selenium | 234 | 12780107 | <gh_stars>100-1000
from selene.support.shared import browser
from selene.support.conditions import be
from selene.support.conditions import have
browser.open('http://google.com')
browser.element(
'input[name="q"]'
).should(be.blank).type('Envydust').press_enter()
|
tests/test_pycuda.py | karthik20122001/docker-python | 2,030 | 12780109 | <gh_stars>1000+
"""Tests for general GPU support"""
import unittest
from common import gpu_test
class TestPycuda(unittest.TestCase):
@gpu_test
def test_pycuda(self):
import pycuda.driver
pycuda.driver.init()
gpu_name = pycuda.driver.Device(0).name()
self.assertNotEqual(0, len(gpu_name))
|
pymatgen/io/cube.py | Crivella/pymatgen | 921 | 12780119 | <gh_stars>100-1000
"""
Module for reading Gaussian cube files, which have become one of the standard file formats
for volumetric data in quantum chemistry and solid state physics software packages
(VASP being an exception).
Some basic info about cube files
(abridged info from http://paulbourke.net/dataformats/cube/ by <NAME>)
The file consists of a header which includes the atom information and the size as well
as orientation of the volumetric data. The first two lines of the header are comments. The
third line has the number of atoms included in the file followed by the position of the
origin of the volumetric data. The next three lines give the number of voxels along each axis
(x, y, z) followed by the axis vector. The last section in the header is one line for each
atom consisting of 5 numbers, the first is the atom number, the second is the charge, and
the last three are the x,y,z coordinates of the atom center. The volumetric data is straightforward,
one floating point number for each volumetric element.
Example
In the following example the volumetric data is a 40 by 40 by 40 grid, each voxel is 0.283459 units
wide and the volume is aligned with the coordinate axis. There are three atoms.
CPMD CUBE FILE.
OUTER LOOP: X, MIDDLE LOOP: Y, INNER LOOP: Z
3 0.000000 0.000000 0.000000
40 0.283459 0.000000 0.000000
40 0.000000 0.283459 0.000000
40 0.000000 0.000000 0.283459
8 0.000000 5.570575 5.669178 5.593517
1 0.000000 5.562867 5.669178 7.428055
1 0.000000 7.340606 5.669178 5.111259
-0.25568E-04 0.59213E-05 0.81068E-05 0.10868E-04 0.11313E-04 0.35999E-05
: : : : : :
: : : : : :
: : : : : :
In this case there will be 40 x 40 x 40 floating point values
: : : : : :
: : : : : :
: : : : : :
"""
import numpy as np
from monty.io import zopen
from pymatgen.core.sites import Site
from pymatgen.core.structure import Structure
from pymatgen.core.units import bohr_to_angstrom
# TODO: can multiprocessing be incorporated without causing issues during drone assimilation?
class Cube:
"""
Class to read Gaussian cube file formats for volumetric data.
Cube files are, by default, written in atomic units, and this
class assumes that convention.
"""
def __init__(self, fname):
"""
Initialize the cube object and store the data as self.data
Args:
fname (str): filename of the cube to read
"""
f = zopen(fname, "rt")
# skip header lines
for i in range(2):
f.readline()
# number of atoms followed by the position of the origin of the volumetric data
line = f.readline().split()
self.natoms = int(line[0])
self.origin = np.array(list(map(float, line[1:])))
# The number of voxels along each axis (x, y, z) followed by the axis vector.
line = f.readline().split()
self.NX = int(line[0])
self.X = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dX = np.linalg.norm(self.X)
line = f.readline().split()
self.NY = int(line[0])
self.Y = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dY = np.linalg.norm(self.Y)
line = f.readline().split()
self.NZ = int(line[0])
self.Z = np.array([bohr_to_angstrom * float(l) for l in line[1:]])
self.dZ = np.linalg.norm(self.Z)
self.voxel_volume = abs(np.dot(np.cross(self.X, self.Y), self.Z))
        self.volume = abs(np.dot(np.cross(self.X.dot(self.NX), self.Y.dot(self.NY)), self.Z.dot(self.NZ)))
# The last section in the header is one line for each atom consisting of 5 numbers,
# the first is the atom number, second is charge,
# the last three are the x,y,z coordinates of the atom center.
self.sites = []
for i in range(self.natoms):
line = f.readline().split()
self.sites.append(Site(line[0], np.multiply(bohr_to_angstrom, list(map(float, line[2:])))))
self.structure = Structure(
lattice=[self.X * self.NX, self.Y * self.NY, self.Z * self.NZ],
species=[s.specie for s in self.sites],
coords=[s.coords for s in self.sites],
coords_are_cartesian=True,
)
# Volumetric data
self.data = np.reshape(np.array(f.read().split()).astype(float), (self.NX, self.NY, self.NZ))
def mask_sphere(self, radius, cx, cy, cz):
"""
Create a mask for a sphere with radius=radius, centered at cx, cy, cz.
Args:
            radius: (float) radius of the mask (in Angstroms)
cx, cy, cz: (float) the fractional coordinates of the center of the sphere
"""
dx, dy, dz = (
np.floor(radius / np.linalg.norm(self.X)).astype(int),
np.floor(radius / np.linalg.norm(self.Y)).astype(int),
np.floor(radius / np.linalg.norm(self.Z)).astype(int),
)
gcd = max(np.gcd(dx, dy), np.gcd(dy, dz), np.gcd(dx, dz))
sx, sy, sz = dx // gcd, dy // gcd, dz // gcd
r = min(dx, dy, dz)
x0, y0, z0 = int(np.round(self.NX * cx)), int(np.round(self.NY * cy)), int(np.round(self.NZ * cz))
centerx, centery, centerz = self.NX // 2, self.NY // 2, self.NZ // 2
a = np.roll(self.data, (centerx - x0, centery - y0, centerz - z0))
i, j, k = np.indices(a.shape, sparse=True)
a = np.sqrt((sx * i - sx * centerx) ** 2 + (sy * j - sy * centery) ** 2 + (sz * k - sz * centerz) ** 2)
indices = a > r
a[indices] = 0
return a
def get_atomic_site_averages(self, atomic_site_radii):
"""
Get the average value around each atomic site.
Args:
            atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
                for averaging around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).
returns:
Array of site averages, [Average around site 1, Average around site 2, ...]
"""
return [self._get_atomic_site_average(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]
def _get_atomic_site_average(self, site, radius):
"""
Helper function for get_atomic_site_averages.
Args:
site: Site in the structure around which to get the average
radius: (float) the atomic_site_radius (in Angstroms) for given atomic species
returns:
Average around the atomic site
"""
mask = self.mask_sphere(radius, *site.frac_coords)
return np.sum(self.data * mask) / np.count_nonzero(mask)
def get_atomic_site_totals(self, atomic_site_radii):
"""
Get the integrated total in a sphere around each atomic site.
Args:
            atomic_site_radii (dict): dictionary determining the cutoff radius (in Angstroms)
                for integrating around atomic sites (e.g. {'Li': 0.97, 'B': 0.77, ...}).
        returns:
            Array of site totals, [Total around site 1, Total around site 2, ...]
"""
return [self._get_atomic_site_total(s, atomic_site_radii[s.species_string]) for s in self.structure.sites]
def _get_atomic_site_total(self, site, radius):
"""
Helper function for get_atomic_site_averages.
Args:
site: Site in the structure around which to get the total
radius: (float) the atomic_site_radius (in Angstroms) for given atomic species
returns:
Average around the atomic site
"""
mask = self.mask_sphere(radius, *site.frac_coords)
return np.sum(self.data * mask)
def get_axis_grid(self, ind):
"""
Modified from pymatgen.io.vasp.outputs
Returns the grid for a particular axis.
Args:
ind (int): Axis index.
"""
ng = self.data.shape
num_pts = ng[ind]
lengths = self.structure.lattice.abc
return [i / num_pts * lengths[ind] for i in range(num_pts)]
def get_average_along_axis(self, ind):
"""
Modified from pymatgen.io.vasp.outputs
Get the averaged total of the volumetric data a certain axis direction.
For example, useful for visualizing Hartree Potentials.
Args:
ind (int): Index of axis.
Returns:
Average total along axis
"""
ng = self.data.shape
m = self.data
if ind == 0:
total = np.sum(np.sum(m, axis=1), 1)
elif ind == 1:
total = np.sum(np.sum(m, axis=0), 1)
else:
total = np.sum(np.sum(m, axis=0), 0)
return total / ng[(ind + 1) % 3] / ng[(ind + 2) % 3]
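if __name__ == "__main__":
    # A hedged usage sketch, not part of the pymatgen API surface: "water.cube"
    # is a placeholder path and 0.97 Angstrom is an arbitrary example radius.
    cube = Cube("water.cube")
    print(cube.structure)
    print(cube.get_average_along_axis(2))
    print(cube.get_atomic_site_averages({s.species_string: 0.97 for s in cube.structure.sites}))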
|
Python/SinglyLinkedList.py | Nikhil-Sharma-1/DS-Algo-Point | 1,148 | 12780138 | class Node:
def __init__(self, data):
self.data = data
self.next = None
class SinglyLinkedList:
def __init__(self):
self.head = None
self.pos = None
def insert(self, data):
newNode = Node(data)
if self.head == None:
self.head = newNode
self.pos = newNode
else:
tmp = self.pos.next
self.pos.next = newNode
self.pos = newNode
if tmp != None:
newNode.next = tmp
def delete(self):
if self.pos == self.head:
self.pos = self.pos.next
del self.head
self.head = self.pos
else:
tmp = self.head
while tmp.next != self.pos:
tmp = tmp.next
tmp.next = self.pos.next
del self.pos
self.pos = tmp
def reset(self):
self.pos = self.head
def advance(self):
if self.pos != None:
self.pos = self.pos.next
def out_of_list(self):
if self.pos == None:
return True
else:
return False
def pos_position(self):
if not (self.out_of_list()):
return self.pos.data
else:
return "pos is out of list"
def print_list(self):
if self.head == None:
print("List is empty")
else:
tmp = self.head
while tmp != None:
print(tmp.data)
tmp = tmp.next
run = True
sll = SinglyLinkedList()
while run:
print( "\ni [insert] insert element")
print( "d [delete] delete element")
print( "o [out] out_of_list ?")
print( "p [pos] current position of pos")
print( "r [reset] pos-pointer")
print( "a [advance] pos-pointer")
print( "pr [print] print list")
print( "q [quit] program")
choice = input()
if choice == "i":
num = input("Enter Data for insertion: ")
sll.insert(num)
elif choice == "d":
sll.delete()
elif choice == "o":
print(sll.out_of_list())
elif choice == "r":
sll.reset()
elif choice == "a":
sll.advance()
elif choice == "p":
print(sll.pos_position())
elif choice == "q":
run = False
elif choice == "pr":
sll.print_list()
else:
print("Invalid Input")
"""
Sample I/O:
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
i (Userinput)
Enter Data for insertion: 10
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
i (Userinput)
Enter Data for insertion: 20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
pr (Userinput)
10
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
p (Userinput)
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
a (Userinput)
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
o (Userinput)
True
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
r (Userinput)
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
p (Userinput)
10
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
d (Userinput)
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
pr (Userinput)
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
p (Userinput)
20
i [insert] insert element
d [delete] delete element
o [out] out_of_list ?
p [pos] current position of pos
r [reset] pos-pointer
a [advance] pos-pointer
pr [print] print list
q [quit] program
q (Userinput)
Time Complexity:
Insert: O(1)
Delete: O(N)
PrintList: O(N)
Everything Else: O(1)
"""
|
Python/ds/Firstfit.py | Khushboo85277/NeoAlgo | 897 | 12780139 | """
First fit is the simplest of the storage allocation strategies.
The list of storage blocks is scanned and, as soon as a free block of size >= N is found,
a pointer to that block is handed to the calling program while the leftover space is retained
as a fragment. For example, a request of 2k served from a 5k block gives the caller 2k and
keeps the remaining 3k as a fragment. The program below simulates the first fit strategy
using an array data structure.
"""
# Block class is used as the fixed memory blocks for allocation
class Block:
def __init__(self):
self.size = 0
self.ID = 0
self.fragment = 0
# process class is used for allocating memory for the requesting processes
class process:
def __init__(self):
self.Num = 0
self.size = 0
self.block = None
# initialiseBlocks function initializes all the blocks with sizes and id
def initialiseBlocks(arr, sizes, n):
for i in range(n):
arr[i].size = sizes[i]
arr[i].fragment = sizes[i]
arr[i].ID = i + 1
# printResult function prints the result of the memory allocation strategy
def printResult(arr2, numOfProcess):
print(
"Process No Process Size Block ID Block Size Block Fragment"
)
for i in range(numOfProcess):
print(
str(arr2[i].Num)
+ " "
+ str(arr2[i].size)
+ " "
+ str(arr2[i].block.ID)
+ " "
+ str(arr2[i].block.size)
+ " "
+ str(arr2[i].block.fragment)
)
# firstfit function allocates memory to processes using firstfit allocation algorithm
def firstfit(arr, sizes, n, arr2, numOfProcess):
initialiseBlocks(arr, sizes, n)
for i in range(numOfProcess):
for j in range(n):
if arr2[i].size <= arr[j].fragment:
arr[j].fragment -= arr2[i].size
arr2[i].block = Block()
arr2[i].block.size = arr[j].size
arr2[i].block.ID = arr[j].ID
arr2[i].block.fragment = arr[j].fragment
break
print("First Fit Allocation")
printResult(arr2, numOfProcess)
# Driver code
if __name__ == "__main__":
sizes = [60, 20, 12, 35, 64, 42, 31, 35, 40, 50]
arr = []
for i in range(10):
arr.append(Block())
initialiseBlocks(arr, sizes, 10)
numOfProcess = int(
input("Enter the number of process for memory to be allocated : ")
)
print("Enter the sizes required by the processes in the order of requirement")
psize = list(map(int, input().split(" ")))
arr2 = []
for i in range(numOfProcess):
arr2.append(process())
arr2[i].size = psize[i]
arr2[i].Num = i + 1
firstfit(arr, sizes, 10, arr2, numOfProcess)
"""
Sample I/O:
Enter the number of process for memory to be allocated : 5
Enter the sizes required by the processes in the order of requirement
15 12 13 20 11
First Fit Allocation
Process No Process Size Block ID Block Size Block Fragment
1 15 1 60 45
2 12 1 60 33
3 13 1 60 20
4 20 1 60 0
5 11 2 20 9
Time complexity : O(n)
space complexity : O(n)
"""
|
Python/test/currencies.py | yrtf/QuantLib-SWIG | 231 | 12780165 | """
Copyright (C) 2021 <NAME>
This file is part of QuantLib, a free-software/open-source library
for financial quantitative analysts and developers - http://quantlib.org/
QuantLib is free software: you can redistribute it and/or modify it
under the terms of the QuantLib license. You should have received a
copy of the license along with this program; if not, please email
<<EMAIL>>. The license is also available online at
<http://quantlib.org/license.shtml>.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the license for more details.
"""
import unittest
import QuantLib as ql
class CurrencyTest(unittest.TestCase):
def test_default_currency_constructor(self):
"""Testing default currency constructor"""
fail_msg = "Failed to create default currency."
default_ccy = ql.Currency()
self.assertTrue(default_ccy.empty(), fail_msg)
def test_eur_constructor(self):
"""Testing EUR constructor"""
fail_msg = "Failed to create EUR currency."
eur = ql.EURCurrency()
self.assertFalse(eur.empty(), fail_msg)
def test_bespoke_currency_constructor(self):
"""Testing bespoke currency constructor"""
fail_msg = "Failed to create bespoke currency."
custom_ccy = ql.Currency(
"CCY", "CCY", 100, "#", "", 100, ql.Rounding(), "")
self.assertFalse(custom_ccy.empty(), fail_msg)
if __name__ == '__main__':
print('testing QuantLib ' + ql.__version__)
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(CurrencyTest, 'test'))
unittest.TextTestRunner(verbosity=2).run(suite)
|
smt/SMT.py | jeanqasaur/jeeves | 253 | 12780201 | '''
Translate expressions to SMT import format.
'''
from Z3 import Z3
class UnsatisfiableException(Exception):
pass
# NOTE(JY): Think about if the solver needs to know about everything for
# negative constraints. I don't think so because enough things should be
# concrete that this doesn't matter.
def solve(constraints, defaults, desiredVars):
# NOTE(JY): This is just a sketch of what should go on...
# Implement defaults by adding values to the model and
#for v in jeeveslib.env.envVars:
# jeeveslib.solver.push()
# solver.assertConstraint(v = z3.BoolVal(True))
# if (solver.check() == solver.Unsat):
# jeeveslib.solver.pop()
# Now get the variables back from the solver by evaluating all
# variables in question...
# Now return the new environment...
#return NotImplemented
solver = Z3()
result = {}
for constraint in constraints:
if constraint.type != bool:
raise ValueError("constraints must be bools")
solver.boolExprAssert(constraint)
if not solver.check():
raise UnsatisfiableException("Constraints not satisfiable")
for default in defaults:
solver.push()
if default.type != bool:
raise ValueError("defaults must be bools")
solver.boolExprAssert(default)
if not solver.isSatisfiable():
solver.pop()
assert solver.check()
result = {}
for var in desiredVars:
result[var] = solver.evaluate(var)
assert (result[var] is True) or (result[var] is False)
return result
|
watchmen/pipeline/core/context/unit_context.py | Insurance-Metrics-Measure-Advisory/watchman-data-connector | 125 | 12780202 | <reponame>Insurance-Metrics-Measure-Advisory/watchman-data-connector
from model.model.pipeline.pipeline import ProcessUnit
from watchmen.monitor.model.pipeline_monitor import UnitRunStatus
from watchmen.pipeline.core.context.stage_context import StageContext
class UnitContext:
stageContext: StageContext
unit: ProcessUnit
unitStatus: UnitRunStatus
def __init__(self, stageContext, unit):
self.stageContext = stageContext
self.unit = unit
self.unitStatus = UnitRunStatus()
|
tensorflow/python/distribute/distribute_coordinator_context.py | EricRemmerswaal/tensorflow | 190,993 | 12780203 | <filename>tensorflow/python/distribute/distribute_coordinator_context.py<gh_stars>1000+
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The context retrieval method for distribute coordinator."""
import threading
_worker_context = threading.local()
def get_current_worker_context():
"""Returns the current task context."""
try:
return _worker_context.current
except AttributeError:
return None
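# Illustration (a sketch, not part of the original module): code running in a
# thread launched by distribute coordinator can fetch its task context like
# this; outside such a worker thread the call simply returns None.
#
#   context = get_current_worker_context()
#   if context is not None:
#       ...  # inspect the current task via the context object
#   else:
#       ...  # not running under a distribute coordinator worker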
|
plugins/quetz_content_trust/quetz_content_trust/repo_signer.py | maresb/quetz | 108 | 12780213 | import os
import shutil
from pathlib import Path
import conda_content_trust.signing as cct_signing
class RepoSigner:
def sign_repodata(self, repodata_fn, pkg_mgr_key):
final_fn = self.in_folder / "repodata_signed.json"
print("copy", repodata_fn, final_fn)
shutil.copyfile(repodata_fn, final_fn)
cct_signing.sign_all_in_repodata(str(final_fn), pkg_mgr_key)
print(f"Signed {final_fn}")
def __init__(self, in_folder, pkg_mgr_key):
self.in_folder = Path(in_folder).resolve()
f = os.path.join(self.in_folder, "repodata.json")
if os.path.isfile(f):
self.sign_repodata(Path(f), pkg_mgr_key)
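# Hedged usage sketch: sign the repodata.json of one channel subdirectory. The
# path is only an example, and `pkg_mgr_key` stands for whatever signing-key
# value conda_content_trust's sign_all_in_repodata() expects.
#
#   RepoSigner("channel/linux-64", pkg_mgr_key)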
|
machine/storage/backends/base.py | drdarina/slack-machine | 111 | 12780217 | <reponame>drdarina/slack-machine<filename>machine/storage/backends/base.py
class MachineBaseStorage:
"""Base class for storage backends
Extending classes should implement the five methods in this base class. Slack Machine takes
care of a lot of details regarding the persistent storage of data. So storage backends
**do not** have to deal with the following, because Slack Machine takes care of these:
- Serialization/Deserialization of data
- Namespacing of keys (so data stored by different plugins doesn't clash)
"""
def __init__(self, settings):
self.settings = settings
def get(self, key):
"""Retrieve data by key
:param key: key for which to retrieve data
:return: the raw data for the provided key, as (byte)string. Should return ``None`` when
the key is unknown or the data has expired.
"""
raise NotImplementedError
def set(self, key, value, expires=None):
"""Store data by key
:param key: the key under which to store the data
:param value: data as (byte)string
:param expires: optional expiration time in seconds, after which the data should not be
returned any more.
"""
raise NotImplementedError
def delete(self, key):
"""Delete data by key
:param key: key for which to delete the data
"""
raise NotImplementedError
def has(self, key):
"""Check if the key exists
:param key: key to check
        :return: ``True/False`` whether the key exists
"""
raise NotImplementedError
def size(self):
"""Calculate the total size of the storage
:return: total size of storage in bytes (integer)
"""
raise NotImplementedError
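# Hedged example: a minimal in-memory backend sketch showing how the five
# methods above fit together. It is illustrative only; expiry is handled with a
# plain timestamp and is checked lazily in get().
import time

class InMemoryStorage(MachineBaseStorage):
    def __init__(self, settings):
        super().__init__(settings)
        self._data = {}  # key -> (value, expires_at or None)

    def get(self, key):
        value, expires_at = self._data.get(key, (None, None))
        if expires_at is not None and expires_at < time.time():
            # expired: drop it and behave as if the key were unknown
            self._data.pop(key, None)
            return None
        return value

    def set(self, key, value, expires=None):
        expires_at = time.time() + expires if expires else None
        self._data[key] = (value, expires_at)

    def delete(self, key):
        self._data.pop(key, None)

    def has(self, key):
        return self.get(key) is not None

    def size(self):
        # assumes values are stored as (byte)strings, as the class docstring says
        return sum(len(value) for value, _ in self._data.values())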
|
tools/etnaviv/mmt.py | ilbers/etna_viv | 121 | 12780222 | import struct
from collections import namedtuple
def read_1(f):
return f.read(1)[0]
def read_2(f):
return struct.unpack('<H', f.read(2))[0]
def read_4(f):
return struct.unpack('<I', f.read(4))[0]
def read_8(f):
return struct.unpack('<Q', f.read(8))[0]
def read_buffer(f):
length = read_4(f)
return f.read(length)
def read_str(f):
s = read_buffer(f)
assert(s[-1] == 0)
return s[0:-1]
LogMessage = namedtuple('LogMessage', ['msg'])
Open = namedtuple('Open', ['flags', 'mode', 'fd', 'path'])
Mmap = namedtuple('Mmap', ['offset', 'prot', 'flags', 'fd', 'region_id', 'start', 'length'])
Munmap = namedtuple('Munmap', ['offset', 'region_id', 'start', 'length', 'unk1', 'unk2'])
StoreInfo = namedtuple('StoreInfo', ['msg'])
Store = namedtuple('Store', ['region_id', 'offset', 'data'])
ProcessMap = namedtuple('ProcessMap', ['msg'])
# etnaviv specific
Commit = namedtuple('Commit', [])
def parse_mmt_file(f):
while True:
ch = f.read(1)
if ch == b'':
return
elif ch == b'=' or ch == b'-': # Comment
s = b''
while True: # read until \n
ch = f.read(1)
if ch == b'\n':
break
else:
s += ch
yield LogMessage(s)
elif ch == b'o': # open
flags = read_4(f)
mode = read_4(f)
fd = read_4(f)
path = read_str(f)
assert(read_1(f) == 10)
yield Open(flags, mode, fd, path)
elif ch == b'M': # mmap
offset = read_8(f)
prot = read_4(f)
flags = read_4(f)
fd = read_4(f)
region_id = read_4(f)
start = read_8(f)
length = read_8(f)
assert(read_1(f) == 10)
yield Mmap(offset, prot, flags, fd, region_id, start, length)
elif ch == b'u': # munmap
offset = read_8(f)
region_id = read_4(f)
start = read_8(f)
length = read_8(f)
unk1 = read_8(f)
unk2 = read_8(f)
assert(read_1(f) == 10)
yield Munmap(offset, region_id, start, length, unk1, unk2)
elif ch == b'x': # store_info
info = read_str(f)
assert(read_1(f) == 10)
yield StoreInfo(info)
elif ch == b'w': # store
region_id = read_4(f)
offset = read_4(f)
length = read_1(f)
data = f.read(length)
assert(read_1(f) == 10)
yield Store(region_id, offset, data)
elif ch == b'c': # commit
assert(read_1(f) == 10)
yield Commit()
elif ch == b'y': # process map
assert(read_8(f) == 1)
msg = read_buffer(f)
assert(read_1(f) == 10)
yield ProcessMap(msg)
else:
print('Unknown ', ch)
exit(1)
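# Hedged usage sketch: iterate over the decoded events of a captured trace.
# The path argument is an example; any mmt capture opened in binary mode works.
def dump_trace(path):
    with open(path, 'rb') as f:
        for event in parse_mmt_file(f):
            if isinstance(event, Store):
                print('store of %d bytes into region %d' % (len(event.data), event.region_id))
            else:
                print(event)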
|
segmenters/nlp/JiebaSegmenter/tests/test_jiebasegmenter.py | saoc90/jina-hub | 106 | 12780230 | <reponame>saoc90/jina-hub
import os
import numpy as np
import pytest
from .. import JiebaSegmenter
cur_dir = os.path.dirname(os.path.abspath(__file__))
path_dict_file = os.path.join(cur_dir, 'dict.txt')
def test_jieba_segmenter():
segmenter = JiebaSegmenter(mode='accurate')
text = '今天是个大晴天!安迪回来以后,我们准备去动物园。'
docs_chunks = segmenter.segment(np.stack([text, text]))
assert len(docs_chunks) == 2
for chunks in docs_chunks:
assert len(chunks) == 14
def test_jieba_user_dir():
segmenter = JiebaSegmenter()
text = '今天是个大晴天!安迪回来以后,我们准备去动物园。thisisnotachineseword'
docs_chunks = segmenter.segment(np.stack([text, text]))
assert len(docs_chunks) == 2
for chunks in docs_chunks:
assert len(chunks) == 15
segmenter = JiebaSegmenter(user_dict_file=path_dict_file)
text = '今天是个大晴天!安迪回来以后,我们准备去动物园。thisisnotachineseword'
docs_chunks = segmenter.segment(np.stack([text, text]))
assert len(docs_chunks) == 2
for chunks in docs_chunks:
assert len(chunks) == 20
def test_jieba_user_dir_file_not_found():
with pytest.raises(FileNotFoundError):
JiebaSegmenter(user_dict_file='/this/path/does/not/exist.txt')
|
data/transcoder_evaluation_gfg/python/MINIMUM_PERIMETER_N_BLOCKS.py | mxl1n/CodeGen | 241 | 12780252 | <filename>data/transcoder_evaluation_gfg/python/MINIMUM_PERIMETER_N_BLOCKS.py
# Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import math
def f_gold ( n ) :
l = math.sqrt ( n )
sq = l * l
if ( sq == n ) :
return l * 4
else :
row = n / l
perimeter = 2 * ( l + row )
if ( n % l != 0 ) :
perimeter += 2
return perimeter
#TOFILL
if __name__ == '__main__':
param = [
(45,),
(80,),
(54,),
(48,),
(83,),
(68,),
(32,),
(20,),
(68,),
(66,)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
aa1_data_util/1_process_zhihu.py | sunshinenum/text_classification | 7,723 | 12780308 | # -*- coding: utf-8 -*-
import sys
#reload(sys)
#sys.setdefaultencoding('utf8')
#1. Save the mapping from question ID to topics into a dictionary: process question_topic_train_set.txt
#from:question_id,topics(topic_id1,topic_id2,topic_id3,topic_id4,topic_id5)
# to:(question_id,topic_id1)
# (question_id,topic_id2)
#read question_topic_train_set.txt
import codecs
#1.################################################################################################################
print("process question_topic_train_set.txt,started...")
q_t='question_topic_train_set.txt'
q_t_file = codecs.open(q_t, 'r', 'utf8')
lines=q_t_file.readlines()
question_topic_dict={}
for i,line in enumerate(lines):
if i%300000==0:
print(i)
#print(line)
question_id,topic_list_string=line.split('\t')
#print(question_id)
#print(topic_list_string)
topic_list=topic_list_string.replace("\n","").split(",")
question_topic_dict[question_id]=topic_list
#for ii,topic in enumerate(topic_list):
# print(ii,topic)
#print("=====================================")
#if i>10:
# print(question_topic_dict)
# break
print("process question_topic_train_set.txt,ended...")
###################################################################################################################
###################################################################################################################
#2. Process questions -- build a dictionary of {question ID: question representation}. For every question, form a list of strings to represent it.
import codecs
print("process question started11...")
q='question_train_set.txt'
q_file = codecs.open(q, 'r', 'utf8')
q_lines=q_file.readlines()
questionid_words_representation={}
question_representation=[]
length_desc=30
for i,line in enumerate(q_lines):
#print("line:")
#print(line)
element_lists=line.split('\t') #['c324,c39','w305...','c']
question_id=element_lists[0]
#print("question_id:",element_lists[0])
#for i,q_e in enumerate(element_lists):
# print("e:",q_e)
#question_representation=[x for x in element_lists[2].split(",")] #+ #TODO this is only for title's word. no more.
title_words=[x for x in element_lists[2].strip().split(",")][-length_desc:]
#print("title_words:",title_words)
title_c=[x for x in element_lists[1].strip().split(",")][-length_desc:]
#print("title_c:", title_c)
desc_words=[x for x in element_lists[4].strip().split(",")][-length_desc:]
#print("desc_words:", desc_words)
desc_c=[x for x in element_lists[3].strip().split(",")][-length_desc:]
#print("desc_c:", desc_c)
question_representation =title_words+ title_c+desc_words+ desc_c
question_representation=" ".join(question_representation)
#print("question_representation:",question_representation)
#print("question_representation:",question_representation)
questionid_words_representation[question_id]=question_representation
q_file.close()
print("proces question ended2...")
#####################################################################################################################
###################################################################################################################
# 3. Build the training data needed by the model, as a list in the form {question representation: TOPIC_ID}
# save training data,testing data: question __label__topic_id
import codecs
import random
print("saving traininig data.started1...")
count = 0
train_zhihu = 'train-zhihu6-title-desc.txt'
test_zhihu = 'test-zhihu6-title-desc.txt'
valid_zhihu = 'valid-zhihu6-title-desc.txt'
data_list = []
multi_label_flag=True
def split_list(listt):
random.shuffle(listt)
list_len = len(listt)
train_len = 0.95
valid_len = 0.025
train = listt[0:int(list_len * train_len)]
valid = listt[int(list_len * train_len):int(list_len * (train_len + valid_len))]
test = listt[int(list_len * (train_len + valid_len)):]
return train, valid, test
for question_id, question_representation in questionid_words_representation.items():
# print("===================>")
# print('question_id',question_id)
# print("question_representation:",question_representation)
# get label_id for this question_id by using:question_topic_dict
topic_list = question_topic_dict[question_id]
# print("topic_list:",topic_list)
# if count>5:
# ii=0
# ii/0
if not multi_label_flag:
for topic_id in topic_list:
data_list.append((question_representation, topic_id)) #single-label
else:
data_list.append((question_representation, topic_list)) #multi-label
count = count + 1
# random shuffle list
random.shuffle(data_list)
def write_data_to_file_system(file_name, data):
file = codecs.open(file_name, 'a', 'utf8')
for d in data:
# print(d)
question_representation, topic_id = d
question_representation_ = " ".join(question_representation)
file.write(question_representation_ + " __label__" + str(topic_id) + "\n")
file.close()
def write_data_to_file_system_multilabel(file_name, data):
file = codecs.open(file_name, 'a', 'utf8')
for d in data:
question_representation, topic_id_list = d
topic_id_list_=" ".join(topic_id_list)
file.write(question_representation + " __label__" + str(topic_id_list_) + "\n")
file.close()
train_data, valid_data, test_data = split_list(data_list)
if not multi_label_flag:#single label
write_data_to_file_system(train_zhihu, train_data)
write_data_to_file_system(valid_zhihu, valid_data)
write_data_to_file_system(test_zhihu, test_data)
else:#multi-label
write_data_to_file_system_multilabel(train_zhihu, train_data)
write_data_to_file_system_multilabel(valid_zhihu, valid_data)
write_data_to_file_system_multilabel(test_zhihu, test_data)
print("saving traininig data.ended...")
###################################################################################################################### |
torchbenchmark/models/fastNLP/reproduction/Star_transformer/util.py | Chillee/benchmark | 2,693 | 12780328 | import fastNLP as FN
import argparse
import os
import random
import numpy
import torch
from tensorboardX import SummaryWriter  # used by TensorboardCallback below
def get_argparser():
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, required=True)
parser.add_argument('--w_decay', type=float, required=True)
parser.add_argument('--lr_decay', type=float, required=True)
parser.add_argument('--bsz', type=int, required=True)
parser.add_argument('--ep', type=int, required=True)
parser.add_argument('--drop', type=float, required=True)
parser.add_argument('--gpu', type=str, required=True)
parser.add_argument('--log', type=str, default=None)
return parser
def add_model_args(parser):
parser.add_argument('--nhead', type=int, default=6)
parser.add_argument('--hdim', type=int, default=50)
parser.add_argument('--hidden', type=int, default=300)
return parser
def set_gpu(gpu_str):
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_str
def set_rng_seeds(seed=None):
if seed is None:
seed = numpy.random.randint(0, 65536)
random.seed(seed)
numpy.random.seed(seed)
torch.random.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
# print('RNG_SEED {}'.format(seed))
return seed
class TensorboardCallback(FN.Callback):
"""
    Accepts one or more of the following strings as arguments:
- "model"
- "loss"
- "metric"
"""
def __init__(self, *options):
super(TensorboardCallback, self).__init__()
args = {"model", "loss", "metric"}
for opt in options:
if opt not in args:
raise ValueError(
"Unrecognized argument {}. Expect one of {}".format(opt, args))
self.options = options
self._summary_writer = None
self.graph_added = False
def on_train_begin(self):
save_dir = self.trainer.save_path
if save_dir is None:
path = os.path.join(
"./", 'tensorboard_logs_{}'.format(self.trainer.start_time))
else:
path = os.path.join(
save_dir, 'tensorboard_logs_{}'.format(self.trainer.start_time))
self._summary_writer = SummaryWriter(path)
def on_batch_begin(self, batch_x, batch_y, indices):
if "model" in self.options and self.graph_added is False:
            # tensorboardX has a serious bug here; drawing the model graph is not possible for now
# from fastNLP.core.utils import _build_args
# inputs = _build_args(self.trainer.model, **batch_x)
# args = tuple([value for value in inputs.values()])
# args = args[0] if len(args) == 1 else args
# self._summary_writer.add_graph(self.trainer.model, torch.zeros(32, 2))
self.graph_added = True
def on_backward_begin(self, loss):
if "loss" in self.options:
self._summary_writer.add_scalar(
"loss", loss.item(), global_step=self.trainer.step)
if "model" in self.options:
for name, param in self.trainer.model.named_parameters():
if param.requires_grad:
self._summary_writer.add_scalar(
name + "_mean", param.mean(), global_step=self.trainer.step)
# self._summary_writer.add_scalar(name + "_std", param.std(), global_step=self.trainer.step)
self._summary_writer.add_scalar(name + "_grad_mean", param.grad.mean(),
global_step=self.trainer.step)
def on_valid_end(self, eval_result, metric_key):
if "metric" in self.options:
for name, metric in eval_result.items():
for metric_key, metric_val in metric.items():
self._summary_writer.add_scalar("valid_{}_{}".format(name, metric_key), metric_val,
global_step=self.trainer.step)
def on_train_end(self):
self._summary_writer.close()
del self._summary_writer
def on_exception(self, exception):
if hasattr(self, "_summary_writer"):
self._summary_writer.close()
del self._summary_writer
|
contrib/frontends/django/nntpchan/nntpchan/frontend/templatetags/chanup.py | majestrate/nntpchan | 233 | 12780360 | <gh_stars>100-1000
from django import template
from django.template.defaultfilters import stringfilter
from django.utils.html import conditional_escape
from django.utils.safestring import mark_safe
from nntpchan.frontend.models import Newsgroup, Post
import re
from urllib.parse import urlparse
from html import unescape
register = template.Library()
re_postcite = re.compile('>> ?([0-9a-fA-F]+)')
re_boardlink = re.compile('>>> ?/([a-zA-Z0-9\.]+[a-zA-Z0-9])/')
re_redtext = re.compile('== ?(.+) ?==')
re_psytext = re.compile('@@ ?(.+) ?@@')
def greentext(text, esc):
return_text = ''
f = False
for line in text.split('\n'):
line = line.strip()
if len(line) < 2:
continue
if line[0] == '>' and line[1] != '>':
return_text += '<span class="greentext">%s </span>' % esc ( line ) + '\n'
f = True
else:
return_text += esc(line) + '\n'
return return_text, f
def blocktext(text, esc, delim='', css='', tag='span'):
parts = text.split(delim)
f = False
if len(parts) > 1:
parts.reverse()
return_text = ''
while len(parts) > 0:
return_text += esc(parts.pop())
if len(parts) > 0:
f = True
return_text += '<{} class="{}">%s</{}>'.format(tag,css,tag) % esc(parts.pop())
return return_text, f
else:
return text, f
redtext = lambda t, e : blocktext(t, e, '==', 'redtext')
psytext = lambda t, e : blocktext(t, e, '@@', 'psy')
codeblock = lambda t, e : blocktext(t, e, '[code]', 'code', 'pre')
def postcite(text, esc):
return_text = ''
filtered = False
for line in text.split('\n'):
for word in line.split(' '):
match = re_postcite.match(unescape(word))
if match:
posthash = match.groups()[0]
posts = Post.objects.filter(posthash__startswith=posthash)
if len(posts) > 0:
filtered = True
return_text += '<a href="%s" class="postcite">>>%s</a> ' % ( posts[0].get_absolute_url(), posthash)
else:
return_text += '<span class="greentext">>>%s</span> ' % match.string
elif filtered:
return_text += word + ' '
else:
return_text += esc(word) + ' '
return_text += '\n'
return return_text, filtered
def boardlink(text, esc):
return_text = ''
filtered = False
for line in text.split('\n'):
for word in line.split(' '):
match = re_boardlink.match(unescape(word))
if match:
name = match.groups()[0]
group = Newsgroup.objects.filter(name=name)
if len(group) > 0:
filtered = True
return_text += '<a href="%s" class="boardlink">%s</a> ' % ( group[0].get_absolute_url(), esc(match.string ) )
else:
return_text += '<span class="greentext">%s</span> ' % esc (match.string)
else:
return_text += esc(word) + ' '
return_text += '\n'
return return_text, filtered
def urlify(text, esc):
return_text = ''
filtered = False
for line in text.split('\n'):
for word in line.split(' '):
u = urlparse(word)
if u.scheme != '' and u.netloc != '':
return_text += '<a href="%s">%s</a> ' % ( u.geturl(), esc(word) )
filtered = True
else:
return_text += esc(word) + ' '
return_text += '\n'
return return_text, filtered
line_funcs = [
greentext,
redtext,
urlify,
psytext,
codeblock,
postcite,
boardlink,
]
@register.filter(needs_autoescape=True, name='memepost')
def memepost(text, autoescape=True):
text, _ = line_funcs[0](text, conditional_escape)
for f in line_funcs[1:]:
text, _ = f(text, lambda x : x)
return mark_safe(text)
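# Usage note (hedged): after {% load chanup %} in a Django template, these
# filters are applied as e.g. {{ post.message|memepost }} or
# {{ post.message|truncate:300 }}; `post.message` is a placeholder variable.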
@register.filter(name='truncate')
@stringfilter
def truncate(text, truncate=500):
if len(text) > truncate:
return text[:truncate] + '...'
return text
|
examples/affect/affect_mfm.py | kapikantzari/MultiBench | 148 | 12780388 | import torch
import sys
import os
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.path.dirname(os.getcwd())))
from unimodals.MVAE import TSEncoder, TSDecoder # noqa
from utils.helper_modules import Sequential2 # noqa
from objective_functions.objectives_for_supervised_learning import MFM_objective # noqa
from torch import nn # noqa
from unimodals.common_models import MLP # noqa
from training_structures.Supervised_Learning import train, test # noqa
from datasets.affect.get_data import get_dataloader # noqa
from fusions.common_fusions import Concat # noqa
classes = 2
n_latent = 256
dim_0 = 35
dim_1 = 74
dim_2 = 300
timestep = 50
# mosi_data.pkl, mosei_senti_data.pkl
# mosi_raw.pkl, mosei_raw.pkl, sarcasm.pkl, humor.pkl
# raw_path: mosi.hdf5, mosei.hdf5, sarcasm_raw_text.pkl, humor_raw_text.pkl
traindata, validdata, test_robust = get_dataloader(
'/home/paul/MultiBench/mosi_raw.pkl', task='classification', robust_test=False, max_pad=True, max_seq_len=timestep)
encoders = [TSEncoder(dim_0, 30, n_latent, timestep, returnvar=False).cuda(), TSEncoder(
dim_1, 30, n_latent, timestep, returnvar=False).cuda(), TSEncoder(dim_2, 30, n_latent, timestep, returnvar=False).cuda()]
decoders = [TSDecoder(dim_0, 30, n_latent, timestep).cuda(), TSDecoder(
dim_1, 30, n_latent, timestep).cuda(), TSDecoder(dim_2, 30, n_latent, timestep).cuda()]
fuse = Sequential2(Concat(), MLP(3*n_latent, n_latent, n_latent//2)).cuda()
intermediates = [MLP(n_latent, n_latent//2, n_latent//2).cuda(), MLP(n_latent,
n_latent//2, n_latent//2).cuda(), MLP(n_latent, n_latent//2, n_latent//2).cuda()]
head = MLP(n_latent//2, 20, classes).cuda()
argsdict = {'decoders': decoders, 'intermediates': intermediates}
additional_modules = decoders+intermediates
objective = MFM_objective(2.0, [torch.nn.MSELoss(
), torch.nn.MSELoss(), torch.nn.MSELoss()], [1.0, 1.0, 1.0])
train(encoders, fuse, head, traindata, validdata, 200, additional_modules,
objective=objective, objective_args_dict=argsdict, save='mosi_mfm_best.pt')
print("Testing:")
model = torch.load('mosi_mfm_best.pt').cuda()
test(model=model, test_dataloaders_all=test_robust,
dataset='mosi', is_packed=False, no_robust=True)
|
clinicadl/tests/test_classify.py | yogeshmj/AD-DL | 112 | 12780391 | <gh_stars>100-1000
# coding: utf8
import pytest
import os
from os.path import join, exists
@pytest.fixture(params=[
'classify_image',
'classify_slice',
'classify_patch'
])
def classify_commands(request):
out_filename = 'fold-0/cnn_classification/best_balanced_accuracy/DB-TEST_image_level_prediction.tsv'
if request.param == 'classify_image':
data_folder = 'data/models/image_model_baseline_AD_CN_single_fold/'
test_input = [
'classify',
'data/classify/OASIS_test',
'data/classify/OASIS_test/data.tsv',
data_folder,
'--prefix_output', 'DB-TEST',
'-cpu'
]
output_files = join(data_folder, out_filename)
elif request.param == 'classify_slice':
data_folder = 'data/models/slice_model_baseline_AD_CN_single_fold/'
test_input = [
'classify',
'data/classify/OASIS_test',
'data/classify/OASIS_test/data.tsv',
data_folder,
'--prefix_output', 'DB-TEST',
'-cpu'
]
output_files = join(data_folder, out_filename)
elif request.param == 'classify_patch':
data_folder = 'data/models/patch_model_baseline_AD_CN_multicnn_single_fold/'
test_input = [
'classify',
'data/classify/OASIS_test',
'data/classify/OASIS_test/data.tsv',
data_folder,
'--prefix_output', 'DB-TEST',
'-cpu'
]
output_files = join(data_folder, out_filename)
else:
raise NotImplementedError(
"Test %s is not implemented." %
request.param)
return test_input, output_files
def test_classify(classify_commands):
test_input = classify_commands[0]
output_files = classify_commands[1]
flag_error = not os.system("clinicadl " + " ".join(test_input))
assert flag_error
assert exists(output_files)
|
configs/universenet/ablation/universenet50_2008_nosepc_fp16_4x4_mstrain_480_960_1x_coco.py | Jack-Hu-2001/UniverseNet | 314 | 12780397 | <gh_stars>100-1000
_base_ = [
'../../universenet/models/universenet50_2008.py',
'../../_base_/datasets/coco_detection_mstrain_480_960.py',
'../../_base_/schedules/schedule_1x.py', '../../_base_/default_runtime.py'
]
model = dict(
neck=dict(
_delete_=True,
type='FPN',
in_channels=[256, 512, 1024, 2048],
out_channels=256,
start_level=1,
add_extra_convs='on_output',
num_outs=5),
bbox_head=dict(type='GFLHead', stacked_convs=4))
data = dict(samples_per_gpu=4)
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(warmup_iters=1000)
fp16 = dict(loss_scale=512.)
|
tools/train_w2v.py | sketscripter/emotional-chatbot-cakechat | 1,608 | 12780402 | <reponame>sketscripter/emotional-chatbot-cakechat
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from cakechat.utils.text_processing import get_processed_corpus_path, load_processed_dialogs_from_json, \
FileTextLinesIterator, get_dialog_lines_and_conditions, ProcessedLinesIterator, get_flatten_dialogs
from cakechat.utils.w2v.model import _get_w2v_model as get_w2v_model
from cakechat.config import TRAIN_CORPUS_NAME, VOCABULARY_MAX_SIZE, WORD_EMBEDDING_DIMENSION, W2V_WINDOW_SIZE, \
USE_SKIP_GRAM
if __name__ == '__main__':
processed_corpus_path = get_processed_corpus_path(TRAIN_CORPUS_NAME)
dialogs = load_processed_dialogs_from_json(
FileTextLinesIterator(processed_corpus_path), text_field_name='text', condition_field_name='condition')
training_dialogs_lines_for_w2v, _ = get_dialog_lines_and_conditions(
get_flatten_dialogs(dialogs), text_field_name='text', condition_field_name='condition')
tokenized_training_lines = ProcessedLinesIterator(training_dialogs_lines_for_w2v, processing_callbacks=[str.split])
get_w2v_model(
tokenized_lines=tokenized_training_lines,
corpus_name=TRAIN_CORPUS_NAME,
voc_size=VOCABULARY_MAX_SIZE,
vec_size=WORD_EMBEDDING_DIMENSION,
window_size=W2V_WINDOW_SIZE,
skip_gram=USE_SKIP_GRAM)
|
gubernator/pb_glance.py | Noahhoetger2001/test-infra | 3,390 | 12780412 | <gh_stars>1000+
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A tiny, minimal protobuf2 parser that's able to extract enough information
to be useful.
"""
import cStringIO as StringIO
def parse_protobuf(data, schema=None):
"""
Do a simple parse of a protobuf2 given minimal type information.
Args:
data: a string containing the encoded protocol buffer.
schema: a dict containing information about each field number.
The keys are field numbers, and the values represent:
- str: the name of the field
- dict: schema to recursively decode an embedded message.
May contain a 'name' key to name the field.
Returns:
dict: mapping from fields to values. The fields may be strings instead of
numbers if schema named them, and the value will *always* be
a list of values observed for that key.
"""
if schema is None:
schema = {}
buf = StringIO.StringIO(data)
def read_varint():
out = 0
shift = 0
c = 0x80
while c & 0x80:
c = ord(buf.read(1))
out = out | ((c & 0x7f) << shift)
shift += 7
return out
values = {}
while buf.tell() < len(data):
key = read_varint()
wire_type = key & 0b111
field_number = key >> 3
field_name = field_number
if wire_type == 0:
value = read_varint()
elif wire_type == 1: # 64-bit
value = buf.read(8)
elif wire_type == 2: # length-delim
length = read_varint()
value = buf.read(length)
if isinstance(schema.get(field_number), basestring):
field_name = schema[field_number]
elif field_number in schema:
# yes, I'm using dynamic features of a dynamic language.
# pylint: disable=redefined-variable-type
value = parse_protobuf(value, schema[field_number])
field_name = schema[field_number].get('name', field_name)
elif wire_type == 5: # 32-bit
value = buf.read(4)
else:
raise ValueError('unhandled wire type %d' % wire_type)
values.setdefault(field_name, []).append(value)
return values
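# Hedged example: decode a tiny hand-built message with parse_protobuf(). The
# schema names 'count' and 'name' are invented for the demo; note that this
# parser only applies schema names to length-delimited (wire type 2) fields,
# so the varint field keeps its numeric key.
def _example_parse():
    data = '\x08\x96\x01\x12\x02hi'  # field 1 = varint 150, field 2 = "hi"
    schema = {1: 'count', 2: 'name'}
    decoded = parse_protobuf(data, schema)
    # decoded == {1: [150], 'name': ['hi']}
    return decoded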
|
exercises/en/exc_02_02_02.py | Jette16/spacy-course | 2,085 | 12780420 | <filename>exercises/en/exc_02_02_02.py
from spacy.lang.en import English
nlp = English()
doc = nlp("<NAME> is a PERSON")
# Look up the hash for the string label "PERSON"
person_hash = ____.____.____[____]
print(person_hash)
# Look up the person_hash to get the string
person_string = ____.____.____[____]
print(person_string)
|
utils/tool.py | Gagarinwjj/Coeus | 139 | 12780482 | <filename>utils/tool.py
# coding: utf-8
__author__ = 'deff'
import re
class Tools:
@staticmethod
def xml_assent(word):
        # Escape '&' first so the entities generated below are not re-escaped.
        symbolc = re.compile('&')
        word = symbolc.sub('&amp;', word)
        symbola = re.compile('>')
        word = symbola.sub('&gt;', word)
        symbolb = re.compile('<')
        word = symbolb.sub('&lt;', word)
        symbold = re.compile('\'')
        word = symbold.sub('&apos;', word)
        symbole = re.compile('\"')
        word = symbole.sub('&quot;', word)
return word
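# Hedged illustration of the intended behaviour of xml_assent(): escaping the
# string 'a < b & "c"' should yield 'a &lt; b &amp; &quot;c&quot;'.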
|
test/test_pulljson.py | Teradata/PyTd | 133 | 12780510 | <filename>test/test_pulljson.py
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from teradata import pulljson
import unittest
import sys
if sys.version_info[0] == 2:
from StringIO import StringIO # @UnresolvedImport #@UnusedImport
else:
from io import StringIO # @UnresolvedImport @UnusedImport @Reimport
class TestJSONPullParser (unittest.TestCase):
def testNextEvent(self):
stream = StringIO("""{"key1":"value", "key2":100, "key3":null,
"key4": true, "key5":false, "key6":-201.50E1, "key7":{"key8":"value2",
"key9":null}, "key10":["value3", 10101010101010101010101, null,
{} ] }""")
reader = pulljson.JSONPullParser(stream)
# Start of object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
# Key1 - "value"
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key1")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, "value")
self.assertEqual(event.valueType, pulljson.STRING)
# Key2 - 100
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key2")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, 100)
self.assertEqual(event.valueType, pulljson.NUMBER)
# Key3 - null
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key3")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertIsNone(event.value)
self.assertEqual(event.valueType, pulljson.NULL)
# Key4 - true
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key4")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertTrue(event.value)
self.assertEqual(event.valueType, pulljson.BOOLEAN)
# Key5 - false
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key5")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertFalse(event.value)
self.assertEqual(event.valueType, pulljson.BOOLEAN)
# Key6
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key6")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, -2015)
self.assertEqual(event.valueType, pulljson.NUMBER)
# Key7
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key7")
# Start of key7 object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
# Key8 - value2
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key8")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, "value2")
self.assertEqual(event.valueType, pulljson.STRING)
# Key9 - null
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key9")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertIsNone(event.value)
# End of key7 object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
# Key10 - array[0] - value3
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key10")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
# Key10 - array[0] - value3
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
self.assertEqual(event.value, "value3")
self.assertEqual(event.valueType, pulljson.STRING)
self.assertEqual(event.arrayIndex, 0)
# Key10 - array[1] - 10101010101010101010101
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
self.assertEqual(event.value, 10101010101010101010101)
self.assertEqual(event.valueType, pulljson.NUMBER)
self.assertEqual(event.arrayIndex, 1)
# Key10 - array[2] - null
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
self.assertIsNone(event.value)
self.assertEqual(event.valueType, pulljson.NULL)
self.assertEqual(event.arrayIndex, 2)
# Key10 - array[3] - object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
self.assertEqual(event.arrayIndex, 3)
# Key10 - array[3] - object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
self.assertEqual(event.arrayIndex, 3)
# End of key 10 array.
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_ARRAY)
# End of object
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
event = reader.nextEvent()
self.assertIsNone(event)
def testDocumentIncomplete(self):
stream = StringIO('{"key":"value"')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key")
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_INCOMPLETE_ERROR,
cm.exception.msg)
def testEmptyName(self):
stream = StringIO('{:"value"}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testExtraWhiteSpace(self):
stream = StringIO('{\n\t "key"\n\t\t: "\t value\n"} ')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, "key")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, "\t value\n")
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
event = reader.nextEvent()
self.assertIsNone(event)
def testEscapeCharacter(self):
stream = StringIO('{"\\"ke\\"y\\\\" : "va\\"l\\"ue"} ')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
self.assertEqual(event.value, '"ke"y\\')
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_VALUE)
self.assertEqual(event.value, 'va"l"ue')
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_OBJECT)
event = reader.nextEvent()
self.assertIsNone(event)
def testEmptyArray(self):
stream = StringIO('[]')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.END_ARRAY)
event = reader.nextEvent()
self.assertIsNone(event)
def testMissingColon(self):
stream = StringIO('{"key" "value"}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testCommaInsteadOfColon(self):
stream = StringIO('{"key","value"}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testColonInsteadOfComma(self):
stream = StringIO('["key":"value"]')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testNumberLiteral(self):
stream = StringIO('1')
reader = pulljson.JSONPullParser(stream)
with self.assertRaises(pulljson.JSONParseError) as cm:
reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testStringLiteral(self):
stream = StringIO('"This is a test"')
reader = pulljson.JSONPullParser(stream)
with self.assertRaises(pulljson.JSONParseError) as cm:
reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testObjectMissingValue(self):
stream = StringIO('{"key":}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.FIELD_NAME)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testArrayMissingValue(self):
stream = StringIO('[1, ,2}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_ARRAY)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.ARRAY_VALUE)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testArrayInObject(self):
stream = StringIO('{[]}')
reader = pulljson.JSONPullParser(stream)
event = reader.nextEvent()
self.assertEqual(event.type, pulljson.START_OBJECT)
with self.assertRaises(pulljson.JSONParseError) as cm:
event = reader.nextEvent()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testReadObject(self):
stream = StringIO(
'{"key1":[0,1,2,3,4,{"value":"5"}], "key2":\
{"key1":[0,1,2,3,4,{"value":"5"}]}}')
reader = pulljson.JSONPullParser(stream)
obj = reader.readObject()
self.assertEqual(len(obj), 2)
for i in range(0, 2):
self.assertEqual(len(obj["key1"]), 6)
for i in range(0, 5):
self.assertEqual(obj["key1"][i], i)
self.assertEqual(obj["key1"][5]["value"], "5")
if i == 1:
obj = obj["key2"]
self.assertEqual(len(obj), 1)
def testReadArray(self):
stream = StringIO('[0,1,2,3,4,[0,1,2,3,4,[0,1,2,3,4]],[0,1,2,3,4]]')
reader = pulljson.JSONPullParser(stream)
arr = reader.readArray()
self.assertEqual(len(arr), 7)
for i in range(0, 5):
self.assertEqual(arr[i], i)
for i in range(0, 5):
self.assertEqual(arr[5][i], i)
for i in range(0, 5):
self.assertEqual(arr[5][5][i], i)
for i in range(0, 5):
self.assertEqual(arr[6][i], i)
def testArraySyntaxError(self):
stream = StringIO('[[0,1][0,1]]')
reader = pulljson.JSONPullParser(stream)
with self.assertRaises(pulljson.JSONParseError) as cm:
reader.readArray()
self.assertEqual(
cm.exception.code, pulljson.JSON_SYNTAX_ERROR, cm.exception.msg)
def testIterateArray(self):
stream = StringIO(
'[{"key0}":["}\\"","\\"}","}"]}, {"key1}":["}","\\"}","}"]}, '
'{"key2}":["}","}","\\"}"]}]')
reader = pulljson.JSONPullParser(stream)
i = 0
for x in reader.expectArray():
self.assertEqual(len(x["key" + str(i) + "}"]), 3)
i += 1
if __name__ == '__main__':
unittest.main()
|
src/blockdiag/imagedraw/__init__.py | flying-foozy/blockdiag | 155 | 12780514 | <filename>src/blockdiag/imagedraw/__init__.py<gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pkg_resources
from blockdiag.utils.logging import warning
drawers = {}
def init_imagedrawers(debug=False):
for drawer in pkg_resources.iter_entry_points('blockdiag_imagedrawers'):
try:
module = drawer.load()
if hasattr(module, 'setup'):
module.setup(module)
except Exception as exc:
if debug:
warning('Failed to load %s: %r' % (drawer.module_name, exc))
def install_imagedrawer(ext, drawer):
drawers[ext] = drawer
def create(_format, filename, **kwargs):
if len(drawers) == 0:
init_imagedrawers(debug=kwargs.get('debug'))
_format = _format.lower()
if _format in drawers:
drawer = drawers[_format](filename, **kwargs)
else:
msg = 'failed to load %s image driver' % _format
raise RuntimeError(msg)
if 'linejump' in kwargs.get('filters', []):
from blockdiag.imagedraw.filters.linejump import LineJumpDrawFilter
jumpsize = kwargs.get('jumpsize', 0)
drawer = LineJumpDrawFilter(drawer, jumpsize)
return drawer
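# Hedged usage sketch: obtain a drawer for a registered output format, e.g.
#
#   drawer = create('svg', 'out.svg')
#
# The format string must match an image driver registered through the
# 'blockdiag_imagedrawers' entry point; the file name here is only an example.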
|
myia/abstract/__init__.py | strint/myia | 222 | 12780538 | <reponame>strint/myia
"""Abstract data and type/shape inference."""
from .aliasing import *
from .amerge import *
from .data import *
from .infer import *
from .loop import *
from .macro import *
from .ref import *
from .to_abstract import *
from .utils import *
|
Chapter09/c9_52_impact_of_correlation_on_efficient_frontier_notWorking.py | John-ye666/Python-for-Finance-Second-Edition | 236 | 12780545 | <filename>Chapter09/c9_52_impact_of_correlation_on_efficient_frontier_notWorking.py
"""
Name : c9_52_impacto_of_correlation_on_efficient_frontier.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
from matplotlib.finance import quotes_historical_yahoo_ochl as getData
import matplotlib.pyplot as plt
import numpy as np, pandas as pd, scipy as sp
from numpy.linalg import inv, pinv
begYear,endYear = 2012,2016
stocks=['IBM','WMT']
def ret_monthly(ticker): # function 1
x = getData(ticker,(begYear,1,1),(endYear,12,31),asobject=True,adjusted=True)
logret=np.log(x.aclose[1:]/x.aclose[:-1])
date=[]
d0=x.date
for i in range(0,np.size(logret)):
date.append(''.join([d0[i].strftime("%Y"),d0[i].strftime("%m")]))
y=pd.DataFrame(logret,date,columns=[ticker])
return y.groupby(y.index).sum()
def std_f(ticker):
x=ret_monthly(ticker)
return sp.std(x)
def objFunction(W, R, target_ret):
stock_mean=np.mean(R,axis=0)
port_mean=np.dot(W,stock_mean) # portfolio mean
#cov=np.cov(R.T) # var-cov matrix
cov=cov0
port_var=np.dot(np.dot(W,cov),W.T) # portfolio variance
penalty = 2000*abs(port_mean-target_ret)# penalty 4 deviation
return np.sqrt(port_var) + penalty # objective function
R0=ret_monthly(stocks[0]) # starting from 1st stock
n_stock=len(stocks) # number of stocks
std1=std_f(stocks[0])
std2=std_f(stocks[1])
for jj in sp.arange(1):
k=0.1*std1*std2
#cov0=sp.array([[0.00266285,0.00037303],[0.00037303,0.0021296]])
#cov0=sp.array([[std1**2,k],[k,std2**2]])
cov0=sp.array([[std1**2,0.00037303],[0.00037303,std2**2]])
for i in xrange(1,n_stock): # merge with other stocks
x=ret_monthly(stocks[i])
R0=pd.merge(R0,x,left_index=True,right_index=True)
R=np.array(R0)
out_mean,out_std,out_weight=[],[],[]
stockMean=np.mean(R,axis=0)
for r in np.linspace(np.min(stockMean),np.max(stockMean),num=100):
W = np.ones([n_stock])/n_stock # starting from equal weights
b_ = [(0,1)
for i in range(n_stock)] # bounds, here no short
c_ = ({'type':'eq', 'fun': lambda W: sum(W)-1. })#constraint
result=sp.optimize.minimize(objFunction,W,(R,r),method='SLSQP',constraints=c_, bounds=b_)
if not result.success: # handle error raise
BaseException(result.message)
out_mean.append(round(r,4)) # 4 decimal places
std_=round(np.std(np.sum(R*result.x,axis=1)),6)
out_std.append(std_)
out_weight.append(result.x)
plt.title('Efficient Frontier')
plt.xlabel('Standard Deviation of the porfolio (Risk))')
plt.ylabel('Return of the portfolio')
plt.figtext(0.5,0.75,str(n_stock)+' stock are used: ')
plt.figtext(0.5,0.7,' '+str(stocks))
plt.figtext(0.5,0.65,'Time period: '+str(begYear)+' ------ '+str(endYear))
plt.plot(out_std,out_mean,'--')
plt.show()
|
tests/core/test_oauth2/test_rfc7591.py | YPCrumble/authlib | 3,172 | 12780555 | from unittest import TestCase
from authlib.oauth2.rfc7591 import ClientMetadataClaims
from authlib.jose.errors import InvalidClaimError
class ClientMetadataClaimsTest(TestCase):
def test_validate_redirect_uris(self):
claims = ClientMetadataClaims({'redirect_uris': ['foo']}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_client_uri(self):
claims = ClientMetadataClaims({'client_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_logo_uri(self):
claims = ClientMetadataClaims({'logo_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_tos_uri(self):
claims = ClientMetadataClaims({'tos_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_policy_uri(self):
claims = ClientMetadataClaims({'policy_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
def test_validate_jwks_uri(self):
claims = ClientMetadataClaims({'jwks_uri': 'foo'}, {})
self.assertRaises(InvalidClaimError, claims.validate)
|
backprop/models/st_model/model.py | lucky7323/backprop | 200 | 12780625 | from typing import Dict, List
import torch
from functools import partial
from backprop.models import PathModel
from torch.optim.adamw import AdamW
from sentence_transformers import SentenceTransformer
class STModel(PathModel):
"""
Class for models which are initialised from Sentence Transformers
Attributes:
model_path: path to ST model
name: string identifier for the model. Lowercase letters and numbers.
No spaces/special characters except dashes.
max_length: Max supported token length for vectorisation
description: String description of the model.
tasks: List of supported task strings
details: Dictionary of additional details about the model
init_model: Class used to initialise model
device: Device for model. Defaults to "cuda" if available.
"""
def __init__(self, model_path, init_model=SentenceTransformer, name: str = None,
description: str = None, tasks: List[str] = None, details: Dict = None,
max_length=512, device=None):
init_model = partial(init_model, device=device)
tasks = ["text-vectorisation"]
PathModel.__init__(self, model_path, name=name, description=description,
details=details, tasks=tasks,
init_model=init_model,
device=device)
self.max_length = max_length
@staticmethod
def list_models():
from .models_list import models
return models
@torch.no_grad()
def __call__(self, task_input, task="text-vectorisation", return_tensor=False):
"""
Uses the model for the text-vectorisation task
Args:
task_input: input dictionary according to the ``text-vectorisation`` task specification
task: text-vectorisation
"""
is_list = False
if task == "text-vectorisation":
input_ids = None
attention_mask = None
text = task_input.get("text")
if type(text) == list:
is_list = True
else:
text = [text]
features = self.model.tokenizer(text, truncation=True, padding=True, return_tensors="pt").to(self._model_device)
text_vecs = self.vectorise(features)
if not return_tensor:
text_vecs = text_vecs.tolist()
output = text_vecs
if not is_list:
output = output[0]
return output
else:
raise ValueError(f"Unsupported task '{task}'")
def training_step(self, params, task="text-vectorisation"):
text = params["text"]
return self.vectorise(text)
def process_batch(self, params, task="text-vectorisation"):
if task == "text-vectorisation":
max_length = params["max_length"] or self.max_length
if max_length > self.max_length:
raise ValueError(f"This model has a max_length limit of {self.max_length}")
text = params["text"]
return self.model.tokenizer(text, truncation=True, padding="max_length", return_tensors="pt")
def vectorise(self, features):
return self.model.forward(features)["sentence_embedding"]
def configure_optimizers(self):
return AdamW(params=self.model.parameters(), lr=2e-5, eps=1e-6, correct_bias=False) |
table_border_syntax.py | akrabat/SublimeTableEditor | 313 | 12780627 | <gh_stars>100-1000
# table_border_syntax.py - Base classes for table with borders: Pandoc,
# Emacs Org mode, Simple, reStrucutredText
# Copyright (C) 2012 Free Software Foundation, Inc.
# Author: <NAME>
# Package: SublimeTableEditor
# Homepage: https://github.com/vkocubinsky/SublimeTableEditor
# This file is part of SublimeTableEditor.
# SublimeTableEditor is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# SublimeTableEditor is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with SublimeTableEditor. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import division
import re
try:
from . import table_base as tbase
except ValueError:
import table_base as tbase
class SeparatorRow(tbase.Row):
def __init__(self, table, separator='-', size=0):
tbase.Row.__init__(self, table)
self.separator = separator
for i in range(size):
self.columns.append(SeparatorColumn(self, self.separator))
def new_empty_column(self):
return SeparatorColumn(self, self.separator)
def create_column(self, text):
return SeparatorColumn(self, self.separator)
def is_header_separator(self):
return True
def is_separator(self):
return True
def render(self):
r = self.syntax.hline_out_border
for ind, column in enumerate(self.columns):
if ind != 0:
r += self.syntax.hline_in_border
r += column.render()
r += self.syntax.hline_out_border
return r
class SeparatorColumn(tbase.Column):
def __init__(self, row, separator):
tbase.Column.__init__(self, row)
self.separator = separator
def min_len(self):
# '---' or '==='
return 3
def render(self):
return self.separator * self.col_len
class BorderTableDriver(tbase.TableDriver):
def editor_insert_single_hline(self, table, table_pos):
table.rows.insert(table_pos.row_num + 1, SeparatorRow(table, '-'))
table.pack()
return ("Single separator row inserted",
tbase.TablePos(table_pos.row_num, table_pos.field_num))
def editor_insert_double_hline(self, table, table_pos):
table.rows.insert(table_pos.row_num + 1, SeparatorRow(table, '='))
table.pack()
return ("Double separator row inserted",
tbase.TablePos(table_pos.row_num, table_pos.field_num))
def editor_insert_hline_and_move(self, table, table_pos):
table.rows.insert(table_pos.row_num + 1, SeparatorRow(table, '-'))
table.pack()
if table_pos.row_num + 2 < len(table):
if table[table_pos.row_num + 2].is_separator():
table.insert_empty_row(table_pos.row_num + 2)
else:
table.insert_empty_row(table_pos.row_num + 2)
return("Single separator row inserted",
tbase.TablePos(table_pos.row_num + 2, 0))
class BorderTableParser(tbase.BaseTableParser):
def _is_single_row_separator(self, str_cols):
if len(str_cols) == 0:
return False
for col in str_cols:
if not re.match(r"^\s*[\-]+\s*$", col):
return False
return True
def _is_double_row_separator(self, str_cols):
if len(str_cols) == 0:
return False
for col in str_cols:
if not re.match(r"^\s*[\=]+\s*$", col):
return False
return True
def create_row(self, table, line):
if self._is_single_row_separator(line.str_cols()):
row = SeparatorRow(table, '-')
elif self._is_double_row_separator(line.str_cols()):
row = SeparatorRow(table, '=')
else:
row = self.create_data_row(table, line)
return row
def create_data_row(self, table, line):
return tbase.DataRow(table)
|
runtime/python/Lib/asyncio/format_helpers.py | hwaipy/InteractionFreeNode | 207 | 12780646 | <filename>runtime/python/Lib/asyncio/format_helpers.py<gh_stars>100-1000
import functools
import inspect
import reprlib
import sys
import traceback
from . import constants
def _get_function_source(func):
func = inspect.unwrap(func)
if inspect.isfunction(func):
code = func.__code__
return (code.co_filename, code.co_firstlineno)
if isinstance(func, functools.partial):
return _get_function_source(func.func)
if isinstance(func, functools.partialmethod):
return _get_function_source(func.func)
return None
def _format_callback_source(func, args):
func_repr = _format_callback(func, args, None)
source = _get_function_source(func)
if source:
func_repr += f' at {source[0]}:{source[1]}'
return func_repr
def _format_args_and_kwargs(args, kwargs):
"""Format function arguments and keyword arguments.
Special case for a single parameter: ('hello',) is formatted as ('hello').
"""
# use reprlib to limit the length of the output
items = []
if args:
items.extend(reprlib.repr(arg) for arg in args)
if kwargs:
items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
return '({})'.format(', '.join(items))
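# For example: _format_args_and_kwargs(('hello',), None) returns "('hello')",
# and _format_args_and_kwargs((1, 2), {'flag': True}) returns '(1, 2, flag=True)'.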
def _format_callback(func, args, kwargs, suffix=''):
if isinstance(func, functools.partial):
suffix = _format_args_and_kwargs(args, kwargs) + suffix
return _format_callback(func.func, func.args, func.keywords, suffix)
if hasattr(func, '__qualname__') and func.__qualname__:
func_repr = func.__qualname__
elif hasattr(func, '__name__') and func.__name__:
func_repr = func.__name__
else:
func_repr = repr(func)
func_repr += _format_args_and_kwargs(args, kwargs)
if suffix:
func_repr += suffix
return func_repr
def extract_stack(f=None, limit=None):
"""Replacement for traceback.extract_stack() that only does the
necessary work for asyncio debug mode.
"""
if f is None:
f = sys._getframe().f_back
if limit is None:
# Limit the amount of work to a reasonable amount, as extract_stack()
# can be called for each coroutine and future in debug mode.
limit = constants.DEBUG_STACK_DEPTH
stack = traceback.StackSummary.extract(traceback.walk_stack(f),
limit=limit,
lookup_lines=False)
stack.reverse()
return stack
|
bazel/aprutil.bzl | arunvc/incubator-pagespeed-mod | 535 | 12780647 | aprutil_build_rule = """
cc_library(
name = "aprutil",
srcs = [
"@mod_pagespeed//third_party/aprutil:aprutil_pagespeed_memcache_c",
'buckets/apr_brigade.c',
'buckets/apr_buckets.c',
'buckets/apr_buckets_alloc.c',
'buckets/apr_buckets_eos.c',
'buckets/apr_buckets_file.c',
'buckets/apr_buckets_flush.c',
'buckets/apr_buckets_heap.c',
'buckets/apr_buckets_mmap.c',
'buckets/apr_buckets_pipe.c',
'buckets/apr_buckets_pool.c',
'buckets/apr_buckets_refcount.c',
'buckets/apr_buckets_simple.c',
'buckets/apr_buckets_socket.c',
'crypto/apr_md5.c',
'crypto/getuuid.c',
'crypto/uuid.c',
#'dbm/apr_dbm.c',
#'dbm/apr_dbm_sdbm.c',
#'dbm/sdbm/sdbm.c',
#'dbm/sdbm/sdbm_hash.c',
#'dbm/sdbm/sdbm_lock.c',
#'dbm/sdbm/sdbm_pair.c',
'encoding/apr_base64.c',
'hooks/apr_hooks.c',
#'ldap/apr_ldap_stub.c',
#'ldap/apr_ldap_url.c',
'memcache/apr_memcache.c',
'misc/apr_date.c',
'misc/apr_queue.c',
'misc/apr_reslist.c',
'misc/apr_rmm.c',
'misc/apr_thread_pool.c',
'misc/apu_dso.c',
'misc/apu_version.c',
'strmatch/apr_strmatch.c',
'uri/apr_uri.c',
'xlate/xlate.c',
],
hdrs = [
"@mod_pagespeed//third_party/aprutil:aprutil_pagespeed",
"crypto/crypt_blowfish.h",
#"test/test_apu.h",
#"test/abts_tests.h",
#"test/testutil.h",
#"test/abts.h",
"dbm/sdbm/sdbm_private.h",
"dbm/sdbm/sdbm_pair.h",
"dbm/sdbm/sdbm_tune.h",
"include/apr_siphash.h",
"include/apr_dbm.h",
"include/apr_xlate.h",
"include/apr_ldap_url.h",
"include/apu_version.h",
"include/apr_redis.h",
"include/private/apr_dbd_odbc_v2.h",
"include/private/apr_dbm_private.h",
"include/private/apu_internal.h",
"include/private/apr_dbd_internal.h",
"include/private/apr_crypto_internal.h",
"include/apr_md5.h",
"include/apu_errno.h",
"include/apr_xml.h",
"include/apr_sdbm.h",
"include/apr_md4.h",
"include/apr_hooks.h",
"include/apr_date.h",
"include/apr_reslist.h",
"include/apr_memcache.h",
"include/apr_uuid.h",
"include/apr_base64.h",
"include/apr_sha1.h",
"include/apr_uri.h",
"include/apr_queue.h",
"include/apr_ldap_option.h",
"include/apr_optional.h",
"include/apr_dbd.h",
"include/apr_anylock.h",
"include/apr_strmatch.h",
"include/apr_optional_hooks.h",
"include/apr_thread_pool.h",
"include/apr_buckets.h",
"include/apr_rmm.h",
"include/apr_ldap_rebind.h",
"include/apr_ldap_init.h",
"include/apr_crypto.h",
],
copts = [
"-Ithird_party/aprutil/gen/arch/linux/x64/include/",
"-Ithird_party/aprutil/gen/arch/linux/x64/include/private",
"-Iexternal/aprutil/include/",
"-Iexternal/aprutil/include/private/",
"-Iexternal/aprutil/include/arch/unix/",
"-Iexternal/aprutil/",
"-Iexternal/apr/include/",
"-Iexternal/apr/include/arch/unix/",
"-Ithird_party/apr/gen/arch/linux/x64/include/",
],
deps = [
"@apr//:apr",
],
visibility = ["//visibility:public"],
)
"""
# find | grep .h$ | while read line; do echo "\"$line\","; done |
backpack/extensions/firstorder/batch_grad/batch_grad_base.py | jabader97/backpack | 395 | 12780666 | """Calculates the batch_grad derivative."""
from __future__ import annotations
from typing import TYPE_CHECKING, Callable, List, Tuple
from torch import Tensor
from torch.nn import Module
from backpack.core.derivatives.basederivatives import BaseParameterDerivatives
from backpack.extensions.firstorder.base import FirstOrderModuleExtension
from backpack.utils.subsampling import subsample
if TYPE_CHECKING:
from backpack.extensions.firstorder import BatchGrad
class BatchGradBase(FirstOrderModuleExtension):
"""Calculates the batch_grad derivative.
Passes the calls for the parameters to the derivatives class.
Implements functions with method names from params.
    If a child class wants to overwrite these methods
- for example to support an additional external module -
it can do so using the interface for parameter "param1"::
param1(ext, module, g_inp, g_out, bpQuantities):
return batch_grads
In this case, the method is not overwritten by this class.
"""
def __init__(
self, derivatives: BaseParameterDerivatives, params: List[str]
) -> None:
"""Initializes all methods.
If the param method has already been defined, it is left unchanged.
Args:
derivatives: Derivatives object used to apply parameter Jacobians.
params: List of parameter names.
"""
self._derivatives = derivatives
for param_str in params:
if not hasattr(self, param_str):
setattr(self, param_str, self._make_param_function(param_str))
super().__init__(params=params)
def _make_param_function(
self, param_str: str
) -> Callable[[BatchGrad, Module, Tuple[Tensor], Tuple[Tensor], None], Tensor]:
"""Creates a function that calculates batch_grad w.r.t. param.
Args:
param_str: Parameter name.
Returns:
Function that calculates batch_grad wrt param
"""
def param_function(
ext: BatchGrad,
module: Module,
g_inp: Tuple[Tensor],
g_out: Tuple[Tensor],
bpQuantities: None,
) -> Tensor:
"""Calculates batch_grad with the help of derivatives object.
Args:
ext: extension that is used
module: module that performed forward pass
g_inp: input gradient tensors
g_out: output gradient tensors
bpQuantities: additional quantities for second order
Returns:
Scaled individual gradients
"""
subsampling = ext.get_subsampling()
batch_axis = 0
return self._derivatives.param_mjp(
param_str,
module,
g_inp,
g_out,
subsample(g_out[0], dim=batch_axis, subsampling=subsampling),
sum_batch=False,
subsampling=subsampling,
)
return param_function
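# Hedged sketch (not part of the original file): per the class docstring, a child
# class that needs special handling for a parameter named "param1" can define the
# method itself, in which case _make_param_function is skipped for it, e.g.
#
#     class MyBatchGradExtension(BatchGradBase):
#         def param1(self, ext, module, g_inp, g_out, bpQuantities):
#             # compute and return the individual gradients for "param1";
#             # the body is left abstract here on purpose
#             ...
#
# MyBatchGradExtension and param1 are illustrative names, not part of BackPACK.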
|
chinese-poem/probability.py | MashiMaroLjc/ML-and-DM-in-action | 370 | 12780669 | # coding:utf-8
# Estimate co-occurrence probabilities of line-opening words in five-character poems
def two(words):
"""
:param words:
:return:
"""
new = []
s = len(words)
for index in range(s):
w = words[index]
for next_index in range(index + 1, s):
next_w = words[next_index]
new.append(frozenset([w, next_w]))
return new
poemfile = open("five_poem.txt").readlines()
feature = []
n = 1
length = len(poemfile)
for poemline in poemfile:
print("finish:%.5f" % (n / length))
poemline = poemline.strip().replace("\n", "")
sentences = poemline.split(".")
temp = []
for sen in sentences:
if len(sen) != 5:
continue
temp.append(sen[:2])
feature.append(temp)
n += 1
size = len(feature)
word_fre = dict()
for fea in feature:
for word in set(fea):
word_fre[word] = word_fre.get(word, 0) + 1 / size
two_fre = dict()
two_feature = []
# Collect every unordered pair of opening words within each poem
for fea in feature:
fea = list(set(fea))
two_feature.append(two(fea))
for fea in two_feature:
for word in fea:
two_fre[word] = two_fre.get(word, 0) + 1 / size
# Conditional-probability table keyed by word
pro = dict()
for k, v in two_fre.items():
event = list(k)
    # record both directions: P(event[1] | event[0]) and P(event[0] | event[1])
key = event[0]
if key not in pro:
pro[key] = []
pro[key].append(
[event[1], two_fre[k] / word_fre[key]]
)
key = event[1]
if key not in pro:
pro[key] = []
pro[key].append(
[event[0], two_fre[k] / word_fre[key]]
)
# Persist the probability table as JSON
import json
out = open("pro.json", "w")
json.dump(pro, out)
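# Worked note (derived from the code above): word_fre[w] estimates P(w), the
# fraction of poems with a line opening on w; two_fre[{w1, w2}] estimates the
# joint P(w1, w2). Each entry stored in pro is therefore
#
#     pro[w1] -> [w2, P(w1, w2) / P(w1)]  ~  P(w2 | w1)
#
# the probability that w2 also opens a line of a poem, given that w1 does.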
|
chapter4/pizza.py | sharad16j/Expert-Python-Programming-Third-Edition | 112 | 12780687 | <filename>chapter4/pizza.py
class Pizza:
def __init__(self, toppings):
self.toppings = toppings
def __repr__(self):
return "Pizza with " + " and ".join(self.toppings)
@classmethod
def recommend(cls):
"""Recommend some pizza with arbitrary toppings,"""
return cls(['spam', 'ham', 'eggs'])
class VikingPizza(Pizza):
@classmethod
def recommend(cls):
"""Use same recommendation as super but add extra spam"""
        recommended = super().recommend()
recommended.toppings += ['spam'] * 5
return recommended
if __name__ == "__main__":
print("Ordinary pizza recomendation:", Pizza.recommend())
print("Viking pizza recomendation:", VikingPizza.recommend()) |
moya/context/sortmodifiers.py | moyaproject/moya | 129 | 12780710 | <reponame>moyaproject/moya
"""Hacked up script to sort modifiers"""
# Couldn't find a tool for this
"""
import io
with io.open('modifiers.py', 'rt') as f:
iter_lines = iter(f)
while 1:
line = next(iter_lines, None)
if line.startswith('class ExpressionModifiers('):
break
defs = []
while 1:
line = next(iter_lines, None)
if line is None:
break
if line.lstrip().startswith('def'):
defs.append([])
if defs:
defs[-1].append(line)
for d in sorted(defs, key=lambda m: m[0]):
print ''.join(d),
"""
|
example_evaluate_with_diff.py | ducha-aiki/manifold-diffusion | 118 | 12780739 | <gh_stars>100-1000
# EXAMPLE_EVALUATE Code to evaluate example results on ROxford and RParis datasets.
# Revisited protocol has 3 difficulty setups: Easy (E), Medium (M), and Hard (H),
# and evaluates the performance using mean average precision (mAP), as well as mean precision @ k (mP@k)
#
# More details about the revisited annotation and evaluation can be found in:
# <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., Revisiting Oxford and Paris: Large-Scale Image Retrieval Benchmarking, CVPR 2018
#
# Authors: <NAME>., <NAME>., <NAME>., <NAME>., <NAME>., 2018
# Added diffusion: <NAME>.
import os
import numpy as np
from scipy.io import loadmat
from dataset import configdataset
from download import download_datasets, download_features
from evaluate import compute_map
#---------------------------------------------------------------------
# Set data folder and testing parameters
#---------------------------------------------------------------------
# Set data folder, change if you have downloaded the data somewhere else
data_root = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'data')
# Check, and, if necessary, download test data (Oxford and Pairs),
# revisited annotation, and example feature vectors for evaluation
download_datasets(data_root)
download_features(data_root)
# Set test dataset: roxford5k | rparis6k
test_dataset = 'roxford5k'
#---------------------------------------------------------------------
# Evaluate
#---------------------------------------------------------------------
print('>> {}: Evaluating test dataset...'.format(test_dataset))
# config file for the dataset
# separates query image list from database image list, when revisited protocol used
cfg = configdataset(test_dataset, os.path.join(data_root, 'datasets'))
# load query and database features
print('>> {}: Loading features...'.format(test_dataset))
features = loadmat(os.path.join(data_root, 'features', '{}_resnet_rsfm120k_gem.mat'.format(test_dataset)))
Q = features['Q']
X = features['X']
K = 100 # approx 50 mutual nns
QUERYKNN = 10
R = 2000
alpha = 0.9
from diffussion import *
# perform search
print('>> {}: Retrieval...'.format(test_dataset))
sim = np.dot(X.T, Q)
qsim = sim_kernel(sim).T
sortidxs = np.argsort(-qsim, axis = 1)
for i in range(len(qsim)):
qsim[i,sortidxs[i,QUERYKNN:]] = 0
qsim = sim_kernel(qsim)
A = np.dot(X.T, X)
W = sim_kernel(A).T
W = topK_W(W, K)
Wn = normalize_connection_graph(W)
plain_ranks = np.argsort(-sim, axis=0)
cg_ranks = cg_diffusion(qsim, Wn, alpha)
cg_trunk_ranks = dfs_trunk(sim, A, alpha = alpha, QUERYKNN = QUERYKNN )
fast_spectral_ranks = fsr_rankR(qsim, Wn, alpha, R)
alg_names = ['Plain', 'Diffusion cg', 'Diffusion trunkated', 'Spectral R=2000']
alg_ranks = [plain_ranks, cg_ranks, cg_trunk_ranks, fast_spectral_ranks]
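# Hedged note (based on the diffusion formulation these helpers follow, not
# re-derived from diffussion.py here): cg_diffusion ranks the database by the
# solution f of the linear system
#
#     (I - alpha * Wn) f = qsim
#
# obtained with conjugate gradients, and fsr_rankR approximates the same
# solution inside a rank-R spectral subspace of the normalized graph Wn.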
for rn in range(len(alg_names)):
ranks = alg_ranks[rn]
name = alg_names[rn]
# revisited evaluation
gnd = cfg['gnd']
# evaluate ranks
ks = [1, 5, 10]
# search for easy
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['easy']])
g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['hard']])
gnd_t.append(g)
mapE, apsE, mprE, prsE = compute_map(ranks, gnd_t, ks)
# search for easy & hard
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['easy'], gnd[i]['hard']])
g['junk'] = np.concatenate([gnd[i]['junk']])
gnd_t.append(g)
mapM, apsM, mprM, prsM = compute_map(ranks, gnd_t, ks)
# search for hard
gnd_t = []
for i in range(len(gnd)):
g = {}
g['ok'] = np.concatenate([gnd[i]['hard']])
g['junk'] = np.concatenate([gnd[i]['junk'], gnd[i]['easy']])
gnd_t.append(g)
mapH, apsH, mprH, prsH = compute_map(ranks, gnd_t, ks)
print(name)
print('>> {}: mAP E: {}, M: {}, H: {}'.format(test_dataset, np.around(mapE*100, decimals=2), np.around(mapM*100, decimals=2), np.around(mapH*100, decimals=2)))
print('>> {}: mP@k{} E: {}, M: {}, H: {}'.format(test_dataset, np.array(ks), np.around(mprE*100, decimals=2), np.around(mprM*100, decimals=2), np.around(mprH*100, decimals=2))) |
users/factories.py | bllli/Django-China-API | 187 | 12780785 | # factories that automatically create user data
import factory
from users.models import User
class UserFactory(factory.DjangoModelFactory):
class Meta:
model = User
username = factory.Sequence(lambda n: 'user%s' % n)
    email = factory.LazyAttribute(lambda o: '%s@example.com' % o.username)  # placeholder domain; original literal was redacted
password = 'password'
mugshot = factory.django.ImageField()
@classmethod
def _create(cls, model_class, *args, **kwargs):
manager = cls._get_manager(model_class)
return manager.create_user(*args, **kwargs)
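# Hedged usage sketch (assumption): because of the _create override above,
# instances go through the custom manager, e.g.
#
#     user = UserFactory()                 # -> User.objects.create_user(...)
#     users = UserFactory.create_batch(3)  # three users with sequential names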
|
xv_leak_tools/test_device/linux_device.py | UAEKondaya1/expressvpn_leak_testing | 219 | 12780862 | <filename>xv_leak_tools/test_device/linux_device.py
import platform
import signal
from xv_leak_tools.exception import XVEx
from xv_leak_tools.helpers import unused
from xv_leak_tools.log import L
from xv_leak_tools.test_device.desktop_device import DesktopDevice
from xv_leak_tools.test_device.connector_helper import ConnectorHelper
from xv_leak_tools.process import XVProcessException
# TODO: consider a UnixDevice as ancestor of MacOSDevice, LinuxDevice
class LinuxDevice(DesktopDevice):
def __init__(self, config, connector):
super().__init__(config, connector)
self._connector_helper = ConnectorHelper(self)
@staticmethod
def local_ips():
raise XVEx("TODO: Local IPs for Linux")
@staticmethod
def open_app(binary_path, root=False):
unused(root)
if binary_path is None:
L.debug('Application has no binary path; not opening')
# TODO: open the application here
@staticmethod
def close_app(binary_path, root=False):
unused(root)
if binary_path is None:
L.debug('Application has no binary path; not closing')
# TODO: close the application here
def os_name(self):
return 'linux'
def os_version(self):
return " ".join(platform.linux_distribution())
def report_info(self):
info = super().report_info()
commands = [
['uname', '-a'],
['lsb_release', '-a'],
['lscpu'],
]
for command in commands:
try:
info += self._connector_helper.check_command(command)[0]
except XVProcessException as ex:
L.warning("Couldn't get system info using command {}:\n{}".format(command, ex))
return info
def kill_process(self, pid):
L.debug("Killing process {}".format(pid))
return self._connector_helper.execute_scriptlet(
'remote_os_kill.py', [pid, int(signal.SIGKILL)], root=True)
def pgrep(self, process_name):
L.debug("pgrep-ing for {}".format(process_name))
return self._connector_helper.execute_scriptlet('pgrep.py', [process_name], root=True)
def command_line_for_pid(self, pid):
return self._connector_helper.execute_scriptlet('command_line_for_pid.py', [pid], root=True)
|
Chapter24/apple_factory.py | DeeMATT/AdvancedPythonProgramming | 278 | 12780886 |
MINI14 = '1.4GHz Mac mini'
class AppleFactory:
class MacMini14:
def __init__(self):
self.memory = 4 # in gigabytes
self.hdd = 500 # in gigabytes
self.gpu = 'Intel HD Graphics 5000'
def __str__(self):
info = (f'Model: {MINI14}',
f'Memory: {self.memory}GB',
f'Hard Disk: {self.hdd}GB',
f'Graphics Card: {self.gpu}')
return '\n'.join(info)
def build_computer(self, model):
if model == MINI14:
return self.MacMini14()
else:
print(f"I don't know how to build {model}")
if __name__ == '__main__':
afac = AppleFactory()
mac_mini = afac.build_computer(MINI14)
print(mac_mini)
|
run-addon.py | chinedufn/landon | 117 | 12780904 | <reponame>chinedufn/landon
# A script to temporarily install and run the addon. Useful for running
# blender-mesh-to-json via blender CLI where you might be in a
# continuous integration environment that doesn't have the addon
# installed
#
# blender file.blend --python $(mesh2json)
# -> becomes ->
# blender file.blend --python /path/to/run-addon
import bpy
import os
# Get the absolute path to the addon
dir = os.path.dirname(__file__)
addonFilePath = dir + '/blender-mesh-to-json.py'
# Install and enable the addon temporarily (since we aren't saving our user preferences)
# We just want to have access to the addon during this blender session
bpy.ops.preferences.addon_install(filepath=addonFilePath)
bpy.ops.preferences.addon_enable(module='blender-mesh-to-json')
# Run our addon
bpy.ops.import_export.mesh2json()
|
tests/data/aws/securityhub.py | ramonpetgrave64/cartography | 2,322 | 12780949 | GET_HUB = {
'HubArn': 'arn:aws:securityhub:us-east-1:000000000000:hub/default',
'SubscribedAt': '2020-12-03T11:05:17.571Z',
'AutoEnableControls': True,
}
|
lib/python/treadmill/tests/syscall/__init__.py | vrautela/treadmill | 133 | 12780960 | <reponame>vrautela/treadmill
"""Tests for treadmill's linux direct system call interface."""
|
migrations/20211128_01_Mn7Ng-create-holdem-game-record-table.py | zw-g/Funny-Nation | 126 | 12780971 | """
Create holdem game record table
"""
from yoyo import step
__depends__ = {'20211109_01_xKblp-change-comments-on-black-jack-record'}
steps = [
step("CREATE TABLE `holdemGameRecord` ( `userID` BIGINT NOT NULL , `moneyInvested` BIGINT NOT NULL , `status` INT NOT NULL COMMENT '0 represent in progress; 1 represent lose or fold; 2 represent win;' , `tableID` BIGINT NOT NULL , `time` TIMESTAMP NOT NULL , `tableUUID` VARCHAR(64) NOT NULL ) ENGINE = InnoDB;")
]
|
chariot/transformer/text/lower_normalizer.py | Y-Kuro-u/chariot | 134 | 12781046 | <reponame>Y-Kuro-u/chariot<gh_stars>100-1000
from chariot.transformer.text.base import TextNormalizer
class LowerNormalizer(TextNormalizer):
def __init__(self, copy=True):
super().__init__(copy)
def apply(self, text):
return text.lower()
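# Hedged usage sketch (assumption: calling apply() directly rather than through
# a chariot pipeline):
#
#     LowerNormalizer().apply("Hello WORLD")  # -> "hello world"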
|