Column schema (name: type, observed min–max):

hexsha: stringlengths, 40–40
size: int64, 5–2.06M
ext: stringclasses, 10 values
lang: stringclasses, 1 value
max_stars_repo_path: stringlengths, 3–248
max_stars_repo_name: stringlengths, 5–125
max_stars_repo_head_hexsha: stringlengths, 40–78
max_stars_repo_licenses: listlengths, 1–10
max_stars_count: int64, 1–191k
max_stars_repo_stars_event_min_datetime: stringlengths, 24–24
max_stars_repo_stars_event_max_datetime: stringlengths, 24–24
max_issues_repo_path: stringlengths, 3–248
max_issues_repo_name: stringlengths, 5–125
max_issues_repo_head_hexsha: stringlengths, 40–78
max_issues_repo_licenses: listlengths, 1–10
max_issues_count: int64, 1–67k
max_issues_repo_issues_event_min_datetime: stringlengths, 24–24
max_issues_repo_issues_event_max_datetime: stringlengths, 24–24
max_forks_repo_path: stringlengths, 3–248
max_forks_repo_name: stringlengths, 5–125
max_forks_repo_head_hexsha: stringlengths, 40–78
max_forks_repo_licenses: listlengths, 1–10
max_forks_count: int64, 1–105k
max_forks_repo_forks_event_min_datetime: stringlengths, 24–24
max_forks_repo_forks_event_max_datetime: stringlengths, 24–24
content: stringlengths, 5–2.06M
avg_line_length: float64, 1–1.02M
max_line_length: int64, 3–1.03M
alphanum_fraction: float64, 0–1
count_classes: int64, 0–1.6M
score_classes: float64, 0–1
count_generators: int64, 0–651k
score_generators: float64, 0–1
count_decorators: int64, 0–990k
score_decorators: float64, 0–1
count_async_functions: int64, 0–235k
score_async_functions: float64, 0–1
count_documentation: int64, 0–1.04M
score_documentation: float64, 0–1
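The rows that follow are easier to work with once loaded back into a tabular structure. A minimal sketch of loading and filtering one shard with pandas, assuming the dump has been exported to a Parquet file; the filename "code_files.parquet" is a placeholder and not part of the dataset:

import pandas as pd

# Hypothetical local shard of the dump; the filename is an assumption.
df = pd.read_parquet("code_files.parquet")

# Keep small, well-documented Python files, using the size and
# score_documentation columns described in the schema above.
subset = df[(df["size"] < 10_000) & (df["score_documentation"] > 0.4)]
print(subset[["max_stars_repo_name", "max_stars_repo_path", "max_stars_count"]].head())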
43b1df830b2abdb7a53300c3467f70be764c0f6f
1,235
py
Python
k_values_graph.py
leobouts/Skyline_top_k_queries
5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0
[ "MIT" ]
null
null
null
k_values_graph.py
leobouts/Skyline_top_k_queries
5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0
[ "MIT" ]
null
null
null
k_values_graph.py
leobouts/Skyline_top_k_queries
5f5e8ab8f5e521dc20f33a69dd042917ff5d42f0
[ "MIT" ]
null
null
null
from a_top_k import *
from b_top_k import *
import time


def main():
    # test the generator for the top-k input
    # starting time
    values_k = [1, 2, 5, 10, 20, 50, 100]
    times_topk_join_a = []
    times_topk_join_b = []
    number_of_valid_lines_a = []
    number_of_valid_lines_b = []

    for k in values_k:
        number_of_valid_lines = []
        top_k_a_generator = generate_top_join_a(number_of_valid_lines)
        start_time_a = time.time()
        for i in range(k):
            next(top_k_a_generator)
        number_of_valid_lines_a.append(len(number_of_valid_lines))
        top_k_time_a = time.time() - start_time_a
        times_topk_join_a.append(top_k_time_a)

        number_of_valid_lines = []
        top_k_b_generator = generate_top_join_b(number_of_valid_lines)
        start_time_b = time.time()
        for i in range(k):
            next(top_k_b_generator)
        number_of_valid_lines_b.append(len(number_of_valid_lines))
        top_k_time_b = time.time() - start_time_b
        times_topk_join_b.append(top_k_time_b)

    print(times_topk_join_a)
    print(times_topk_join_b)
    print(number_of_valid_lines_a)
    print(number_of_valid_lines_b)


if __name__ == "__main__":
    main()
24.7
70
0.673684
0
0
0
0
0
0
0
0
65
0.052632
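The trailing numeric fields of each row (avg_line_length, max_line_length, alphanum_fraction, and the count_*/score_* pairs) appear to be statistics derived from the content field. A rough sketch of how the first three could be recomputed; the exact definitions used by the dataset are an assumption here:

def basic_content_stats(content: str) -> dict:
    # Assumed definitions; the dataset's own formulas may differ slightly.
    lines = content.splitlines() or [""]
    line_lengths = [len(line) for line in lines]
    alphanum = sum(ch.isalnum() for ch in content)
    return {
        "avg_line_length": sum(line_lengths) / len(line_lengths),
        "max_line_length": max(line_lengths),
        "alphanum_fraction": alphanum / len(content) if content else 0.0,
    }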
43b219f1675072d8c1034bc153a5f05238d1fdf2
639
py
Python
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py
CEOALT1/RefindPlusUDK
116b957ad735f96fbb6d80a0ba582046960ba164
[ "BSD-2-Clause" ]
2,757
2018-04-28T21:41:36.000Z
2022-03-29T06:33:36.000Z
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py
CEOALT1/RefindPlusUDK
116b957ad735f96fbb6d80a0ba582046960ba164
[ "BSD-2-Clause" ]
20
2019-07-23T15:29:32.000Z
2022-01-21T12:53:04.000Z
AppPkg/Applications/Python/Python-2.7.2/Lib/lib2to3/fixes/fix_methodattrs.py
CEOALT1/RefindPlusUDK
116b957ad735f96fbb6d80a0ba582046960ba164
[ "BSD-2-Clause" ]
449
2018-05-09T05:54:05.000Z
2022-03-30T14:54:18.000Z
"""Fix bound method attributes (method.im_? -> method.__?__). """ # Author: Christian Heimes # Local imports from .. import fixer_base from ..fixer_util import Name MAP = { "im_func" : "__func__", "im_self" : "__self__", "im_class" : "__self__.__class__" } class FixMethodattrs(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* > """ def transform(self, node, results): attr = results["attr"][0] new = unicode(MAP[attr.value]) attr.replace(Name(new, prefix=attr.prefix))
25.56
80
0.596244
347
0.543036
0
0
0
0
0
0
276
0.431925
43b28c13174a1c70f27d43e88e2fd455da590fcc
4,764
py
Python
models/TextCNN/cnn2d.py
Renovamen/Text-Classification
4a4aa4001c402ed4371ebaabe1393b27794e5992
[ "MIT" ]
72
2020-06-23T18:26:47.000Z
2022-03-26T13:33:30.000Z
models/TextCNN/cnn2d.py
Renovamen/Text-Classification
4a4aa4001c402ed4371ebaabe1393b27794e5992
[ "MIT" ]
5
2020-12-04T13:31:09.000Z
2021-08-03T14:11:52.000Z
models/TextCNN/cnn2d.py
Renovamen/Text-Classification
4a4aa4001c402ed4371ebaabe1393b27794e5992
[ "MIT" ]
15
2020-06-24T16:08:39.000Z
2022-02-04T06:53:38.000Z
import torch import torch.nn as nn import torch.nn.functional as F from typing import List class TextCNN2D(nn.Module): """ Implementation of 2D version of TextCNN proposed in paper [1]. `Here <https://github.com/yoonkim/CNN_sentence>`_ is the official implementation of TextCNN. Parameters ---------- n_classes : int Number of classes vocab_size : int Number of words in the vocabulary embeddings : torch.Tensor Word embedding weights emb_size : int Size of word embeddings fine_tune : bool Allow fine-tuning of embedding layer? (only makes sense when using pre-trained embeddings) n_kernels : int Number of kernels kernel_sizes : List[int] Size of each kernel dropout : float Dropout n_channels : int Number of channels (1 / 2) References ---------- 1. "`Convolutional Neural Networks for Sentence Classification. \ <https://www.aclweb.org/anthology/D14-1181.pdf>`_" Yoon Kim. EMNLP 2014. """ def __init__( self, n_classes: int, vocab_size: int, embeddings: torch.Tensor, emb_size: int, fine_tune: bool, n_kernels: int, kernel_sizes: List[int], dropout: float, n_channels = 1 ) -> None: super(TextCNN2D, self).__init__() # embedding layer self.embedding1 = nn.Embedding(vocab_size, emb_size) self.set_embeddings(embeddings, 1, fine_tune) if n_channels == 2: # multichannel: a static channel and a non-static channel # which means embedding2 is frozen self.embedding2 = nn.Embedding(vocab_size, emb_size) self.set_embeddings(embeddings, 1, False) else: self.embedding2 = None # 2d conv layer self.convs = nn.ModuleList([ nn.Conv2d( in_channels = n_channels, out_channels = n_kernels, kernel_size = (size, emb_size) ) for size in kernel_sizes ]) self.fc = nn.Linear(len(kernel_sizes) * n_kernels, n_classes) self.dropout = nn.Dropout(dropout) self.relu = nn.ReLU() def set_embeddings( self, embeddings: torch.Tensor, layer_id: int = 1, fine_tune: bool = True ) -> None: """ Set weights for embedding layer Parameters ---------- embeddings : torch.Tensor Word embeddings layer_id : int Embedding layer 1 or 2 (when adopting multichannel architecture) fine_tune : bool, optional, default=True Allow fine-tuning of embedding layer? 
(only makes sense when using pre-trained embeddings) """ if embeddings is None: # initialize embedding layer with the uniform distribution if layer_id == 1: self.embedding1.weight.data.uniform_(-0.1, 0.1) else: self.embedding2.weight.data.uniform_(-0.1, 0.1) else: # initialize embedding layer with pre-trained embeddings if layer_id == 1: self.embedding1.weight = nn.Parameter(embeddings, requires_grad = fine_tune) else: self.embedding2.weight = nn.Parameter(embeddings, requires_grad = fine_tune) def forward(self, text: torch.Tensor, words_per_sentence: torch.Tensor) -> torch.Tensor: """ Parameters ---------- text : torch.Tensor (batch_size, word_pad_len) Input data words_per_sentence : torch.Tensor (batch_size) Sentence lengths Returns ------- scores : torch.Tensor (batch_size, n_classes) Class scores """ # word embedding embeddings = self.embedding1(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size) # multichannel if self.embedding2: embeddings2 = self.embedding2(text).unsqueeze(1) # (batch_size, 1, word_pad_len, emb_size) embeddings = torch.cat((embeddings, embeddings2), dim = 1) # (batch_size, 2, word_pad_len, emb_size) # conv conved = [self.relu(conv(embeddings)).squeeze(3) for conv in self.convs] # [(batch size, n_kernels, word_pad_len - kernel_sizes[n] + 1)] # pooling pooled = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in conved] # [(batch size, n_kernels)] # flatten flattened = self.dropout(torch.cat(pooled, dim = 1)) # (batch size, n_kernels * len(kernel_sizes)) scores = self.fc(flattened) # (batch size, n_classes) return scores
30.538462
145
0.588161
4,671
0.980479
0
0
0
0
0
0
2,269
0.47628
43b32db495f046dd61a5bbd3592b8806b465b229
785
py
Python
LEVEL2/다리를지나는트럭/solution.py
seunghwanly/CODING-TEST
a820da950c163d399594770199aa2e782d1fbbde
[ "MIT" ]
null
null
null
LEVEL2/다리를지나는트럭/solution.py
seunghwanly/CODING-TEST
a820da950c163d399594770199aa2e782d1fbbde
[ "MIT" ]
null
null
null
LEVEL2/다리를지나는트럭/solution.py
seunghwanly/CODING-TEST
a820da950c163d399594770199aa2e782d1fbbde
[ "MIT" ]
null
null
null
def solution(bridge_length, weight, truck_weights):
    answer = 0
    # { weight, time }
    wait = truck_weights[:]
    bridge = []
    passed = 0
    currWeight = 0

    while True:
        if passed == len(truck_weights) and len(wait) == 0:
            return answer
        answer += 1

        # sth needs to be passed
        if bridge:
            if bridge[0]['t'] + bridge_length == answer:
                front = bridge.pop(0)
                currWeight -= front['w']
                passed += 1

        # add new truck
        if wait:
            if currWeight + wait[0] <= weight:
                bridge.append({
                    'w': wait[0],
                    't': answer
                })
                currWeight += wait[0]
                wait.pop(0)


# print(solution(2, 10, [7, 4, 5, 6]))
print(solution(100, 100, [10]))
28.035714
73
0.49172
0
0
0
0
0
0
0
0
107
0.136306
43b37687b876abf43457859ada796360f659fa78
2,595
py
Python
heat/tests/convergence/framework/testutils.py
maestro-hybrid-cloud/heat
91a4bb3170bd81b1c67a896706851e55709c9b5a
[ "Apache-2.0" ]
null
null
null
heat/tests/convergence/framework/testutils.py
maestro-hybrid-cloud/heat
91a4bb3170bd81b1c67a896706851e55709c9b5a
[ "Apache-2.0" ]
null
null
null
heat/tests/convergence/framework/testutils.py
maestro-hybrid-cloud/heat
91a4bb3170bd81b1c67a896706851e55709c9b5a
[ "Apache-2.0" ]
null
null
null
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import functools

from oslo_log import log as logging

from heat.tests.convergence.framework import reality
from heat.tests.convergence.framework import scenario_template

LOG = logging.getLogger(__name__)


def verify(test, reality, tmpl):
    for name in tmpl.resources:
        rsrc_count = len(reality.resources_by_logical_name(name))
        test.assertEqual(1, rsrc_count,
                         'Found %d copies of resource "%s"' % (rsrc_count, name))

    all_rsrcs = reality.all_resources()

    for name, defn in tmpl.resources.items():
        phys_rsrc = reality.resources_by_logical_name(name)[0]
        for prop_name, prop_def in defn.properties.items():
            real_value = reality.resource_properties(phys_rsrc, prop_name)
            if isinstance(prop_def, scenario_template.GetAtt):
                targs = reality.resources_by_logical_name(prop_def.target_name)
                att_value = targs[0].properties_data[prop_def.attr]
                test.assertEqual(att_value, real_value)
            elif isinstance(prop_def, scenario_template.GetRes):
                targs = reality.resources_by_logical_name(prop_def.target_name)
                test.assertEqual(targs[0].nova_instance, real_value)
            else:
                test.assertEqual(prop_def, real_value)
        test.assertEqual(len(defn.properties), len(phys_rsrc.properties_data))

    test.assertEqual(len(tmpl.resources), len(all_rsrcs))


def scenario_globals(procs, testcase):
    return {
        'test': testcase,
        'reality': reality.reality,
        'verify': functools.partial(verify, testcase, reality.reality),
        'Template': scenario_template.Template,
        'RsrcDef': scenario_template.RsrcDef,
        'GetRes': scenario_template.GetRes,
        'GetAtt': scenario_template.GetAtt,
        'engine': procs.engine,
        'worker': procs.worker,
    }
36.549296
79
0.649326
0
0
0
0
0
0
0
0
670
0.258189
43b5471678e7c510bd2a55fdced1140414dcd734
440
py
Python
device_geometry.py
AstroShen/fpga21-scaled-tech
8a7016913c18d71844f733bc80a3ceaa2d033ac2
[ "MIT" ]
2
2021-09-02T13:13:35.000Z
2021-12-19T11:35:03.000Z
device_geometry.py
AstroShen/fpga21-scaled-tech
8a7016913c18d71844f733bc80a3ceaa2d033ac2
[ "MIT" ]
null
null
null
device_geometry.py
AstroShen/fpga21-scaled-tech
8a7016913c18d71844f733bc80a3ceaa2d033ac2
[ "MIT" ]
2
2021-09-29T02:53:03.000Z
2022-03-27T09:55:35.000Z
"""Holds the device gemoetry parameters (Table 5), taken from Wu et al., >> A Predictive 3-D Source/Drain Resistance Compact Model and the Impact on 7 nm and Scaled FinFets<<, 2020, with interpolation for 4nm. 16nm is taken from PTM HP. """ node_names = [16, 7, 5, 4, 3] GP = [64, 56, 48, 44, 41] FP = [40, 30, 28, 24, 22] GL = [20, 18, 16, 15, 14] FH = [26, 35, 45, 50, 55] FW = [12, 6.5, 6, 5.5, 5.5] vdd = [0.85, 0.75, 0.7, 0.65, 0.65]
36.666667
163
0.615909
0
0
0
0
0
0
0
0
240
0.545455
43b56590cfbfa648aa925a4f729f3fc4fe304008
2,605
py
Python
nova/tests/servicegroup/test_zk_driver.py
vmthunder/nova
baf05caab705c5778348d9f275dc541747b7c2de
[ "Apache-2.0" ]
7
2017-06-19T19:37:00.000Z
2019-06-16T02:06:14.000Z
nova/tests/servicegroup/test_zk_driver.py
vmthunder/nova
baf05caab705c5778348d9f275dc541747b7c2de
[ "Apache-2.0" ]
9
2015-05-20T11:20:17.000Z
2017-07-27T08:21:33.000Z
nova/tests/servicegroup/test_zk_driver.py
vmthunder/nova
baf05caab705c5778348d9f275dc541747b7c2de
[ "Apache-2.0" ]
13
2015-05-05T09:34:04.000Z
2017-11-08T02:03:46.000Z
# Copyright (c) AT&T 2012-2013 Yun Mao <[email protected]>
# Copyright 2012 IBM Corp.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

"""Test the ZooKeeper driver for servicegroup.

You need to install ZooKeeper locally and related dependencies to run the
test. It's unclear how to install python-zookeeper lib in venv so you might
have to run the test without it.

To set up in Ubuntu 12.04:
$ sudo apt-get install zookeeper zookeeperd python-zookeeper
$ sudo pip install evzookeeper
$ nosetests nova.tests.servicegroup.test_zk_driver
"""

import eventlet

from nova import servicegroup
from nova import test


class ZKServiceGroupTestCase(test.NoDBTestCase):

    def setUp(self):
        super(ZKServiceGroupTestCase, self).setUp()
        servicegroup.API._driver = None
        from nova.servicegroup.drivers import zk
        self.flags(servicegroup_driver='zk')
        self.flags(address='localhost:2181', group="zookeeper")
        try:
            zk.ZooKeeperDriver()
        except ImportError:
            self.skipTest("Unable to test due to lack of ZooKeeper")

    def test_join_leave(self):
        self.servicegroup_api = servicegroup.API()
        service_id = {'topic': 'unittest', 'host': 'serviceA'}
        self.servicegroup_api.join(service_id['host'], service_id['topic'])
        self.assertTrue(self.servicegroup_api.service_is_up(service_id))
        self.servicegroup_api.leave(service_id['host'], service_id['topic'])
        # make sure zookeeper is updated and watcher is triggered
        eventlet.sleep(1)
        self.assertFalse(self.servicegroup_api.service_is_up(service_id))

    def test_stop(self):
        self.servicegroup_api = servicegroup.API()
        service_id = {'topic': 'unittest', 'host': 'serviceA'}
        pulse = self.servicegroup_api.join(service_id['host'],
                                           service_id['topic'], None)
        self.assertTrue(self.servicegroup_api.service_is_up(service_id))
        pulse.stop()
        eventlet.sleep(1)
        self.assertFalse(self.servicegroup_api.service_is_up(service_id))
39.469697
78
0.700576
1,466
0.562764
0
0
0
0
0
0
1,284
0.492898
43b6084ad6323124af0ef6d980f927d5cab21334
780
py
Python
tests/test_misc.py
lordmauve/chopsticks
87c6a5d0049a45db1477a21510cba650f470a8ac
[ "Apache-2.0" ]
171
2016-07-14T11:29:15.000Z
2022-03-12T07:39:12.000Z
tests/test_misc.py
moreati/chopsticks
87c6a5d0049a45db1477a21510cba650f470a8ac
[ "Apache-2.0" ]
59
2016-07-23T14:05:58.000Z
2020-06-26T15:49:07.000Z
tests/test_misc.py
moreati/chopsticks
87c6a5d0049a45db1477a21510cba650f470a8ac
[ "Apache-2.0" ]
17
2016-08-01T06:46:27.000Z
2018-03-25T14:46:15.000Z
"""Tests for miscellaneous properties, such as debuggability.""" import time from chopsticks.tunnel import Docker from chopsticks.group import Group def test_tunnel_repr(): """Tunnels have a usable repr.""" tun = Docker('py36', image='python:3.6') assert repr(tun) == "Docker('py36')" def test_group_repr(): """Groups have a usable repr.""" grp = Group([ Docker('py35', image='python:3.5'), Docker('py36', image='python:3.6') ]) assert repr(grp) == "Group([Docker('py35'), Docker('py36')])" def test_group_reuse(): """We can re-use a group.""" grp = Group([ Docker('py35', image='python:3.5'), Docker('py36', image='python:3.6') ]) with grp: grp.call(time.time) grp.call(time.time)
25.16129
65
0.601282
0
0
0
0
0
0
0
0
304
0.389744
43b62d9d4c35cd12677417d9abccab4b3568c545
3,028
py
Python
Evaluation/PostProcesing.py
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion
366ac5073cea96b662b934c3657446c9f1aa2f65
[ "MIT" ]
null
null
null
Evaluation/PostProcesing.py
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion
366ac5073cea96b662b934c3657446c9f1aa2f65
[ "MIT" ]
3
2022-03-12T17:16:36.000Z
2022-03-17T12:14:56.000Z
Evaluation/PostProcesing.py
AnnonymousRacoon/Quantum-Random-Walks-to-Solve-Diffusion
366ac5073cea96b662b934c3657446c9f1aa2f65
[ "MIT" ]
1
2022-03-12T11:56:43.000Z
2022-03-12T11:56:43.000Z
import pandas as pd
import re
import glob


def rebuild_counts_from_csv(path, n_dims, shots):
    df = pd.read_csv(path)
    return rebuild_counts_from_dataframe(dataframe=df, n_dims=n_dims, shots=shots)


def rebuild_counts_from_dataframe(dataframe, n_dims, shots):
    dimension_counts = {}
    for dimension in range(n_dims):
        dimension_counts[dimension] = []

    pde = list(dataframe.probability_density)
    for idx, density in enumerate(pde):
        n_counts = int(density * shots)
        for _ in range(n_counts):
            # rebuild one sample per shot for every dimension
            for dimension in range(n_dims):
                dimension_key = "dimension_{}".format(dimension)
                dimension_counts[dimension] += [dataframe[dimension_key][idx]]

    rebuilt_dict = {}
    for dimension in range(n_dims):
        rebuilt_dict[f"d{dimension}"] = dimension_counts[dimension]
    return rebuilt_dict


def rebuild_counts_from_dictionary(dictionary: dict, n_dims, shots):
    dataframe = pd.DataFrame(dictionary)
    return rebuild_counts_from_dataframe(dataframe=dataframe, n_dims=n_dims, shots=shots)


def get_stats_from_counts_dict(results_dict: dict):
    dataframe = pd.DataFrame(results_dict)
    return get_stats_from_counts_dataframe(dataframe)


def get_stats_from_counts_dataframe(counts_dataframe: pd.DataFrame) -> dict:
    results_dict = {}
    results_dict["corr"] = counts_dataframe.corr()
    results_dict["cov"] = counts_dataframe.cov()
    results_dict["mean"] = counts_dataframe.mean()
    results_dict['var'] = counts_dataframe.var()
    return results_dict


def get_n_steps_from_filepath(filepath) -> int:
    filename = filepath.split('/')[-1]
    return int(re.findall(r"\d+_steps", filename)[0].split('_')[0])


def get_n_shots_from_path(path) -> int:
    experiment_dir_name = path.split('/')[-1]
    nshots = int(re.findall(r"\d+shots", experiment_dir_name)[0].split('s')[0])
    return nshots


def get_n_dims_from_path(path) -> int:
    experiment_dir_name = path.split('/')[-1]
    ndims = int(re.findall(r"\d+D_", experiment_dir_name)[0].split('D')[0])
    return ndims


def extract_mean_variance_vs_nsteps(directory_path: str, dimension=0):
    nshots = get_n_shots_from_path(directory_path)
    ndims = get_n_dims_from_path(directory_path)
    assert dimension < ndims, "queried dimension exceeds experiment space"

    files = glob.glob(directory_path + '/*/data/**.csv')
    files.sort(key=get_n_steps_from_filepath)

    n_steps = []
    variance = []
    mean = []
    for filepath in files:
        filename = filepath.split('/')[-1]
        nsteps = int(re.findall(r"\d+_steps", filename)[0].split('_')[0])
        rebuilt_dict = rebuild_counts_from_csv(filepath, n_dims=ndims, shots=nshots)
        stats = get_stats_from_counts_dict(rebuilt_dict)
        variance.append(stats['var'][dimension])
        mean.append(stats['mean'][dimension])
        n_steps.append(nsteps)

    return n_steps, variance, mean
33.274725
89
0.691546
0
0
0
0
0
0
0
0
253
0.083554
43b693bbc83efef69f13c3a5a3bab32c542470ab
2,276
py
Python
app/wirecard/tasks.py
michel-rodrigues/viggio_backend
f419f0b939209722e1eb1e272f33de172cd5c1f1
[ "MIT" ]
null
null
null
app/wirecard/tasks.py
michel-rodrigues/viggio_backend
f419f0b939209722e1eb1e272f33de172cd5c1f1
[ "MIT" ]
null
null
null
app/wirecard/tasks.py
michel-rodrigues/viggio_backend
f419f0b939209722e1eb1e272f33de172cd5c1f1
[ "MIT" ]
null
null
null
from sentry_sdk import capture_exception
from dateutil.parser import parse

from project_configuration.celery import app
from orders.models import Charge
from request_shoutout.domain.models import Charge as DomainCharge
from .models import WirecardTransactionData


CROSS_SYSTEMS_STATUS_MAPPING = {
    'WAITING': DomainCharge.PROCESSING,
    'IN_ANALYSIS': DomainCharge.PROCESSING,
    'PRE_AUTHORIZED': DomainCharge.PRE_AUTHORIZED,
    'AUTHORIZED': DomainCharge.PAID,
    'CANCELLED': DomainCharge.CANCELLED,
    'REFUNDED': DomainCharge.CANCELLED,
    'REVERSED': DomainCharge.CANCELLED,
    'SETTLED': DomainCharge.PAID,
}


def _update_status(wirecard_status, wirecard_payment_hash):
    (
        Charge.objects
        .filter(order__third_party_transaction__wirecard_payment_hash=wirecard_payment_hash)
        .update(status=CROSS_SYSTEMS_STATUS_MAPPING[wirecard_status])
    )


def _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp):
    wirecard_transaction.payment_event_last_timestamp = payment_event_timestamp
    wirecard_transaction.save()


def _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
    if wirecard_transaction.payment_event_last_timestamp:
        return payment_event_timestamp < wirecard_transaction.payment_event_last_timestamp
    return False


@app.task
def update_payment_status(notification):
    payment_event_timestamp = parse(notification['resource']['payment']['updatedAt'])
    payment_status = notification['resource']['payment']['status']
    wirecard_payment_hash = notification['resource']['payment']['id']
    try:
        wirecard_transaction = (
            WirecardTransactionData.objects.get(wirecard_payment_hash=wirecard_payment_hash)
        )
    # This exception has been raised a few times; since we don't know whether it is caused
    # by failures in the Wirecard sandbox, we avoid crashing the application and send the
    # exception to Sentry.
    except WirecardTransactionData.DoesNotExist:
        capture_exception()
    else:
        if not _is_a_delayied_notification(payment_event_timestamp, wirecard_transaction):
            _update_status(payment_status, wirecard_payment_hash)
            _update_payment_event_timestamp(wirecard_transaction, payment_event_timestamp)
38.576271
93
0.784271
0
0
0
0
947
0.414442
0
0
354
0.154923
43b6c1b507adc1bb371518dff1d4802b73e3e1a5
434
py
Python
py/multiple_dispatch_example.py
coalpha/coalpha.github.io
8a620314a5c0bcbe2225d29f733379d181534430
[ "Apache-2.0" ]
null
null
null
py/multiple_dispatch_example.py
coalpha/coalpha.github.io
8a620314a5c0bcbe2225d29f733379d181534430
[ "Apache-2.0" ]
1
2020-04-12T07:48:18.000Z
2020-04-12T07:49:29.000Z
py/multiple_dispatch_example.py
coalpha/coalpha.github.io
8a620314a5c0bcbe2225d29f733379d181534430
[ "Apache-2.0" ]
1
2020-09-30T05:27:07.000Z
2020-09-30T05:27:07.000Z
from typing import *
from multiple_dispatch import multiple_dispatch


@overload
@multiple_dispatch
def add(a: Literal[4, 6, 8], b):
    raise TypeError("No adding 2, 4, 6, or 8!")


@overload
@multiple_dispatch
def add(a: int, b: str):
    return f"int + str = {a} + {b}"


@overload
@multiple_dispatch
def add(a: int, b: int):
    return a + b


@multiple_dispatch
def add(a, b):
    return f"Any + Any = {a} + {b}"


print(add(2, "hello"))
18.083333
47
0.658986
0
0
0
0
333
0.767281
0
0
81
0.186636
43b93580a409ca7d715e6c81e1d0f3517269cec7
4,277
py
Python
dygraph/alexnet/network.py
Sunyingbin/models
30a7f1757bfad79935aa865f4362a7b38e63a415
[ "Apache-2.0" ]
null
null
null
dygraph/alexnet/network.py
Sunyingbin/models
30a7f1757bfad79935aa865f4362a7b38e63a415
[ "Apache-2.0" ]
null
null
null
dygraph/alexnet/network.py
Sunyingbin/models
30a7f1757bfad79935aa865f4362a7b38e63a415
[ "Apache-2.0" ]
null
null
null
""" 动态图构建 AlexNet """ import paddle.fluid as fluid import numpy as np class Conv2D(fluid.dygraph.Layer): def __init__(self, name_scope, num_channels, num_filters, filter_size, stride=1, padding=0, dilation=1, groups=1, act=None, use_cudnn=False, param_attr=None, bias_attr=None): super(Conv2D, self).__init__(name_scope) self._conv2d = fluid.dygraph.Conv2D( num_channels=num_channels, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=padding, dilation=dilation, groups=groups, param_attr=param_attr, bias_attr=bias_attr, act=act, use_cudnn=use_cudnn) def forward(self, inputs): x = self._conv2d(inputs) return x class Conv2DPool(fluid.dygraph.Layer): def __init__(self, name_scope, num_channels, num_filters, filter_size, pool_size, pool_stride, pool_padding=0, pool_type='max', global_pooling=False, conv_stride=1, conv_padding=0, conv_dilation=1, conv_groups=1, act=None, use_cudnn=False, param_attr=None, bias_attr=None): super(Conv2DPool, self).__init__(name_scope) self._conv2d = fluid.dygraph.Conv2D( num_channels=num_channels, num_filters=num_filters, filter_size=filter_size, stride=conv_stride, padding=conv_padding, dilation=conv_dilation, groups=conv_groups, param_attr=param_attr, bias_attr=bias_attr, act=act, use_cudnn=use_cudnn) self._pool2d = fluid.dygraph.Pool2D( pool_size=pool_size, pool_type=pool_type, pool_stride=pool_stride, pool_padding=pool_padding, global_pooling=global_pooling, use_cudnn=use_cudnn) def forward(self, inputs): x = self._conv2d(inputs) x = self._pool2d(x) return x class AlexNet(fluid.dygraph.Layer): def __init__(self, name_scope, class_dim): super(AlexNet, self).__init__(name_scope) self.conv_pool_1 = Conv2DPool(self.full_name(), 3, 64, 11, 3, 2, conv_stride=4, conv_padding=2, act='relu') self.conv_pool_2 = Conv2DPool(self.full_name(), 64, 192, 5, 3, 2, conv_stride=1, conv_padding=2, act='relu') self.conv_3 = Conv2D(self.full_name(), 192, 384, 3, 1, 1, act='relu') self.conv_4 = Conv2D(self.full_name(), 384, 256, 3, 1, 1, act='relu') self.conv_pool_5 = Conv2DPool(self.full_name(), 256, 256, 3, 3, 2, conv_stride=1, conv_padding=1, act='relu') self.fc6 = fluid.dygraph.FC(self.full_name(), 9216, 4096, act='relu') self.fc7 = fluid.dygraph.FC(self.full_name(), 4096, 4096, act='relu') self.fc8 = fluid.dygraph.FC(self.full_name(), 4096, class_dim, act='softmax') def forward(self, inputs, label=None): out = self.conv_pool_1(inputs) out = self.conv_pool_2(out) out = self.conv_3(out) out = self.conv_4(out) out = self.conv_pool_5(out) out = self.fc6(out) out = fluid.layers.dropout(out, 0.5) out = self.fc7(out) out = fluid.layers.dropout(out, 0.5) out = self.fc8(out) if label is not None: acc = fluid.layers.accuracy(input=out, label=label) return out, acc else: return out if __name__ == '__main__': with fluid.dygraph.guard(): alexnet = AlexNet('alex-net', 3) img = np.zeros([2, 3, 224, 224]).astype('float32') img = fluid.dygraph.to_variable(img) outs = alexnet(img).numpy() print(outs)
32.9
118
0.53098
3,915
0.913226
0
0
0
0
0
0
118
0.027525
43bbbe3418d6d5e2da95d398c3928141e4b68eab
905
py
Python
turtlegameproject/turtlegame.py
Ayon134/code_for_Kids
d90698bb38efe5e26c31f02bd129bfdadea158e2
[ "MIT" ]
null
null
null
turtlegameproject/turtlegame.py
Ayon134/code_for_Kids
d90698bb38efe5e26c31f02bd129bfdadea158e2
[ "MIT" ]
null
null
null
turtlegameproject/turtlegame.py
Ayon134/code_for_Kids
d90698bb38efe5e26c31f02bd129bfdadea158e2
[ "MIT" ]
2
2021-01-08T03:52:46.000Z
2021-04-01T19:16:12.000Z
import turtle
import random

p1 = turtle.Turtle()
p1.color("green")
p1.shape("turtle")
p1.penup()
p1.goto(-200, 100)
p2 = p1.clone()
p2.color("blue")
p2.penup()
p2.goto(-200, -100)

p1.goto(300, 60)
p1.pendown()
p1.circle(40)
p1.penup()
p1.goto(-200, 100)

p2.goto(300, -140)
p2.pendown()
p2.circle(40)
p2.penup()
p2.goto(-200, -100)

die = [1, 2, 3, 4, 5, 6]
i = 1

while i <= 20:
    if p1.pos() >= (300, 100):
        print("p1 wins")
        break
    elif p2.pos() >= (300, -100):
        print("p2 wins")
        break
    else:
        p1_turn = input("press enter to start")
        die_out = random.choice(die)
        print("you get", die_out)
        print("the number of steps:", 20 * die_out)
        p1.forward(20 * die_out)

        p2_turn = input("press enter to challenge")
        d = random.choice(die)
        print("you get", d)
        print("the number of steps:", 20 * d)
        p2.forward(20 * d)
        # advance the turn counter so the game ends after 20 rounds
        i += 1
17.745098
49
0.571271
0
0
0
0
0
0
0
0
149
0.164641
43bbc2ac72a79eec23f8c2578bc9f103ba32b758
8,684
py
Python
hivwholeseq/sequencing/check_pipeline.py
neherlab/hivwholeseq
978ce4060362e4973f92b122ed5340a5314d7844
[ "MIT" ]
3
2016-09-13T12:15:47.000Z
2021-07-03T01:28:56.000Z
hivwholeseq/sequencing/check_pipeline.py
iosonofabio/hivwholeseq
d504c63b446c3a0308aad6d6e484ea1666bbe6df
[ "MIT" ]
null
null
null
hivwholeseq/sequencing/check_pipeline.py
iosonofabio/hivwholeseq
d504c63b446c3a0308aad6d6e484ea1666bbe6df
[ "MIT" ]
3
2016-01-17T03:43:46.000Z
2020-03-25T07:00:11.000Z
#!/usr/bin/env python # vim: fdm=marker ''' author: Fabio Zanini date: 15/06/14 content: Check the status of the pipeline for one or more sequencing samples. ''' # Modules import os import sys from itertools import izip import argparse from Bio import SeqIO from hivwholeseq.utils.generic import getchar from hivwholeseq.sequencing.samples import SampleSeq, load_sequencing_run from hivwholeseq.patients.patients import load_samples_sequenced as lssp from hivwholeseq.patients.patients import SamplePat from hivwholeseq.sequencing.samples import load_samples_sequenced as lss from hivwholeseq.utils.mapping import get_number_reads from hivwholeseq.cluster.fork_cluster import fork_check_pipeline as fork_self # Globals len_fr = 8 len_msg = 6 spacing_fragments = 4 # Functions def check_status(sample, step, detail=1): '''Check for a sample a certain step of the pipeline at a certain detail''' if detail == 1: if step == 'premapped': return [os.path.isfile(sample.get_premapped_filename())] elif step == 'divided': return [(fr, os.path.isfile(sample.get_divided_filename(fr))) for fr in sample.regions_complete] elif step == 'consensus': return [(fr, os.path.isfile(sample.get_consensus_filename(fr))) for fr in sample.regions_generic] elif step == 'mapped': return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=False))) for fr in sample.regions_generic] elif step == 'filtered': return [(fr, os.path.isfile(sample.get_mapped_filename(fr, filtered=True))) for fr in sample.regions_generic] elif step == 'mapped_initial': return [(fr, os.path.isfile(sample.get_mapped_to_initial_filename(fr))) for fr in sample.regions_generic] elif step == 'mapped_filtered': # Check whether the mapped filtered is older than the mapped_initial from hivwholeseq.utils.generic import modification_date out = [] for fr in sample.regions_generic: fn_mi = sample.get_mapped_to_initial_filename(fr) fn_mf = sample.get_mapped_filtered_filename(fr) if not os.path.isfile(fn_mf): out.append((fr, False)) continue if not os.path.isfile(fn_mi): out.append((fr, True)) continue md_mi = modification_date(fn_mi) md_mf = modification_date(fn_mf) if md_mf < md_mi: out.append((fr, 'OLD')) else: out.append((fr, True)) return out elif detail == 2: if step in ('filtered', 'consensus'): return check_status(sample, step, detail=3) else: return check_status(sample, step, detail=1) elif detail == 3: if step == 'premapped': if os.path.isfile(sample.get_premapped_filename()): return [get_number_reads(sample.get_premapped_filename())] else: return [False] elif step == 'divided': stati = [] for fr in sample.regions_complete: fn = sample.get_divided_filename(fr) if os.path.isfile(fn): status = (fr, get_number_reads(fn)) else: status = (fr, False) stati.append(status) return stati elif step == 'consensus': stati = [] for fr in sample.regions_generic: fn = sample.get_consensus_filename(fr) if os.path.isfile(fn): status = (fr, len(SeqIO.read(fn, 'fasta'))) else: status = (fr, False) stati.append(status) return stati elif step == 'mapped': stati = [] for fr in sample.regions_generic: fn = sample.get_mapped_filename(fr, filtered=False) if os.path.isfile(fn): status = (fr, get_number_reads(fn)) else: status = (fr, False) stati.append(status) return stati elif step == 'filtered': stati = [] for fr in sample.regions_generic: fn = sample.get_mapped_filename(fr, filtered=True) if os.path.isfile(fn): status = (fr, get_number_reads(fn)) else: status = (fr, False) stati.append(status) return stati # TODO: add mapped_to_initial and downstream elif step in 
('mapped_initial', 'mapped_filtered'): return check_status(sample, step, detail=1) def print_info(name, status, detail=1): '''Print info on these files''' print '{:<20s}'.format(name+':'), if name.lower() in ['premapped']: status = status[0] if status == True: print 'OK' elif status == False: print 'MISS' else: print str(status) else: stati = list(status) msg = [] for (fr, status) in stati: ms = ('{:<'+str(len_fr)+'s}').format(fr+':') if status == True: msg.append(ms+('{:>'+str(len_msg)+'}').format('OK')) elif status == False: msg.append(ms+('{:>'+str(len_msg)+'}').format('MISS')) else: msg.append(ms+('{:>'+str(len_msg)+'}').format(str(status))) print (' ' * spacing_fragments).join(msg) # Script if __name__ == '__main__': # Parse input args parser = argparse.ArgumentParser(description='Check sequencing run for missing parts of the analysis', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--runs', required=True, nargs='+', help='Seq runs to analyze (e.g. Tue28, test_tiny)') parser.add_argument('--adaIDs', nargs='+', help='Adapter IDs to analyze (e.g. TS2)') parser.add_argument('--nopatients', action='store_false', dest='use_pats', help='Include non-patient samples (e.g. reference strains)') parser.add_argument('--interactive', action='store_true', help='Interactive mode') parser.add_argument('--detail', type=int, default=1, help='Include details on number of reads, length of consensus') parser.add_argument('--submit', action='store_true', help='Execute the script in parallel on the cluster') args = parser.parse_args() seq_runs = args.runs adaIDs = args.adaIDs use_pats = args.use_pats use_interactive = args.interactive detail = args.detail submit = args.submit if submit: fork_self(seq_runs, adaIDs=adaIDs, pats=use_pats, detail=detail) sys.exit() samples_pat = lssp(include_wrong=True) samples = lss() samples = samples.loc[samples['seq run'].isin(seq_runs)] if adaIDs is not None: samples = samples.loc[samples.adapter.isin(adaIDs)] if len(seq_runs) >= 2: samples.sort(columns=['patient sample', 'seq run'], inplace=True) for isa, (samplename, sample) in enumerate(samples.iterrows()): sample = SampleSeq(sample) print sample.name, 'seq:', sample['seq run'], sample.adapter, if sample['patient sample'] == 'nan': print 'not a patient sample', if use_pats: print '(skip)' continue else: print '' else: sample_pat = samples_pat.loc[sample['patient sample']] print 'patient: '+sample_pat.patient steps = ['premapped', 'divided', 'consensus', 'mapped', 'filtered', 'mapped_initial', 'mapped_filtered'] for step in steps: status = check_status(sample, step, detail=detail) print_info(step.capitalize(), status, detail=detail) if (isa != len(samples) - 1): print '' if use_interactive and (isa != len(samples) - 1): print 'Press q to exit', sys.stdout.flush() ch = getchar() if ch.lower() in ['q']: print 'stopped' break else: sys.stdout.write("\x1b[1A") print ''
36.033195
106
0.554353
0
0
0
0
0
0
0
0
1,419
0.163404
43be862a8ae3652cfbde5c9e9ea45da257901956
1,633
py
Python
app.py
thliang01/nba-s
660d0e830989916b7b9f3123eb809d143b714186
[ "BSD-2-Clause" ]
null
null
null
app.py
thliang01/nba-s
660d0e830989916b7b9f3123eb809d143b714186
[ "BSD-2-Clause" ]
null
null
null
app.py
thliang01/nba-s
660d0e830989916b7b9f3123eb809d143b714186
[ "BSD-2-Clause" ]
null
null
null
import streamlit as st
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt

# --------------------------------------------------------------
# Import and clean data
game_details = pd.read_csv('games_details.csv')
# print(game_details.head(5))
game_details.drop(['GAME_ID', 'TEAM_ID', 'PLAYER_ID', 'START_POSITION', 'COMMENT', 'TEAM_ABBREVIATION'],
                  axis=1, inplace=True)
game_details['FTL'] = game_details['FTA'] - game_details['FTM']
game_details = game_details.dropna()
# game_details.shape
# game_details.info()
game_details['MIN'] = game_details['MIN'].str.strip(':').str[0:2]

df = game_details.copy()

if st.checkbox('Show dataframe'):
    st.write("Players Game Details")
    st.dataframe(df.head(10))

# --------------------------------------------------------------
st.write("Top 20 Players in the NBA")
top_activities = df.groupby(by='PLAYER_NAME')['PTS'].sum().sort_values(ascending=False).head(20).reset_index()

plt.figure(figsize=(15, 10))
plt.xlabel('POINTS', fontsize=15)
plt.ylabel('PLAYER_NAME', fontsize=15)
plt.title('Top 20 Players in the NBA League', fontsize=20)
ax = sns.barplot(x=top_activities['PTS'], y=top_activities['PLAYER_NAME'])
for i, (value, name) in enumerate(zip(top_activities['PTS'], top_activities['PLAYER_NAME'])):
    ax.text(value, i - .05, f'{value:,.0f}', size=10, ha='left', va='center')
ax.set(xlabel='POINTS', ylabel='PLAYER_NAME')
st.pyplot(plt)

player = st.multiselect(
    "Choose Player", df['PLAYER_NAME']
)

st.write("""
# My first app
Hello *world!*
""")

x = st.slider("Select a number")
st.write("You selected:", x)
32.019608
110
0.647887
0
0
0
0
0
0
0
0
662
0.405389
43bfd11896f962234020d5d611ad3cb21b537df7
19,228
py
Python
python/craftassist/voxel_models/geoscorer/geoscorer_util.py
kepolol/craftassist
f60a7edd0b4ea72b774cca45ba468d2e275445c2
[ "MIT" ]
null
null
null
python/craftassist/voxel_models/geoscorer/geoscorer_util.py
kepolol/craftassist
f60a7edd0b4ea72b774cca45ba468d2e275445c2
[ "MIT" ]
null
null
null
python/craftassist/voxel_models/geoscorer/geoscorer_util.py
kepolol/craftassist
f60a7edd0b4ea72b774cca45ba468d2e275445c2
[ "MIT" ]
1
2020-03-29T20:04:11.000Z
2020-03-29T20:04:11.000Z
""" Copyright (c) Facebook, Inc. and its affiliates. """ import numpy as np import random from datetime import datetime import sys import argparse import torch import os from inspect import currentframe, getframeinfo GEOSCORER_DIR = os.path.dirname(os.path.realpath(__file__)) CRAFTASSIST_DIR = os.path.join(GEOSCORER_DIR, "../") sys.path.append(CRAFTASSIST_DIR) from shapes import get_bounds def pretty_log(log_string): cf = currentframe().f_back filename = getframeinfo(cf).filename.split("/")[-1] print( "{} {}:{} {}".format( datetime.now().strftime("%m/%d/%Y %H:%M:%S"), filename, cf.f_lineno, log_string ) ) sys.stdout.flush() ## Train Fxns ## def get_base_train_parser(): parser = argparse.ArgumentParser() parser.add_argument("--cuda", type=int, default=1, help="0 for cpu") parser.add_argument("--batchsize", type=int, default=64, help="batchsize") parser.add_argument("--dataset", default="shapes", help="shapes/segments/both") parser.add_argument( "--epochsize", type=int, default=1000, help="number of examples in an epoch" ) parser.add_argument("--nepoch", type=int, default=1000, help="number of epochs") parser.add_argument("--context_sidelength", type=int, default=32, help="size of cube") parser.add_argument("--hidden_dim", type=int, default=64, help="size of hidden dim") parser.add_argument("--num_layers", type=int, default=3, help="num layers") parser.add_argument( "--blockid_embedding_dim", type=int, default=8, help="size of blockid embedding" ) parser.add_argument( "--num_words", type=int, default=256, help="number of words for the blockid embeds" ) parser.add_argument("--lr", type=float, default=0.1, help="step size for net") parser.add_argument( "--optim", type=str, default="adagrad", help="optim type to use (adagrad|sgd|adam)" ) parser.add_argument("--momentum", type=float, default=0.0, help="momentum") parser.add_argument("--checkpoint", default="", help="where to save model") parser.add_argument("--num_workers", type=int, default=4, help="number of dataloader workers") return parser def add_dataset_flags(parser): parser.add_argument( "--dataset_ratios", type=str, default="shape:1.0", help="comma separated name:prob" ) parser.add_argument("--useid", type=bool, default=False, help="use blockid") parser.add_argument("--fixed_cube_size", type=int, default=None, help="fixed_cube_size") parser.add_argument("--fixed_center", type=bool, default=False, help="fixed_center") parser.add_argument( "--min_seg_size", type=int, default=6, help="min seg size for seg data type" ) parser.add_argument( "--use_saved_data", type=bool, default=False, help="use preparsed data for this min_seg_size", ) def add_directional_flags(parser): parser.add_argument("--spatial_embedding_dim", type=int, default=8, help="size of spatial emb") parser.add_argument("--output_embedding_dim", type=int, default=8, help="size of output emb") parser.add_argument( "--seg_direction_net", type=bool, default=False, help="use segdirnet module" ) parser.add_argument( "--seg_use_viewer_pos", type=bool, default=False, help="use viewer pos in seg" ) parser.add_argument( "--seg_use_viewer_look", type=bool, default=False, help="use viewer look in seg" ) parser.add_argument( "--seg_use_direction", type=bool, default=False, help="use direction in seg" ) parser.add_argument("--num_seg_dir_layers", type=int, default=3, help="num segdir net layers") parser.add_argument( "--cont_use_direction", type=bool, default=False, help="use direction in context" ) parser.add_argument( "--cont_use_xyz_from_viewer_look", type=bool, default=False, help="use 
xyz position relative to viewer look in context emb", ) def get_dataloader(dataset, opts, collate_fxn): def init_fn(wid): np.random.seed(torch.initial_seed() % (2 ** 32)) return torch.utils.data.DataLoader( dataset, batch_size=opts["batchsize"], shuffle=True, pin_memory=True, drop_last=True, num_workers=opts["num_workers"], worker_init_fn=init_fn, collate_fn=collate_fxn, ) def to_cuda(list_modules): for m in list_modules: m.cuda() def multitensor_collate_fxn(x): """ Takes a list of BATCHSIZE lists of tensors of length D. Returns a list of length D of batched tensors. """ num_tensors_to_batch = len(x[0]) regroup_tensors = [[] for i in range(num_tensors_to_batch)] for t_list in x: for i, t in enumerate(t_list): regroup_tensors[i].append(t.unsqueeze(0)) batched_tensors = [torch.cat(tl) for tl in regroup_tensors] return batched_tensors ## 3D Utils ## def get_side_lengths(bounds): """ Bounds should be a list of [min_x, max_x, min_y, max_y, min_z, max_z]. Returns a list of the side lengths. """ return [x + 1 for x in (bounds[1] - bounds[0], bounds[3] - bounds[2], bounds[5] - bounds[4])] def coord_to_index(coord, sl): """ Takes a 3D coordinate in a cube and the cube side length. Returns index in flattened 3D array. """ return coord[0] * sl * sl + coord[1] * sl + coord[2] def index_to_coord(index, sl): """ Takes an index into a flattened 3D array and its side length. Returns the coordinate in the cube. """ coord = [] two_d_slice_size = sl * sl coord.append(index // two_d_slice_size) remaining = index % two_d_slice_size coord.append(remaining // sl) coord.append(remaining % sl) return coord def shift_subsegment_corner(S): """ Takes a segment, described as a list of tuples of the form: ((x, y, z), (block_id, ?)) Returns the segment in the same form, shifted to the origin, and the shift vec """ bounds = get_bounds(S) shift_zero_vec = [-bounds[0], -bounds[2], -bounds[4]] new_S = [] for s in S: new_S.append((tuple([sum(x) for x in zip(s[0], shift_zero_vec)]), s[1])) return new_S, shift_zero_vec def subset_and_scale_3d(init_array, mins, maxs, scale=1): return scale * init_array[mins[0] : maxs[0], mins[1] : maxs[1], mins[2] : maxs[2]] def combine_seg_context(seg, context, seg_shift, seg_mult=1): completed_context = context.clone() # Calculate the region to copy over, sometimes the segment # falls outside the range of the context bounding box c_mins = [int(i) for i in seg_shift] c_maxs = [int(min(ss + 8, 32)) for ss in seg_shift] s_mins = [0 for i in range(3)] # If the edge of the segment goes past the edge of the context (ss + 8 > 32), # remove the extra from the segment. 
s_maxs = [int(8 - max(0, (ss + 8) - 32)) for ss in seg_shift] seg_to_add = subset_and_scale_3d(seg, s_mins, s_maxs, seg_mult) context_subset = subset_and_scale_3d(completed_context, c_mins, c_maxs, 1) completed_context[c_mins[0] : c_maxs[0], c_mins[1] : c_maxs[1], c_mins[2] : c_maxs[2]] = ( seg_to_add + context_subset ) return completed_context def get_vector(start, end): return end - start def get_random_viewer_info(sl): viewer_pos = torch.tensor(random_int_triple(0, sl - 1)) viewer_look = torch.tensor(random_int_triple(0, sl - 1)) if viewer_pos.eq(viewer_look).sum() == viewer_pos.size(0): if viewer_look[0] < sl + 1: viewer_look[0] += 1 else: viewer_look[0] -= 1 return viewer_pos, viewer_look def b_greater_than_a(a, b): if a == b: return 0 return 1 if b > a else -1 def shift_block(b, s): return tuple((tuple((b[0][0] + s[0], b[0][1] + s[1], b[0][2] + s[2])), b[1])) def rotate_block(b, c, r): """ rotates the block b around the point c by 90*r degrees in the xz plane. r should be 1 or -1.""" # TODO add a reflection c = np.array(c) p = np.add(b[0], -c) x = p[0] z = p[2] if r == -1: p[0] = z p[2] = -x else: p[0] = -z p[2] = x return (tuple(p + c), b[1]) def random_int_triple(minval, maxval): t = [ random.randint(minval, maxval), random.randint(minval, maxval), random.randint(minval, maxval), ] return t def check_inrange(x, minval, maxval): """inclusive check""" return all([v >= minval for v in x]) and all([v <= maxval for v in x]) def normalize(batched_vector): vec = batched_vector.double() norm = torch.norm(vec, dim=1) # Set norm to 1 if it's 0 norm = norm + norm.eq(0).double() expanded_norm = norm.unsqueeze(1).expand(-1, vec.size()[1]) return torch.div(vec, expanded_norm) def get_rotation_matrix(viewer_pos, viewer_look): # VP, VL: N x 3, VP_to_VL: N x 3 vp_to_vl = get_vector(viewer_pos, viewer_look)[:, :2] nlook_vec = normalize(vp_to_vl) nly = nlook_vec[:, 1] # Nlx necessary to correct for the range of acrcos nlx = nlook_vec[:, 0] nlx = nlx.gt(0).double() - nlx.lt(0).double() - nlx.eq(0).double() # Take care of nans created by raising 0 to a power # and then masking the sin theta to 0 as intended base = 1 - nly * nly nan_mask = torch.isnan(torch.pow(base, 0.5)).double() base = base + nan_mask sin_theta = nlx * nan_mask.eq(0).double() * torch.pow(base, 0.5) nly = nly.unsqueeze(1) sin_theta = sin_theta.unsqueeze(1) rm_pt1 = torch.cat([nly, sin_theta], 1).unsqueeze(1) rm_pt2 = torch.cat([-sin_theta, nly], 1).unsqueeze(1) rm = torch.cat([rm_pt1, rm_pt2], 1) return rm def rotate_x_y(coord, rotation_matrix): return torch.mm(coord.unsqueeze(0), rotation_matrix).squeeze(0) def float_equals(a, b, epsilon): return True if abs(a - b) < epsilon else False def get_argmax_list(vals, epsilon, minlist=False, maxlen=None): mult = -1 if minlist else 1 max_ind = [] for i, v in enumerate(vals): if not max_ind or float_equals(max_ind[0][1], v, epsilon): if maxlen and len(max_ind) == maxlen: continue max_ind.append((i, v)) elif mult * (v - max_ind[0][1]) > 0: max_ind = [(i, v)] return max_ind def get_firstmax(vals, epsilon, minlist=False): return get_argmax_list(vals, epsilon, minlist, 1)[0] # N -> batch size in training # D -> num target coord per element # Viewer pos, viewer_look are N x 3 tensors # Batched target coords is a N x D x 3 tensor # Output is a N x D x 3 tensor def get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords): # First verify the sizing and unsqueeze if necessary btc_sizes = batched_target_coords.size() vp_sizes = viewer_pos.size() vl_sizes = viewer_look.size() 
if len(btc_sizes) > 3 or len(vp_sizes) > 2 or len(vl_sizes) > 2: raise Exception("One input has too many dimensions") if btc_sizes[-1] != 3 or vp_sizes[-1] != 3 or vl_sizes[-1] != 3: raise Exception("The last dimension of all inputs should be size 3") if len(btc_sizes) < 3: for i in range(3 - len(btc_sizes)): batched_target_coords = batched_target_coords.unsqueeze(0) if len(vp_sizes) == 1: viewer_pos = viewer_pos.unsqueeze(0) if len(vl_sizes) == 1: viewer_look = viewer_look.unsqueeze(0) n = batched_target_coords.size()[0] d = batched_target_coords.size()[1] # Handle xy and z separately # XY = N X D x 2 xy = batched_target_coords[:, :, 0:2].double() # Z = N x D x 1 z = batched_target_coords[:, :, 2].unsqueeze(2).double() ## XY # Shift such that viewer pos is the origin # VPXY, VLXY: N x 2 vpxy = viewer_pos.double()[:, 0:2] vlxy = viewer_look.double()[:, 0:2] vpxy_to_vlxy = vlxy - vpxy # VPXY to XY: N x D x 2 vpxy_to_xy = xy - vpxy.unsqueeze(1).expand(n, d, -1) # Rotate them around the viewer position such that a normalized # viewer look vector would be (0, 1) # Rotation_matrix: N x 2 x 2 rotation_matrix = get_rotation_matrix(viewer_pos, viewer_look) # N x 1 x 2 mm N x 2 x 2 ==> N x 1 x 2 ==> N x 2 r_vpxy_to_vlxy = torch.bmm(vpxy_to_vlxy.unsqueeze(1), rotation_matrix).unsqueeze(1) # RM: N x 2 x 2 ==> N x D x 2 x 2 expanded_rm = rotation_matrix.unsqueeze(1).expand(n, d, 2, 2).contiguous().view(-1, 2, 2) # N x D x 2 ==> N*D x 1 x 2 mm N*D x 2 x 2 ==> N*D x 1 x 2 ==> N x D x 2 reshape_vpxy_to_xy = vpxy_to_xy.contiguous().view(-1, 1, 2) r_vpxy_to_xy = torch.bmm(reshape_vpxy_to_xy, expanded_rm).contiguous().view(n, d, 2) # N x D x 2 # Get the xy position in this rotated coord system with rvl as the origin rvl_to_rxy = r_vpxy_to_xy - r_vpxy_to_vlxy.squeeze(1).expand(n, d, 2) ## Z # VLZ = N x 1 vlz = viewer_look.double()[:, 2] # Z = N x D x 1 diffz = z - vlz.view(-1, 1, 1).expand(n, d, -1) ## Combine # rvl_to_rxy: N x D x 2, diffz: N x D x 1 new_xyz = torch.cat([rvl_to_rxy, diffz], 2) return new_xyz def get_dir_dist(viewer_pos, viewer_look, batched_target_coords): if len(batched_target_coords.size()) == 1: batched_target_coords = batched_target_coords.unsqueeze(0) xyz = get_xyz_viewer_look_coords_batched(viewer_pos, viewer_look, batched_target_coords) dist = xyz.abs() direction = xyz.gt(0).double() - xyz.lt(0).double() return direction, dist def get_sampled_direction_vec(viewer_pos, viewer_look, target_coord): directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord) dists = dists.squeeze() directions = directions.squeeze() ndists = dists / sum(dists) dim = np.random.choice(3, p=ndists) direction = directions[dim].item() dim_l = [(0 if i == dim else 1) for i in range(3)] dir_l = [0, 1] if direction == -1 else [1, 0] return torch.tensor(dim_l + dir_l, dtype=torch.long) def get_max_direction_vec(viewer_pos, viewer_look, target_coord): directions, dists = get_dir_dist(viewer_pos, viewer_look, target_coord) dists = dists.squeeze() directions = directions.squeeze() ndists = dists / sum(dists) dim = np.argmax(ndists) direction = directions[dim].item() dim_l = [(0 if i == dim else 1) for i in range(3)] dir_l = [0, 1] if direction == -1 else [1, 0] return torch.tensor(dim_l + dir_l, dtype=torch.long) # outputs a dense voxel rep (np array) from a sparse one. 
# size should be a tuple of (H, W, D) for the desired voxel representation # useid=True puts the block id into the voxel representation, # otherwise put a 1 def densify(blocks, size, center=(0, 0, 0), useid=False): V = np.zeros((size[0], size[1], size[2]), dtype="int32") offsets = (size[0] // 2 - center[0], size[1] // 2 - center[1], size[2] // 2 - center[2]) for b in blocks: x = b[0][0] + offsets[0] y = b[0][1] + offsets[1] z = b[0][2] + offsets[2] if x >= 0 and y >= 0 and z >= 0 and x < size[0] and y < size[1] and z < size[2]: if type(b[1]) is int: V[x, y, z] = b[1] else: V[x, y, z] = b[1][0] if not useid: V[V > 0] = 1 return V, offsets def center_of_mass(S, seg=None): seg = seg or [True for i in S] if len(S[0]) == 2: m = list(np.round(np.mean([S[i][0] for i in range(len(S)) if seg[i]], axis=0))) else: m = list(np.round(np.mean([S[i] for i in range(len(S)) if seg[i]], axis=0))) return [int(i) for i in m] def check_l1_dist(a, b, d): return abs(b[0] - a[0]) <= d[0] and abs(b[1] - a[1]) <= d[1] and abs(b[2] - a[2]) <= d[2] def sparsify_segment(seg, context): seg_sparse = [] for i, use in enumerate(seg): if use: seg_sparse.append(context[i]) return seg_sparse def get_dense_array_from_sl(sparse_shape, sl, useid): center = [sl // 2, sl // 2, sl // 2] shape_dense, _ = np.asarray(densify(sparse_shape, [sl, sl, sl], center=center, useid=useid)) return shape_dense def convert_sparse_context_seg_to_example( context_sparse, seg_sparse, c_sl, s_sl, useid, vis=False ): context_dense = get_dense_array_from_sl(context_sparse, c_sl, useid) seg_dense_uncentered = get_dense_array_from_sl(seg_sparse, c_sl, useid) # For visualization if vis: context_dense = context_dense + seg_dense_uncentered else: context_dense = context_dense - seg_dense_uncentered shifted_seg_sparse, shift_vec = shift_subsegment_corner(seg_sparse) seg_dense_centered = get_dense_array_from_sl(shifted_seg_sparse, s_sl, useid) target_coord = [-x for x in shift_vec] target_index = coord_to_index(target_coord, c_sl) return [ torch.from_numpy(context_dense), torch.from_numpy(seg_dense_centered), torch.tensor([target_index]), ] ############################################################################ # For these "S" is a list of blocks in ((x,y,z),(id, meta)) format # the segment is a list of the same length as S with either True or False # at each entry marking whether that block is in the segment # each outputs a list of blocks in ((x,y,z),(id, meta)) format def shift_negative_vec(S, segment, vec, args): N = [] for s in range(len(segment)): if not segment[s]: new_coords = tuple(np.add(S[s][0], vec)) N.append([new_coords, S[s][1]]) else: if "seg_id" in args: N.append([S[s][0], (args["seg_id"], S[s][1][1])]) else: N.append(S[s]) return N def shift_negative(S, segment, args): shift_max = args["shift_max"] """takes the blocks not in the sgement and shifts them randomly""" shift_vec = random_int_triple(-shift_max, shift_max) return shift_negative_vec(S, segment, shift_vec, args) def rotate_negative(S, segment, args): c = center_of_mass(S, seg=segment) r = random.choice([1, -1]) return [rotate_block(S[i], c, r) if segment[i] else S[i] for i in range(len(S))] def replace_negative(S, segment, args): data = args["data"] oseg, oS = data.get_positive() c_pos = center_of_mass(S, seg=segment) c_neg = center_of_mass(oS, seg=oseg) offset = np.add(c_pos, -np.array(c_neg)) N = [S[i] for i in range(len(S)) if not segment[i]] return N + [shift_block(oS[i], offset) for i in range(len(oS)) if oseg[i]] class NegativeSampler: def __init__(self, dataloader, 
shift_max=10, ntype_probs=[0.6, 0.2, 0.2]): # self.data_prob = [x['prob'] for x in dataloaders.values()] # self.dataloaders = [x['data'] for x in dataloaders.values()] self.dataloader = dataloader self.shift_max = shift_max self.ntype_probs = ntype_probs self.negative_samplers = [shift_negative, rotate_negative, replace_negative] def build_negative(self, S, segment): negative_fn = np.random.choice(self.negative_samplers, p=self.ntype_probs) return negative_fn(S, segment, {"shift_max": self.shift_max, "data": self.dataloader})
34.27451
99
0.63808
672
0.034949
0
0
0
0
0
0
4,396
0.228625
43c0a7c7b3cc424327d10e1b990bf63c250e8eb4
4,907
py
Python
CryptoAttacks/tests/Block/test_gcm.py
akbarszcz/CryptoAttacks
ae675d016b314414a3dc9b23c7d8a32da4c62457
[ "MIT" ]
54
2017-03-28T23:46:58.000Z
2022-02-23T01:53:38.000Z
CryptoAttacks/tests/Block/test_gcm.py
maximmasiutin/CryptoAttacks
d1d47d3cb2ce38738a60b728bc35ce80bfe64374
[ "MIT" ]
null
null
null
CryptoAttacks/tests/Block/test_gcm.py
maximmasiutin/CryptoAttacks
d1d47d3cb2ce38738a60b728bc35ce80bfe64374
[ "MIT" ]
13
2017-03-31T06:07:23.000Z
2021-11-20T19:01:30.000Z
#!/usr/bin/python from __future__ import absolute_import, division, print_function import subprocess from builtins import bytes, range from os.path import abspath, dirname from os.path import join as join_path from random import randint from CryptoAttacks.Block.gcm import * from CryptoAttacks.Utils import log def test_polynomials(): print("Test polynomials") Pmod = GF_2k_generator(128, [128,7,2,1,0]) P = Pmod(0b10011010101100110100100110011101100110010111111000111011101000000110110100010101000101100100111100011001010100100110100111011000) Q = Pmod(0b01111010101010110111000011011100010011101111000001010000011000010000111010001111100001111010110001001000011101000011111110010101) print(P.to_bits(), bin(P.to_int()), P) print(Q.to_bits(), bin(Q.to_int()), Q) w = P*Q print(w.to_bits(), bin(w.to_int()), w) assert Q.coefficients == Pmod(Q.coefficients).coefficients assert Q.coefficients == Pmod(Q.to_int()).coefficients assert Q.coefficients == Pmod(Q.to_bytes()).coefficients print('') Pmod = GF_2k_generator(10, [11,7,2,1,0]) c1 = Pmod(1) c2 = Pmod(0) c3 = Pmod(0) c4 = Pmod(0) polynomial1 = Polynomial_128([c1,c2,c3,c4]) c1 = Pmod(1236) c2 = Pmod(0) c3 = Pmod(0) c4 = Pmod(0) polynomial2 = Polynomial_128([c1,c2,c3,c4]) print(polynomial1) print(polynomial2) print("+", polynomial1 + polynomial2) print("*", polynomial1 * polynomial2) q = polynomial1 / polynomial2 r = polynomial1 % polynomial2 print("/", q) print("%", r) print('') print(polynomial1) print(polynomial2*q + r) print('') def test_gcm(): print("Test GCM") plaintext = bytes(b'hn9YA(F BW&B (W&&W(RT&WEF f7*WB FTgsdc') additional = bytes(b'j gej8g0SRYH8s 8s9yf sgd78taDS* GASyd ') key = bytes(b'xgrtjdh&LA28XNwh') nonce = bytes(b'a drO*1@((js') ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce) assert gcm_verify(tag, ciphertext, additional, key, nonce) blocks = aes_bytes_to_poly_blocks(ciphertext, additional) ciphertext2, additional2 = poly_blocks_to_aes_bytes(blocks) assert ciphertext == ciphertext2 assert additional == additional2 def polynomial_factors_product(factorization): """factorization: [(poly1, power), (poly2, power)]""" result = factorization[0][0].one_element() for f, f_degree in factorization: result *= f**f_degree return result def test_factor(): print("Test factor") Pmod = GF_2k_generator(9, [9,7,2,1,0]) c1 = Pmod(31) c2 = Pmod(0) c3 = Pmod(0) c4 = Pmod(3) polynomial1 = Polynomial_128([c1,c2,c3,c4]) c1 = Pmod(237) c2 = Pmod(1) c3 = Pmod(0) c4 = Pmod(10) polynomial2 = Polynomial_128([c1,c2,c3,c4]) polynomial = polynomial1 * polynomial2 print(polynomial1) print(polynomial2) print(polynomial) print(polynomial.monic()) print('') factorization = factor_polynomial(polynomial) print(factorization) result = polynomial.one_element() for f, f_degree in factorization: result *= f**f_degree print(result) print('') assert polynomial_factors_product(factorization) == polynomial.monic() def test_repeated_nonce(): print("Test Key-Recovery Attack on GCM with Repeated Nonces") for _ in range(3): nonce = random_bytes(12) key = random_bytes(16) h = bytes(AES.new(key, AES.MODE_ECB).encrypt(bytes(b'\x00'*16))) h = aes_polynomial(h) ciphertexts_additionals_tags = [] for _ in range(4): plaintext = random_bytes(randint(0, 50)) additional = random_bytes(randint(0, 50)) ciphertext, tag = gcm_encrypt(plaintext, additional, key, nonce) ciphertexts_additionals_tags.append((ciphertext, additional, tag)) valid_ciphertext, valid_additional, valid_tag = ciphertexts_additionals_tags[0] auth_key_candidates = 
recover_key_repated_nonce(ciphertexts_additionals_tags) assert h.to_bytes() in auth_key_candidates # try found auth key candidates correct_auth_key_found = False for auth_key in auth_key_candidates: forged_ciphertext = random_bytes(randint(0, 10)) forged_additional = random_bytes(randint(0, 10)) forged_tag = gcm_forge_tag(ciphertext=forged_ciphertext, additional=forged_additional, auth_key=auth_key, valid_ciphertext=valid_ciphertext, valid_additional=valid_additional, valid_tag=valid_tag) if gcm_verify(forged_tag, forged_ciphertext, forged_additional, key, nonce): correct_auth_key_found = True break assert correct_auth_key_found def run(): log.level = 'debug' test_polynomials() test_gcm() test_factor() test_repeated_nonce() if __name__ == "__main__": run()
31.254777
144
0.678419
0
0
0
0
0
0
0
0
359
0.073161
43c14b71a9e55a3f072d7e8094c999b91490df88
507
py
Python
python_clean_architecture/use_cases/orderdata_use_case.py
jfsolarte/python_clean_architecture
56b0c0eff50bc98774a0caee12e3030789476687
[ "MIT" ]
null
null
null
python_clean_architecture/use_cases/orderdata_use_case.py
jfsolarte/python_clean_architecture
56b0c0eff50bc98774a0caee12e3030789476687
[ "MIT" ]
null
null
null
python_clean_architecture/use_cases/orderdata_use_case.py
jfsolarte/python_clean_architecture
56b0c0eff50bc98774a0caee12e3030789476687
[ "MIT" ]
null
null
null
from python_clean_architecture.shared import use_case as uc
from python_clean_architecture.shared import response_object as res


class OrderDataGetUseCase(uc.UseCase):

    def __init__(self, repo):
        self.repo = repo

    def execute(self, request_object):
        #if not request_object:
            #return res.ResponseFailure.build_from_invalid_request_object(request_object)

        storage_rooms = self.repo.order(items=request_object.items)

        return res.ResponseSuccess(storage_rooms)
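# A minimal usage sketch, assuming a repository object that exposes .order(items=...) and a
# request object carrying an `items` attribute (the names below are placeholders, not part of
# the original module):
#
#   repo = SomeOrderRepository()
#   use_case = OrderDataGetUseCase(repo)
#   response = use_case.execute(request_object)
#   data = response.value  # assuming ResponseSuccess exposes the wrapped result as `.value`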
31.6875
89
0.755424
376
0.741617
0
0
0
0
0
0
100
0.197239
43c1a9b70d766525944aa92cfc1043f3d5e3bc1b
17,842
py
Python
owscapable/swe/common.py
b-cube/OwsCapable
a01815418fe982434503d6542cb18e1ac8989684
[ "BSD-3-Clause" ]
1
2016-02-01T12:55:13.000Z
2016-02-01T12:55:13.000Z
owscapable/swe/common.py
b-cube/OwsCapable
a01815418fe982434503d6542cb18e1ac8989684
[ "BSD-3-Clause" ]
1
2015-06-23T14:07:50.000Z
2015-06-23T14:07:50.000Z
owscapable/swe/common.py
b-cube/OwsCapable
a01815418fe982434503d6542cb18e1ac8989684
[ "BSD-3-Clause" ]
null
null
null
from __future__ import (absolute_import, division, print_function) from owscapable.util import nspath_eval from owscapable.namespaces import Namespaces from owscapable.util import testXMLAttribute, testXMLValue, InfiniteDateTime, NegativeInfiniteDateTime from dateutil import parser from datetime import timedelta from owscapable.etree import etree def get_namespaces(): ns = Namespaces() return ns.get_namespaces(["swe20", "xlink"]) namespaces = get_namespaces() def nspv(path): return nspath_eval(path, namespaces) def make_pair(string, cast=None): if string is None: return None string = string.split(" ") if cast is not None: try: string = map(lambda x: cast(x), string) except: print("Could not cast pair to correct type. Setting to an empty tuple!") string = "" return tuple(string) def get_uom(element): uom = testXMLAttribute(element, "code") if uom is None: uom = testXMLAttribute(element, nspv("xlink:href")) return uom def get_boolean(value): if value is None: return None if value is True or value.lower() in ["yes","true"]: return True elif value is False or value.lower() in ["no","false"]: return False else: return None def get_int(value): try: return int(value) except: return None def get_float(value): try: return float(value) except: return None AnyScalar = map(lambda x: nspv(x), ["swe20:Boolean", "swe20:Count", "swe20:Quantity", "swe20:Time", "swe20:Category", "swe20:Text"]) AnyNumerical = map(lambda x: nspv(x), ["swe20:Count", "swe20:Quantity", "swe20:Time"]) AnyRange = map(lambda x: nspv(x), ["swe20:QuantityRange", "swe20:TimeRange", "swe20:CountRange", "swe20:CategoryRange"]) class NamedObject(object): def __init__(self, element): # No call to super(), the type object will process that. self.name = testXMLAttribute(element, "name") try: self.content = eval(element[-1].tag.split("}")[-1])(element[-1]) except IndexError: self.content = None except BaseException: raise # Revert to the content if attribute does not exists def __getattr__(self, name): return getattr(self.content, name) class AbstractSWE(object): def __init__(self, element): # Attributes self.id = testXMLAttribute(element,"id") # string, optional # Elements self.extention = [] # anyType, min=0, max=X class AbstractSWEIdentifiable(AbstractSWE): def __init__(self, element): super(AbstractSWEIdentifiable, self).__init__(element) # Elements self.identifier = testXMLValue(element.find(nspv("swe20:identifier"))) # anyURI, min=0 self.label = testXMLValue(element.find(nspv("swe20:label"))) # string, min=0 self.description = testXMLValue(element.find(nspv("swe20:description"))) # string, min=0 class AbstractDataComponent(AbstractSWEIdentifiable): def __init__(self, element): super(AbstractDataComponent, self).__init__(element) # Attributes self.definition = testXMLAttribute(element,"definition") # anyURI, required self.updatable = get_boolean(testXMLAttribute(element,"updatable")) # boolean, optional self.optional = get_boolean(testXMLAttribute(element,"optional")) or False # boolean, default=False class AbstractSimpleComponent(AbstractDataComponent): def __init__(self, element): super(AbstractSimpleComponent, self).__init__(element) # Attributes self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, optional self.axisID = testXMLAttribute(element,"axisID") # string, optional # Elements self.quality = filter(None, [Quality(q) for q in [e.find('*') for e in element.findall(nspv("swe20:quality"))] if q is not None]) try: self.nilValues = NilValues(element.find(nspv("swe20:nilValues"))) except: self.nilValues = None 
class Quality(object): def __new__(cls, element): t = element.tag.split("}")[-1] if t == "Quantity": return Quantity(element) elif t == "QuantityRange": return QuantityRange(element) elif t == "Category": return Category(element) elif t == "Text": return Text(element) else: return None class NilValues(AbstractSWE): def __init__(self, element): super(NilValues, self).__init__(element) self.nilValue = filter(None, [nilValue(x) for x in element.findall(nspv("swe20:nilValue"))]) # string, min=0, max=X class nilValue(object): def __init__(self, element): self.reason = testXMLAttribute(element, "reason") self.value = testXMLValue(element) class AllowedTokens(AbstractSWE): def __init__(self, element): super(AllowedTokens, self).__init__(element) self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]) # string, min=0, max=X self.pattern = testXMLValue(element.find(nspv("swe20:pattern"))) # string (Unicode Technical Standard #18, Version 13), min=0 class AllowedValues(AbstractSWE): def __init__(self, element): super(AllowedValues, self).__init__(element) self.value = filter(None, map(lambda x: get_float(x), [testXMLValue(x) for x in element.findall(nspv("swe20:value"))])) self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))]) self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0 class AllowedTimes(AbstractSWE): def __init__(self, element): super(AllowedTimes, self).__init__(element) self.value = filter(None, [testXMLValue(x) for x in element.findall(nspv("swe20:value"))]) self.interval = filter(None, [make_pair(testXMLValue(x)) for x in element.findall(nspv("swe20:interval"))]) self.significantFigures = get_int(testXMLValue(element.find(nspv("swe20:significantFigures")))) # integer, min=0 class Boolean(AbstractSimpleComponent): def __init__(self, element): super(Boolean, self).__init__(element) # Elements """ 6.2.1 Boolean A Boolean representation of a proptery can take only two values that should be "true/false" or "yes/no". """ value = get_boolean(testXMLValue(element.find(nspv("swe20:value")))) # boolean, min=0, max=1 class Text(AbstractSimpleComponent): def __init__(self, element): super(Text, self).__init__(element) # Elements """ Req 6. A textual representation shall at least consist of a character string. 
""" self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1 try: self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1 except: self.constraint = None class Category(AbstractSimpleComponent): def __init__(self, element): super(Category, self).__init__(element) # Elements self.codeSpace = testXMLAttribute(element.find(nspv("swe20:codeSpace")), nspv("xlink:href")) # Reference, min=0, max=1 self.value = testXMLValue(element.find(nspv("swe20:value"))) # string, min=0, max=1 try: self.constraint = AllowedTokens(element.find(nspv("swe20:constraint/swe20:AllowedTokens"))) # AllowedTokens, min=0, max=1 except: self.constraint = None class CategoryRange(Category): def __init__(self, element): super(CategoryRange, self).__init__(element) # Elements value = testXMLValue(element.find(nspv("swe20:value"))) self.values = make_pair(value) if value is not None else None class Count(AbstractSimpleComponent): def __init__(self, element): super(Count, self).__init__(element) # Elements self.value = get_int(testXMLValue(element.find(nspv("swe20:value")))) # integer, min=0, max=1 try: self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1 except: self.constraint = None class CountRange(Count): def __init__(self, element): super(CountRange, self).__init__(element) # Elements value = testXMLValue(element.find(nspv("swe20:value"))) self.value = make_pair(value,int) if value is not None else None class Quantity(AbstractSimpleComponent): def __init__(self, element): super(Quantity, self).__init__(element) # Elements self.uom = get_uom(element.find(nspv("swe20:uom"))) self.value = get_float(testXMLValue(element.find(nspv("swe20:value")))) # double, min=0, max=1 try: self.constraint = AllowedValues(element.find(nspv("swe20:constraint/swe20:AllowedValues"))) # AllowedValues, min=0, max=1 except: self.constraint = None class QuantityRange(Quantity): def __init__(self, element): super(QuantityRange, self).__init__(element) # Elements value = testXMLValue(element.find(nspv("swe20:value"))) self.value = make_pair(value,float) if value is not None else None def get_time(value, referenceTime, uom): try: value = parser.parse(value) except (AttributeError, ValueError): # Most likely an integer/float using a referenceTime try: if uom.lower() == "s": value = referenceTime + timedelta(seconds=float(value)) elif uom.lower() == "min": value = referenceTime + timedelta(minutes=float(value)) elif uom.lower() == "h": value = referenceTime + timedelta(hours=float(value)) elif uom.lower() == "d": value = referenceTime + timedelta(days=float(value)) except (AttributeError, ValueError): pass except OverflowError: # Too many numbers (> 10) or INF/-INF if value.lower() == "inf": value = InfiniteDateTime() elif value.lower() == "-inf": value = NegativeInfiniteDateTime() return value class Time(AbstractSimpleComponent): def __init__(self, element): super(Time, self).__init__(element) # Elements self.uom = get_uom(element.find(nspv("swe20:uom"))) try: self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1 except: self.constraint = None # Attributes self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional try: self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional except (AttributeError, ValueError): self.referenceTime = None value = 
testXMLValue(element.find(nspv("swe20:value"))) # TimePosition, min=0, max=1 self.value = get_time(value, self.referenceTime, self.uom) class TimeRange(AbstractSimpleComponent): def __init__(self, element): super(TimeRange, self).__init__(element) # Elements self.uom = get_uom(element.find(nspv("swe20:uom"))) try: self.constraint = AllowedTimes(element.find(nspv("swe20:constraint/swe20:AllowedTimes"))) # AllowedTimes, min=0, max=1 except: self.constraint = None # Attributes self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional try: self.referenceTime = parser.parse(testXMLAttribute(element,"referenceTime")) # dateTime, optional except (AttributeError, ValueError): self.referenceTime = None values = make_pair(testXMLValue(element.find(nspv("swe20:value")))) # TimePosition, min=0, max=1 self.value = [get_time(t, self.referenceTime, self.uom) for t in values] class DataRecord(AbstractDataComponent): def __init__(self, element): super(DataRecord, self).__init__(element) # Elements self.field = [Field(x) for x in element.findall(nspv("swe20:field"))] def get_by_name(self, name): return next((x for x in self.field if x.name == name), None) class Field(NamedObject): def __init__(self, element): super(Field, self).__init__(element) class Vector(AbstractDataComponent): def __init__(self, element): super(Vector, self).__init__(element) # Elements self.coordinate = [Coordinate(x) for x in element.findall(nspv("swe20:coordinate"))] # Attributes self.referenceFrame = testXMLAttribute(element,"referenceFrame") # anyURI, required self.localFrame = testXMLAttribute(element,"localFrame") # anyURI, optional def get_by_name(self, name): return next((x for x in self.coordinate if x.name == name), None) class Coordinate(NamedObject): def __init__(self, element): super(Coordinate, self).__init__(element) #if element[-1].tag not in AnyNumerical: # print "Coordinate does not appear to be an AnyNumerical member" class DataChoice(AbstractDataComponent): def __init__(self, element): super(DataChoice, self).__init__(element) self.item = [Item(x) for x in element.findall(nspv("swe20:item"))] def get_by_name(self, name): return next((x for x in self.item if x.name == name), None) class Item(NamedObject): def __init__(self, element): super(Item, self).__init__(element) class DataArray(AbstractDataComponent): def __init__(self, element): super(DataArray, self).__init__(element) self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required self.values = testXMLValue(element.find(nspv("swe20:values"))) try: self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding"))) except: self.encoding = None class Matrix(AbstractDataComponent): def __init__(self, element): super(Matrix, self).__init__(element) self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # required self.elementType = ElementType(element.find(nspv("swe20:elementType"))) # required self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding"))) self.values = testXMLValue(element.find(nspv("swe20:values"))) self.referenceFrame = testXMLAttribute(element, "referenceFrame") # anyURI, required self.localFrame = testXMLAttribute(element, "localFrame") # anyURI, optional class DataStream(AbstractSWEIdentifiable): def __init__(self, element): super(DataStream, self).__init__(element) self.elementCount = element.find(nspv("swe20:elementCount/swe20:Count")) # optional self.elementType = 
ElementType(element.find(nspv("swe20:elementType"))) # optional self.encoding = AbstractEncoding(element.find(nspv("swe20:encoding"))) self.values = testXMLValue(element.find(nspv("swe20:values"))) class ElementType(NamedObject): def __init__(self, element): super(ElementType, self).__init__(element) class AbstractEncoding(object): def __new__(cls, element): t = element[-1].tag.split("}")[-1] if t == "TextEncoding": return super(AbstractEncoding, cls).__new__(TextEncoding, element) elif t == "XMLEncoding": return super(AbstractEncoding, cls).__new__(XMLEncoding, element) elif t == "BinaryEncoding": return super(AbstractEncoding, cls).__new__(BinaryEncoding, element) class TextEncoding(AbstractEncoding): def __init__(self, element): self.tokenSeparator = testXMLAttribute(element[-1], "tokenSeparator") # string, required self.blockSeparator = testXMLAttribute(element[-1], "blockSeparator") # string, required self.decimalSeparator = testXMLAttribute(element[-1], "decimalSeparator") or "." # string, optional, default="." self.collapseWhiteSpaces = get_boolean(testXMLAttribute(element[-1], "collapseWhiteSpaces")) or True # boolean, optional, default=True class XMLEncoding(AbstractEncoding): def __init__(self, element): raise NotImplementedError class BinaryEncoding(AbstractEncoding): def __init__(self, element): raise NotImplementedError
43.200969
172
0.619325
14,999
0.840657
0
0
0
0
0
0
3,428
0.192131
43c2d697bacb0820c4e842d6861cb1732909d8a0
11,386
py
Python
main_fed.py
gao969/scaffold-dgc-clustering
9f259dfdf0897dcb1dece2e1197268f585f54a69
[ "MIT" ]
null
null
null
main_fed.py
gao969/scaffold-dgc-clustering
9f259dfdf0897dcb1dece2e1197268f585f54a69
[ "MIT" ]
null
null
null
main_fed.py
gao969/scaffold-dgc-clustering
9f259dfdf0897dcb1dece2e1197268f585f54a69
[ "MIT" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Python version: 3.6 import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import copy import numpy as np from torchvision import datasets, transforms import torch import os import torch.distributed as dist from utils.sampling import mnist_iid, mnist_noniid, cifar_iid from utils.options import args_parser from models.Update import LocalUpdate from models.Update import LocalUpdateF from models.Nets import MLP, CNNMnist, CNNCifar from models.Fed import FedAvg from models.test import test_img from torch.multiprocessing import Process from deep_gradient_compression import DGC import json # __name__是内置的变量,在执行当前文件(main_fed.py)时,默认值为__main__ # 但是如果其他.py文件import当前文件(main_fed.py)时,在其他文件中执行main_fed.py中的__name__,此时main_fed.py中的__name__默认值为文件名main_fed.py if __name__ == '__main__': # parse args args = args_parser() args.device = torch.device('cuda:{}'.format(args.gpu)) torch.manual_seed(0) torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False rank = 0 device_id = rank os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500' dist.init_process_group(backend='gloo', rank=rank, world_size=args.world_size) # if torch.cuda.is_available() and args.gpu != -1 else 'cpu' # load dataset and split users if args.dataset == 'mnist': # ToTensor():归一数据到(0,1),Normalize():(date-0.1307)/0.3081,将数据分布到(-1, 1) trans_mnist = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]) if trans_mnist is not None: print(1) print(trans_mnist) # 测试(60000)和训练集(10000) dataset_train = datasets.MNIST('../data/mnist/', train=True, download=True, transform=trans_mnist) dataset_test = datasets.MNIST('../data/mnist/', train=False, download=True, transform=trans_mnist) # sample users # Noniid数据 if args.iid: dict_users = mnist_iid(dataset_train, args.num_users) else: dict_users = mnist_noniid(dataset_train, args.num_users) elif args.dataset == 'cifar': trans_cifar = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) dataset_train = datasets.CIFAR10('../data/cifar', train=True, download=True, transform=trans_cifar) dataset_test = datasets.CIFAR10('../data/cifar', train=False, download=True, transform=trans_cifar) if args.iid: dict_users = cifar_iid(dataset_train, args.num_users) else: exit('Error: only consider IID setting in CIFAR10') else: exit('Error: unrecognized dataset') img_size = dataset_train[0][0].shape # print('df ',img_size) [1,28,28] # build model # print(args.model) if args.model == 'cnn' and args.dataset == 'cifar': net_glob = CNNCifar(args=args).to(args.device) elif args.model == 'cnn' and args.dataset == 'mnist': net_glob = CNNMnist(args=args).to(args.device) elif args.model == 'mlp': len_in = 1 for x in img_size: # print('x取值',x) len_in *= x net_glob = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) # add control_global = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) else: exit('Error: unrecognized model') # 设置为训练模型 net_glob.train() print(net_glob) control_weights =control_global.state_dict() # copy weights # 初始化全局权重 w_glob = net_glob.state_dict() c_glob = copy.deepcopy(net_glob.state_dict()) # print(w_glob) # training loss_train = [] accuracy = [] cv_loss, cv_acc = [], [] val_loss_pre, counter = 0, 0 net_best = None best_loss = None val_acc_list, net_list = [], [] count = 0, 0 test_acc_list = [] if args.all_clients: print("Aggregation over all clients") w_locals = [w_glob for i 
in range(args.num_users)] # add else: # 初始化本地权重 c_local = [MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) for i in range(args.num_users)] for net in c_local: net.load_state_dict(control_weights) delta_c = copy.deepcopy(net_glob.state_dict()) # delta_x = copy.deepcopy(net_glob.state_dict()) # with open("test.txt", "w") as f: # for i in range(0, len(c_local)): # for k,v in c_local[i].state_dict().items(): # f.write(f"{k},{v}\n".format(k,v)) # with open("test.txt", "a") as f: # for i in range(0, len(c_local)): # for k, v in w_locals[i].items(): # f.write(f"{k},{v}\n".format(k, v)) # add 初始化变化量 # print("why?") for iter in range(args.epochs): # 初始换控制变量 for i in delta_c: delta_c[i] = 0.0 # for i in delta_x: # delta_x[i] = 0.0 loss_locals = [] if not args.all_clients: w_locals = [] m = max(int(args.frac * args.num_users), 1) # 每次随机十位幸运观众 idxs_users = np.random.choice(range(args.num_users), m, replace=False) for idx in idxs_users: # momentum法SGD local = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx]) w, loss, local_delta_c, local_delta, control_local_w= local.train(net=copy.deepcopy(net_glob).to(args.device), control_local = c_local[idx], control_global=control_global, rank=rank, device_id=device_id, size=args.world_size) # add if iter != 0: c_local[idx].load_state_dict(control_local_w) if args.all_clients: w_locals[idx] = copy.deepcopy(w) else: w_locals.append(copy.deepcopy(w)) # add loss_locals.append(copy.deepcopy(loss)) # add for i in delta_c: if iter != 0: delta_c[i] += w[i] else: delta_c[i] += local_delta_c[i] # delta_x[i] += local_delta[i] # add # update the delta C for i in delta_c: delta_c[i] /= m # delta_x[i] /= m # update global weights w_glob = FedAvg(w_locals) # add 更新全局c,w # w_glob = net_glob.state_dict() control_global_w = control_global.state_dict() for i in control_global_w: if iter !=0: # w_glob[i] = delta_x[i] # else: # w_glob[i] += delta_x[i] control_global_w[i] += (m / args.num_users) * delta_c[i] # copy weight to net_glob net_glob.load_state_dict(w_glob) # add control_global.load_state_dict(control_global_w) # print loss loss_avg = sum(loss_locals) / len(loss_locals) print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avg)) loss_train.append(loss_avg) # acc_train, loss_train = test_img(net_glob, dataset_train, args) acc_test, loss_test = test_img(net_glob, dataset_test, args) accuracy.append(acc_test) # add for c in range(args.num_users): local_model = LocalUpdate(args=args, dataset=dataset_train, idxs=dict_users[idx]) torch.cuda.empty_cache() # net_glob.eval() # print("Training accuracy: {:.2f}".format(acc_train)) # print("Testing accuracy: {:.2f}".format(acc_test)) ####################################################################################################################### ####################################################################################################################### ####################################################################################################################### ####################################################################################################################### # Fedavg # build model if args.model == 'cnn' and args.dataset == 'cifar': net_globF = CNNCifar(args=args).to(args.device) elif args.model == 'cnn' and args.dataset == 'mnist': net_globF = CNNMnist(args=args).to(args.device) elif args.model == 'mlp': len_in = 1 for x in img_size: len_in *= x net_globF = MLP(dim_in=len_in, dim_hidden=200, dim_out=args.num_classes).to(args.device) else: 
exit('Error: unrecognized model') print(net_globF) net_globF.train() # copy weights w_globF = net_globF.state_dict() # training loss_trainF = [] accuracyF = [] cv_loss, cv_acc = [], [] val_loss_pre, counter = 0, 0 net_best = None best_loss = None val_acc_list, net_list = [], [] if args.all_clients: print("Aggregation over all clients") w_localsF = [w_globF for i in range(args.num_users)] for iter in range(args.epochs): loss_locals = [] if not args.all_clients: w_localsF = [] m = max(int(args.frac * args.num_users), 1) idxs_users = np.random.choice(range(args.num_users), m, replace=False) for idx in idxs_users: localF = LocalUpdateF(args=args, dataset=dataset_train, idxs=dict_users[idx]) w, loss = localF.train(net=copy.deepcopy(net_globF).to(args.device)) if args.all_clients: w_localsF[idx] = copy.deepcopy(w) else: w_localsF.append(copy.deepcopy(w)) loss_locals.append(copy.deepcopy(loss)) # update global weights w_globF = FedAvg(w_localsF) # copy weight to net_globF net_globF.load_state_dict(w_globF) # print loss loss_avgF = sum(loss_locals) / len(loss_locals) print('Round {:3d}, Average loss {:.3f}'.format(iter, loss_avgF)) loss_trainF.append(loss_avgF) acc_test, loss_test = test_img(net_globF, dataset_test, args) accuracyF.append(acc_test) # plot loss curve plt.figure() print(loss_train, loss_trainF) plt.plot(range(len(loss_train)), loss_train, label='Scaffold', zorder=2) plt.plot(range(len(loss_trainF)), loss_trainF, 'r', label='FedAvg',zorder=1) plt.ylabel('train_loss') plt.xlabel('epochs') plt.legend(loc='best') plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'train_loss', args.iid)) # testing net_glob.eval() acc_train, loss_train = test_img(net_glob, dataset_train, args) acc_test, loss_test = test_img(net_glob, dataset_test, args) print("Training accuracy: {:.2f}".format(acc_train)) print("Testing accuracy: {:.2f}".format(acc_test)) # plot loss curve plt.figure() # plt.plot((np.arange(1, len(accuracy)), 1), accuracy, 'r') plt.plot(range(len(accuracy)), accuracy, label='Scaffold', zorder=2) plt.plot(range(len(accuracyF)), accuracyF, 'r', label='FedAvg', zorder=1) plt.ylabel('test_acc') plt.xlabel('epochs') plt.legend(loc='best') plt.savefig('./save/fed_{}_{}_{}_{}_iid{}.png'.format(args.dataset, args.model, args.epochs, 'acc_test', args.iid))
35.033846
136
0.584578
0
0
0
0
0
0
0
0
3,212
0.275094
43c3bca28b83f4b20caa188f5ac7f59f03173404
2,085
py
Python
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py
gkazla/B.LambdaLayerCommon
1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980
[ "Apache-2.0" ]
null
null
null
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py
gkazla/B.LambdaLayerCommon
1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980
[ "Apache-2.0" ]
null
null
null
b_lambda_layer_common_test/integration/infrastructure/function_with_unit_tests.py
gkazla/B.LambdaLayerCommon
1a4f9cd3d8b7e447c8467bd7dde50cb9e9a6e980
[ "Apache-2.0" ]
null
null
null
from aws_cdk.aws_lambda import Function, Code, Runtime
from aws_cdk.core import Stack, Duration
from b_aws_testing_framework.tools.cdk_testing.testing_stack import TestingStack
from b_cfn_lambda_layer.package_version import PackageVersion

from b_lambda_layer_common.layer import Layer
from b_lambda_layer_common_test.unit import root


class FunctionWithUnitTests(Function):
    """
    Function that lets us run unit tests inside lambda function.

    We want to run unit tests both locally and remotely.
    """
    def __init__(self, scope: Stack):
        super().__init__(
            scope=scope,
            id=f'{TestingStack.global_prefix()}FunctionWithUnitTests',
            code=Code.from_asset(root),
            handler='handler.handler',
            runtime=Runtime.PYTHON_3_8,
            timeout=Duration.minutes(5),
            memory_size=512,
            layers=[
                Layer(
                    scope=scope,
                    name=f'{TestingStack.global_prefix()}TestingLayerWithUnitTests',
                    dependencies={
                        # These dependencies are required for running unit tests inside lambda functions.
                        # Pytest is used for running actual unit tests.
                        'pytest': PackageVersion.from_string_version('6.2.5'),
                        # Pook is used for HTTP mocking, therefore it is also needed here.
                        'pook': PackageVersion.from_string_version('1.0.1'),
                        # Not sure about this dependency. Lambda runtime throws errors if its missing.
                        'aws-cdk.core': PackageVersion.from_string_version('1.99.0'),
                        # This dependency should be installed with 'pook' since it depends on 'jsonschema' which depends on this.
                        # For some reason it doesn't.
                        # Tests would fail with import error otherwise.
                        'importlib-resources': PackageVersion.from_string_version('5.4.0')
                    }
                )
            ]
        )
47.386364
129
0.601918
1,747
0.83789
0
0
0
0
0
0
793
0.380336
43c4a0c547cce9ae68639184c6cd8640efc21e50
857
py
Python
tests/metarl/tf/baselines/test_baselines.py
neurips2020submission11699/metarl
ae4825d21478fa1fd0aa6b116941ea40caa152a5
[ "MIT" ]
2
2021-02-07T12:14:52.000Z
2021-07-29T08:07:22.000Z
tests/metarl/tf/baselines/test_baselines.py
neurips2020submission11699/metarl
ae4825d21478fa1fd0aa6b116941ea40caa152a5
[ "MIT" ]
null
null
null
tests/metarl/tf/baselines/test_baselines.py
neurips2020submission11699/metarl
ae4825d21478fa1fd0aa6b116941ea40caa152a5
[ "MIT" ]
null
null
null
""" This script creates a test that fails when metarl.tf.baselines failed to initialize. """ import tensorflow as tf from metarl.envs import MetaRLEnv from metarl.tf.baselines import ContinuousMLPBaseline from metarl.tf.baselines import GaussianMLPBaseline from tests.fixtures import TfGraphTestCase from tests.fixtures.envs.dummy import DummyBoxEnv class TestTfBaselines(TfGraphTestCase): def test_baseline(self): """Test the baseline initialization.""" box_env = MetaRLEnv(DummyBoxEnv()) deterministic_mlp_baseline = ContinuousMLPBaseline(env_spec=box_env) gaussian_mlp_baseline = GaussianMLPBaseline(env_spec=box_env) self.sess.run(tf.compat.v1.global_variables_initializer()) deterministic_mlp_baseline.get_param_values() gaussian_mlp_baseline.get_param_values() box_env.close()
31.740741
76
0.772462
503
0.586931
0
0
0
0
0
0
131
0.152859
43c4fed77cd489496d3337fe3e83cfcc13582afb
2,390
py
Python
api/files/api/app/monthly_report.py
trackit/trackit-legacy
76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da
[ "Apache-2.0" ]
2
2018-02-01T09:18:05.000Z
2020-03-12T18:11:11.000Z
api/files/api/app/monthly_report.py
trackit/trackit-legacy
76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da
[ "Apache-2.0" ]
null
null
null
api/files/api/app/monthly_report.py
trackit/trackit-legacy
76cfab7941eddb9d390dd6c7b9a408a9ad4fc8da
[ "Apache-2.0" ]
5
2018-05-11T10:32:52.000Z
2021-05-26T12:09:47.000Z
import jinja2 import json from send_email import send_email from app.models import User, MyResourcesAWS, db from app.es.awsdetailedlineitem import AWSDetailedLineitem from sqlalchemy import desc import subprocess import datetime from flask import render_template def monthly_html_template(): template_dir = '/usr/trackit/templates' loader = jinja2.FileSystemLoader(template_dir) env = jinja2.Environment(loader=loader) template = env.get_template('emailPDFreport.html') now = datetime.datetime.now() try: users = User.query.all() for user in users: if user.report_last_emailed_at == None: user.report_last_emailed_at = datetime.datetime.utcnow() db.session.add(user) db.session.commit() last_emailed_days = (now - user.report_last_emailed_at).days if last_emailed_days >= 30: for key in user.aws_keys: date = "{} {}".format(now.strftime("%B"), now.year) pretty_key = user.get_aws_key(key.key).pretty + ' ' + key.key monthly_cost = AWSDetailedLineitem.get_monthly_cost_by_product(key.get_aws_user_id()) estimation_hour, estimation_month = get_estimation(user, key) total = sum(float(i.get("cost")) for i in monthly_cost['products']) email_template = template.render(email=user.email, date=date, key=pretty_key, products=monthly_cost['products'], total=total, hourly_cost=estimation_hour, monthly_cost=estimation_month) if user.email.endswith("msolution.io"): send_email(user.email, 'Trackit monthly report', email_template.encode('utf-8').strip(), True) user.report_last_emailed_at = datetime.datetime.utcnow() db.session.add(user) db.session.commit() except Exception, e: print("ERROR " + str(e)) def get_estimation(user, key): estimation = MyResourcesAWS.query.filter(MyResourcesAWS.key == key.key).order_by(desc(MyResourcesAWS.date)).first() estimation = [] if not estimation else estimation.json() cost = sum(estimation_cost(e) for e in estimation) return cost, cost*720 def estimation_cost(estimation): return sum(item['cost'] for item in estimation['prices'] if item['name'] == 'aws')
47.8
205
0.65523
0
0
0
0
0
0
0
0
163
0.068201
43c657c522f9cb22a9a0ca2bb0912e5da035332c
7,309
py
Python
slow_tests/boot_test.py
rdturnermtl/mlpaper
5da5cb7b3a56d3cfdc7162d01fac2679c9050e76
[ "Apache-2.0" ]
9
2020-07-23T02:12:48.000Z
2021-06-24T08:19:08.000Z
slow_tests/boot_test.py
rdturnermtl/benchmark_tools
5da5cb7b3a56d3cfdc7162d01fac2679c9050e76
[ "Apache-2.0" ]
14
2017-11-29T04:17:04.000Z
2018-03-07T00:35:00.000Z
slow_tests/boot_test.py
rdturnermtl/mlpaper
5da5cb7b3a56d3cfdc7162d01fac2679c9050e76
[ "Apache-2.0" ]
1
2017-12-29T01:46:31.000Z
2017-12-29T01:46:31.000Z
# Ryan Turner ([email protected]) from __future__ import division, print_function from builtins import range import numpy as np import scipy.stats as ss import mlpaper.constants as cc import mlpaper.mlpaper as bt import mlpaper.perf_curves as pc from mlpaper.classification import DEFAULT_NGRID, curve_boot from mlpaper.test_constants import FPR from mlpaper.util import area, interp1d _FPR = FPR / 3.0 # Divide by number of test funcs def fail_check_stat(fail, runs, expect_p_fail, fpr): pvals_2side = [ss.binom_test(ff, runs, expect_p_fail) for ff in fail] pvals_1side = [ss.binom_test(ff, runs, expect_p_fail, alternative="greater") for ff in fail] # Note that we are not going multiple comparison correction between the # two sided and one sided tests. print(fail) print(pvals_2side) assert np.min(pvals_2side) >= fpr / len(pvals_2side) print(pvals_1side) assert np.min(pvals_1side) >= fpr / len(pvals_1side) def test_boot(runs=100): N = 201 confidence = 0.95 # Drawing more seeds than we need to be safe seeds = np.nditer(np.random.randint(low=0, high=int(1e6), size=runs * 5)) def run_trial(y_true, y_score, y_score_ref, true_curve, curve_f, seed, x_grid=None): epsilon = 1e-6 curve, _ = curve_f(y_true, y_score[:, 1]) auc, = area(*curve) curve, _ = curve_f(y_true, y_score_ref[:, 1]) auc_ref, = area(*curve) true_value, = area(*true_curve) np.random.seed(seed) (auc_, EB, pval), curve = curve_boot( y_true, y_score, ref=true_value, curve_f=curve_f, confidence=confidence, x_grid=x_grid ) true_curve_grid, = interp1d(curve[cc.XGRID].values, *true_curve) assert auc_ == auc fail_EB = np.abs(auc - true_value) > EB # Could also test distn with 1-sided KS test but this easier for now fail_P = pval < 1.0 - confidence fail_curve = (true_curve_grid < curve[cc.LB].values - epsilon) | ( curve[cc.UB].values + epsilon < true_curve_grid ) assert (x_grid is None) or np.all(curve[cc.XGRID].values == x_grid) np.random.seed(seed) (auc_, EB_, pval), curve_ = curve_boot( y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=False, x_grid=x_grid ) assert auc_ == auc assert EB_ == EB # Could also test distn with 1-sided KS test but this easier for now fail_P2 = pval < 1.0 - confidence assert np.all(curve_.values == curve.values) np.random.seed(seed) (auc_, EB, pval_), curve_ = curve_boot( y_true, y_score, ref=y_score_ref, curve_f=curve_f, confidence=confidence, pairwise_CI=True, x_grid=x_grid ) assert auc_ == auc fail_EB2 = np.abs(auc - auc_ref) > EB # Could also test distn with 1-sided KS test but this easier for now assert pval_ == pval assert np.all(curve_.values == curve.values) return fail_EB, fail_P, fail_EB2, fail_P2, fail_curve fail = [0] * 12 fail_curve_roc = np.zeros(DEFAULT_NGRID, dtype=int) fail_curve_ap = np.zeros(DEFAULT_NGRID, dtype=int) fail_curve_prg = np.zeros(DEFAULT_NGRID, dtype=int) for ii in range(runs): mu = np.random.randn(2) S = np.random.randn(2, 2) S = np.dot(S, S.T) # Coverage, esp at edges, is worse for imbalanced data. See issue #20. p = 0.5 x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID) true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 1.0]]), pc.LINEAR) y_true = np.random.rand(N) <= p y_score = np.random.multivariate_normal(mu, S, size=N) if np.random.randn() <= 0.5: # resample to test dupes idx = np.random.choice(N, size=N, replace=True) y_score = y_score[idx, :] y_score, y_score_ref = y_score.T y_score = np.stack((np.zeros(N), y_score), axis=1) y_score_ref = np.stack((np.zeros(N), y_score_ref), axis=1) # Coverage doesn't hold at edges, hence [0.05, 0.95]. 
See issue #20. x_grid = np.linspace(0.05, 0.95, DEFAULT_NGRID) fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial( y_true, y_score, y_score_ref, true_curve, pc.roc_curve, next(seeds), x_grid ) fail[0] += fail_EB fail[1] += fail_P fail[2] += fail_EB2 fail[3] += fail_P2 fail_curve_roc += fail_curve true_curve = (np.array([[0.0, 1.0]]), np.array([[p, p]]), pc.PREV) fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial( y_true, y_score, y_score_ref, true_curve, pc.recall_precision_curve, next(seeds), x_grid ) fail[4] += fail_EB fail[5] += fail_P fail[6] += fail_EB2 fail[7] += fail_P2 fail_curve_ap += fail_curve x_grid = np.linspace(0.0, 0.99, DEFAULT_NGRID) true_curve = (np.array([[0.0, 1.0]]), np.array([[0.0, 0.0]]), pc.PREV) fail_EB, fail_P, fail_EB2, fail_P2, fail_curve = run_trial( y_true, y_score, y_score_ref, true_curve, pc.prg_curve, next(seeds), x_grid ) fail[8] += fail_EB fail[9] += fail_P fail[10] += fail_EB2 fail[11] += fail_P2 fail_curve_prg += fail_curve sub_FPR = _FPR / 4.0 expect_p_fail = 1.0 - confidence fail_check_stat(fail, runs, expect_p_fail, sub_FPR) print("ROC curve") fail_check_stat(fail_curve_roc, runs, expect_p_fail, sub_FPR) print("RP curve") fail_check_stat(fail_curve_ap, runs, expect_p_fail, sub_FPR) print("PRG curve") fail_check_stat(fail_curve_prg, runs, expect_p_fail, sub_FPR) def test_boot_mean(runs=100): N = 201 confidence = 0.95 fail = 0 for ii in range(runs): mu = np.random.randn() S = np.abs(np.random.randn()) x = mu + S * np.random.randn(N) mu_est = np.mean(x) EB = bt.boot_EB(x, confidence=0.95) fail += np.abs(mu - mu_est) > EB expect_p_fail = 1.0 - confidence print("boot mean") fail_check_stat([fail], runs, expect_p_fail, _FPR) def test_boot_EB_and_test(runs=100): """Arguably this should do out to its own file since it tests bt core.""" mu = np.random.randn() stdev = np.abs(np.random.randn()) N = 201 confidence = 0.95 def run_trial(x, true_value): _, _, CI = bt._boot_EB_and_test(x, confidence=confidence, return_CI=True) LB, UB = CI fail_CI = (true_value < LB) or (UB < true_value) _, pval, CI = bt._boot_EB_and_test(x - true_value, confidence=confidence, return_CI=True) LB, UB = CI fail_CI2 = (0 < LB) or (UB < 0) fail_P = pval < 1.0 - confidence return fail_CI, fail_CI2, fail_P fail = [0] * 3 for ii in range(runs): x = mu + stdev * np.random.randn(N) fail_CI, fail_CI2, fail_P = run_trial(x, mu) fail[0] += fail_CI fail[1] += fail_CI2 fail[2] += fail_P expect_p_fail = 1.0 - confidence print("boot mean and test") fail_check_stat(fail, runs, expect_p_fail, _FPR) if __name__ == "__main__": np.random.seed(56467) test_boot() test_boot_mean() test_boot_EB_and_test() print("passed")
35.480583
118
0.623341
0
0
0
0
0
0
0
0
749
0.102476
43c8749a8ff42646c3b9643c7de460258d1664ae
68
py
Python
TTBenchmark/check_benchmark.py
yuqil725/benchmark_lib
f404ff829d7b3a8bb0f6b00689038cf533bba83e
[ "MIT" ]
null
null
null
TTBenchmark/check_benchmark.py
yuqil725/benchmark_lib
f404ff829d7b3a8bb0f6b00689038cf533bba83e
[ "MIT" ]
null
null
null
TTBenchmark/check_benchmark.py
yuqil725/benchmark_lib
f404ff829d7b3a8bb0f6b00689038cf533bba83e
[ "MIT" ]
null
null
null
def check_difference():
    pass


def update_benchmark():
    pass
9.714286
23
0.676471
0
0
0
0
0
0
0
0
0
0
43c90a0a29279010bde058050d6af3ae4d07f61d
3,047
py
Python
core/test/test_timeseries_study.py
ajmal017/amp
8de7e3b88be87605ec3bad03c139ac64eb460e5c
[ "BSD-3-Clause" ]
null
null
null
core/test/test_timeseries_study.py
ajmal017/amp
8de7e3b88be87605ec3bad03c139ac64eb460e5c
[ "BSD-3-Clause" ]
null
null
null
core/test/test_timeseries_study.py
ajmal017/amp
8de7e3b88be87605ec3bad03c139ac64eb460e5c
[ "BSD-3-Clause" ]
null
null
null
from typing import Any, Dict import numpy as np import pandas as pd import core.artificial_signal_generators as sig_gen import core.statistics as stats import core.timeseries_study as tss import helpers.unit_test as hut class TestTimeSeriesDailyStudy(hut.TestCase): def test_usual_case(self) -> None: idx = pd.date_range("2018-12-31", "2019-01-31") vals = np.random.randn(len(idx)) ts = pd.Series(vals, index=idx) tsds = tss.TimeSeriesDailyStudy(ts) tsds.execute() class TestTimeSeriesMinutelyStudy(hut.TestCase): def test_usual_case(self) -> None: idx = pd.date_range("2018-12-31", "2019-01-31", freq="5T") vals = np.random.randn(len(idx)) ts = pd.Series(vals, index=idx) tsms = tss.TimeSeriesMinutelyStudy(ts, freq_name="5 minutes") tsms.execute() class TestMapDictToDataframeTest1(hut.TestCase): def test1(self) -> None: stat_funcs = { "norm_": stats.apply_normality_test, "adf_": stats.apply_adf_test, "kpss_": stats.apply_kpss_test, } result_dict = self._get_dict_of_series(1) actual = tss.map_dict_to_dataframe( dict_=result_dict, functions=stat_funcs ) actual_string = hut.convert_df_to_string(actual, index=True) self.check_string(actual_string) def test2(self) -> None: stat_funcs = { "norm_": stats.apply_normality_test, "adf_": stats.apply_adf_test, "kpss_": stats.apply_kpss_test, } result_dict = self._get_dict_of_series(1) actual = tss.map_dict_to_dataframe( dict_=result_dict, functions=stat_funcs, add_prefix=False, ) actual_string = hut.convert_df_to_string(actual, index=True) self.check_string(actual_string) def test3(self) -> None: stat_funcs = { "norm_": stats.apply_normality_test, "adf_": stats.apply_adf_test, "kpss_": stats.apply_kpss_test, } result_dict = self._get_dict_of_series(1) actual = tss.map_dict_to_dataframe( dict_=result_dict, functions=stat_funcs, progress_bar=False, ) actual_string = hut.convert_df_to_string(actual, index=True) self.check_string(actual_string) @staticmethod def _get_series(seed: int) -> pd.Series: arparams = np.array([0.75, -0.25]) maparams = np.array([0.65, 0.35]) arma_process = sig_gen.ArmaProcess(arparams, maparams) date_range = {"start": "1/1/2010", "periods": 40, "freq": "M"} series = arma_process.generate_sample( date_range_kwargs=date_range, seed=seed ) return series def _get_dict_of_series(self, seed: int) -> Dict[Any, pd.Series]: n_items = 15 test_keys = ["test_key_" + str(x) for x in range(n_items)] result_dict = {key: self._get_series(seed) for key in test_keys} return result_dict
33.855556
72
0.628159
2,816
0.924188
0
0
408
0.133902
0
0
169
0.055464
43cb99a95c79677af08d364cd292e9e06fb31368
718
py
Python
util.py
takat0m0/infoGAN
bc3ba0d4e407851e97f49322add98ea2e7e429de
[ "MIT" ]
null
null
null
util.py
takat0m0/infoGAN
bc3ba0d4e407851e97f49322add98ea2e7e429de
[ "MIT" ]
null
null
null
util.py
takat0m0/infoGAN
bc3ba0d4e407851e97f49322add98ea2e7e429de
[ "MIT" ]
null
null
null
#! -*- coding:utf-8 -*-

import os
import sys

import cv2
import numpy as np


def _resizing(img):
    #return cv2.resize(img, (256, 256))
    return cv2.resize(img, (32, 32))

def _reg(img):
    return img/127.5 - 1.0

def _re_reg(img):
    return (img + 1.0) * 127.5

def get_figs(target_dir):
    ret = []
    for file_name in os.listdir(target_dir):
        target_file = os.path.join(target_dir, file_name)
        img = cv2.imread(target_file, 0)
        ret.append(_reg(_resizing(img)))
    return np.asarray(ret, dtype = np.float32)

def dump_figs(figs, dump_dir):
    for i, fig in enumerate(figs):
        target_file = os.path.join(dump_dir, '{}.jpg'.format(i))
        cv2.imwrite(target_file, _re_reg(fig))
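# A minimal usage sketch, assuming the placeholder directories exist: get_figs() loads
# grayscale images, resizes them to 32x32 and scales pixels to [-1, 1]; dump_figs()
# maps them back to [0, 255] and writes JPEGs.
#
#   figs = get_figs('input_dir')
#   dump_figs(figs, 'output_dir')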
23.933333
64
0.639276
0
0
0
0
0
0
0
0
66
0.091922
43cc7a30161b57bb1e1d6f7efc6e267ff0a84af5
471
py
Python
myhoodApp/migrations/0002_healthfacilities_hospital_image.py
MutuaFranklin/MyHood
6ddd21c4a67936c8926d6f5a8665a06edf81f39e
[ "MIT" ]
null
null
null
myhoodApp/migrations/0002_healthfacilities_hospital_image.py
MutuaFranklin/MyHood
6ddd21c4a67936c8926d6f5a8665a06edf81f39e
[ "MIT" ]
null
null
null
myhoodApp/migrations/0002_healthfacilities_hospital_image.py
MutuaFranklin/MyHood
6ddd21c4a67936c8926d6f5a8665a06edf81f39e
[ "MIT" ]
null
null
null
# Generated by Django 3.2.7 on 2021-09-23 20:01

import cloudinary.models
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('myhoodApp', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='healthfacilities',
            name='hospital_image',
            field=cloudinary.models.CloudinaryField(blank=True, max_length=255, verbose_name='Hospital Image'),
        ),
    ]
23.55
111
0.647558
361
0.766454
0
0
0
0
0
0
122
0.259023
43cc95eb28ba86bd35c1811cb4456f10d8f69c56
380
py
Python
forecasting_algorithms/Multiple_Timeseries/VAR/var.py
ans682/SafePredict_and_Forecasting
30ac5a0b665fce090567476bc07b54489b2f3d0f
[ "BSD-3-Clause" ]
1
2021-08-05T23:01:47.000Z
2021-08-05T23:01:47.000Z
forecasting_algorithms/Multiple_Timeseries/VAR/var.py
ans682/SafePredict_and_Forecasting
30ac5a0b665fce090567476bc07b54489b2f3d0f
[ "BSD-3-Clause" ]
1
2021-12-22T08:26:13.000Z
2021-12-22T08:26:13.000Z
forecasting_algorithms/Multiple_Timeseries/VAR/var.py
ans682/SafePredict_and_Forecasting
30ac5a0b665fce090567476bc07b54489b2f3d0f
[ "BSD-3-Clause" ]
null
null
null
# VAR example
from statsmodels.tsa.vector_ar.var_model import VAR
from random import random
# contrived dataset with dependency
data = list()
for i in range(100):
    v1 = i + random()
    v2 = v1 + random()
    row = [v1, v2]
    data.append(row)
# fit model
model = VAR(data)
model_fit = model.fit()
# make prediction
yhat = model_fit.forecast(model_fit.y, steps=1)
print(yhat)
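# A minimal extension sketch, assuming the fitted `model_fit` above: a multi-step
# forecast works the same way, and each returned row is one future step of (v1, v2).
yhat_multi = model_fit.forecast(model_fit.y, steps=5)
print(yhat_multi)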
22.352941
51
0.697368
0
0
0
0
0
0
0
0
76
0.2
43ccba90b50389b99008103e1fcff4ea674ca290
2,140
py
Python
candidate-scrape.py
jonykarki/hamroscraper
a7e34a9cdca89be10422d045f1ed34e9956bd75f
[ "MIT" ]
2
2019-09-23T23:41:44.000Z
2019-10-06T03:13:17.000Z
candidate-scrape.py
jonykarki/hamroscraper
a7e34a9cdca89be10422d045f1ed34e9956bd75f
[ "MIT" ]
null
null
null
candidate-scrape.py
jonykarki/hamroscraper
a7e34a9cdca89be10422d045f1ed34e9956bd75f
[ "MIT" ]
4
2019-11-26T18:29:20.000Z
2021-01-22T06:30:20.000Z
import json
import urllib.request
import MySQLdb

db = MySQLdb.connect(host="localhost",    # your host, usually localhost
                     user="root",         # your username
                     passwd="",           # your password
                     db="election")
cur = db.cursor()

# user_agent for sending headers with the request
user_agent = 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.7) Gecko/2009021910 Firefox/3.0.7'
# header
headers={'User-Agent':user_agent,}

district = input("Enter the Name of the district: ")

url = "http://election.ujyaaloonline.com/api/candidates?district=" + district
request = urllib.request.Request(url, None, headers)
response = urllib.request.urlopen(request)
source = response.read()
# print(source)

data = json.loads(source)
#print(data['candidates']['2']['400'][0]['cName'])

election_area = data['election_areas']

# get all the possible election-areas from the district
# data needed for the database
'''
resultno :> autoincrement
constituencyname :>
stateno :> Remove the column?
districtno :>
candidate :>
gender :> Remove the column???
votes :> set to zero for now
'''

i = 0
j = 0
for key, value in election_area.items():
    area_key = key
    district_name = data['district_slug']
    try:
        for item in data["candidates"]['1'][area_key]:
            print(item['aName'])
            print(item["cName"])
            i = i + 1
    except:
        for item in data["candidates"]['2'][area_key]:
            constituencyname = item['aName'].encode('utf-8')
            candidatename = item["cName"].encode('utf-8')
            sql = "INSERT INTO `test` (`id`, `candidatename`, `constituencyname`) VALUES (NULL, %s, %s)"
            cur.execute(sql, (candidatename, constituencyname))
            db.commit()
            print('INSERTED ' + item["cName"] + " into the database")
            j = j + 1

print(data['district_slug'] + " has " + str(i) + " candidates in provincial election")
print(data['district_slug'] + " has " + str(j) + " candidates in federal election")
print("Total: " + str(i + j) + " candidates added to the database")
27.792208
105
0.619159
0
0
0
0
0
0
0
0
1,051
0.491121
43cde366d5fb7850e5493e9384c566462676fb5d
3,101
py
Python
sangita/hindi/lemmatizer.py
ashiscs/sangita
b90c49859339147137db1c2bdb60a1039a00c706
[ "Apache-2.0" ]
36
2017-05-30T04:41:06.000Z
2019-02-17T08:41:10.000Z
sangita/hindi/lemmatizer.py
07kshitij/sangita
b90c49859339147137db1c2bdb60a1039a00c706
[ "Apache-2.0" ]
13
2018-06-25T11:14:48.000Z
2021-05-15T17:57:47.000Z
sangita/hindi/lemmatizer.py
07kshitij/sangita
b90c49859339147137db1c2bdb60a1039a00c706
[ "Apache-2.0" ]
33
2018-06-23T21:46:39.000Z
2022-03-01T15:55:37.000Z
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Fri Jun 9 23:28:21 2017 @author: samriddhi """ import re import sangita.hindi.tokenizer as tok import sangita.hindi.corpora.lemmata as lt def numericLemmatizer(instr): lst = type([1,2,3]) tup = type(("Hello", "Hi")) string = type("Hello") num_match = re.compile(r'([०१२३४५६७८९]+[\.\,]*)+[०१२३४५६७८९]+|([-+]*\d+[\.\,]*)+\d+|([०१२३४५६७८९]+|\d+)') if(type(instr) == lst): for index,item in enumerate(instr): if(type(item) == tup): if num_match.search(str(item[0])): instr[index] = (instr[index][1], instr[index][1]) else: if num_match.search(str(item)): instr[index] = (instr[index], instr[index][1]) else: if(type(instr) == string): instr = tok.tokenize(instr) numericLemmatizer(instr) else: print("not supported") return(instr) def defaultLemmatizer(instr): lst = type([1,2,3]) tup = type(("Hello", "Hi")) string = type("Hello") if(type(instr) == lst): for index,item in enumerate(instr): if(type(item) != tup): instr[index] = (instr[index], instr[index]) else: if(type(instr) == string): instr = tok.tokenize(instr) defaultLemmatizer(instr) else: print("not supported") return(instr) def lookupLemmatizer(instr): lst = type([1,2,3]) tup = type(("Hello", "Hi")) string = type("Hello") lemmatalist = lt.drawlist() words = [] lemma = [] for item in lemmatalist: words.append(item.split("\t")[0]) lemma.append(item.split("\t")[1]) tokens = set(words) if(type(instr) == lst): for index,item in enumerate(instr): if(type(item) == tup): if item in tokens: tag = lemma[words.index(item)] instr[index] = (instr[index][1],tag) else: if(type(item) != tup): if item in tokens: tag = lemma[words.index(item)] instr[index] = (instr[index], tag) else: if(type(instr) == string): instr = tok.tokenize(instr) lookupLemmatizer(instr) else: print("not supported") return(instr) def Lemmatizer(instr): instr = lookupLemmatizer(instr) instr = numericLemmatizer(instr) instr = defaultLemmatizer(instr) return(instr) if __name__ == '__main__': input_str = 'पुंछ में हुई मुठभेड़ के बारे में एक सरकारी अधिकारी ने बताया कि १३वीं सिख लाईट इनफेंट्री द्वारा लश्कर-ए - ताइबा गुट के आतंकियों को नियंत्रण-रेखा पर चुनौती देने पर मुठभेड़ रात ११.४५ बजे शुरू हुई।' print(lookupLemmatizer(input_str)) print(numericLemmatizer(input_str)) print(defaultLemmatizer(input_str)) print(Lemmatizer(input_str))
27.936937
209
0.507256
0
0
0
0
0
0
0
0
863
0.249062
43cee9ce3aeb6af7cef400c841ab802c88461d4b
8,148
py
Python
gslib/tests/test_stet_util.py
ttobisawa/gsutil
ef665b590aa8e6cecfe251295bce8bf99ea69467
[ "Apache-2.0" ]
null
null
null
gslib/tests/test_stet_util.py
ttobisawa/gsutil
ef665b590aa8e6cecfe251295bce8bf99ea69467
[ "Apache-2.0" ]
null
null
null
gslib/tests/test_stet_util.py
ttobisawa/gsutil
ef665b590aa8e6cecfe251295bce8bf99ea69467
[ "Apache-2.0" ]
null
null
null
# -*- coding: utf-8 -*- # Copyright 2021 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for stet_util.py.""" from __future__ import absolute_import from __future__ import print_function from __future__ import division from __future__ import unicode_literals import os import shutil from gslib import storage_url from gslib.tests import testcase from gslib.tests import util from gslib.tests.util import unittest from gslib.utils import execution_util from gslib.utils import stet_util import mock class TestStetUtil(testcase.GsUtilUnitTestCase): """Test STET utils.""" @mock.patch.object(execution_util, 'ExecuteExternalCommand') def test_stet_upload_uses_binary_and_config_from_boto( self, mock_execute_external_command): fake_config_path = self.CreateTempFile() mock_execute_external_command.return_value = ('stdout', 'stderr') mock_logger = mock.Mock() source_url = storage_url.StorageUrlFromString('in') destination_url = storage_url.StorageUrlFromString('gs://bucket/obj') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil', 'stet_config_path', fake_config_path), ]): out_file_url = stet_util.encrypt_upload(source_url, destination_url, mock_logger) self.assertEqual(out_file_url, storage_url.StorageUrlFromString('in_.stet_tmp')) mock_execute_external_command.assert_called_once_with([ 'fake_binary_path', 'encrypt', '--config-file={}'.format(fake_config_path), '--blob-id=gs://bucket/obj', 'in', 'in_.stet_tmp', ]) mock_logger.debug.assert_called_once_with('stderr') @mock.patch.object(execution_util, 'ExecuteExternalCommand') def test_stet_upload_runs_with_binary_from_path_with_correct_settings( self, mock_execute_external_command): fake_config_path = self.CreateTempFile() temporary_path_directory = self.CreateTempDir() fake_stet_binary_path = self.CreateTempFile(tmpdir=temporary_path_directory, file_name='stet') previous_path = os.getenv('PATH') os.environ['PATH'] += os.path.pathsep + temporary_path_directory mock_execute_external_command.return_value = ('stdout', 'stderr') mock_logger = mock.Mock() source_url = storage_url.StorageUrlFromString('in') destination_url = storage_url.StorageUrlFromString('gs://bucket/obj') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', None), ('GSUtil', 'stet_config_path', fake_config_path), ]): out_file_url = stet_util.encrypt_upload(source_url, destination_url, mock_logger) self.assertEqual(out_file_url, storage_url.StorageUrlFromString('in_.stet_tmp')) mock_execute_external_command.assert_called_once_with([ fake_stet_binary_path, 'encrypt', '--config-file={}'.format(fake_config_path), '--blob-id=gs://bucket/obj', 'in', 'in_.stet_tmp', ]) mock_logger.debug.assert_called_once_with('stderr') os.environ['PATH'] = previous_path @mock.patch.object(execution_util, 'ExecuteExternalCommand') def test_stet_upload_uses_config_from_default_path_with_correct_settings( self, mock_execute_external_command): mock_execute_external_command.return_value = ('stdout', 'stderr') mock_logger = mock.Mock() 
source_url = storage_url.StorageUrlFromString('in') destination_url = storage_url.StorageUrlFromString('gs://bucket/obj') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil', 'stet_config_path', None), ]): with mock.patch.object(os.path, 'exists', new=mock.Mock(return_value=True)): out_file_url = stet_util.encrypt_upload(source_url, destination_url, mock_logger) self.assertEqual(out_file_url, storage_url.StorageUrlFromString('in_.stet_tmp')) mock_execute_external_command.assert_called_once_with([ 'fake_binary_path', 'encrypt', '--config-file={}'.format( os.path.expanduser(stet_util.DEFAULT_STET_CONFIG_PATH)), '--blob-id=gs://bucket/obj', 'in', 'in_.stet_tmp', ]) mock_logger.debug.assert_called_once_with('stderr') @mock.patch.object(shutil, 'move') @mock.patch.object(execution_util, 'ExecuteExternalCommand') def test_stet_download_runs_binary_and_replaces_temp_file( self, mock_execute_external_command, mock_move): fake_config_path = self.CreateTempFile() mock_execute_external_command.return_value = ('stdout', 'stderr') mock_logger = mock.Mock() source_url = storage_url.StorageUrlFromString('gs://bucket/obj') destination_url = storage_url.StorageUrlFromString('out') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil', 'stet_config_path', fake_config_path), ]): stet_util.decrypt_download(source_url, destination_url, mock_logger) mock_execute_external_command.assert_called_once_with([ 'fake_binary_path', 'decrypt', '--config-file={}'.format(fake_config_path), '--blob-id=gs://bucket/obj', 'out', 'out_.stet_tmp' ]) mock_logger.debug.assert_called_once_with('stderr') mock_move.assert_called_once_with('out_.stet_tmp', 'out') @mock.patch.object(stet_util, '_get_stet_binary_from_path', new=mock.Mock(return_value=None)) def test_stet_util_errors_if_no_binary(self): fake_config_path = self.CreateTempFile() source_url = storage_url.StorageUrlFromString('in') destination_url = storage_url.StorageUrlFromString('gs://bucket/obj') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', None), ('GSUtil', 'stet_config_path', fake_config_path), ]): with self.assertRaises(KeyError): stet_util.encrypt_upload(source_url, destination_url, None) def test_stet_util_errors_if_no_config(self): source_url = storage_url.StorageUrlFromString('in') destination_url = storage_url.StorageUrlFromString('gs://bucket/obj') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil', 'stet_config_path', None), ]): with mock.patch.object(os.path, 'exists', new=mock.Mock(return_value=False)): with self.assertRaises(KeyError): stet_util.encrypt_upload(source_url, destination_url, None) @mock.patch.object(os.path, 'expanduser', autospec=True) @mock.patch.object(execution_util, 'ExecuteExternalCommand', new=mock.Mock(return_value=('stdout', 'stderr'))) def test_stet_util_expands_home_directory_symbol(self, mock_expanduser): fake_config_path = self.CreateTempFile() source_url = storage_url.StorageUrlFromString('in') destination_url = storage_url.StorageUrlFromString('gs://bucket/obj') with util.SetBotoConfigForTest([ ('GSUtil', 'stet_binary_path', 'fake_binary_path'), ('GSUtil', 'stet_config_path', fake_config_path), ]): stet_util.encrypt_upload(source_url, destination_url, mock.Mock()) mock_expanduser.assert_has_calls( [mock.call('fake_binary_path'), mock.call(fake_config_path)])
40.74
80
0.689494
7,103
0.871748
0
0
6,422
0.788169
0
0
2,001
0.245582
43cfdd42faa2065cb7d2cefc439413b4ed53c719
4,471
py
Python
markdown_editing/tests/test_extension.py
makyo/markdown-editing
ecbc8970f4d416038f9d2c46fae22d4dbb79c647
[ "MIT" ]
null
null
null
markdown_editing/tests/test_extension.py
makyo/markdown-editing
ecbc8970f4d416038f9d2c46fae22d4dbb79c647
[ "MIT" ]
null
null
null
markdown_editing/tests/test_extension.py
makyo/markdown-editing
ecbc8970f4d416038f9d2c46fae22d4dbb79c647
[ "MIT" ]
null
null
null
from markdown import markdown from unittest import TestCase from markdown_editing.extension import EditingExtension class TestExtension(TestCase): def test_substitution(self): source = '~{out with the old}{in with the new}' expected = '<p><span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></p>' html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) # Only need to test this once. html = markdown(source, extensions=['markdown_editing']) self.assertEqual(html, expected) def test_addition(self): source = 'foo +{bar} baz +{qux}(yap)' expected = '<p>foo <ins class="addition">bar</ins> baz <ins class="addition">qux<q class="comment">yap</q></ins></p>' html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) def test_deletion(self): source = 'foo -{bar} baz -{qux}(yap)' expected = '<p>foo <del class="deletion">bar</del> baz <del class="deletion">qux<q class="comment">yap</q></del></p>' html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) def test_selected(self): source = 'foo ?{bar}(qux) baz' expected = '<p>foo <mark class="selected">bar<q class="comment">qux</q></mark> baz</p>' html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) def test_comments(self): self.maxDiff = None source = """ * Substitution: ~{out with the old}{in with the new} * With comment: ~{out with the old}{in with the new}(is what I always say) * With attribution: ~{out with the old}{in with the new}(is what I always say (Makyo)) * With date: ~{out with the old}{in with the new}(is what I always say (Makyo 2020-04-21)) * Comment thread: +{Foxes}(More foxes are always good)!{SGTM} * Comment with attribution: !{SGTM}(Makyo 2020-04-22) """.strip() expected = """ <ul> <li>Substitution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins></span></li> <li>With comment: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say</q></span></li> <li>With attribution: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span></q></span></li> <li>With date: <span class="substitution"><del>out with the old</del><ins>in with the new</ins><q class="comment">is what I always say<span class="attribution">Makyo</span><span class="date">2020-04-21</span></q></span></li> <li>Comment thread: <ins class="addition">Foxes<q class="comment">More foxes are always good</q></ins><q class="comment">SGTM</q></li> <li>Comment with attribution: <q class="comment">SGTM<span class="attribution">Makyo</span><span class="date">2020-04-22</span></q></li> </ul> """.strip() html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) def test_level(self): source = """ ``` ?{Some text}(bad wolf) ``` ?{Some text}(bad wolf) > ?{Some text}(good doggy) """.strip() expected = """ <p><code>?{Some text}(bad wolf)</code></p> <pre><code>?{Some text}(bad wolf) </code></pre> <blockquote> <p><mark class="selected">Some text<q class="comment">good doggy</q></mark></p> </blockquote> """.strip() html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) def test_nesting(self): source = """ ?{The only currently working form of nesting}(But what if...!{NO}) """.strip() expected = """ <p><mark class="selected">The only currently working form of nesting<q 
class="comment">But what if...<q class="comment">NO</q></q></mark></p> """.strip() html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected) def test_mixed(self): source = """ +{some *fancy* new stuff}(With a **fancy** comment) """.strip() expected = """ <p><ins class="addition">some <em>fancy</em> new stuff<q class="comment">With a <strong>fancy</strong> comment</q></ins></p> """.strip() html = markdown(source, extensions=[EditingExtension()]) self.assertEqual(html, expected)
39.566372
224
0.636547
4,350
0.972937
0
0
0
0
0
0
2,729
0.610378
43cffed323ab5de7f6be36b25de0a210ece3af09
15,477
py
Python
apps/siren/test_handlers.py
thomasyi17/diana2
2167053dfe15b782d96cb1e695047433f302d4dd
[ "MIT" ]
15
2019-02-12T23:26:09.000Z
2021-12-21T08:53:58.000Z
apps/siren/test_handlers.py
thomasyi17/diana2
2167053dfe15b782d96cb1e695047433f302d4dd
[ "MIT" ]
2
2019-01-23T21:13:12.000Z
2019-06-28T15:45:51.000Z
apps/siren/test_handlers.py
thomasyi17/diana2
2167053dfe15b782d96cb1e695047433f302d4dd
[ "MIT" ]
6
2019-01-23T20:22:50.000Z
2022-02-03T03:27:04.000Z
""" SIREN/DIANA basic functionality testing framework Requires env vars: - GMAIL_USER - GMAIL_APP_PASSWORD - GMAIL_BASE_NAME -- ie, abc -> [email protected] These env vars are set to default: - ORTHANC_PASSWORD - SPLUNK_PASSWORD - SPLUNK_HEC_TOKEN TODO: Move stuff to archive after collected TODO: Write data into daily folder or something from mi-share ingress TODO: Suppress dicom-simplify missing (series) creation time """ import time import logging import shutil import io import tempfile from pathlib import Path from pprint import pformat from contextlib import redirect_stdout from multiprocessing import Process from datetime import datetime, timedelta from interruptingcow import timeout from crud.manager import EndpointManager from crud.abc import Watcher, Trigger from crud.endpoints import Splunk from wuphf.endpoints import SmtpMessenger from diana.apis import Orthanc, ObservableOrthanc, DcmDir, ObservableDcmDir from diana.dixel import Dixel, ShamDixel from diana.utils.dicom import DicomLevel as DLv, DicomEventType as DEv from wuphf.cli.string_descs import * from diana.utils import unpack_data from crud.utils import deserialize_dict from diana.utils.gateways import suppress_urllib_debug from diana.utils.endpoint.watcher import suppress_watcher_debug from handlers import handle_upload_dir, handle_upload_zip, handle_notify_study, \ handle_file_arrived, start_watcher, tagged_studies from trial_dispatcher import TrialDispatcher as Dispatcher LOCAL_SERVICES = False # Set False to use UMich services USE_GMAIL = True # Set False to use UMich smtp DO_DIR_UPLOAD = False CHECK_SPLUNK = False # Set False to skip long wait for dixel to index CHECK_WATCH_STUDIES= False # Set False to skip long wait for orthanc watcher EMAIL_DRYRUN = False # Set False to send live emails # CONFIG _services = "@services.yaml" _subscriptions = "@subscriptions.yaml" os.environ["SPLUNK_INDEX"] = "testing" SMTP_MESSENGER_NAME = "smtp_server" if LOCAL_SERVICES: # Set everythin back to default os.environ["UMICH_HOST"] = "localhost" # For testing del os.environ["ORTHANC_USER"] del os.environ["ORTHANC_PASSWORD"] del os.environ["SPLUNK_USER"] del os.environ["SPLUNK_PASSWORD"] if USE_GMAIL: SMTP_MESSENGER_NAME = "gmail:" test_email_addr1 = "[email protected]" #test_email_addr1 = "[email protected]" #test_email_addr1 = os.environ.get("TEST_EMAIL_ADDR1") # os.environ["TEST_GMAIL_BASE"] = test_email_addr1.split("@")[0] anon_salt = "Test+Test+Test" fkey = b'o-KzB3u1a_Vlb8Ji1CdyfTFpZ2FvdsPK4yQCRzFCcss=' msg_t = """to: {{ recipient.email }}\nfrom: {{ from_addr }}\nsubject: Test Message\n\nThis is the message text: "{{ item.msg_text }}"\n""" notify_msg_t = "@./notify.txt.j2" # TESTING CONfIG test_sample_zip = os.path.abspath("../../tests/resources/dcm_zip/test.zip") test_sample_file = os.path.abspath("../../tests/resources/dcm/IM2263") test_sample_dir = os.path.expanduser("~/data/test") # Need to dl separately # TESTS def test_upload_one(orth: Orthanc, dixel: Dixel): print("Testing can upload") orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) orth.put(dixel) assert (len(orth.studies()) > 0) assert (orth.exists(dixel)) print("Passed!") return True def test_anonymize_one(orth: Orthanc, dixel: Dixel): print("Testing can anonymize, tag, and untag") orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) orth.put(dixel) anon = ShamDixel.from_dixel(dixel, salt=anon_salt) afile = orth.anonymize(anon, replacement_map=anon.orthanc_sham_map()) anon.file = afile orth.put(anon) 
orth.putm(anon.sham_parent_oid(DLv.STUDIES), level=DLv.STUDIES, key="signature", value=anon.pack_fields(fkey)) assert (len(orth.studies()) == 2) orth.delete(dixel) assert (len(orth.studies()) == 1) oid = orth.studies()[0] test = orth.get(oid) assert( test.tags["PatientName"] == anon.meta["ShamName"] ) enc = orth.getm(test, key="signature") tags = unpack_data(enc, fkey) assert( tags["PatientName"] in dixel.tags["PatientName"] ) print("Passed!") return True def test_index_one( splunk: Splunk, dixel: Dixel, check_exists=CHECK_SPLUNK ): print("Testing can index") splunk.put(dixel, index=os.environ.get("SPLUNK_INDEX")) if check_exists: print("Waiting for 1 min to index") time.sleep(60) time_range = [ datetime.now()-timedelta(minutes=2), datetime.now() ] r = splunk.find("search index=testing", time_range=time_range) logging.debug(r) assert( len(r) > 0 ) print("Passed") return True def test_email_messenger( messenger: SmtpMessenger, dryrun=EMAIL_DRYRUN ): print("Testing can email from template") outgoing = "The quick brown fox jumped over the lazy dog" data = {"item": {"msg_text": outgoing}, "recipient": {"email": test_email_addr1}} msg = messenger.get(data, target=test_email_addr1) assert( test_email_addr1 in msg ) assert( outgoing in msg ) if not dryrun: messenger.send(data, target=test_email_addr1) print("Passed!") return True def test_distribute( subscriptions, messenger: SmtpMessenger ): print("Testing can dispatch") ch, subs = deserialize_dict(subscriptions) dispatch = Dispatcher(channel_tags=ch) dispatch.add_subscribers(subs) messenger.set_msg_t(notify_msg_t) dispatch.email_messenger = messenger logging.debug(pformat(dispatch.subscribers)) data = {"tags": {"AccessionNumber": "ABC123", "PatientName": "DOE^JOHN^S"}, "meta": {"signature": {"trial": "hobit", "site": "duke"} } } sent = dispatch.put(data, dryrun=EMAIL_DRYRUN) data["meta"]["signature"]["site"] = "detroit" sent += dispatch.put(data, dryrun=EMAIL_DRYRUN) print(sent) msgs = [x['msg'] for x in sent] msgs = "\n".join(msgs) # logging.debug(pformat(msgs)) assert( "SIREN/HOBIT" in msgs ) assert( "[email protected]" in msgs ) assert( 'subject jacket for "DOE^JOHN^S"' in msgs ) print("Passed!") return True def test_upload_dir_handler(dcm_dir: DcmDir, orth: Orthanc): print("Testing can upload dir w handler") orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) handle_upload_dir(dcm_dir, orth, fkey, anon_salt=anon_salt) assert (len(orth.instances()) > 20) print("Passed!") return True def test_upload_zip_handler(zip_file, orth: Orthanc): print("Testing can upload zip w handler") orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) handle_upload_zip(DcmDir(), zip_file, orth, fkey, anon_salt=anon_salt) assert (len(orth.instances()) > 1) print("Passed!") return True def test_file_arrived_handler(dcm_file, zip_file, orth: Orthanc): print("Testing can handle file arrived") orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) watch_path = tempfile.mkdtemp() site_path = os.path.join(watch_path, "my_trial", "my_site") os.makedirs(site_path) shutil.copy(zip_file, site_path) data = {"fn": os.path.join( site_path, Path(zip_file).name )} handle_file_arrived(data, DcmDir(path=watch_path), orth, fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature") assert (len(orth.instances()) > 1) oid = orth.studies()[0] data = orth.getm(oid, key="signature") clear = unpack_data(data, fkey) print(pformat(clear)) assert(clear["trial"] == "my_trial") orth.clear() tagged_studies.clear() assert (len(orth.studies()) 
== 0) shutil.copy(dcm_file, site_path) data = {"fn": os.path.join(site_path, Path(dcm_file).name)} handle_file_arrived(data, DcmDir(path=watch_path), orth, fkey=fkey, anon_salt=anon_salt, signature_meta_key="signature") assert (len(orth.instances()) == 1) time.sleep(1.0) oid = orth.studies()[0] data = orth.getm(oid, key="signature") clear = unpack_data(data, fkey) print(pformat(clear)) assert(clear["trial"] == "my_trial") orth.clear() assert (len(orth.studies()) == 0) shutil.rmtree(watch_path, ignore_errors=True) print("Passed!") return True def test_notify_handler(dixel, orth: Orthanc, subscriptions, messenger: SmtpMessenger, indexer: Splunk, dryrun=EMAIL_DRYRUN): orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) orth.put(dixel) dixel.meta["trial"] = "hobit" dixel.meta["site"] = "testing" orth.putm(dixel.parent_oid(DLv.STUDIES), level=DLv.STUDIES, key="signature", value=dixel.pack_fields(fkey, fields=["trial", "site"])) ch, subs = deserialize_dict(subscriptions) dispatch = Dispatcher( channel_tags=ch ) dispatch.add_subscribers(subs) messenger.set_msg_t(notify_msg_t) dispatch.email_messenger = messenger data = {"oid": dixel.parent_oid(DLv.STUDIES)} handle_notify_study(data, source=orth, dispatcher=dispatch, dryrun=dryrun, indexer=indexer, index_name=SPLUNK_INDEX, fkey=fkey) print("Passed!") return True def test_watch_orthanc(test_dixel, orth: ObservableOrthanc): orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) watcher = Watcher() trigger0 = Trigger( evtype=DEv.INSTANCE_ADDED, source=orth, action=orth.say) watcher.add_trigger(trigger0) trigger1 = Trigger( evtype=DEv.STUDY_ADDED, source=orth, action=orth.say) watcher.add_trigger(trigger1) def runner(): """Pause to start watcher and then copy sample file to incoming""" time.sleep(1.0) orth.put(test_dixel) p = Process(target=runner) p.start() f = io.StringIO() print("Starting watcher") with redirect_stdout(f): print("In capture") try: with timeout(5): # Give it a little time to say the instance watcher.run() except RuntimeError: print("Stopping watcher") finally: watcher.stop() out = f.getvalue() print("Watcher output:") print(out) if dixel.oid() in out: print("Passed!") return True def test_watch_dir(test_file): watch_path = tempfile.mkdtemp() site_path = os.path.join(watch_path, "my_trial", "my_site") os.makedirs(site_path) dcm_dir = ObservableDcmDir(path=watch_path) watcher = Watcher() trigger = Trigger( evtype=DEv.FILE_ADDED, source=dcm_dir, action=dcm_dir.say) watcher.add_trigger(trigger) def runner(): """Pause to start watcher and then copy sample file to incoming""" time.sleep(1.0) shutil.copy(test_file, site_path) p = Process(target=runner) p.start() f = io.StringIO() print("Starting watcher") with redirect_stdout(f): print("In capture") try: with timeout(5): # Give it a little time to say the filename watcher.run() except RuntimeError: print("Stopping watcher") finally: watcher.stop() out = f.getvalue() print("Watcher output:") print(out) shutil.rmtree(watch_path, ignore_errors=True) from pathlib import Path if Path(test_file).name in out: print("Passed!") return True def test_siren_receiver(test_file, orth: Orthanc, subscriptions, messenger: SmtpMessenger, indexer: Splunk, dryrun=EMAIL_DRYRUN): orth.clear() tagged_studies.clear() assert (len(orth.studies()) == 0) ch, subs = deserialize_dict(subscriptions) dispatch = Dispatcher( channel_tags=ch ) dispatch.add_subscribers(subs) messenger.set_msg_t(notify_msg_t) dispatch.email_messenger = messenger watch_path = tempfile.mkdtemp() site_path = 
os.path.join(watch_path, "hobit", "testing") os.makedirs(site_path) incoming = ObservableDcmDir(path=watch_path) def runner(): """Pause to start watcher and then copy sample file to incoming/trial/site""" time.sleep(1.0) shutil.copy(test_file, site_path) p = Process(target=runner) p.start() f = io.StringIO() print("Starting SIREN Receiver") with redirect_stdout(f): print("In capture") try: with timeout(90): # Give it a little time for the study to settle watcher = start_watcher( incoming, orth, fkey=fkey, anon_salt=anon_salt, dispatcher=dispatch, dryrun=dryrun, indexer=indexer, index_name=os.environ.get("SPLUNK_INDEX") ) except RuntimeError: print("Stopping watcher subprocess") out = f.getvalue() print("SIREN Reciever output:") print(out) shutil.rmtree(watch_path, ignore_errors=True) return True if __name__ == "__main__": logging.basicConfig(level=logging.DEBUG) suppress_urllib_debug() suppress_watcher_debug() # Create service endpoints services = EndpointManager(serialized_ep_descs=_services) print(pformat(services.ep_descs)) orth: ObservableOrthanc = services.get("hobit") orth.polling_interval = 2.0 messenger: SmtpMessenger = services.get(SMTP_MESSENGER_NAME) messenger.msg_t = msg_t splunk: Splunk = services.get("splunk") dcm_dir = DcmDir(path=test_sample_dir) # Load a dixel dixel = dcm_dir.get("HOBIT1172/IM0", file=True) # assert( dixel ) # assert( dixel.file ) # # # Verify that all endpoints are online # assert( orth.check() ) # assert( messenger.check() ) # assert( splunk.check() ) # # # Verify basic capabilities: # # - upload # # - anonymize # # - index # # - message # # - distribute # # assert( test_upload_one(orth, dixel) ) # assert( test_anonymize_one(orth, dixel) ) # assert( test_index_one(splunk, dixel) ) assert( test_email_messenger(messenger) ) # assert( test_distribute(_subscriptions, messenger) ) exit() # Verify observer daemons: # - watch dir # - watch orth assert( test_watch_dir(test_sample_file) ) assert( test_watch_orthanc(dixel, orth) ) # Verify handlers: # - directory # - zip # - file # - notify if DO_DIR_UPLOAD: assert( test_upload_dir_handler(dcm_dir, orth) ) assert( test_upload_zip_handler(test_sample_zip, orth) ) assert( test_file_arrived_handler(test_sample_file, test_sample_zip, orth) ) assert( test_notify_handler(dixel, orth, _subscriptions, messenger, splunk) ) # Verify watcher pipeline # - run watcher assert( test_siren_receiver(test_sample_file, orth, _subscriptions, messenger, splunk) )
27.588235
151
0.648511
0
0
0
0
0
0
0
0
3,600
0.232603
43d0fea901e478a41a7213fecbddf4d86fc4b79e
6,735
py
Python
deptree.py
jeking3/boost-deptree
27eda54df2d022af17347df4ba4892c39392e474
[ "BSL-1.0" ]
null
null
null
deptree.py
jeking3/boost-deptree
27eda54df2d022af17347df4ba4892c39392e474
[ "BSL-1.0" ]
null
null
null
deptree.py
jeking3/boost-deptree
27eda54df2d022af17347df4ba4892c39392e474
[ "BSL-1.0" ]
null
null
null
# # Copyright (c) 2019 James E. King III # # Use, modification, and distribution are subject to the # Boost Software License, Version 1.0. (See accompanying file # LICENSE_1_0.txt or copy at https://www.boost.org/LICENSE_1_0.txt) # import json import networkx import re from pathlib import Path class BoostDependencyTree(object): """ Generates a PlantUML dependency tree to visualize the dependencies. One of the benefits of generating a visual graph is that cycles become immediately evident. """ EDGES = { 2: "-->", 1: "..>" } STRENGTHS = { "include": 2, "src": 2, "test": 1, "tests": 1 } def __init__(self, root: Path, out: Path): """ Arguments: root: path to BOOST_ROOT out: path to output file """ self.exp = re.compile(r"^\s*#\s*include\s*[<\"](?P<header>[^>\"]+)[>\"]\s*$") self.graph = networkx.DiGraph() self.headers = {} # key: header include path; value: repo key self.repos = {} # key: repo key; value: repo path self.out = out self.root = root self.libs = self.root / "libs" with (self.libs / "config" / "include" / "boost" / "version.hpp").open() as fp: vlines = fp.readlines() for vline in vlines: if "BOOST_LIB_VERSION" in vline: #define BOOST_LIB_VERSION "1_71" tokens = vline.split(" ") self.boost_version = tokens[2].strip()[1:-1].replace("_", ".") def load(self): self.collect() self.analyze() def collect(self): """ Locate every .hpp and .h file and associate it with a repository. """ metas = self.libs.glob("**/libraries.json") for meta in metas: with meta.open() as fp: metadata = json.loads(fp.read()) repodir = meta.parent.parent metadata = metadata[0] if isinstance(metadata, list) else metadata # for boost/core repokey = metadata["key"] repoinc = repodir / "include" if repoinc.is_dir(): # libs/geometry/index has no include but looks like a repo? self.graph.add_node(repokey) self.repos[repokey] = repodir headers = repoinc.glob("**/*.h??") for header in headers: # print(str(header)) incpath = header.relative_to(repoinc) assert incpath not in self.headers,\ f"{incpath} in {repokey} already in header map from "\ f"{self.headers[incpath]} - duplicate header paths!" self.headers[str(incpath)] = repokey def analyze(self): """ Find every include statement and create a graph of dependencies. """ for repokey, repodir in self.repos.items(): for ext in ["c", "cpp", "h", "hpp", "ipp"]: files = repodir.glob("**/*." + ext) for code in files: inside = code.relative_to(repodir).parts[0] if inside not in self.STRENGTHS.keys(): continue weight = self.STRENGTHS[inside] with code.open() as fp: try: #print(str(code)) source = fp.readlines() except UnicodeDecodeError: continue for line in source: match = self.exp.search(line) if match: include = match.group("header") if include in self.headers: deprepo = self.headers[include] if repokey != deprepo: # avoid self-references data = self.graph.get_edge_data(repokey, deprepo, {"weight": 0}) if data["weight"] > 0 and data["weight"] < weight: self.graph.remove_edge(repokey, deprepo) data["weight"] = 0 if data["weight"] == 0: self.graph.add_edge(repokey, deprepo, weight=weight) def report_cycles(self): with self.out.open("w") as fp: fp.write("@startuml\n") fp.write("\n") fp.write(f"title Boost {self.boost_version} Direct Dependency Cycles\n") fp.write("footer Generated by boost-deptree (C) 2019 James E. 
King III\n") fp.write("\n") for edge in self.graph.edges: fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"] if fwdweight > 1: if self.graph.get_edge_data(edge[1], edge[0], {"weight": 0})["weight"] > 1: fp.write(f"['{edge[0]}'] --> ['{edge[1]}']\n") fp.write("\n") fp.write("@enduml\n") def report_dependencies_from(self, repokey): with self.out.open("w") as fp: fp.write("@startuml\n") fp.write("\n") fp.write(f"title Boost {self.boost_version} dependencies of {repokey}\n") fp.write("footer Generated by boost-deptree (C) 2019 James E. King III\n") fp.write("\n") for edge in self.graph.edges: if edge[0] == repokey: fwdweight = self.graph.get_edge_data(edge[0], edge[1])["weight"] fp.write(f"['{edge[0]}'] {self.EDGES[fwdweight]} ['{edge[1]}']\n") fp.write("\n") fp.write("@enduml\n") if __name__ == "__main__": import argparse parser = argparse.ArgumentParser(description='Generate PlantUML dependency tree.') parser.add_argument('root', type=str, help='Boost root directory.') parser.add_argument('out', type=str, help='Output filename.') require_one = parser.add_mutually_exclusive_group(required=True) require_one.add_argument('--cycles', action='store_true', help='Show direct repository dependency cycles.') require_one.add_argument('--from', help='Show dependencies from a given repository.') args = parser.parse_args() root = Path(args.root) assert root.is_dir(), "root is not a directory" out = Path(args.out) tree = BoostDependencyTree(root, out) tree.load() if args.cycles: tree.report_cycles() else: tree.report_dependencies_from(args.__dict__["from"])
40.572289
111
0.515367
5,578
0.828211
0
0
0
0
0
0
2,020
0.299926
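A quick usage sketch for the deptree.py record above, illustrative only: the Boost checkout path and output filename are assumptions, while the constructor and method names come straight from the BoostDependencyTree class in that file.

    from pathlib import Path
    # point the tree at a local Boost checkout (path is an assumption)
    tree = BoostDependencyTree(Path("/path/to/boost-root"), Path("cycles.puml"))
    tree.load()                                  # collect headers, then analyze includes
    tree.report_cycles()                         # or: tree.report_dependencies_from("filesystem")

Either report writes a @startuml/@enduml block that can be rendered with PlantUML, matching what the script's --cycles and --from command-line options produce.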
43d13fbbdf77afe2138ccc76bfc3468760cf2d47
7,357
py
Python
uberbackend.py
adiHusky/uber_backend
adc78882c081f7636b809d6e1889ba3297309e20
[ "MIT" ]
null
null
null
uberbackend.py
adiHusky/uber_backend
adc78882c081f7636b809d6e1889ba3297309e20
[ "MIT" ]
null
null
null
uberbackend.py
adiHusky/uber_backend
adc78882c081f7636b809d6e1889ba3297309e20
[ "MIT" ]
null
null
null
from flask import Flask, flash, request, jsonify, render_template, redirect, url_for, g, session, send_from_directory, abort from flask_cors import CORS # from flask import status from datetime import date, datetime, timedelta from calendar import monthrange from dateutil.parser import parse import pytz import os import sys import time import uuid import json import random import string import pathlib import io from uuid import UUID from bson.objectid import ObjectId # straight mongo access from pymongo import MongoClient import sentry_sdk from sentry_sdk.integrations.flask import FlaskIntegration sentry_sdk.init( dsn="https://[email protected]/5685529", integrations=[FlaskIntegration()], # Set traces_sample_rate to 1.0 to capture 100% # of transactions for performance monitoring. # We recommend adjusting this value in production. traces_sample_rate=1.0, # By default the SDK will try to use the SENTRY_RELEASE # environment variable, or infer a git commit # SHA as release, however you may want to set # something more human-readable. # release="[email protected]", ) class InvalidUsage(Exception): status_code = 400 def __init__(self, message, status_code=None, payload=None): Exception.__init__(self) self.message = message if status_code is not None: self.status_code = status_code self.payload = payload def to_dict(self): rv = dict(self.payload or ()) rv['message'] = self.message return rv # mongo # mongo_client = MongoClient('mongodb://localhost:27017/') mongo_client = MongoClient( "mongodb+srv://Mahitha-Maddi:Mahitha%[email protected]/test") app = Flask(__name__) # CORS(app) CORS(app, resources={r"/*": {"origins": "*"}}) basedir = os.path.abspath(os.path.dirname(__file__)) # Here are my datasets bookings = dict() ################ # Apply to mongo ################ def atlas_connect(): # Node # const MongoClient = require('mongodb').MongoClient; # const uri = "mongodb+srv://admin:<password>@tweets.8ugzv.mongodb.net/myFirstDatabase?retryWrites=true&w=majority"; # const client = new MongoClient(uri, { useNewUrlParser: true, useUnifiedTopology: true }); # client.connect(err => { # const collection = client.db("test").collection("devices"); # // perform actions on the collection object # client.close(); # }); # Python client = pymongo.MongoClient( "mongodb+srv://Mahitha-Maddi:Mahitha%[email protected]/test") db = client.test # database access layer def insert_one(r): start_time = datetime.now() with mongo_client: # start_time_db = datetime.now() db = mongo_client['Uber'] # microseconds_caching_db = (datetime.now() - start_time_db).microseconds # print("*** It took " + str(microseconds_caching_db) + " microseconds to cache mongo handle.") print("...insert_one() to mongo: ", r) try: mongo_collection = db['bookings'] result = mongo_collection.insert_one(r) print("inserted _ids: ", result.inserted_id) except Exception as e: print(e) microseconds_doing_mongo_work = (datetime.now() - start_time).microseconds print("*** It took " + str(microseconds_doing_mongo_work) + " microseconds to insert_one.") def tryexcept(requesto, key, default): lhs = None try: lhs = requesto.json[key] # except Exception as e: except: lhs = default return lhs def ssm(): now = datetime.now() midnight = now.replace(hour=0, minute=0, second=0, microsecond=0) return str((now - midnight).seconds) @app.errorhandler(InvalidUsage) def handle_invalid_usage(error): response = jsonify(error.to_dict()) response.status_code = error.status_code return response # endpoint to check Availability @app.route("/checkAvailability", methods=["POST"]) 
def check_availability(): source = request.json['source'] destination = request.json['destination'] date = request.json['date'] with mongo_client: #raise InvalidUsage('This view is gone', status_code=410) db = mongo_client['Uber'] mongo_collection = db['available'] print(source) myquery = {"source": {"$regex": str(source)}, "destination": { "$regex": str(destination)}, "date": {"$regex": str(date)}} cursor = dict() cursor = mongo_collection.find(myquery, {"_id": 0}) records = list(cursor) howmany = len(records) print('found ' + str(howmany) + ' bookings!') sorted_records = sorted(records, key=lambda t: t['source']) print(type(sorted_records)) return jsonify(sorted_records) # endpoint to create new Booking @app.route("/book", methods=["POST"]) def book_bus(): source = request.json['source'] destination = request.json['destination'] date = request.json['date'] startTime = request.json['startTime'] endTime = request.json['endTime'] user = request.json['user'] busnumber = request.json['busnumber'] booking = dict(user=user, source=source, destination=destination, busnumber=busnumber, date=date, startTime=startTime, endTime=endTime, bookeddate=datetime.now( ).strftime("%Y-%m-%d %H:%M:%S"), _id=str(ObjectId())) insert_one(booking) return jsonify(booking) @app.route("/bookings-results", methods=["GET"]) def get_tweets_results(): global bookings with mongo_client: db = mongo_client['Uber'] mongo_collection = db['bookings'] cursor = mongo_collection.find({}) records = list(cursor) howmany = len(records) print('found ' + str(howmany) + ' bookings!') sorted_records = sorted(records, key=lambda t: t['source']) return jsonify(sorted_records) ################## # Apply from mongo ################## def applyRecordLevelUpdates(): return None def applyCollectionLevelUpdates(): global bookings with mongo_client: db = mongo_client['Uber'] mongo_collection = db['available'] cursor = mongo_collection.find({}) records = list(cursor) # bookings[0] = records[0] howmany = len(records) print('found ' + str(howmany) + ' bookings!') sorted_records = sorted(records, key=lambda t: t['source']) # return json.dumps({"results": sorted_records }) for booking in sorted_records: bookings[booking['_id']] = booking @app.route("/") def home(): return """Welcome to Uber backend!<br/>""" ################## # ADMINISTRATION # ################## # This runs once before the first single request # Used to bootstrap our collections @app.before_first_request def before_first_request_func(): applyCollectionLevelUpdates() # This runs once before any request @app.before_request def before_request_func(): applyRecordLevelUpdates() ############################ # INFO on containerization # ############################ # To containerize a flask app: # https://pythonise.com/series/learning-flask/building-a-flask-app-with-docker-compose if __name__ == '__main__': app.run(debug=True, host='0.0.0.0')
29.079051
124
0.652984
409
0.055593
0
0
2,378
0.32323
0
0
2,613
0.355172
43d2040db0a01d747e5d0a9ffdc2859f95f69610
6,359
py
Python
sppas/sppas/src/models/acm/htkscripts.py
mirfan899/MTTS
3167b65f576abcc27a8767d24c274a04712bd948
[ "MIT" ]
null
null
null
sppas/sppas/src/models/acm/htkscripts.py
mirfan899/MTTS
3167b65f576abcc27a8767d24c274a04712bd948
[ "MIT" ]
null
null
null
sppas/sppas/src/models/acm/htkscripts.py
mirfan899/MTTS
3167b65f576abcc27a8767d24c274a04712bd948
[ "MIT" ]
null
null
null
""" .. --------------------------------------------------------------------- ___ __ __ __ ___ / | \ | \ | \ / the automatic \__ |__/ |__/ |___| \__ annotation and \ | | | | \ analysis ___/ | | | | ___/ of speech http://www.sppas.org/ Use of this software is governed by the GNU Public License, version 3. SPPAS is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. SPPAS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with SPPAS. If not, see <http://www.gnu.org/licenses/>. This banner notice must not be removed. --------------------------------------------------------------------- src.models.acm.htkscripts.py ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ """ import os import os.path import logging # --------------------------------------------------------------------------- class sppasHtkScripts(object): """HTK-ASCII scripts reader/writer. :organization: Laboratoire Parole et Langage, Aix-en-Provence, France :license: GPL, v3 :copyright: Copyright (C) 2011-2018 Brigitte Bigi :author: Brigitte Bigi :contact: [email protected] This class is able to write all scripts of the VoxForge tutorial. They are used to train acoustic models thanks to the HTK toolbox. For details, refer to: http://www.voxforge.org/ """ def __init__(self): """Create a sppasHtkScripts instance.""" self.configfile = "" self.globalfile = "" self.mkphones0file = "" self.mkphones1file = "" self.mktrifile = "" self.maketriphonesfile = "" self.silfile = "" # ----------------------------------------------------------------------- def write_all(self, dirname): """Write all scripts at once. Write scripts with their default name, in the given directory. :param dirname: (str) a directory name (existing or to be created). """ if os.path.exists(dirname) is False: os.mkdir(dirname) self.write_global_ded(os.path.join(dirname, "global.ded")) self.write_mkphones0_led(os.path.join(dirname, "mkphones0.led")) self.write_mkphones1_led(os.path.join(dirname, "mkphones1.led")) self.write_mktri_led(os.path.join(dirname, "mktri.led")) self.write_maketriphones_ded(os.path.join(dirname, "maketriphones.ded")) self.write_sil_hed(os.path.join(dirname, "sil.hed")) # ----------------------------------------------------------------------- def write_global_ded(self, filename): """Write the htk script `global.ded`. :param filename: (str) Name of the script file. """ logging.info('Write script file: {!s:s}'.format(filename)) with open(filename, "w") as fp: fp.write("AS sp\n") fp.write("RS cmu\n") fp.write("MP sil sil sp\n") fp.write("\n") fp.close() self.globalfile = filename # ----------------------------------------------------------------------- def write_mkphones0_led(self, filename): """Write the htk script `mkphones0.led`. :param filename: (str) Name of the script file. """ logging.info('Write script file: {!s:s}'.format(filename)) with open(filename, "w") as fp: fp.write("EX\n") fp.write("IS sil sil\n") fp.write("DE sp\n") fp.write("\n") fp.close() self.mkphones0file = filename # ----------------------------------------------------------------------- def write_mkphones1_led(self, filename): """Write the htk script `mkphones1.led`. 
:param filename: (str) Name of the script file. """ logging.info('Write script file: {!s:s}'.format(filename)) with open(filename, "w") as fp: fp.write("EX\n") fp.write("IS sil sil\n") fp.write("\n") fp.close() self.mkphones1file = filename # ----------------------------------------------------------------------- def write_mktri_led(self, filename): """Write the htk script `mktri.led`. :param filename: (str) Name of the script file. """ logging.info('Write script file: {!s:s}'.format(filename)) with open(filename, "w") as fp: fp.write("WB sp\n") fp.write("WB sil\n") fp.write("TC\n") fp.write("\n") fp.close() self.mktrifile = filename # ----------------------------------------------------------------------- def write_maketriphones_ded(self, filename): """Write the htk script `maketriphones.ded`. :param filename: (str) Name of the script file. """ logging.info('Write script file: {!s:s}'.format(filename)) with open(filename, "w") as fp: fp.write("AS sp\n") fp.write("MP sil sil sp\n") fp.write("TC\n") fp.write("\n") fp.close() self.maketriphonesfile = filename # ----------------------------------------------------------------------- def write_sil_hed(self, filename): """Write the htk script `sil.hed`. :param filename: (str) Name of the script file. """ logging.info('Write script file: {!s:s}'.format(filename)) with open(filename, "w") as fp: fp.write("AT 2 4 0.2 {sil.transP}\n") fp.write("AT 4 2 0.2 {sil.transP}\n") fp.write("AT 1 3 0.3 {sp.transP}\n") fp.write("TI silst {sil.state[3],sp.state[2]}\n") fp.write("\n") fp.close() self.silfile = filename
32.610256
80
0.492845
4,887
0.768517
0
0
0
0
0
0
3,847
0.604969
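A minimal sketch of driving the sppasHtkScripts writer from the htkscripts.py record above; the import path is inferred from the repository layout and the output directory name is an assumption, everything else comes from the class itself.

    from sppas.src.models.acm.htkscripts import sppasHtkScripts  # import path assumed from sppas/sppas/src/models/acm/htkscripts.py

    scripts = sppasHtkScripts()
    scripts.write_all("htk-scripts")   # creates the directory if needed and writes global.ded, mkphones0.led, mkphones1.led, mktri.led, maketriphones.ded and sil.hed
    print(scripts.globalfile)          # each write_* method stores the path it wrote on the instance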
43d3b50d90e2618726a0619c25ddcb995a36172f
2,961
py
Python
icekit/plugins/map/tests.py
ic-labs/django-icekit
c507ea5b1864303732c53ad7c5800571fca5fa94
[ "MIT" ]
52
2016-09-13T03:50:58.000Z
2022-02-23T16:25:08.000Z
icekit/plugins/map/tests.py
ic-labs/django-icekit
c507ea5b1864303732c53ad7c5800571fca5fa94
[ "MIT" ]
304
2016-08-11T14:17:30.000Z
2020-07-22T13:35:18.000Z
icekit/plugins/map/tests.py
ic-labs/django-icekit
c507ea5b1864303732c53ad7c5800571fca5fa94
[ "MIT" ]
12
2016-09-21T18:46:35.000Z
2021-02-15T19:37:50.000Z
from mock import patch from django.contrib.contenttypes.models import ContentType from django.contrib.sites.models import Site from django.contrib.auth import get_user_model from django.core import exceptions from django_dynamic_fixture import G from django_webtest import WebTest from icekit.models import Layout from icekit.page_types.layout_page.models import LayoutPage from icekit.utils import fluent_contents from . import models User = get_user_model() class MapItemTestCase(WebTest): def setUp(self): self.embed_code = ''' <iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" width="600" height="450" frameborder="0" style="border:0" allowfullscreen ></iframe> ''' self.cleaned_embed_code = '<iframe allowfullscreen="" frameborder="0" src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3312.0476344648832!2d151.19845715159963!3d-33.88842702741586!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x6b12b1d842ee9aa9%3A0xb0a19ac433ef0be8!2sThe+Interaction+Consortium!5e0!3m2!1sen!2sau!4v1496201264670" style="border: 0;"></iframe>' self.layout_1 = G( Layout, template_name='icekit/layouts/default.html', ) self.layout_1.content_types.add( ContentType.objects.get_for_model(LayoutPage)) self.layout_1.save() self.staff_1 = User.objects.create( email='[email protected]', is_staff=True, is_active=True, is_superuser=True, ) self.page_1 = LayoutPage() self.page_1.title = 'Test Page' self.page_1.slug = 'test-page' self.page_1.parent_site = Site.objects.first() self.page_1.layout = self.layout_1 self.page_1.author = self.staff_1 self.page_1.status = LayoutPage.PUBLISHED self.page_1.save() self.map_1 = fluent_contents.create_content_instance( models.MapItem, self.page_1, _embed_code=self.embed_code, ) self.map_item = models.MapItem( parent_type=ContentType.objects.get_for_model(type(self.page_1)), parent_id=self.page_1.id, placeholder=self.page_1.get_placeholder_by_slot('main')[0], _embed_code=self.embed_code, ) self.page_1.publish() def test_map_renders(self): response = self.app.get(self.page_1.get_published().get_absolute_url()) response.mustcontain(self.cleaned_embed_code) def test_cleaned_embed_code(self): self.assertEqual(self.map_1._cleaned_embed_code.strip(), self.cleaned_embed_code)
38.960526
381
0.67207
2,494
0.842283
0
0
0
0
0
0
927
0.31307
43d418c8d833bba41481c7b2cbeab0fbbe8f44c5
548
py
Python
example/example.py
saravanabalagi/imshowtools
ea81af888c69223ff8b42b5c4b8c034483eebe21
[ "MIT" ]
4
2019-07-18T17:24:02.000Z
2020-10-14T06:09:05.000Z
example/example.py
saravanabalagi/imshowtools
ea81af888c69223ff8b42b5c4b8c034483eebe21
[ "MIT" ]
1
2020-04-18T01:05:22.000Z
2020-04-18T01:10:53.000Z
example/example.py
saravanabalagi/imshowtools
ea81af888c69223ff8b42b5c4b8c034483eebe21
[ "MIT" ]
null
null
null
from imshowtools import imshow import cv2 if __name__ == '__main__': image_lenna = cv2.imread("lenna.png") imshow(image_lenna, mode='BGR', window_title="LennaWindow", title="Lenna") image_lenna_bgr = cv2.imread("lenna_bgr.png") imshow(image_lenna, image_lenna_bgr, mode=['BGR', 'RGB'], title=['lenna_rgb', 'lenna_bgr']) imshow(*[image_lenna for _ in range(12)], title=["Lenna" for _ in range(12)], window_title="LennaWindow") imshow(*[image_lenna for _ in range(30)], title="Lenna", padding=(1, 1, 0, (0, 0, 0.8, 0.8)))
39.142857
109
0.678832
0
0
0
0
0
0
0
0
120
0.218978
43d619ff813d6467445c26ac811f7e5c110c5dd3
729
py
Python
terminalone/models/concept.py
amehta1/t1-python
4f7eb0bec7671b29baf3105b8cafafb373107e7b
[ "Apache-2.0" ]
24
2015-07-09T18:49:10.000Z
2021-06-07T18:36:58.000Z
terminalone/models/concept.py
amehta1/t1-python
4f7eb0bec7671b29baf3105b8cafafb373107e7b
[ "Apache-2.0" ]
100
2015-07-13T20:24:50.000Z
2020-08-10T11:16:39.000Z
terminalone/models/concept.py
amehta1/t1-python
4f7eb0bec7671b29baf3105b8cafafb373107e7b
[ "Apache-2.0" ]
36
2015-07-09T18:51:48.000Z
2022-02-14T22:44:37.000Z
# -*- coding: utf-8 -*- """Provides concept object.""" from __future__ import absolute_import from .. import t1types from ..entity import Entity class Concept(Entity): """Concept entity.""" collection = 'concepts' resource = 'concept' _relations = { 'advertiser', } _pull = { 'advertiser_id': int, 'created_on': t1types.strpt, 'id': int, 'name': None, 'status': t1types.int_to_bool, 'updated_on': t1types.strpt, 'version': int, } _push = _pull.copy() _push.update({ 'status': int, }) def __init__(self, session, properties=None, **kwargs): super(Concept, self).__init__(session, properties, **kwargs)
22.78125
68
0.581619
580
0.79561
0
0
0
0
0
0
179
0.245542
43d690157e44125280f30cea5097fb9b835832b6
932
py
Python
videofeed.py
dmeklund/asyncdemo
956f193c0fa38744965362966ac7f8ef224409b4
[ "MIT" ]
null
null
null
videofeed.py
dmeklund/asyncdemo
956f193c0fa38744965362966ac7f8ef224409b4
[ "MIT" ]
null
null
null
videofeed.py
dmeklund/asyncdemo
956f193c0fa38744965362966ac7f8ef224409b4
[ "MIT" ]
null
null
null
""" Mock up a video feed pipeline """ import asyncio import logging import sys import cv2 logging.basicConfig(format="[%(thread)-5d]%(asctime)s: %(message)s") logger = logging.getLogger('async') logger.setLevel(logging.INFO) async def process_video(filename): cap = cv2.VideoCapture(filename) tasks = list() frame_ind = 0 while cap.isOpened(): ret, frame = cap.read() tasks.append(asyncio.ensure_future(process_frame(frame, frame_ind))) frame_ind += 1 await asyncio.sleep(0) await asyncio.gather(tasks) async def process_frame(frame, frame_ind): logger.info("Processing frame {}".format(frame_ind)) await asyncio.sleep(20.0) logger.info("Finished processing frame {}".format(frame_ind)) def main(): loop = asyncio.get_event_loop() loop.run_until_complete(process_video(sys.argv[1])) logger.info("Completed") if __name__ == '__main__': main()
22.731707
76
0.687768
0
0
0
0
0
0
524
0.562232
156
0.167382
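For the videofeed.py record above, a short usage sketch; the video filename is an assumption, the rest mirrors the script's own main() entry point.

    # the script takes the input video path as its only CLI argument:
    #   python videofeed.py sample.mp4
    # equivalently, from Python (assumes the file is importable as a module):
    import asyncio
    from videofeed import process_video
    asyncio.get_event_loop().run_until_complete(process_video("sample.mp4"))

Each decoded frame is scheduled as its own task, and asyncio.gather(*tasks) waits for all per-frame coroutines before process_video returns.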
43d763b4860a448a07b1ac979d461dd9025028b9
11,807
py
Python
parsers/read_lspci_and_glxinfo.py
mikeus9908/peracotta
c54c351acae8afec250185f4bc714a2f86c47c90
[ "MIT" ]
3
2019-04-01T17:28:20.000Z
2020-11-19T17:25:32.000Z
parsers/read_lspci_and_glxinfo.py
mikeus9908/peracotta
c54c351acae8afec250185f4bc714a2f86c47c90
[ "MIT" ]
142
2018-11-05T18:13:13.000Z
2022-03-12T17:43:40.000Z
parsers/read_lspci_and_glxinfo.py
mikeus9908/peracotta
c54c351acae8afec250185f4bc714a2f86c47c90
[ "MIT" ]
10
2019-10-25T12:28:37.000Z
2021-05-17T17:32:56.000Z
#!/usr/bin/python3 """ Read "lspci -v" and "glxinfo" outputs """ import re from dataclasses import dataclass from InputFileNotFoundError import InputFileNotFoundError @dataclass class VideoCard: type = "graphics-card" manufacturer_brand = "" reseller_brand = "" internal_name = "" model = "" capacity = -1 # bytes warning = "" def parse_lspci_output(gpu: VideoCard, lspci_path: str, interactive: bool = False): try: with open(lspci_path, "r") as f: lspci_output = f.read() except FileNotFoundError: raise InputFileNotFoundError(lspci_path) lspci_sections = lspci_output.split("\n\n") for section in lspci_sections: if "VGA compatible controller" in section: first_line = section.splitlines()[0].split(": ", 1)[ 1 ] # removes "VGA compatible controller:" second_line = section.splitlines()[1] part_between_square_brackets = None try: # take the first string between [] from the first line part_between_square_brackets = first_line.split("[")[1].split("]")[0] except IndexError: # there may not be an argument in between [] pass if "Subsystem:" in second_line: # The model or model family is often repeated here, but removing it automatically is complicated gpu.reseller_brand = ( second_line.split("Subsystem: ")[1].split("[", 1)[0].strip() ) gpu.reseller_brand = gpu.reseller_brand.replace( "Integrated Graphics Controller", "" ) # ----------------------------------------------------------------- # AMD/ATI # ----------------------------------------------------------------- if part_between_square_brackets is not None and ( "AMD" in part_between_square_brackets or "ATI" in part_between_square_brackets ): gpu.manufacturer_brand = part_between_square_brackets # take second string between [] gpu.model = first_line.split("[")[2].split("]")[0] if "controller" in gpu.model: gpu.model = section.splitlines()[1].split(" ")[-1] # ----------------------------------------------------------------- # Nvidia # ----------------------------------------------------------------- elif "NVIDIA" in first_line.upper(): gpu.manufacturer_brand = "Nvidia" gpu.model = part_between_square_brackets if gpu.reseller_brand != "": pieces = gpu.reseller_brand.rsplit(" ", 1) gpu.reseller_brand = pieces[0] gpu.internal_name = pieces[1] # ----------------------------------------------------------------- # Intel # ----------------------------------------------------------------- elif "INTEL" in first_line.upper(): gpu.manufacturer_brand = "Intel" if "Integrated Graphics" in first_line: tmp_model = first_line.split("Intel Corporation ")[1].split( " Integrated Graphics" )[0] # if there are no numbers, e.g. "Core Processor", tmp_model is not a model number if not re.search("\\d+", tmp_model): tmp_model = "" elif "HD Graphics" in first_line: tmp_model = ( first_line.split("Intel Corporation ")[1] .split("(", 1)[0] .strip() ) elif "[" in first_line and "]" in first_line: tmp_model = first_line.split("[")[1].split("]")[0] else: tmp_model = "" if tmp_model != "": gpu.model = tmp_model else: gpu.model = "" # ----------------------------------------------------------------- # VIA # ----------------------------------------------------------------- elif first_line.startswith("VIA"): gpu.manufacturer_brand = "VIA" gpu.model = part_between_square_brackets tmp_model = first_line.split("[")[0] i = 0 for i, char in enumerate("VIA Technologies, Inc. 
"): if tmp_model[i] != char: break gpu.internal_name = tmp_model[i:].strip() # ----------------------------------------------------------------- # SiS # ----------------------------------------------------------------- elif part_between_square_brackets == "SiS": # May be written somewhere else on other models, but we have so few SiS cards that it's difficult to # find more examples. Also, they haven't made any video card in the last 15 years or so. gpu.manufacturer_brand = part_between_square_brackets if gpu.reseller_brand.lower() == "silicon integrated systems": gpu.reseller_brand = "SiS" gpu.model = first_line.split("]", 1)[1] # These may be useful for non-integrated cards, however the example ones are all integrated if " PCIE" in gpu.model: gpu.model = gpu.model.split(" PCIE", 1)[0].strip() elif " PCI/AGP" in gpu.model: gpu.model = gpu.model.split(" PCI/AGP", 1)[0].strip() if gpu.model in gpu.reseller_brand: gpu.reseller_brand = gpu.reseller_brand.split(gpu.model, 1)[ 0 ].strip() else: gpu.manufacturer_brand = None error = ( "I couldn't find the Video Card brand. The model was set to 'None' and is to be edited " "logging into the TARALLO afterwards. The information you're looking for should be in the " f"following 2 lines:\n{first_line}\n{second_line}\n" ) if interactive: print(error) gpu.warning += error if gpu.model is None: error = ( "I couldn't find the Integrated Graphics model. The model was set to 'None' and is to be " "edited logging into the TARALLO afterwards. The information you're looking for should be in " f"the following 2 lines:\n{first_line}\n{second_line}\n" ) if interactive: print(error) gpu.warning += error else: # Try to remove duplicate information gpu.reseller_brand = gpu.reseller_brand.replace(gpu.model, "").strip() if gpu.internal_name is not None: # Same gpu.reseller_brand = gpu.reseller_brand.replace( gpu.internal_name, "" ).strip() break def parse_glxinfo_output(gpu: VideoCard, glxinfo_path: str): try: with open(glxinfo_path, "r") as f: glxinfo_output = f.read() except FileNotFoundError: raise InputFileNotFoundError(glxinfo_path) for i, line in enumerate(glxinfo_output.splitlines()): # this line comes before the "Dedicated video memory" line # this basically saves a default value if the dedicated memory line cannot be found if "Video memory" in line: try: tmp_vid_mem = int(line.split(" ")[6].split(" ")[0][:-2]) tmp_vid_mem_multiplier = line[-2:] except ValueError: exit(-1) return # To stop complaints from PyCharm gpu.capacity = convert_video_memory_size( tmp_vid_mem, tmp_vid_mem_multiplier ) if "Dedicated video memory" in line: try: tmp_vram = int(line.split(" ")[7].split(" ")[0]) tmp_vram_multiplier = line[-2:] except ValueError: exit(-1) return capacity = convert_video_memory_size(tmp_vram, tmp_vram_multiplier) if capacity < 0: gpu.warning = "Could not find dedicated video memory" if gpu.capacity < 0: gpu.warning += ". The value cannot be trusted." else: gpu.capacity = capacity break if gpu.capacity > 0: # Round to the next power of 2 # this may be different from human readable capacity... 
rounded = 2 ** (gpu.capacity - 1).bit_length() one_and_half = int(rounded / 2 * 1.5) # Accounts for 3 GB VRAM cards and similar # Yes they do exist, try to remove this part and watch tests fail (and the card was manually verified to be 3 GB) if one_and_half >= gpu.capacity: gpu.capacity = one_and_half else: gpu.capacity = rounded def convert_video_memory_size(capacity, units_of_measure): if units_of_measure == "GB": capacity *= 1024 * 1024 * 1024 elif units_of_measure == "MB": capacity *= 1024 * 1024 elif units_of_measure.upper() == "KB": capacity *= 1024 else: capacity = -1 return capacity def read_lspci_and_glxinfo( has_dedicated: bool, lspci_path: str, glxinfo_path: str, interactive: bool = False ): gpu = VideoCard() if has_dedicated: parse_lspci_output(gpu, lspci_path, interactive) parse_glxinfo_output(gpu, glxinfo_path) else: # integrated_in_mobo or integrated_in_cpu parse_lspci_output(gpu, lspci_path, interactive) # don't parse glxinfo because the VRAM is part of the RAM and varies gpu.capacity = None # print("The VRAM capacity could not be detected. " # "Please try looking for it on the Video Card or on the Internet. " # "The capacity value defaulted to 'None'. " # "For an integrated GPU, the VRAM may also be shared with the system RAM, so an empty value is acceptable.") result = { "type": "graphics-card", "brand": gpu.reseller_brand.strip(), "model": gpu.model.strip(), "internal-name": gpu.internal_name.strip(), "capacity-byte": gpu.capacity, "working": "yes", # Indeed it is working } if gpu.manufacturer_brand is not None and gpu.reseller_brand is not None: if gpu.manufacturer_brand.lower() != gpu.reseller_brand.lower(): result["brand-manufacturer"] = gpu.manufacturer_brand return result if __name__ == "__main__": import argparse import json parser = argparse.ArgumentParser(description="Parse lspci/glxinfo output") parser.add_argument("lspci", type=str, nargs=1, help="path to lspci output") parser.add_argument("glxinfo", type=str, nargs=1, help="path to glxinfo output") parser.add_argument( "-d", "--dedicated", action="store_true", default=False, help="computer has dedicated GPU", ) args = parser.parse_args() try: print( json.dumps( read_lspci_and_glxinfo(args.dedicated, args.lspci[0], args.glxinfo[0]), indent=2, ) ) except InputFileNotFoundError as e: print(str(e)) exit(1)
40.023729
121
0.510206
177
0.014991
0
0
188
0.015923
0
0
3,561
0.301601
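An illustrative invocation of the parser in the read_lspci_and_glxinfo.py record above; the capture file names are assumptions, while the function signature, flags and result keys are taken from the script.

    # capture the inputs first, e.g.  lspci -v > lspci.txt  and  glxinfo > glxinfo.txt
    from parsers.read_lspci_and_glxinfo import read_lspci_and_glxinfo  # import path assumed from the repo layout

    result = read_lspci_and_glxinfo(True, "lspci.txt", "glxinfo.txt")  # True: a dedicated GPU is present
    print(result["brand"], result["model"], result["capacity-byte"])

The same parse is available from the command line as  python read_lspci_and_glxinfo.py lspci.txt glxinfo.txt --dedicated, which prints the resulting dict as JSON.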
43d8185a62fc1d316a49c5b7d44a50853bf56a88
9,682
py
Python
upload.py
snymainn/tools-
af57a1a4d0f1aecff33ab28c6f27acc893f37fbc
[ "MIT" ]
null
null
null
upload.py
snymainn/tools-
af57a1a4d0f1aecff33ab28c6f27acc893f37fbc
[ "MIT" ]
null
null
null
upload.py
snymainn/tools-
af57a1a4d0f1aecff33ab28c6f27acc893f37fbc
[ "MIT" ]
null
null
null
#!/usr/bin/python import sys from loglib import SNYLogger import ftplib import argparse import re import os import calendar import time def read_skipfile(infile, log): skiplines = list() skipfile = open(infile, 'r') for line in skipfile: newline = line.rstrip('\r\n') linelength = len(newline) if linelength>0: log.debug("Adding "+newline+" to skiplines") tmpobjects = re.compile(newline) skiplines.append(tmpobjects) skipfile.close() return skiplines #GET LOCAL FILELIST def get_local_files(localpath,log): locallist = list() os.chdir(localpath) log.debug("*** GETTING LOCAL FILELIST ***") for name in os.listdir("."): if (not name.startswith('.')): statinfo = os.stat(name) if (statinfo.st_mode>=32768): entrytype = "file" else: entrytype = "dir" size = statinfo.st_size date = statinfo.st_mtime log.debug("Date:"+str(int(date))+" type:"+entrytype+", name:"+name+" size:"+str(size)) locallist.append({'name':name,'type':entrytype,'modify':int(date),'size':size}) return locallist # # login to ftp server # def ftp_login(args, log): ftp = ftplib.FTP() port = 21 ftp.connect(args.host, port) try: log.debug("Logging in...") ftp.login(args.user, args.password) log.debug(ftp.getwelcome()) except ftplib.error_perm, resp: log.logprint(str(resp)) except: log.logprint("Login section failed..") return ftp # # get remote files # def get_remote_files(ftp, remotepath, args, log): # LIST CONTENTS contents = list() dirlist = list() log.debug("*** GET REMOTE FILELIST ***") try: ftp.cwd(remotepath) # Entry point ftp.retrlines('MLSD', contents.append) for line in contents: # log.debug(line) entry = line.split(";") size = "0" #Set this because directories does not report size for item in entry: cell = item.split("=") if (cell[0]=="modify"): date = cell[1] modify=calendar.timegm(time.strptime(str(date), "%Y%m%d%H%M%S")) #for loops/if checks are not blocks in python, i.e. 
no need to predefine modify if (cell[0]=="type"): entrytype=cell[1] if (cell[0]=="size"): size = cell[1] if (len(cell[0])>0) and cell[0].startswith(' '): #If string does not contain =, cell[1] will not be defined #and first entry in cell[0] string will be whitespace name = cell[0].lstrip() log.debug("Date:"+str(modify)+" type:"+entrytype+" Name:"+name+" size:"+size) if (entrytype=='file' or entrytype=='dir'): #Do not include current and parent dir entries dirlist.append({'name':name,'type':entrytype,'modify':int(modify),'size':size}) except ftplib.error_perm, resp: log.logprint(str(resp)) exit(1) return dirlist def touch(fname): try: os.utime(fname, None) except: log.logprint("Updating mtime failed, "+fname+" does not exist") def sync_files(ftp, args, skiplines, localpath, remotepath, log): locallist = get_local_files(localpath,log) remotelist = get_remote_files(ftp, remotepath, args, log) #Create dictionaries for easy lookup localdict = {} index = 0 for lfile in locallist: localdict[lfile['name']]=index index+=1 remotedict = {} index = 0 for rfile in remotelist: remotedict[rfile['name']]=index index+=1 # Traverse local filelist and # check if local file is present on remote for lfile in locallist: #Check if file is present in skipfile #If present in skipfile, skip to next file in locallist skiptonext = False for p in skiplines: m=p.match(lfile['name']) if (m): #log.logprint(lfile['name']+" match "+m.group()+", thus present in skipfile "+args.skipfile) log.logprint("Skipping: "+lfile['name']) skiptonext = True break if skiptonext: continue # #Check if remote has the local file #if present remote, type file and modify time is older than local file, set upload flag # upload = False #Set to True here instead of False since this will handle the case where #remote does not exist, i.e. 
always upload except when remote is present #and up to date if lfile['name'] in remotedict: rfile = remotelist[remotedict[lfile['name']]] #Get fileinfo from remotelist using index if lfile['type']=="file": log.debug(lfile['name']+" is present remote : "+rfile['name']) if (lfile['modify']>rfile['modify']): log.debug("Local file is newer by "+str(lfile['modify']-rfile['modify'])+" seconds, try to upload...") upload = True elif lfile['type']=="dir": log.debug(lfile['name']+" is present remote and is directory: "+rfile['name']) sync_files(ftp, args, skiplines, lfile['name'], rfile['name'], log) elif lfile['type']=="dir": log.debug(lfile['name']+" is NOT present remote and is directory: ") try: ftp.mkd(lfile['name']) log.logprint("CREATED DIR : "+lfile['name']) sync_files(ftp, args, skiplines, lfile['name'], lfile['name'], log) except ftplib.all_errors, resp: log.logprint("ERROR: Failed to create directory "+lfile['name']+" - "+str(resp)) elif lfile['type']=="file": log.debug(lfile['name']+" is NOT present remote and is file") upload = True #Handle upload flag if (upload and lfile['type']=="file"): try: touch(lfile['name']) #Touch local file to set modify time to approx the same as the remote will get ftp.storbinary('STOR '+lfile['name'], open(lfile['name'], 'rb')) log.logprint("UPLOADED : "+lfile['name']) except ftplib.all_errors, resp: log.logprint("ERROR: Failed to upload "+lfile['name']+" - "+str(resp)) #Make sure locally deleted items are deleted remotely for rfile in remotelist: if rfile['name'] not in localdict: if rfile['type']=="file": #Remote file is not present locally=>Delete it try: ftp.delete(rfile['name']) log.logprint("DELETED: "+rfile['name']) except ftplib.all_errors, resp: log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp)) elif rfile['type']=="dir": log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively") #Remote dir is not present locally, decend and recursively delete everything #TODO: recursive_delete(ftp, rfile['name']) delete_recursive(ftp, args, rfile['name'], log) ftp.cwd("..") os.chdir("..") def delete_recursive(ftp, args, remotepath, log): remotelist = get_remote_files(ftp, remotepath, args, log) #Make sure locally deleted items are deleted remotely for rfile in remotelist: if rfile['type']=="file": try: ftp.delete(rfile['name']) log.logprint("DELETED: "+rfile['name']) except ftplib.all_errors, resp: log.logprint("ERROR: Failed to delete "+rfile['name']+" - "+str(resp)) elif rfile['type']=="dir": log.debug("Remote dir "+rfile['name']+" not present locally, delete it recursively") delete_recursive(ftp, args, rfile['name'], log) ftp.cwd("..") try: ftp.rmd(remotepath) log.logprint("DELETED DIR: "+remotepath) except ftplib.all_errors, resp: log.logprint("ERROR: Failed to delete directory "+remotepath+" - "+str(resp)) parser = argparse.ArgumentParser() parser.add_argument("-o", "--host", help="ftp hostname", required=True) parser.add_argument("-u", "--user", help="username on ftp server", required=True) parser.add_argument("-p", "--password", help="password", required=True) parser.add_argument("-d", "--debug", help="print debug to terminal, default 0, use multiple times to increase verbosity, i.e. -d -d", action="count") parser.add_argument("-b", "--basedir", help="Toplevel directory on ftp server, default www") parser.add_argument("-t", "--path", help="Local toplevel directory, default ., i.e. 
current dir") parser.add_argument("-s", "--skipfile", help="Do not upload files in <skipfile>, default name upload.skip") parser.set_defaults(debug=0) parser.set_defaults(skipfile="upload.skip") parser.set_defaults(basedir="www") parser.set_defaults(path=".") args = parser.parse_args() log = SNYLogger(basename="upload", size_limit=10, no_logfiles=2, stdout=args.debug) skiplines = read_skipfile(args.skipfile, log) ftp = ftp_login(args, log) sync_files(ftp, args, skiplines, args.path, args.basedir, log) ftp.quit()
37.968627
122
0.567858
0
0
0
0
0
0
0
0
3,168
0.327205
43d87b5ab1e5e10305ebbe366e85481beb47273f
2,637
py
Python
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py
chris-zen/phd-thesis
1eefdff8e7ca1910304e27ae42551dc64496b101
[ "Unlicense" ]
1
2015-12-22T00:53:18.000Z
2015-12-22T00:53:18.000Z
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py
chris-zen/phd-thesis
1eefdff8e7ca1910304e27ae42551dc64496b101
[ "Unlicense" ]
null
null
null
chapter2/intogen-arrays/src/mrna/mrna_comb_gene_classif.py
chris-zen/phd-thesis
1eefdff8e7ca1910304e27ae42551dc64496b101
[ "Unlicense" ]
null
null
null
#!/usr/bin/env python """ Classify oncodrive gene results and prepare for combination * Configuration parameters: - The ones required by intogen.data.entity.EntityManagerFactory * Input: - oncodrive_ids: The mrna.oncodrive_genes to process * Output: - combinations: The mrna.combination prepared to be calculated * Entities: - mrna.oncodrive_genes - mrna.combination """ import uuid import json from wok.task import Task from wok.element import DataElement from intogen.data.entity.server import EntityServer from intogen.data.entity import types def run(task): # Initialization task.check_conf(["entities"]) conf = task.conf log = task.logger() task.check_in_ports(["oncodrive_ids"]) task.check_out_ports(["combinations"]) oncodrive_port = task.ports["oncodrive_ids"] combination_port = task.ports["combinations"] es = EntityServer(conf["entities"]) em = es.manager() log.info("Indexing available combination results ...") comb_results_index = em.group_ids( ["icdo_topography", "icdo_morphology", "id_type"], types.MRNA_COMBINATION, unique = True) ENSEMBL_GENE = "ensembl:gene" classif = {} log.info("Classifying oncodrive results ...") for oid in oncodrive_port: o = em.find(oid, types.MRNA_ONCODRIVE_GENES) if o is None: log.error("{0} not found: {1}".format(types.MRNA_ONCODRIVE_GENES, oid)) continue okey = (o["study_id"], o["platform_id"], o["icdo_topography"], o["icdo_morphology"]) key = (o["icdo_topography"], o["icdo_morphology"], ENSEMBL_GENE) log.debug("Oncodrive results ({0}) [{1}] classified into ({2}) ...".format(", ".join(okey), oid, ", ".join(key))) if key in classif: classif[key] += [o] else: classif[key] = [o] log.info("Preparing combinations ...") for key in sorted(classif): if key in comb_results_index: cid = comb_results_index[key][0] c = em.find(cid, types.MRNA_COMBINATION) if c is None: log.error("{0} not found: {1}".format(types.MRNA_COMBINATION, cid)) return else: c = DataElement(key_sep = "/") c["id"] = cid = str(uuid.uuid4()) c["icdo_topography"] = key[0] c["icdo_morphology"] = key[1] c["id_type"] = ENSEMBL_GENE olist = classif[key] log.info("({0}) [{1}] --> {2} results".format(", ".join(key), cid, len(olist))) ids = c.create_list() flist = c.create_list() for o in olist: ids += [o["id"]] flist += [o["results_file"]] c["source"] = src = c.create_element() src["type"] = types.MRNA_ONCODRIVE_GENES src["ids"] = ids c["files"] = flist combination_port.write(json.dumps(c.to_native())) em.close() if __name__ == "__main__": Task(run).start()
21.975
115
0.680319
0
0
0
0
0
0
0
0
969
0.367463
43d8dcfde4fc817f885eb2d557c4f9603d6da4be
86
py
Python
src/FunctionApps/DevOps/tests/test_get_ip.py
CDCgov/prime-public-health-data-infrastructure
7e4849c3a486a84e94765bf0023b80261c510c57
[ "Apache-2.0" ]
3
2022-02-24T18:16:39.000Z
2022-03-29T20:21:41.000Z
src/FunctionApps/DevOps/tests/test_get_ip.py
CDCgov/prime-public-health-data-infrastructure
7e4849c3a486a84e94765bf0023b80261c510c57
[ "Apache-2.0" ]
17
2022-02-08T17:13:55.000Z
2022-03-28T16:49:00.000Z
src/FunctionApps/DevOps/tests/test_get_ip.py
CDCgov/prime-public-health-data-infrastructure
7e4849c3a486a84e94765bf0023b80261c510c57
[ "Apache-2.0" ]
3
2022-02-27T23:12:50.000Z
2022-03-17T04:51:47.000Z
def test_get_ip_placeholder(): """placeholder so pytest does not fail""" pass
21.5
45
0.697674
0
0
0
0
0
0
0
0
41
0.476744
43d92304705312e029e4656dd5bbcccaf8cbee7d
861
py
Python
data/models/svm_benchmark.py
Laurenhut/Machine_Learning_Final
4fca33754ef42acde504cc64e6bbe4e463caadf8
[ "MIT" ]
null
null
null
data/models/svm_benchmark.py
Laurenhut/Machine_Learning_Final
4fca33754ef42acde504cc64e6bbe4e463caadf8
[ "MIT" ]
null
null
null
data/models/svm_benchmark.py
Laurenhut/Machine_Learning_Final
4fca33754ef42acde504cc64e6bbe4e463caadf8
[ "MIT" ]
null
null
null
#!/usr/bin/env python from sklearn import svm from sklearn.cross_validation import cross_val_score import numpy as np import csv_io def main(): training, target = csv_io.read_data("../Data/train.csv") training = [x[1:] for x in training] target = [float(x) for x in target] test, throwaway = csv_io.read_data("../Data/test.csv") test = [x[1:] for x in test] svc = svm.SVC(kernel='poly', degree=2) scores = cross_val_score(svc, training, target, cv=10) print np.mean(scores) # svc.fit(training, target) # predicted_probs = svc.predict_proba(test) # predicted_probs = [[min(max(x,0.001),0.999) for x in y] # for y in predicted_probs] # predicted_probs = [["%f" % x for x in y] for y in predicted_probs] # csv_io.write_delimited_file("../Submissions/svm_benchmark.csv", # predicted_probs) if __name__=="__main__": main()
31.888889
72
0.615563
0
0
0
0
0
0
0
0
426
0.494774
43d983edaa81a2f049c07647c3d3908b2dea574f
1,605
py
Python
configs/utils/config_generator.py
user-wu/SOD_eval_metrics
d5b8804580cb52a4237c8e613818d10591dc6597
[ "MIT" ]
null
null
null
configs/utils/config_generator.py
user-wu/SOD_eval_metrics
d5b8804580cb52a4237c8e613818d10591dc6597
[ "MIT" ]
null
null
null
configs/utils/config_generator.py
user-wu/SOD_eval_metrics
d5b8804580cb52a4237c8e613818d10591dc6597
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from matplotlib import colors # max = 148 _COLOR_Genarator = iter( sorted( [ color for name, color in colors.cnames.items() if name not in ["red", "white"] or not name.startswith("light") or "gray" in name ] ) ) def curve_info_generator(): line_style_flag = True def _template_generator( method_info: dict, method_name: str, line_color: str = None, line_width: int = 3 ) -> dict: nonlocal line_style_flag template_info = dict( path_dict=method_info, curve_setting=dict( line_style="-" if line_style_flag else "--", line_label=method_name, line_width=line_width, ), ) print(method_name) if method_name == "Ours": template_info["curve_setting"]["line_color"] = 'red' template_info["curve_setting"]["line_style"] = '-' # line_style_flag = not line_style_flag else: if line_color is not None: template_info["curve_setting"]["line_color"] = line_color else: template_info["curve_setting"]["line_color"] = next(_COLOR_Genarator) line_style_flag = not line_style_flag return template_info return _template_generator def simple_info_generator(): def _template_generator(method_info: dict, method_name: str) -> dict: template_info = dict(path_dict=method_info, label=method_name) return template_info return _template_generator
29.181818
93
0.598754
0
0
0
0
0
0
0
0
227
0.141433
43db9748cf12932e64e00e512404058350f2661e
1,151
py
Python
core/sms_service.py
kartik1000/jcc-registration-portal
053eade1122fa760ae112a8599a396d68dfb16b8
[ "MIT" ]
null
null
null
core/sms_service.py
kartik1000/jcc-registration-portal
053eade1122fa760ae112a8599a396d68dfb16b8
[ "MIT" ]
null
null
null
core/sms_service.py
kartik1000/jcc-registration-portal
053eade1122fa760ae112a8599a396d68dfb16b8
[ "MIT" ]
null
null
null
from urllib.parse import urlencode from decouple import config import hashlib import requests BASE = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" auth_key = config('AUTH_KEY') url = 'http://sms.globehost.com/api/sendhttp.php?' def encode_base(num, array=BASE): if(num == 0): return array[0] retarr = [] base = len(array) while num: num, res = divmod(num, base) retarr.append(array[res]) retarr.reverse() return ''.join(retarr)[:6] def generate(alphanum): short = (hashlib.md5(alphanum.encode())).hexdigest() short = int(short, 16) short = encode_base(short) return short def send_message(team_name, team_id, contact): message = 'Your unique team ID for Junior Code Cracker 2k18 is ' + \ team_id + '.Kindly take note and submit this at the event.' data = { 'authkey': auth_key, 'mobiles': contact, 'message': message, 'sender': 'GNULUG', 'route': '4', } data_encoded = urlencode(data) r = requests.get(url + data_encoded) print('Message Sent Successfully !!') return r.status_code
23.979167
72
0.644657
0
0
0
0
0
0
0
0
306
0.265856
43dc511c1276023b6e01df3b43e2f8d7dd243462
1,522
py
Python
scripts/fetch_images.py
Protagonistss/sanic-for-v3
ba7e94273b77914b8d85d67cf513041ada00780d
[ "MIT" ]
null
null
null
scripts/fetch_images.py
Protagonistss/sanic-for-v3
ba7e94273b77914b8d85d67cf513041ada00780d
[ "MIT" ]
null
null
null
scripts/fetch_images.py
Protagonistss/sanic-for-v3
ba7e94273b77914b8d85d67cf513041ada00780d
[ "MIT" ]
null
null
null
import sys import os sys.path.append(os.pardir) import random import time import requests from contextlib import closing from help import utils from threading import Thread def get_train_set_path(path: str): create_path = utils.join_root_path(path) return create_path def create_train_set_dir(path='auth-set'): create_path = get_train_set_path(path) is_existed = os.path.exists(create_path) if not is_existed: os.mkdir(create_path) def gen_image_name(char_pool): prefix = '' for i in range(4): prefix += random.choice(char_pool) suffix = str(time.time()).replace('.', '') return "{}_{}".format(prefix, suffix) def gen_image_all_url(path): rule = '0123456789' return '{}/{}.png'.format(path, gen_image_name(rule)) def get_image(url, count=20000, path='auth-set'): create_train_set_dir(path) for loop in range(count): response = requests.get(url, verify=False, stream=True) with closing(response) as response: with open(gen_image_all_url(get_train_set_path(path)), 'wb') as f: for i in response.iter_content(chunk_size=512): f.write(i) print('第{}张图片保存成功'.format(loop + 1)) def main(): get_image('https://gray.930pm.cn/home.php/Login/verify_c', path='auth-set') if __name__ == '__main__': t1 = Thread(target=main) t2 = Thread(target=main) t3 = Thread(target=main) t4 = Thread(target=main) t1.start() t2.start() t3.start() t4.start()
24.15873
79
0.660972
0
0
0
0
0
0
0
0
156
0.10143
43dd49ec321203c525ba8f13879673eb4d300e9f
3,912
py
Python
GeneralStats/example.py
haoruilee/statslibrary
01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a
[ "MIT" ]
58
2019-02-04T13:53:16.000Z
2022-02-24T02:59:55.000Z
GeneralStats/example.py
haoruilee/statslibrary
01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a
[ "MIT" ]
null
null
null
GeneralStats/example.py
haoruilee/statslibrary
01494043bc7fb82d4aa6d7d550a4e7dc2ac0503a
[ "MIT" ]
19
2019-03-21T01:54:55.000Z
2021-12-03T13:55:16.000Z
import GeneralStats as gs import numpy as np from scipy.stats import skew from scipy.stats import kurtosistest import pandas as pd if __name__ == "__main__": gen=gs.GeneralStats() data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) print("data = ", data) print("data1 = ", data1) res=gen.average(data,rowvar=True) res1=gen.average(data1,rowvar=True) print("data平均值 = ",res) print("data1平均值 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.median(data,rowvar=True) res1=gen.median(data1,rowvar=True) print("data中位值 = ",res) print("data1中位值 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.mode(data,rowvar=True) res1=gen.mode(data1,rowvar=True) print("data众数值 = ",res) print("data1众数值 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.quantile(data,0.5,rowvar=True,interpolation='lower') #若元素个数为偶数,则模式为'midpoint'的0.5分位数值等价于中位数 res1=gen.quantile(data1,0.5,rowvar=True,interpolation='lower') #若元素个数为奇数,则模式为'lower'的0.5分位数值等价于中位数 print("data 0.5分位数值 = ",res) print("data1 0.5分位数值 = ",res1) res=gen.quantile(data,0.25,rowvar=True,interpolation='lower') res1=gen.quantile(data1,0.25,rowvar=True,interpolation='lower') print("data 0.25分位数值s = ",res) print("data1 0.25分位数值 = ",res1) res=gen.quantile(data,0.75,rowvar=True,interpolation='lower') res1=gen.quantile(data1,0.75,rowvar=True,interpolation='lower') print("data 0.75分位数值 = ",res) print("data1 0.75分位数值 = ",res1) res=gen.quantile(data,1.0,rowvar=True,interpolation='lower') res1=gen.quantile(data1,1.0,rowvar=True,interpolation='lower') print("data 1.0分位数值 = ",res) print("data1 1.0分位数值 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.range(data,rowvar=True) res1=gen.range(data1,rowvar=True) print("data极差 = ",res) print("data1极差 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.variance(data,rowvar=True) res1=gen.variance(data1,rowvar=True) print("data方差 = ",res) print("data1方差 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.standard_dev(data,rowvar=True) res1=gen.standard_dev(data1,rowvar=True) print("data标准差 = ",res) print("data1标准差 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([1,2,3,4,5]) res=gen.skewness(data,rowvar=True) res1=gen.skewness(data1,rowvar=True) print("data偏度 = ",res) print("data1偏度 = ",res1) res=np.array([skew(data[0]),skew(data[1]),skew(data[2]),skew(data[3])]) print("使用scipy skew方法验证的data偏度 = ",res) res1=np.array(skew(data1)) print("使用scipy skew方法验证的data1偏度 = ",res1) data=np.array([[1, 1, 2, 2, 3],[2, 2, 3, 3, 5],[1, 4, 3, 3, 3],[2, 4, 5, 5, 3]]) data1=np.array([53, 61, 49, 66, 78, 47]) res=gen.kurtosis(data,rowvar=True) res1=gen.kurtosis(data1,rowvar=True) print("data峰度 = ",res) print("data1峰度 = ",res1) data_0=pd.Series(data[0]) data_1=pd.Series(data[1]) data_2=pd.Series(data[2]) data_3=pd.Series(data[3]) print("使用pandas kurt方法验证的data峰度 = ",[data_0.kurt(),data_1.kurt(),data_2.kurt(),data_3.kurt()]) data1=pd.Series(data1) print("使用pandas kurt方法验证的data1峰度 = ",data1.kurt())
36.222222
109
0.576431
0
0
0
0
0
0
0
0
925
0.218987
43ddbd75df809ab6f556d3498600ef7c94a80521
16,408
py
Python
bootstrap.py
tqchen/yarn-ec2
303f3980ad41770011b72532ed9f7c6bbe876508
[ "Apache-2.0" ]
35
2016-02-23T19:15:46.000Z
2021-01-01T02:57:43.000Z
bootstrap.py
tqchen/cloud-scripts
303f3980ad41770011b72532ed9f7c6bbe876508
[ "Apache-2.0" ]
4
2016-11-12T16:49:16.000Z
2018-11-02T21:20:23.000Z
bootstrap.py
tqchen/yarn-ec2
303f3980ad41770011b72532ed9f7c6bbe876508
[ "Apache-2.0" ]
25
2016-02-26T20:28:13.000Z
2020-07-26T12:02:34.000Z
#!/usr/bin/env python # encoding: utf-8 """ script to install all the necessary things for working on a linux machine with nothing Installing minimum dependencies """ import sys import os import logging import subprocess import xml.etree.ElementTree as ElementTree import xml.dom.minidom as minidom import socket import time import pwd ###---------------------------------------------------## # Configuration Section, will be modified by script # ###---------------------------------------------------## node_apt_packages = [ 'emacs', 'git', 'g++', 'make', 'python-numpy', 'libprotobuf-dev', 'libcurl4-openssl-dev'] # master only packages master_apt_packages = [ 'protobuf-compiler'] # List of r packages to be installed in master master_r_packages = [ 'r-base-dev', 'r-base', 'r-cran-statmod', 'r-cran-RCurl', 'r-cran-rjson' ] # download link of hadoop. hadoop_url = 'http://apache.claz.org/hadoop/common/hadoop-2.8.0/hadoop-2.8.0.tar.gz' hadoop_dir = 'hadoop-2.8.0' # customized installation script. # See optional installation scripts for options. def custom_master_install(): #install_spark() #install_r() pass # customized installation script for all nodes. def custom_all_nodes_install(): install_gcc() pass ###---------------------------------------------------## # Automatically set by script # ###---------------------------------------------------## USER_NAME = 'ubuntu' # setup variables MASTER = os.getenv('MY_MASTER_DNS', '') # node type the type of current node NODE_TYPE = os.getenv('MY_NODE_TYPE', 'm3.xlarge') NODE_VMEM = int(os.getenv('MY_NODE_VMEM', str(1024*15))) NODE_VCPU = int(os.getenv('MY_NODE_VCPU', '4')) AWS_ID = os.getenv('AWS_ACCESS_KEY_ID', 'undefined') AWS_KEY = os.getenv('AWS_SECRET_ACCESS_KEY', 'undefined') JAVA_HOME = os.getenv('JAVA_HOME') HADOOP_HOME = os.getenv('HADOOP_HOME') DISK_LIST = [('xvd' + chr(ord('b') + i)) for i in range(10)] ENVIRON = os.environ.copy() ###--------------------------------## # Optional installation scripts. # ###--------------------------------## def install_r(): if master_r_packages: sudo("apt-key adv --keyserver keyserver.ubuntu.com --recv-keys E084DAB9") sudo("echo deb https://cran.r-project.org/bin/linux/ubuntu trusty/ >>/etc/apt/sources.list") sudo('apt-get -y update') sudo('apt-get -y install %s' % (' '.join(master_r_packages))) def install_spark(): run('wget https://www.apache.org/dist/spark/spark-2.1.1/spark-2.1.1-bin-hadoop2.7.tgz') run('tar xf spark-2.1.1-bin-hadoop2.7.tgz') run('rm -rf spark-2.1.1-bin-hadoop2.7.tgz') with open('.bashrc', 'a') as fo: fo.write('\nexport PATH=${PATH}:spark-2.1.1-bin-hadoop2.7\n') def install_xgboost(): run('git clone --recursive https://github.com/dmlc/xgboost') run('cd xgboost; cp make/config.mk .; echo USE_S3=1 >> config.mk; make -j4') ### Script section ### def run(cmd): try: print cmd logging.info(cmd) proc = subprocess.Popen(cmd, shell=True, env = ENVIRON, stdout=subprocess.PIPE, stderr = subprocess.PIPE) out, err = proc.communicate() retcode = proc.poll() if retcode != 0: logging.error('Command %s returns %d' % (cmd,retcode)) logging.error(out) logging.error(err) else: print out except Exception as e: print(str(e)) logging.error('Exception running: %s' % cmd) logging.error(str(e)) pass def sudo(cmd): run('sudo %s' % cmd) ### Installation helpers ### def install_packages(pkgs): sudo('apt-get -y update') sudo('apt-get -y install %s' % (' '.join(pkgs))) # install g++4.9, needed for regex match. 
def install_gcc(): sudo('add-apt-repository -y ppa:ubuntu-toolchain-r/test') sudo('apt-get -y update') sudo('apt-get -y install g++-4.9') def install_java(): """ install java and setup environment variables Returns environment variables that needs to be exported """ if not os.path.exists('jdk1.8.0_131'): run('wget --no-check-certificate --no-cookies'\ ' --header \"Cookie: oraclelicense=accept-securebackup-cookie\"'\ ' http://download.oracle.com/otn-pub/java/jdk/8u131-b11/d54c1d3a095b4ff2b6607d096fa80163/jdk-8u131-linux-x64.tar.gz') run('tar xf jdk-8u131-linux-x64.tar.gz') run('rm -f jdk-8u131-linux-x64.tar.gz') global JAVA_HOME if JAVA_HOME is None: JAVA_HOME = os.path.abspath('jdk1.8.0_131') return [('JAVA_HOME', JAVA_HOME)] def install_hadoop(is_master): def update_site(fname, rmap): """ update the site script """ try: tree = ElementTree.parse(fname) root = tree.getroot() except Exception: cfg = ElementTree.Element("configuration") tree = ElementTree.ElementTree(cfg) root = tree.getroot() rset = set() for prop in root.getiterator('property'): prop = dict((p.tag, p) for p in prop) name = prop['name'].text.strip() if name in rmap: prop['value'].text = str(rmap[name]) rset.add(name) for name, text in rmap.iteritems(): if name in rset: continue prop = ElementTree.SubElement(root, 'property') ElementTree.SubElement(prop, 'name').text = name ElementTree.SubElement(prop, 'value').text = str(text) rough_string = ElementTree.tostring(root, 'utf-8') reparsed = minidom.parseString(rough_string) pretty = reparsed.toprettyxml(indent='\t') fo = open(fname, 'w') fo.write(pretty) fo.close() def setup_hadoop_site(master, hadoop_dir, hdfs_dir, vcpu, vmem): """ setup hadoop side given the parameters Parameters ---------- master: the dns to master uri hadoop_dir: the directory to store temp files hdfs_dir: the directories for hdfs vcpu: the number of cpus current machine have vmem: the memory(MB) current machine have """ if vmem < 4 * 1024: reserved_ram = 256 elif vmem < 8 * 1024: reserved_ram = 1 * 1024 elif vmem < 24 * 1024 : reserved_ram = 2 * 1024 elif vmem < 48 * 1024: reserved_ram = 2 * 1024 elif vmem < 64 * 1024: reserved_ram = 6 * 1024 else: reserved_ram = 8 * 1024 ram_per_container = (vmem - reserved_ram) / vcpu if is_master: vcpu = vcpu - 2 tmp_dir = hadoop_dir[0] core_site = { 'fs.defaultFS': 'hdfs://%s:9000/' % master, 'fs.s3n.impl': 'org.apache.hadoop.fs.s3native.NativeS3FileSystem', 'hadoop.tmp.dir': tmp_dir } if AWS_ID != 'undefined': core_site['fs.s3n.awsAccessKeyId'] = AWS_ID core_site['fs.s3n.awsSecretAccessKey'] = AWS_KEY update_site('%s/etc/hadoop/core-site.xml' % HADOOP_HOME, core_site) hdfs_site = { 'dfs.data.dir': ','.join(['%s/data' % d for d in hdfs_dir]), 'dfs.permissions': 'false', 'dfs.replication': '1' } update_site('%s/etc/hadoop/hdfs-site.xml' % HADOOP_HOME, hdfs_site) yarn_site = { 'yarn.resourcemanager.resource-tracker.address': '%s:8025' % master, 'yarn.resourcemanager.scheduler.address': '%s:8030' % master, 'yarn.resourcemanager.address': '%s:8032' % master, 'yarn.scheduler.minimum-allocation-mb': 512, 'yarn.scheduler.maximum-allocation-mb': 640000, 'yarn.scheduler.minimum-allocation-vcores': 1, 'yarn.scheduler.maximum-allocation-vcores': 32, 'yarn.nodemanager.resource.memory-mb': vcpu * ram_per_container, 'yarn.nodemanager.resource.cpu-vcores': vcpu, 'yarn.log-aggregation-enable': 'true', 'yarn.nodemanager.vmem-check-enabled': 'false', 'yarn.nodemanager.aux-services': 'mapreduce_shuffle', 'yarn.nodemanager.aux-services.mapreduce.shuffle.class': 
'org.apache.hadoop.mapred.ShuffleHandler', 'yarn.nodemanager.remote-app-log-dir': os.path.join(tmp_dir, 'logs'), 'yarn.nodemanager.log-dirs': os.path.join(tmp_dir, 'userlogs'), 'yarn.nodemanager.local-dirs': ','.join(['%s/yarn/nm-local-dir' % d for d in hadoop_dir]) } update_site('%s/etc/hadoop/yarn-site.xml' % HADOOP_HOME, yarn_site) mapred_site = { 'mapreduce.application.classpath' : ':'.join(['$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*', '$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*', '$HADOOP_MAPRED_HOME/share/hadoop/tools/lib/*']), 'yarn.app.mapreduce.am.resource.mb': 2 * ram_per_container, 'yarn.app.mapreduce.am.command-opts': '-Xmx%dm' % int(0.8 * 2 * ram_per_container), 'mapreduce.framework.name': 'yarn', 'mapreduce.map.cpu.vcores': 1, 'mapreduce.map.memory.mb': ram_per_container, 'mapreduce.map.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container), 'mapreduce.reduce.cpu.vcores': 1, 'mapreduce.reduce.memory.mb': 2 * ram_per_container, 'mapreduce.reduce.java.opts': '-Xmx%dm' % int(0.8 * ram_per_container) } update_site('%s/etc/hadoop/mapred-site.xml' % HADOOP_HOME, mapred_site) capacity_site = { 'yarn.scheduler.capacity.resource-calculator': 'org.apache.hadoop.yarn.util.resource.DominantResourceCalculator' } update_site('%s/etc/hadoop/capacity-scheduler.xml' % HADOOP_HOME, capacity_site) fo = open('%s/etc/hadoop/hadoop-env.sh' % HADOOP_HOME, 'w') fo.write('export HADOOP_CLASSPATH=$HADOOP_CLASSPATH:$HADOOP_PREFIX/share/hadoop/tools/lib/*\n') fo.write('export HADOOP_LOG_DIR=%s/log\n' % tmp_dir) fo.write('export YARN_LOG_DIR=%s/log\n' % tmp_dir) fo.write('export JAVA_HOME=\"%s\"\n' % JAVA_HOME) fo.close() fo = open('%s/etc/hadoop/slaves' % HADOOP_HOME, 'w') fo.write(master + '\n') fo.close() def run_install(): if not os.path.exists('hadoop-2.8.0'): run('wget %s' % hadoop_url) run('tar xf hadoop-2.8.0.tar.gz') run('rm -f hadoop-2.8.0.tar.gz') global HADOOP_HOME if HADOOP_HOME is None: HADOOP_HOME = os.path.abspath('hadoop-2.8.0') env = [('HADOOP_HOME', HADOOP_HOME)] env += [('HADOOP_PREFIX', HADOOP_HOME)] env += [('HADOOP_MAPRED_HOME', HADOOP_HOME)] env += [('HADOOP_COMMON_HOME', HADOOP_HOME)] env += [('HADOOP_HDFS_HOME', HADOOP_HOME)] env += [('YARN_HOME', HADOOP_HOME)] env += [('YARN_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)] env += [('HADOOP_CONF_DIR', '%s/etc/hadoop' % HADOOP_HOME)] disks = ['/disk/%s' % d for d in DISK_LIST if os.path.exists('/dev/%s' % d)] setup_hadoop_site(MASTER, ['%s/hadoop' % d for d in disks], ['%s/hadoop/dfs' % d for d in disks], NODE_VCPU, NODE_VMEM) return env return run_install() def regsshkey(fname): for dns in (open(fname).readlines() + ['localhost', '0.0.0.0']): try: run('ssh-keygen -R %s' % dns.strip()) except: pass run('ssh-keyscan %s >> ~/.ssh/known_hosts' % dns.strip()) # main script to install all dependencies def install_main(is_master): if is_master: install_packages(master_apt_packages + node_apt_packages) else: install_packages(node_apt_packages) env = [] env += install_java() env += install_hadoop(is_master) path = ['$HADOOP_HOME/bin', '$HADOOP_HOME/sbin', '$JAVA_HOME/bin'] env += [('LD_LIBRARY_PATH', '$HADOOP_HOME/native/lib')] env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:$HADOOP_HDFS_HOME/lib/native:$JAVA_HOME/jre/lib/amd64/server')] env += [('LD_LIBRARY_PATH', '${LD_LIBRARY_PATH}:/usr/local/lib')] env += [('LIBHDFS_OPTS', '--Xmx128m')] env += [('MY_MASTER_DNS', MASTER)] env += [('MY_NODE_TYPE', NODE_TYPE)] env += [('MY_NODE_VMEM', str(NODE_VMEM))] env += [('MY_NODE_VCPU', str(NODE_VCPU))] if AWS_ID != 'undefined': env += 
[('AWS_ACCESS_KEY_ID', AWS_ID)] if AWS_KEY != 'undefined': env += [('AWS_SECRET_ACCESS_KEY', AWS_KEY)] # setup environments fo = open('.hadoop_env', 'w') for k, v in env: fo.write('export %s=%s\n' % (k,v)) ENVIRON[k] = v fo.write('export PATH=$PATH:%s\n' % (':'.join(path))) fo.write('export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib\n') fo.close() for l in open('.bashrc'): if l.find('.hadoop_env') != -1: return run('echo source ~/.hadoop_env >> ~/.bashrc') # allow ssh, if they already share the key. key_setup = """ [ -f ~/.ssh/id_rsa ] || (ssh-keygen -q -t rsa -N '' -f ~/.ssh/id_rsa && cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys) """ run(key_setup) regsshkey('%s/etc/hadoop/slaves' % HADOOP_HOME) # end of instalation. # Make startup script for bulding def make_startup_script(is_master): assert JAVA_HOME is not None assert HADOOP_HOME is not None assert NODE_VCPU is not None assert NODE_VMEM is not None disks = [] cmds = [] if is_master: cmds.append('$HADOOP_HOME/sbin/stop-all.sh') for d in DISK_LIST: if os.path.exists('/dev/%s' % d): cmds.append('sudo umount /dev/%s' % d) cmds.append('sudo mkfs -t ext4 /dev/%s' % d) cmds.append('sudo mkdir -p /disk/%s' % d) cmds.append('sudo mount /dev/%s /disk/%s' % (d, d)) disks.append('/disk/%s' % d) for d in disks: cmds.append('sudo mkdir -p %s/hadoop' %d) cmds.append('sudo chown ubuntu:ubuntu %s/hadoop' % d) cmds.append('sudo mkdir -p %s/tmp' %d) cmds.append('sudo chown ubuntu:ubuntu %s/tmp' % d) cmds.append('rm -rf %s/hadoop/dfs' % d) cmds.append('mkdir %s/hadoop/dfs' % d) cmds.append('mkdir %s/hadoop/dfs/name' % d) cmds.append('mkdir %s/hadoop/dfs/data' % d) # run command if is_master: cmds.append('$HADOOP_HOME/bin/hadoop namenode -format') cmds.append('$HADOOP_HOME/sbin/start-all.sh') else: cmds.append('export HADOOP_LIBEXEC_DIR=$HADOOP_HOME/libexec &&'\ ' $HADOOP_HOME/sbin/yarn-daemon.sh --config $HADOOP_HOME/etc/hadoop start nodemanager') with open('startup.sh', 'w') as fo: fo.write('#!/bin/bash\n') fo.write('set -v\n') fo.write('\n'.join(cmds)) run('chmod +x startup.sh') run('./startup.sh') def main(): global MASTER logging.basicConfig(filename = 'bootstrap.log', level = logging.INFO, format='%(asctime)s %(levelname)s %(message)s') if MASTER == '': is_master = True MASTER = socket.getfqdn() logging.info('assuming master is myself as %s' % MASTER) else: is_master = socket.getfqdn() == MASTER tstart = time.time() install_main(is_master) tmid = time.time() logging.info('installation finishes in %g secs' % (tmid - tstart)) make_startup_script(is_master) ENVIRON['HADOOP_HOME'] = HADOOP_HOME ENVIRON['JAVA_HOME'] = JAVA_HOME tend = time.time() if is_master: custom_master_install() custom_all_nodes_install() logging.info('boostrap finishes in %g secs' % (tend - tmid)) logging.info('all finishes in %g secs' % (tend - tstart)) if __name__ == '__main__': pw_record = pwd.getpwnam(USER_NAME) user_name = pw_record.pw_name user_home_dir = pw_record.pw_dir user_uid = pw_record.pw_uid user_gid = pw_record.pw_gid env = os.environ.copy() cwd = user_home_dir ENVIRON['HOME'] = user_home_dir os.setgid(user_gid) os.setuid(user_uid) os.chdir(user_home_dir) main()
37.461187
133
0.585629
0
0
0
0
0
0
0
0
7,199
0.438749
43de15a64fd73557d8ace8fe63e08534f03c9747
400
py
Python
intro/matplotlib/examples/plot_good.py
zmoon/scipy-lecture-notes
75a89ddedeb48930dbdb6fe25a76e9ef0587ae21
[ "CC-BY-4.0" ]
2,538
2015-01-01T04:58:41.000Z
2022-03-31T21:06:05.000Z
intro/matplotlib/examples/plot_good.py
zmoon/scipy-lecture-notes
75a89ddedeb48930dbdb6fe25a76e9ef0587ae21
[ "CC-BY-4.0" ]
362
2015-01-18T14:16:23.000Z
2021-11-18T16:24:34.000Z
intro/matplotlib/examples/plot_good.py
zmoon/scipy-lecture-notes
75a89ddedeb48930dbdb6fe25a76e9ef0587ae21
[ "CC-BY-4.0" ]
1,127
2015-01-05T14:39:29.000Z
2022-03-25T08:38:39.000Z
""" A simple, good-looking plot =========================== Demoing some simple features of matplotlib """ import numpy as np import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt fig = plt.figure(figsize=(5, 4), dpi=72) axes = fig.add_axes([0.01, 0.01, .98, 0.98]) X = np.linspace(0, 2, 200) Y = np.sin(2*np.pi*X) plt.plot(X, Y, lw=2) plt.ylim(-1.1, 1.1) plt.grid() plt.show()
18.181818
44
0.625
0
0
0
0
0
0
0
0
112
0.28
43de29ccab29a96dd8a22a7b82fb926f80943d99
4,087
py
Python
pfio/_context.py
HiroakiMikami/pfio
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
[ "MIT" ]
24
2020-05-23T13:00:27.000Z
2022-02-17T05:20:51.000Z
pfio/_context.py
HiroakiMikami/pfio
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
[ "MIT" ]
88
2020-05-01T06:56:50.000Z
2022-03-16T07:15:34.000Z
pfio/_context.py
HiroakiMikami/pfio
1ac997dcba7babd5d91dd8c4f2793d27a6bab69b
[ "MIT" ]
9
2020-05-07T05:47:35.000Z
2022-02-09T05:42:56.000Z
import os import re from typing import Tuple from pfio._typing import Union from pfio.container import Container from pfio.io import IO, create_fs_handler class FileSystemDriverList(object): def __init__(self): # TODO(tianqi): dynamically create this list # as well as the patterns upon loading the pfio module. self.scheme_list = ["hdfs", "posix"] self.posix_pattern = re.compile(r"file:\/\/(?P<path>.+)") self.hdfs_pattern = re.compile(r"(?P<path>hdfs:\/\/.+)") self.pattern_list = {"hdfs": self.hdfs_pattern, "posix": self.posix_pattern, } def _determine_fs_type(self, path: str) -> Tuple[str, str, bool]: if None is not path: for fs_type, pattern in self.pattern_list.items(): ret = pattern.match(path) if ret: return (fs_type, ret.groupdict()["path"], True) return ("posix", path, False) def format_path(self, fs: IO, path: str) -> Tuple[str, bool]: fs_type = fs.type if fs_type in self.pattern_list.keys(): pattern = self.pattern_list[fs_type] ret = pattern.match(path) if ret: return (ret.groupdict()["path"], True) else: return (path, False) else: return (path, False) def get_handler_from_path(self, path: str) -> Tuple[IO, str, bool]: (fs_type, actual_path, is_URI) = self._determine_fs_type(path) handler = create_fs_handler(fs_type) return (handler, actual_path, is_URI) def get_handler_for_root(self, uri_or_handler_name: str) -> Tuple[IO, str, bool]: if uri_or_handler_name in self.pattern_list.keys(): return (create_fs_handler(uri_or_handler_name), "", False) else: (new_handler, actual_path, is_URI) = self.get_handler_from_path( uri_or_handler_name) new_handler.root = actual_path return (new_handler, actual_path, is_URI) def is_supported_scheme(self, scheme: str) -> bool: return scheme in self.scheme_list class DefaultContext(object): def __init__(self): self._fs_handler_list = FileSystemDriverList() self._root = "" self._default_context = \ self._fs_handler_list.get_handler_for_root("posix")[0] def set_root(self, uri_or_handler: Union[str, IO]) -> None: # TODO(check) if root is directory if isinstance(uri_or_handler, IO): handler = uri_or_handler self._root = "" else: (handler, self._root, is_URI) = \ self.get_handler_by_name(uri_or_handler) assert handler is not None if self._root: if not handler.isdir(self._root): raise RuntimeError("the URI does not point to a directory") self._default_context = handler def get_handler(self, path: str = "") -> Tuple[IO, str]: (handler, formatted_path, is_URI) = self._fs_handler_list.get_handler_from_path(path) if not is_URI: actual_path = os.path.join(self._root, formatted_path) return (self._default_context, actual_path) else: return (handler, formatted_path) def open_as_container(self, path: str) -> Container: (handler, formatted_path, is_URI) = self._fs_handler_list.get_handler_from_path(path) if not is_URI: actual_path = os.path.join(self._root, formatted_path) handler = self._default_context else: actual_path = formatted_path self._root = "" return handler.open_as_container(actual_path) def get_handler_by_name(self, path: str) -> Tuple[IO, str, bool]: return self._fs_handler_list.get_handler_for_root(path) def get_root_dir(self) -> str: return self._root def is_supported_scheme(self, scheme: str) -> bool: return self._fs_handler_list.is_supported_scheme(scheme)
35.232759
79
0.614387
3,925
0.960362
0
0
0
0
0
0
282
0.068999
43e09c3343b0c13466ea8190e66d19dfafb80ae6
9,330
py
Python
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
Josue-Zea/tytus
f9e4be9a8c03eb698fade7a748972e4f52d46685
[ "MIT" ]
35
2020-12-07T03:11:43.000Z
2021-04-15T17:38:16.000Z
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
Josue-Zea/tytus
f9e4be9a8c03eb698fade7a748972e4f52d46685
[ "MIT" ]
47
2020-12-09T01:29:09.000Z
2021-01-13T05:37:50.000Z
parser/fase2/team19/Analisis_Ascendente/Instrucciones/PLPGSQL/Ifpl.py
Josue-Zea/tytus
f9e4be9a8c03eb698fade7a748972e4f52d46685
[ "MIT" ]
556
2020-12-07T03:13:31.000Z
2021-06-17T17:41:10.000Z
import Analisis_Ascendente.Instrucciones.PLPGSQL.EjecutarFuncion as EjecutarFuncion from Analisis_Ascendente.Instrucciones.PLPGSQL.plasignacion import Plasignacion from Analisis_Ascendente.Instrucciones.instruccion import Instruccion from Analisis_Ascendente.Instrucciones.Create.createTable import CreateTable from Analisis_Ascendente.Instrucciones.Create.createDatabase import CreateReplace from Analisis_Ascendente.Instrucciones.Select.select import Select from Analisis_Ascendente.Instrucciones.Use_Data_Base.useDB import Use from Analisis_Ascendente.Instrucciones.Select.select1 import selectTime import Analisis_Ascendente.Instrucciones.Insert.insert as insert_import from Analisis_Ascendente.Instrucciones.Select.Select2 import Selectp3 from Analisis_Ascendente.Instrucciones.Select import selectInst from Analisis_Ascendente.Instrucciones.Expresiones.Expresion import Expresion from Analisis_Ascendente.Instrucciones.Drop.drop import Drop from Analisis_Ascendente.Instrucciones.Alter.alterDatabase import AlterDatabase from Analisis_Ascendente.Instrucciones.Alter.alterTable import AlterTable from Analisis_Ascendente.Instrucciones.Update.Update import Update from Analisis_Ascendente.Instrucciones.Delete.delete import Delete from Analisis_Ascendente.Instrucciones.Select import SelectDist from Analisis_Ascendente.Instrucciones.Type.type import CreateType #----------------------------------Imports FASE2-------------------------- from Analisis_Ascendente.Instrucciones.Index.Index import Index from Analisis_Ascendente.Instrucciones.PLPGSQL.createFunction import CreateFunction from Analisis_Ascendente.Instrucciones.Index.DropIndex import DropIndex from Analisis_Ascendente.Instrucciones.Index.AlterIndex import AlterIndex from Analisis_Ascendente.Instrucciones.PLPGSQL.DropProcedure import DropProcedure from Analisis_Ascendente.Instrucciones.PLPGSQL.CreateProcedure import CreateProcedure from Analisis_Ascendente.Instrucciones.PLPGSQL.CasePL import CasePL from Analisis_Ascendente.Instrucciones.PLPGSQL.plCall import plCall from Analisis_Ascendente.Instrucciones.PLPGSQL.dropFunction import DropFunction import C3D.GeneradorEtiquetas as GeneradorEtiquetas import C3D.GeneradorTemporales as GeneradorTemporales import Analisis_Ascendente.reportes.Reportes as Reportes class Ifpl(Instruccion): ''' #1 If #2 If elif else #3 If else ''' def __init__(self, caso,e_if,s_if,elif_s,s_else, fila, columna): self.caso = caso self.e_if = e_if self.s_if = s_if self.elif_s = elif_s self.s_else = s_else self.fila = fila self.columna = columna def ejecutar(self,tsglobal,ts, consola, exceptions): try: if self.caso == 1: resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions) if resultado == True: for x in range(0, len(self.s_if)): self.procesar_instrucciones(self.s_if[x],ts,consola,exceptions,tsglobal) else: pass elif self.caso == 2: print('hola') else: resultado = Expresion.Resolver(self.e_if, ts, consola, exceptions) if resultado == True: for x in range(0, len(self.s_if)): self.procesar_instrucciones(self.s_if[x], ts, consola, exceptions,tsglobal) else: for x in range(0, len(self.s_else)): self.procesar_instrucciones(self.s_else[x],ts,consola,exceptions,tsglobal) except: consola.append("XX000 : internal_error") def procesar_instrucciones(self,instr,ts,consola,exceptions,tsglobal): if isinstance(instr, CreateReplace): CreateReplace.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, Select): if instr.caso == 1: consola.append('caso 1') selectTime.ejecutar(instr, ts, consola, exceptions, True) elif instr.caso 
== 2: consola.append('caso 2') variable = SelectDist.Select_Dist() SelectDist.Select_Dist.ejecutar(variable, instr, ts, consola, exceptions) elif instr.caso == 3: consola.append('caso 3') variable = selectInst.Select_inst() selectInst.Select_inst.ejecutar(variable, instr, ts, consola, exceptions) elif instr.caso == 4: consola.append('caso 4') Selectp3.ejecutar(instr, ts, consola, exceptions, True) elif instr.caso == 6: consola.append('caso 6') elif isinstance(instr, CreateTable): CreateTable.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, Use): Use.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, insert_import.InsertInto): insert_import.InsertInto.ejecutar(instr, ts, consola, exceptions) # print("Ejecute un insert") elif isinstance(instr, Drop): Drop.ejecutar(instr, ts, consola, exceptions) # print("Ejecute drop") elif isinstance(instr, AlterDatabase): AlterDatabase.ejecutar(instr, ts, consola, exceptions) # print("Ejecute alter database") elif isinstance(instr, AlterTable): AlterTable.ejecutar(instr, ts, consola, exceptions) # print("Ejecute alter table") elif isinstance(instr, Delete): Delete.ejecutar(instr, ts, consola, exceptions) # print("Ejecute delete") elif isinstance(instr, Update): Update.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, CreateType): CreateType.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, Index): Index.ejecutar(instr, ts, consola, exceptions) # print("Ejecute Index") elif isinstance(instr, CreateFunction): CreateFunction.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, DropFunction): DropFunction.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, DropIndex): DropIndex.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, AlterIndex): AlterIndex.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, DropProcedure): DropProcedure.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, CreateProcedure): CreateProcedure.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, CasePL): CasePL.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, plCall): plCall.ejecutar(instr, ts, consola, exceptions) elif isinstance(instr, Plasignacion): EjecutarFuncion.ejecutarPlasignacionIf(instr,ts,consola,exceptions,tsglobal) elif isinstance(instr, Ifpl): instr.ejecutar(tsglobal,ts,consola,exceptions) else: return def getC3D(self, lista_optimizaciones_C3D): etiqueta_if = GeneradorEtiquetas.nueva_etiqueta() etiqueta_else = GeneradorEtiquetas.nueva_etiqueta() etiqueta_salida = GeneradorEtiquetas.nueva_etiqueta() e_if = self.e_if.getC3D(lista_optimizaciones_C3D) noOptimizado = '''if %s: goto .%s <br> goto .%s<br> label .%s<br> &lt;instrucciones&gt;<br> label .%s''' % (e_if['tmp'], etiqueta_if, etiqueta_else, etiqueta_if, etiqueta_else) optimizado = '''if not %s: goto .%s <br> &lt;instrucciones&gt;<br> label .%s''' % (e_if['tmp'], etiqueta_else, etiqueta_else) optimizacion1 = Reportes.ListaOptimizacion(noOptimizado, optimizado, Reportes.TipoOptimizacion.REGLA3) lista_optimizaciones_C3D.append(optimizacion1) sentencias_if = '' for sentencias in self.s_if: sentencias_if += sentencias.getC3D(lista_optimizaciones_C3D) c3d = ''' %s if not %s: goto .%s %s goto .%s ''' % (e_if['code'], e_if['tmp'], etiqueta_else, sentencias_if, etiqueta_salida) if self.s_else is not None: sentencias_else = '' for sentencias in self.s_else: sentencias_else += sentencias.getC3D(lista_optimizaciones_C3D) c3d += ''' label .%s %s label .%s''' % (etiqueta_else, sentencias_else, 
etiqueta_salida) else: c3d += ''' label .%s label .%s ''' % (etiqueta_else, etiqueta_salida) return c3d def get_quemado(self): sententias_if = '' for sentencia in self.s_if: sententias_if += sentencia.get_quemado() + ';\n' quemado = ''' if %s then %s ''' % (self.e_if.get_quemado(), sententias_if) if self.s_else is not None: sentencias_else = '' for sentencia in self.s_else: sentencias_else += sentencia.get_quemado() + ';\n' quemado += '''ELSE %s ''' % sentencias_else quemado += ' end if' return quemado
47.121212
111
0.653805
7,008
0.751125
0
0
0
0
0
0
754
0.080815
43e0bf1b8f706e0abd42a5ac8a65294eb668c3ab
183
py
Python
epages_client/dataobjects/enum_fetch_operator.py
vilkasgroup/epages_client
10e63d957ee45dc5d4df741064806f724fb1be1f
[ "MIT" ]
3
2018-01-26T13:44:26.000Z
2020-05-13T13:58:19.000Z
epages_client/dataobjects/enum_fetch_operator.py
vilkasgroup/epages_client
10e63d957ee45dc5d4df741064806f724fb1be1f
[ "MIT" ]
53
2018-02-05T10:59:22.000Z
2022-01-01T19:31:08.000Z
epages_client/dataobjects/enum_fetch_operator.py
vilkasgroup/epages_client
10e63d957ee45dc5d4df741064806f724fb1be1f
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- from __future__ import unicode_literals class FetchOperator(object): '''Defines values for fetch operators''' ADD = 1 REMOVE = 2 REPLACE = 3
18.3
44
0.650273
116
0.63388
0
0
0
0
0
0
63
0.344262
43e232a6058aefed0715e6e5fea4ed4fd550c388
6,067
py
Python
pyhwpscan/hwp_scan.py
orca-eaa5a/dokkaebi_scanner
756314376e2cbbce6c03fd908ebd0b8cc27aa7fc
[ "MIT" ]
null
null
null
pyhwpscan/hwp_scan.py
orca-eaa5a/dokkaebi_scanner
756314376e2cbbce6c03fd908ebd0b8cc27aa7fc
[ "MIT" ]
1
2022-02-17T15:01:29.000Z
2022-02-20T07:15:31.000Z
pyhwpscan/hwp_scan.py
orca-eaa5a/dokkaebi_scanner
756314376e2cbbce6c03fd908ebd0b8cc27aa7fc
[ "MIT" ]
null
null
null
from threading import current_thread from jsbeautifier.javascript.beautifier import remove_redundant_indentation from pyparser.oleparser import OleParser from pyparser.hwp_parser import HwpParser from scan.init_scan import init_hwp5_scan from scan.bindata_scanner import BinData_Scanner from scan.jscript_scanner import JS_Scanner from scan.paratext_scanner import ParaText_Scanner import zipfile import os import sys import platform from common.errors import * from utils.dumphex import print_hexdump js_scanner = None bindata_scanner = None paratext_scanner = None _platform = None binary_info = { "type": "", "p": None } def cmd_handler(cmdline): global binary_info global js_scanner global bindata_scanner global paratext_scanner global _platform ty = binary_info["type"] parser = binary_info["p"] s_cmd = cmdline.split(" ") cmd = s_cmd[0] arg = s_cmd[1:] if "windows" in _platform: os.system('cls') else: os.system('clear') print(">> "+cmdline) if cmd == "help": print("> tree") print(" Print the structure of target Binary") print("> dump [binary_name] [directory]") print(" Dump OLE or Zipped Binary at specific direcotry (default is current direcotry)") print("> show-hex [binary_name]") print(" Print hexcidecimal view of specific OLE or Zipped Binary") print("> scan") print(" re-scanning the target file") print("> exit") print(" quit command liner") return 1 elif cmd == "clear": if "windows" in _platform: os.system('cls') else: os.system('clear') return 0 elif cmd == "tree": if ty == "hwp": parser.ole_container.print_dir_entry_all() else: for file in parser.filelist: print(file.filename) return 0 elif cmd == "dump": if len(arg) > 1: binary_name, target_dir = arg[0], arg[1] else: binary_name, target_dir = arg[0], None if not target_dir: target_dir = os.getcwd() if ty == "hwp": stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream() else: targ = "" for file in parser.filelist: fname = file.filename.split("/")[-1] if fname == binary_name: targ = file.filename break if not targ: print("no file exist") return 0 stream = parser.read(targ) with open(target_dir+"/"+binary_name, "wb") as f: f.write(stream) print("dump succeed..") return 1 elif cmd == "show-hex": binary_name = arg[0] if ty == "hwp": stream = parser.ole_container.get_dir_entry_by_name(binary_name).get_decompressed_stream() else: stream = parser.read(binary_name) print_hexdump(stream) return 1 elif cmd == "scan": if ty == "hwp": bindata_scanner.scan() js_scanner.scan() else: paratext_scanner.scan() return 1 elif cmd == "exit": return -1 else: print("unknown command..") return 0 print() class HWPScanner: def __init__(self) -> None: self.__platform__ = platform.platform() self.hwpx_flag = False self.ole_parser = OleParser() self.hwp_parser = None pass def parse_hwpdoc(self, file_name): self.file_name = file_name self.ole_parser.read_ole_binary(file_name) try: self.ole_parser.parse() self.hwp_parser = HwpParser(self.ole_parser) self.hwp_parser.parse() if not init_hwp5_scan(self.hwp_parser.hwp_header): exit(-1) except: self.hwpx_docs = zipfile.ZipFile(self.file_name, "r") self.hwpx_flag = True pass ''' def parse_hwpdoc(self): try: self.hwp_parser = HwpParser(self.ole_parser) self.hwp_parser.parse() if not init_hwp5_scan(self.hwp_parser.hwp_header): exit(-1) except: self.hwpx_docs = zipfile.ZipFile(self.file_name, "r") self.hwpx_flag = True pass ''' def setup_scanner(self): if not self.hwpx_flag: self.js_scanner = JS_Scanner(self.hwp_parser) self.bindata_scanner = BinData_Scanner(self.hwp_parser) else: 
self.paratext_scanner = ParaText_Scanner(self.hwpx_docs) def get_file_structure(self): strt = {} if not self.hwpx_flag: self.ole_parser.get_dir_entry_all(strt, entry_id=0, depth=0) else: for _file in self.hwpx_docs.filelist: _path = os.path.split( _file.filename) if _path[0] not in strt: # root if _path[0]: strt[_path[0]] = {} else: strt[_path[1]] = _file.file_size continue cur_strt = strt[_path[0]] for path in _path: if path not in strt: if path == _path[-1]: cur_strt[path] = _file.file_size else: cur_strt[path] = {} cur_strt = cur_strt[path] else: cur_strt = strt[path] return strt def scan(self): scan_result = "" if not self.hwpx_flag: scan_result += self.js_scanner.scan() scan_result += self.bindata_scanner.scan() else: scan_result += self.paratext_scanner.scan() return scan_result
29.309179
102
0.543926
2,644
0.4358
0
0
0
0
0
0
916
0.150981
43e2d67fdf43b1951abb85a9aaab6711fb8852be
1,132
py
Python
tests/core/test_plugins.py
franalgaba/nile
f771467f27f03c8d20b8032bac64b3ab60436d3c
[ "MIT" ]
null
null
null
tests/core/test_plugins.py
franalgaba/nile
f771467f27f03c8d20b8032bac64b3ab60436d3c
[ "MIT" ]
null
null
null
tests/core/test_plugins.py
franalgaba/nile
f771467f27f03c8d20b8032bac64b3ab60436d3c
[ "MIT" ]
null
null
null
""" Tests for plugins in core module. Only unit tests for now. """ from unittest.mock import patch import click from nile.core.plugins import get_installed_plugins, load_plugins, skip_click_exit def test_skip_click_exit(): def dummy_method(a, b): return a + b dummy_result = dummy_method(1, 2) decorated = skip_click_exit(dummy_method) decorated_result = decorated(1, 2) assert callable(decorated) assert dummy_result == decorated_result def testget_installed_plugins(): class Dummy: value = "nile.core.plugins.get_installed_plugins" name = "get_installed_plugins" with patch("nile.core.plugins.entry_points", return_value=[Dummy()]): installed_plugins = get_installed_plugins() assert "get_installed_plugins" in installed_plugins def test_load_plugins(): @click.group() def cli(): """Nile CLI group.""" pass def dummy(): print("dummy_result") with patch( "nile.core.plugins.get_installed_plugins", return_value={"dummy": dummy} ): app = load_plugins(cli) assert callable(app)
22.64
82
0.681095
109
0.09629
0
0
72
0.063604
0
0
269
0.237633
43e2e7854a4f56963d0c0900b0d6355f030a3675
339
py
Python
commands/source.py
Open-Source-eUdeC/UdeCursos-bot
f900073044e1c74532af532618672501c0a43a13
[ "MIT" ]
3
2022-03-01T17:14:06.000Z
2022-03-15T21:15:44.000Z
commands/source.py
Open-Source-eUdeC/UdeCursos-bot
f900073044e1c74532af532618672501c0a43a13
[ "MIT" ]
1
2022-03-07T20:59:20.000Z
2022-03-07T20:59:20.000Z
commands/source.py
Open-Source-eUdeC/UdeCursos-bot
f900073044e1c74532af532618672501c0a43a13
[ "MIT" ]
2
2022-02-28T19:32:54.000Z
2022-03-12T20:19:39.000Z
async def source(update, context): source_code = "https://github.com/Open-Source-eUdeC/UdeCursos-bot" await context.bot.send_message( chat_id=update.effective_chat.id, text=( "*UdeCursos bot v2.0*\n\n" f"Código fuente: [GitHub]({source_code})" ), parse_mode="Markdown" )
30.818182
70
0.60177
0
0
0
0
0
0
339
0.997059
130
0.382353
43e3929f6d656cd5f3e6cf6054493ace5b92bd70
1,255
py
Python
history/tests.py
MPIB/Lagerregal
3c950dffcf4fa164008c5a304c4839bc282a3388
[ "BSD-3-Clause" ]
24
2017-03-19T16:17:37.000Z
2021-11-07T15:35:33.000Z
history/tests.py
MPIB/Lagerregal
3c950dffcf4fa164008c5a304c4839bc282a3388
[ "BSD-3-Clause" ]
117
2016-04-19T12:35:10.000Z
2022-02-22T13:19:05.000Z
history/tests.py
MPIB/Lagerregal
3c950dffcf4fa164008c5a304c4839bc282a3388
[ "BSD-3-Clause" ]
11
2017-08-08T12:11:39.000Z
2021-12-08T05:34:06.000Z
from django.contrib.contenttypes.models import ContentType from django.test import TestCase from django.test.client import Client from model_mommy import mommy from devices.models import Device from users.models import Lageruser class HistoryTests(TestCase): def setUp(self): self.client = Client() self.admin = Lageruser.objects.create_superuser('test', '[email protected]', "test") self.client.login(username="test", password="test") def test_global_view(self): response = self.client.get('/history/global/') self.assertEqual(response.status_code, 200) def test_list_view(self): content_type = ContentType.objects.get(model='device') device = mommy.make(Device) response = self.client.get('/history/%i/%i/' % (content_type.pk, device.pk)) self.assertEqual(response.status_code, 200) def test_detail_view(self): device = mommy.make(Device) response = self.client.post('/devices/%i/edit/' % device.pk, data={ 'name': 'test', 'creator': self.admin.pk, }) self.assertEqual(response.status_code, 302) response = self.client.get('/history/version/1/') self.assertEqual(response.status_code, 200)
34.861111
88
0.67251
1,021
0.813546
0
0
0
0
0
0
143
0.113944
78d8d23f31a9ec6e42dd56f7cc23f8c31fbd70c2
376
py
Python
django_git_info/management/commands/get_git_info.py
spapas/django-git
a62215d315263bce5d5d0afcfa14152601f76901
[ "MIT" ]
1
2019-03-15T10:32:21.000Z
2019-03-15T10:32:21.000Z
django_git_info/management/commands/get_git_info.py
spapas/django-git
a62215d315263bce5d5d0afcfa14152601f76901
[ "MIT" ]
null
null
null
django_git_info/management/commands/get_git_info.py
spapas/django-git
a62215d315263bce5d5d0afcfa14152601f76901
[ "MIT" ]
1
2016-03-25T03:57:49.000Z
2016-03-25T03:57:49.000Z
# -*- coding: utf-8 -*- from django.core.management.base import BaseCommand, CommandError from django_git_info import get_git_info class Command(BaseCommand): help = 'Gets git info' #@transaction.commit_manually def handle(self, *args, **options): info = get_git_info() for key in info.keys(): print '{0}={1}'.format(key, info[key])
26.857143
65
0.656915
242
0.643617
0
0
0
0
0
0
76
0.202128
78db0363110019cfe555b18f1fdc95de024b7945
19,306
py
Python
mevis/_internal/conversion.py
robert-haas/mevis
1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327
[ "Apache-2.0" ]
2
2022-01-12T23:08:52.000Z
2022-01-12T23:21:23.000Z
mevis/_internal/conversion.py
robert-haas/mevis
1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327
[ "Apache-2.0" ]
null
null
null
mevis/_internal/conversion.py
robert-haas/mevis
1bbf8dfb56aa8fc52b8f38c570ee7b2d2a9d3327
[ "Apache-2.0" ]
null
null
null
from collections.abc import Callable as _Callable import networkx as _nx from opencog.type_constructors import AtomSpace as _AtomSpace from .args import check_arg as _check_arg def convert(data, graph_annotated=True, graph_directed=True, node_label=None, node_color=None, node_opacity=None, node_size=None, node_shape=None, node_border_color=None, node_border_size=None, node_label_color=None, node_label_size=None, node_hover=None, node_click=None, node_image=None, node_properties=None, edge_label=None, edge_color=None, edge_opacity=None, edge_size=None, edge_label_color=None, edge_label_size=None, edge_hover=None, edge_click=None): """Convert an Atomspace or list of Atoms to a NetworkX graph with annotations. Several arguments accept a Callable. - In case of node annotations, the Callable gets an Atom as input, which the node represents in the graph. The Callable needs to return one of the other types accepted by the argument, e.g. ``str`` or ``int``/``float``. - In case of edge annotations, the Callable gets two Atoms as input, which the edge connects in the graph. The Callable needs to return one of the other types accepted by the argument, e.g. ``str`` or ``int``/``float``. Several arguments accept a color, which can be in following formats: - Name: ``"black"``, ``"red"``, ``"green"``, ... - Color code - 6 digit hex RGB code: ``"#05ac05"`` - 3 digit hex RGB code: ``"#0a0"`` (equivalent to ``"#00aa00"``) Parameters ---------- data : Atomspace, list of Atoms Input that gets converted to a graph. graph_annotated : bool If ``False``, no annotations are added to the graph. This could be used for converting large AtomSpaces quickly to graphs that use less RAM and can be exported to smaller files (e.g. also compressed as gml.gz) for inspection with other tools. graph_directed : bool If ``True``, a NetworkX DiGraph is created. If ``False``, a NetworkX Graph is created. node_label : str, Callable Set a label for each node, which is shown as text below it. node_color : str, Callable Set a color for each node, which becomes the fill color of its shape. node_opacity : float between 0.0 and 1.0 Set an opacity for each node, which becomes the opacity of its shape. Caution: This is only supported by d3. node_size : int, float, Callable Set a size for each node, which becomes the height and width of its shape. node_shape : str, Callable Set a shape for each node, which is some geometrical form that has the node coordinates in its center. Possible values: ``"circle"``, ``"rectangle"``, ``"hexagon"`` node_border_color : str, Callable Set a border color for each node, which influences the border drawn around its shape. node_border_size : int, float, Callable Set a border size for each node, which influences the border drawn around its shape. node_label_color : str, Callable Set a label color for each node, which determines the font color of the text below the node. node_label_size : int, float, Callable Set a label size for each node, which determines the font size of the text below the node. node_hover : str, Callable Set a hover text for each node, which shows up besides the mouse cursor when hovering over a node. node_click : str, Callable Set a click text for each node, which shows up in a div element below the plot when clicking on a node and can easily be copied and pasted. node_image : str, Callable Set an image for each node, which appears within its shape. 
Possible values: - URL pointing to an image - Data URL encoding the image node_properties : str, dict, Callable Set additional properties for each node, which may not immediately be translated into a visual element, but can be chosen in the data selection menu in the interactive HTML visualizations to map them on some plot element. These properties also appear when exporting a graph to a file in a format such as GML and may be recognized by external visualization tools. Note that a Callable needs to return a dict in this case, and each key becomes a property, which is equivalent to the other properties such as node_size and node_color. Special cases: - ``node_properties="tv"`` is a shortcut for using a function that returns ``{"mean": atom.tv.mean, "confidence": atom.tv.confidence}`` - Keys ``"x"``, ``"y"`` and ``"z"`` properties are translated into node coordinates. Examples: - ``dict(x=0.0)``: This fixes the x coordinate of each node to 0.0, so that the JavaScript layout algorithm does not influence it, but the nodes remain free to move in the y and z directions. - ``lambda atom: dict(x=2.0) if atom.is_node() else None``: This fixes the x coordinate of each Atom of type Node to 2.0 but allows each Atom of type Link to move freely. - ``lambda atom: dict(y=-len(atom.out)*100) if atom.is_link() else dict(y=0)`` This fixes the y coordinates of Atoms at different heights. Atoms of type Node are put at the bottom and Atoms of type Link are ordered by the number of their outgoing edges. The results is a hierarchical visualization that has some similarity with the "dot" layout. - ``lambda atom: dict(x=-100) if atom.is_node() else dict(x=100)``: This fixes the x coordinate of Node Atoms at -100 and of Link Atoms at 100. The results is a visualization with two lines of nodes that has some similarity with the "bipartite" layout. edge_label : str, Callable Set a label for each edge, which becomes the text plotted in the middle of the edge. edge_color : str, Callable Set a color for each edge, which becomes the color of the line representing the edge. edge_opacity : int, float, Callable Set an opacity for each edge, which allows to make it transparent to some degree. edge_size : int, float, Callable Set a size for each edge, which becomes the width of the line representing the edge. edge_label_color : str, Callable Set a color for each edge label, which becomes the color of the text in the midpoint of the edge. edge_label_size : int, float, Callable Set a size for each edge label, which becomes the size of the text in the midpoint of the edge. edge_hover : str, Callable edge_click : str, Callable Returns ------- graph : NetworkX Graph or DiGraph Whether an undirected or directed graph is created depends on the argument "directed". 
""" # Argument processing _check_arg(data, 'data', (list, _AtomSpace)) _check_arg(graph_annotated, 'graph_annotated', bool) _check_arg(graph_directed, 'graph_directed', bool) _check_arg(node_label, 'node_label', (str, _Callable), allow_none=True) _check_arg(node_color, 'node_color', (str, _Callable), allow_none=True) _check_arg(node_opacity, 'node_opacity', (int, float, _Callable), allow_none=True) _check_arg(node_size, 'node_size', (int, float, _Callable), allow_none=True) _check_arg(node_shape, 'node_shape', (str, _Callable), allow_none=True) _check_arg(node_border_color, 'node_border_color', (str, _Callable), allow_none=True) _check_arg(node_border_size, 'node_border_size', (int, float, _Callable), allow_none=True) _check_arg(node_label_color, 'node_label_color', (str, _Callable), allow_none=True) _check_arg(node_label_size, 'node_label_size', (int, float, _Callable), allow_none=True) _check_arg(node_hover, 'node_hover', (str, _Callable), allow_none=True) _check_arg(node_click, 'node_click', (str, _Callable), allow_none=True) _check_arg(node_image, 'node_image', (str, _Callable), allow_none=True) _check_arg(node_properties, 'node_properties', (str, dict, _Callable), allow_none=True) _check_arg(edge_label, 'edge_label', (str, _Callable), allow_none=True) _check_arg(edge_color, 'edge_color', (str, _Callable), allow_none=True) _check_arg(edge_opacity, 'edge_opacity', (int, float, _Callable), allow_none=True) _check_arg(edge_size, 'edge_size', (int, float, _Callable), allow_none=True) _check_arg(edge_label_color, 'edge_label_color', (str, _Callable), allow_none=True) _check_arg(edge_label_size, 'edge_label_size', (int, float, _Callable), allow_none=True) _check_arg(edge_hover, 'edge_hover', (str, _Callable), allow_none=True) _check_arg(edge_click, 'edge_click', (str, _Callable), allow_none=True) # Prepare annoation functions if graph_annotated: node_ann = prepare_node_func( node_label, node_color, node_opacity, node_size, node_shape, node_border_color, node_border_size, node_label_color, node_label_size, node_hover, node_click, node_image, node_properties) edge_ann = prepare_edge_func( edge_label, edge_color, edge_opacity, edge_size, edge_label_color, edge_label_size, edge_hover, edge_click) else: empty = dict() def node_ann(atom): return empty def edge_ann(atom1, atom2): return empty # Create the NetworkX graph graph = _nx.DiGraph() if graph_directed else _nx.Graph() # 0) Set graph annotations graph.graph['node_click'] = '$hover' # node_click will by default show content of node_hover # 1) Add vertices and their annotations for atom in data: graph.add_node(to_uid(atom), **node_ann(atom)) # 2) Add edges and their annotations (separate step to exclude edges to filtered vertices) for atom in data: uid = to_uid(atom) if atom.is_link(): # for all that is incoming to the Atom for atom2 in atom.incoming: uid2 = to_uid(atom2) if uid2 in graph.nodes: graph.add_edge(uid2, uid, **edge_ann(atom2, atom)) # for all that is outgoing of the Atom for atom2 in atom.out: uid2 = to_uid(atom2) if uid2 in graph.nodes: graph.add_edge(uid, uid2, **edge_ann(atom, atom2)) return graph def prepare_node_func(node_label, node_color, node_opacity, node_size, node_shape, node_border_color, node_border_size, node_label_color, node_label_size, node_hover, node_click, node_image, node_properties): """Prepare a function that calculates all annoations for a node representing an Atom.""" # individual node annotation functions node_label = use_node_def_or_str(node_label, node_label_default) node_color = 
use_node_def_or_str(node_color, node_color_default) node_opacity = use_node_def_or_num(node_opacity, node_opacity_default) node_size = use_node_def_or_num(node_size, node_size_default) node_shape = use_node_def_or_str(node_shape, node_shape_default) node_border_color = use_node_def_or_str(node_border_color, node_border_color_default) node_border_size = use_node_def_or_num(node_border_size, node_border_size_default) node_label_color = use_node_def_or_str(node_label_color, node_label_color_default) node_label_size = use_node_def_or_num(node_label_size, node_label_size_default) node_hover = use_node_def_or_str(node_hover, node_hover_default) node_click = use_node_def_or_str(node_click, node_click_default) node_image = use_node_def_or_str(node_image, node_image_default) # special case: additional user-defined node properties by a function that returns a dict if node_properties is None: node_properties = node_properties_default elif isinstance(node_properties, dict): val = node_properties def node_properties(atom): return val elif node_properties == 'tv': node_properties = node_properties_tv # combined node annotation function: calls each of the individual ones name_func = ( ('label', node_label), ('color', node_color), ('opacity', node_opacity), ('size', node_size), ('shape', node_shape), ('border_color', node_border_color), ('border_size', node_border_size), ('label_color', node_label_color), ('label_size', node_label_size), ('hover', node_hover), ('click', node_click), ('image', node_image), ) def func(atom): data = {} for n, f in name_func: val = f(atom) if val is not None: data[n] = val try: data.update(node_properties(atom)) except Exception: pass return data return func def prepare_edge_func(edge_label, edge_color, edge_opacity, edge_size, edge_label_color, edge_label_size, edge_hover, edge_click): """Prepare a function that calculates all annoations for an edge between Atoms.""" # individual edge annotation functions edge_label = use_edge_def_or_str(edge_label, edge_label_default) edge_color = use_edge_def_or_str(edge_color, edge_color_default) edge_opacity = use_edge_def_or_num(edge_opacity, edge_opacity_default) edge_size = use_edge_def_or_num(edge_size, edge_size_default) edge_label_color = use_edge_def_or_str(edge_label_color, edge_label_color_default) edge_label_size = use_edge_def_or_num(edge_label_size, edge_label_size_default) edge_hover = use_edge_def_or_str(edge_hover, edge_hover_default) edge_click = use_edge_def_or_str(edge_click, edge_click_default) # combined edge annotation function: calls each of the individual ones name_func = ( ('label', edge_label), ('color', edge_color), ('opacity', edge_opacity), ('size', edge_size), ('label_color', edge_label_color), ('label_size', edge_label_size), ('hover', edge_hover), ('click', edge_click), ) def func(atom1, atom2): data = {} for n, f in name_func: val = f(atom1, atom2) if val is not None: data[n] = val return data return func def use_node_def_or_str(given_value, default_func): """Transform a value of type (None, str, Callable) to a node annotation function.""" # Default: use pre-defined function from this module if given_value is None: func = default_func # Transform: value to function that returns the value elif isinstance(given_value, str): given_value = str(given_value) def func(atom): return given_value # Passthrough: value itself is a function else: func = given_value return func def use_node_def_or_num(given_value, default_func): """Transform a value of type (None, int, float, Callable) to a node annotation function.""" 
# Default: use pre-defined function from this module if given_value is None: func = default_func # Transform: value to function that returns the value elif isinstance(given_value, (int, float)): given_value = float(given_value) def func(atom): return given_value # Passthrough: value itself is a function else: func = given_value return func def use_edge_def_or_str(given_value, default_func): """Transform a value of type (None, str, Callable) to an edge annotation function.""" # Default: use pre-defined function from this module if given_value is None: func = default_func # Transform: value to function that returns the value elif isinstance(given_value, str): given_value = str(given_value) def func(atom1, atom2): return given_value # Passthrough: value itself is a function else: func = given_value return func def use_edge_def_or_num(given_value, default_func): """Transform a value of type (None, int, float, Callable) to an edge annotation function.""" # Default: use pre-defined function from this module if given_value is None: func = default_func # Transform: value to function that returns the value elif isinstance(given_value, (int, float)): given_value = float(given_value) def func(atom1, atom2): return given_value # Passthrough: value itself is a function else: func = given_value return func def to_uid(atom): """Return a unique identifier for an Atom.""" return atom.id_string() # Default functions for node annotations # - "return None" means that the attribute and value won't be included # to the output data, so that defaults of the JS library are used and files get smaller # - A return of a value in some cases and None in other cases means that the # default value of the JS library is used in None cases and again files get smaller def node_label_default(atom): # None => no node labels return '{} "{}"'.format(atom.type_name, atom.name) if atom.is_node() else atom.type_name def node_color_default(atom): # None => black return 'red' if atom.is_node() else None def node_opacity_default(atom): # None => 1.0 return None def node_size_default(atom): # None => 10 return None def node_shape_default(atom): # None => circle return 'rectangle' if atom.is_node() else None def node_border_color_default(atom): # None => black return None def node_border_size_default(atom): # None => 0.0 return None def node_label_color_default(atom): # None => black return None def node_label_size_default(atom): # None => 12.0 return None def node_hover_default(atom): # None => no hover text return atom.short_string() def node_click_default(atom): # None => no click text (in addition to always shown "Node: <id>" in header) return None def node_image_default(atom): # None => no image inside node return None def node_properties_default(atom): # None => no extra node annotations return None def node_properties_tv(atom): return dict(mean=atom.tv.mean, confidence=atom.tv.confidence) # Default functions for edge annotations def edge_label_default(atom1, atom2): # None => no edge label return None def edge_color_default(atom1, atom2): # None => black return None if atom1.is_link() and atom2.is_link() else 'red' def edge_opacity_default(atom1, atom2): # None => 1.0 return None def edge_size_default(atom1, atom2): # None => 1.0 return None def edge_label_color_default(atom1, atom2): # None => black return None def edge_label_size_default(atom1, atom2): # None => 8.0 return None def edge_hover_default(atom1, atom2): # None => no hover text return None def edge_click_default(atom1, atom2): # None => no click text (in addition to 
always shown "Edge: <id>" in header) return None
38.923387
97
0.682897
0
0
0
0
0
0
0
0
9,574
0.495908
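The convert() helper in the row above turns every node/edge annotation argument into a per-atom callable through its use_node_def_or_str / use_node_def_or_num helpers. Below is a minimal, self-contained sketch of that "constant or callable" dispatch pattern in plain Python; the FakeAtom class and default_label function are illustrative stand-ins and are not part of the module.

# Sketch of the "constant or callable" normalization used by convert() above.
# FakeAtom and default_label are made-up stand-ins, not module code.
class FakeAtom:
    def __init__(self, type_name, name):
        self.type_name = type_name
        self.name = name

def default_label(atom):
    # Mirrors node_label_default: '<type> "<name>"' for an Atom of type Node.
    return '{} "{}"'.format(atom.type_name, atom.name)

def normalize(given_value, default_func):
    # None -> module default; plain value -> constant function; callable -> passed through.
    if given_value is None:
        return default_func
    if not callable(given_value):
        return lambda atom: given_value
    return given_value

label_fn = normalize(None, default_label)                   # use the default
color_fn = normalize("red", None)                           # constant value
size_fn = normalize(lambda a: 20 if a.name else 10, None)   # user-supplied callable

atom = FakeAtom("ConceptNode", "cat")
print(label_fn(atom), color_fn(atom), size_fn(atom))        # ConceptNode "cat" red 20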
78db1f0ed3fd45150eca94cbff8fdb625dd1d917
156
py
Python
testData/completion/classMethodCls.py
seandstewart/typical-pycharm-plugin
4f6ec99766239421201faae9d75c32fa0ee3565a
[ "MIT" ]
null
null
null
testData/completion/classMethodCls.py
seandstewart/typical-pycharm-plugin
4f6ec99766239421201faae9d75c32fa0ee3565a
[ "MIT" ]
null
null
null
testData/completion/classMethodCls.py
seandstewart/typical-pycharm-plugin
4f6ec99766239421201faae9d75c32fa0ee3565a
[ "MIT" ]
null
null
null
from builtins import *

from pydantic import BaseModel


class A(BaseModel):
    abc: str

    @classmethod
    def test(cls):
        return cls.<caret>
11.142857
30
0.647436
96
0.615385
0
0
58
0.371795
0
0
0
0
78db3efa5c77dd290cf1467f8ac973b8fc19949b
13,168
py
Python
watcher_metering/tests/agent/test_agent.py
b-com/watcher-metering
7c09b243347146e5a421700d5b07d1d0a5c4d604
[ "Apache-2.0" ]
2
2015-10-22T19:44:57.000Z
2017-06-15T15:01:07.000Z
watcher_metering/tests/agent/test_agent.py
b-com/watcher-metering
7c09b243347146e5a421700d5b07d1d0a5c4d604
[ "Apache-2.0" ]
1
2015-10-26T13:52:58.000Z
2015-10-26T13:52:58.000Z
watcher_metering/tests/agent/test_agent.py
b-com/watcher-metering
7c09b243347146e5a421700d5b07d1d0a5c4d604
[ "Apache-2.0" ]
4
2015-10-10T13:59:39.000Z
2020-05-29T11:47:07.000Z
# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import unicode_literals from collections import OrderedDict import os import types from mock import MagicMock from mock import Mock from mock import patch from mock import PropertyMock import msgpack import operator from oslo_config import cfg from oslotest.base import BaseTestCase from stevedore.driver import DriverManager from stevedore.extension import Extension from watcher_metering.agent.agent import Agent from watcher_metering.agent.measurement import Measurement from watcher_metering.tests.agent.agent_fixtures import ConfFixture from watcher_metering.tests.agent.agent_fixtures import DummyMetricPuller from watcher_metering.tests.agent.agent_fixtures import FakeMetricPuller class TestAgent(BaseTestCase): # patches to be applied for each test in this test suite patches = [] def setUp(self): super(TestAgent, self).setUp() self.conf = cfg.ConfigOpts() # To load the drivers without using the config file self.useFixture(ConfFixture(self.conf)) def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method # First dependency to be returned self.dummy_driver_manager = DriverManager.make_test_instance( extension=Extension( name=DummyMetricPuller.get_name(), entry_point='fake.entry.point', plugin=DummyMetricPuller, obj=None, ), namespace='TESTING', ) # 2nd dependency to be returned self.fake_driver_manager = DriverManager.make_test_instance( extension=Extension( name=FakeMetricPuller.get_name(), entry_point='fake.entry.point', plugin=FakeMetricPuller, obj=None, ), namespace='TESTING', ) self.defaults_drivers = { DummyMetricPuller.get_name(): self.dummy_driver_manager, FakeMetricPuller.get_name(): self.fake_driver_manager, } def _fake_loader(name, **kw): return self.defaults_drivers[name] # Patches the agent socket self.m_agent_socket = MagicMock(autospec=True) self.patches.extend([ # Deactivates the nanomsg socket patch( "watcher_metering.agent.agent.nanomsg.Socket", new=self.m_agent_socket, ), # Sets the test namespace to 'TESTING' patch.object( Agent, "namespace", PropertyMock(return_value='TESTING'), ), # Patches the driver manager to retourn our test drivers # instead of the real ones patch( "watcher_metering.load.loader.DriverManager", MagicMock(side_effect=_fake_loader), ), ]) # Applies all of our patches before each test for _patch in self.patches: _patch.start() self.agent = Agent( conf=self.conf, driver_names=self.conf.agent.driver_names, use_nanoconfig_service=False, publisher_endpoint="fake", nanoconfig_service_endpoint="", nanoconfig_update_endpoint="", nanoconfig_profile="nanoconfig://test_profile" ) # Default ticking is set to 0 to reduce test execution time self.agent.TICK_INTERVAL = 0 def tearDown(self): super(TestAgent, self).tearDown() # The drivers are stored at the class level so we need to clear # it after 
each test self.agent.drivers.clear() for _patch in self.patches: _patch.stop() def test_register_driver(self): expected_driver1_key = "metrics_driver.dummy_data.puller.dummy" expected_driver2_key = "metrics_driver.fake_data.puller.fake" self.agent.register_drivers() self.assertEqual( sorted(self.agent.drivers.keys()), [expected_driver1_key, expected_driver2_key] ) sorted_drivers = OrderedDict( sorted(self.agent.drivers.items(), key=operator.itemgetter(0)) ) self.assertEqual(len(sorted_drivers), 2) driver1 = self.agent.drivers[expected_driver1_key] driver2 = self.agent.drivers[expected_driver2_key] self.assertEqual(driver1.title, "metrics_driver.dummy") self.assertEqual(driver1.probe_id, "data.puller.dummy") self.assertEqual(driver1.interval, 0.01) self.assertEqual(driver2.title, "metrics_driver.fake") self.assertEqual(driver2.probe_id, "data.puller.fake") self.assertEqual(driver2.interval, 0.01) self.assertIn(self.agent, driver1._observers) self.assertIn(self.agent, driver2._observers) def test_unregister_driver(self): driver_key = "metrics_driver.dummy_data.puller.dummy" self.agent.register_drivers() self.agent.unregister_driver(driver_key) # Initial is 2 drivers => 2 - 1 == 1 self.assertEqual(len(self.agent.drivers), 1) @patch.object(Measurement, "as_dict") def test_send_measurements(self, m_as_dict): self.agent.register_drivers() measurement_dict = OrderedDict( name="dummy.data.puller", unit="", type_="", value=13.37, resource_id="test_hostname", host="test_hostname", timestamp="2015-08-04T15:15:45.703542", ) m_as_dict.return_value = measurement_dict measurement = Measurement(**measurement_dict) for driver in self.agent.drivers.values(): driver.send_measurements([measurement]) break # only the first one expected_encoded_msg = msgpack.dumps(measurement_dict) self.m_agent_socket.return_value.send.assert_called_once_with( expected_encoded_msg ) @patch.object(DummyMetricPuller, "is_alive") @patch.object(DummyMetricPuller, "start") @patch("watcher_metering.agent.manager.MetricManager.lock") def test_check_drivers_alive(self, m_lock, m_start, m_is_alive): m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior m_lock.release = Mock(return_value=True) # Emulates a thread behavior m_is_alive.return_value = True # Emulates a thread that is running m_start.return_value = None self.agent.register_drivers() self.agent.check_drivers_alive() self.assertTrue(m_is_alive.called) self.assertFalse(m_start.called) @patch.object(DummyMetricPuller, "is_alive") @patch.object(DummyMetricPuller, "start") @patch("watcher_metering.agent.manager.MetricManager.lock") def test_check_drivers_alive_with_driver_stopped(self, m_lock, m_start, m_is_alive): m_lock.acquire = Mock(return_value=True) # Emulates a thread behavior m_lock.release = Mock(return_value=True) # Emulates a thread behavior m_is_alive.side_effect = [False, True] m_start.side_effect = [RuntimeError, True, True] # Fails once self.agent.register_drivers() # should re-run the driver self.agent.check_drivers_alive() self.assertEqual(m_is_alive.call_count, 1) self.assertEqual(m_start.call_count, 2) @patch.object(os._Environ, "__setitem__") @patch("watcher_metering.agent.agent.os.environ.get") def test_setup_nanoconfig_valid_using_default(self, m_env_getter, m_env_setter): # Override default where it is set to False m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE", "FAKE_NN_CONFIG_UPDATES"] self.agent.use_nanoconfig_service = True self.agent.nanoconfig_service_endpoint = "" self.agent.nanoconfig_update_endpoint = "" 
self.agent.set_nanoconfig_endpoints() self.assertEqual(m_env_getter.call_count, 2) m_env_getter.assert_any_call("NN_CONFIG_SERVICE") # First call m_env_getter.assert_called_with("NN_CONFIG_UPDATES") # Last call self.assertEqual(m_env_setter.call_count, 0) self.assertEqual(self.agent.nanoconfig_service_endpoint, "FAKE_NN_CONFIG_SERVICE") self.assertEqual(self.agent.nanoconfig_update_endpoint, "FAKE_NN_CONFIG_UPDATES") @patch.object(os._Environ, "__setitem__") @patch("watcher_metering.agent.agent.os.environ.get") def test_setup_nanoconfig_valid_custom_values(self, m_env_getter, m_env_setter): # Override default where it is set to False m_env_getter.side_effect = ["FAKE_NN_CONFIG_SERVICE", "FAKE_NN_CONFIG_UPDATES"] self.agent.use_nanoconfig_service = True self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE" self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES" self.agent.set_nanoconfig_endpoints() self.assertEqual(m_env_getter.call_count, 2) m_env_getter.assert_any_call("NN_CONFIG_SERVICE") m_env_getter.assert_called_with("NN_CONFIG_UPDATES") m_env_setter.assert_any_call("NN_CONFIG_SERVICE", "CUSTOM_NN_CONFIG_SERVICE") m_env_setter.assert_called_with("NN_CONFIG_UPDATES", "CUSTOM_NN_CONFIG_UPDATES") self.assertEqual(self.agent.nanoconfig_service_endpoint, "CUSTOM_NN_CONFIG_SERVICE") self.assertEqual(self.agent.nanoconfig_update_endpoint, "CUSTOM_NN_CONFIG_UPDATES") @patch.object(os._Environ, "__setitem__") @patch("watcher_metering.agent.agent.os.environ.get") def test_setup_nanoconfig_invalid_service(self, m_env_getter, m_env_setter): # Override default where it is set to False m_env_getter.return_value = "" # Emulates empty ENV vars self.agent.use_nanoconfig_service = True self.agent.nanoconfig_service_endpoint = "" self.agent.nanoconfig_update_endpoint = "CUSTOM_NN_CONFIG_UPDATES" self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints) m_env_getter.assert_called_once_with("NN_CONFIG_SERVICE") self.assertEqual(m_env_setter.call_count, 0) @patch.object(os._Environ, "__setitem__") @patch("watcher_metering.agent.agent.os.environ.get") def test_setup_nanoconfig_invalid_update(self, m_env_getter, m_env_setter): # Override default where it is set to False m_env_getter.return_value = "" # Emulates empty ENV vars self.agent.use_nanoconfig_service = True self.agent.nanoconfig_service_endpoint = "CUSTOM_NN_CONFIG_SERVICE" self.agent.nanoconfig_update_endpoint = "" self.assertRaises(ValueError, self.agent.set_nanoconfig_endpoints) m_env_getter.assert_any_call("NN_CONFIG_SERVICE") m_env_getter.assert_called_with("NN_CONFIG_UPDATES") m_env_setter.assert_called_once_with("NN_CONFIG_SERVICE", "CUSTOM_NN_CONFIG_SERVICE") @patch.object(Agent, 'check_drivers_alive', MagicMock()) @patch("watcher_metering.agent.manager." "MetricManager.terminated", new_callable=PropertyMock) def test_run_agent(self, m_terminated): # Patches the guard/exit condition of the thread periodic event loop # -> 1st time = False (carry on) and 2nd = True (Should terminate) m_terminated.side_effect = [False, True] self.agent.run() self.assertEqual(m_terminated.call_count, 2) @patch.object(DummyMetricPuller, 'send_measurements', MagicMock()) def test_stop_agent(self): self.agent.register_drivers() self.agent.start() self.agent.join(timeout=.01) self.agent.stop() self.assertEqual(len(self.agent.drivers.values()), 2) self.assertTrue( all([driver.terminated for driver in self.agent.drivers.values()]) ) self.assertTrue(self.agent.terminated) self.assertFalse(self.agent.is_alive())
40.024316
79
0.659478
11,817
0.897403
0
0
7,212
0.547691
0
0
3,277
0.248861
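The TestAgent suite in the row above gathers its mock patchers in a self.patches list, starts them all in setUp and undoes them in tearDown. The following is a small self-contained sketch of that pattern, using a real patch target (os.getcwd) purely for illustration rather than the watcher_metering internals:

import os
import unittest
from unittest.mock import patch


class PatchListTestCase(unittest.TestCase):
    """Start every patch in setUp and stop it again in tearDown."""

    def setUp(self):
        # Collect patchers so tearDown can undo them all, as TestAgent does.
        self.patches = [patch("os.getcwd", return_value="/fake/cwd")]
        for p in self.patches:
            p.start()

    def tearDown(self):
        for p in self.patches:
            p.stop()

    def test_patched_call(self):
        self.assertEqual(os.getcwd(), "/fake/cwd")


if __name__ == "__main__":
    unittest.main()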
78dce9aa3f78b6fd58cffc69a08166742b99da9b
31,044
py
Python
mmtbx/bulk_solvent/mosaic.py
ndevenish/cctbx_project
1f1a2627ae20d01d403f367948e7269cef0f0217
[ "BSD-3-Clause-LBNL" ]
null
null
null
mmtbx/bulk_solvent/mosaic.py
ndevenish/cctbx_project
1f1a2627ae20d01d403f367948e7269cef0f0217
[ "BSD-3-Clause-LBNL" ]
null
null
null
mmtbx/bulk_solvent/mosaic.py
ndevenish/cctbx_project
1f1a2627ae20d01d403f367948e7269cef0f0217
[ "BSD-3-Clause-LBNL" ]
null
null
null
from __future__ import absolute_import, division, print_function from cctbx.array_family import flex from scitbx import matrix import math from libtbx import adopt_init_args import scitbx.lbfgs from mmtbx.bulk_solvent import kbu_refinery from cctbx import maptbx import mmtbx.masks import boost_adaptbx.boost.python as bp asu_map_ext = bp.import_ext("cctbx_asymmetric_map_ext") from libtbx import group_args from mmtbx import bulk_solvent from mmtbx.ncs import tncs from collections import OrderedDict import mmtbx.f_model import sys from libtbx.test_utils import approx_equal from mmtbx import masks from cctbx.masks import vdw_radii_from_xray_structure ext = bp.import_ext("mmtbx_masks_ext") mosaic_ext = bp.import_ext("mmtbx_mosaic_ext") APPLY_SCALE_K1_TO_FOBS = False def moving_average(x, n): r = [] for i, xi in enumerate(x): s = 0 cntr = 0 for j in range(max(0,i-n), min(i+n+1, len(x))): s+=x[j] cntr+=1 s = s/cntr r.append(s) return r # Utilities used by algorithm 2 ------------------------------------------------ class minimizer(object): def __init__(self, max_iterations, calculator): adopt_init_args(self, locals()) self.x = self.calculator.x self.cntr=0 exception_handling_params = scitbx.lbfgs.exception_handling_parameters( ignore_line_search_failed_step_at_lower_bound=True, ) self.minimizer = scitbx.lbfgs.run( target_evaluator=self, exception_handling_params=exception_handling_params, termination_params=scitbx.lbfgs.termination_parameters( max_iterations=max_iterations)) def compute_functional_and_gradients(self): self.cntr+=1 self.calculator.update_target_and_grads(x=self.x) t = self.calculator.target() g = self.calculator.gradients() #print "step: %4d"%self.cntr, "target:", t, "params:", \ # " ".join(["%10.6f"%i for i in self.x]), math.log(t) return t,g class minimizer2(object): def __init__(self, calculator, min_iterations=0, max_iterations=2000): adopt_init_args(self, locals()) self.x = self.calculator.x self.n = self.x.size() self.cntr=0 def run(self, use_curvatures=0): self.minimizer = kbu_refinery.lbfgs_run( target_evaluator=self, min_iterations=self.min_iterations, max_iterations=self.max_iterations, use_curvatures=use_curvatures) self(requests_f_and_g=True, requests_diag=False) return self def __call__(self, requests_f_and_g, requests_diag): self.cntr+=1 self.calculator.update_target_and_grads(x=self.x) if (not requests_f_and_g and not requests_diag): requests_f_and_g = True requests_diag = True if (requests_f_and_g): self.f = self.calculator.target() self.g = self.calculator.gradients() self.d = None if (requests_diag): self.d = self.calculator.curvatures() #assert self.d.all_ne(0) if(self.d.all_eq(0)): self.d=None else: self.d = 1 / self.d #print "step: %4d"%self.cntr, "target:", self.f, "params:", \ # " ".join(["%10.6f"%i for i in self.x]) #, math.log(self.f) return self.x, self.f, self.g, self.d class tg(object): def __init__(self, x, i_obs, F, use_curvatures): self.x = x self.i_obs = i_obs self.F = F self.t = None self.g = None self.d = None # Needed to do sums from small to large to prefent loss s = flex.sort_permutation(self.i_obs.data()) self.i_obs = self.i_obs.select(s) self.F = [f.select(s) for f in self.F] # self.sum_i_obs = flex.sum(self.i_obs.data()) # needed for Python version self.use_curvatures=use_curvatures self.tgo = mosaic_ext.alg2_tg( F = [f.data() for f in self.F], i_obs = self.i_obs.data()) self.update_target_and_grads(x=x) def update(self, x): self.update_target_and_grads(x = x) def update_target_and_grads(self, x): self.x = x self.tgo.update(self.x) self.t = 
self.tgo.target() self.g = self.tgo.gradient() # # Reference implementation in Python # s = 1 #180/math.pi # i_model = flex.double(self.i_obs.data().size(),0) # for n, kn in enumerate(self.x): # for m, km in enumerate(self.x): # tmp = self.F[n].data()*flex.conj(self.F[m].data()) # i_model += kn*km*flex.real(tmp) # #pn = self.F[n].phases().data()*s # #pm = self.F[m].phases().data()*s # #Fn = flex.abs(self.F[n].data()) # #Fm = flex.abs(self.F[m].data()) # #i_model += kn*km*Fn*Fm*flex.cos(pn-pm) # diff = i_model - self.i_obs.data() # #print (flex.min(diff), flex.max(diff)) # t = flex.sum(diff*diff)/4 # # # g = flex.double() # for j in range(len(self.F)): # tmp = flex.double(self.i_obs.data().size(),0) # for m, km in enumerate(self.x): # tmp += km * flex.real( self.F[j].data()*flex.conj(self.F[m].data()) ) # #pj = self.F[j].phases().data()*s # #pm = self.F[m].phases().data()*s # #Fj = flex.abs(self.F[j].data()) # #Fm = flex.abs(self.F[m].data()) # #tmp += km * Fj*Fm*flex.cos(pj-pm) # g.append(flex.sum(diff*tmp)) # self.t = t/self.sum_i_obs # self.g = g/self.sum_i_obs # #print (self.t,t1) # #print (list(self.g)) # #print (list(g1)) # #print () # #assert approx_equal(self.t, t1, 5) # #assert approx_equal(self.g, g1, 1.e-6) # if self.use_curvatures: d = flex.double() for j in range(len(self.F)): tmp1 = flex.double(self.i_obs.data().size(),0) tmp2 = flex.double(self.i_obs.data().size(),0) for m, km in enumerate(self.x): zz = flex.real( self.F[j].data()*flex.conj(self.F[m].data()) ) tmp1 += km * zz tmp2 += zz #pj = self.F[j].phases().data()*s #pm = self.F[m].phases().data()*s #Fj = flex.abs(self.F[j].data()) #Fm = flex.abs(self.F[m].data()) #tmp += km * Fj*Fm*flex.cos(pj-pm) d.append(flex.sum(tmp1*tmp1 + tmp2)) self.d=d def target(self): return self.t def gradients(self): return self.g def gradient(self): return self.gradients() def curvatures(self): return self.d/self.sum_i_obs #------------------------------------------------------------------------------- def write_map_file(crystal_symmetry, map_data, file_name): from iotbx import mrcfile mrcfile.write_ccp4_map( file_name = file_name, unit_cell = crystal_symmetry.unit_cell(), space_group = crystal_symmetry.space_group(), map_data = map_data, labels = flex.std_string([""])) class refinery(object): def __init__(self, fmodel, fv, alg, anomaly=True, log = sys.stdout): assert alg in ["alg0", "alg2", "alg4", None] self.log = log self.f_obs = fmodel.f_obs() self.r_free_flags = fmodel.r_free_flags() k_mask_overall = fmodel.k_masks()[0] self.bin_selections = fmodel.bin_selections # k_total = fmodel.k_total() self.f_calc = fmodel.f_model() self.F = [self.f_calc.deep_copy()] + fv.keys() # n_zones_start = len(self.F) r4_start = fmodel.r_work4() for it in range(5): # if(it>0): r4 = self.fmodel.r_work4() print(r4_start, r4, abs(round(r4-r4_start,4))) if(abs(round(r4-r4_start,4))<1.e-4): break r4_start = r4 #if(it>0 and n_zones_start == len(self.F)): break # #if it>0: # self.F = [self.fmodel.f_model().deep_copy()] + self.F[1:] self._print("cycle: %2d"%it) self._print(" volumes: "+" ".join([str(fv[f]) for f in self.F[1:]])) f_obs = self.f_obs.deep_copy() if it==0: k_total = fmodel.k_total() else: k_total = self.fmodel.k_total() i_obs = f_obs.customized_copy(data = f_obs.data()*f_obs.data()) K_MASKS = OrderedDict() self.bin_selections = self.f_obs.log_binning( n_reflections_in_lowest_resolution_bin = 100*len(self.F)) for i_bin, sel in enumerate(self.bin_selections): d_max, d_min = f_obs.select(sel).d_max_min() if d_max<3: continue bin = " bin %2d: %5.2f-%-5.2f: 
"%(i_bin, d_max, d_min) F = [f.select(sel) for f in self.F] k_total_sel = k_total.select(sel) F_scaled = [F[0].deep_copy()]+[f.customized_copy(data=f.data()*k_total_sel) for f in F[1:]] # # XXX WHY NOT THIS INSTEAD (INVESTIGATE LATER)? #F_scaled = [f.customized_copy(data=f.data()*k_total_sel) for f in F] #r00=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, F[0].data()*k_total_sel) # algorithm_0 if(alg=="alg0"): k_masks = algorithm_0( f_obs = f_obs.select(sel), F = F_scaled, kt=k_total_sel) #fd = flex.complex_double(F[0].data().size()) #for i,f in enumerate(F): # fd = fd + f.data()*k_masks[i] #r0=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel) # algorithm_4 if(alg=="alg4"): if it==0: phase_source = fmodel.f_model().select(sel) else: phase_source = self.fmodel.f_model().select(sel) k_masks = algorithm_4( f_obs = self.f_obs.select(sel), F = F_scaled, auto_converge_eps = 0.0001, phase_source = phase_source) #fd = flex.complex_double(F[0].data().size()) #for i,f in enumerate(F): # fd = fd + f.data()*k_masks[i] #r4=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel) # algorithm_2 if(alg=="alg2"): k_masks = algorithm_2( i_obs = i_obs.select(sel), F = F_scaled, x = self._get_x_init(i_bin), use_curvatures = False) #fd = flex.complex_double(F[0].data().size()) #for i,f in enumerate(F): # fd = fd + f.data()*k_masks[i] #r2=bulk_solvent.r_factor(f_obs.select(sel).data()*k_total_sel, fd*k_total_sel) #self._print(bin+" ".join(["%6.2f"%k for k in k_masks])+" %6.4f %6.4f %6.4f %6.4f"%(r00,r0,r4, r2)) k_mean = flex.mean(k_mask_overall.select(sel)) k_masks_plus = [k_masks[0]]+[k_mean + k for k in k_masks[1:]] self._print(bin+" ".join(["%6.2f"%k for k in k_masks_plus]) ) K_MASKS[sel] = [k_masks, k_masks_plus] # if(len(self.F)==2): break # stop and fall back onto using largest mask # # #print() #self.update_k_masks(K_MASKS) #for k_masks in K_MASKS.values(): # self._print(bin+" ".join(["%6.2f"%k for k in k_masks])) # f_calc_data = self.f_calc.data().deep_copy() f_bulk_data = flex.complex_double(fmodel.f_calc().data().size(), 0) for sel, k_masks in zip(K_MASKS.keys(), K_MASKS.values()): k_masks = k_masks[0] # 1 is shifted! 
f_bulk_data_ = flex.complex_double(sel.count(True), 0) for i_mask, k_mask in enumerate(k_masks): if i_mask==0: f_calc_data = f_calc_data.set_selected(sel, f_calc_data.select(sel)*k_mask) continue f_bulk_data_ += self.F[i_mask].data().select(sel)*k_mask f_bulk_data = f_bulk_data.set_selected(sel,f_bulk_data_) # self.update_F(K_MASKS) f_bulk = fmodel.f_calc().customized_copy(data = f_bulk_data) if(len(self.F)==2): self.fmodel = mmtbx.f_model.manager( f_obs = self.f_obs, r_free_flags = self.r_free_flags, f_calc = fmodel.f_calc(), f_mask = self.F[1], k_mask = flex.double(f_obs.data().size(),1) ) self.fmodel.update_all_scales(remove_outliers=False, apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS) else: self.fmodel = mmtbx.f_model.manager( f_obs = self.f_obs, r_free_flags = self.r_free_flags, #f_calc = self.f_obs.customized_copy(data = f_calc_data), f_calc = self.f_calc, bin_selections = self.bin_selections, f_mask = f_bulk, k_mask = flex.double(f_obs.data().size(),1) ) self.fmodel.update_all_scales(remove_outliers=False, apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS) # self.fmodel = mmtbx.f_model.manager( f_obs = self.f_obs, r_free_flags = self.r_free_flags, #f_calc = self.f_obs.customized_copy(data = f_calc_data), f_calc = self.fmodel.f_calc(), f_mask = self.fmodel.f_bulk(), k_mask = flex.double(f_obs.data().size(),1) ) self.fmodel.update_all_scales(remove_outliers=False, apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS) self._print(self.fmodel.r_factors(prefix=" ")) #self._print(self.fmodel.r_factors(prefix=" ")) self.mc = self.fmodel.electron_density_map().map_coefficients( map_type = "mFobs-DFmodel", isotropize = True, exclude_free_r_reflections = False) #def update_k_masks(self, K_MASKS): # tmp = [] # for i_mask, F in enumerate(self.F): # k_masks = [k_masks_bin[i_mask] for k_masks_bin in K_MASKS.values()] # found = False # for i_bin, k_masks_bin in enumerate(K_MASKS.values()): # if(not found and k_masks_bin[i_mask]<=0.009): # found = True # K_MASKS.values()[i_bin][i_mask]=0 # elif found: # K_MASKS.values()[i_bin][i_mask]=0 def _print(self, m): if(self.log is not None): print(m, file=self.log) def update_F(self, K_MASKS): tmp = [] for i_mask, F in enumerate(self.F): k_masks = [k_masks_bin[1][i_mask] for k_masks_bin in K_MASKS.values()] if(i_mask == 0): tmp.append(self.F[0]) elif moving_average(k_masks,2)[0]>=0.03: tmp.append(F) self.F = tmp[:] def _get_x_init(self, i_bin): return flex.double([1] + [1]*len(self.F[1:])) #k_maks1_init = 0.35 - i_bin*0.35/len(self.bin_selections) #x = flex.double([1,k_maks1_init]) #x.extend( flex.double(len(self.F)-2, 0.1)) #return x def get_f_mask(xrs, ma, step, option = 2, r_shrink = None, r_sol = None): crystal_gridding = maptbx.crystal_gridding( unit_cell = xrs.unit_cell(), space_group_info = xrs.space_group_info(), symmetry_flags = maptbx.use_space_group_symmetry, step = step) n_real = crystal_gridding.n_real() atom_radii = vdw_radii_from_xray_structure(xray_structure = xrs) mask_params = masks.mask_master_params.extract() grid_step_factor = ma.d_min()/step if(r_shrink is not None): mask_params.shrink_truncation_radius = r_shrink if(r_sol is not None): mask_params.solvent_radius = r_sol mask_params.grid_step_factor = grid_step_factor # 1 if(option==1): asu_mask = ext.atom_mask( unit_cell = xrs.unit_cell(), group = xrs.space_group(), resolution = ma.d_min(), grid_step_factor = grid_step_factor, solvent_radius = mask_params.solvent_radius, shrink_truncation_radius = mask_params.shrink_truncation_radius) asu_mask.compute(xrs.sites_frac(), atom_radii) 
fm_asu = asu_mask.structure_factors(ma.indices()) f_mask = ma.set().array(data = fm_asu) # 2 elif(option==2): asu_mask = ext.atom_mask( unit_cell = xrs.unit_cell(), space_group = xrs.space_group(), gridding_n_real = n_real, solvent_radius = mask_params.solvent_radius, shrink_truncation_radius = mask_params.shrink_truncation_radius) asu_mask.compute(xrs.sites_frac(), atom_radii) fm_asu = asu_mask.structure_factors(ma.indices()) f_mask = ma.set().array(data = fm_asu) # 3 elif(option==3): mask_p1 = mmtbx.masks.mask_from_xray_structure( xray_structure = xrs, p1 = True, for_structure_factors = True, solvent_radius = mask_params.solvent_radius, shrink_truncation_radius = mask_params.shrink_truncation_radius, n_real = n_real, in_asu = False).mask_data maptbx.unpad_in_place(map=mask_p1) mask = asu_map_ext.asymmetric_map( xrs.crystal_symmetry().space_group().type(), mask_p1).data() f_mask = ma.structure_factors_from_asu_map( asu_map_data = mask, n_real = n_real) # 4 elif(option==4): f_mask = masks.bulk_solvent( xray_structure = xrs, ignore_zero_occupancy_atoms = False, solvent_radius = mask_params.solvent_radius, shrink_truncation_radius = mask_params.shrink_truncation_radius, ignore_hydrogen_atoms = False, grid_step = step, atom_radii = atom_radii).structure_factors( miller_set = ma) elif(option==5): o = mmtbx.masks.bulk_solvent( xray_structure = xrs, ignore_zero_occupancy_atoms = False, solvent_radius = mask_params.solvent_radius, shrink_truncation_radius = mask_params.shrink_truncation_radius, ignore_hydrogen_atoms = False, gridding_n_real = n_real, atom_radii = atom_radii) assert approx_equal(n_real, o.data.accessor().all()) f_mask = o.structure_factors(ma) elif(option==6): # XXX No control over n_real, so results with others don't match mask_manager = masks.manager( miller_array = ma, miller_array_twin = None, mask_params = mask_params) f_mask = mask_manager.shell_f_masks(xray_structure=xrs, force_update=True)[0] else: assert 0 # return f_mask def filter_mask(mask_p1, volume_cutoff, crystal_symmetry, for_structure_factors = False): co = maptbx.connectivity( map_data = mask_p1, threshold = 0.01, preprocess_against_shallow = True, wrapping = True) mi, ma = flex.min(mask_p1), flex.max(mask_p1) print (mask_p1.size(), (mask_p1<0).count(True)) assert mi == 0, mi assert ma == 1, ma a,b,c = crystal_symmetry.unit_cell().parameters()[:3] na,nb,nc = mask_p1.accessor().all() step = flex.mean(flex.double([a/na, b/nb, c/nc])) if(crystal_symmetry.space_group_number() != 1): co.merge_symmetry_related_regions(space_group=crystal_symmetry.space_group()) conn = co.result().as_double() z = zip(co.regions(),range(0,co.regions().size())) sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True) for i_seq, p in enumerate(sorted_by_volume): v, i = p if(i==0): continue # skip macromolecule # skip small volume volume = v*step**3 if volume < volume_cutoff: conn = conn.set_selected(conn==i, 0) conn = conn.set_selected(conn>0, 1) if for_structure_factors: conn = conn / crystal_symmetry.space_group().order_z() return conn class mosaic_f_mask(object): def __init__(self, xray_structure, step, volume_cutoff=None, mean_diff_map_threshold=None, compute_whole=False, preprocess_against_shallow=True, largest_only=False, wrapping=True, f_obs=None, r_sol=1.1, r_shrink=0.9, f_calc=None, log = None, write_masks=False): adopt_init_args(self, locals()) # self.dsel = f_obs.d_spacings().data()>=0 # XXX WHY???????????? 
self.miller_array = f_obs.select(self.dsel) # # To avoid "Miller index not in structure factor map" crash step = min(step, self.miller_array.d_min()/3) # self.crystal_symmetry = self.xray_structure.crystal_symmetry() # compute mask in p1 (via ASU) self.crystal_gridding = maptbx.crystal_gridding( unit_cell = xray_structure.unit_cell(), space_group_info = xray_structure.space_group_info(), symmetry_flags = maptbx.use_space_group_symmetry, step = step) self.n_real = self.crystal_gridding.n_real() # XXX Where do we want to deal with H and occ==0? mask_p1 = mmtbx.masks.mask_from_xray_structure( xray_structure = xray_structure, p1 = True, for_structure_factors = True, solvent_radius = r_sol, shrink_truncation_radius = r_shrink, n_real = self.n_real, in_asu = False).mask_data maptbx.unpad_in_place(map=mask_p1) self.f_mask_whole = None if(compute_whole): mask = asu_map_ext.asymmetric_map( xray_structure.crystal_symmetry().space_group().type(), mask_p1).data() self.f_mask_whole = self.miller_array.structure_factors_from_asu_map( asu_map_data = mask, n_real = self.n_real) self.solvent_content = 100.*mask_p1.count(1)/mask_p1.size() if(write_masks): write_map_file(crystal_symmetry=xray_structure.crystal_symmetry(), map_data=mask_p1, file_name="mask_whole.mrc") # conn analysis co = maptbx.connectivity( map_data = mask_p1, threshold = 0.01, preprocess_against_shallow = preprocess_against_shallow, wrapping = wrapping) co.merge_symmetry_related_regions(space_group=xray_structure.space_group()) del mask_p1 self.conn = co.result().as_double() z = zip(co.regions(),range(0,co.regions().size())) sorted_by_volume = sorted(z, key=lambda x: x[0], reverse=True) # f_mask_data_0 = flex.complex_double(f_obs.data().size(), 0) f_mask_data = flex.complex_double(f_obs.data().size(), 0) self.FV = OrderedDict() self.mc = None diff_map = None mean_diff_map = None self.regions = OrderedDict() self.f_mask_0 = None self.f_mask = None # if(log is not None): print(" # volume_p1 uc(%) mFo-DFc: min,max,mean,sd", file=log) # for i_seq, p in enumerate(sorted_by_volume): v, i = p # skip macromolecule if(i==0): continue # skip small volume volume = v*step**3 uc_fraction = v*100./self.conn.size() if(volume_cutoff is not None): if volume < volume_cutoff: continue selection = self.conn==i mask_i_asu = self.compute_i_mask_asu(selection = selection, volume = volume) volume_asu = (mask_i_asu>0).count(True)*step**3 if(uc_fraction >= 1): f_mask_i = self.compute_f_mask_i(mask_i_asu) f_mask_data_0 += f_mask_i.data() elif(largest_only): break if(uc_fraction < 1 and diff_map is None): diff_map = self.compute_diff_map(f_mask_data = f_mask_data_0) mi,ma,me,sd = None,None,None,None if(diff_map is not None): blob = diff_map.select(selection.iselection()) mean_diff_map = flex.mean(diff_map.select(selection.iselection())) mi,ma,me = flex.min(blob), flex.max(blob), flex.mean(blob) sd = blob.sample_standard_deviation() if(log is not None): print("%3d"%i_seq,"%12.3f"%volume, "%8.4f"%round(uc_fraction,4), "%7s"%str(None) if diff_map is None else "%7.3f %7.3f %7.3f %7.3f"%( mi,ma,me,sd), file=log) if(mean_diff_map_threshold is not None and mean_diff_map is not None and mean_diff_map<=mean_diff_map_threshold): continue self.regions[i_seq] = group_args( id = i, i_seq = i_seq, volume = volume, uc_fraction = uc_fraction, diff_map = group_args(mi=mi, ma=ma, me=me, sd=sd)) f_mask_i = self.compute_f_mask_i(mask_i_asu) f_mask_data += f_mask_i.data() self.FV[f_mask_i] = [round(volume, 3), round(uc_fraction,1)] # self.f_mask_0 = f_obs.customized_copy(data = 
f_mask_data_0) self.f_mask = f_obs.customized_copy(data = f_mask_data) self.do_mosaic = False self.n_regions = len(self.FV.keys()) if(self.n_regions>1): self.do_mosaic = True def compute_f_mask_i(self, mask_i_asu): f_mask_i = self.miller_array.structure_factors_from_asu_map( asu_map_data = mask_i_asu, n_real = self.n_real) data = flex.complex_double(self.dsel.size(), 0) data = data.set_selected(self.dsel, f_mask_i.data()) return self.f_obs.set().array(data = data) def compute_diff_map(self, f_mask_data): if(self.f_calc is None): return None f_mask = self.f_obs.customized_copy(data = f_mask_data) fmodel = mmtbx.f_model.manager( f_obs = self.f_obs, f_calc = self.f_calc, f_mask = f_mask) fmodel = fmodel.select(self.dsel) fmodel.update_all_scales(remove_outliers=True, apply_scale_k1_to_f_obs = APPLY_SCALE_K1_TO_FOBS) self.mc = fmodel.electron_density_map().map_coefficients( map_type = "mFobs-DFmodel", isotropize = True, exclude_free_r_reflections = False) fft_map = self.mc.fft_map(crystal_gridding = self.crystal_gridding) fft_map.apply_sigma_scaling() return fft_map.real_map_unpadded() def compute_i_mask_asu(self, selection, volume): mask_i = flex.double(flex.grid(self.n_real), 0) mask_i = mask_i.set_selected(selection, 1) if(self.write_masks): write_map_file( crystal_symmetry = self.crystal_symmetry, map_data = mask_i, file_name = "mask_%s.mrc"%str(round(volume,3))) tmp = asu_map_ext.asymmetric_map( self.crystal_symmetry.space_group().type(), mask_i).data() return tmp def algorithm_0(f_obs, F, kt): """ Grid search """ fc, f_masks = F[0], F[1:] k_mask_trial_range=[] s = -1 while s<1: k_mask_trial_range.append(s) s+=0.0001 r = [] fc_data = fc.data() for i, f_mask in enumerate(f_masks): #print("mask ",i) assert f_obs.data().size() == fc.data().size() assert f_mask.data().size() == fc.data().size() #print (bulk_solvent.r_factor(f_obs.data(),fc_data)) kmask_, k_ = \ bulk_solvent.k_mask_and_k_overall_grid_search( f_obs.data()*kt, fc_data*kt, f_mask.data()*kt, flex.double(k_mask_trial_range), flex.bool(fc.data().size(),True)) r.append(kmask_) fc_data += fc_data*k_ + kmask_*f_mask.data() #print (bulk_solvent.r_factor(f_obs.data(),fc_data + kmask_*f_mask.data(),k_)) r = [1,]+r return r def algorithm_2(i_obs, F, x, use_curvatures=True, macro_cycles=10): """ Unphased one-step search """ calculator = tg(i_obs = i_obs, F=F, x = x, use_curvatures=use_curvatures) for it in range(macro_cycles): if(use_curvatures): m = minimizer(max_iterations=100, calculator=calculator) else: #upper = flex.double([1.1] + [1]*(x.size()-1)) #lower = flex.double([0.9] + [-1]*(x.size()-1)) upper = flex.double([1.1] + [5]*(x.size()-1)) lower = flex.double([0.9] + [-5]*(x.size()-1)) #upper = flex.double([10] + [5]*(x.size()-1)) #lower = flex.double([0.1] + [-5]*(x.size()-1)) #upper = flex.double([10] + [0.65]*(x.size()-1)) #lower = flex.double([0.1] + [0]*(x.size()-1)) #upper = flex.double([1] + [0.65]*(x.size()-1)) #lower = flex.double([1] + [0]*(x.size()-1)) #upper = flex.double([1] + [5.65]*(x.size()-1)) #lower = flex.double([1] + [-5]*(x.size()-1)) m = tncs.minimizer( potential = calculator, use_bounds = 2, lower_bound = lower, upper_bound = upper, initial_values = x).run() calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures) if(use_curvatures): for it in range(10): m = minimizer(max_iterations=100, calculator=calculator) calculator = tg(i_obs = i_obs, F=F, x = m.x, use_curvatures=use_curvatures) m = minimizer2(max_iterations=100, calculator=calculator).run(use_curvatures=True) calculator = tg(i_obs = 
i_obs, F=F, x = m.x, use_curvatures=use_curvatures) return m.x def algorithm_3(i_obs, fc, f_masks): """ Unphased two-step search """ F = [fc]+f_masks Gnm = [] cs = {} cntr=0 nm=[] # Compute and store Gnm for n, Fn in enumerate(F): for m, Fm in enumerate(F): if m < n: continue Gnm.append( flex.real( Fn.data()*flex.conj(Fm.data()) ) ) cs[(n,m)] = cntr cntr+=1 nm.append((n,m)) # Keep track of indices for "upper triangular matrix vs full" for k,v in zip(list(cs.keys()), list(cs.values())): i,j=k if i==j: continue else: cs[(j,i)]=v # Generate and solve system Ax=b, x = A_1*b A = [] b = [] for u, Gnm_u in enumerate(Gnm): for v, Gnm_v in enumerate(Gnm): scale = 2 n,m=nm[v] if n==m: scale=1 A.append( flex.sum(Gnm_u*Gnm_v)*scale ) b.append( flex.sum(Gnm_u * i_obs.data()) ) A = matrix.sqr(A) A_1 = A.inverse() b = matrix.col(b) x = A_1 * b # Expand Xmn from solution x Xmn = [] for n, Fn in enumerate(F): rows = [] for m, Fm in enumerate(F): x_ = x[cs[(n,m)]] rows.append(x_) Xmn.append(rows) # Do formula (19) lnK = [] for j, Fj in enumerate(F): t1 = flex.sum( flex.log( flex.double(Xmn[j]) ) ) t2 = 0 for n, Fn in enumerate(F): for m, Fm in enumerate(F): t2 += math.log(Xmn[n][m]) t2 = t2 / (2*len(F)) lnK.append( 1/len(F)*(t1-t2) ) return [math.exp(x) for x in lnK] def algorithm_4(f_obs, F, phase_source, max_cycles=100, auto_converge_eps=1.e-7, use_cpp=True): """ Phased simultaneous search (alg4) """ fc, f_masks = F[0], F[1:] fc = fc.deep_copy() F = [fc]+F[1:] # C++ version if(use_cpp): return mosaic_ext.alg4( [f.data() for f in F], f_obs.data(), phase_source.data(), max_cycles, auto_converge_eps) # Python version (1.2-3 times slower, but much more readable!) cntr = 0 x_prev = None while True: f_obs_cmpl = f_obs.phase_transfer(phase_source = phase_source) A = [] b = [] for j, Fj in enumerate(F): A_rows = [] for n, Fn in enumerate(F): Gjn = flex.real( Fj.data()*flex.conj(Fn.data()) ) A_rows.append( flex.sum(Gjn) ) Hj = flex.real( Fj.data()*flex.conj(f_obs_cmpl.data()) ) b.append(flex.sum(Hj)) A.extend(A_rows) A = matrix.sqr(A) A_1 = A.inverse() b = matrix.col(b) x = A_1 * b # fc_d = flex.complex_double(phase_source.indices().size(), 0) for i, f in enumerate(F): fc_d += f.data()*x[i] phase_source = phase_source.customized_copy(data = fc_d) x_ = x[:] # cntr+=1 if(cntr>max_cycles): break if(x_prev is None): x_prev = x_[:] else: max_diff = flex.max(flex.abs(flex.double(x_prev)-flex.double(x_))) if(max_diff<=auto_converge_eps): break x_prev = x_[:] return x_
36.266355
107
0.60862
19,424
0.625693
0
0
0
0
0
0
5,575
0.179584
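The mosaic.py row above defines a small moving_average(x, n) helper that update_F uses to smooth per-bin k_mask values before deciding which masks to keep. A quick standalone check of that helper, with the body copied from the module so it runs without any cctbx dependency:

def moving_average(x, n):
    # Same logic as in the module: mean over a window of +/- n neighbours,
    # truncated at both ends of the sequence.
    r = []
    for i, xi in enumerate(x):
        s = 0
        cntr = 0
        for j in range(max(0, i - n), min(i + n + 1, len(x))):
            s += x[j]
            cntr += 1
        r.append(s / cntr)
    return r


print(moving_average([0.4, 0.1, 0.3, 0.0, 0.2], 2))
# First entry averages indices 0..2: (0.4 + 0.1 + 0.3) / 3 ~= 0.267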
78ddef69c8c618801719da4ee218c45f1df458b0
25,941
py
Python
mars/tensor/execution/tests/test_base_execute.py
lmatz/mars
45f9166b54eb91b21e66cef8b590a41aa8ac9569
[ "Apache-2.0" ]
1
2018-12-26T08:37:04.000Z
2018-12-26T08:37:04.000Z
mars/tensor/execution/tests/test_base_execute.py
lmatz/mars
45f9166b54eb91b21e66cef8b590a41aa8ac9569
[ "Apache-2.0" ]
null
null
null
mars/tensor/execution/tests/test_base_execute.py
lmatz/mars
45f9166b54eb91b21e66cef8b590a41aa8ac9569
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 1999-2018 Alibaba Group Holding Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import scipy.sparse as sps from mars.tensor.execution.core import Executor from mars import tensor as mt from mars.tensor.expressions.datasource import tensor, ones, zeros, arange from mars.tensor.expressions.base import copyto, transpose, moveaxis, broadcast_to, broadcast_arrays, where, \ expand_dims, rollaxis, atleast_1d, atleast_2d, atleast_3d, argwhere, array_split, split, \ hsplit, vsplit, dsplit, roll, squeeze, ptp, diff, ediff1d, digitize, average, cov, corrcoef, \ flip, flipud, fliplr, repeat, tile, isin from mars.tensor.expressions.merge import stack from mars.tensor.expressions.reduction import all as tall class Test(unittest.TestCase): def setUp(self): self.executor = Executor('numpy') def testRechunkExecution(self): raw = np.random.random((11, 8)) arr = tensor(raw, chunks=3) arr2 = arr.rechunk(4) res = self.executor.execute_tensor(arr2) self.assertTrue(np.array_equal(res[0], raw[:4, :4])) self.assertTrue(np.array_equal(res[1], raw[:4, 4:])) self.assertTrue(np.array_equal(res[2], raw[4:8, :4])) self.assertTrue(np.array_equal(res[3], raw[4:8, 4:])) self.assertTrue(np.array_equal(res[4], raw[8:, :4])) self.assertTrue(np.array_equal(res[5], raw[8:, 4:])) def testCopytoExecution(self): a = ones((2, 3), chunks=1) b = tensor([3, -1, 3], chunks=2) copyto(a, b, where=b > 1) res = self.executor.execute_tensor(a, concat=True)[0] expected = np.array([[3, 1, 3], [3, 1, 3]]) np.testing.assert_equal(res, expected) def testAstypeExecution(self): raw = np.random.random((10, 5)) arr = tensor(raw, chunks=3) arr2 = arr.astype('i8') res = self.executor.execute_tensor(arr2, concat=True) self.assertTrue(np.array_equal(res[0], raw.astype('i8'))) raw = sps.random(10, 5, density=.2) arr = tensor(raw, chunks=3) arr2 = arr.astype('i8') res = self.executor.execute_tensor(arr2, concat=True) self.assertTrue(np.array_equal(res[0].toarray(), raw.astype('i8').toarray())) def testTransposeExecution(self): raw = np.random.random((11, 8, 5)) arr = tensor(raw, chunks=3) arr2 = transpose(arr) res = self.executor.execute_tensor(arr2, concat=True) self.assertTrue(np.array_equal(res[0], raw.T)) arr3 = transpose(arr, axes=(-2, -1, -3)) res = self.executor.execute_tensor(arr3, concat=True) self.assertTrue(np.array_equal(res[0], raw.transpose(1, 2, 0))) raw = sps.random(11, 8) arr = tensor(raw, chunks=3) arr2 = transpose(arr) self.assertTrue(arr2.issparse()) res = self.executor.execute_tensor(arr2, concat=True) self.assertTrue(np.array_equal(res[0].toarray(), raw.T.toarray())) def testSwapaxesExecution(self): raw = np.random.random((11, 8, 5)) arr = tensor(raw, chunks=3) arr2 = arr.swapaxes(2, 0) res = self.executor.execute_tensor(arr2, concat=True) self.assertTrue(np.array_equal(res[0], raw.swapaxes(2, 0))) raw = sps.random(11, 8, density=.2) arr = tensor(raw, chunks=3) arr2 = arr.swapaxes(1, 0) res = self.executor.execute_tensor(arr2, concat=True) 
self.assertTrue(np.array_equal(res[0].toarray(), raw.toarray().swapaxes(1, 0))) def testMoveaxisExecution(self): x = zeros((3, 4, 5), chunks=2) t = moveaxis(x, 0, -1) res = self.executor.execute_tensor(t, concat=True)[0] self.assertEqual(res.shape, (4, 5, 3)) t = moveaxis(x, -1, 0) res = self.executor.execute_tensor(t, concat=True)[0] self.assertEqual(res.shape, (5, 3, 4)) t = moveaxis(x, [0, 1], [-1, -2]) res = self.executor.execute_tensor(t, concat=True)[0] self.assertEqual(res.shape, (5, 4, 3)) t = moveaxis(x, [0, 1, 2], [-1, -2, -3]) res = self.executor.execute_tensor(t, concat=True)[0] self.assertEqual(res.shape, (5, 4, 3)) def testBroadcastToExecution(self): raw = np.random.random((10, 5, 1)) arr = tensor(raw, chunks=2) arr2 = broadcast_to(arr, (5, 10, 5, 6)) res = self.executor.execute_tensor(arr2, concat=True) self.assertTrue(np.array_equal(res[0], np.broadcast_to(raw, (5, 10, 5, 6)))) def testBroadcastArraysExecutions(self): x_data = [[1, 2, 3]] x = tensor(x_data, chunks=1) y_data = [[1], [2], [3]] y = tensor(y_data, chunks=2) a = broadcast_arrays(x, y) res = [self.executor.execute_tensor(arr, concat=True)[0] for arr in a] expected = np.broadcast_arrays(x_data, y_data) for r, e in zip(res, expected): np.testing.assert_equal(r, e) def testWhereExecution(self): raw_cond = np.random.randint(0, 2, size=(4, 4), dtype='?') raw_x = np.random.rand(4, 1) raw_y = np.random.rand(4, 4) cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2) arr = where(cond, x, y) res = self.executor.execute_tensor(arr, concat=True) self.assertTrue(np.array_equal(res[0], np.where(raw_cond, raw_x, raw_y))) raw_cond = sps.csr_matrix(np.random.randint(0, 2, size=(4, 4), dtype='?')) raw_x = sps.random(4, 1, density=.1) raw_y = sps.random(4, 4, density=.1) cond, x, y = tensor(raw_cond, chunks=2), tensor(raw_x, chunks=2), tensor(raw_y, chunks=2) arr = where(cond, x, y) res = self.executor.execute_tensor(arr, concat=True)[0] self.assertTrue(np.array_equal(res.toarray(), np.where(raw_cond.toarray(), raw_x.toarray(), raw_y.toarray()))) def testReshapeExecution(self): raw_data = np.random.rand(10, 20, 30) x = tensor(raw_data, chunks=6) y = x.reshape(-1, 30) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 30))) y2 = x.reshape(10, -1) res = self.executor.execute_tensor(y2, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.reshape(10, -1))) y3 = x.reshape(-1) res = self.executor.execute_tensor(y3, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1))) y4 = x.ravel() res = self.executor.execute_tensor(y4, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.ravel())) raw_data = np.random.rand(30, 100, 20) x = tensor(raw_data, chunks=6) y = x.reshape(-1, 20, 5, 5, 4) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.reshape(-1, 20, 5, 5, 4))) y2 = x.reshape(3000, 10, 2) res = self.executor.execute_tensor(y2, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.reshape(3000, 10, 2))) y3 = x.reshape(60, 25, 40) res = self.executor.execute_tensor(y3, concat=True) self.assertTrue(np.array_equal(res[0], raw_data.reshape(60, 25, 40))) def testExpandDimsExecution(self): raw_data = np.random.rand(10, 20, 30) x = tensor(raw_data, chunks=6) y = expand_dims(x, 1) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 1))) y = expand_dims(x, 0) res = self.executor.execute_tensor(y, 
concat=True) self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 0))) y = expand_dims(x, 3) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, 3))) y = expand_dims(x, -1) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -1))) y = expand_dims(x, -4) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], np.expand_dims(raw_data, -4))) with self.assertRaises(np.AxisError): expand_dims(x, -5) with self.assertRaises(np.AxisError): expand_dims(x, 4) def testRollAxisExecution(self): x = ones((3, 4, 5, 6), chunks=1) y = rollaxis(x, 3, 1) res = self.executor.execute_tensor(y, concat=True) self.assertTrue(np.array_equal(res[0], np.rollaxis(np.ones((3, 4, 5, 6)), 3, 1))) def testAtleast1dExecution(self): x = 1 y = ones(3, chunks=2) z = ones((3, 4), chunks=2) t = atleast_1d(x, y, z) res = [self.executor.execute_tensor(i, concat=True)[0] for i in t] self.assertTrue(np.array_equal(res[0], np.array([1]))) self.assertTrue(np.array_equal(res[1], np.ones(3))) self.assertTrue(np.array_equal(res[2], np.ones((3, 4)))) def testAtleast2dExecution(self): x = 1 y = ones(3, chunks=2) z = ones((3, 4), chunks=2) t = atleast_2d(x, y, z) res = [self.executor.execute_tensor(i, concat=True)[0] for i in t] self.assertTrue(np.array_equal(res[0], np.array([[1]]))) self.assertTrue(np.array_equal(res[1], np.atleast_2d(np.ones(3)))) self.assertTrue(np.array_equal(res[2], np.ones((3, 4)))) def testAtleast3dExecution(self): x = 1 y = ones(3, chunks=2) z = ones((3, 4), chunks=2) t = atleast_3d(x, y, z) res = [self.executor.execute_tensor(i, concat=True)[0] for i in t] self.assertTrue(np.array_equal(res[0], np.atleast_3d(x))) self.assertTrue(np.array_equal(res[1], np.atleast_3d(np.ones(3)))) self.assertTrue(np.array_equal(res[2], np.atleast_3d(np.ones((3, 4))))) def testArgwhereExecution(self): x = arange(6, chunks=2).reshape(2, 3) t = argwhere(x > 1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.argwhere(np.arange(6).reshape(2, 3) > 1) self.assertTrue(np.array_equal(res, expected)) def testArraySplitExecution(self): x = arange(48, chunks=3).reshape(2, 3, 8) ss = array_split(x, 3, axis=2) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.array_split(np.arange(48).reshape(2, 3, 8), 3, axis=2) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] ss = array_split(x, [3, 5, 6, 10], axis=2) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.array_split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] def testSplitExecution(self): x = arange(48, chunks=3).reshape(2, 3, 8) ss = split(x, 4, axis=2) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.split(np.arange(48).reshape(2, 3, 8), 4, axis=2) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] ss = split(x, [3, 5, 6, 10], axis=2) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.split(np.arange(48).reshape(2, 3, 8), [3, 5, 6, 10], axis=2) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] # hsplit x = arange(120, chunks=3).reshape(2, 12, 5) ss = hsplit(x, 4) res = [self.executor.execute_tensor(i, 
concat=True)[0] for i in ss] expected = np.hsplit(np.arange(120).reshape(2, 12, 5), 4) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] # vsplit x = arange(48, chunks=3).reshape(8, 3, 2) ss = vsplit(x, 4) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.vsplit(np.arange(48).reshape(8, 3, 2), 4) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] # dsplit x = arange(48, chunks=3).reshape(2, 3, 8) ss = dsplit(x, 4) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.dsplit(np.arange(48).reshape(2, 3, 8), 4) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r, e) for r, e in zip(res, expected)] x_data = sps.random(12, 8, density=.1) x = tensor(x_data, chunks=3) ss = split(x, 4, axis=0) res = [self.executor.execute_tensor(i, concat=True)[0] for i in ss] expected = np.split(x_data.toarray(), 4, axis=0) self.assertEqual(len(res), len(expected)) [np.testing.assert_equal(r.toarray(), e) for r, e in zip(res, expected)] def testRollExecution(self): x = arange(10, chunks=2) t = roll(x, 2) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.roll(np.arange(10), 2) np.testing.assert_equal(res, expected) x2 = x.reshape(2, 5) t = roll(x2, 1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.roll(np.arange(10).reshape(2, 5), 1) np.testing.assert_equal(res, expected) t = roll(x2, 1, axis=0) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=0) np.testing.assert_equal(res, expected) t = roll(x2, 1, axis=1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.roll(np.arange(10).reshape(2, 5), 1, axis=1) np.testing.assert_equal(res, expected) def testSqueezeExecution(self): data = np.array([[[0], [1], [2]]]) x = tensor(data, chunks=1) t = squeeze(x) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.squeeze(data) np.testing.assert_equal(res, expected) t = squeeze(x, axis=2) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.squeeze(data, axis=2) np.testing.assert_equal(res, expected) def testPtpExecution(self): x = arange(4, chunks=1).reshape(2, 2) t = ptp(x, axis=0) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.ptp(np.arange(4).reshape(2, 2), axis=0) np.testing.assert_equal(res, expected) t = ptp(x, axis=1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.ptp(np.arange(4).reshape(2, 2), axis=1) np.testing.assert_equal(res, expected) t = ptp(x) res = self.executor.execute_tensor(t)[0] expected = np.ptp(np.arange(4).reshape(2, 2)) np.testing.assert_equal(res, expected) def testDiffExecution(self): data = np.array([1, 2, 4, 7, 0]) x = tensor(data, chunks=2) t = diff(x) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.diff(data) np.testing.assert_equal(res, expected) t = diff(x, n=2) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.diff(data, n=2) np.testing.assert_equal(res, expected) data = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) x = tensor(data, chunks=2) t = diff(x) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.diff(data) np.testing.assert_equal(res, expected) t = diff(x, axis=0) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.diff(data, axis=0) np.testing.assert_equal(res, expected) x = mt.arange('1066-10-13', '1066-10-16', dtype=mt.datetime64) t = diff(x) res = 
self.executor.execute_tensor(t, concat=True)[0] expected = np.diff(np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64)) np.testing.assert_equal(res, expected) def testEdiff1d(self): data = np.array([1, 2, 4, 7, 0]) x = tensor(data, chunks=2) t = ediff1d(x) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.ediff1d(data) np.testing.assert_equal(res, expected) to_begin = tensor(-99, chunks=2) to_end = tensor([88, 99], chunks=2) t = ediff1d(x, to_begin=to_begin, to_end=to_end) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.ediff1d(data, to_begin=-99, to_end=np.array([88, 99])) np.testing.assert_equal(res, expected) data = [[1, 2, 4], [1, 6, 24]] t = ediff1d(tensor(data, chunks=2)) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.ediff1d(data) np.testing.assert_equal(res, expected) def testDigitizeExecution(self): data = np.array([0.2, 6.4, 3.0, 1.6]) x = tensor(data, chunks=2) bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) inds = digitize(x, bins) res = self.executor.execute_tensor(inds, concat=True)[0] expected = np.digitize(data, bins) np.testing.assert_equal(res, expected) b = tensor(bins, chunks=2) inds = digitize(x, b) res = self.executor.execute_tensor(inds, concat=True)[0] expected = np.digitize(data, bins) np.testing.assert_equal(res, expected) data = np.array([1.2, 10.0, 12.4, 15.5, 20.]) x = tensor(data, chunks=2) bins = np.array([0, 5, 10, 15, 20]) inds = digitize(x, bins, right=True) res = self.executor.execute_tensor(inds, concat=True)[0] expected = np.digitize(data, bins, right=True) np.testing.assert_equal(res, expected) inds = digitize(x, bins, right=False) res = self.executor.execute_tensor(inds, concat=True)[0] expected = np.digitize(data, bins, right=False) np.testing.assert_equal(res, expected) data = sps.random(10, 1, density=.1) * 12 x = tensor(data, chunks=2) bins = np.array([1.0, 2.0, 2.5, 4.0, 10.0]) inds = digitize(x, bins) res = self.executor.execute_tensor(inds, concat=True)[0] expected = np.digitize(data.toarray(), bins, right=False) np.testing.assert_equal(res.toarray(), expected) def testAverageExecution(self): data = arange(1, 5, chunks=1) t = average(data) res = self.executor.execute_tensor(t)[0] expected = np.average(np.arange(1, 5)) self.assertEqual(res, expected) t = average(arange(1, 11, chunks=2), weights=arange(10, 0, -1, chunks=2)) res = self.executor.execute_tensor(t)[0] expected = np.average(range(1, 11), weights=range(10, 0, -1)) self.assertEqual(res, expected) data = arange(6, chunks=2).reshape((3, 2)) t = average(data, axis=1, weights=tensor([1./4, 3./4], chunks=2)) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.average(np.arange(6).reshape(3, 2), axis=1, weights=(1./4, 3./4)) np.testing.assert_equal(res, expected) with self.assertRaises(TypeError): average(data, weights=tensor([1./4, 3./4], chunks=2)) def testCovExecution(self): data = np.array([[0, 2], [1, 1], [2, 0]]).T x = tensor(data, chunks=1) t = cov(x) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.cov(data) np.testing.assert_equal(res, expected) data_x = [-2.1, -1, 4.3] data_y = [3, 1.1, 0.12] x = tensor(data_x, chunks=1) y = tensor(data_y, chunks=1) X = stack((x, y), axis=0) t = cov(x, y) r = tall(t == cov(X)) self.assertTrue(self.executor.execute_tensor(r)[0]) def testCorrcoefExecution(self): data_x = [-2.1, -1, 4.3] data_y = [3, 1.1, 0.12] x = tensor(data_x, chunks=1) y = tensor(data_y, chunks=1) t = corrcoef(x, y) res = self.executor.execute_tensor(t, concat=True)[0] expected = 
np.corrcoef(data_x, data_y) np.testing.assert_equal(res, expected) def testFlipExecution(self): a = arange(8, chunks=2).reshape((2, 2, 2)) t = flip(a, 0) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.flip(np.arange(8).reshape(2, 2, 2), 0) np.testing.assert_equal(res, expected) t = flip(a, 1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.flip(np.arange(8).reshape(2, 2, 2), 1) np.testing.assert_equal(res, expected) t = flipud(a) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.flipud(np.arange(8).reshape(2, 2, 2)) np.testing.assert_equal(res, expected) t = fliplr(a) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.fliplr(np.arange(8).reshape(2, 2, 2)) np.testing.assert_equal(res, expected) def testRepeatExecution(self): a = repeat(3, 4) res = self.executor.execute_tensor(a)[0] expected = np.repeat(3, 4) np.testing.assert_equal(res, expected) x_data = np.random.randn(20, 30) x = tensor(x_data, chunks=(3, 4)) t = repeat(x, 2) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.repeat(x_data, 2) np.testing.assert_equal(res, expected) t = repeat(x, 3, axis=1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.repeat(x_data, 3, axis=1) np.testing.assert_equal(res, expected) t = repeat(x, np.arange(20), axis=0) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.repeat(x_data, np.arange(20), axis=0) np.testing.assert_equal(res, expected) t = repeat(x, arange(20, chunks=5), axis=0) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.repeat(x_data, np.arange(20), axis=0) np.testing.assert_equal(res, expected) x_data = sps.random(20, 30, density=.1) x = tensor(x_data, chunks=(3, 4)) t = repeat(x, 2, axis=1) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.repeat(x_data.toarray(), 2, axis=1) np.testing.assert_equal(res.toarray(), expected) def testTileExecution(self): a_data = np.array([0, 1, 2]) a = tensor(a_data, chunks=2) t = tile(a, 2) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.tile(a_data, 2) np.testing.assert_equal(res, expected) t = tile(a, (2, 2)) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.tile(a_data, (2, 2)) np.testing.assert_equal(res, expected) t = tile(a, (2, 1, 2)) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.tile(a_data, (2, 1, 2)) np.testing.assert_equal(res, expected) b_data = np.array([[1, 2], [3, 4]]) b = tensor(b_data, chunks=1) t = tile(b, 2) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.tile(b_data, 2) np.testing.assert_equal(res, expected) t = tile(b, (2, 1)) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.tile(b_data, (2, 1)) np.testing.assert_equal(res, expected) c_data = np.array([1, 2, 3, 4]) c = tensor(c_data, chunks=3) t = tile(c, (4, 1)) res = self.executor.execute_tensor(t, concat=True)[0] expected = np.tile(c_data, (4, 1)) np.testing.assert_equal(res, expected) def testIsInExecution(self): element = 2 * arange(4, chunks=1).reshape((2, 2)) test_elements = [1, 2, 4, 8] mask = isin(element, test_elements) res = self.executor.execute_tensor(mask, concat=True)[0] expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements) np.testing.assert_equal(res, expected) res = self.executor.execute_tensor(element[mask], concat=True)[0] expected = np.array([2, 4]) np.testing.assert_equal(res, expected) mask = isin(element, test_elements, invert=True) res = self.executor.execute_tensor(mask, 
concat=True)[0] expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_elements, invert=True) np.testing.assert_equal(res, expected) res = self.executor.execute_tensor(element[mask], concat=True)[0] expected = np.array([0, 6]) np.testing.assert_equal(res, expected) test_set = {1, 2, 4, 8} mask = isin(element, test_set) res = self.executor.execute_tensor(mask, concat=True)[0] expected = np.isin(2 * np.arange(4).reshape((2, 2)), test_set) np.testing.assert_equal(res, expected)
34.132895
110
0.596623
24,622
0.949154
0
0
0
0
0
0
728
0.028064
78ddf0916f6002f2dfd416cfa16eaf9855682728
77
py
Python
comix-imagenet/init_paths.py
drumpt/Co-Mixup
4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a
[ "MIT" ]
86
2021-02-05T03:13:09.000Z
2022-03-29T03:10:50.000Z
comix-imagenet/init_paths.py
drumpt/Co-Mixup
4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a
[ "MIT" ]
4
2021-06-01T13:07:06.000Z
2022-02-15T03:08:30.000Z
comix-imagenet/init_paths.py
drumpt/Co-Mixup
4c43f0ec873ce6c1e8ab446c7cb9e25089b9b91a
[ "MIT" ]
7
2021-02-09T01:27:03.000Z
2021-09-01T14:07:40.000Z
import sys
import matplotlib

matplotlib.use('Agg')
sys.path.insert(0, 'lib')
15.4
25
0.753247
0
0
0
0
0
0
0
0
10
0.12987
78de98de938be5cc3ac224e5095778425f0adabc
14,828
py
Python
members_abundances_in_out_uncertainties.py
kcotar/Gaia_clusters_potential
aee2658c40446891d31528f8dec3cec899b63c68
[ "MIT" ]
null
null
null
members_abundances_in_out_uncertainties.py
kcotar/Gaia_clusters_potential
aee2658c40446891d31528f8dec3cec899b63c68
[ "MIT" ]
null
null
null
members_abundances_in_out_uncertainties.py
kcotar/Gaia_clusters_potential
aee2658c40446891d31528f8dec3cec899b63c68
[ "MIT" ]
null
null
null
import matplotlib matplotlib.use('Agg') import numpy as np import matplotlib.pyplot as plt from glob import glob from astropy.table import Table, join from os import chdir, system from scipy.stats import norm as gauss_norm from sys import argv from getopt import getopt # turn off polyfit ranking warnings import warnings warnings.filterwarnings('ignore') def _prepare_pdf_data(means, stds, range, norm=True): x_vals = np.linspace(range[0], range[1], 250) y_vals = np.zeros_like(x_vals) # create and sum all PDF of stellar abundances for d_m, d_s in zip(means, stds): if np.isfinite([d_m, d_s]).all(): y_vals += gauss_norm.pdf(x_vals, loc=d_m, scale=d_s) # return normalized summed pdf of all stars if norm and np.nansum(y_vals) > 0.: y_vals = 1. * y_vals/np.nanmax(y_vals) return x_vals, y_vals def _prepare_hist_data(d, bins, range, norm=True): heights, edges = np.histogram(d, bins=bins, range=range) width = np.abs(edges[0] - edges[1]) if norm: heights = 1.*heights / np.nanmax(heights) return edges[:-1], heights, width def _evaluate_abund_trend_fit(orig, fit, idx, sigma_low, sigma_high): # diffence to the original data diff = orig - fit std_diff = np.nanstd(diff[idx]) # select data that will be fitted idx_outlier = np.logical_or(diff < (-1. * std_diff * sigma_low), diff > (std_diff * sigma_high)) return np.logical_and(idx, ~idx_outlier) def fit_abund_trend(p_data, a_data, steps=3, sigma_low=2.5, sigma_high=2.5, order=5, window=10, n_min_perc=10.,func='poly'): idx_fit = np.logical_and(np.isfinite(p_data), np.isfinite(a_data)) data_len = np.sum(idx_fit) n_fit_points_prev = np.sum(idx_fit) if data_len <= order + 1: return None, None p_offset = np.nanmedian(p_data) for i_f in range(steps): # number of sigma clipping steps if func == 'cheb': coef = np.polynomial.chebyshev.chebfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order) f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef) if func == 'legen': coef = np.polynomial.legendre.legfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order) f_data = np.polynomial.legendre.legval(p_data - p_offset, coef) if func == 'poly': coef = np.polyfit(p_data[idx_fit] - p_offset, a_data[idx_fit], order) f_data = np.poly1d(coef)(p_data - p_offset) if func == 'spline': coef = splrep(p_data[idx_fit] - p_offset, a_data[idx_fit], k=order, s=window) f_data = splev(p_data - p_offset, coef) idx_fit = _evaluate_abund_trend_fit(a_data, f_data, idx_fit, sigma_low, sigma_high) n_fit_points = np.sum(idx_fit) if 100.*n_fit_points/data_len < n_min_perc: break if n_fit_points == n_fit_points_prev: break else: n_fit_points_prev = n_fit_points a_std = np.nanstd(a_data - f_data) return [coef, p_offset], a_std def eval_abund_trend(p_data, m_data, func='poly'): coef, p_offset = m_data if func == 'cheb': f_data = np.polynomial.chebyshev.chebval(p_data - p_offset, coef) if func == 'legen': f_data = np.polynomial.legendre.legval(p_data - p_offset, coef) if func == 'poly': f_data = np.poly1d(coef)(p_data - p_offset) if func == 'spline': f_data = splev(p_data - p_offset, coef) return f_data simulation_dir = '/shared/data-camelot/cotar/' data_dir_clusters = simulation_dir+'GaiaDR2_open_clusters_2001_GALAH/' data_dir = '/shared/ebla/cotar/' USE_DR3 = True Q_FLAGS = True P_INDIVIDUAL = False suffix = '' if len(argv) > 1: # parse input options opts, args = getopt(argv[1:], '', ['dr3=', 'suffix=', 'flags=', 'individual=']) # set parameters, depending on user inputs print(opts) for o, a in opts: if o == '--dr3': USE_DR3 = int(a) > 0 if o == '--suffix': suffix += str(a) if o == 
'--flags': Q_FLAGS = int(a) > 0 if o == '--individual': P_INDIVIDUAL = int(a) > 0 CG_data = Table.read(data_dir+'clusters/Cantat-Gaudin_2018/members.fits') tails_data = Table.read(data_dir+'clusters/cluster_tails/members_open_gaia_tails.fits') # remove cluster members from tails data print('Cluster members all:', len(CG_data), len(tails_data)) idx_not_in_cluster = np.in1d(tails_data['source_id'], CG_data['source_id'], invert=True) tails_data = tails_data[idx_not_in_cluster] print('Cluster members all:', len(CG_data), len(tails_data)) if USE_DR3: # cannon_data = Table.read(data_dir+'GALAH_iDR3_main_alpha_190529.fits') cannon_data = Table.read(data_dir+'GALAH_iDR3_main_191213.fits') fe_col = 'fe_h' teff_col = 'teff' q_flag = 'flag_sp' suffix += '_DR3' else: pass if Q_FLAGS: suffix += '_flag0' # determine all possible simulation subdirs chdir(data_dir_clusters) for cluster_dir in glob('Cluster_orbits_GaiaDR2_*'): chdir(cluster_dir) print('Working on clusters in ' + cluster_dir) for sub_dir in glob('*'): current_cluster = '_'.join(sub_dir.split('_')[0:2]) source_id_cg = CG_data[CG_data['cluster'] == current_cluster]['source_id'] source_id_tail = tails_data[tails_data['cluster'] == current_cluster]['source_id'] idx_cg_memb = np.in1d(cannon_data['source_id'], np.array(source_id_cg)) idx_tail = np.in1d(cannon_data['source_id'], np.array(source_id_tail)) if '.png' in sub_dir or 'individual-abund' in sub_dir: continue print(' ') print(sub_dir) chdir(sub_dir) try: g_init = Table.read('members_init_galah.csv', format='ascii', delimiter='\t') idx_init = np.in1d(cannon_data['source_id'], g_init['source_id']) except: idx_init = np.full(len(cannon_data), False) try: g_in_all = Table.read('possible_ejected-step1.csv', format='ascii', delimiter='\t') g_in = Table.read('possible_ejected-step1_galah.csv', format='ascii', delimiter='\t') # further refinement of results to be plotted here g_in_all = g_in_all[np.logical_and(g_in_all['time_in_cluster'] >= 1., # [Myr] longest time (of all incarnations) inside cluster g_in_all['in_cluster_prob'] >= 68.)] # percentage of reincarnations inside cluster g_in = g_in[np.logical_and(g_in['time_in_cluster'] >= 1., g_in['in_cluster_prob'] >= 68.)] idx_in = np.in1d(cannon_data['source_id'], g_in['source_id']) idx_in_no_CG = np.logical_and(idx_in, np.logical_not(np.in1d(cannon_data['source_id'], CG_data['source_id']))) except: idx_in = np.full(len(cannon_data), False) idx_in_no_CG = np.full(len(cannon_data), False) try: g_out = Table.read('possible_outside-step1_galah.csv', format='ascii', delimiter='\t') # further refinement of results to be plotted here g_out = g_out[np.logical_and(g_out['time_in_cluster'] <= 0, g_out['in_cluster_prob'] <= 0)] idx_out = np.in1d(cannon_data['source_id'], g_out['source_id']) except: idx_out = np.full(len(cannon_data), False) chdir('..') if np.sum(idx_init) == 0 or np.sum(idx_in) == 0 or np.sum(idx_out) == 0: print(' Some Galah lists are missing') if USE_DR3: abund_cols = [c for c in cannon_data.colnames if '_fe' in c and 'nr_' not in c and 'diff_' not in c and 'e_' not in c and 'Li' not in c and 'alpha' not in c] # and ('I' in c or 'II' in c or 'III' in c)] else: abund_cols = [c for c in cannon_data.colnames if '_abund' in c and len(c.split('_')) == 3] # abund_cols = ['e_' + cc for cc in abund_cols] # rg = (0., 0.35) # yt = [0., 0.1, 0.2, 0.3] # medfix = '-snr-sigma_' abund_cols = ['diff_' + cc for cc in abund_cols] rg = (-0.45, 0.45) yt = [-0.3, -0.15, 0.0, 0.15, 0.3] medfix = '-detrended-snr_' # 
------------------------------------------------------------------------------ # NEW: plot with parameter dependency trends # ------------------------------------------------------------------------------ bs = 40 x_cols_fig = 7 y_cols_fig = 5 param_lims = {'snr_c2_iraf': [5, 175], 'age': [0., 14.], 'teff': [3000, 7000], 'logg': [0.0, 5.5], 'fe_h': [-1.2, 0.5]} for param in ['snr_c2_iraf']: #list(param_lims.keys()): cannon_data['abund_det'] = 0 cannon_data['abund_det_elems'] = 0 print('Estimating membership using parameter', param) fig, ax = plt.subplots(y_cols_fig, x_cols_fig, figsize=(15, 10)) for i_c, col in enumerate(abund_cols): # print(col) x_p = i_c % x_cols_fig y_p = int(1. * i_c / x_cols_fig) fit_x_param = 'teff' cur_abund_col = '_'.join(col.split('_')[1:]) cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] idx_val = np.isfinite(cannon_data[col]) if Q_FLAGS: idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0) idx_u1 = np.logical_and(idx_out, idx_val) idx_u2 = np.logical_and(idx_init, idx_val) idx_u3 = np.logical_and(idx_in, idx_val) idx_u4 = np.logical_and(idx_cg_memb, idx_val) idx_u5 = np.logical_and(idx_tail, idx_val) fit_model, col_std = fit_abund_trend(cannon_data[fit_x_param][idx_u2], cannon_data[cur_abund_col][idx_u2], order=3, steps=2, func='poly', sigma_low=2.5, sigma_high=2.5, n_min_perc=10.) if fit_model is not None: cannon_data['diff_' + cur_abund_col] = cannon_data[cur_abund_col] - eval_abund_trend(cannon_data[fit_x_param], fit_model, func='poly') else: cannon_data['diff_' + cur_abund_col] = np.nan ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[col][idx_u1], lw=0, s=3, color='C2', label='Field') ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[col][idx_u2], lw=0, s=3, color='C0', label='Initial') ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[col][idx_u3], lw=0, s=3, color='C1', label='Ejected') if np.sum(idx_u5) > 0: print('Ejected in tail:', np.sum(np.logical_and(idx_u3, idx_u5))) ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[col][idx_u5], lw=0, s=3, color='C4', label='Tail') label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3)) ax[y_p, x_p].set(xlim=param_lims[param], title=' '.join(col.split('_')[:2]) + label_add, ylim=rg, yticks=yt,) ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black') rg = (-0.6, 0.6) idx_val = np.isfinite(cannon_data[teff_col]) if Q_FLAGS: idx_val = np.logical_and(idx_val, cannon_data[q_flag] == 0) x_p = -1 y_p = -1 idx_u1 = np.logical_and(idx_out, idx_val) idx_u2 = np.logical_and(idx_init, idx_val) idx_u3 = np.logical_and(idx_in, idx_val) idx_u5 = np.logical_and(idx_tail, idx_val) sl1 = ax[y_p, x_p].scatter(cannon_data[param][idx_u1], cannon_data[fe_col][idx_u1], lw=0, s=3, color='C2', label='Field') sl2 = ax[y_p, x_p].scatter(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2], lw=0, s=3, color='C0', label='Initial') sl3 = ax[y_p, x_p].scatter(cannon_data[param][idx_u3], cannon_data[fe_col][idx_u3], lw=0, s=3, color='C1', label='Ejected') fit_model, col_std = fit_abund_trend(cannon_data[param][idx_u2], cannon_data[fe_col][idx_u2], order=3, steps=2, sigma_low=2.5, sigma_high=2.5, n_min_perc=10., func='poly') if np.sum(idx_u5) > 0: sl5 = ax[y_p, x_p].scatter(cannon_data[param][idx_u5], cannon_data[fe_col][idx_u5], lw=0, s=3, color='C4', label='Tail') ax[-1, -3].legend(handles=[sl1, sl1, sl3, sl5]) else: ax[-1, -3].legend(handles=[sl1, sl1, sl3]) label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), 
np.sum(idx_u3)) ax[y_p, x_p].set(ylim=rg, title='Fe/H' + label_add, xlim=param_lims[param]) ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black') x_p = -2 y_p = -1 ax[y_p, x_p].scatter(cannon_data['age'][idx_u1], cannon_data[param][idx_u1], lw=0, s=3, color='C2', label='Field') ax[y_p, x_p].scatter(cannon_data['age'][idx_u2], cannon_data[param][idx_u2], lw=0, s=3, color='C0', label='Initial') ax[y_p, x_p].scatter(cannon_data['age'][idx_u3], cannon_data[param][idx_u3], lw=0, s=3, color='C1', label='Ejected') if np.sum(idx_u5) > 0: ax[y_p, x_p].scatter(cannon_data['age'][idx_u5], cannon_data[param][idx_u5], lw=0, s=3, color='C4', label='Tail') label_add = ' = {:.0f}, {:.0f}, {:.0f}'.format(np.sum(idx_u1), np.sum(idx_u2), np.sum(idx_u3)) ax[y_p, x_p].set(ylim=param_lims[param], title='age' + label_add, xlim=[0., 14.]) ax[y_p, x_p].grid(ls='--', alpha=0.2, color='black') plt.subplots_adjust(top=0.97, bottom=0.02, left=0.04, right=0.98, hspace=0.3, wspace=0.3) # plt.show() plt.savefig('p_' + param + '_abundances' + medfix + sub_dir + '' + suffix + '.png', dpi=250) plt.close(fig) chdir('..')
43.740413
215
0.55874
0
0
0
0
0
0
0
0
2,631
0.177435
78df11b8ab67a00fef993f03b911ed0dd7fc3180
707
py
Python
src/python_minifier/transforms/remove_pass.py
donno2048/python-minifier
9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5
[ "MIT" ]
null
null
null
src/python_minifier/transforms/remove_pass.py
donno2048/python-minifier
9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5
[ "MIT" ]
null
null
null
src/python_minifier/transforms/remove_pass.py
donno2048/python-minifier
9a9ff4dd5d2bb8dc666cae5939c125d420c2ffd5
[ "MIT" ]
null
null
null
import ast

from python_minifier.transforms.suite_transformer import SuiteTransformer


class RemovePass(SuiteTransformer):
    """
    Remove Pass keywords from source

    If a statement is syntactically necessary, use an empty expression instead
    """

    def __call__(self, node):
        return self.visit(node)

    def suite(self, node_list, parent):
        without_pass = [self.visit(a) for a in filter(lambda n: not self.is_node(n, ast.Pass), node_list)]

        if len(without_pass) == 0:
            if isinstance(parent, ast.Module):
                return []
            else:
                return [self.add_child(ast.Expr(value=ast.Num(0)), parent=parent)]

        return without_pass
27.192308
106
0.649222
618
0.874116
0
0
0
0
0
0
128
0.181047
78df4f62738c15a3903b9ac814a118e7bd487166
1,214
py
Python
test/tests.py
gzu300/Linear_Algebra
437a285b0230f4da8b0573b04da32ee965b09233
[ "MIT" ]
null
null
null
test/tests.py
gzu300/Linear_Algebra
437a285b0230f4da8b0573b04da32ee965b09233
[ "MIT" ]
null
null
null
test/tests.py
gzu300/Linear_Algebra
437a285b0230f4da8b0573b04da32ee965b09233
[ "MIT" ]
null
null
null
import unittest
from pkg import Linear_Algebra
import numpy as np


class TestLU(unittest.TestCase):
    def setUp(self):
        self.U_answer = np.around(np.array([[2,1,0],[0,3/2,1],[0,0,4/3]], dtype=float), decimals=2).tolist()
        self.L_answer = np.around(np.array([[1,0,0],[1/2,1,0],[0,2/3,1]], dtype=float), decimals=2).tolist()

    def test_perm(self):
        answer = np.array([[0,1,0], [1,0,0], [0,0,1]], dtype=float).tolist()
        result = Linear_Algebra.make_perm_mx(3, 0, 1).tolist()
        self.assertEqual(result, answer)

    def test_LU(self):
        L_result, U_result = np.around(Linear_Algebra.LU(np.array([[2,1,0],[1,2,1],[0,1,2]], dtype=float)), decimals=2).tolist()
        self.assertEqual(U_result, self.U_answer)
        self.assertEqual(L_result, self.L_answer)


class TestDet(unittest.TestCase):
    def setUp(self):
        self.input_mx = np.array([[2,-1,0,0],[-1,2,-1,0],[0,-1,2,-1],[0,0,-1,2]], dtype=float)

    def test_find_det(self):
        result = np.around(Linear_Algebra.find_det(A = self.input_mx), decimals=2).tolist()
        answer = np.around(5, decimals=2).tolist()
        self.assertEqual(result, answer)


if __name__ == '__main__':
    unittest.main()
41.862069
128
0.629325
1,093
0.900329
0
0
0
0
0
0
10
0.008237
78e0a22b8b4b6603603bcdb8feefa51265cf9c14
345
py
Python
src/backend/common/models/favorite.py
ofekashery/the-blue-alliance
df0e47d054161fe742ac6198a6684247d0713279
[ "MIT" ]
266
2015-01-04T00:10:48.000Z
2022-03-28T18:42:05.000Z
src/backend/common/models/favorite.py
ofekashery/the-blue-alliance
df0e47d054161fe742ac6198a6684247d0713279
[ "MIT" ]
2,673
2015-01-01T20:14:33.000Z
2022-03-31T18:17:16.000Z
src/backend/common/models/favorite.py
ofekashery/the-blue-alliance
df0e47d054161fe742ac6198a6684247d0713279
[ "MIT" ]
230
2015-01-04T00:10:48.000Z
2022-03-26T18:12:04.000Z
from backend.common.models.mytba import MyTBAModel


class Favorite(MyTBAModel):
    """
    In order to make strongly consistent DB requests, instances of this class
    should be created with a parent that is the associated Account key.
    """

    def __init__(self, *args, **kwargs):
        super(Favorite, self).__init__(*args, **kwargs)
28.75
77
0.704348
291
0.843478
0
0
0
0
0
0
161
0.466667
78e27b1810b0eb666d13182e83f2f3c881794f6e
17,296
py
Python
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py
MontyThibault/centre-of-mass-awareness
58778f148e65749e1dfc443043e9fc054ca3ff4d
[ "MIT" ]
27
2020-11-12T19:24:54.000Z
2022-03-27T23:10:45.000Z
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py
MontyThibault/centre-of-mass-awareness
58778f148e65749e1dfc443043e9fc054ca3ff4d
[ "MIT" ]
2
2020-11-02T06:30:39.000Z
2022-02-23T18:39:55.000Z
Cartwheel/lib/Python26/Lib/site-packages/wx-2.8-msw-unicode/wx/lib/filebrowsebutton.py
MontyThibault/centre-of-mass-awareness
58778f148e65749e1dfc443043e9fc054ca3ff4d
[ "MIT" ]
3
2021-08-16T00:21:08.000Z
2022-02-23T19:19:36.000Z
#---------------------------------------------------------------------- # Name: wxPython.lib.filebrowsebutton # Purpose: Composite controls that provide a Browse button next to # either a wxTextCtrl or a wxComboBox. The Browse button # launches a wxFileDialog and loads the result into the # other control. # # Author: Mike Fletcher # # RCS-ID: $Id: filebrowsebutton.py 59674 2009-03-20 21:00:16Z RD $ # Copyright: (c) 2000 by Total Control Software # Licence: wxWindows license #---------------------------------------------------------------------- # 12/02/2003 - Jeff Grimmett ([email protected]) # # o 2.5 Compatability changes # import os import types import wx #---------------------------------------------------------------------- class FileBrowseButton(wx.Panel): """ A control to allow the user to type in a filename or browse with the standard file dialog to select file """ def __init__ (self, parent, id= -1, pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.TAB_TRAVERSAL, labelText= "File Entry:", buttonText= "Browse", toolTip= "Type filename or click browse to choose file", # following are the values for a file dialog box dialogTitle = "Choose a file", startDirectory = ".", initialValue = "", fileMask = "*.*", fileMode = wx.OPEN, # callback for when value changes (optional) changeCallback= lambda x:x, labelWidth = 0, name = 'fileBrowseButton', ): """ :param labelText: Text for label to left of text field :param buttonText: Text for button which launches the file dialog :param toolTip: Help text :param dialogTitle: Title used in file dialog :param startDirectory: Default directory for file dialog startup :param fileMask: File mask (glob pattern, such as *.*) to use in file dialog :param fileMode: wx.OPEN or wx.SAVE, indicates type of file dialog to use :param changeCallback: Optional callback called for all changes in value of the control :param labelWidth: Width of the label """ # store variables self.labelText = labelText self.buttonText = buttonText self.toolTip = toolTip self.dialogTitle = dialogTitle self.startDirectory = startDirectory self.initialValue = initialValue self.fileMask = fileMask self.fileMode = fileMode self.changeCallback = changeCallback self.callCallback = True self.labelWidth = labelWidth # create the dialog self.createDialog(parent, id, pos, size, style, name ) # Setting a value causes the changeCallback to be called. # In this case that would be before the return of the # constructor. Not good. 
So a default value on # SetValue is used to disable the callback self.SetValue( initialValue, 0) def createDialog( self, parent, id, pos, size, style, name ): """Setup the graphic representation of the dialog""" wx.Panel.__init__ (self, parent, id, pos, size, style, name) self.SetMinSize(size) # play nice with sizers box = wx.BoxSizer(wx.HORIZONTAL) self.label = self.createLabel( ) box.Add( self.label, 0, wx.CENTER ) self.textControl = self.createTextControl() box.Add( self.textControl, 1, wx.LEFT|wx.CENTER, 5) self.browseButton = self.createBrowseButton() box.Add( self.browseButton, 0, wx.LEFT|wx.CENTER, 5) # add a border around the whole thing and resize the panel to fit outsidebox = wx.BoxSizer(wx.VERTICAL) outsidebox.Add(box, 1, wx.EXPAND|wx.ALL, 3) outsidebox.Fit(self) self.SetAutoLayout(True) self.SetSizer( outsidebox ) self.Layout() if type( size ) == types.TupleType: size = apply( wx.Size, size) self.SetDimensions(-1, -1, size.width, size.height, wx.SIZE_USE_EXISTING) # if size.width != -1 or size.height != -1: # self.SetSize(size) def SetBackgroundColour(self,color): wx.Panel.SetBackgroundColour(self,color) self.label.SetBackgroundColour(color) def createLabel( self ): """Create the label/caption""" label = wx.StaticText(self, -1, self.labelText, style =wx.ALIGN_RIGHT ) font = label.GetFont() w, h, d, e = self.GetFullTextExtent(self.labelText, font) if self.labelWidth > 0: label.SetSize((self.labelWidth+5, h)) else: label.SetSize((w+5, h)) return label def createTextControl( self): """Create the text control""" textControl = wx.TextCtrl(self, -1) textControl.SetToolTipString( self.toolTip ) if self.changeCallback: textControl.Bind(wx.EVT_TEXT, self.OnChanged) textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged) return textControl def OnChanged(self, evt): if self.callCallback and self.changeCallback: self.changeCallback(evt) def createBrowseButton( self): """Create the browse-button control""" button =wx.Button(self, -1, self.buttonText) button.SetToolTipString( self.toolTip ) button.Bind(wx.EVT_BUTTON, self.OnBrowse) return button def OnBrowse (self, event = None): """ Going to browse for file... 
""" current = self.GetValue() directory = os.path.split(current) if os.path.isdir( current): directory = current current = '' elif directory and os.path.isdir( directory[0] ): current = directory[1] directory = directory [0] else: directory = self.startDirectory current = '' dlg = wx.FileDialog(self, self.dialogTitle, directory, current, self.fileMask, self.fileMode) if dlg.ShowModal() == wx.ID_OK: self.SetValue(dlg.GetPath()) dlg.Destroy() def GetValue (self): """ retrieve current value of text control """ return self.textControl.GetValue() def SetValue (self, value, callBack=1): """set current value of text control""" save = self.callCallback self.callCallback = callBack self.textControl.SetValue(value) self.callCallback = save def Enable (self, value=True): """ Convenient enabling/disabling of entire control """ self.label.Enable (value) self.textControl.Enable (value) return self.browseButton.Enable (value) def Disable (self,): """ Convenient disabling of entire control """ self.Enable(False) def GetLabel( self ): """ Retrieve the label's current text """ return self.label.GetLabel() def SetLabel( self, value ): """ Set the label's current text """ rvalue = self.label.SetLabel( value ) self.Refresh( True ) return rvalue class FileBrowseButtonWithHistory( FileBrowseButton ): """ with following additions: __init__(..., history=None) history -- optional list of paths for initial history drop-down (must be passed by name, not a positional argument) If history is callable it will must return a list used for the history drop-down changeCallback -- as for FileBrowseButton, but with a work-around for win32 systems which don't appear to create wx.EVT_COMBOBOX events properly. There is a (slight) chance that this work-around will cause some systems to create two events for each Combobox selection. If you discover this condition, please report it! As for a FileBrowseButton.__init__ otherwise. GetHistoryControl() Return reference to the control which implements interfaces required for manipulating the history list. See GetHistoryControl documentation for description of what that interface is. 
GetHistory() Return current history list SetHistory( value=(), selectionIndex = None ) Set current history list, if selectionIndex is not None, select that index """ def __init__( self, *arguments, **namedarguments): self.history = namedarguments.get( "history" ) if self.history: del namedarguments["history"] self.historyCallBack=None if callable(self.history): self.historyCallBack=self.history self.history=None name = namedarguments.get('name', 'fileBrowseButtonWithHistory') namedarguments['name'] = name FileBrowseButton.__init__(self, *arguments, **namedarguments) def createTextControl( self): """Create the text control""" textControl = wx.ComboBox(self, -1, style = wx.CB_DROPDOWN ) textControl.SetToolTipString( self.toolTip ) textControl.Bind(wx.EVT_SET_FOCUS, self.OnSetFocus) if self.changeCallback: textControl.Bind(wx.EVT_TEXT, self.OnChanged) textControl.Bind(wx.EVT_COMBOBOX, self.OnChanged) if self.history: history=self.history self.history=None self.SetHistory( history, control=textControl) return textControl def GetHistoryControl( self ): """ Return a pointer to the control which provides (at least) the following methods for manipulating the history list: Append( item ) -- add item Clear() -- clear all items Delete( index ) -- 0-based index to delete from list SetSelection( index ) -- 0-based index to select in list Semantics of the methods follow those for the wxComboBox control """ return self.textControl def SetHistory( self, value=(), selectionIndex = None, control=None ): """Set the current history list""" if control is None: control = self.GetHistoryControl() if self.history == value: return self.history = value # Clear history values not the selected one. tempValue=control.GetValue() # clear previous values control.Clear() control.SetValue(tempValue) # walk through, appending new values for path in value: control.Append( path ) if selectionIndex is not None: control.SetSelection( selectionIndex ) def GetHistory( self ): """Return the current history list""" if self.historyCallBack != None: return self.historyCallBack() elif self.history: return list( self.history ) else: return [] def OnSetFocus(self, event): """When the history scroll is selected, update the history""" if self.historyCallBack != None: self.SetHistory( self.historyCallBack(), control=self.textControl) event.Skip() if wx.Platform == "__WXMSW__": def SetValue (self, value, callBack=1): """ Convenient setting of text control value, works around limitation of wx.ComboBox """ save = self.callCallback self.callCallback = callBack self.textControl.SetValue(value) self.callCallback = save # Hack to call an event handler class LocalEvent: def __init__(self, string): self._string=string def GetString(self): return self._string if callBack==1: # The callback wasn't being called when SetValue was used ?? 
# So added this explicit call to it self.changeCallback(LocalEvent(value)) class DirBrowseButton(FileBrowseButton): def __init__(self, parent, id = -1, pos = wx.DefaultPosition, size = wx.DefaultSize, style = wx.TAB_TRAVERSAL, labelText = 'Select a directory:', buttonText = 'Browse', toolTip = 'Type directory name or browse to select', dialogTitle = '', startDirectory = '.', changeCallback = None, dialogClass = wx.DirDialog, newDirectory = False, name = 'dirBrowseButton'): FileBrowseButton.__init__(self, parent, id, pos, size, style, labelText, buttonText, toolTip, dialogTitle, startDirectory, changeCallback = changeCallback, name = name) self.dialogClass = dialogClass self.newDirectory = newDirectory # def OnBrowse(self, ev = None): style=0 if not self.newDirectory: style |= wx.DD_DIR_MUST_EXIST dialog = self.dialogClass(self, message = self.dialogTitle, defaultPath = self.startDirectory, style = style) if dialog.ShowModal() == wx.ID_OK: self.SetValue(dialog.GetPath()) dialog.Destroy() # #---------------------------------------------------------------------- if __name__ == "__main__": #from skeletonbuilder import rulesfile class SimpleCallback: def __init__( self, tag ): self.tag = tag def __call__( self, event ): print self.tag, event.GetString() class DemoFrame( wx.Frame ): def __init__(self, parent): wx.Frame.__init__(self, parent, -1, "File entry with browse", size=(500,260)) self.Bind(wx.EVT_CLOSE, self.OnCloseWindow) panel = wx.Panel (self,-1) innerbox = wx.BoxSizer(wx.VERTICAL) control = FileBrowseButton( panel, initialValue = "z:\\temp", ) innerbox.Add( control, 0, wx.EXPAND ) middlecontrol = FileBrowseButtonWithHistory( panel, labelText = "With History", initialValue = "d:\\temp", history = ["c:\\temp", "c:\\tmp", "r:\\temp","z:\\temp"], changeCallback= SimpleCallback( "With History" ), ) innerbox.Add( middlecontrol, 0, wx.EXPAND ) middlecontrol = FileBrowseButtonWithHistory( panel, labelText = "History callback", initialValue = "d:\\temp", history = self.historyCallBack, changeCallback= SimpleCallback( "History callback" ), ) innerbox.Add( middlecontrol, 0, wx.EXPAND ) self.bottomcontrol = control = FileBrowseButton( panel, labelText = "With Callback", style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN , changeCallback= SimpleCallback( "With Callback" ), ) innerbox.Add( control, 0, wx.EXPAND) self.bottommostcontrol = control = DirBrowseButton( panel, labelText = "Simple dir browse button", style = wx.SUNKEN_BORDER|wx.CLIP_CHILDREN) innerbox.Add( control, 0, wx.EXPAND) ID = wx.NewId() innerbox.Add( wx.Button( panel, ID,"Change Label", ), 1, wx.EXPAND) self.Bind(wx.EVT_BUTTON, self.OnChangeLabel , id=ID) ID = wx.NewId() innerbox.Add( wx.Button( panel, ID,"Change Value", ), 1, wx.EXPAND) self.Bind(wx.EVT_BUTTON, self.OnChangeValue, id=ID ) panel.SetAutoLayout(True) panel.SetSizer( innerbox ) self.history={"c:\\temp":1, "c:\\tmp":1, "r:\\temp":1,"z:\\temp":1} def historyCallBack(self): keys=self.history.keys() keys.sort() return keys def OnFileNameChangedHistory (self, event): self.history[event.GetString ()]=1 def OnCloseMe(self, event): self.Close(True) def OnChangeLabel( self, event ): self.bottomcontrol.SetLabel( "Label Updated" ) def OnChangeValue( self, event ): self.bottomcontrol.SetValue( "r:\\somewhere\\over\\the\\rainbow.htm" ) def OnCloseWindow(self, event): self.Destroy() class DemoApp(wx.App): def OnInit(self): wx.InitAllImageHandlers() frame = DemoFrame(None) frame.Show(True) self.SetTopWindow(frame) return True def test( ): app = DemoApp(0) app.MainLoop() print 'Creating 
dialog' test( )
36.721868
95
0.566142
16,197
0.936459
0
0
0
0
0
0
5,492
0.31753
78e3235c058d0f0d01fe78bcda45b0e5210cc956
3,798
py
Python
modules/pygsm/devicewrapper.py
whanderley/eden
08ced3be3d52352c54cbd412ed86128fbb68b1d2
[ "MIT" ]
205
2015-01-20T08:26:09.000Z
2022-03-27T19:59:33.000Z
modules/pygsm/devicewrapper.py
nursix/eden-asp
e49f46cb6488918f8d5a163dcd5a900cd686978c
[ "MIT" ]
249
2015-02-10T09:56:35.000Z
2022-03-23T19:54:36.000Z
modules/pygsm/devicewrapper.py
nursix/eden-asp
e49f46cb6488918f8d5a163dcd5a900cd686978c
[ "MIT" ]
231
2015-02-10T09:33:17.000Z
2022-02-18T19:56:05.000Z
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4 encoding=utf-8

# arch: pacman -S python-pyserial
# debian/ubuntu: apt-get install python-serial
import serial
import re
import errors


class DeviceWrapper(object):
    def __init__(self, logger, *args, **kwargs):
        self.device = serial.Serial(*args, **kwargs)
        self.logger = logger

    def isOpen(self):
        return self.device.isOpen()

    def close(self):
        self.device.close()

    def write(self, str):
        self.device.write(str)

    def _read(self, read_term=None, read_timeout=None):
        """Read from the modem (blocking) until _terminator_ is hit,
           (defaults to \r\n, which reads a single "line"), and return."""

        buffer = []

        # if a different timeout was requested just
        # for _this_ read, store and override the
        # current device setting (not thread safe!)
        if read_timeout is not None:
            old_timeout = self.device.timeout
            self.device.timeout = read_timeout

        def __reset_timeout():
            """restore the device's previous timeout setting,
               if we overrode it earlier."""
            if read_timeout is not None:
                self.device.timeout =\
                    old_timeout

        # the default terminator reads
        # until a newline is hit
        if read_term is None:
            read_term = "\r\n"

        while(True):
            buf = self.device.read()
            buffer.append(buf)

            # if a timeout was hit, raise an exception including the raw data that
            # we've already read (in case the calling func was _expecting_ a timeout
            # (wouldn't it be nice if serial.Serial.read returned None for this?)
            if buf == '':
                __reset_timeout()
                raise(errors.GsmReadTimeoutError(buffer))

            # if last n characters of the buffer match the read
            # terminator, return what we've received so far
            if ''.join(buffer[-len(read_term):]) == read_term:
                buf_str = ''.join(buffer)
                __reset_timeout()

                self._log(repr(buf_str), 'read')
                return buf_str

    def read_lines(self, read_term=None, read_timeout=None):
        """Read from the modem (blocking) one line at a time until a response
           terminator ("OK", "ERROR", or "CMx ERROR...") is hit, then return
           a list containing the lines."""
        buffer = []

        # keep on looping until a command terminator
        # is encountered. these are NOT the same as the
        # "read_term" argument - only OK or ERROR is valid
        while(True):
            buf = self._read(
                read_term=read_term,
                read_timeout=read_timeout)

            buf = buf.strip()
            buffer.append(buf)

            # most commands return OK for success, but there
            # are some exceptions. we're not checking those
            # here (unlike RubyGSM), because they should be
            # handled when they're _expected_
            if buf == "OK":
                return buffer

            # some errors contain useful error codes, so raise a
            # proper error with a description from pygsm/errors.py
            m = re.match(r"^\+(CM[ES]) ERROR: (\d+)$", buf)
            if m is not None:
                type, code = m.groups()
                raise(errors.GsmModemError(type, int(code)))

            # ...some errors are not so useful
            # (at+cmee=1 should enable error codes)
            if buf == "ERROR":
                raise(errors.GsmModemError)

    def _log(self, str, type="debug"):
        if hasattr(self, "logger"):
            self.logger(self, str, type)
35.166667
84
0.561611
3,607
0.94971
0
0
0
0
0
0
1,618
0.426014
78e3d8480adc030df86059c4a34f7c8aad96d287
306
py
Python
day1/loops.py
alqmy/The-Garage-Summer-Of-Code
af310d5e5194a62962db2fc1e601099468251efa
[ "MIT" ]
null
null
null
day1/loops.py
alqmy/The-Garage-Summer-Of-Code
af310d5e5194a62962db2fc1e601099468251efa
[ "MIT" ]
null
null
null
day1/loops.py
alqmy/The-Garage-Summer-Of-Code
af310d5e5194a62962db2fc1e601099468251efa
[ "MIT" ]
null
null
null
# while True:
#     # ejecuta esto
#     print("Hola")

real = 7

print("Entre un numero entre el 1 y el 10")
guess = int(input())

# =/=
while guess != real:
    print("Ese no es el numero")
    print("Entre un numero entre el 1 y el 10")
    guess = int(input())

# el resto
print("Yay! Lo sacastes!")
16.105263
47
0.591503
0
0
0
0
0
0
0
0
180
0.588235
78e51986ef4ee9e7c7af6f2a83426baeaab981b9
1,426
py
Python
pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py
paulveillard/cybersecurity-penetration-testing
a5afff13ec25afd0cf16ef966d35bddb91518af4
[ "Apache-2.0" ]
6
2021-12-07T21:02:12.000Z
2022-03-03T12:08:14.000Z
pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py
paulveillard/cybersecurity-penetration-testing
a5afff13ec25afd0cf16ef966d35bddb91518af4
[ "Apache-2.0" ]
null
null
null
pentest-scripts/learning-python-for-forensics/Chapter 6/rot13.py
paulveillard/cybersecurity-penetration-testing
a5afff13ec25afd0cf16ef966d35bddb91518af4
[ "Apache-2.0" ]
1
2022-01-15T23:57:36.000Z
2022-01-15T23:57:36.000Z
def rotCode(data):
    """
    The rotCode function encodes/decodes data using string indexing
    :param data: A string
    :return: The rot-13 encoded/decoded string
    """
    rot_chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
                 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']

    substitutions = []

    # Walk through each individual character
    for c in data:

        # Walk through each individual character
        if c.isupper():
            try:
                # Find the position of the character in rot_chars list
                index = rot_chars.index(c.lower())
            except ValueError:
                substitutions.append(c)
                continue

            # Calculate the relative index that is 13 characters away from the index
            substitutions.append((rot_chars[(index-13)]).upper())

        else:
            try:
                # Find the position of the character in rot_chars list
                index = rot_chars.index(c)
            except ValueError:
                substitutions.append(c)
                continue

            substitutions.append(rot_chars[((index-13))])

    return ''.join(substitutions)

if __name__ == '__main__':
    print rotCode('Jul, EBG-13?')
33.162791
90
0.47756
0
0
0
0
0
0
0
0
524
0.367461
78e6e9a7d73aab5ad3ba5822b10f0996d16afd5b
1,762
py
Python
examples/sim_tfidf.py
sunyilgdx/CwVW-SIF
85ef56d80512e2f6bff1266e030552075566b240
[ "MIT" ]
12
2019-05-14T10:31:53.000Z
2022-01-20T17:16:59.000Z
examples/sim_tfidf.py
sunyilgdx/CwVW-SIF
85ef56d80512e2f6bff1266e030552075566b240
[ "MIT" ]
null
null
null
examples/sim_tfidf.py
sunyilgdx/CwVW-SIF
85ef56d80512e2f6bff1266e030552075566b240
[ "MIT" ]
1
2020-12-21T09:16:51.000Z
2020-12-21T09:16:51.000Z
import pickle, sys
sys.path.append('../src')
import data_io, sim_algo, eval, params

## run
# wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
#     '../data/glove.840B.300d.txt' # need to download it first
#     ]
wordfiles = [#'../data/paragram_sl999_small.txt', # need to download it from John Wieting's github (https://github.com/jwieting/iclr2016)
    '../data/glove.6B.50d.txt' # need to download it first
    ]
rmpcs = [0,1]

comment4para = [ # need to align with the following loop
    ['word vector files', wordfiles], # comments and values,
    ['remove principal component or not', rmpcs]
    ]

params = params.params()
parr4para = {}
sarr4para = {}
for wordfile in wordfiles:
    (words, We) = data_io.getWordmap(wordfile)
    weight4ind = data_io.getIDFWeight(wordfile)
    for rmpc in rmpcs:
        print('word vectors loaded from %s' % wordfile)
        print('word weights computed from idf')
        params.rmpc = rmpc
        print('remove the first %d principal components' % rmpc)
        # eval just one example dataset
        parr, sarr = eval.sim_evaluate_one(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
        ## eval all datasets; need to obtained datasets from John Wieting (https://github.com/jwieting/iclr2016)
        # parr, sarr = eval.sim_evaluate_all(We, words, weight4ind, sim_algo.weighted_average_sim_rmpc, params)
        paras = (wordfile, rmpc)
        parr4para[paras] = parr
        sarr4para[paras] = sarr

## save result
save_result = False # True
result_file = 'result/sim_tfidf.result'
if save_result:
    with open(result_file, 'w') as f:
        pickle.dump([parr4para, sarr4para, comment4para] , f)
39.155556
139
0.685585
0
0
0
0
0
0
0
0
905
0.513621
78e748ebc4d60824e0cdf86518ddf127e1b97b2b
120
py
Python
tests/cases/cls.py
div72/py2many
60277bc13597bd32d078b88a7390715568115fc6
[ "MIT" ]
345
2021-01-28T17:33:08.000Z
2022-03-25T16:07:56.000Z
tests/cases/cls.py
mkos11/py2many
be6cfaad5af32c43eb24f182cb20ad63b979d4ef
[ "MIT" ]
291
2021-01-31T13:15:06.000Z
2022-03-23T21:28:49.000Z
tests/cases/cls.py
mkos11/py2many
be6cfaad5af32c43eb24f182cb20ad63b979d4ef
[ "MIT" ]
23
2021-02-09T17:15:03.000Z
2022-02-03T05:57:44.000Z
class Foo:
    def bar(self):
        return "a"


if __name__ == "__main__":
    f = Foo()
    b = f.bar()
    print(b)
13.333333
26
0.483333
48
0.4
0
0
0
0
0
0
13
0.108333
78e74ab110d94c6516104012ed887badd152a66c
1,602
py
Python
theano-rfnn/mnist_loader.py
jhja/RFNN
a63641d6e584df743a5e0a9efaf41911f057a977
[ "MIT" ]
55
2016-05-11T18:53:30.000Z
2022-02-22T12:31:08.000Z
theano-rfnn/mnist_loader.py
jhja/RFNN
a63641d6e584df743a5e0a9efaf41911f057a977
[ "MIT" ]
null
null
null
theano-rfnn/mnist_loader.py
jhja/RFNN
a63641d6e584df743a5e0a9efaf41911f057a977
[ "MIT" ]
14
2016-08-16T02:00:47.000Z
2022-03-08T13:16:00.000Z
import numpy as np
import os
from random import shuffle

datasets_dir = './../data/'

def one_hot(x,n):
    if type(x) == list:
        x = np.array(x)
    x = x.flatten()
    o_h = np.zeros((len(x),n))
    o_h[np.arange(len(x)),x] = 1
    return o_h

def mnist(ntrain=60000,ntest=10000,onehot=True):
    ntrain=np.array(ntrain).astype(int).squeeze()
    data_dir = os.path.join(datasets_dir,'mnist/')

    fd = open(os.path.join(data_dir,'train-images-idx3-ubyte'))
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    trX = loaded[16:].reshape((60000,28*28)).astype(float)

    fd = open(os.path.join(data_dir,'train-labels-idx1-ubyte'))
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    trY = loaded[8:].reshape((60000))

    fd = open(os.path.join(data_dir,'t10k-images-idx3-ubyte'))
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    teX = loaded[16:].reshape((10000,28*28)).astype(float)

    fd = open(os.path.join(data_dir,'t10k-labels-idx1-ubyte'))
    loaded = np.fromfile(file=fd,dtype=np.uint8)
    teY = loaded[8:].reshape((10000))

    trY_shuffle = []
    trX_shuffle = []
    index_shuf = range(len(trY))
    shuffle(index_shuf)
    for i in index_shuf:
        trY_shuffle.append(trY[i])
        trX_shuffle.append(trX[i])
    trX = np.asarray(trX_shuffle)
    trY = np.asarray(trY_shuffle)

    trX = trX/255.
    teX = teX/255.

    trX = trX[:ntrain]
    trY = trY[:ntrain]

    teX = teX[:ntest]
    teY = teY[:ntest]

    if onehot:
        trY = one_hot(trY, 10)
        teY = one_hot(teY, 10)
    else:
        trY = np.asarray(trY)
        teY = np.asarray(teY)

    return trX,teX,trY,teY
26.262295
63
0.624844
0
0
0
0
0
0
0
0
118
0.073658
78e7d5ba18b9d335d132f7d6ec0d73b6ca3d020d
686
py
Python
Ejercicio 2.py
crltsnch/Ejercicios-grupales
72e01d6489816ea1b9308af1abd62792e5464c93
[ "Apache-2.0" ]
null
null
null
Ejercicio 2.py
crltsnch/Ejercicios-grupales
72e01d6489816ea1b9308af1abd62792e5464c93
[ "Apache-2.0" ]
null
null
null
Ejercicio 2.py
crltsnch/Ejercicios-grupales
72e01d6489816ea1b9308af1abd62792e5464c93
[ "Apache-2.0" ]
null
null
null
import math
import os
import random
import re
import sys


def compareTriplets(a, b):
    puntosA=0
    puntosB=0
    for i in range (0,3):
        if a[i]<b[i]:
            puntosB+=1
        elif a[i]>b[i]:
            puntosA+=1
    puntosTotales=[puntosA, puntosB]
    return puntosTotales


if __name__ == '__main__':
    fptr = open(os.environ['OUTPUT_PATH'] + 'solucion2.txt', 'w')

    print("Escribe las notas de a")
    a = list(map(int, input().rstrip().split()))
    print("Escribe las notas de b")
    b = list(map(int, input().rstrip().split()))

    result = compareTriplets(a, b)

    fptr.write(' '.join(map(str, result)))
    fptr.write('\n')

    fptr.close()
21.4375
65
0.580175
0
0
0
0
0
0
0
0
96
0.139942
78ed1b7fc24c0d300d3ad14111db8c17f3c020fd
5,401
py
Python
app/routes/router.py
nityagautam/ReportDashboard-backend
d23fe008cb0df6a703fcd665181897a75b71d5b2
[ "MIT" ]
1
2021-05-06T09:48:46.000Z
2021-05-06T09:48:46.000Z
app/routes/router.py
nityagautam/ReportDashboard
d23fe008cb0df6a703fcd665181897a75b71d5b2
[ "MIT" ]
2
2021-09-09T05:34:33.000Z
2021-12-13T15:31:36.000Z
app/routes/router.py
nityagautam/ReportDashboard
d23fe008cb0df6a703fcd665181897a75b71d5b2
[ "MIT" ]
null
null
null
#=============================================================== # @author: [email protected] # @written: 08 December 2021 # @desc: Routes for the Backend server #=============================================================== # Import section with referecne of entry file or main file; from __main__ import application from flask import jsonify, render_template, url_for, request, redirect # Local sample data import from app.config.uiconfig import app_ui_config from app import sample_data # ============================================================== # App Routes/Gateways # ============================================================== @application.route('/test', methods=['GET']) def test(): return '<h4>HELLO WORLD!</h4><hr/> it works!' @application.route('/', methods=['GET']) @application.route('/home', methods=['GET']) @application.route('/dashboard', methods=['GET']) def root(): return render_template("dashboard.html", app_data=app_ui_config, data=sample_data.latest_data) @application.route('/history', methods=['GET']) def history(): return render_template("history.html", app_data=app_ui_config, data=sample_data.history_data) @application.route('/about', methods=['GET']) def about(): return render_template("about.html", app_data=app_ui_config, data=sample_data.latest_data) @application.route('/get-notes', methods=['POST']) def get_todo(): print("KEY :: VALUE (from the received form data)") print([(key, val) for key, val in zip(request.form.keys(), request.form.values())]) return redirect("/notes", code=302) @application.route('/notes') def info(): return render_template("notes.html", app_data=app_ui_config) @application.route('/sample-data') def get_sample_data(): return jsonify(app_ui_config) # ============================================================== # Error Handlers Starts # ============================================================== # 404 Handler; We can also pass the specific request errors codes to the decorator; @application.errorhandler(404) def not_found(err): return render_template("error.html", app_data=app_ui_config, error_data=err), 400 # Exception/Error handler; We can also pass the specific errors to the decorator; @application.errorhandler(TypeError) def server_error(err): application.logger.exception(err) return render_template("error.html", app_data=app_ui_config, error_data=err), 500 # Exception/Error handler; We can also pass the specific errors to the decorator; @application.errorhandler(Exception) def server_error(err): application.logger.exception(err) return render_template("error.html", app_data=app_ui_config, error_data=err), 500 # ============================================================== # Error Handlers Ends # ============================================================== # Route For Sample data @application.route('/data') def get_data(): data = { "reports": [ { "build": "build_no", "created": "Imported 05052021T11:30:00:00IST", "platform": "Imported Win/Unix/Mac", "project_name": "project_name_1", "report_location_path": "path/to/report/location/index.html", "report_summary": {"pass": "50", "fail": "0", "ignored": "0", "skipped": "0"}, "total_time": "35 min." }, { "build": "build_no", "created": "Imported 05052021T11:30:00:00IST", "platform": "Imported Win/Unix/Mac", "project_name": "project_name_2", "report_location_path": "path/to/report/location/index.html", "report_summary": {"pass": "10", "fail": "2", "ignored": "0", "skipped": "0"}, "total_time": "0.2345 secs." 
}, { "build": "build_no", "created": "Imported 05052021T11:30:00:00IST", "platform": "Imported Win/Unix/Mac", "project_name": "project_name_3", "report_location_path": "path/to/report/location/index.html", "report_summary": {"pass": "100", "fail": "5", "ignored": "0", "skipped": "0"}, "total_time": "5 days" } ] } return jsonify(data) # ============================================================== # Extra routes starts # ============================================================== @application.route('/sample1') def sample1(): return render_template("web-analytics-overview.html") @application.route('/sample2') def sample2(): return render_template("web-analytics-real-time.html") @application.route('/logo') def get_logo(): """ Queries the snapshot data for both Serenity and JMeter projects from the MongoDB. Renders the Snapshot view of html :return: N/A """ # set template directory of the Flask App to the path set by the user as command line arg. return f'<html><head><title>Root</title><head><body><hr/> Welcome to the main page <hr/> ' \ f'Building image from static public location: <br/> ' \ f'<img src=\'{url_for("static", filename="images/logo.svg")}\' /> </body></html>'
38.035211
99
0.549713
0
0
0
0
3,903
0.722644
0
0
2,858
0.529161
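With the dashboard served locally, the JSON route in the file above is easy to smoke-test; a minimal sketch, assuming the default Flask host and port (both are assumptions, not part of the record):

import requests

# get_data() answers on /data with a {"reports": [...]} payload.
resp = requests.get("http://localhost:5000/data", timeout=5)
resp.raise_for_status()

for report in resp.json()["reports"]:
    summary = report["report_summary"]
    print(report["project_name"], "pass:", summary["pass"], "fail:", summary["fail"])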
78eed98843af7c2acb54d95dbb60b3f984e9337b
15,624
py
Python
idaes/generic_models/properties/core/examples/ASU_PR.py
carldlaird/idaes-pse
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
[ "RSA-MD" ]
112
2019-02-11T23:16:36.000Z
2022-03-23T20:59:57.000Z
idaes/generic_models/properties/core/examples/ASU_PR.py
carldlaird/idaes-pse
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
[ "RSA-MD" ]
621
2019-03-01T14:44:12.000Z
2022-03-31T19:49:25.000Z
idaes/generic_models/properties/core/examples/ASU_PR.py
carldlaird/idaes-pse
cc7a32ca9fa788f483fa8ef85f3d1186ef4a596f
[ "RSA-MD" ]
154
2019-02-01T23:46:33.000Z
2022-03-23T15:07:10.000Z
################################################################################# # The Institute for the Design of Advanced Energy Systems Integrated Platform # Framework (IDAES IP) was produced under the DOE Institute for the # Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021 # by the software owners: The Regents of the University of California, through # Lawrence Berkeley National Laboratory, National Technology & Engineering # Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University # Research Corporation, et al. All rights reserved. # # Please see the files COPYRIGHT.md and LICENSE.md for full copyright and # license information. ################################################################################# """ Air separation phase equilibrium package using Peng-Robinson EoS. Example property package using the Generic Property Package Framework. This example shows how to set up a property package to do air separation phase equilibrium in the generic framework using Peng-Robinson equation along with methods drawn from the pre-built IDAES property libraries. The example includes two dictionaries. 1. The dictionary named configuration contains parameters obtained from The Properties of Gases and Liquids (1987) 4th edition and NIST. 2. The dictionary named configuration_Dowling_2015 contains parameters used in A framework for efficient large scale equation-oriented flowsheet optimization (2015) Dowling. The parameters are extracted from Properties of Gases and Liquids (1977) 3rd edition for Antoine's vapor equation and acentric factors and converted values from the Properties of Gases and Liquids (1977) 3rd edition to j. """ # Import Python libraries import logging # Import Pyomo units from pyomo.environ import units as pyunits # Import IDAES cores from idaes.core import LiquidPhase, VaporPhase, Component from idaes.generic_models.properties.core.state_definitions import FTPx from idaes.generic_models.properties.core.eos.ceos import Cubic, CubicType from idaes.generic_models.properties.core.phase_equil import SmoothVLE from idaes.generic_models.properties.core.phase_equil.bubble_dew import \ LogBubbleDew from idaes.generic_models.properties.core.phase_equil.forms import log_fugacity from idaes.generic_models.properties.core.pure import RPP4 from idaes.generic_models.properties.core.pure import NIST from idaes.generic_models.properties.core.pure import RPP3 # Set up logger _log = logging.getLogger(__name__) # --------------------------------------------------------------------- # Configuration dictionary for a Peng-Robinson Oxygen-Argon-Nitrogen system # Data Sources: # [1] The Properties of Gases and Liquids (1987) # 4th edition, Chemical Engineering Series - Robert C. Reid # [2] NIST, https://webbook.nist.gov/ # Retrieved 16th August, 2020 # [3] The Properties of Gases and Liquids (1987) # 3rd edition, Chemical Engineering Series - Robert C. Reid # Cp parameters where converted to j in Dowling 2015 # [4] A framework for efficient large scale equation-oriented flowsheet optimization (2015) # Computers and Chemical Engineering - Alexander W. 
Dowling configuration = { # Specifying components "components": { "nitrogen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": NIST, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [1] "pressure_crit": (34e5, pyunits.Pa), # [1] "temperature_crit": (126.2, pyunits.K), # [1] "omega": 0.037, # [1] "cp_mol_ig_comp_coeff": { "A": (3.115E1, pyunits.J/pyunits.mol/pyunits.K), # [1] "B": (-1.357E-2, pyunits.J/pyunits.mol/pyunits.K**2), "C": (2.680E-5, pyunits.J/pyunits.mol/pyunits.K**3), "D": (-1.168E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 191.61, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { "A": (3.7362, None), # [2] "B": (264.651, pyunits.K), "C": (-6.788, pyunits.K)}}}, "argon": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": NIST, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (39.948E-3, pyunits.kg/pyunits.mol), # [1] "pressure_crit": (48.98e5, pyunits.Pa), # [1] "temperature_crit": (150.86, pyunits.K), # [1] "omega": 0.001, # [1] "cp_mol_ig_comp_coeff": { "A": (2.050E1, pyunits.J/pyunits.mol/pyunits.K), # [1] "B": (0.0, pyunits.J/pyunits.mol/pyunits.K**2), "C": (0.0, pyunits.J/pyunits.mol/pyunits.K**3), "D": (0.0, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 154.8, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": {"A": (3.29555, None), # [2] "B": (215.24, pyunits.K), "C": (-22.233, pyunits.K)}}}, "oxygen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": NIST, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (31.999E-3, pyunits.kg/pyunits.mol), # [1] "pressure_crit": (50.43e5, pyunits.Pa), # [1] "temperature_crit": (154.58, pyunits.K), # [1] "omega": 0.025, # [1] "cp_mol_ig_comp_coeff": { "A": (2.811E1, pyunits.J/pyunits.mol/pyunits.K), "B": (-3.680E-6, pyunits.J/pyunits.mol/pyunits.K**2), "C": (1.746E-5, pyunits.J/pyunits.mol/pyunits.K**3), "D": (-1.065E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 205.152, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { "A": (3.85845, None), # [2] "B": (325.675, pyunits.K), "C": (-5.667, pyunits.K)}}}}, # Specifying phases "phases": {"Liq": {"type": LiquidPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}, "Vap": {"type": VaporPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}}, # Set base units of measurement "base_units": {"time": pyunits.s, "length": pyunits.m, "mass": pyunits.kg, "amount": pyunits.mol, "temperature": pyunits.K}, # Specifying state definition "state_definition": FTPx, "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s), "temperature": (10, 300, 350, pyunits.K), "pressure": (5e4, 1e5, 1e7, pyunits.Pa)}, "pressure_ref": (101325, pyunits.Pa), "temperature_ref": (298.15, pyunits.K), # Defining phase equilibria "phases_in_equilibrium": [("Vap", "Liq")], "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE}, "bubble_dew_method": LogBubbleDew, "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000, ("nitrogen", "argon"): 
-0.26e-2, ("nitrogen", "oxygen"): -0.119e-1, ("argon", "nitrogen"): -0.26e-2, ("argon", "argon"): 0.000, ("argon", "oxygen"): 0.104e-1, ("oxygen", "nitrogen"): -0.119e-1, ("oxygen", "argon"): 0.104e-1, ("oxygen", "oxygen"): 0.000}}} configuration_Dowling_2015 = { # Specifying components "components": { "nitrogen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": RPP3, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (28.0135E-3, pyunits.kg/pyunits.mol), # [3] "pressure_crit": (33.943875e5, pyunits.Pa), # [4] "temperature_crit": (126.2, pyunits.K), # [4] "omega": 0.04, # [3] "cp_mol_ig_comp_coeff": { 'A': (3.112896E1, pyunits.J/pyunits.mol/pyunits.K), # [3] 'B': (-1.356E-2, pyunits.J/pyunits.mol/pyunits.K**2), 'C': (2.6878E-5, pyunits.J/pyunits.mol/pyunits.K**3), 'D': (-1.167E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 191.61, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { 'A': (14.9342, None), # [3] 'B': (588.72, pyunits.K), 'C': (-6.60, pyunits.K)}}}, "argon": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": RPP3, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (39.948E-3, pyunits.kg/pyunits.mol), # [3] "pressure_crit": (48.737325e5, pyunits.Pa), # [4] "temperature_crit": (150.86, pyunits.K), # [4] "omega": -0.004, # [1] "cp_mol_ig_comp_coeff": { 'A': (2.0790296E1, pyunits.J/pyunits.mol/pyunits.K), # [3] 'B': (-3.209E-05, pyunits.J/pyunits.mol/pyunits.K**2), 'C': (5.163E-08, pyunits.J/pyunits.mol/pyunits.K**3), 'D': (0.0, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [3] "entr_mol_form_vap_comp_ref": ( 154.8, pyunits.J/pyunits.mol/pyunits.K), # [3] "pressure_sat_comp_coeff": { 'A': (15.2330, None), # [3] 'B': (700.51, pyunits.K), 'C': (-5.84, pyunits.K)}}}, "oxygen": {"type": Component, "enth_mol_ig_comp": RPP4, "entr_mol_ig_comp": RPP4, "pressure_sat_comp": RPP3, "phase_equilibrium_form": {("Vap", "Liq"): log_fugacity}, "parameter_data": { "mw": (31.999E-3, pyunits.kg/pyunits.mol), # [3] "pressure_crit": (50.45985e5, pyunits.Pa), # [4] "temperature_crit": (154.58, pyunits.K), # [4] "omega": 0.021, # [1] "cp_mol_ig_comp_coeff": { 'A': (2.8087192E1, pyunits.J/pyunits.mol/pyunits.K), # [3] 'B': (-3.678E-6, pyunits.J/pyunits.mol/pyunits.K**2), 'C': (1.745E-5, pyunits.J/pyunits.mol/pyunits.K**3), 'D': (-1.064E-8, pyunits.J/pyunits.mol/pyunits.K**4)}, "enth_mol_form_vap_comp_ref": ( 0.0, pyunits.J/pyunits.mol), # [2] "entr_mol_form_vap_comp_ref": ( 205.152, pyunits.J/pyunits.mol/pyunits.K), # [2] "pressure_sat_comp_coeff": { 'A': (15.4075, None), # [3] 'B': (734.55, pyunits.K), 'C': (-6.45, pyunits.K)}}}}, # Specifying phases "phases": {"Liq": {"type": LiquidPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}, "Vap": {"type": VaporPhase, "equation_of_state": Cubic, "equation_of_state_options": { "type": CubicType.PR}}}, # Set base units of measurement "base_units": {"time": pyunits.s, "length": pyunits.m, "mass": pyunits.kg, "amount": pyunits.mol, "temperature": pyunits.K}, # Specifying state definition "state_definition": FTPx, "state_bounds": {"flow_mol": (0, 100, 1000, pyunits.mol/pyunits.s), "temperature": (10, 300, 350, pyunits.K), "pressure": (5e4, 1e5, 1e7, pyunits.Pa)}, "pressure_ref": (101325, pyunits.Pa), 
"temperature_ref": (298.15, pyunits.K), # Defining phase equilibria "phases_in_equilibrium": [("Vap", "Liq")], "phase_equilibrium_state": {("Vap", "Liq"): SmoothVLE}, "bubble_dew_method": LogBubbleDew, "parameter_data": {"PR_kappa": {("nitrogen", "nitrogen"): 0.000, ("nitrogen", "argon"): -0.26e-2, ("nitrogen", "oxygen"): -0.119e-1, ("argon", "nitrogen"): -0.26e-2, ("argon", "argon"): 0.000, ("argon", "oxygen"): 0.104e-1, ("oxygen", "nitrogen"): -0.119e-1, ("oxygen", "argon"): 0.104e-1, ("oxygen", "oxygen"): 0.000}}}
51.394737
91
0.473374
0
0
0
0
0
0
0
0
5,773
0.369496
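The configuration dictionaries above are designed to be handed to IDAES's Generic Property Package Framework; a minimal sketch of that wiring, assuming the GenericParameterBlock import path from the same idaes-pse generation as this file (the exact path and the flowsheet setup are assumptions, not taken from the record):

from pyomo.environ import ConcreteModel
from idaes.core import FlowsheetBlock
# Assumed location of the generic framework's parameter block in idaes-pse 1.x.
from idaes.generic_models.properties.core.generic.generic_property import (
    GenericParameterBlock,
)

from idaes.generic_models.properties.core.examples.ASU_PR import configuration

# Steady-state flowsheet carrying the O2/N2/Ar Peng-Robinson property package.
m = ConcreteModel()
m.fs = FlowsheetBlock(default={"dynamic": False})
m.fs.properties = GenericParameterBlock(default=configuration)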
78efdc29bbe17ba841a42c2ad2e6e9e8b6de242a
34
py
Python
tests/functional/test_calculator.py
bellanov/calculator
a66e68a368a5212247aeff3291c9cb8b508e91be
[ "Apache-2.0" ]
null
null
null
tests/functional/test_calculator.py
bellanov/calculator
a66e68a368a5212247aeff3291c9cb8b508e91be
[ "Apache-2.0" ]
null
null
null
tests/functional/test_calculator.py
bellanov/calculator
a66e68a368a5212247aeff3291c9cb8b508e91be
[ "Apache-2.0" ]
1
2021-05-26T16:54:17.000Z
2021-05-26T16:54:17.000Z
"""TODO: Move the Threads Here"""
17
33
0.647059
0
0
0
0
0
0
0
0
33
0.970588
78f03cf1af94e18c9a855dfd8bbdda1565566674
17,569
py
Python
autokeras/hypermodel/graph.py
Sette/autokeras
c5a83607a899ad545916b3794561d6908d9cdbac
[ "MIT" ]
null
null
null
autokeras/hypermodel/graph.py
Sette/autokeras
c5a83607a899ad545916b3794561d6908d9cdbac
[ "MIT" ]
null
null
null
autokeras/hypermodel/graph.py
Sette/autokeras
c5a83607a899ad545916b3794561d6908d9cdbac
[ "MIT" ]
null
null
null
import functools import pickle import kerastuner import tensorflow as tf from tensorflow.python.util import nest from autokeras.hypermodel import base from autokeras.hypermodel import compiler class Graph(kerastuner.engine.stateful.Stateful): """A graph consists of connected Blocks, HyperBlocks, Preprocessors or Heads. # Arguments inputs: A list of input node(s) for the Graph. outputs: A list of output node(s) for the Graph. override_hps: A list of HyperParameters. The predefined HyperParameters that will override the space of the Hyperparameters defined in the Hypermodels with the same names. """ def __init__(self, inputs, outputs, override_hps=None): super().__init__() self.inputs = nest.flatten(inputs) self.outputs = nest.flatten(outputs) self._node_to_id = {} self._nodes = [] self.blocks = [] self._block_to_id = {} self._build_network() self.override_hps = override_hps or [] def compile(self, func): """Share the information between blocks by calling functions in compiler. # Arguments func: A dictionary. The keys are the block classes. The values are corresponding compile functions. """ for block in self.blocks: if block.__class__ in func: func[block.__class__](block) def _register_hps(self, hp): """Register the override HyperParameters for current HyperParameters.""" for single_hp in self.override_hps: name = single_hp.name if name not in hp.values: hp.register(single_hp.name, single_hp.__class__.__name__, single_hp.get_config()) hp.values[name] = single_hp.default def _build_network(self): self._node_to_id = {} # Recursively find all the interested nodes. for input_node in self.inputs: self._search_network(input_node, self.outputs, set(), set()) self._nodes = sorted(list(self._node_to_id.keys()), key=lambda x: self._node_to_id[x]) for node in (self.inputs + self.outputs): if node not in self._node_to_id: raise ValueError('Inputs and outputs not connected.') # Find the blocks. blocks = [] for input_node in self._nodes: for block in input_node.out_blocks: if any([output_node in self._node_to_id for output_node in block.outputs]) and block not in blocks: blocks.append(block) # Check if all the inputs of the blocks are set as inputs. for block in blocks: for input_node in block.inputs: if input_node not in self._node_to_id: raise ValueError('A required input is missing for HyperModel ' '{name}.'.format(name=block.name)) # Calculate the in degree of all the nodes in_degree = [0] * len(self._nodes) for node_id, node in enumerate(self._nodes): in_degree[node_id] = len([ block for block in node.in_blocks if block in blocks]) # Add the blocks in topological order. self.blocks = [] self._block_to_id = {} while len(blocks) != 0: new_added = [] # Collect blocks with in degree 0. for block in blocks: if any([in_degree[self._node_to_id[node]] for node in block.inputs]): continue new_added.append(block) # Remove the collected blocks from blocks. for block in new_added: blocks.remove(block) for block in new_added: # Add the collected blocks to the AutoModel. self._add_block(block) # Decrease the in degree of the output nodes. 
for output_node in block.outputs: if output_node not in self._node_to_id: continue output_node_id = self._node_to_id[output_node] in_degree[output_node_id] -= 1 def _search_network(self, input_node, outputs, in_stack_nodes, visited_nodes): visited_nodes.add(input_node) in_stack_nodes.add(input_node) outputs_reached = False if input_node in outputs: outputs_reached = True for block in input_node.out_blocks: for output_node in block.outputs: if output_node in in_stack_nodes: raise ValueError('The network has a cycle.') if output_node not in visited_nodes: self._search_network(output_node, outputs, in_stack_nodes, visited_nodes) if output_node in self._node_to_id.keys(): outputs_reached = True if outputs_reached: self._add_node(input_node) in_stack_nodes.remove(input_node) def _add_block(self, block): if block not in self.blocks: block_id = len(self.blocks) self._block_to_id[block] = block_id self.blocks.append(block) def _add_node(self, input_node): if input_node not in self._node_to_id: self._node_to_id[input_node] = len(self._node_to_id) def _get_block(self, name): for block in self.blocks: if block.name == name: return block raise ValueError('Cannot find block named {name}.'.format(name=name)) def get_state(self): # TODO: Include everything including the graph structure. block_state = {str(block_id): block.get_state() for block_id, block in enumerate(self.blocks)} node_state = {str(node_id): node.get_state() for node_id, node in enumerate(self._nodes)} return {'blocks': block_state, 'nodes': node_state} def set_state(self, state): # TODO: Include everything including the graph structure. block_state = state['blocks'] node_state = state['nodes'] for block_id, block in enumerate(self.blocks): block.set_state(block_state[str(block_id)]) for node_id, node in enumerate(self._nodes): node.set_state(node_state[str(node_id)]) def save(self, fname): state = self.get_state() with tf.io.gfile.GFile(fname, 'wb') as f: pickle.dump(state, f) return str(fname) def reload(self, fname): with tf.io.gfile.GFile(fname, 'rb') as f: state = pickle.load(f) self.set_state(state) def build(self, hp): self._register_hps(hp) class PlainGraph(Graph): """A graph built from a HyperGraph to produce KerasGraph and PreprocessGraph. A PlainGraph does not contain HyperBlock. HyperGraph's hyper_build function returns an instance of PlainGraph, which can be directly built into a KerasGraph and a PreprocessGraph. # Arguments inputs: A list of input node(s) for the PlainGraph. outputs: A list of output node(s) for the PlainGraph. 
""" def __init__(self, inputs, outputs, **kwargs): self._keras_model_inputs = [] super().__init__(inputs=inputs, outputs=outputs, **kwargs) def _build_network(self): super()._build_network() # Find the model input nodes for node in self._nodes: if self._is_keras_model_inputs(node): self._keras_model_inputs.append(node) self._keras_model_inputs = sorted(self._keras_model_inputs, key=lambda x: self._node_to_id[x]) @staticmethod def _is_keras_model_inputs(node): for block in node.in_blocks: if not isinstance(block, base.Preprocessor): return False for block in node.out_blocks: if not isinstance(block, base.Preprocessor): return True return False def build_keras_graph(self): return KerasGraph(self._keras_model_inputs, self.outputs, override_hps=self.override_hps) def build_preprocess_graph(self): return PreprocessGraph(self.inputs, self._keras_model_inputs, override_hps=self.override_hps) class KerasGraph(Graph, kerastuner.HyperModel): """A graph and HyperModel to be built into a Keras model.""" def build(self, hp): """Build the HyperModel into a Keras Model.""" super().build(hp) self.compile(compiler.AFTER) real_nodes = {} for input_node in self.inputs: node_id = self._node_to_id[input_node] real_nodes[node_id] = input_node.build() for block in self.blocks: if isinstance(block, base.Preprocessor): continue temp_inputs = [real_nodes[self._node_to_id[input_node]] for input_node in block.inputs] outputs = block.build(hp, inputs=temp_inputs) outputs = nest.flatten(outputs) for output_node, real_output_node in zip(block.outputs, outputs): real_nodes[self._node_to_id[output_node]] = real_output_node model = tf.keras.Model( [real_nodes[self._node_to_id[input_node]] for input_node in self.inputs], [real_nodes[self._node_to_id[output_node]] for output_node in self.outputs]) return self._compile_keras_model(hp, model) def _get_metrics(self): metrics = {} for output_node in self.outputs: block = output_node.in_blocks[0] if isinstance(block, base.Head): metrics[block.name] = block.metrics return metrics def _get_loss(self): loss = {} for output_node in self.outputs: block = output_node.in_blocks[0] if isinstance(block, base.Head): loss[block.name] = block.loss return loss def _compile_keras_model(self, hp, model): # Specify hyperparameters from compile(...) optimizer = hp.Choice('optimizer', ['adam', 'adadelta', 'sgd'], default='adam') model.compile(optimizer=optimizer, metrics=self._get_metrics(), loss=self._get_loss()) return model class PreprocessGraph(Graph): """A graph consists of only Preprocessors. It is both a search space with Hyperparameters and a model to be fitted. It preprocess the dataset with the Preprocessors. The output is the input to the Keras model. It does not extend Hypermodel class because it cannot be built into a Keras model. """ def preprocess(self, dataset, validation_data=None, fit=False): """Preprocess the data to be ready for the Keras Model. # Arguments dataset: tf.data.Dataset. Training data. validation_data: tf.data.Dataset. Validation data. fit: Boolean. Whether to fit the preprocessing layers with x and y. # Returns if validation data is provided. A tuple of two preprocessed tf.data.Dataset, (train, validation). Otherwise, return the training dataset. """ dataset = self._preprocess(dataset, fit=fit) if validation_data: validation_data = self._preprocess(validation_data) return dataset, validation_data def _preprocess(self, dataset, fit=False): # A list of input node ids in the same order as the x in the dataset. 
input_node_ids = [self._node_to_id[input_node] for input_node in self.inputs] # Iterate until all the model inputs have their data. while set(map(lambda node: self._node_to_id[node], self.outputs) ) - set(input_node_ids): # Gather the blocks for the next iteration over the dataset. blocks = [] for node_id in input_node_ids: for block in self._nodes[node_id].out_blocks: if block in self.blocks: blocks.append(block) if fit: # Iterate the dataset to fit the preprocessors in current depth. self._fit(dataset, input_node_ids, blocks) # Transform the dataset. output_node_ids = [] dataset = dataset.map(functools.partial( self._transform, input_node_ids=input_node_ids, output_node_ids=output_node_ids, blocks=blocks, fit=fit)) # Build input_node_ids for next depth. input_node_ids = output_node_ids return dataset def _fit(self, dataset, input_node_ids, blocks): # Iterate the dataset to fit the preprocessors in current depth. for x, y in dataset: x = nest.flatten(x) id_to_data = { node_id: temp_x for temp_x, node_id in zip(x, input_node_ids) } for block in blocks: data = [id_to_data[self._node_to_id[input_node]] for input_node in block.inputs] block.update(data, y=y) # Finalize and set the shapes of the output nodes. for block in blocks: block.finalize() nest.flatten(block.outputs)[0].shape = block.output_shape def _transform(self, x, y, input_node_ids, output_node_ids, blocks, fit=False): x = nest.flatten(x) id_to_data = { node_id: temp_x for temp_x, node_id in zip(x, input_node_ids) } output_data = {} # Transform each x by the corresponding block. for hm in blocks: data = [id_to_data[self._node_to_id[input_node]] for input_node in hm.inputs] data = tf.py_function(functools.partial(hm.transform, fit=fit), inp=nest.flatten(data), Tout=hm.output_types()) data = nest.flatten(data)[0] data.set_shape(hm.output_shape) output_data[self._node_to_id[hm.outputs[0]]] = data # Keep the Keras Model inputs even they are not inputs to the blocks. for node_id, data in id_to_data.items(): if self._nodes[node_id] in self.outputs: output_data[node_id] = data for node_id in sorted(output_data.keys()): output_node_ids.append(node_id) return tuple(map( lambda node_id: output_data[node_id], output_node_ids)), y def build(self, hp): """Obtain the values of all the HyperParameters. Different from the build function of Hypermodel. This build function does not produce a Keras model. It only obtain the hyperparameter values from HyperParameters. # Arguments hp: HyperParameters. """ super().build(hp) self.compile(compiler.BEFORE) for block in self.blocks: block.build(hp) def copy(old_instance): instance = old_instance.__class__() instance.set_state(old_instance.get_state()) return instance class HyperGraph(Graph): """A HyperModel based on connected Blocks and HyperBlocks. # Arguments inputs: A list of input node(s) for the HyperGraph. outputs: A list of output node(s) for the HyperGraph. """ def __init__(self, inputs, outputs, **kwargs): super().__init__(inputs, outputs, **kwargs) self.compile(compiler.HYPER) def build_graphs(self, hp): plain_graph = self.hyper_build(hp) preprocess_graph = plain_graph.build_preprocess_graph() preprocess_graph.build(hp) return (preprocess_graph, plain_graph.build_keras_graph()) def hyper_build(self, hp): """Build a GraphHyperModel with no HyperBlock but only Block.""" # Make sure get_uid would count from start. 
tf.keras.backend.clear_session() inputs = [] old_node_to_new = {} for old_input_node in self.inputs: input_node = copy(old_input_node) inputs.append(input_node) old_node_to_new[old_input_node] = input_node for old_block in self.blocks: inputs = [old_node_to_new[input_node] for input_node in old_block.inputs] if isinstance(old_block, base.HyperBlock): outputs = old_block.build(hp, inputs=inputs) else: outputs = copy(old_block)(inputs) for output_node, old_output_node in zip(outputs, old_block.outputs): old_node_to_new[old_output_node] = output_node inputs = [] for input_node in self.inputs: inputs.append(old_node_to_new[input_node]) outputs = [] for output_node in self.outputs: outputs.append(old_node_to_new[output_node]) return PlainGraph(inputs, outputs, override_hps=self.override_hps)
37.620985
85
0.594001
17,224
0.980363
0
0
318
0.0181
0
0
3,967
0.225795
78f06ac9567797f0104f062bd9b9ac12e57cffa6
474
py
Python
Python/longest-valid-parentheses.py
shreyventure/LeetCode-Solutions
74423d65702b78974e390f17c9d6365d17e6eed5
[ "MIT" ]
388
2020-06-29T08:41:27.000Z
2022-03-31T22:55:05.000Z
Python/longest-valid-parentheses.py
shreyventure/LeetCode-Solutions
74423d65702b78974e390f17c9d6365d17e6eed5
[ "MIT" ]
178
2020-07-16T17:15:28.000Z
2022-03-09T21:01:50.000Z
Python/longest-valid-parentheses.py
shreyventure/LeetCode-Solutions
74423d65702b78974e390f17c9d6365d17e6eed5
[ "MIT" ]
263
2020-07-13T18:33:20.000Z
2022-03-28T13:54:10.000Z
'''
Speed: 95.97%
Memory: 24.96%

Time complexity: O(n)
Space complexity: O(n)
'''

class Solution(object):
    def longestValidParentheses(self, s):
        ans=0
        stack=[-1]
        for i in range(len(s)):
            if(s[i]=='('):
                stack.append(i)
            else:
                stack.pop()
                if(len(stack)==0):
                    stack.append(i)
                else:
                    ans=max(ans,i-stack[-1])
        return ans
23.7
44
0.436709
392
0.827004
0
0
0
0
0
0
84
0.177215
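In the solution above the stack always holds the index just below the current run of matched parentheses, so i - stack[-1] is the length of the valid span ending at i. A short usage sketch, meant to run in the same module as the class (the test strings are illustrative, not part of the original file):

# '(()' -> 2, ')()())' -> 4, '' -> 0
sol = Solution()
for s, want in [("(()", 2), (")()())", 4), ("", 0)]:
    got = sol.longestValidParentheses(s)
    assert got == want, (s, got, want)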
78f17ff49e114c184b6a1474d4e3188bcdc4d56c
447
py
Python
setup.py
i25ffz/openaes
a0dbde40d4ce0e4186ea14c4dc9519fe152c018c
[ "BSD-2-Clause" ]
null
null
null
setup.py
i25ffz/openaes
a0dbde40d4ce0e4186ea14c4dc9519fe152c018c
[ "BSD-2-Clause" ]
null
null
null
setup.py
i25ffz/openaes
a0dbde40d4ce0e4186ea14c4dc9519fe152c018c
[ "BSD-2-Clause" ]
null
null
null
from distutils.core import setup, Extension
import os.path

kw = {
    'name':"PyOpenAES",
    'version':"0.10.0",
    'description':"OpenAES cryptographic library for Python.",
    'ext_modules':[
        Extension(
            'openaes',
            include_dirs = ['inc', 'src/isaac'],
            # define_macros=[('ENABLE_PYTHON', '1')],
            sources = [
                os.path.join('src/oaes_lib.c'),
                os.path.join('src/oaes_py.c'),
                os.path.join('src/isaac/rand.c')
            ]
        )
    ]
}

setup(**kw)
20.318182
59
0.624161
0
0
0
0
0
0
0
0
218
0.487696
78f2293017d6edca3048eb7b10371f7d73e4c830
967
py
Python
examples/isosurface_demo2.py
jayvdb/scitools
8df53a3a3bc95377f9fa85c04f3a329a0ec33e67
[ "BSD-3-Clause" ]
62
2015-03-28T18:07:51.000Z
2022-02-12T20:32:36.000Z
examples/isosurface_demo2.py
jayvdb/scitools
8df53a3a3bc95377f9fa85c04f3a329a0ec33e67
[ "BSD-3-Clause" ]
7
2015-06-09T09:56:03.000Z
2021-05-20T17:53:15.000Z
examples/isosurface_demo2.py
jayvdb/scitools
8df53a3a3bc95377f9fa85c04f3a329a0ec33e67
[ "BSD-3-Clause" ]
29
2015-04-16T03:48:57.000Z
2022-02-03T22:06:52.000Z
#!/usr/bin/env python

# Example taken from:
# http://www.mathworks.com/access/helpdesk/help/techdoc/visualize/f5-3371.html

from scitools.easyviz import *
from time import sleep
from scipy import io

setp(interactive=False)

# Displaying an Isosurface:
mri = io.loadmat('mri_matlab_v6.mat')
D = mri['D']
#Ds = smooth3(D);
isosurface(D,5,indexing='xy')
#hiso = isosurface(Ds,5),
#    'FaceColor',[1,.75,.65],...
#    'EdgeColor','none');
shading('interp')

# Adding an Isocap to Show a Cutaway Surface:
#hcap = patch(isocaps(D,5),...
#    'FaceColor','interp',...
#    'EdgeColor','none');
#colormap(map)

# Define the View:
view(45,30)
axis('tight')
daspect([1,1,.4])

# Add Lighting:
#lightangle(45,30);
#set(gcf,'Renderer','zbuffer'); lighting phong
#isonormals(Ds,hiso)
#set(hcap,'AmbientStrength',.6)
#set(hiso,'SpecularColorReflectance',0,'SpecularExponent',50)

show()
raw_input('Press Return key to quit: ')

#savefig('tmp_isosurf2a.eps')
#savefig('tmp_isosurf2a.png')
20.574468
78
0.701138
0
0
0
0
0
0
0
0
714
0.738366
78f2658f7e058410b484a9d45fd69949bca2813c
4,099
py
Python
structural_model/util_morphology.py
zibneuro/udvary-et-al-2022
8b456c41e72958677cb6035028d9c23013cb7c7e
[ "MIT" ]
1
2022-03-11T13:43:50.000Z
2022-03-11T13:43:50.000Z
structural_model/util_morphology.py
zibneuro/udvary-et-al-2022
8b456c41e72958677cb6035028d9c23013cb7c7e
[ "MIT" ]
null
null
null
structural_model/util_morphology.py
zibneuro/udvary-et-al-2022
8b456c41e72958677cb6035028d9c23013cb7c7e
[ "MIT" ]
null
null
null
import os import numpy as np import json import util_amira def getEdgeLabelName(label): if(label == 6): return "axon" elif(label == 4): return "apical" elif(label == 5): return "basal" elif(label == 7): return "soma" else: return "other" def getSomaPosition(points): somaPos = [] for p in points: if(p["edge_label"] == "soma"): somaPos.append(p["position"]) return np.mean(np.vstack(tuple(somaPos)), axis=0) def loadAmiraExport(filename): with open(filename) as f: lines = f.readlines() labels = lines[0].rstrip().split(",") points = [] for i in range(1, len(lines)): line = lines[i].rstrip().split(",") point = {} point["edge_id"] = int(line[labels.index("edge_id")]) point["source_node_id"] = int(line[labels.index("source_node")]) point["target_node_id"] = int(line[labels.index("target_node")]) point["edge_label"] = getEdgeLabelName( int(line[labels.index("edge_label")])) point["edge_point_id"] = int(line[labels.index("edge_point")]) point["position"] = np.array([float(line[labels.index("x")]), float( line[labels.index("y")]), float(line[labels.index("z")])]) point["radius"] = float(line[labels.index("radius")]) point["inside_vS1"] = int(line[labels.index("inside_vS1")]) if(point["edge_label"] != "other"): points.append(point) return points def separateCompartments(edgePoints): apical = [] basal = [] axon = [] for edgePoint in edgePoints: if(edgePoint["edge_label"] == "apical"): apical.append(edgePoint) elif(edgePoint["edge_label"] == "basal"): basal.append(edgePoint) elif(edgePoint["edge_label"] == "axon"): axon.append(edgePoint) compartments = {} compartments["apical"] = apical compartments["basal"] = basal compartments["axon"] = axon return compartments def loadGraphset(networkDir): if(os.path.exists(os.path.join(networkDir, "morphologies", "Morphologies.am"))): graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "Morphologies.am"), legacy=False) else: graphset = util_amira.readSpatialGraphSet(os.path.join(networkDir, "morphologies", "MorphologiesWithNeuronIDs.am"), legacy=True) return graphset def writeToCache(filename, transformation, neuronId): transformationFile = "/tmp/transformation_{}".format(neuronId) np.savetxt(transformationFile, transformation) meta = { "morphologyFile" : filename, "transformationFile" : transformationFile } metaFile = "/tmp/meta_{}.json".format(neuronId) with open(metaFile, "w") as f: print("meta", meta) json.dump(meta, f) def readFromCache(neuronId): metaFile = "/tmp/meta_{}.json".format(neuronId) with open(metaFile) as f: meta = json.load(f) transformationFile = meta["transformationFile"] T = np.loadtxt(transformationFile) morphologyFile = meta["morphologyFile"] return morphologyFile, T def loadAxon(graphset, neuronId, saveToCache = False, loadFromCache = False): if(loadFromCache): filename, T = readFromCache(neuronId) else: idx = len(graphset[neuronId]) - 1 filename = graphset[neuronId][idx]["file"] T = graphset[neuronId][idx]["transformation"] if(saveToCache): writeToCache(filename, T, neuronId) return util_amira.readSpatialGraph(filename, T) def loadDendrite(graphset, neuronId, saveToCache = False, loadFromCache = False): if(loadFromCache): filename, T = readFromCache(neuronId) else: filename = graphset[neuronId][0]["file"] T = graphset[neuronId][0]["transformation"] if(saveToCache): writeToCache(filename, T, neuronId) return util_amira.readSpatialGraph(filename, T)
32.275591
138
0.613808
0
0
0
0
0
0
0
0
644
0.157111
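The helpers above compose into a small pipeline: read the exported edge points, split them into axon/apical/basal compartments, and average the soma points into a single position. A minimal sketch under the assumption that an Amira CSV export exists at a placeholder path:

from util_morphology import getSomaPosition, loadAmiraExport, separateCompartments

# 'neuron_export.csv' is a placeholder; it must contain at least one soma-labelled point.
points = loadAmiraExport("neuron_export.csv")
compartments = separateCompartments(points)
soma_xyz = getSomaPosition(points)

print("soma position:", soma_xyz)
print({label: len(pts) for label, pts in compartments.items()})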
78f33bf3b80a0a0d98e998f783441284fa1b3068
3,503
py
Python
invenio_madmp/views.py
FAIR-Data-Austria/invenio-madmp
74372ee794f81666f5e9cf08ef448c21b2e428be
[ "MIT" ]
1
2022-03-02T10:37:29.000Z
2022-03-02T10:37:29.000Z
invenio_madmp/views.py
FAIR-Data-Austria/invenio-madmp
74372ee794f81666f5e9cf08ef448c21b2e428be
[ "MIT" ]
9
2020-08-25T12:03:08.000Z
2020-10-20T11:45:32.000Z
invenio_madmp/views.py
FAIR-Data-Austria/invenio-madmp
74372ee794f81666f5e9cf08ef448c21b2e428be
[ "MIT" ]
null
null
null
"""Blueprint definitions for maDMP integration.""" from flask import Blueprint, jsonify, request from invenio_db import db from .convert import convert_dmp from .models import DataManagementPlan def _summarize_dmp(dmp: DataManagementPlan) -> dict: """Create a summary dictionary for the given DMP.""" res = {"dmp_id": dmp.dmp_id, "datasets": []} for ds in dmp.datasets: dataset = {"dataset_id": ds.dataset_id, "record": None} if ds.record: dataset["record"] = ds.record.model.json res["datasets"].append(dataset) return res def create_rest_blueprint(app) -> Blueprint: """Create the blueprint for the REST endpoints using the current app extensions.""" # note: using flask.current_app isn't directly possible, because Invenio-MaDMP is # registered as an extension in the API app, not the "normal" app # (which is the one usually returned by current_app) rest_blueprint = Blueprint("invenio_madmp", __name__) auth = app.extensions["invenio-madmp"].auth @rest_blueprint.route("/dmps", methods=["GET"]) @auth.login_required def list_dmps(): """Give a summary of all stored DMPs.""" dmps = DataManagementPlan.query.all() res = [_summarize_dmp(dmp) for dmp in dmps] return jsonify(res) @rest_blueprint.route("/dmps", methods=["POST"]) @auth.login_required def create_dmp(): """Create a new DMP from the maDMP JSON in the request body.""" if request.json is None: return jsonify({"error": "no json body supplied"}), 400 elif request.json.get("dmp") is None: return jsonify({"error": "dmp not found in the body"}), 400 dmp_json = request.json.get("dmp", {}) dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier") if DataManagementPlan.get_by_dmp_id(dmp_json_id) is not None: return jsonify({"error": "dmp with the same id already exists"}), 409 dmp = convert_dmp(dmp_json) db.session.add(dmp) db.session.commit() # TODO change the returned value return jsonify(_summarize_dmp(dmp)), 201 @rest_blueprint.route("/dmps/<dmp_id>", methods=["PATCH"]) @auth.login_required def update_dmp(dmp_id: str = None): """Update the specified DMP using the maDMP JSON in the request body.""" hard_sync = request.args.get("sync", "soft") == "hard" if request.json is None: return jsonify({"error": "no json body supplied"}), 400 elif request.json.get("dmp") is None: return jsonify({"error": "dmp not found in the body"}), 400 dmp_json = request.json.get("dmp", {}) dmp_json_id = dmp_json.get("dmp_id", {}).get("identifier") if dmp_id and dmp_json_id and dmp_id != dmp_json_id: return jsonify({"error": "mismatch between dmp id from url and body"}), 400 dmp_id = dmp_id or dmp_json_id if DataManagementPlan.get_by_dmp_id(dmp_id) is None: return jsonify({"error": "dmp not found"}), 404 dmp = convert_dmp(dmp_json, hard_sync) db.session.commit() # TODO change the returned value return jsonify(_summarize_dmp(dmp)) @rest_blueprint.route("/dmps", methods=["PATCH"]) @auth.login_required def update_dmp_without_id(): """Update the specified DMP using the maDMP JSON in the request body.""" return update_dmp(None) return rest_blueprint
35.744898
87
0.643163
0
0
0
0
2,402
0.685698
0
0
1,176
0.335712
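create_dmp() above expects a JSON body whose top-level "dmp" object carries dmp_id.identifier and answers 409 when that identifier already exists. A client sketch, assuming the blueprint is mounted at the server root and that HTTP basic credentials are accepted (URL, credentials, and the stub payload are placeholders; a real maDMP document carries more fields):

import requests

payload = {"dmp": {"dmp_id": {"identifier": "https://example.org/dmps/1"}}}

# POST /dmps creates the plan; PATCH /dmps/<dmp_id> would update it later.
resp = requests.post(
    "http://localhost:5000/dmps",
    json=payload,
    auth=("dmp-client", "secret"),
)
print(resp.status_code, resp.json())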
78f362e6e499abd6ba76d1b520e7369bf25061c9
257
py
Python
retrieval/urls.py
aipassio/visual_retrieval
ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4
[ "MIT" ]
null
null
null
retrieval/urls.py
aipassio/visual_retrieval
ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4
[ "MIT" ]
null
null
null
retrieval/urls.py
aipassio/visual_retrieval
ce8dae2ad517a9edb5e278163dd6d0f7ffc1b5f4
[ "MIT" ]
null
null
null
from django.urls import path

from . import views

urlpatterns = [
    path('', views.index, name='index'),
    path('retrieval_insert', views.retrieval_insert, name='retrieval_insert'),
    path('retrieval_get', views.retrieval_get, name='retrieval_get')
]
28.555556
78
0.723735
0
0
0
0
0
0
0
0
75
0.291829
78f3cd314838c8b00373f5ff15a91db4a0e4e749
1,427
py
Python
scripts/Interfacing/encoder_class.py
noshluk2/Wifi-Signal-Robot-localization
538e6c4e7a63486f22ab708908c476cd808f720c
[ "MIT" ]
null
null
null
scripts/Interfacing/encoder_class.py
noshluk2/Wifi-Signal-Robot-localization
538e6c4e7a63486f22ab708908c476cd808f720c
[ "MIT" ]
null
null
null
scripts/Interfacing/encoder_class.py
noshluk2/Wifi-Signal-Robot-localization
538e6c4e7a63486f22ab708908c476cd808f720c
[ "MIT" ]
null
null
null
import RPi.GPIO as GPIO
import threading


class Encoder(object):

    def __init__(self, r_en_a,r_en_b,l_en_a,l_en_b):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(r_en_a, GPIO.IN)
        GPIO.setup(r_en_b, GPIO.IN)
        GPIO.setup(l_en_a, GPIO.IN)
        GPIO.setup(l_en_b, GPIO.IN)
        self.l_en_a=l_en_a;self.l_en_b=l_en_b;
        self.r_en_a=r_en_a;self.r_en_b=r_en_b;
        GPIO.add_event_detect(r_en_a, GPIO.BOTH, callback=self.Update_encR)
        GPIO.add_event_detect(l_en_a, GPIO.BOTH, callback=self.Update_encL)
        self.count_R =0
        self.count_L=0

    def Update_encR(self,channel):
        if GPIO.input(self.r_en_a) == GPIO.input(self.r_en_b):
            self.count_R=self.count_R + 1
        else :
            self.count_R = self.count_R - 1

    def Update_encL(self,channel):
        if GPIO.input(self.l_en_a) == GPIO.input(self.l_en_b):
            self.count_L=self.count_L + 1
        else :
            self.count_L = self.count_L - 1
        return (self.count_L)

    def get_r_enc(self):
        return self.count_R

    def get_l_enc(self):
        return self.count_L

    def clear_encoders(self):
        self.count_R=0
        self.count_L=0

# r_en_a = 27
# r_en_b = 10
# l_en_a = 5
# l_en_b = 6
# enc_obj = Encoder(27,10,5,6)

# def update_encoders():
#     threading.Timer(1,update_encoders).start()
#     print(" looping ")
# update_encoders()
26.425926
75
0.618781
1,175
0.823406
0
0
0
0
0
0
196
0.137351
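The commented-out block at the end of the file sketches the intended wiring; a runnable variant along the same lines, assuming the same BCM pins (27, 10, 5, 6) and a Raspberry Pi with RPi.GPIO available:

import time

from encoder_class import Encoder

# BCM pin numbers taken from the commented example inside the module.
enc = Encoder(27, 10, 5, 6)

try:
    while True:
        print("right:", enc.get_r_enc(), "left:", enc.get_l_enc())
        time.sleep(1.0)
except KeyboardInterrupt:
    enc.clear_encoders()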
78f527fe8104b4c467eef06ba01999f8a1c7339e
2,286
py
Python
systori/apps/equipment/urls.py
systori/systori
e309c63e735079ff6032fdaf1db354ec872b28b1
[ "BSD-3-Clause" ]
12
2018-01-30T00:44:06.000Z
2020-07-13T05:20:48.000Z
systori/apps/equipment/urls.py
systori/systori
e309c63e735079ff6032fdaf1db354ec872b28b1
[ "BSD-3-Clause" ]
36
2018-03-06T17:49:50.000Z
2020-06-23T19:26:00.000Z
systori/apps/equipment/urls.py
systori/systori
e309c63e735079ff6032fdaf1db354ec872b28b1
[ "BSD-3-Clause" ]
3
2018-08-03T07:03:09.000Z
2020-07-09T20:21:10.000Z
from django.conf.urls import url
from django.urls import path, include
from systori.apps.user.authorization import office_auth
from systori.apps.equipment.views import EquipmentListView, EquipmentView, EquipmentCreate, EquipmentDelete, EquipmentUpdate, RefuelingStopCreate, RefuelingStopDelete, RefuelingStopUpdate, MaintenanceCreate, MaintenanceDelete, MaintenanceUpdate

urlpatterns = [
    # two url rules to make the active_filter keyword optional
    url(
        r"^equipment/$", office_auth(EquipmentListView.as_view()), name="equipment.list"
    ),
    url(
        r"^equipment/(?P<active_filter>[\w-]+)$",
        office_auth(EquipmentListView.as_view()),
        name="equipment.list",
    ),
    url(
        r"^equipment-(?P<pk>\d+)$",
        office_auth(EquipmentView.as_view()),
        name="equipment.view",
    ),
    url(
        r"^create-equipment$",
        office_auth(EquipmentCreate.as_view()),
        name="equipment.create",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/edit$",
        office_auth(EquipmentUpdate.as_view()),
        name="equipment.edit",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/delete$",
        office_auth(EquipmentDelete.as_view()),
        name="equipment.delete",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/create-refueling-stop$",
        office_auth(RefuelingStopCreate.as_view()),
        name="refueling_stop.create",
    ),
    url(
        r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/update$",
        office_auth(RefuelingStopUpdate.as_view()),
        name="refueling_stop.update",
    ),
    url(
        r"^equipment-(?P<equipment_pk>\d+)/refueling-stop-(?P<pk>\d+)/delete",
        office_auth(RefuelingStopDelete.as_view()),
        name="refueling_stop.delete",
    ),
    url(
        r"^equipment-(?P<pk>\d+)/create-maintenance",
        office_auth(MaintenanceCreate.as_view()),
        name="maintenance.create",
    ),
    url(
        r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/update$",
        office_auth(MaintenanceUpdate.as_view()),
        name="maintenance.update",
    ),
    url(
        r"^equipment-(?P<equipment_pk>\d+)/maintenance-(?P<pk>\d+)/delete",
        office_auth(MaintenanceDelete.as_view()),
        name="maintenance.delete",
    ),
]
33.130435
244
0.624672
0
0
0
0
0
0
0
0
817
0.357393
78f5546c49c417508d26fa0f809340459987fc66
13,697
py
Python
paddlehub/module/check_info_pb2.py
MRXLT/PaddleHub
a9cd941bef2ac5a2d81b2f20422a4fbd9a87eb90
[ "Apache-2.0" ]
1
2019-07-03T13:08:39.000Z
2019-07-03T13:08:39.000Z
paddlehub/module/check_info_pb2.py
binweiwu/PaddleHub
f92d0edd18057044ef248d7f2c42d8f347b62fbf
[ "Apache-2.0" ]
null
null
null
paddlehub/module/check_info_pb2.py
binweiwu/PaddleHub
f92d0edd18057044ef248d7f2c42d8f347b62fbf
[ "Apache-2.0" ]
null
null
null
#coding:utf-8 # Generated by the protocol buffer compiler. DO NOT EDIT! # source: check_info.proto import sys _b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='check_info.proto', package='paddlehub.module.checkinfo', syntax='proto3', serialized_pb=_b( '\n\x10\x63heck_info.proto\x12\x1apaddlehub.module.checkinfo\"\x85\x01\n\x08\x46ileInfo\x12\x11\n\tfile_name\x18\x01 \x01(\t\x12\x33\n\x04type\x18\x02 \x01(\x0e\x32%.paddlehub.module.checkinfo.FILE_TYPE\x12\x0f\n\x07is_need\x18\x03 \x01(\x08\x12\x0b\n\x03md5\x18\x04 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x05 \x01(\t\"\x84\x01\n\x08Requires\x12>\n\x0crequire_type\x18\x01 \x01(\x0e\x32(.paddlehub.module.checkinfo.REQUIRE_TYPE\x12\x0f\n\x07version\x18\x02 \x01(\t\x12\x12\n\ngreat_than\x18\x03 \x01(\x08\x12\x13\n\x0b\x64\x65scription\x18\x04 \x01(\t\"\xc8\x01\n\tCheckInfo\x12\x16\n\x0epaddle_version\x18\x01 \x01(\t\x12\x13\n\x0bhub_version\x18\x02 \x01(\t\x12\x1c\n\x14module_proto_version\x18\x03 \x01(\t\x12\x38\n\nfile_infos\x18\x04 \x03(\x0b\x32$.paddlehub.module.checkinfo.FileInfo\x12\x36\n\x08requires\x18\x05 \x03(\x0b\x32$.paddlehub.module.checkinfo.Requires*\x1e\n\tFILE_TYPE\x12\x08\n\x04\x46ILE\x10\x00\x12\x07\n\x03\x44IR\x10\x01*[\n\x0cREQUIRE_TYPE\x12\x12\n\x0ePYTHON_PACKAGE\x10\x00\x12\x0e\n\nHUB_MODULE\x10\x01\x12\n\n\x06SYSTEM\x10\x02\x12\x0b\n\x07\x43OMMAND\x10\x03\x12\x0e\n\nPY_VERSION\x10\x04\x42\x02H\x03\x62\x06proto3' )) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _FILE_TYPE = _descriptor.EnumDescriptor( name='FILE_TYPE', full_name='paddlehub.module.checkinfo.FILE_TYPE', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='FILE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='DIR', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, serialized_start=522, serialized_end=552, ) _sym_db.RegisterEnumDescriptor(_FILE_TYPE) FILE_TYPE = enum_type_wrapper.EnumTypeWrapper(_FILE_TYPE) _REQUIRE_TYPE = _descriptor.EnumDescriptor( name='REQUIRE_TYPE', full_name='paddlehub.module.checkinfo.REQUIRE_TYPE', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='PYTHON_PACKAGE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='HUB_MODULE', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='SYSTEM', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='COMMAND', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='PY_VERSION', index=4, number=4, options=None, type=None), ], containing_type=None, options=None, serialized_start=554, serialized_end=645, ) _sym_db.RegisterEnumDescriptor(_REQUIRE_TYPE) REQUIRE_TYPE = enum_type_wrapper.EnumTypeWrapper(_REQUIRE_TYPE) FILE = 0 DIR = 1 PYTHON_PACKAGE = 0 HUB_MODULE = 1 SYSTEM = 2 COMMAND = 3 PY_VERSION = 4 _FILEINFO = _descriptor.Descriptor( name='FileInfo', full_name='paddlehub.module.checkinfo.FileInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( 
name='file_name', full_name='paddlehub.module.checkinfo.FileInfo.file_name', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='paddlehub.module.checkinfo.FileInfo.type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='is_need', full_name='paddlehub.module.checkinfo.FileInfo.is_need', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='md5', full_name='paddlehub.module.checkinfo.FileInfo.md5', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='description', full_name='paddlehub.module.checkinfo.FileInfo.description', index=4, number=5, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=49, serialized_end=182, ) _REQUIRES = _descriptor.Descriptor( name='Requires', full_name='paddlehub.module.checkinfo.Requires', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='require_type', full_name='paddlehub.module.checkinfo.Requires.require_type', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='version', full_name='paddlehub.module.checkinfo.Requires.version', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='great_than', full_name='paddlehub.module.checkinfo.Requires.great_than', index=2, number=3, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='description', full_name='paddlehub.module.checkinfo.Requires.description', index=3, number=4, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=185, serialized_end=317, ) _CHECKINFO = _descriptor.Descriptor( name='CheckInfo', full_name='paddlehub.module.checkinfo.CheckInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ 
_descriptor.FieldDescriptor( name='paddle_version', full_name='paddlehub.module.checkinfo.CheckInfo.paddle_version', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='hub_version', full_name='paddlehub.module.checkinfo.CheckInfo.hub_version', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='module_proto_version', full_name= 'paddlehub.module.checkinfo.CheckInfo.module_proto_version', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='file_infos', full_name='paddlehub.module.checkinfo.CheckInfo.file_infos', index=3, number=4, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='requires', full_name='paddlehub.module.checkinfo.CheckInfo.requires', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[], nested_types=[], enum_types=[], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[], serialized_start=320, serialized_end=520, ) _FILEINFO.fields_by_name['type'].enum_type = _FILE_TYPE _REQUIRES.fields_by_name['require_type'].enum_type = _REQUIRE_TYPE _CHECKINFO.fields_by_name['file_infos'].message_type = _FILEINFO _CHECKINFO.fields_by_name['requires'].message_type = _REQUIRES DESCRIPTOR.message_types_by_name['FileInfo'] = _FILEINFO DESCRIPTOR.message_types_by_name['Requires'] = _REQUIRES DESCRIPTOR.message_types_by_name['CheckInfo'] = _CHECKINFO DESCRIPTOR.enum_types_by_name['FILE_TYPE'] = _FILE_TYPE DESCRIPTOR.enum_types_by_name['REQUIRE_TYPE'] = _REQUIRE_TYPE FileInfo = _reflection.GeneratedProtocolMessageType( 'FileInfo', (_message.Message, ), dict( DESCRIPTOR=_FILEINFO, __module__='check_info_pb2' # @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.FileInfo) )) _sym_db.RegisterMessage(FileInfo) Requires = _reflection.GeneratedProtocolMessageType( 'Requires', (_message.Message, ), dict( DESCRIPTOR=_REQUIRES, __module__='check_info_pb2' # @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.Requires) )) _sym_db.RegisterMessage(Requires) CheckInfo = _reflection.GeneratedProtocolMessageType( 'CheckInfo', (_message.Message, ), dict( DESCRIPTOR=_CHECKINFO, __module__='check_info_pb2' # @@protoc_insertion_point(class_scope:paddlehub.module.checkinfo.CheckInfo) )) _sym_db.RegisterMessage(CheckInfo) DESCRIPTOR.has_options = True DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('H\003')) # @@protoc_insertion_point(module_scope)
33.653563
1,160
0.611083
0
0
0
0
0
0
0
0
3,046
0.222384
78f57ad1256f2c324b8101344d3e6ef85566b84c
632
py
Python
40_3.py
rursvd/pynumerical2
4b2d33125b64a39099ac8eddef885e0ea11b237d
[ "MIT" ]
null
null
null
40_3.py
rursvd/pynumerical2
4b2d33125b64a39099ac8eddef885e0ea11b237d
[ "MIT" ]
null
null
null
40_3.py
rursvd/pynumerical2
4b2d33125b64a39099ac8eddef885e0ea11b237d
[ "MIT" ]
1
2019-12-03T01:34:19.000Z
2019-12-03T01:34:19.000Z
from numpy import zeros

# Define ab2 function
def ab2(f,t0,tf,y0,n):
    h = (tf - t0)/n
    t = zeros(n+1)
    y = zeros(n+1)
    t[0] = t0
    y[0] = y0
    y[1] = y[0] + h * f(t[0],y[0])
    t[1] = t[0] + h
    for i in range(1,n):
        y[i+1] = y[i] + (3.0/2.0) * h * f(t[i],y[i])-1.0/2.0 * h * f(t[i-1],y[i-1])
        t[i+1] = t[i] + h
    return t,y

# Define functions
def f(t,y):
    return t - y

# Set initial conditions
t0 = 0.0
tf = 1.0
y0 = 1.0
n = 5

# Execute AB2
t, yab2 = ab2(f,t0,tf,y0,n)

# Print results
print("%5s %8s" % ('t','y'))
for i in range(n+1):
    print("%8.4f %8.4f" % (t[i],yab2[i]))
18.588235
83
0.463608
0
0
0
0
0
0
0
0
120
0.189873
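For f(t, y) = t - y with y(0) = 1 the exact solution is y(t) = t - 1 + 2e^(-t), which gives a quick accuracy check on the two-step Adams-Bashforth routine; a minimal sketch meant to run alongside the definitions above:

from numpy import exp

# Exact solution of y' = t - y, y(0) = 1 is y(t) = t - 1 + 2*exp(-t).
t_fine, y_ab2 = ab2(f, 0.0, 1.0, 1.0, 50)
y_exact = t_fine - 1.0 + 2.0 * exp(-t_fine)

print("max abs error with n=50:", abs(y_ab2 - y_exact).max())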
78f5d63c04bc9e40555fc089be45ac3e10cbd62a
40,331
py
Python
test/test_parse_cs.py
NeonDaniel/lingua-franca
eee95702016b4013b0d81dc74da98cd2d2f53358
[ "Apache-2.0" ]
null
null
null
test/test_parse_cs.py
NeonDaniel/lingua-franca
eee95702016b4013b0d81dc74da98cd2d2f53358
[ "Apache-2.0" ]
null
null
null
test/test_parse_cs.py
NeonDaniel/lingua-franca
eee95702016b4013b0d81dc74da98cd2d2f53358
[ "Apache-2.0" ]
1
2020-09-22T12:39:17.000Z
2020-09-22T12:39:17.000Z
# # Copyright 2017 Mycroft AI Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import unittest from datetime import datetime, timedelta from lingua_franca import get_default_lang, set_default_lang, \ load_language, unload_language from lingua_franca.parse import extract_datetime from lingua_franca.parse import extract_duration from lingua_franca.parse import extract_number, extract_numbers from lingua_franca.parse import fuzzy_match from lingua_franca.parse import get_gender from lingua_franca.parse import match_one from lingua_franca.parse import normalize def setUpModule(): load_language("cs-cz") set_default_lang("cs") def tearDownModule(): unload_language("cs") class TestFuzzyMatch(unittest.TestCase): def test_matches(self): self.assertTrue(fuzzy_match("ty a já", "ty a já") >= 1.0) self.assertTrue(fuzzy_match("ty a já", "ty") < 0.5) self.assertTrue(fuzzy_match("Ty", "ty") >= 0.5) self.assertTrue(fuzzy_match("ty a já", "ty") == fuzzy_match("ty", "ty a já")) self.assertTrue(fuzzy_match("ty a já", "on nebo oni") < 0.23) def test_match_one(self): # test list of choices choices = ['frank', 'kate', 'harry', 'henry'] self.assertEqual(match_one('frank', choices)[0], 'frank') self.assertEqual(match_one('fran', choices)[0], 'frank') self.assertEqual(match_one('enry', choices)[0], 'henry') self.assertEqual(match_one('katt', choices)[0], 'kate') # test dictionary of choices choices = {'frank': 1, 'kate': 2, 'harry': 3, 'henry': 4} self.assertEqual(match_one('frank', choices)[0], 1) self.assertEqual(match_one('enry', choices)[0], 4) class TestNormalize(unittest.TestCase): def test_extract_number(self): self.assertEqual(extract_number("tohle je první test", ordinals=True), 1) self.assertEqual(extract_number("tohle je 2 test"), 2) self.assertEqual(extract_number("tohle je druhý test", ordinals=True), 2) #self.assertEqual(extract_number("tohle je třetí test"), 1.0 / 3.0) self.assertEqual(extract_number("tohle je třetí test", ordinals=True), 3.0) self.assertEqual(extract_number("ten čtvrtý", ordinals=True), 4.0) self.assertEqual(extract_number( "ten třicátý šestý", ordinals=True), 36.0) self.assertEqual(extract_number("tohle je test číslo 4"), 4) self.assertEqual(extract_number("jedna třetina šálku"), 1.0 / 3.0) self.assertEqual(extract_number("tři šálky"), 3) self.assertEqual(extract_number("1/3 šálku"), 1.0 / 3.0) self.assertEqual(extract_number("čtvrtina šálku"), 0.25) self.assertEqual(extract_number("1/4 cup"), 0.25) self.assertEqual(extract_number("jedna čtvrtina šálku"), 0.25) self.assertEqual(extract_number("2/3 šálků"), 2.0 / 3.0) self.assertEqual(extract_number("3/4 šálků"), 3.0 / 4.0) self.assertEqual(extract_number("1 a 3/4 šálků"), 1.75) self.assertEqual(extract_number("1 šálek a půl"), 1.5) self.assertEqual(extract_number("jeden šálek a polovina"), 1.5) self.assertEqual(extract_number("jedna a půl šálků"), 1.5) self.assertEqual(extract_number("jedna a jedna polovina šálků"), 1.5) self.assertEqual(extract_number("tři čtvrtina šálků"), 3.0 / 4.0) self.assertEqual(extract_number("tři čtvrtiny šálků"), 
3.0 / 4.0) self.assertEqual(extract_number("dvacet dva"), 22) self.assertEqual(extract_number( "Dvacet dva s velkým písmenam na začátku"), 22) self.assertEqual(extract_number( "dvacet Dva s dva krát velkým písmem"), 22) self.assertEqual(extract_number( "dvacet Dva s různou velikostí písmen"), 22) self.assertEqual(extract_number("Dvacet dva a Tři Pětiny"), 22.6) self.assertEqual(extract_number("dvě sto"), 200) self.assertEqual(extract_number("devět tisíc"), 9000) self.assertEqual(extract_number("šest sto šedesát šest"), 666) self.assertEqual(extract_number("dva million"), 2000000) self.assertEqual(extract_number("dva million pět sto tisíc " "tun žhavého kovu"), 2500000) self.assertEqual(extract_number("šest trillion"), 6000000000000.0) self.assertEqual(extract_number("šest trilion", short_scale=False), 6e+18) self.assertEqual(extract_number("jedna tečka pět"), 1.5) self.assertEqual(extract_number("tři tečka čtrnáct"), 3.14) self.assertEqual(extract_number("nula tečka dva"), 0.2) self.assertEqual(extract_number("billion roků "), 1000000000.0) self.assertEqual(extract_number("bilion roků", short_scale=False), 1000000000000.0) self.assertEqual(extract_number("jedno sto tisíc"), 100000) self.assertEqual(extract_number("mínus 2"), -2) self.assertEqual(extract_number("záporné sedmdesát"), -70) self.assertEqual(extract_number("tisíc million"), 1000000000) self.assertEqual(extract_number("miliarda", short_scale=False), 1000000000) self.assertEqual(extract_number("šestina třetina"), 1 / 6 / 3) self.assertEqual(extract_number("šestina třetí", ordinals=True), 3) self.assertEqual(extract_number("třicet sekund"), 30) self.assertEqual(extract_number("třicátý druhý", ordinals=True), 32) self.assertEqual(extract_number("tohle je billiontý test", ordinals=True), 1e09) print("tohle udělat později") #self.assertEqual(extract_number("tohle je billiontý test"), 1e-9) self.assertEqual(extract_number("tohle je biliontý test", ordinals=True, short_scale=False), 1e12) print("tohle udělat později") # self.assertEqual(extract_number("tohle je biliontý test", # short_scale=False), 1e-12) # Verify non-power multiples of ten no longer discard # adjacent multipliers self.assertEqual(extract_number("dvacet tisíc"), 20000) self.assertEqual(extract_number("padesát million"), 50000000) # Verify smaller powers of ten no longer cause miscalculation of larger # powers of ten (see MycroftAI#86) self.assertEqual(extract_number("dvacet billion tři sto million \ devět sto padesát tisíc šest sto \ sedmdesát pět tečka osm"), 20300950675.8) self.assertEqual(extract_number("devět sto devadesát devět million devět \ sto devadesát devět tisíc devět \ sto devadesát devět tečka devět"), 999999999.9) # TODO why does "trillion" result in xxxx.0? 
self.assertEqual(extract_number("osm sto trillion dva sto \ padesát sedm"), 800000000000257.0) # TODO handle this case # self.assertEqual( # extract_number("6 dot six six six"), # 6.666) self.assertTrue(extract_number("Tenisový hráč je rychlý") is False) self.assertTrue(extract_number("křehký") is False) self.assertTrue(extract_number("křehká nula") is not False) self.assertEqual(extract_number("křehká nula"), 0) #self.assertTrue(extract_number("grobo 0") is not False) #self.assertEqual(extract_number("grobo 0"), 0) self.assertEqual(extract_number("dvojice piv"), 2) self.assertEqual(extract_number("dvojice sto piv"), 200) self.assertEqual(extract_number("dvojice tisíc piv"), 2000) self.assertEqual(extract_number( "tohle je 7 test", ordinals=True), 7) self.assertEqual(extract_number( "tohle je 7 test", ordinals=False), 7) self.assertTrue(extract_number("tohle je n. test") is False) self.assertEqual(extract_number("tohle je 1. test"), 1) self.assertEqual(extract_number("tohle je 2. test"), 2) self.assertEqual(extract_number("tohle je 3. test"), 3) self.assertEqual(extract_number("tohle je 31. test"), 31) self.assertEqual(extract_number("tohle je 32. test"), 32) self.assertEqual(extract_number("tohle je 33. test"), 33) self.assertEqual(extract_number("tohle je 34. test"), 34) self.assertEqual(extract_number("celkem 100%"), 100) def test_extract_duration_cs(self): self.assertEqual(extract_duration("10 sekund"), (timedelta(seconds=10.0), "")) self.assertEqual(extract_duration("5 minut"), (timedelta(minutes=5), "")) self.assertEqual(extract_duration("2 hodiny"), (timedelta(hours=2), "")) self.assertEqual(extract_duration("3 dny"), (timedelta(days=3), "")) self.assertEqual(extract_duration("25 týdnů"), (timedelta(weeks=25), "")) self.assertEqual(extract_duration("sedm hodin"), (timedelta(hours=7), "")) self.assertEqual(extract_duration("7.5 sekund"), (timedelta(seconds=7.5), "")) self.assertEqual(extract_duration("osm a polovina dne třicet" " devět sekund"), (timedelta(days=8.5, seconds=39), "")) self.assertEqual(extract_duration("Nastav časovač na 30 minut"), (timedelta(minutes=30), "nastav časovač na")) self.assertEqual(extract_duration("Čtyři a půl minuty do" " západu"), (timedelta(minutes=4.5), "do západu")) self.assertEqual(extract_duration("devatenáct minut po hodině"), (timedelta(minutes=19), "po hodině")) self.assertEqual(extract_duration("vzbuď mě za tři týdny, čtyři" " sto devadesát sedm dní, a" " tři sto 91.6 sekund"), (timedelta(weeks=3, days=497, seconds=391.6), "vzbuď mě za , , a")) self.assertEqual(extract_duration("film je jedna hodina, padesát sedm" " a půl minuty dlouhý"), (timedelta(hours=1, minutes=57.5), "film je , dlouhý")) self.assertEqual(extract_duration("10-sekund"), (timedelta(seconds=10.0), "")) self.assertEqual(extract_duration("5-minut"), (timedelta(minutes=5), "")) def test_extractdatetime_cs(self): def extractWithFormat(text): date = datetime(2017, 6, 27, 13, 4) # Tue June 27, 2017 @ 1:04pm [extractedDate, leftover] = extract_datetime(text, date) extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S") return [extractedDate, leftover] def testExtract(text, expected_date, expected_leftover): res = extractWithFormat(normalize(text)) self.assertEqual(res[0], expected_date, "for=" + text) self.assertEqual(res[1], expected_leftover, "for=" + text) testExtract("nyní je čas", "2017-06-27 13:04:00", "je čas") testExtract("za sekundu", "2017-06-27 13:04:01", "") testExtract("za minutu", "2017-06-27 13:05:00", "") # testExtract("ve dvou minutách", # "2017-06-27 13:06:00", 
"") # testExtract("in a couple of minutes", # "2017-06-27 13:06:00", "") # testExtract("ve dvou hodinách", # "2017-06-27 15:04:00", "") # testExtract("in a couple of hours", # "2017-06-27 15:04:00", "") # testExtract("v dvoje týden", # "2017-07-11 00:00:00", "") # testExtract("in a couple of weeks", # "2017-07-11 00:00:00", "") # testExtract("v dvoje měsíc", # "2017-08-27 00:00:00", "") # testExtract("v dvoje rok", # "2019-06-27 00:00:00", "") # testExtract("in a couple of months", # "2017-08-27 00:00:00", "") # testExtract("in a couple of years", # "2019-06-27 00:00:00", "") testExtract("v desetiletí", "2027-06-27 00:00:00", "") # testExtract("in a couple of decades", # "2037-06-27 00:00:00", "") testExtract("další desetiletí", "2027-06-27 00:00:00", "") testExtract("v století", "2117-06-27 00:00:00", "") testExtract("v tisíciletí", "3017-06-27 00:00:00", "") testExtract("v dvoje desetiletí", "2037-06-27 00:00:00", "") testExtract("v 5 desetiletí", "2067-06-27 00:00:00", "") testExtract("v dvoje století", "2217-06-27 00:00:00", "") # testExtract("in a couple of centuries", # "2217-06-27 00:00:00", "") testExtract("v 2 století", "2217-06-27 00:00:00", "") testExtract("v dvoje tisíciletí", "4017-06-27 00:00:00", "") # testExtract("in a couple of millenniums", # "4017-06-27 00:00:00", "") testExtract("v hodina", "2017-06-27 14:04:00", "") testExtract("chci to během hodiny", "2017-06-27 14:04:00", "chci to") testExtract("za 1 sekundu", "2017-06-27 13:04:01", "") testExtract("za 2 sekundy", "2017-06-27 13:04:02", "") testExtract("Nastav časovač na 1 minutu", "2017-06-27 13:05:00", "nastav časovač") testExtract("Nastav časovač na půl hodina", "2017-06-27 13:34:00", "nastav časovač") testExtract("Nastav časovač na 5 den od dnes", "2017-07-02 00:00:00", "nastav časovač") testExtract("den po zítřku", "2017-06-29 00:00:00", "") testExtract("Jaké je počasí den po zítřku?", "2017-06-29 00:00:00", "jaké je počasí") testExtract("Připomeň mi v 10:45 pm", "2017-06-27 22:45:00", "připomeň mi") testExtract("jaké je počasí v pátek ráno", "2017-06-30 08:00:00", "jaké je počasí") testExtract("jaké je zítřejší počasí", "2017-06-28 00:00:00", "jaké je počasí") testExtract("jaké je počasí toto odpoledne", "2017-06-27 15:00:00", "jaké je počasí") testExtract("jaké je počasí tento večer", "2017-06-27 19:00:00", "jaké je počasí") testExtract("jaké bylo počasí toto ráno", "2017-06-27 08:00:00", "jaké bylo počasí") testExtract("připomeň mi abych zavolal mámě v 8 týden a 2 dny", "2017-08-24 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v srpen 3", "2017-08-03 00:00:00", "připomeň mi abych zavolal mámě") # přidat i třetího slovně testExtract("připomeň mi zítra abych zavolal mámě v 7am", "2017-06-28 07:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi zítra abych zavolal mámě v 10pm", "2017-06-28 22:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 7am", "2017-06-28 07:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v hodina", "2017-06-27 14:04:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 1730", "2017-06-27 17:30:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 0630", "2017-06-28 06:30:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 06 30 hodina", "2017-06-28 06:30:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 06 30", "2017-06-28 
06:30:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 06 30 hodina", "2017-06-28 06:30:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 7 hodin", "2017-06-27 19:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě večer v 7 hodin", "2017-06-27 19:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 7 hodin večer", "2017-06-27 19:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 7 hodin ráno", "2017-06-28 07:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v Čtvrtek večer v 7 hodin", "2017-06-29 19:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v Čtvrtek ráno v 7 hodin", "2017-06-29 07:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 7 hodin Čtvrtek ráno", "2017-06-29 07:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek ráno", "2017-06-29 07:00:00", "připomeň mi abych zavolal mámě") # TODO: This test is imperfect due to "at 7:00" still in the # remainder. But let it pass for now since time is correct testExtract("připomeň mi abych zavolal mámě v 7:00 Čtvrtek večer", "2017-06-29 19:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 8 Středa večer", "2017-06-28 20:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 8 Středa v večer", "2017-06-28 20:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě Středa večer v 8", "2017-06-28 20:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za dvě hodiny", "2017-06-27 15:04:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za 2 hodiny", "2017-06-27 15:04:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za 15 minut", "2017-06-27 13:19:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za patnáct minut", "2017-06-27 13:19:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za půl hodina", "2017-06-27 13:34:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za půl hodina", "2017-06-27 13:34:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za čtvrt hodina", "2017-06-27 13:19:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě za čtvrt hodina", "2017-06-27 13:19:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 10am 2 den po této sobota", "2017-07-03 10:00:00", "připomeň mi abych zavolal mámě") testExtract("Přehraj Rick Astley hudbu 2 dny od Pátek", "2017-07-02 00:00:00", "přehraj rick astley hudbu") testExtract("Začni invazi v 3:45 pm v Čtvrtek", "2017-06-29 15:45:00", "začni invazi") testExtract("V Pondělí, objednej koláč z pekárny", "2017-07-03 00:00:00", "objednej koláč z pekárny") testExtract("Přehraj Happy Birthday hudbu 5 roků od dnes", "2022-06-27 00:00:00", "přehraj happy birthday hudbu") testExtract("Skype Mámě v 12:45 pm další Čtvrtek", "2017-07-06 12:45:00", "skype mámě") testExtract("Jaké je počasí příští Pátek?", "2017-06-30 00:00:00", "jaké je počasí") testExtract("Jaké je počasí příští Středa?", "2017-07-05 00:00:00", "jaké je počasí") testExtract("Jaké je počasí příští Čtvrtek?", "2017-07-06 00:00:00", 
"jaké je počasí") testExtract("Jaké je počasí příští pátek ráno", "2017-06-30 08:00:00", "jaké je počasí") testExtract("jaké je počasí příští pátek večer", "2017-06-30 19:00:00", "jaké je počasí") testExtract("jaké je počasí příští pátek odpoledne", "2017-06-30 15:00:00", "jaké je počasí") testExtract("připomeň mi abych zavolal mámě v srpen třetího", "2017-08-03 00:00:00", "připomeň mi abych zavolal mámě") testExtract("Kup ohňostroj v 4 Červenec", "2017-07-04 00:00:00", "kup ohňostroj") testExtract("jaké je počasí 2 týdny od další pátek", "2017-07-14 00:00:00", "jaké je počasí") testExtract("jaké je počasí Středa v 0700 hodina", "2017-06-28 07:00:00", "jaké je počasí") testExtract("Nastav budík Středa v 7 hodin", "2017-06-28 07:00:00", "nastav budík") testExtract("Nastav schůzku v 12:45 pm další Čtvrtek", "2017-07-06 12:45:00", "nastav schůzku") testExtract("Jaké je počasí tento Čtvrtek?", "2017-06-29 00:00:00", "jaké je počasí") testExtract("nastav návštěvu na 2 týdny a 6 dní od Sobota", "2017-07-21 00:00:00", "nastav návštěvu") testExtract("Zahaj invazi v 03 45 v Čtvrtek", "2017-06-29 03:45:00", "zahaj invazi") testExtract("Zahaj invazi v 800 hodin v Čtvrtek", "2017-06-29 08:00:00", "zahaj invazi") testExtract("Zahaj párty v 8 hodin v večer v Čtvrtek", "2017-06-29 20:00:00", "zahaj párty") testExtract("Zahaj invazi v 8 v večer v Čtvrtek", "2017-06-29 20:00:00", "zahaj invazi") testExtract("Zahaj invazi v Čtvrtek v poledne", "2017-06-29 12:00:00", "zahaj invazi") testExtract("Zahaj invazi v Čtvrtek v půlnoc", "2017-06-29 00:00:00", "zahaj invazi") testExtract("Zahaj invazi v Čtvrtek v 0500", "2017-06-29 05:00:00", "zahaj invazi") testExtract("připomeň mi abych vstal v 4 roky", "2021-06-27 00:00:00", "připomeň mi abych vstal") testExtract("připomeň mi abych vstal v 4 roky a 4 dny", "2021-07-01 00:00:00", "připomeň mi abych vstal") testExtract("jaké je počasí 3 dny po zítra?", "2017-07-01 00:00:00", "jaké je počasí") testExtract("prosinec 3", "2017-12-03 00:00:00", "") testExtract("sejdeme se v 8:00 dnes večer", "2017-06-27 20:00:00", "sejdeme se") testExtract("sejdeme se v 5pm", "2017-06-27 17:00:00", "sejdeme se") testExtract("sejdeme se v 8 am", "2017-06-28 08:00:00", "sejdeme se") testExtract("připomeň mi abych vstal v 8 am", "2017-06-28 08:00:00", "připomeň mi abych vstal") testExtract("jaké je počasí v úterý", "2017-06-27 00:00:00", "jaké je počasí") testExtract("jaké je počasí v pondělí", "2017-07-03 00:00:00", "jaké je počasí") testExtract("jaké je počasí toto Středa", "2017-06-28 00:00:00", "jaké je počasí") testExtract("v Čtvrtek jaké je počasí", "2017-06-29 00:00:00", "jaké je počasí") testExtract("tento Čtvrtek jaké je počasí", "2017-06-29 00:00:00", "jaké je počasí") testExtract("poslední pondělí jaké bylo počasí", "2017-06-26 00:00:00", "jaké bylo počasí") testExtract("nastav budík na Středa večer v 8", "2017-06-28 20:00:00", "nastav budík") testExtract("nastav budík na Středa v 3 hodiny v odpoledne", "2017-06-28 15:00:00", "nastav budík") testExtract("nastav budík na Středa v 3 hodiny v ráno", "2017-06-28 03:00:00", "nastav budík") testExtract("nastav budík na Středa ráno v 7 hodin", "2017-06-28 07:00:00", "nastav budík") testExtract("nastav budík na dnes v 7 hodin", "2017-06-27 19:00:00", "nastav budík") testExtract("nastav budík na tento večer v 7 hodin", "2017-06-27 19:00:00", "nastav budík") # TODO: This test is imperfect due to the "at 7:00" still in the # remainder. 
But let it pass for now since time is correct testExtract("nastav budík na tento večer v 7:00", "2017-06-27 19:00:00", "nastav budík v 7:00") testExtract("večer v červen 5 2017 připomeň mi" + " abych zavolal mámě", "2017-06-05 19:00:00", "připomeň mi abych zavolal mámě") # TODO: This test is imperfect due to the missing "for" in the # remainder. But let it pass for now since time is correct testExtract("aktualizuj můj kalendář na ranní schůzku s julius" + " v březnu 4", "2018-03-04 08:00:00", "aktualizuj můj kalendář schůzku s julius") testExtract("připomeň mi abych zavolal mámě další úterý", "2017-07-04 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě 3 týdny", "2017-07-18 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 8 týdny", "2017-08-22 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 8 týdny a 2 dny", "2017-08-24 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 4 dny", "2017-07-01 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 3 měsíce", "2017-09-27 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 2 roky a 2 dny", "2019-06-29 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě další týden", "2017-07-04 00:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 10am v Sobota", "2017-07-01 10:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 10am tato Sobota", "2017-07-01 10:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 10 další Sobota", "2017-07-01 10:00:00", "připomeň mi abych zavolal mámě") testExtract("připomeň mi abych zavolal mámě v 10am další Sobota", "2017-07-01 10:00:00", "připomeň mi abych zavolal mámě") # test yesterday testExtract("jaký den byl včera", "2017-06-26 00:00:00", "jaký den byl") testExtract("jaký den byl den před včera", "2017-06-25 00:00:00", "jaký den byl") testExtract("měl jsem večeři včera v 6", "2017-06-26 06:00:00", "měl jsem večeři") testExtract("měl jsem večeři včera v 6 am", "2017-06-26 06:00:00", "měl jsem večeři") testExtract("měl jsem večeři včera v 6 pm", "2017-06-26 18:00:00", "měl jsem večeři") # Below two tests, ensure that time is picked # even if no am/pm is specified # in case of weekdays/tonight testExtract("nastav budík na 9 o víkendech", "2017-06-27 21:00:00", "nastav budík víkendech") testExtract("na 8 dnes večer", "2017-06-27 20:00:00", "") testExtract("na 8:30pm dnes večer", "2017-06-27 20:30:00", "") # Tests a time with ':' & without am/pm testExtract("nastav budík na dnes večer 9:30", "2017-06-27 21:30:00", "nastav budík") testExtract("nastav budík na 9:00 na dnes večer", "2017-06-27 21:00:00", "nastav budík") # Check if it picks intent irrespective of correctness testExtract("nastav budík na 9 hodin dnes večer", "2017-06-27 21:00:00", "nastav budík") testExtract("připomeň mi hru dnes v noci v 11:30", "2017-06-27 23:30:00", "připomeň mi hru") testExtract("nastav budík v 7:30 o výkendech", "2017-06-27 19:30:00", "nastav budík o výkendech") # "# days <from X/after X>" testExtract("mé narozeniny jsou 2 dny od dnes", "2017-06-29 00:00:00", "mé narozeniny jsou") testExtract("mé narozeniny jsou 2 dny po dnes", "2017-06-29 00:00:00", "mé narozeniny jsou") testExtract("mé narozeniny jsou 2 dny od zítra", "2017-06-30 00:00:00", "mé narozeniny jsou") 
testExtract("mé narozeniny jsou 2 dny od zítra", "2017-06-30 00:00:00", "mé narozeniny jsou") testExtract("připomeň mi abych zavolal mámě v 10am 2 dny po další Sobota", "2017-07-10 10:00:00", "připomeň mi abych zavolal mámě") testExtract("mé narozeniny jsou 2 dny od včera", "2017-06-28 00:00:00", "mé narozeniny jsou") testExtract("mé narozeniny jsou 2 dny po včera", "2017-06-28 00:00:00", "mé narozeniny jsou") # "# days ago>" testExtract("mé narozeniny byly před 1 den", "2017-06-26 00:00:00", "mé narozeniny byly") testExtract("mé narozeniny byly před 2 dny", "2017-06-25 00:00:00", "mé narozeniny byly") testExtract("mé narozeniny byly před 3 dny", "2017-06-24 00:00:00", "mé narozeniny byly") testExtract("mé narozeniny byly před 4 dny", "2017-06-23 00:00:00", "mé narozeniny byly") # TODO this test is imperfect due to "tonight" in the reminder, but let is pass since the date is correct testExtract("sejdeme se dnes v noci", "2017-06-27 22:00:00", "sejdeme se noci") # TODO this test is imperfect due to "at night" in the reminder, but let is pass since the date is correct testExtract("sejdeme se později v noci", "2017-06-27 22:00:00", "sejdeme se později v noci") # TODO this test is imperfect due to "night" in the reminder, but let is pass since the date is correct testExtract("Jaké bude počasí zítra v noci", "2017-06-28 22:00:00", "jaké bude počasí v noci") # TODO this test is imperfect due to "night" in the reminder, but let is pass since the date is correct testExtract("jaké bude počasí příští úterý v noci", "2017-07-04 22:00:00", "jaké bude počasí v noci") def test_extract_ambiguous_time_cs(self): morning = datetime(2017, 6, 27, 8, 1, 2) večer = datetime(2017, 6, 27, 20, 1, 2) noonish = datetime(2017, 6, 27, 12, 1, 2) self.assertEqual( extract_datetime('krmení ryb'), None) self.assertEqual( extract_datetime('den'), None) self.assertEqual( extract_datetime('týden'), None) self.assertEqual( extract_datetime('měsíc'), None) self.assertEqual( extract_datetime('rok'), None) self.assertEqual( extract_datetime(' '), None) self.assertEqual( extract_datetime('nakrmit ryby v 10 hodin', morning)[0], datetime(2017, 6, 27, 10, 0, 0)) self.assertEqual( extract_datetime('nakrmit ryby v 10 hodin', noonish)[0], datetime(2017, 6, 27, 22, 0, 0)) self.assertEqual( extract_datetime('nakrmit ryby v 10 hodin', večer)[0], datetime(2017, 6, 27, 22, 0, 0)) """ In Czech is May and may have different format def test_extract_date_with_may_I_cs(self): now = datetime(2019, 7, 4, 8, 1, 2) may_date = datetime(2019, 5, 2, 10, 11, 20) self.assertEqual( extract_datetime('Můžu vědět jaký je to čas zítra', now)[0], datetime(2019, 7, 5, 0, 0, 0)) self.assertEqual( extract_datetime('Můžu vědět kdy je 10 hodin', now)[0], datetime(2019, 7, 4, 10, 0, 0)) self.assertEqual( extract_datetime('24. 
můžu chtít připomenutí', may_date)[0], datetime(2019, 5, 24, 0, 0, 0)) """ def test_extract_relativedatetime_cs(self): def extractWithFormat(text): date = datetime(2017, 6, 27, 10, 1, 2) [extractedDate, leftover] = extract_datetime(text, date) extractedDate = extractedDate.strftime("%Y-%m-%d %H:%M:%S") return [extractedDate, leftover] def testExtract(text, expected_date, expected_leftover): res = extractWithFormat(normalize(text)) self.assertEqual(res[0], expected_date, "for=" + text) self.assertEqual(res[1], expected_leftover, "for=" + text) testExtract("sejdeme se za 5 minut", "2017-06-27 10:06:02", "sejdeme se") testExtract("sejdeme se za 5minut", "2017-06-27 10:06:02", "sejdeme se") testExtract("sejdeme se za 5 sekund", "2017-06-27 10:01:07", "sejdeme se") testExtract("sejdeme se za 1 hodinu", "2017-06-27 11:01:02", "sejdeme se") testExtract("sejdeme se za 2 hodiny", "2017-06-27 12:01:02", "sejdeme se") print("TODO") # Need better normaliting procedure for czech inflexion # testExtract("sejdeme se za 2hodiny", # "2017-06-27 12:01:02", "sejdeme se") testExtract("sejdeme se za 1 minutu", "2017-06-27 10:02:02", "sejdeme se") testExtract("sejdeme se za 1 sekundu", "2017-06-27 10:01:03", "sejdeme se") testExtract("sejdeme se za 5sekund", "2017-06-27 10:01:07", "sejdeme se") def test_spaces(self): self.assertEqual(normalize(" tohle je test"), "tohle je test") self.assertEqual(normalize(" tohle je test "), "tohle je test") self.assertEqual(normalize(" tohle je jedna test"), "tohle je 1 test") def test_numbers(self): self.assertEqual(normalize("tohle je jedna dva tři test"), "tohle je 1 2 3 test") self.assertEqual(normalize(" to je čtyři pět šest test"), "to je 4 5 6 test") self.assertEqual(normalize("to je sedum osum devět test"), "to je 7 8 9 test") self.assertEqual(normalize("to je sedm osm devět test"), "to je 7 8 9 test") self.assertEqual(normalize("tohle je deset jedenáct dvanáct test"), "tohle je 10 11 12 test") self.assertEqual(normalize("tohle je třináct čtrnáct test"), "tohle je 13 14 test") self.assertEqual(normalize("tohle je patnáct šestnáct sedmnáct"), "tohle je 15 16 17") self.assertEqual(normalize("tohle je osmnáct devatenáct dvacet"), "tohle je 18 19 20") self.assertEqual(normalize("tohle je jedna devatenáct dvacet dva"), "tohle je 1 19 20 2") self.assertEqual(normalize("tohle je jedna sto"), "tohle je 1 sto") self.assertEqual(normalize("tohle je jedna dva dvacet dva"), "tohle je 1 2 20 2") self.assertEqual(normalize("tohle je jedna a půl"), "tohle je 1 a půl") self.assertEqual(normalize("tohle je jedna a půl a pět šest"), "tohle je 1 a půl a 5 6") def test_multiple_numbers(self): self.assertEqual(extract_numbers("tohle je jedna dva tři test"), [1.0, 2.0, 3.0]) self.assertEqual(extract_numbers("to je čtyři pět šest test"), [4.0, 5.0, 6.0]) self.assertEqual(extract_numbers("tohle je deset jedenáct dvanáct test"), [10.0, 11.0, 12.0]) self.assertEqual(extract_numbers("tohle je jedna dvacet jedna test"), [1.0, 21.0]) self.assertEqual(extract_numbers("1 pes, sedm prasat, macdonald měl " "farmu, 3 krát 5 makaréna"), [1, 7, 3, 5]) self.assertEqual(extract_numbers("dva piva pro dva medvědy"), [2.0, 2.0]) self.assertEqual(extract_numbers("dvacet 20 dvacet"), [20, 20, 20]) self.assertEqual(extract_numbers("dvacet 20 22"), [20.0, 20.0, 22.0]) self.assertEqual(extract_numbers("dvacet dvacet dva dvacet"), [20, 22, 20]) self.assertEqual(extract_numbers("dvacet 2"), [22.0]) self.assertEqual(extract_numbers("dvacet 20 dvacet 2"), [20, 20, 22]) self.assertEqual(extract_numbers("třetina 
jedna"), [1 / 3, 1]) self.assertEqual(extract_numbers("třetí", ordinals=True), [3]) self.assertEqual(extract_numbers("šest trillion", short_scale=True), [6e12]) self.assertEqual(extract_numbers("šest trilion", short_scale=False), [6e18]) self.assertEqual(extract_numbers("dvě prasátka a šest trillion bakterií", short_scale=True), [2, 6e12]) self.assertEqual(extract_numbers("dvě prasátka a šest trilion bakterií", short_scale=False), [2, 6e18]) self.assertEqual(extract_numbers("třicátý druhý nebo první", ordinals=True), [32, 1]) self.assertEqual(extract_numbers("tohle je sedm osm devět a" " půl test"), [7.0, 8.0, 9.5]) if __name__ == "__main__": unittest.main()
54.208333
114
0.564851
40,211
0.969804
0
0
0
0
0
0
20,903
0.504136
78f63355867462f1a454c939b07a72f40e12bd55
955
py
Python
src/net/pluto_ftp.py
WardenAllen/Uranus
0d20cac631320b558254992c17678ddd1658587b
[ "MIT" ]
null
null
null
src/net/pluto_ftp.py
WardenAllen/Uranus
0d20cac631320b558254992c17678ddd1658587b
[ "MIT" ]
null
null
null
src/net/pluto_ftp.py
WardenAllen/Uranus
0d20cac631320b558254992c17678ddd1658587b
[ "MIT" ]
null
null
null
# !/usr/bin/python
# -*- coding: utf-8 -*-
# @Time   : 2020/9/18 12:02
# @Author : WardenAllen
# @File   : pluto_ftp.py
# @Brief  :

import paramiko


class PlutoFtp :

    # paramiko's Sftp() object.
    __sftp = object

    def connect_by_pass(self, host, port, uname, pwd):
        transport = paramiko.Transport((host, port))
        transport.connect(username=uname, password=pwd)
        self.__sftp = paramiko.SFTPClient.from_transport(transport)

    def connect_by_key(self, host, port, uname, key_path, key_pass = ''):
        key = paramiko.RSAKey.from_private_key_file(key_path, key_pass)
        transport = paramiko.Transport((host, port))
        transport.connect(username=uname, pkey=key)
        self.__sftp = paramiko.SFTPClient.from_transport(transport)

    def get(self, remote, local, cb = None):
        self.__sftp.get(remote, local, cb)

    def put(self, local, remote, cb = None):
        self.__sftp.put(local, remote, cb)
31.833333
73
0.655497
800
0.837696
0
0
0
0
0
0
160
0.167539
78f6f92a5932a9d711316ff3341b072e7d33ca29
99
py
Python
piped/processors/test/__init__.py
alexbrasetvik/Piped
0312c14d6c4c293df378c915cc9787bcc7faed36
[ "MIT" ]
3
2015-02-12T20:34:30.000Z
2016-08-06T06:54:48.000Z
piped/processors/test/__init__.py
alexbrasetvik/Piped
0312c14d6c4c293df378c915cc9787bcc7faed36
[ "MIT" ]
null
null
null
piped/processors/test/__init__.py
alexbrasetvik/Piped
0312c14d6c4c293df378c915cc9787bcc7faed36
[ "MIT" ]
2
2015-12-16T14:18:14.000Z
2019-04-12T01:43:10.000Z
# Copyright (c) 2010-2011, Found IT A/S and Piped Project Contributors.
# See LICENSE for details.
33
71
0.747475
0
0
0
0
0
0
0
0
97
0.979798
78f83610f02792ce2cf026a72886ebff9b5ef71f
579
py
Python
assistance_bot/app.py
reakfog/personal_computer_voice_assistant
3483f633c57cd2e930f94bcbda9739cde34525aa
[ "BSD-3-Clause" ]
null
null
null
assistance_bot/app.py
reakfog/personal_computer_voice_assistant
3483f633c57cd2e930f94bcbda9739cde34525aa
[ "BSD-3-Clause" ]
null
null
null
assistance_bot/app.py
reakfog/personal_computer_voice_assistant
3483f633c57cd2e930f94bcbda9739cde34525aa
[ "BSD-3-Clause" ]
2
2021-07-26T20:22:31.000Z
2021-07-29T12:58:03.000Z
import sys

sys.path = ['', '..'] + sys.path[1:]

import daemon

from assistance_bot import core
from functionality.voice_processing import speaking, listening
from functionality.commands import *


if __name__ == '__main__':
    speaking.setup_assistant_voice(core.ttsEngine, core.assistant)
    while True:
        # start speech recording and speech recognition
        recognized_speech = listening.get_listening_and_recognition_result(
            core.recognizer, core.microphone)
        # executing the given command
        execute_command(recognized_speech)
32.166667
75
0.723661
0
0
0
0
0
0
0
0
92
0.158895
78f942b69039b6e57cce7169cc8dc3ffec50e359
107
py
Python
python/testData/resolve/AssignmentExpressionsAndOuterVar.py
tgodzik/intellij-community
f5ef4191fc30b69db945633951fb160c1cfb7b6f
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/resolve/AssignmentExpressionsAndOuterVar.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2022-02-19T09:45:05.000Z
2022-02-27T20:32:55.000Z
python/testData/resolve/AssignmentExpressionsAndOuterVar.py
Cyril-lamirand/intellij-community
60ab6c61b82fc761dd68363eca7d9d69663cfa39
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
total = 0
partial_sums = [total := total + v for v in values]
print("Total:", total)
<ref>
26.75
51
0.551402
0
0
0
0
0
0
0
0
8
0.074766
78fa9f898e64c035eed240732e89631cf36a87b3
18,049
py
Python
exhale/deploy.py
florianhumblot/exhale
d6fa84fa32ee079c6b70898a1b0863a38e703591
[ "BSD-3-Clause" ]
null
null
null
exhale/deploy.py
florianhumblot/exhale
d6fa84fa32ee079c6b70898a1b0863a38e703591
[ "BSD-3-Clause" ]
null
null
null
exhale/deploy.py
florianhumblot/exhale
d6fa84fa32ee079c6b70898a1b0863a38e703591
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf8 -*- ######################################################################################## # This file is part of exhale. Copyright (c) 2017-2022, Stephen McDowell. # # Full BSD 3-Clause license available here: # # # # https://github.com/svenevs/exhale/blob/master/LICENSE # ######################################################################################## ''' The deploy module is responsible for two primary actions: 1. Executing Doxygen (if requested in ``exhale_args``). 2. Launching the full API generation via the :func:`~exhale.deploy.explode` function. ''' from __future__ import unicode_literals from . import configs from . import utils from .graph import ExhaleRoot import os import sys import six import re import codecs import tempfile import textwrap from subprocess import PIPE, Popen, STDOUT def _generate_doxygen(doxygen_input): ''' This method executes doxygen based off of the specified input. By the time this method is executed, it is assumed that Doxygen is intended to be run in the **current working directory**. Search for ``returnPath`` in the implementation of :func:`~exhale.configs.apply_sphinx_configurations` for handling of this aspect. This method is intended to be called by :func:`~exhale.deploy.generateDoxygenXML`, which is in turn called by :func:`~exhale.configs.apply_sphinx_configurations`. Two versions of the doxygen command can be executed: 1. If ``doxygen_input`` is exactly ``"Doxyfile"``, then it is assumed that a ``Doxyfile`` exists in the **current working directory**. Meaning the command being executed is simply ``doxygen``. 2. For all other values, ``doxygen_input`` represents the arguments as to be specified on ``stdin`` to the process. **Parameters** ``doxygen_input`` (str) Either the string ``"Doxyfile"`` to run vanilla ``doxygen``, or the selection of doxygen inputs (that would ordinarily be in a ``Doxyfile``) that will be ``communicate``d to the ``doxygen`` process on ``stdin``. .. note:: If using Python **3**, the input **must** still be a ``str``. This method will convert the input to ``bytes`` as follows: .. code-block:: py if sys.version[0] == "3": doxygen_input = bytes(doxygen_input, "utf-8") **Return** ``str`` or ``None`` If an error occurs, a string describing the error is returned with the intention of the caller raising the exception. If ``None`` is returned, then the process executed without error. Example usage: .. code-block:: py status = _generate_doxygen("Doxygen") if status: raise RuntimeError(status) Though a little awkward, this is done to enable the intended caller of this method to restore some state before exiting the program (namely, the working directory before propagating an exception to ``sphinx-build``). ''' if not isinstance(doxygen_input, six.string_types): return "Error: the `doxygen_input` variable must be of type `str`." doxyfile = doxygen_input == "Doxyfile" try: # Setup the arguments to launch doxygen if doxyfile: args = ["doxygen"] kwargs = {} else: args = ["doxygen", "-"] kwargs = {"stdin": PIPE} if configs._on_rtd: # On RTD, any capturing of Doxygen output can cause buffer overflows for # even medium sized projects. So it is disregarded entirely to ensure the # build will complete (otherwise, it silently fails after `cat conf.py`) devnull_file = open(os.devnull, "w") kwargs["stdout"] = devnull_file kwargs["stderr"] = STDOUT else: # TL;DR: strictly enforce that (verbose) doxygen output doesn't cause the # `communicate` to hang due to buffer overflows. 
# # See excellent synopsis: # https://thraxil.org/users/anders/posts/2008/03/13/Subprocess-Hanging-PIPE-is-your-enemy/ if six.PY2: tempfile_kwargs = {} else: # encoding argument introduced in python 3 tempfile_kwargs = {"encoding": "utf-8"} tempfile_kwargs["mode"] = "r+" tmp_out_file = tempfile.TemporaryFile( prefix="doxygen_stdout_buff", **tempfile_kwargs ) tmp_err_file = tempfile.TemporaryFile( prefix="doxygen_stderr_buff", **tempfile_kwargs ) # Write to the tempfiles over PIPE to avoid buffer overflowing kwargs["stdout"] = tmp_out_file kwargs["stderr"] = tmp_err_file # Note: overload of args / kwargs, Popen is expecting a list as the first # parameter (aka no *args, just args)! doxygen_proc = Popen(args, **kwargs) # Communicate can only be called once, arrange whether or not stdin has value if not doxyfile: # In Py3, make sure we are communicating a bytes-like object which is no # longer interchangeable with strings (as was the case in Py2). if sys.version[0] == "3": doxygen_input = bytes(doxygen_input, "utf-8") comm_kwargs = {"input": doxygen_input} else: comm_kwargs = {} # Waits until doxygen has completed doxygen_proc.communicate(**comm_kwargs) # Print out what was written to the tmpfiles by doxygen if not configs._on_rtd and not configs.exhaleSilentDoxygen: # Doxygen output (some useful information, mostly just enumeration of the # configurations you gave it {useful for debugging...}) if tmp_out_file.tell() > 0: tmp_out_file.seek(0) print(tmp_out_file.read()) # Doxygen error (e.g. any warnings, or invalid input) if tmp_err_file.tell() > 0: # Making them stick out, ideally users would reduce this output to 0 ;) # This will print a yellow [~] before every line, but not make the # entire line yellow because it's definitively not helpful prefix = utils._use_color( utils.prefix("[~]", " "), utils.AnsiColors.BOLD_YELLOW, sys.stderr ) tmp_err_file.seek(0) sys.stderr.write(utils.prefix(prefix, tmp_err_file.read())) # Close the file handles opened for communication with subprocess if configs._on_rtd: devnull_file.close() else: # Delete the tmpfiles tmp_out_file.close() tmp_err_file.close() # Make sure we had a valid execution of doxygen exit_code = doxygen_proc.returncode if exit_code != 0: raise RuntimeError("Non-zero return code of [{0}] from 'doxygen'...".format(exit_code)) except Exception as e: return "Unable to execute 'doxygen': {0}".format(e) # returning None signals _success_ return None def _valid_config(config, required): ''' .. todo:: add documentation of this method ``config``: doxygen input we're looking for ``required``: if ``True``, must be present. if ``False``, NOT ALLOWED to be present ''' re_template = r"\s*{config}\s*=.*".format(config=config) found = re.search(re_template, configs.exhaleDoxygenStdin) if required: return found is not None else: return found is None def generateDoxygenXML(): # If this happens, we really shouldn't be here... if not configs.exhaleExecutesDoxygen: return textwrap.dedent(''' `generateDoxygenXML` should *ONLY* be called internally. You should set `exhaleExecutesDoxygen=True` in `exhale_args` in `conf.py`. ''') # Case 1: the user has their own `Doxyfile`. if configs.exhaleUseDoxyfile: return _generate_doxygen("Doxyfile") # Case 2: use stdin, with some defaults and potentially additional specs from user else: # There are two doxygen specs that we explicitly disallow # # 1. OUTPUT_DIRECTORY: this is *ALREADY* specified implicitly via breathe # 2. 
STRIP_FROM_PATH: this is a *REQUIRED* config (`doxygenStripFromPath`) # # There is one doxygen spec that is REQUIRED to be given: # # 1. INPUT (where doxygen should parse). # # The below is a modest attempt to validate that these were / were not given. if not isinstance(configs.exhaleDoxygenStdin, six.string_types): return "`exhaleDoxygenStdin` config must be a string!" if not _valid_config("OUTPUT_DIRECTORY", False): # If we are hitting this code, these should both exist and be configured # since this method is called **AFTER** the configuration verification code # performed in configs.apply_sphinx_configurations breathe_projects = configs._the_app.config.breathe_projects breathe_default_project = configs._the_app.config.breathe_default_project return textwrap.dedent(''' `exhaleDoxygenStdin` may *NOT* specify `OUTPUT_DIRECTORY`. Exhale does this internally by reading what you provided to `breathe_projects` in your `conf.py`. Based on what you had in `conf.py`, Exhale will be using - The `breathe_default_project`: {default} - The output path specfied (`breathe_projects[breathe_default_project]`): {path} NOTE: the above path has the `xml` portion removed from what you provided. This path is what is sent to Doxygen, Breathe requires you include the `xml` directory path; so Exhale simply re-uses this variable and adapts the value for our needs. '''.format( default=breathe_default_project, path=breathe_projects[breathe_default_project].rsplit("{sep}xml".format(sep=os.sep), 1)[0] )) if not _valid_config("STRIP_FROM_PATH", False): return textwrap.dedent(''' `exhaleDoxygenStdin` may *NOT* specify `STRIP_FROM_PATH`. Exhale does this internally by using the value you provided to `exhale_args` in your `conf.py` for the key `doxygenStripFromPath`. Based on what you had in `conf.py`, Exhale will be using: {strip} NOTE: the above is what you specified directly in `exhale_args`. Exhale will be using an absolute path to send to Doxygen. It is: {absolute} '''.format( strip=configs._the_app.config.exhale_args["doxygenStripFromPath"], absolute=configs.doxygenStripFromPath )) if not _valid_config("INPUT", True): return textwrap.dedent(''' `exhaleDoxygenStdin` *MUST* specify the `INPUT` doxygen config variable. The INPUT variable is what tells Doxygen where to look for code to extract documentation from. For example, if you had a directory layout project_root/ docs/ conf.py Makefile ... etc ... include/ my_header.hpp src/ my_header.cpp Then you would include the line INPUT = ../include in the string provided to `exhale_args["exhaleDoxygenStdin"]`. ''') # For these, we just want to warn them of the impact but still allow an override re_template = r"\s*{config}\s*=\s*(.*)" for cfg in ("ALIASES", "PREDEFINED"): found = re.search(re_template.format(config=cfg), configs.exhaleDoxygenStdin) if found: sys.stderr.write(utils.info(textwrap.dedent(''' You have supplied to `exhaleDoxygenStdin` a configuration of: {cfg} = {theirs} This has an important impact, as it overrides a default setting that Exhale is using. 1. If you are intentionally overriding this configuration, simply ignore this message --- what you intended will happen. 2. If you meant to _continue_ adding to the defaults Exhale provides, you need to use a `+=` instead of a raw `=`. So do instead {cfg} += {theirs} '''.format(cfg=cfg, theirs=found.groups()[0])), utils.AnsiColors.BOLD_YELLOW)) # Include their custom doxygen definitions after the defaults so that they can # override anything they want to. Populate the necessary output dir and strip path. 
doxy_dir = configs._doxygen_xml_output_directory.rsplit("{sep}xml".format(sep=os.sep), 1)[0] internal_configs = textwrap.dedent(''' # Tell doxygen to output wherever breathe is expecting things OUTPUT_DIRECTORY = "{out}" # Tell doxygen to strip the path names (RTD builds produce long abs paths...) STRIP_FROM_PATH = "{strip}" '''.format(out=doxy_dir, strip=configs.doxygenStripFromPath)) external_configs = textwrap.dedent(configs.exhaleDoxygenStdin) # Place external configs last so that if the _valid_config method isn't actually # catching what it should be, the internal configs will override theirs full_input = "{base}\n{external}\n{internal}\n\n".format(base=configs.DEFAULT_DOXYGEN_STDIN_BASE, external=external_configs, internal=internal_configs) # << verboseBuild if configs.verboseBuild: msg = "[*] The following input will be sent to Doxygen:\n" if not configs.alwaysColorize and not sys.stderr.isatty(): sys.stderr.write(msg) sys.stderr.write(full_input) else: sys.stderr.write(utils.colorize(msg, utils.AnsiColors.BOLD_CYAN)) sys.stderr.write(utils.__fancy(full_input, "make", "console")) return _generate_doxygen(full_input) ######################################################################################## # ## ### #### ##### Primary entry point. #### ### ## # ######################################################################################## def explode(): ''' This method **assumes** that :func:`~exhale.configs.apply_sphinx_configurations` has already been applied. It performs minimal sanity checking, and then performs in order 1. Creates a :class:`~exhale.graph.ExhaleRoot` object. 2. Executes :func:`~exhale.graph.ExhaleRoot.parse` for this object. 3. Executes :func:`~exhale.graph.ExhaleRoot.generateFullAPI` for this object. 4. Executes :func:`~exhale.graph.ExhaleRoot.toConsole` for this object (which will only produce output when :data:`~exhale.configs.verboseBuild` is ``True``). This results in the full API being generated, and control is subsequently passed back to Sphinx to now read in the source documents (many of which were just generated in :data:`~exhale.configs.containmentFolder`), and proceed to writing the final output. ''' # Quick sanity check to make sure the bare minimum have been set in the configs err_msg = "`configs.{config}` was `None`. Do not call `deploy.explode` directly." if configs.containmentFolder is None: raise RuntimeError(err_msg.format(config="containmentFolder")) if configs.rootFileName is None: raise RuntimeError(err_msg.format(config="rootFileName")) if configs.doxygenStripFromPath is None: raise RuntimeError(err_msg.format(config="doxygenStripFromPath")) # From here on, we assume that everything else has been checked / configured. 
try: textRoot = ExhaleRoot() except: utils.fancyError("Unable to create an `ExhaleRoot` object:") try: sys.stdout.write("{0}\n".format(utils.info("Exhale: parsing Doxygen XML."))) start = utils.get_time() textRoot.parse() end = utils.get_time() sys.stdout.write("{0}\n".format( utils.progress("Exhale: finished parsing Doxygen XML in {0}.".format( utils.time_string(start, end) )) )) except: utils.fancyError("Exception caught while parsing:") try: sys.stdout.write("{0}\n".format( utils.info("Exhale: generating reStructuredText documents.") )) start = utils.get_time() textRoot.generateFullAPI() end = utils.get_time() sys.stdout.write("{0}\n".format( utils.progress("Exhale: generated reStructuredText documents in {0}.".format( utils.time_string(start, end) )) )) except: utils.fancyError("Exception caught while generating:") # << verboseBuild # toConsole only prints if verbose mode is enabled textRoot.toConsole() # allow access to the result after-the-fact configs._the_app.exhale_root = textRoot
42.468235
106
0.588066
0
0
0
0
0
0
0
0
11,608
0.643138
78fb0646e467b92a38f001788a56ced3c1f8a48d
3,816
py
Python
src/bayesian_reliability_comparison.py
rloganiv/bayesian-blackbox
6a111553200b6aa755149e08174abe1a61d37198
[ "MIT" ]
8
2019-12-23T13:27:15.000Z
2021-12-01T13:33:34.000Z
src/bayesian_reliability_comparison.py
rloganiv/bayesian-blackbox
6a111553200b6aa755149e08174abe1a61d37198
[ "MIT" ]
11
2020-03-31T11:06:55.000Z
2022-02-10T00:39:33.000Z
src/bayesian_reliability_comparison.py
disiji/bayesian-blackbox
6a111553200b6aa755149e08174abe1a61d37198
[ "MIT" ]
2
2020-01-24T10:21:57.000Z
2020-02-22T04:41:14.000Z
import argparse
import multiprocessing
import os
import random

import numpy as np

from data_utils import DATAFILE_LIST, DATASET_LIST, prepare_data, RESULTS_DIR
from models import SumOfBetaEce

random.seed(2020)
num_cores = multiprocessing.cpu_count()

NUM_BINS = 10
NUM_RUNS = 100
N_list = [100, 200, 500, 1000, 2000, 5000, 10000]

OUTPUT_DIR = RESULTS_DIR + "bayesian_reliability_comparison/"


def main(args) -> None:
    # load data
    categories, observations, confidences, idx2category, category2idx, labels = prepare_data(
        DATAFILE_LIST[args.dataset], False)

    # train a ground_truth ece model
    if args.ground_truth_type == 'bayesian':
        ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)
    else:
        ground_truth_model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=1e-3)
    ground_truth_model.update_batch(confidences, observations)

    results = np.zeros((args.num_runs, len(N_list), 5))

    for run_id in range(args.num_runs):
        tmp = list(zip(confidences, observations))
        random.shuffle(tmp)
        confidences, observations = zip(*tmp)

        model = SumOfBetaEce(num_bins=args.num_bins, pseudocount=args.pseudocount)

        for i in range(len(N_list)):
            tmp = 0 if i == 0 else N_list[i - 1]
            model.update_batch(confidences[tmp: N_list[i]], observations[tmp: N_list[i]])

            results[run_id, i, 0] = N_list[i]
            results[run_id, i, 1] = model.eval
            results[run_id, i, 2] = model.frequentist_eval
            results[run_id, i, 3] = model.calibration_estimation_error(ground_truth_model, args.weight_type)
            results[run_id, i, 4] = model.frequentist_calibration_estimation_error(ground_truth_model,
                                                                                   args.weight_type)

    results_mean = np.mean(results, axis=0)
    results_variance = np.std(results, axis=0)

    if args.weight_type == 'online':
        OUTPUT_DIR += "online_weights/"

    try:
        os.stat(OUTPUT_DIR)
    except:
        os.mkdir(OUTPUT_DIR)

    if args.ground_truth_type == 'frequentist':
        filename_mean = OUTPUT_DIR + "frequentist_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
        filename_std = OUTPUT_DIR + "frequentist_ground_truth_%s_pseudocount%d_std.csv" % (
            args.dataset, args.pseudocount)
    else:
        filename_mean = OUTPUT_DIR + "bayesian_ground_truth_%s_pseudocount%d.csv" % (args.dataset, args.pseudocount)
        filename_std = OUTPUT_DIR + "bayesian_ground_truth_%s_pseudocount%d_std.csv" % (
            args.dataset, args.pseudocount)

    header = 'N, bayesian_ece, frequentist_ece, bayesian_estimation_error, frequentist_estimation_error'
    np.savetxt(filename_mean, results_mean, delimiter=',', header=header)
    np.savetxt(filename_std, results_variance, delimiter=',', header=header)


if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument('dataset', type=str, default='cifar100', help='input dataset')
    parser.add_argument('-pseudocount', type=int, default=1, help='strength of prior')
    parser.add_argument('-ground_truth_type', type=str, default='bayesian',
                        help='compute ground truth in a Bayesian or frequentist way, bayesian or frequentist')
    parser.add_argument('-weight_type', type=str, default='pool',
                        help='weigh each bin with all data or only data seen so far, online or pool')
    parser.add_argument('--num_runs', type=int, default=NUM_RUNS, help='number of runs')
    parser.add_argument('--num_bins', type=int, default=NUM_BINS, help='number of bins in reliability diagram')

    args, _ = parser.parse_known_args()
    if args.dataset not in DATASET_LIST:
        raise ValueError("%s is not in DATASET_LIST." % args.dataset)

    main(args)
41.032258
120
0.70152
0
0
0
0
0
0
0
0
797
0.208857
78fbbb7e97d40f03f6fe9dcf3d1d397ff5d9dbb9
29,044
py
Python
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py
SamKG/PsyNeuLink
70558bcd870868e1688cb7a7c424d29ca336f2df
[ "Apache-2.0" ]
null
null
null
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py
SamKG/PsyNeuLink
70558bcd870868e1688cb7a7c424d29ca336f2df
[ "Apache-2.0" ]
77
2020-10-01T06:27:19.000Z
2022-03-31T02:03:33.000Z
psyneulink/core/components/functions/statefulfunctions/statefulfunction.py
SamKG/PsyNeuLink
70558bcd870868e1688cb7a7c424d29ca336f2df
[ "Apache-2.0" ]
null
null
null
# # Princeton University licenses this file to You under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. You may obtain a copy of the License at: # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software distributed under the License is distributed # on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and limitations under the License. # # # ***************************************** STATEFUL FUNCTION ********************************************************* """ * `StatefulFunction` * `IntegratorFunctions` * `MemoryFunctions` """ import abc import typecheck as tc import warnings import numbers import numpy as np from psyneulink.core import llvm as pnlvm from psyneulink.core.components.component import DefaultsFlexibility, _has_initializers_setter from psyneulink.core.components.functions.function import Function_Base, FunctionError from psyneulink.core.components.functions.distributionfunctions import DistributionFunction from psyneulink.core.globals.keywords import STATEFUL_FUNCTION_TYPE, STATEFUL_FUNCTION, NOISE, RATE from psyneulink.core.globals.parameters import Parameter from psyneulink.core.globals.utilities import parameter_spec, iscompatible, object_has_single_value, convert_to_np_array from psyneulink.core.globals.preferences.basepreferenceset import is_pref_set from psyneulink.core.globals.context import ContextFlags, handle_external_context __all__ = ['StatefulFunction'] class StatefulFunction(Function_Base): # --------------------------------------------------------------------- """ StatefulFunction( \ default_variable=None, \ initializer, \ rate=1.0, \ noise=0.0, \ params=None, \ owner=None, \ prefs=None, \ ) .. _StatefulFunction: Abstract base class for Functions the result of which depend on their `previous_value <StatefulFunction.previous_value>` attribute. COMMENT: NARRATIVE HERE THAT EXPLAINS: A) initializers and stateful_attributes B) initializer (note singular) is a prespecified member of initializers that contains the value with which to initiailzer previous_value COMMENT Arguments --------- default_variable : number, list or array : default class_defaults.variable specifies a template for `variable <StatefulFunction.variable>`. initializer : float, list or 1d array : default 0.0 specifies initial value for `previous_value <StatefulFunction.previous_value>`. If it is a list or array, it must be the same length as `variable <StatefulFunction.variable>` (see `initializer <StatefulFunction.initializer>` for details). rate : float, list or 1d array : default 1.0 specifies value used as a scaling parameter in a subclass-dependent way (see `rate <StatefulFunction.rate>` for details); if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>`. noise : float, function, list or 1d array : default 0.0 specifies random value added in each call to `function <StatefulFunction.function>`; if it is a list or array, it must be the same length as `variable <StatefulFunction.default_variable>` (see `noise <StatefulFunction.noise>` for details). params : Dict[param keyword: param value] : default None a `parameter dictionary <ParameterPort_Specification>` that specifies the parameters for the function. 
Values specified for parameters in the dictionary override any assigned to those parameters in arguments of the constructor. owner : Component `component <Component>` to which to assign the Function. name : str : default see `name <Function.name>` specifies the name of the Function. prefs : PreferenceSet or specification dict : default Function.classPreferences specifies the `PreferenceSet` for the Function (see `prefs <Function_Base.prefs>` for details). Attributes ---------- variable : number or array current input value. initializer : float or 1d array determines initial value assigned to `previous_value <StatefulFunction.previous_value>`. If `variable <StatefulFunction.variable>` is a list or array, and initializer is a float or has a single element, it is applied to each element of `previous_value <StatefulFunction.previous_value>`. If initializer is a list or array,each element is applied to the corresponding element of `previous_value <Integrator.previous_value>`. previous_value : 1d array last value returned (i.e., for which state is being maintained). initializers : list stores the names of the initialization attributes for each of the stateful attributes of the function. The index i item in initializers provides the initialization value for the index i item in `stateful_attributes <StatefulFunction.stateful_attributes>`. stateful_attributes : list stores the names of each of the stateful attributes of the function. The index i item in stateful_attributes is initialized by the value of the initialization attribute whose name is stored in index i of `initializers <StatefulFunction.initializers>`. In most cases, the stateful_attributes, in that order, are the return values of the function. .. _Stateful_Rate: rate : float or 1d array on each call to `function <StatefulFunction.function>`, applied to `variable <StatefulFunction.variable>`, `previous_value <StatefulFunction.previous_value>`, neither, or both, depending on implementation by subclass. If it is a float or has a single value, it is applied to all elements of its target(s); if it has more than one element, each element is applied to the corresponding element of its target(s). .. _Stateful_Noise: noise : float, function, list, or 1d array random value added on each call to `function <StatefulFunction.function>`. If `variable <StatefulFunction.variable>` is a list or array, and noise is a float or function, it is applied for each element of `variable <StatefulFunction.variable>`. If noise is a function, it is executed and applied separately for each element of `variable <StatefulFunction.variable>`. If noise is a list or array, it is applied elementwise (i.e., in Hadamard form). .. hint:: To generate random noise that varies for every execution, a probability distribution function should be used (see `Distribution Functions <DistributionFunction>` for details), that generates a new noise value from its distribution on each execution. If noise is specified as a float, a function with a fixed output, or a list or array of either of these, then noise is simply an offset that remains the same across all executions. owner : Component `component <Component>` to which the Function has been assigned. name : str the name of the Function; if it is not specified in the **name** argument of the constructor, a default is assigned by FunctionRegistry (see `Registry_Naming` for conventions used for default and duplicate names). 
prefs : PreferenceSet or specification dict the `PreferenceSet` for the Function; if it is not specified in the **prefs** argument of the Function's constructor, a default is assigned using `classPreferences` defined in __init__.py (see `Preferences` for details). """ componentType = STATEFUL_FUNCTION_TYPE componentName = STATEFUL_FUNCTION class Parameters(Function_Base.Parameters): """ Attributes ---------- initializer see `initializer <StatefulFunction.initializer>` :default value: numpy.array([0]) :type: ``numpy.ndarray`` noise see `noise <StatefulFunction.noise>` :default value: 0.0 :type: ``float`` previous_value see `previous_value <StatefulFunction.previous_value>` :default value: numpy.array([0]) :type: ``numpy.ndarray`` rate see `rate <StatefulFunction.rate>` :default value: 1.0 :type: ``float`` """ noise = Parameter(0.0, modulable=True) rate = Parameter(1.0, modulable=True) previous_value = Parameter(np.array([0]), initializer='initializer', pnl_internal=True) initializer = Parameter(np.array([0]), pnl_internal=True) has_initializers = Parameter(True, setter=_has_initializers_setter, pnl_internal=True) @handle_external_context() @tc.typecheck def __init__(self, default_variable=None, rate=None, noise=None, initializer=None, params: tc.optional(tc.optional(dict)) = None, owner=None, prefs: tc.optional(is_pref_set) = None, context=None, **kwargs ): if not hasattr(self, "initializers"): self.initializers = ["initializer"] if not hasattr(self, "stateful_attributes"): self.stateful_attributes = ["previous_value"] super().__init__( default_variable=default_variable, rate=rate, initializer=initializer, noise=noise, params=params, owner=owner, prefs=prefs, context=context, **kwargs ) def _validate(self, context=None): self._validate_rate(self.defaults.rate) self._validate_initializers(self.defaults.variable, context=context) super()._validate(context=context) def _validate_params(self, request_set, target_set=None, context=None): # Handle list or array for rate specification if RATE in request_set: rate = request_set[RATE] if isinstance(rate, (list, np.ndarray)) and not iscompatible(rate, self.defaults.variable): if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size: # If the variable was not specified, then reformat it to match rate specification # and assign class_defaults.variable accordingly # Note: this situation can arise when the rate is parametrized (e.g., as an array) in the # StatefulFunction's constructor, where that is used as a specification for a function parameter # (e.g., for an IntegratorMechanism), whereas the input is specified as part of the # object to which the function parameter belongs (e.g., the IntegratorMechanism); in that # case, the StatefulFunction gets instantiated using its class_defaults.variable ([[0]]) before # the object itself, thus does not see the array specification for the input. 
                    if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
                        self._instantiate_defaults(variable=np.zeros_like(np.array(rate)), context=context)
                        if self.verbosePref:
                            warnings.warn(
                                "The length ({}) of the array specified for the rate parameter ({}) of {} "
                                "must match the length ({}) of the default input ({}); "
                                "the default input has been updated to match".format(
                                    len(rate),
                                    rate,
                                    self.name,
                                    np.array(self.defaults.variable).size,
                                    self.defaults.variable,
                                )
                            )
                    else:
                        raise FunctionError(
                            "The length of the array specified for the rate parameter of {} ({}) "
                            "must match the length of the default input ({}).".format(
                                self.name,
                                # rate,
                                len(rate),
                                np.array(self.defaults.variable).size,
                                # self.defaults.variable,
                            )
                        )

        super()._validate_params(request_set=request_set, target_set=target_set, context=context)

        if NOISE in target_set:
            noise = target_set[NOISE]
            if isinstance(noise, DistributionFunction):
                noise.owner = self
                target_set[NOISE] = noise.execute

            self._validate_noise(target_set[NOISE])

    def _validate_initializers(self, default_variable, context=None):
        for initial_value_name in self.initializers:

            initial_value = self._get_current_parameter_value(initial_value_name, context=context)

            if isinstance(initial_value, (list, np.ndarray)):
                if len(initial_value) != 1:
                    # np.atleast_2d may not be necessary here?
                    if np.shape(np.atleast_2d(initial_value)) != np.shape(np.atleast_2d(default_variable)):
                        raise FunctionError("{}'s {} ({}) is incompatible with its default_variable ({})."
                                            .format(self.name, initial_value_name, initial_value, default_variable))
            elif not isinstance(initial_value, (float, int)):
                raise FunctionError("{}'s {} ({}) must be a number or a list/array of numbers."
                                    .format(self.name, initial_value_name, initial_value))

    def _validate_rate(self, rate):
        # FIX: CAN WE JUST GET RID OF THIS?
        # kmantel: this duplicates much code in _validate_params above, but that calls _instantiate_defaults
        # which I don't think is the right thing to do here, but if you don't call it in _validate_params
        # then a lot of things don't get instantiated properly
        if rate is not None:
            if isinstance(rate, list):
                rate = np.asarray(rate)

            rate_type_msg = 'The rate parameter of {0} must be a number or an array/list of at most 1d (you gave: {1})'

            if isinstance(rate, np.ndarray):
                # kmantel: current test_gating test depends on 2d rate
                #   this should be looked at but for now this restriction is removed
                # if rate.ndim > 1:
                #     raise FunctionError(rate_type_msg.format(self.name, rate))
                pass
            elif not isinstance(rate, numbers.Number):
                raise FunctionError(rate_type_msg.format(self.name, rate))

            if isinstance(rate, np.ndarray) and not iscompatible(rate, self.defaults.variable):
                if len(rate) != 1 and len(rate) != np.array(self.defaults.variable).size:
                    if self._variable_shape_flexibility is DefaultsFlexibility.FLEXIBLE:
                        self.defaults.variable = np.zeros_like(np.array(rate))
                        if self.verbosePref:
                            warnings.warn(
                                "The length ({}) of the array specified for the rate parameter ({}) of {} "
                                "must match the length ({}) of the default input ({}); "
                                "the default input has been updated to match".format(
                                    len(rate),
                                    rate,
                                    self.name,
                                    np.array(self.defaults.variable).size,
                                    self.defaults.variable,
                                )
                            )
                        self._instantiate_value()
                        self._variable_shape_flexibility = DefaultsFlexibility.INCREASE_DIMENSION
                    else:
                        raise FunctionError(
                            "The length of the array specified for the rate parameter of {} ({}) "
                            "must match the length of the default input ({}).".format(
                                self.name,
                                # rate,
                                len(rate),
                                np.array(self.defaults.variable).size,
                                # self.defaults.variable,
                            )
                        )

    # Ensure that the noise parameter makes sense with the input type and shape; flag any noise functions that will
    # need to be executed
    def _validate_noise(self, noise):
        # Noise is a list or array
        if isinstance(noise, (np.ndarray, list)):
            if len(noise) == 1:
                pass
            # Variable is a list/array
            elif (not iscompatible(np.atleast_2d(noise), self.defaults.variable)
                  and not iscompatible(np.atleast_1d(noise), self.defaults.variable) and len(noise) > 1):
                raise FunctionError(
                    "Noise parameter ({}) does not match default variable ({}). Noise parameter of {} "
                    "must be specified as a float, a function, or an array of the appropriate shape ({}).".format(
                        noise, self.defaults.variable, self.name, np.shape(np.array(self.defaults.variable))
                    ),
                    component=self
                )
            else:
                for i in range(len(noise)):
                    if isinstance(noise[i], DistributionFunction):
                        noise[i] = noise[i].execute
                    # if not isinstance(noise[i], (float, int)) and not callable(noise[i]):
                    if not np.isscalar(noise[i]) and not callable(noise[i]):
                        raise FunctionError("The elements of a noise list or array must be scalars or functions. "
                                            "{} is not a valid noise element for {}".format(noise[i], self.name))

    def _try_execute_param(self, param, var, context=None):

        # FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D BELOW]
        param_shape = np.array(param).shape
        if not len(param_shape):
            param_shape = np.array(var).shape

        # param is a list; if any element is callable, execute it
        if isinstance(param, (np.ndarray, list)):
            # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths
            # FIX: WHY FORCE 2d??
param = np.atleast_2d(param) for i in range(len(param)): for j in range(len(param[i])): try: param[i][j] = param[i][j](context=context) except TypeError: try: param[i][j] = param[i][j]() except TypeError: pass try: param = param.reshape(param_shape) except ValueError: if object_has_single_value(param): param = np.full(param_shape, float(param)) # param is one function elif callable(param): # NOTE: np.atleast_2d will cause problems if the param has "rows" of different lengths new_param = [] # FIX: WHY FORCE 2d?? for row in np.atleast_2d(var): # for row in np.atleast_1d(var): # for row in var: new_row = [] for item in row: try: val = param(context=context) except TypeError: val = param() new_row.append(val) new_param.append(new_row) param = np.asarray(new_param) # FIX: [JDC 12/18/18 - HACK TO DEAL WITH ENFORCEMENT OF 2D ABOVE] try: if len(np.squeeze(param)): param = param.reshape(param_shape) except TypeError: pass return param def _instantiate_attributes_before_function(self, function=None, context=None): if not self.parameters.initializer._user_specified: self._initialize_previous_value(np.zeros_like(self.defaults.variable), context) # use np.broadcast_to to guarantee that all initializer type attributes take on the same shape as variable if not np.isscalar(self.defaults.variable): for attr in self.initializers: param = getattr(self.parameters, attr) param._set( np.broadcast_to( param._get(context), self.defaults.variable.shape ).copy(), context ) # create all stateful attributes and initialize their values to the current values of their # corresponding initializer attributes for attr_name in self.stateful_attributes: initializer_value = getattr(self.parameters, getattr(self.parameters, attr_name).initializer)._get(context).copy() getattr(self.parameters, attr_name)._set(initializer_value, context) super()._instantiate_attributes_before_function(function=function, context=context) def _initialize_previous_value(self, initializer, context=None): initializer = convert_to_np_array(initializer, dimension=1) self.defaults.initializer = initializer.copy() self.parameters.initializer._set(initializer.copy(), context) self.defaults.previous_value = initializer.copy() self.parameters.previous_value.set(initializer.copy(), context) return initializer @handle_external_context() def _update_default_variable(self, new_default_variable, context=None): if not self.parameters.initializer._user_specified: self._initialize_previous_value(np.zeros_like(new_default_variable), context) super()._update_default_variable(new_default_variable, context=context) def _parse_value_order(self, **kwargs): """ Returns: tuple: the values of the keyword arguments in the order in which they appear in this Component's `value <Component.value>` """ return tuple(v for k, v in kwargs.items()) @handle_external_context(fallback_most_recent=True) def reset(self, *args, context=None, **kwargs): """ Resets `value <StatefulFunction.previous_value>` and `previous_value <StatefulFunction.previous_value>` to the specified value(s). If arguments are passed into the reset method, then reset sets each of the attributes in `stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding argument. Next, it sets the `value <StatefulFunction.value>` to a list containing each of the argument values. 
If reset is called without arguments, then it sets each of the attributes in `stateful_attributes <StatefulFunction.stateful_attributes>` to the value of the corresponding attribute in `initializers <StatefulFunction.initializers>`. Next, it sets the `value <StatefulFunction.value>` to a list containing the values of each of the attributes in `initializers <StatefulFunction.initializers>`. Often, the only attribute in `stateful_attributes <StatefulFunction.stateful_attributes>` is `previous_value <StatefulFunction.previous_value>` and the only attribute in `initializers <StatefulFunction.initializers>` is `initializer <StatefulFunction.initializer>`, in which case the reset method sets `previous_value <StatefulFunction.previous_value>` and `value <StatefulFunction.value>` to either the value of the argument (if an argument was passed into reset) or the current value of `initializer <StatefulFunction.initializer>`. For specific types of StatefulFunction functions, the reset method may carry out other reinitialization steps. """ num_stateful_attrs = len(self.stateful_attributes) if num_stateful_attrs >= 2: # old args specification can be supported only in subclasses # that explicitly define an order by overriding reset if len(args) > 0: raise FunctionError( f'{self}.reset has more than one stateful attribute' f' ({self.stateful_attributes}). You must specify reset' ' values by keyword.' ) if len(kwargs) != num_stateful_attrs: type_name = type(self).__name__ raise FunctionError( 'StatefulFunction.reset must receive a keyword argument for' f' each item in {type_name}.stateful_attributes in the order in' f' which they appear in {type_name}.value' ) if num_stateful_attrs == 1: try: kwargs[self.stateful_attributes[0]] except KeyError: try: kwargs[self.stateful_attributes[0]] = args[0] except IndexError: kwargs[self.stateful_attributes[0]] = None invalid_args = [] # iterates in order arguments are sent in function call, so it # will match their order in value as long as they are listed # properly in subclass reset method signatures for attr in kwargs: try: kwargs[attr] except KeyError: kwargs[attr] = None if kwargs[attr] is not None: # from before: unsure if conversion to 1d necessary kwargs[attr] = np.atleast_1d(kwargs[attr]) else: try: kwargs[attr] = self._get_current_parameter_value(getattr(self.parameters, attr).initializer, context=context) except AttributeError: invalid_args.append(attr) if len(invalid_args) > 0: raise FunctionError( f'Arguments {invalid_args} to reset are invalid because they do' f" not correspond to any of {self}'s stateful_attributes" ) # rebuilding value rather than simply returning reinitialization_values in case any of the stateful # attrs are modified during assignment value = [] for attr, v in kwargs.items(): # FIXME: HACK: Do not reinitialize random_state if attr != "random_state": getattr(self.parameters, attr).set(kwargs[attr], context, override=True) value.append(getattr(self.parameters, attr)._get(context)) self.parameters.value.set(value, context, override=True) return value def _gen_llvm_function_reset(self, ctx, builder, params, state, arg_in, arg_out, *, tags:frozenset): assert "reset" in tags for a in self.stateful_attributes: initializer = getattr(self.parameters, a).initializer source_ptr = pnlvm.helpers.get_param_ptr(builder, self, params, initializer) dest_ptr = pnlvm.helpers.get_state_ptr(builder, self, state, a) if source_ptr.type != dest_ptr.type: warnings.warn("Shape mismatch: stateful param does not match the initializer: {}({}) vs. 
{}({})".format(initializer, source_ptr.type, a, dest_ptr.type)) # Take a guess that dest just has an extra dimension assert len(dest_ptr.type.pointee) == 1 dest_ptr = builder.gep(dest_ptr, [ctx.int32_ty(0), ctx.int32_ty(0)]) builder.store(builder.load(source_ptr), dest_ptr) return builder @abc.abstractmethod def _function(self, *args, **kwargs): raise FunctionError("StatefulFunction is not meant to be called explicitly")
48.895623
168
0.596302
27,392
0.943121
0
0
6,060
0.208649
0
0
14,262
0.491048