column                                      type             min        max
hexsha                                      stringlengths    40         40
size                                        int64            5          2.06M
ext                                         stringclasses    10 values
lang                                        stringclasses    1 value
max_stars_repo_path                         stringlengths    3          248
max_stars_repo_name                         stringlengths    5          125
max_stars_repo_head_hexsha                  stringlengths    40         78
max_stars_repo_licenses                     sequencelengths  1          10
max_stars_count                             int64            1          191k
max_stars_repo_stars_event_min_datetime     stringlengths    24         24
max_stars_repo_stars_event_max_datetime     stringlengths    24         24
max_issues_repo_path                        stringlengths    3          248
max_issues_repo_name                        stringlengths    5          125
max_issues_repo_head_hexsha                 stringlengths    40         78
max_issues_repo_licenses                    sequencelengths  1          10
max_issues_count                            int64            1          67k
max_issues_repo_issues_event_min_datetime   stringlengths    24         24
max_issues_repo_issues_event_max_datetime   stringlengths    24         24
max_forks_repo_path                         stringlengths    3          248
max_forks_repo_name                         stringlengths    5          125
max_forks_repo_head_hexsha                  stringlengths    40         78
max_forks_repo_licenses                     sequencelengths  1          10
max_forks_count                             int64            1          105k
max_forks_repo_forks_event_min_datetime     stringlengths    24         24
max_forks_repo_forks_event_max_datetime     stringlengths    24         24
content                                     stringlengths    5          2.06M
avg_line_length                             float64          1          1.02M
max_line_length                             int64            3          1.03M
alphanum_fraction                           float64          0          1
count_classes                               int64            0          1.6M
score_classes                               float64          0          1
count_generators                            int64            0          651k
score_generators                            float64          0          1
count_decorators                            int64            0          990k
score_decorators                            float64          0          1
count_async_functions                       int64            0          235k
score_async_functions                       float64          0          1
count_documentation                         int64            0          1.04M
score_documentation                         float64          0          1
53d70d3013eebf509bd463bbe169adf9205bf22b
4,367
py
Python
api_youtube.py
OnoArnaldo/PythonApiYoutube
8507eac234cd3d05a223db3beebd10412505bcf8
[ "MIT" ]
2
2019-11-15T16:46:36.000Z
2020-11-30T07:34:26.000Z
api_youtube.py
OnoArnaldo/PythonApiYoutube
8507eac234cd3d05a223db3beebd10412505bcf8
[ "MIT" ]
null
null
null
api_youtube.py
OnoArnaldo/PythonApiYoutube
8507eac234cd3d05a223db3beebd10412505bcf8
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import os
import sys
import json
import urllib2
import codecs

BASE_DIR = os.path.dirname(__file__)
BASE_URL = 'https://www.googleapis.com/youtube/v3/'

API_CHANNELS = 'channels'
API_PLAYLIST = 'playlistItems'

API_KEY = 'YOUR KEY'

CHANNELS = [
    'videosimprovaveis',
    'nerdologia',
    'Kurzgesagt',
    '1veritasium',
    'minutephysics',
    'xadrezverbal',
    'estevaoslow',
    'Vsauce',
    'braincraftvideo',
    'CienciaTodoDia',
]


class UrlEncoder(object):
    API_URL = ''

    def __init__(self, **kwargs):
        self.args = kwargs

    def _parms(self):
        args = []
        for k, v in self.args.items():
            args.append(k + '=' + str(v))
        return '&'.join(args)

    def get(self):
        parms = '?' + self._parms() if len(self.args) else ''
        return self.API_URL + parms

    def set(self, key, value):
        if value:
            self.args[key] = value


class ApiChannel(object):
    URL = BASE_URL + API_CHANNELS
    FILE_NAME = os.path.join(BASE_DIR, 'channels.json')

    def __init__(self, channels):
        self.encoder = self.build_encoder(API_KEY)
        self.channels = channels

    def run(self):
        data = self.generate_data()
        self.save(data)

    def generate_data(self):
        encoder = self.encoder
        ret = {}
        for channel in self.channels:
            encoder.set('forUsername', channel)
            data = self.get_data(encoder.get())
            ret[channel] = self.get_playlist_id(data)
        return ret

    def get_data(self, url):
        url = urllib2.urlopen(url)
        data = url.read()
        return json.loads(data)

    def get_playlist_id(self, data):
        items = data.get('items')
        content = items[0].get('contentDetails')
        playlists = content.get('relatedPlaylists')
        return playlists.get('uploads')

    def save(self, data):
        with open(self.FILE_NAME, 'w') as f:
            f.write(json.dumps(data))
            f.close()

    def build_encoder(self, api_key):
        UrlEncoder.API_URL = self.URL
        encoder = UrlEncoder()
        encoder.set('key', api_key)
        encoder.set('part', 'contentDetails')
        return encoder


class ApiPlayList(object):
    URL = BASE_URL + API_PLAYLIST
    FILE_NAME = os.path.join(BASE_DIR, 'playlist.txt')

    def __init__(self, channels):
        self.channels = channels
        self.encoder = self.build_encoder(API_KEY)

    def run(self):
        data = self.generate_data()
        self.save(data)

    def generate_data(self):
        encoder = self.encoder
        channels = self.channels
        ret = []
        for key in channels:
            encoder.set('playlistId', channels[key])
            data = self.get_data(encoder.get())
            ret += [[key] + self.get_info(data)]
        return ret

    def get_info(self, data):
        items = data.get('items')
        snippet = items[0].get('snippet')
        title = snippet.get('title')
        published_at = snippet.get('publishedAt')
        description = snippet.get('description')
        return [title, published_at, description]

    def save(self, data):
        fname = os.path.join(BASE_DIR, 'last_update.txt')
        with codecs.open(fname, 'w', encoding='utf-8') as f:
            for key, title, published_at, description in sorted(data, key=lambda x: x[2]):
                f.write('{}: {} - {}\n'.format(published_at[:10], key, title))
            f.close()

    def get_data(self, url):
        url = urllib2.urlopen(url)
        data = url.read()
        return json.loads(data)

    def build_encoder(self, api_key):
        UrlEncoder.API_URL = self.URL
        encoder = UrlEncoder()
        encoder.set('key', api_key)
        encoder.set('part', 'snippet')
        encoder.set('maxResults', '1')
        return encoder

    @classmethod
    def import_channels(cls, fname):
        with open(fname, 'r') as f:
            text = f.read()
            f.close()
        return json.loads(text)


if __name__ == '__main__':
    args = sys.argv[1:]

    if '-channel' in args:
        channel = ApiChannel(CHANNELS)
        channel.run()

    if '-playlist' in args:
        channels = ApiPlayList.import_channels(ApiChannel.FILE_NAME)
        play_list = ApiPlayList(channels)
        play_list.run()
24.672316
90
0.587589
3,533
0.809022
0
0
164
0.037554
0
0
544
0.124571
53d750a045a189f59e633e7a1ce562b90e7d821b
2,744
py
Python
python_and_ebpf/train.py
be4r/ssh-miner-detection
47003db1d9f72ae44d5a27e92d0109d5111bec35
[ "MIT" ]
null
null
null
python_and_ebpf/train.py
be4r/ssh-miner-detection
47003db1d9f72ae44d5a27e92d0109d5111bec35
[ "MIT" ]
null
null
null
python_and_ebpf/train.py
be4r/ssh-miner-detection
47003db1d9f72ae44d5a27e92d0109d5111bec35
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

from sklearn.tree import DecisionTreeClassifier
import pickle
import numpy as np

no = [b'runc:[2:INIT]', b'containerssh-ag', b'apt', b'dpkg']

class model:
    def __init__(self):
        self.d = DecisionTreeClassifier()

    def load(self, filename = 'model.p'):
        try:
            f = open(filename, 'rb')
            self.d = pickle.load(f)
            if type(self.d) != DecisionTreeClassifier:
                d = None
            f.close()
        except:
            return

    def save(self, filename = 'model.p'):
        f = open(filename, 'wb')
        pickle.dump(self.d, f)
        f.close()

    def fit(self, x, y):
        self.d.fit(x, y)

    def predict(self, x):
        return self.d.predict(x)

    def accuracy(self, y_pred, y_ref):
        return sum(np.array(y_pred) == np.array(y_ref)) / len(y_ref)

    def f1(self, y_pred, y_ref):
        tp = (np.array(y_pred) == 1) * (np.array(y_ref) == 1)
        tn = (np.array(y_pred) == 0) * (np.array(y_ref) == 0)
        fp = (np.array(y_pred) == 1) * (np.array(y_ref) == 0)
        fn = (np.array(y_pred) == 0) * (np.array(y_ref) == 1)
        return tp / (tp + (fp + fn) / 2)

def ngrams(array, size = 25, overlacing = False):
    return [array[i:i+size] for i in range(0, len(array)//size * size, 1 if overlacing else size)]
    res = [array[i:i+size] for i in range(0, len(array)//size * size, 1 if overlacing else size)]
    if sum([len(i) == size for i in res]) != len(res):
        raise Exception('wtf')

def gen_train(a, is_miner):
    #x1,y1,x2,y2 = train_test_split(x,y,0.05)
    x = ngrams(a)
    y = [1 if is_miner else 0,] * len(x)
    return x,y

def train_on_logs(*filenames, is_miner):
    classifier = model()
    #classifier.load()
    x, y = [], []
    for id, filename in enumerate(filenames):
        l = []
        with open(filename, 'r') as f:
            l = eval(''.join(f))
        codes = []
        for i in l:
            if i[0] not in no:
                codes.append(i[1])
        x_, y_ = gen_train(codes, is_miner[id])
        x.append(x_)
        y.append(y_)
    print(x,y)
    #classifier.fit(x,y)
    #classifier.save()

def predict_on_logs(*filenames, is_miner):
    classifier = model()
    classifier.load()
    x, y = [], []
    for id, filename in enumerate(filenames):
        l = []
        with open(filename, 'r') as f:
            l = eval(''.join(f))
        codes = []
        for i in l:
            if i[0] not in no:
                codes.append(i[1])
        x_, y_ = gen_train(codes, is_miner[id])
        x.append(x_)
        y.append(y_)
    y_pred = classifier.predict(x)
    print("Accuracy: ", classifier.accuracy(y_pred, y))
    print("F1: ", classifier.f1(y_pred, y))

def predict_on_trace(trace, A = 0.9):
    classifier = model()
    classifier.load()
    x, y = [], []
    for id, filename in enumerate(filenames):
        codes = []
        for i in trace:
            if i[0] not in no:
                codes.append(i[1])
        x_, y_ = gen_train(codes, is_miner[id])
        x.append(x_)
        y.append(y_)
    y_pred = classifier.predict(x)
    acc = sum(np.array(y_pred)) / len(y_pred)
    return acc > A
24.283186
95
0.622085
854
0.311224
0
0
0
0
0
0
225
0.081997
53d8b7928beadd81971824eb5f4c9a1dab184d41
1,318
py
Python
data/parse_hipp_data.py
slinderman/pyhsmm-spiketrains
462d8d2c59bd2e7c39d20d624bd8b289a31baaa2
[ "MIT" ]
10
2016-04-23T00:23:20.000Z
2022-01-05T19:28:08.000Z
data/parse_hipp_data.py
slinderman/pyhsmm-spiketrains
462d8d2c59bd2e7c39d20d624bd8b289a31baaa2
[ "MIT" ]
1
2017-06-24T06:37:12.000Z
2017-07-07T17:19:59.000Z
data/parse_hipp_data.py
slinderman/pyhsmm-spiketrains
462d8d2c59bd2e7c39d20d624bd8b289a31baaa2
[ "MIT" ]
9
2016-03-29T21:37:46.000Z
2022-01-05T19:28:11.000Z
import os
import numpy as np
from scipy.io import loadmat

data = loadmat("data/hipp_2dtrack_a/smJun03p2.dat")

N = 49

data = reshape(data, 3, length(data)/3);
data = data';
size(data)  % 43799-by-3
fclose(fid);

% sampling time
Ts = 0.0333;
duration = size(data,1) * Ts;  % in second
Tmax = data(end, 3);
Tmin = data(1,3);
time_edges = [Tmin: 0.25: Tmax];  % 250 ms per bin

% interpolated rat's position in time bins
Rat_pos = interp1(data(:, 3), [data(:, 1), data(:, 2)], time_edges');
vel = abs(diff(Rat_pos, 1, 1 ));  % row difference
vel = [vel(1, :); vel];  % 250 ms
rat_vel = 4 * sqrt(vel(:, 1).^2 + vel(:, 2).^2);  % unit: cm/s
vel_ind = find(rat_vel >= 10);  % RUN velocity threshold

% using RUN only
T = length(vel_ind);
% using Run + pause periods
T = length(time_edges);

AllSpikeData = zeros(C,T);
for i=1:C
    str = ['Cell_num' num2str(i)];
    fid = fopen(str, 'r');
    cell_data = fscanf(fid, '%f');
    cell_data = reshape(cell_data, 3, length(cell_data)/3)';
    spike_time = cell_data(:, 3);
    spike_pos = cell_data(:, 1:2);
    [spike_time_count, bin] = histc(spike_time, time_edges);  % column vector
    % if analyzing the RUN period only uncomment this
    % spike_time_count = spike_time_count(vel_ind);
    AllSpikeData(i, :) = spike_time_count';
    fclose(fid);
end
22.338983
78
0.634294
0
0
0
0
0
0
0
0
52
0.039454
53d94f243224facafe883070b86bd959182c98e6
9,455
py
Python
repokid/tests/test_roledata.py
tomdev/repokid
e1a4839290bafccfaa304d87bbdeae85b9dc80aa
[ "Apache-2.0" ]
null
null
null
repokid/tests/test_roledata.py
tomdev/repokid
e1a4839290bafccfaa304d87bbdeae85b9dc80aa
[ "Apache-2.0" ]
null
null
null
repokid/tests/test_roledata.py
tomdev/repokid
e1a4839290bafccfaa304d87bbdeae85b9dc80aa
[ "Apache-2.0" ]
null
null
null
# Copyright 2017 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import time from mock import patch import repokid.utils.roledata from repokid.role import Role from repokid.tests.test_repokid_cli import ROLE_POLICIES, ROLES AARDVARK_DATA = { "arn:aws:iam::123456789012:role/all_services_used": [ {"lastAuthenticated": int(time.time()) * 1000, "serviceNamespace": "iam"}, {"lastAuthenticated": int(time.time()) * 1000, "serviceNamespace": "s3"}], "arn:aws:iam::123456789012:role/unused_ec2": [ {"lastAuthenticated": int(time.time()) * 1000, "serviceNamespace": "iam"}, {"lastAuthenticated": 0, "serviceNamespace": "ec2"}], "arn:aws:iam::123456789012:role/young_role": [ {"lastAuthenticated": int(time.time()) * 1000, "serviceNamespace": "iam"}, {"lastAuthenticated": int(time.time()) * 1000, "serviceNamespace": "s3"}] } class TestRoledata(object): @patch('repokid.utils.roledata.expand_policy') @patch('repokid.utils.roledata.get_actions_from_statement') @patch('repokid.utils.roledata.all_permissions') def test_get_role_permissions(self, mock_all_permissions, mock_get_actions_from_statement, mock_expand_policy): test_role = Role(ROLES[0]) all_permissions = ['ec2:associateaddress', 'ec2:attachvolume', 'ec2:createsnapshot', 's3:createbucket', 's3:getobject'] # empty policy to make sure we get the latest test_role.policies = [{'Policy': ROLE_POLICIES['all_services_used']}, {'Policy': ROLE_POLICIES['unused_ec2']}] mock_all_permissions.return_value = all_permissions mock_get_actions_from_statement.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] mock_expand_policy.return_value = ROLE_POLICIES['unused_ec2']['ec2_perms'] permissions = repokid.utils.roledata._get_role_permissions(test_role) assert permissions == set(ROLE_POLICIES['unused_ec2']['ec2_perms']) @patch('repokid.hooks.call_hooks') def test_get_repoable_permissions(self, mock_call_hooks): minimum_age = 1 repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_SERVICES = ['service_2'] repokid.utils.roledata.IAM_ACCESS_ADVISOR_UNSUPPORTED_ACTIONS = ['service_1:action_3', 'service_1:action_4'] hooks = {} permissions = ['service_1:action_1', 'service_1:action_2', 'service_1:action_3', 'service_1:action_4', 'service_2:action_1', 'service_3:action_1', 'service_3:action_2', 'service_4:action_1', 'service_4:action_2'] # service_1 and service_2 both used more than a day ago, which is outside of our test filter for age aa_data = [{'serviceNamespace': 'service_1', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_2', 'lastAuthenticated': (time.time() - 90000) * 1000}, {'serviceNamespace': 'service_3', 'lastAuthenticated': time.time() * 1000}] no_repo_permissions = {'service_4:action_1': time.time() - 1, 'service_4:action_2': time.time() + 1000} repoable_decision = repokid.utils.roledata.RepoablePermissionDecision() repoable_decision.repoable = True mock_call_hooks.return_value = {'potentially_repoable_permissions': {'service_1:action_1': repoable_decision, 'service_1:action_2': repoable_decision, 'service_4:action_1': 
repoable_decision}} repoable_permissions = repokid.utils.roledata._get_repoable_permissions(None, 'test_name', permissions, aa_data, no_repo_permissions, minimum_age, hooks) # service_1:action_3 and action_4 are unsupported actions, service_2 is an unsupported service, service_3 # was used too recently, service_4 action 2 is in no_repo_permissions and not expired assert repoable_permissions == set(['service_1:action_1', 'service_1:action_2', 'service_4:action_1']) @patch('repokid.utils.roledata._get_role_permissions') @patch('repokid.utils.roledata._get_repoable_permissions') @patch('repokid.hooks.call_hooks') def test_calculate_repo_scores(self, mock_call_hooks, mock_get_repoable_permissions, mock_get_role_permissions): roles = [Role(ROLES[0]), Role(ROLES[1]), Role(ROLES[2])] roles[0].disqualified_by = [] roles[0].aa_data = 'some_aa_data' # disqualified by a filter roles[1].policies = [{'Policy': ROLE_POLICIES['unused_ec2']}] roles[1].disqualified_by = ['some_filter'] roles[1].aa_data = 'some_aa_data' # no AA data roles[2].policies = [{'Policy': ROLE_POLICIES['all_services_used']}] roles[2].disqualified_by = [] roles[2].aa_data = None hooks = {} mock_get_role_permissions.side_effect = [['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy', 'ec2:AllocateHosts', 'ec2:AssociateAddress'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'], ['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']] mock_call_hooks.return_value = set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy']) mock_get_repoable_permissions.side_effect = [set(['iam:AddRoleToInstanceProfile', 'iam:AttachRolePolicy'])] minimum_age = 90 repokid.utils.roledata._calculate_repo_scores(roles, minimum_age, hooks) assert roles[0].repoable_permissions == 2 assert roles[0].repoable_services == ['iam'] assert roles[1].repoable_permissions == 0 assert roles[1].repoable_services == [] assert roles[2].repoable_permissions == 0 assert roles[2].repoable_services == [] def test_get_repoed_policy(self): policies = ROLE_POLICIES['all_services_used'] repoable_permissions = set(['iam:addroletoinstanceprofile', 'iam:attachrolepolicy', 's3:createbucket']) rewritten_policies, empty_policies = repokid.utils.roledata._get_repoed_policy(policies, repoable_permissions) assert rewritten_policies == {'s3_perms': {'Version': '2012-10-17', 'Statement': [{'Action': ['s3:deletebucket'], 'Resource': ['*'], 'Effect': 'Allow'}]}} assert empty_policies == ['iam_perms'] def test_find_newly_added_permissions(self): old_policy = ROLE_POLICIES['all_services_used'] new_policy = ROLE_POLICIES['unused_ec2'] new_perms = repokid.utils.roledata.find_newly_added_permissions(old_policy, new_policy) assert new_perms == set(['ec2:allocatehosts', 'ec2:associateaddress']) def test_convert_repoable_perms_to_perms_and_services(self): all_perms = ['a:j', 'a:k', 'b:l', 'c:m', 'c:n'] repoable_perms = ['b:l', 'c:m'] expected_repoed_services = ['b'] expected_repoed_permissions = ['c:m'] assert (repokid.utils.roledata._convert_repoable_perms_to_perms_and_services(all_perms, repoable_perms) == (expected_repoed_permissions, expected_repoed_services)) def test_convert_repoed_service_to_sorted_perms_and_services(self): repoed_services = ['route53', 'ec2', 's3:abc', 'dynamodb:def', 'ses:ghi', 'ses:jkl'] expected_services = ['ec2', 'route53'] expected_permissions = ['dynamodb:def', 's3:abc', 'ses:ghi', 'ses:jkl'] assert repokid.utils.roledata._convert_repoed_service_to_sorted_perms_and_services(repoed_services) == ( expected_permissions, expected_services ) def 
test_get_epoch_authenticated(self): assert(repokid.utils.roledata._get_epoch_authenticated(1545787620000) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(1545787620) == (1545787620, True)) assert(repokid.utils.roledata._get_epoch_authenticated(154578762) == (None, False)) def test_filter_scheduled_repoable_perms(self): assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:c', 'b']) == ['a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a', 'b']) == ['a:b', 'a:c', 'b:a'] assert repokid.utils.roledata._filter_scheduled_repoable_perms( ['a:b', 'a:c', 'b:a'], ['a:b', 'a:c']) == ['a:b', 'a:c']
51.950549
120
0.639662
7,960
0.841883
0
0
5,148
0.544474
0
0
3,342
0.353464
53da2e6911920cb3cc789891eed24c27f4a325c6
1,838
py
Python
DL_Scripts/image_recognition.py
Matnay/KPIT_Deep_Learning
14f3815fc2829db9bede86c31f23e721f6423f79
[ "MIT" ]
1
2020-05-01T15:28:12.000Z
2020-05-01T15:28:12.000Z
DL_Scripts/image_recognition.py
Matnay/KPIT_Deep_Learning
14f3815fc2829db9bede86c31f23e721f6423f79
[ "MIT" ]
null
null
null
DL_Scripts/image_recognition.py
Matnay/KPIT_Deep_Learning
14f3815fc2829db9bede86c31f23e721f6423f79
[ "MIT" ]
null
null
null
import rospy
from sensor_msgs.msg import Image
from std_msgs.msg import String
from cv_bridge import CvBridge
import cv2
import numpy as np
import tensorflow as tf
import classify_image

class RosTensorFlow():
    def __init__(self):
        classify_image.maybe_download_and_extract()
        self._session = tf.Session()
        classify_image.create_graph()
        self._cv_bridge = CvBridge()

        self._sub = rospy.Subscriber('/usb_cam/image_raw', Image, self.callback, queue_size=1)
        self._pub = rospy.Publisher('result', String, queue_size=1)
        self.score_threshold = rospy.get_param('~score_threshold', 0.1)
        self.use_top_k = rospy.get_param('~use_top_k', 5)

    def callback(self, image_msg):
        cv_image = self._cv_bridge.imgmsg_to_cv2(image_msg, "bgr8")
        # copy from
        # classify_image.py
        image_data = cv2.imencode('.jpg', cv_image)[1].tostring()
        # Creates graph from saved GraphDef.
        softmax_tensor = self._session.graph.get_tensor_by_name('softmax:0')
        predictions = self._session.run(
            softmax_tensor, {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)
        # Creates node ID --> English string lookup.
        node_lookup = classify_image.NodeLookup()
        top_k = predictions.argsort()[-self.use_top_k:][::-1]
        for node_id in top_k:
            human_string = node_lookup.id_to_string(node_id)
            score = predictions[node_id]
            if score > self.score_threshold:
                rospy.loginfo('%s (score = %.5f)' % (human_string, score))
                self._pub.publish(human_string)

    def main(self):
        rospy.spin()

if __name__ == '__main__':
    classify_image.setup_args()
    rospy.init_node('rostensorflow')
    tensor = RosTensorFlow()
    tensor.main()
36.039216
94
0.661589
1,505
0.818825
0
0
0
0
0
0
258
0.14037
53dd0a97f61bddb70bdbb1861eb823497caf7e52
21,202
py
Python
plugins/grouputils.py
aviskumar/speedo
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
[ "BSD-3-Clause" ]
null
null
null
plugins/grouputils.py
aviskumar/speedo
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
[ "BSD-3-Clause" ]
null
null
null
plugins/grouputils.py
aviskumar/speedo
758e8ac1fdeeb0b72c3a57742032ca5c79f0b2fa
[ "BSD-3-Clause" ]
3
2021-10-12T08:17:01.000Z
2021-12-21T01:17:54.000Z
# Copyright (C) 2020-2021 by TeamSpeedo@Github, < https://github.com/TeamSpeedo >. # # This file is part of < https://github.com/TeamSpeedo/FridayUserBot > project, # and is released under the "GNU v3.0 License Agreement". # Please see < https://github.com/TeamSpeedo/blob/master/LICENSE > # # All rights reserved. import asyncio import os import time from asyncio import sleep from pyrogram.types import ChatPermissions import pyrogram from main_start.core.decorators import speedo_on_cmd from main_start.helper_func.basic_helpers import ( edit_or_reply, edit_or_send_as_file, get_text, get_user, is_admin_or_owner, ) from main_start.helper_func.logger_s import LogIt from main_start.helper_func.plugin_helpers import ( convert_to_image, convert_vid_to_vidnote, generate_meme, ) @speedo_on_cmd( ["silentpin"], only_if_admin=True, cmd_help={ "help": "Pin Message Without Sending Notification To Members!", "example": "{ch}silentpin (reply to message)", }, ) async def spin(client, message): engine = message.Engine if not message.reply_to_message: await edit_or_reply(message, engine.get_string("REPLY_TO_PIN")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id, disable_notification=True, ) except BaseException as e: await edit_or_reply( message, engine.get_string("UNABLE_TO_PIN").format(e) ) return await edit_or_reply(message, engine.get_string("PINNED")) @speedo_on_cmd( ["pinloud", "pin"], only_if_admin=True, cmd_help={ "help": "Pin Message With Sending Notification To Members!", "example": "{ch}pin (reply to messages)", }, ) async def lpin(client, message): engine = message.Engine if not message.reply_to_message: await edit_or_reply(message, engine.get_string("REPLY_TO_PIN")) try: await client.pin_chat_message( message.chat.id, message.reply_to_message.message_id ) except BaseException as e: await edit_or_reply( message, engine.get_string("UNABLE_TO_PIN").format(e) ) return await edit_or_reply(message, engine.get_string("PINNED")) @speedo_on_cmd( ["unpin", "rmpins"], only_if_admin=True, cmd_help={"help": "Unpin All Pinned Messages!", "example": "{ch}rmpins"}, ) async def dpins(client, message): engine = message.Engine await client.unpin_all_chat_messages(message.chat.id) await edit_or_reply(message, engine.get_string("UNPINNED")) @speedo_on_cmd( ["adminlist", "admins"], cmd_help={"help": "Get Adminlist Of Chat!", "example": "{ch}adminlist"}, ) async def midhunadmin(client, message): engine = message.Engine mentions = "" starky = get_text(message) or message.chat.id pablo = await edit_or_reply(message, engine.get_string("PROCESSING")) try: X = await client.get_chat_members(starky, filter="administrators") ujwal = await client.get_chat(starky) except BaseException as e: await pablo.edit(engine.get_string("CANT_FETCH_ADMIN").format("Admins", e)) return for midhun in X: if not midhun.user.is_deleted: link = f'✱ <a href="tg://user?id={midhun.user.id}">{midhun.user.first_name}</a>' userid = f"<code>{midhun.user.id}</code>" mentions += f"\n{link} {userid}" holy = ujwal.username or ujwal.id messag = f""" <b>Admins in {ujwal.title} | {holy}</b> {mentions} """ await edit_or_send_as_file( messag, pablo, client, f"`AdminList Of {holy}!`", "admin-lookup-result", "html", ) @speedo_on_cmd( ["botlist", "bot"], group_only=True, cmd_help={"help": "Get List Of Bots In Chat!", "example": "{ch}botlist"}, ) async def bothub(client, message): engine = message.Engine buts = "**Bot List** \n\n" starky = get_text(message) or message.chat.id pablo = await edit_or_reply(message, 
engine.get_string("PROCESSING")) try: bots = await client.get_chat_members(starky, filter="bots") except BaseException as e: await pablo.edit(engine.get_string("CANT_FETCH_ADMIN").format("Bots", e)) return for nos, ujwal in enumerate(bots, start=1): buts += f"{nos}〉 [{ujwal.user.first_name}](tg://user?id={ujwal.user.id}) \n" await pablo.edit(buts) @speedo_on_cmd( ["zombies", "delusers"], cmd_help={ "help": "Remove Deleted Accounts In The Group/Channel!", "example": "{ch}zombies", }, ) async def ujwalzombie(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string("PROCESSING")) if len(message.text.split()) == 1: dm = 0 da = 0 dc = 0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: await sleep(1) if member.status == "member": dm += 1 elif member.status == "administrator": da += 1 elif member.status == "creator": dc += 1 text = "**Zombies Report!** \n\n" if dm > 0: text += engine.get_string("TOTAL_ZOMBIES_USERS").format(dm) if da > 0: text += engine.get_string("TOTAL_ZOMBIES_ADMINS").format(da) if dc > 0: text += engine.get_string("GRP_OWNER_IS_ZOMBIE") d = dm + da + dc if d > 0: text += (engine.get_string("WIPE_THEM")) await pablo.edit(text) else: await pablo.edit(engine.get_string("NO_ZOMBIES")) return sgname = message.text.split(None, 1)[1] if sgname.lower().strip() == "clean": me = client.me lol = await is_admin_or_owner(message, me.id) if not lol: await pablo.edit(engine.get_string("NOT_ADMIN")) return s = 0 f = 0 async for member in client.iter_chat_members(message.chat.id): if member.user.is_deleted: try: await client.kick_chat_member(message.chat.id, member.user.id) s += 1 except: f += 1 text = "" if s > 0: text += engine.get_string("REMOVED_ZOMBIES").format(s) if f > 0: text += (engine.get_string("FAILED_ZOMBIES").format(f)) await pablo.edit(text) @speedo_on_cmd( ["ban", "bun"], only_if_admin=True, group_only=True, cmd_help={ "help": "Ban Replied User or provide his ID!", "example": "{ch}ban (reply to user message OR provide his ID)", }, ) async def ban_world(client, message): engine = message.Engine bun = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await bun.edit(engine.get_string("NOT_ADMIN")) return text_ = get_text(message) userk, reason = get_user(message, text_) if not userk: await bun.edit(engine.get_string("TO_DO").format("Ban")) return try: user_ = await client.get_users(userk) except BaseException as e: await bun.edit(engine.get_string("USER_MISSING").format(e)) return userz = user_.id if not reason: reason = "Not Specified!" 
if userz == me_m.id: await bun.edit(engine.get_string("TF_DO_IT").format("Ban")) return try: user_ = await client.get_users(userz) except BaseException as e: await bun.edit(engine.get_string("USER_MISSING").format(e)) return try: await client.kick_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await bun.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Ban", e)) return b = f"**#Banned** \n**User :** [{user_.first_name}](tg://user?id={user_.id}) \n**Chat :** `{message.chat.title}` \n**Reason :** `{reason}`" await bun.edit(b) log = LogIt(message) await log.log_msg(client, b) @speedo_on_cmd( ["unban", "unbun"], only_if_admin=True, group_only=True, cmd_help={ "help": "UnBan Replied User or provide his ID!", "example": "{ch}unban (reply to user message OR Provide his id)", }, ) async def unban_world(client, message): engine = message.Engine unbun = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await unbun.edit(engine.get_string("NOT_ADMIN")) return text_ = get_text(message) userm, reason = get_user(message, text_) if not userm: await unbun.edit( engine.get_string("TO_DO").format("Un-Ban") ) return try: user_ = await client.get_users(userm) except BaseException as e: await unbun.edit(engine.get_string("USER_MISSING").format(e)) return userz = user_.id if not reason: reason = "Not Specified!" if userz == me_m.id: await unbun.edit(engine.get_string("TF_DO_IT").format("Un-Ban")) return try: await client.unban_chat_member(message.chat.id, int(user_.id)) except BaseException as e: await unbun.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Un-Ban", e)) ub = f"**#UnBanned** \n**User :** [{user_.first_name}](tg://user?id={user_.id}) \n**Chat :** `{message.chat.title}` \n**Reason :** `{reason}`" await unbun.edit(ub) log = LogIt(message) await log.log_msg(client, ub) @speedo_on_cmd( ["promote", "prumote"], only_if_admin=True, group_only=True, cmd_help={ "help": "Promote Replied user or provide his ID!", "example": "{ch}promote (reply to user message OR provide his ID)", }, ) async def ujwal_mote(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_promote_members: await pablo.edit(engine.get_string("NOT_ADMIN")) return asplit = get_text(message) userl, Res = get_user(message, asplit) if not userl: await pablo.edit( engine.get_string("TO_DO").format("Promote") ) return try: user = await client.get_users(userl) except BaseException as e: await pablo.edit(engine.get_string("USER_MISSING").format(e)) return userz = user.id if not Res: Res = "Admeme" if userz == me_m.id: await pablo.edit(engine.get_string("TF_DO_IT").format("Promote")) return try: await client.promote_chat_member( message.chat.id, user.id, can_change_info=me_.can_change_info, can_delete_messages=me_.can_delete_messages, can_restrict_members=me_.can_restrict_members, can_invite_users=me_.can_invite_users, can_pin_messages=me_.can_pin_messages, can_promote_members=me_.can_promote_members, ) except BaseException as e: await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Promote", e)) return p = f"**#Promote** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}` \n**Title :** `{Res}`" await pablo.edit(p) log = LogIt(message) await log.log_msg(client, p) try: if Res: await 
client.set_administrator_title(message.chat.id, user.id, Res) except: pass @speedo_on_cmd( ["demote", "demute"], only_if_admin=True, group_only=True, cmd_help={ "help": "Demote Replied user or provide his ID!", "example": "{ch}demote (reply to user message OR provide his ID)", }, ) async def ujwal_demote(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me await message.chat.get_member(int(me_m.id)) asplit = get_text(message) usero = get_user(message, asplit)[0] if not usero: await pablo.edit( engine.get_string("TO_DO").format("Demote") ) return try: user = await client.get_users(usero) except BaseException as e: await pablo.edit(engine.get_string("USER_MISSING").format(e)) return userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string("TF_DO_IT").format("Demote")) return try: await client.promote_chat_member( message.chat.id, user.id, is_anonymous=False, can_change_info=False, can_post_messages=False, can_edit_messages=False, can_delete_messages=False, can_restrict_members=False, can_invite_users=False, can_pin_messages=False, can_promote_members=False, ) except BaseException as e: await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Demote", e)) return d = f"**#Demote** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`" await pablo.edit(d) log = LogIt(message) await log.log_msg(client, d) @speedo_on_cmd( ["mute"], only_if_admin=True, group_only=True, cmd_help={ "help": "Mute Replied user or provide his ID!", "example": "{ch}mute (reply to user message OR provide his ID)", }, ) async def ujwal_mute(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string("NOT_ADMIN")) return asplit = get_text(message) userf = get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string("TO_DO").format("Mute") ) return try: user = await client.get_users(userf) except BaseException as e: await pablo.edit(engine.get_string("USER_MISSING").format(e)) return userz = user.id if userz == me_m.id: await pablo.edit(engine.get_string("TF_DO_IT").format("Mute")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=False) ) except BaseException as e: await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Mute", e)) return m = f"**#Muted** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`" await pablo.edit(m) log = LogIt(message) await log.log_msg(client, m) @speedo_on_cmd( ["unmute"], only_if_admin=True, group_only=True, cmd_help={ "help": "Unmute Replied user or provide his ID!", "example": "{ch}Unmute (reply to user message OR provide his ID)", }, ) async def ujwal_unmute(client, message): engine = message.Engine pablo = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_restrict_members: await pablo.edit(engine.get_string("NOT_ADMIN")) return asplit = get_text(message) userf = get_user(message, asplit)[0] if not userf: await pablo.edit( engine.get_string("TO_DO").format("Un-Mute") ) return try: user = await client.get_users(userf) except BaseException as e: await pablo.edit(engine.get_string("USER_MISSING").format(e)) return userz = user.id if userz == me_m.id: await 
pablo.edit(engine.get_string("TF_DO_IT").format("un-mute")) return try: await client.restrict_chat_member( message.chat.id, user.id, ChatPermissions(can_send_messages=True) ) except BaseException as e: await pablo.edit(engine.get_string("FAILED_ADMIN_ACTION").format("Un-mute", e)) return um = f"**#Un_Muted** \n**User :** [{user.first_name}](tg://user?id={user.id}) \n**Chat :** `{message.chat.title}`" await pablo.edit(um) log = LogIt(message) await log.log_msg(client, um) @speedo_on_cmd( ["chatinfo", "grpinfo"], group_only=True, cmd_help={"help": "Get Info Of The Chat!", "example": "{ch}chatinfo"}, ) async def owo_chat_info(client, message): engine = message.Engine s = await edit_or_reply(message, engine.get_string("PROCESSING")) ujwal = await client.get_chat(message.chat.id) peer = await client.resolve_peer(message.chat.id) online_ = await client.send(pyrogram.raw.functions.messages.GetOnlines(peer=peer)) msg = "**Chat Info** \n\n" msg += f"**Chat-ID :** __{ujwal.id}__ \n" msg += f"**Verified :** __{ujwal.is_verified}__ \n" msg += f"**Is Scam :** __{ujwal.is_scam}__ \n" msg += f"**Chat Title :** __{ujwal.title}__ \n" msg += f"**Users Online :** __{online_.onlines}__ \n" if ujwal.photo: msg += f"**Chat DC :** __{ujwal.dc_id}__ \n" if ujwal.username: msg += f"**Chat Username :** __{ujwal.username}__ \n" if ujwal.description: msg += f"**Chat Description :** __{ujwal.description}__ \n" msg += f"**Chat Members Count :** __{ujwal.members_count}__ \n" if ujwal.photo: kek = await client.download_media(ujwal.photo.big_file_id) await client.send_photo(message.chat.id, photo=kek, caption=msg) await s.delete() else: await s.edit(msg) @speedo_on_cmd( ["purge"], only_if_admin=True, cmd_help={ "help": "Purge All Messages Till Replied Message!", "example": "{ch}purge (reply to message)", }, ) async def purge(client, message): engine = message.Engine start_time = time.time() message_ids = [] purge_len = 0 event = await edit_or_reply(message, engine.get_string("PROCESSING")) me_m = client.me if message.chat.type in ["supergroup", "channel"]: me_ = await message.chat.get_member(int(me_m.id)) if not me_.can_delete_messages: await event.edit(engine.get_string("NOT_ADMIN")) return if not message.reply_to_message: await event.edit(engine.get_string("NEEDS_REPLY").format("Message To Purge.")) return async for msg in client.iter_history( chat_id=message.chat.id, offset_id=message.reply_to_message.message_id, reverse=True, ): if msg.message_id != message.message_id: purge_len += 1 message_ids.append(msg.message_id) if len(message_ids) >= 100: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) message_ids.clear() if message_ids: await client.delete_messages( chat_id=message.chat.id, message_ids=message_ids, revoke=True ) end_time = time.time() u_time = round(end_time - start_time) await event.edit( engine.get_string("PURGE_").format(purge_len, u_time) ) await asyncio.sleep(3) await event.delete() @speedo_on_cmd( ["del"], cmd_help={ "help": "Delete Replied Message!", "example": "{ch}del (reply to message)", }, ) async def delmsgs(client, message): engine = message.Engine if not message.reply_to_message: await message.delete() return await client.delete_messages( chat_id=message.chat.id, message_ids=[message.reply_to_message.message_id], revoke=True, ) await message.delete() @speedo_on_cmd( ["setgrppic", "gpic"], cmd_help={ "help": "Set Custom Group Pic, For Lazy Peoples!", "example": "{ch}setgrppic (reply to image)", }, ) async def magic_grps(client, message): engine = 
message.Engine msg_ = await edit_or_reply(message, engine.get_string("PROCESSING")) if not message.reply_to_message: await msg_.edit(engine.get_string("NEEDS_REPLY").format("image")) return me_ = await message.chat.get_member(int(client.me.id)) if not me_.can_change_info: await msg_.edit(engine.get_string("NOT_ADMIN")) return cool = await convert_to_image(message, client) if not cool: await msg_.edit(engine.get_string("NEEDS_REPLY").format("a valid media")) return if not os.path.exists(cool): await msg_.edit(engine.get_string("INVALID_MEDIA")) return try: await client.set_chat_photo(message.chat.id, photo=cool) except BaseException as e: await msg_.edit(f"`Unable To Set Group Photo! TraceBack : {e}") return await msg_.edit(engine.get_string("DONE_"))
33.076443
146
0.621215
0
0
0
0
20,343
0.959304
17,265
0.814156
4,617
0.217721
53dd16873458e07dbdbf665e77a30bc20865dfcb
16,809
py
Python
carberretta/bot/cogs/feeds.py
Nereg/Carberretta
01e25bc8ece4c310ab541304e8809dfdd3eec3b8
[ "BSD-3-Clause" ]
null
null
null
carberretta/bot/cogs/feeds.py
Nereg/Carberretta
01e25bc8ece4c310ab541304e8809dfdd3eec3b8
[ "BSD-3-Clause" ]
null
null
null
carberretta/bot/cogs/feeds.py
Nereg/Carberretta
01e25bc8ece4c310ab541304e8809dfdd3eec3b8
[ "BSD-3-Clause" ]
null
null
null
""" FEEDS Handles YouTube and Twitch feed notifications. """ import datetime as dt import discord import feedparser from apscheduler.triggers.cron import CronTrigger from discord.ext import commands from carberretta import Config from carberretta.utils import DEFAULT_EMBED_COLOUR, chron LIVE_EMBED_COLOUR = 0x9146FF VOD_EMBED_COLOUR = 0x3498DB class Feeds(commands.Cog): def __init__(self, bot: commands.Bot) -> None: self.bot = bot async def call_feed(self) -> dict: url = f"https://www.youtube.com/feeds/videos.xml?channel_id={Config.YOUTUBE_CHANNEL_ID}&{dt.datetime.utcnow()}" async with self.bot.session.get(url) as response: if not 200 <= response.status <= 299: return [] if not (data := feedparser.parse(await response.text()).entries): return [] return data async def call_yt_api(self, video_id: str) -> dict: url = f"https://www.googleapis.com/youtube/v3/videos?part=contentDetails%2CliveStreamingDetails%2Csnippet&id={video_id}&key={Config.YOUTUBE_API_KEY}" async with self.bot.session.get(url) as response: if not 200 <= response.status <= 299: return [] if not (data := await response.json()): return [] return data["items"][0] async def call_twitch_api(self) -> dict: url = f"https://api.twitch.tv/helix/search/channels?query=carberratutorials" oauthurl = f"https://id.twitch.tv/oauth2/token?client_id={Config.TWITCH_CLIENT_ID}&client_secret={Config.TWITCH_CLIENT_SECRET}&grant_type=client_credentials" async with self.bot.session.post(url=oauthurl) as response: if not 200 <= response.status <= 299: return [] if not (twitch_tok := (await response.json())["access_token"]): return [] headers = { "client-id": f"{Config.TWITCH_CLIENT_ID}", "Authorization": f"Bearer {twitch_tok}", } async with self.bot.session.get(url=url, headers=headers) as response: if not 200 <= response.status <= 299: return [] if not (data := await response.json()): return [] return data["data"][0] @commands.Cog.listener() async def on_ready(self) -> None: if not self.bot.ready.booted: self.videos_channel = self.bot.get_channel(Config.VIDEOS_ID) self.videos_role = self.bot.guild.get_role(Config.VIDEOS_ROLE_ID) self.vods_role = self.bot.guild.get_role(Config.VODS_ROLE_ID) self.streams_role = self.bot.guild.get_role(Config.STREAMS_ROLE_ID) self.youtube = self.bot.get_cog("YouTube") if (await self.bot.application_info()).id == 696804435321552906: self.bot.scheduler.add_job(self.get_new_videos, CronTrigger(minute="*/3", second=0)) self.bot.scheduler.add_job(self.get_new_vods, CronTrigger(minute="*/3", second=15)) self.bot.scheduler.add_job(self.get_new_premieres, CronTrigger(minute="*/3", second=30)) self.bot.scheduler.add_job(self.get_new_streams, CronTrigger(minute="*/3", second=45)) self.bot.ready.up(self) async def get_new_vods(self) -> str: current_vod = await self.bot.db.field("SELECT ContentValue FROM videos WHERE ContentType = ?", "vod") for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails = data["snippet"]["thumbnails"] duration = data["contentDetails"]["duration"] if current_vod == item.yt_videoid: # We announced this vod already return elif "#VOD" in item.summary: # This is a vod we havent announced await self.videos_channel.send( f"Hey {self.vods_role.mention}, a new VOD just went live! 
Catch up on anything you missed from the last stream!", embed=discord.Embed.from_dict( { "title": item.title, "description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...", "color": VOD_EMBED_COLOUR, "url": item.link, "author": {"name": "Carberra Tutorials"}, "image": {"url": thumbnails["maxres"]["url"]}, "footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"}, } ), ) await self.bot.db.execute( "UPDATE videos SET ContentValue = ? WHERE ContentType = ?", item.yt_videoid, "vod" ) return item.yt_videoid async def get_new_videos(self) -> str: current_vid = await self.bot.db.field("SELECT ContentValue FROM videos WHERE ContentType = ?", "video") for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails = data["snippet"]["thumbnails"] duration = data["contentDetails"]["duration"] if item.yt_videoid == current_vid: # This is a video we already announced return elif "liveStreamingDetails" not in data.keys(): # A new video is live and its was not a premiere if "#VOD" not in item.summary: # This isnt a VOD await self.videos_channel.send( f"Hey {self.videos_role.mention}, a new video just went live! Come check it out!", embed=discord.Embed.from_dict( { "title": item.title, "description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...", "color": DEFAULT_EMBED_COLOUR, "url": item.link, "author": {"name": "Carberra Tutorials"}, "image": {"url": thumbnails["maxres"]["url"]}, "footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"}, } ), ) await self.bot.db.execute( "UPDATE videos SET ContentValue = ? WHERE ContentType = ?", item.yt_videoid, "video" ) return item.yt_videoid async def get_new_premieres(self) -> tuple: known_premieres = { _id: [_upcoming, _announced] for _id, _upcoming, _announced in await self.bot.db.records("SELECT * FROM premieres") } for item in await self.call_feed(): data = await self.call_yt_api(item.yt_videoid) thumbnails = data["snippet"]["thumbnails"] duration = data["contentDetails"]["duration"] live_content = data["snippet"]["liveBroadcastContent"] upcoming = known_premieres[item.yt_videoid][0] if item.yt_videoid in known_premieres.keys() else None announced = known_premieres[item.yt_videoid][1] if item.yt_videoid in known_premieres.keys() else None if "liveStreamingDetails" in data.keys(): start_time = data["liveStreamingDetails"]["scheduledStartTime"].strip("Z") scheduled_time = chron.from_iso(start_time) if not upcoming and duration != "P0D": # We have not seen this premiere before if live_content == "upcoming" and not announced: # This premiere is upcoming and not live await self.videos_channel.send( f"Hey {self.videos_role.mention}, a new premiere is scheduled for {chron.long_date_and_time(scheduled_time)} UTC! 
Hope to see you there!", embed=discord.Embed.from_dict( { "title": item.title, "description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...", "color": DEFAULT_EMBED_COLOUR, "url": item.link, "author": {"name": "Carberra Tutorials"}, "image": {"url": thumbnails["maxres"]["url"]}, "footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"}, } ), ) await self.bot.db.execute( "REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)", item.yt_videoid, 1, 0, ) return item.yt_videoid, False elif live_content == "live" and not upcoming and not announced: # The premiere was never upcoming is now live await self.videos_channel.send( f"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!", embed=discord.Embed.from_dict( { "title": item.title, "description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...", "color": DEFAULT_EMBED_COLOUR, "url": item.link, "author": {"name": "Carberra Tutorials"}, "image": {"url": thumbnails["maxres"]["url"]}, "footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"}, } ), ) await self.bot.db.execute( "REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)", item.yt_videoid, 1, 1, ) return item.yt_videoid, True elif not announced: # A premiere was upcoming, and is now live await self.videos_channel.send( f"Hey {self.videos_role.mention}, a new premiere started on {chron.long_date_and_time(scheduled_time)} UTC! Come and join us!", embed=discord.Embed.from_dict( { "title": item.title, "description": desc if len(desc := item.summary) <= 500 else f"{desc[:500]}...", "color": DEFAULT_EMBED_COLOUR, "url": item.link, "author": {"name": "Carberra Tutorials"}, "image": {"url": thumbnails["maxres"]["url"]}, "footer": {"text": f"Runtime: {self.youtube.get_duration(duration, long=True)}"}, } ), ) await self.bot.db.execute( "REPLACE INTO premieres (VideoID, Upcoming, Announced) VALUES (?, ?, ?)", item.yt_videoid, 1, 1 ) return item.yt_videoid, True async def get_new_streams(self) -> tuple: data = await self.call_twitch_api() if data: live_now = await self.bot.db.field("SELECT StreamLive FROM streams WHERE ID = 1") if data["is_live"] and not live_now: # The stream is live and we havent announced it yet start = chron.from_iso(data["started_at"].strip("Z")) message = await self.videos_channel.send( f"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!", embed=discord.Embed.from_dict( { "title": data["title"], "description": f"**Category: {data['game_name']}**", "color": LIVE_EMBED_COLOUR, "url": "https://www.twitch.tv/carberratutorials", "author": {"name": "Carberra Tutorials"}, "thumbnail": {"url": data["thumbnail_url"]}, "footer": {"text": f"Started: {chron.long_date_and_time(start)} UTC"}, } ), ) await self.bot.db.execute( "UPDATE streams SET StreamLive = ?, StreamStart = ?, StreamMessage= ? WHERE ID = 1", 1, start, message.id, ) return data["title"], False elif not data["is_live"] and live_now: # The stream is not live and last we checked it was (stream is over) await self.bot.db.execute( "UPDATE streams SET StreamLive = ?, StreamEnd = ? 
WHERE ID = 1", 0, dt.datetime.utcnow() ) start, stream_message, end = await self.bot.db.record( "SELECT StreamStart, StreamMessage, StreamEnd FROM streams WHERE ID = 1" ) duration = chron.from_iso(end) - chron.from_iso(start) try: message = await self.videos_channel.fetch_message(stream_message) except (discord.NotFound, discord.Forbidden, discord.HTTPException): return else: await message.edit( content=f"Hey {self.streams_role.mention}, I'm live on Twitch now! Come watch!", embed=discord.Embed.from_dict( { "title": "The stream has ended.", "description": "**Catch you in the next one!**", "color": LIVE_EMBED_COLOUR, "url": "https://www.twitch.tv/carberratutorials", "author": {"name": "Carberra Tutorials"}, "thumbnail": {"url": data["thumbnail_url"]}, "footer": {"text": f"Runtime: {chron.long_delta(duration)}"}, } ), ) return data["title"], True @commands.group(name="feed", invoke_without_command=True) @commands.is_owner() async def group_feed(self, ctx: commands.Context) -> None: pass @group_feed.command(name="video") @commands.is_owner() async def command_feed_video(self, ctx: commands.Context) -> None: last_video = await self.get_new_videos() await ctx.send(f"Announced video: {last_video}." if last_video else "No new videos.") @group_feed.command(name="vod") @commands.is_owner() async def command_feed_vod(self, ctx: commands.Context) -> None: last_vod = await self.get_new_vods() await ctx.send(f"Announced VOD: {last_vod}." if last_vod else "No new VODs.") @group_feed.command(name="premiere") @commands.is_owner() async def command_feed_premiere(self, ctx: commands.Context) -> None: if not (last_premiere := await self.get_new_premieres()): await ctx.send("No new premieres.") else: await ctx.send( f"Announced live premiere: {last_premiere[0]}." if last_premiere[1] else f"Announced upcoming premiere: {last_premiere[0]}." ) @group_feed.command(name="stream") @commands.is_owner() async def command_feed_stream(self, ctx: commands.Context) -> None: if not (last_stream := await self.get_new_streams()): await ctx.send("No new streams.") else: await ctx.send( f"Stream ended: {last_stream[0]}." if last_stream[1] else f"Announced stream: {last_stream[0]}." ) def setup(bot: commands.Bot) -> None: bot.add_cog(Feeds(bot))
44.586207
166
0.50467
16,389
0.975013
0
0
2,556
0.152061
15,841
0.942412
4,669
0.277768
53dd795653b27c0823e1d06e1e8c37e9cd9ead3e
5,676
py
Python
gdb/proxy.py
abaire/gdb_sniffer
f330193c65a39ce6abb01f25737ca967a0af9629
[ "Unlicense" ]
1
2021-12-22T04:04:22.000Z
2021-12-22T04:04:22.000Z
gdb/proxy.py
abaire/gdb_sniffer
f330193c65a39ce6abb01f25737ca967a0af9629
[ "Unlicense" ]
null
null
null
gdb/proxy.py
abaire/gdb_sniffer
f330193c65a39ce6abb01f25737ca967a0af9629
[ "Unlicense" ]
null
null
null
"""Provides a GDB logging proxy. See https://sourceware.org/gdb/onlinedocs/gdb/Remote-Protocol.html See https://www.embecosm.com/appnotes/ean4/embecosm-howto-rsp-server-ean4-issue-2.html """ from __future__ import annotations import logging import socket from typing import Optional from typing import Tuple from .packet import GDBPacket from net import ip_transport logger = logging.getLogger(__name__) class GDBProxy(ip_transport.IPTransport): """GDB Remote Serial Protocol proxy.""" def __init__(self, target_addr: Tuple[str, int], colorize: bool = False): super().__init__(process_callback=self._on_gdb_bytes_read) self.log_acks = False self.target_addr = target_addr self._target: Optional[ip_transport.IPTransport] = None if colorize: self.target_color = "\x1b[34m\x1b[47m" self.gdb_color = "\x1b[30m\x1b[47m" else: self.target_color = "" self.gdb_color = "" self._gdb_read_buffer: bytearray = bytearray() self._target_read_buffer: bytearray = bytearray() def set_connection(self, sock, addr): super().set_connection(sock, addr) logger.debug(f"{self.target_color}Connecting to target at {self.target_addr}") try: target_sock = socket.create_connection(self.target_addr) except ConnectionRefusedError: logger.error(f"{self.target_color}Connection to Target@{self.target_addr} refused.") self.close() return self._target = ip_transport.IPTransport(self._on_target_bytes_read, f"Target@{self.target_addr}") self._target.set_connection(target_sock, self.target_addr) self._add_sub_connection(self._target) def _on_gdb_bytes_read(self, _ignored): buffer = self._read_buffer self.shift_read_buffer(len(buffer)) self._append_gdb_read_buffer(buffer) self._target._write_buffer.extend(buffer) def _on_target_bytes_read(self, _ignored): buffer = self._target.read_buffer self._target.shift_read_buffer(len(buffer)) self._append_target_read_buffer(buffer) self._write_buffer.extend(buffer) def _append_gdb_read_buffer(self, data: bytes): self._unescape_and_append(self._gdb_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f"{self.gdb_color}GDB :", self._gdb_read_buffer) if bytes_consumed: self._gdb_read_buffer = bytearray(self._gdb_read_buffer[bytes_consumed:]) def _append_target_read_buffer(self, data: bytes): self._unescape_and_append(self._target_read_buffer, data) bytes_consumed = self._log_rsp_bytes(f"{self.target_color}TARGET :", self._target_read_buffer) if bytes_consumed: self._target_read_buffer = bytearray(self._target_read_buffer[bytes_consumed:]) @staticmethod def _unescape_and_append(buffer: bytearray, data: bytes): # RSP uses '}' as an escape character. Escapes are processed in this method # before adding to the read buffer to simplify parsing. if not data: return # Process any left over escapes. if buffer and buffer[-1] == GDBPacket.RSP_ESCAPE_CHAR: buffer[-1] = data[0] ^ 0x20 data = data[1:] escape_char_index = data.find(GDBPacket.RSP_ESCAPE_CHAR) while escape_char_index >= 0: if escape_char_index == len(data): # If there are no more characters after the escape char, just add it to the buffer and let it be # processed when more data is received. 
break if escape_char_index: buffer.extend(data[: escape_char_index - 1]) unescaped = data[escape_char_index + 1] ^ 0x20 buffer.append(unescaped) data = data[escape_char_index + 2 :] buffer.extend(data) def _log_rsp_bytes(self, log_prefix: str, buffer: bytearray) -> int: total_bytes_consumed = 0 pkt = GDBPacket() buffer_len = len(buffer) while total_bytes_consumed < buffer_len: if buffer[0] == ord("+"): if self.log_acks: logger.info(f"{log_prefix} <<ack>>") total_bytes_consumed += 1 buffer = buffer[1:] continue if buffer[0] == ord("-"): if self.log_acks: logger.info(f"{log_prefix} <<nack>>") total_bytes_consumed += 1 buffer = buffer[1:] continue if buffer[0] == 0x03: logger.info(f"{log_prefix} <<Interrupt request>>") total_bytes_consumed += 1 buffer = buffer[1:] continue leader = buffer.find(GDBPacket.PACKET_LEADER) if leader > 0: logger.warning( f"{log_prefix} Skipping {leader} non-leader bytes {buffer[:total_bytes_consumed + leader]}" ) buffer = buffer[leader:] bytes_consumed = pkt.parse(buffer) buffer = buffer[bytes_consumed:] if not bytes_consumed: break total_bytes_consumed += bytes_consumed if pkt.data: logger.info(f"{log_prefix} Received packet {pkt}") else: logger.info(f"{log_prefix} Received empty packet") if len(buffer): logger.debug( f"{log_prefix} After processing: [{len(buffer)}] {buffer}" ) return total_bytes_consumed
35.698113
112
0.617512
5,264
0.927414
0
0
1,050
0.184989
0
0
1,099
0.193622
53ddde78f62a83aa118f0171be55b4c481a15868
1,373
py
Python
pylayers/em/openems/test/Rect_Waveguide.py
usmanwardag/pylayers
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
[ "MIT" ]
143
2015-01-09T07:50:20.000Z
2022-03-02T11:26:53.000Z
pylayers/em/openems/test/Rect_Waveguide.py
usmanwardag/pylayers
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
[ "MIT" ]
148
2015-01-13T04:19:34.000Z
2022-03-11T23:48:25.000Z
pylayers/em/openems/test/Rect_Waveguide.py
usmanwardag/pylayers
2e8a9bdc993b2aacc92610a9c7edf875c6c7b24a
[ "MIT" ]
95
2015-05-01T13:22:42.000Z
2022-03-15T11:22:28.000Z
from openems.openems import *

# A simple simulation
#
# FDTD Simulation Setting
#
F = FDTD()
F.add(Exc(typ='Sinus',f0=100000))
F.add(BoundaryCond(['PMC','PMC','PEC','PEC','MUR','MUR']))

#
# CSX (Geometry setting)
#
C = CSX()

# The Box is added as a property
C.add(Excitation('excitation'),p=Box(P1=[-10,-10,0],P2=[10,10,0],Pr=0))
C.add(DumpBox('Et'),p=Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0))
C.add(RectilinearGrid(np.arange(-10,11,1),np.arange(-10,11,1),np.arange(-10,11,1)))
C.add(Polyhedron())

S = OpenEMS(F,C)
S.save(filename='RectWaveguide.xml')

#gnd = Matter('gnd')
#sphere = Matter('sphere')
#patch = Matter('patch')
#substrate = Matter('substrate',typ='Ma',Epsilon="3.38",Kappa="0.00046")
#cdgsht = Matter('copper',typ='Cs',conductivity="56e6",thickness="40e-6")
#b1 = Box(P1=[0,0,0],P2=[100,100,200],Pr=0)
#b2 = Box(P1=[0,0,0],P2=[10,20,30],Pr=10)
#b4 = Box(P1=[-10,0,-10],P2=[10,0,30],Pr=0)
#s1 = Sphere(P=[0,0,0],R=100,Pr=50)
#dump = DumpBox()
#C.add(gnd)
#C.add(patch)
#C.add(substrate)
#C.add(sphere)
#C.add(cdgsht)
#C.add(exc)
#C.add(dump)
#C.set('gnd',b1)
#C.set('gnd',b2)
#C.set('sphere',s1)
#C.set('copper',b1)
#C.set('copper',b2)
#C.set('Et',b4)
#C.save(filename='structure.xml')
##C.AddBox(prop='ConductingSheet',name='copper',P1=[0,-50,200],P2=[1000,50,200],Pri=10)
##C.AddCylinder(prop='Metal',name='cyl0',P1=[0,0,0],P2=[0,0,100],Rad=50,Pri=10)
#
25.90566
87
0.632921
0
0
0
0
0
0
0
0
969
0.705754
53debe5489e3f53b73538719925c989ad4ce399d
381
py
Python
DataPreprocessing/_segment_Y.py
vd1371/CBSA
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
[ "MIT" ]
null
null
null
DataPreprocessing/_segment_Y.py
vd1371/CBSA
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
[ "MIT" ]
null
null
null
DataPreprocessing/_segment_Y.py
vd1371/CBSA
f2b3f03c91ccd9ec02c2331f43573d7d6e72fd47
[ "MIT" ]
null
null
null
import numpy as np

def segment_Y(Y, **params):

    Y_segments = params.get("Y_segments")
    Y_quantile = params.get("Y_quantile")

    print("segmenting Y")

    Y = Y.values.reshape(-1)
    Y_quantile = np.quantile(Y, Y_quantile, axis = 0)

    bigger_mask = (Y > Y_quantile).copy()
    smaller_mask = (Y <= Y_quantile).copy()

    Y[bigger_mask] = 1
    Y[smaller_mask] = 0

    Y = Y.astype(int)

    return Y
19.05
50
0.677165
0
0
0
0
0
0
0
0
38
0.099738
53df3216d619040fc2551d1e35eda4fe2e177604
3,868
py
Python
WifiEnigma/BattleAI/question.py
Puzzlebox-IMT/Puzzlebox
6b80e22a4aee3228140692bd6352de18b2f6a96d
[ "MIT" ]
null
null
null
WifiEnigma/BattleAI/question.py
Puzzlebox-IMT/Puzzlebox
6b80e22a4aee3228140692bd6352de18b2f6a96d
[ "MIT" ]
null
null
null
WifiEnigma/BattleAI/question.py
Puzzlebox-IMT/Puzzlebox
6b80e22a4aee3228140692bd6352de18b2f6a96d
[ "MIT" ]
null
null
null
import mysql.connector
import random

from voice import synthetize_voice, delete_wav

def AllQuestionAI(id_theme):
    i = 0

    #CONNEXION A LA BDD
    conn = mysql.connector.connect(host="localhost", user="phpmyadmin", password="Vince@Mysql1997", database="Puzzlebox")
    cursor = conn.cursor()

    #EXECUTER LA REQUETE AVEC LA BDD
    query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
    cursor.execute(query, (id_theme, ))

    #RECUPERATION DES INFORMATIONS
    rows = cursor.fetchall()

    if rows:
        for line in rows:
            i += 1
            enonce = line[1]
            proposition1 = line[2]
            proposition2 = line[3]
            proposition3 = line[4]
            proposition4 = line[5]
            reponse = line[5]

            print("*******************************************************************************")
            print("                                  QUESTION ",i,"                               ")
            print("*******************************************************************************")
            print("ENONCE : ", enonce)
            print("PROPOSITION 1 : ", proposition1)
            print("PROPOSITION 2 : ", proposition2)
            print("PROPOSITION 3 : ", proposition3)
            print("PROPOSITION 4 : ", proposition4)
            print("REPONSE : ", reponse)
    else:
        print("Ce thème ne contient pas de questions")

def questionAI(id_theme):
    i = 0

    #CONNEXION A LA BDD
    conn = mysql.connector.connect(host="localhost", user="phpmyadmin", password="Vince@Mysql1997", database="Puzzlebox")
    cursor = conn.cursor()

    #EXECUTER LA REQUETE AVEC LA BDD
    query = ("SELECT * FROM Question INNER JOIN themes_questions ON Question.ID_QUESTION = themes_questions.ID_QUESTION WHERE ID_THEME=%s")
    cursor.execute(query, (id_theme, ))

    #RECUPERATION DES INFORMATIONS
    rows = cursor.fetchall()

    if rows:
        nb_rows = len(rows)
        num_question = random.randint(1, nb_rows)

        #L'index de la liste commence à zéro, il faut donc décaler d'un le numéro
        num_question = num_question - 1
        question = rows[num_question]

        result = [] #Tab which stores the query results

        #RECUPERATION DES TUPLES
        result.append(question[1])
        result.append(question[2])
        result.append(question[3])
        result.append(question[4])
        result.append(question[5])
        result.append(question[5]) #This last one is the answer

        print("*******************************************************************************")
        print("                                  QUESTION ",num_question+1,"                   ")
        print("*******************************************************************************")
        print("ENONCE : ", result[0])
        print("PROPOSITION 1 : ", result[1])
        print("PROPOSITION 2 : ", result[2])
        print("PROPOSITION 3 : ", result[3])
        print("PROPOSITION 4 : ", result[4])
        print("REPONSE : ", result[5])

        #complete_question = ''.join(complete_question) #Convert tuple into string

        return result

    else:
        print("Ce thème ne contient pas de questions")

def tell_question(question):
    synthetize_voice(question[0])

    for i in range(1,5) :
        num_prop = "Proposition {} ".format(i)
        num_prop = ''.join(num_prop)
        line = ''.join(question[i])
        line = num_prop + line
        synthetize_voice(line)

    delete_wav()

def quiz():
    counter = 1
    while(counter <= 5):
        questionAI(1)

if (__name__ == '__main__'):
    result = questionAI(1)
    tell_question(result)
31.447154
140
0.520941
0
0
0
0
0
0
0
0
1,535
0.396231
53e02e91fc0737f80d21208f1511392c2bcd37d1
875
py
Python
toy-amr/flux_functions.py
IanHawke/toy-amr
1f616791993ccd83cc6034616c08e09fa4ba310d
[ "MIT" ]
5
2019-05-27T18:13:45.000Z
2021-01-06T09:42:28.000Z
toy-amr/flux_functions.py
IanHawke/toy-amr
1f616791993ccd83cc6034616c08e09fa4ba310d
[ "MIT" ]
1
2019-10-21T13:34:48.000Z
2019-12-11T22:11:17.000Z
toy-amr/flux_functions.py
IanHawke/toy-amr
1f616791993ccd83cc6034616c08e09fa4ba310d
[ "MIT" ]
2
2019-05-08T18:00:36.000Z
2021-05-27T16:57:57.000Z
import numpy def lax_friedrichs(cons_minus, cons_plus, simulation, tl): alpha = tl.grid.dx / tl.dt flux = numpy.zeros_like(cons_minus) prim_minus, aux_minus = simulation.model.cons2all(cons_minus, tl.prim) prim_plus, aux_plus = simulation.model.cons2all(cons_plus , tl.prim) f_minus = simulation.model.flux(cons_minus, prim_minus, aux_minus) f_plus = simulation.model.flux(cons_plus, prim_plus, aux_plus ) flux[:, 1:-1] = 0.5 * ( (f_plus[:,0:-2] + f_minus[:,1:-1]) + \ alpha * (cons_plus[:,0:-2] - cons_minus[:,1:-1]) ) return flux def upwind(cons_minus, cons_plus, simulation, patch): flux = numpy.zeros_like(cons_minus) flux[:, 1:-1] = simulation.model.riemann_problem_flux(cons_plus [:, 0:-2], cons_minus[:, 1:-1]) return flux
39.772727
79
0.609143
0
0
0
0
0
0
0
0
0
0
53e0390b65014122e4de16c06f08712946e2a007
2,084
py
Python
pi/auth.py
vmagamedov/pi
6ee98af69b757d96aa4eddc32513309e0fe05d1d
[ "BSD-3-Clause" ]
7
2016-06-24T04:49:48.000Z
2020-06-29T17:34:12.000Z
pi/auth.py
vmagamedov/pi
6ee98af69b757d96aa4eddc32513309e0fe05d1d
[ "BSD-3-Clause" ]
11
2016-06-19T13:16:59.000Z
2019-11-02T13:14:19.000Z
pi/auth.py
vmagamedov/pi
6ee98af69b757d96aa4eddc32513309e0fe05d1d
[ "BSD-3-Clause" ]
null
null
null
import re
import json
import base64
import codecs
import os.path
import asyncio
import subprocess

_PREFIX = 'docker-credential-'


def read_config():
    path = os.path.expanduser('~/.docker/config.json')
    if not os.path.exists(path):
        return {}
    with codecs.open(path, encoding='utf-8') as f:
        json_data = f.read()
    return json.loads(json_data)


async def _read_creds(creds_store, server):
    if not re.match(r'^\w+$', creds_store, re.ASCII):
        raise ValueError('Invalid credsStore: {!r}'.format(creds_store))

    proc = await asyncio.create_subprocess_exec(
        _PREFIX + creds_store, 'get',
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout, stderr = await proc.communicate(server.encode('ascii'))
    if proc.returncode != 0:
        return None
    else:
        data = json.loads(stdout)
        return {
            'Username': data['Username'],
            'Password': data['Secret'],
            'ServerAddress': server,
        }


def _decode_auth(auth_data, server):
    auth_data_decoded = base64.b64decode(auth_data).decode('utf-8')
    username, _, password = auth_data_decoded.partition(':')
    return {
        'Username': username,
        'Password': password,
        'ServerAddress': server,
    }


async def resolve_auth(config, server):
    config_auths = config.get('auths')
    if config_auths is None:
        return None

    server_auth = config_auths.get(server)
    if server_auth is not None:
        auth_data = server_auth.get('auth')
        if auth_data is not None:
            return _decode_auth(auth_data, server)

    creds_store = config.get('credsStore')
    if creds_store is not None:
        return await _read_creds(creds_store, server)

    return None


def server_name(image_name):
    registry, _, name = image_name.partition('/')
    if not name:
        return 'docker.io'
    else:
        return registry


def encode_header(auth):
    json_data = json.dumps(auth)
    return base64.urlsafe_b64encode(json_data.encode('ascii'))
25.108434
72
0.644914
0
0
0
0
0
0
1,145
0.549424
240
0.115163
53e05b14f47fe11d4c2e4b89d1492b45ec46b072
5,199
py
Python
etl/transform.py
ACWI-SOGW/ngwmn_monitoring_locations_etl
e9ebfebbc5fa349a58669fb1d9944786f26729c3
[ "CC0-1.0" ]
1
2020-10-07T14:44:30.000Z
2020-10-07T14:44:30.000Z
etl/transform.py
ACWI-SOGW/ngwmn_monitoring_locations_etl
e9ebfebbc5fa349a58669fb1d9944786f26729c3
[ "CC0-1.0" ]
7
2020-10-14T19:13:10.000Z
2021-10-06T20:04:38.000Z
etl/transform.py
ACWI-SOGW/ngwmn_monitoring_locations_etl
e9ebfebbc5fa349a58669fb1d9944786f26729c3
[ "CC0-1.0" ]
1
2020-10-02T14:43:18.000Z
2020-10-02T14:43:18.000Z
""" Transform the data into a form that works with the WELL_REGISTRY_STG table. """ import re def mapping_factory(mapping): def map_func(key): if key is not None: ora_val = mapping.get(key.lower()) else: ora_val = None return ora_val return map_func WELL_TYPES = { 'surveillance': 1, 'trend': 2, 'special': 3, } map_well_type = mapping_factory(WELL_TYPES) WELL_PURPOSE = { 'dedicated monitoring/observation': 1, 'other': 2 } map_well_purpose = mapping_factory(WELL_PURPOSE) QW_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3 } map_qw_well_chars = mapping_factory(QW_WELL_CHARS) WL_WELL_CHARS = { 'background': 1, 'suspected/anticipated changes': 2, 'known changes': 3, 'unknown': 999 } map_wl_well_chars = mapping_factory(WL_WELL_CHARS) def to_flag(flag): return '1' if flag else '0' def transform_mon_loc_data(ml_data): """ Map the fields from the API JSON response to the fields in the WELL_REGISTRY_STG table with appropriate foreign key values. """ mapped_data = dict() mapped_data['AGENCY_CD'] = ml_data['agency']['agency_cd'] mapped_data['AGENCY_NM'] = ml_data['agency']['agency_nm'] mapped_data['AGENCY_MED'] = ml_data['agency']['agency_med'] mapped_data['SITE_NO'] = ml_data['site_no'] mapped_data['SITE_NAME'] = ml_data['site_name'] mapped_data['DEC_LAT_VA'] = ml_data['dec_lat_va'] mapped_data['DEC_LONG_VA'] = ml_data['dec_long_va'] mapped_data['HORZ_DATUM'] = ml_data['horizontal_datum'] mapped_data['ALT_VA'] = ml_data['alt_va'] mapped_data['ALT_DATUM_CD'] = ml_data['altitude_datum'] try: mapped_data['NAT_AQUIFER_CD'] = ml_data['nat_aqfr']['nat_aqfr_cd'] mapped_data['NAT_AQFR_DESC'] = ml_data['nat_aqfr']['nat_aqfr_desc'] except (AttributeError, KeyError, TypeError): mapped_data['NAT_AQUIFER_CD'] = None mapped_data['NAT_AQFR_DESC'] = None mapped_data['LOCAL_AQUIFER_NAME'] = ml_data['local_aquifer_name'] mapped_data['AQFR_CHAR'] = ml_data['aqfr_type'] mapped_data['QW_SN_FLAG'] = to_flag(ml_data['qw_sn_flag']) mapped_data['QW_BASELINE_FLAG'] = to_flag(ml_data['qw_baseline_flag']) mapped_data['QW_WELL_CHARS'] = map_qw_well_chars(ml_data['qw_well_chars']) mapped_data['QW_WELL_PURPOSE'] = map_well_purpose(ml_data['qw_well_purpose']) mapped_data['QW_SYS_NAME'] = ml_data['qw_network_name'] mapped_data['WL_SN_FLAG'] = to_flag(ml_data['wl_sn_flag']) mapped_data['WL_BASELINE_FLAG'] = to_flag(ml_data['wl_baseline_flag']) mapped_data['WL_WELL_CHARS'] = map_wl_well_chars(ml_data['wl_well_chars']) mapped_data['WL_WELL_PURPOSE'] = map_well_purpose(ml_data['wl_well_purpose']) mapped_data['WL_SYS_NAME'] = ml_data['wl_network_name'] mapped_data['DATA_PROVIDER'] = None mapped_data['DISPLAY_FLAG'] = to_flag(ml_data['display_flag']) mapped_data['WL_DATA_PROVIDER'] = None mapped_data['QW_DATA_PROVIDER'] = None mapped_data['LITH_DATA_PROVIDER'] = None mapped_data['CONST_DATA_PROVIDER'] = None mapped_data['WELL_DEPTH'] = ml_data['well_depth'] mapped_data['LINK'] = ml_data['link'] mapped_data['INSERT_DATE'] = ml_data['insert_date'] mapped_data['UPDATE_DATE'] = ml_data['update_date'] mapped_data['WL_WELL_PURPOSE_NOTES'] = ml_data['wl_well_purpose_notes'] mapped_data['QW_WELL_PURPOSE_NOTES'] = ml_data['qw_well_purpose_notes'] mapped_data['INSERT_USER_ID'] = ml_data['insert_user'] mapped_data['UPDATE_USER_ID'] = ml_data['update_user'] mapped_data['WL_WELL_TYPE'] = map_well_type(ml_data['wl_well_type']) mapped_data['QW_WELL_TYPE'] = map_well_type(ml_data['qw_well_type']) mapped_data['LOCAL_AQUIFER_CD'] = None mapped_data['REVIEW_FLAG'] = None try: mapped_data['STATE_CD'] = 
ml_data['state']['state_cd'] except (AttributeError, KeyError, TypeError): mapped_data['STATE_CD'] = None try: mapped_data['COUNTY_CD'] = ml_data['county']['county_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTY_CD'] = None try: mapped_data['COUNTRY_CD'] = ml_data['country']['country_cd'] except (AttributeError, KeyError, TypeError): mapped_data['COUNTRY_CD'] = None mapped_data['WELL_DEPTH_UNITS'] = ml_data['well_depth_units']['unit_id'] if ml_data['well_depth_units'] else None mapped_data['ALT_UNITS'] = ml_data['altitude_units']['unit_id'] if ml_data['altitude_units'] else None mapped_data['SITE_TYPE'] = ml_data['site_type'] mapped_data['HORZ_METHOD'] = ml_data['horz_method'] mapped_data['HORZ_ACY'] = ml_data['horz_acy'] mapped_data['ALT_METHOD'] = ml_data['alt_method'] mapped_data['ALT_ACY'] = ml_data['alt_acy'] return mapped_data def date_format(mapped_data): # fix missing fractions of a second if re.match(r".*:\d\dZ$", mapped_data['INSERT_DATE']): mapped_data['INSERT_DATE'] = mapped_data['INSERT_DATE'][:-1] + ".0Z" if re.match(r".*:\d\dZ$", mapped_data['UPDATE_DATE']): mapped_data['UPDATE_DATE'] = mapped_data['UPDATE_DATE'][:-1] + ".0Z"
38.227941
117
0.695903
0
0
0
0
0
0
0
0
2,110
0.405847
53e0d34e58ad9e0686dc6ee3e5a7f6fc0076f469
55
py
Python
django_reporter_pro/config/model_configs.py
shamilison/django-reporter-pro
0c6f60bbae939d318e7aafaec83613d2768a4f63
[ "Apache-2.0" ]
null
null
null
django_reporter_pro/config/model_configs.py
shamilison/django-reporter-pro
0c6f60bbae939d318e7aafaec83613d2768a4f63
[ "Apache-2.0" ]
null
null
null
django_reporter_pro/config/model_configs.py
shamilison/django-reporter-pro
0c6f60bbae939d318e7aafaec83613d2768a4f63
[ "Apache-2.0" ]
null
null
null
# Created by shamilsakib at 04/10/20

BASE_MODEL = None
18.333333
36
0.763636
0
0
0
0
0
0
0
0
36
0.654545
53e10c53f31c7e396a4573a421ae3212e9a11856
1,543
py
Python
DPSparkImplementations/paf_kernels.py
TEAlab/DPSpark
4d53ee13b03e2e12119c28fe2b2241ad20231eac
[ "MIT" ]
null
null
null
DPSparkImplementations/paf_kernels.py
TEAlab/DPSpark
4d53ee13b03e2e12119c28fe2b2241ad20231eac
[ "MIT" ]
null
null
null
DPSparkImplementations/paf_kernels.py
TEAlab/DPSpark
4d53ee13b03e2e12119c28fe2b2241ad20231eac
[ "MIT" ]
1
2020-12-30T22:12:55.000Z
2020-12-30T22:12:55.000Z
__author__ = "Zafar Ahmad, Mohammad Mahdi Javanmard" __copyright__ = "Copyright (c) 2019 Tealab@SBU" __license__ = "MIT" __version__ = "1.0.0" __maintainer__ = "Zafar Ahmad" __email__ = "[email protected]" __status__ = "Development" import numpy as np import numba as nb ''' Iterative kernels ''' def update_iter(u_block, x_block, n, I_, J_, K_): return _update_iter(np.ascontiguousarray(u_block), np.ascontiguousarray(x_block), n, I_, J_, K_) @nb.jit(nopython=True) def _update_iter(u_block, x_block, n, I_, J_, K_): # For testing purposes, rather than passing f_matrix_broadcast, we call this function def f_matrix(i, j): return float(i+j) for k in range(x_block.shape[0]-1, -1, -1): K = K_*x_block.shape[0]+k for j in range(x_block.shape[0]-1, -1, -1): J = J_*x_block.shape[0]+j for i in range(x_block.shape[0]-1, -1, -1): I = I_*x_block.shape[0]+i min1 = min(K-2, n-3) min2 = min(J-1, n-4) if ((K < n) and (K >= 3) and (J <= min1) and (J >= I+1) and (I <= min2)): x_block[i, j] = max(x_block[i, j], u_block[j+1, k] + f_matrix(J+1, min(K, 2*J-I+1))) return x_block def funcA_iter(block_info, n): ((I_, J_), x_block) = block_info return update_iter(x_block, x_block, n, I_, J_, I_) def funcX_iter(block_info, u_block_info, n): ((I_, J_), x_block) = block_info ((UI_, UJ_), u_block) = u_block_info return update_iter(u_block, x_block, n, I_, J_, UJ_)
35.068182
104
0.610499
0
0
0
0
771
0.499676
0
0
250
0.162022
53e339cc8fb766eb00e75883c4d6064e436e942f
1,343
py
Python
terrakg/rates.py
terrapain/terrakg
90c52ca3b227d2daabd604255e793ac5f536c246
[ "Apache-2.0" ]
null
null
null
terrakg/rates.py
terrapain/terrakg
90c52ca3b227d2daabd604255e793ac5f536c246
[ "Apache-2.0" ]
null
null
null
terrakg/rates.py
terrapain/terrakg
90c52ca3b227d2daabd604255e793ac5f536c246
[ "Apache-2.0" ]
null
null
null
from terra_sdk.exceptions import LCDResponseError

from terrakg import logger

# Logging
from terrakg.client import ClientContainer

logger = logger.get_logger(__name__)


class Rates:
    """
    Access the most recent rates.
    """

    def __init__(self, client: ClientContainer):
        self.client = client

    def get_token_quote_and_fees(self, token_contract: str, pair: str, amount: int = 1000000, reverse: bool = False):
        """
        Returns the price for `amount` of the token `pair` (exchange is included in pair).
        Set `reverse` to true to get the inverse price.
        """
        desc, action, result_key = ("reverse_simulation", "ask_asset", "offer_amount") if reverse else (
            "simulation", "offer_asset", "return_amount")
        query_msg = {
            desc: {
                action: {
                    "amount": str(amount),
                    "info": {"token": {
                        "contract_addr": token_contract
                    }
                    }
                }
            }
        }
        try:
            result = self.client.lcd_client.wasm.contract_query(pair, query_msg)
            return result[result_key], result['commission_amount']
        except LCDResponseError as e:
            logger.warning(f"Issue with price query: {e}")
            return None
30.522727
117
0.568876
1,170
0.871184
0
0
0
0
0
0
386
0.287416
53e44f41ef2d0962b6580e25176980ba9b2fe713
2,868
py
Python
src/tracking_module.py
HonzaKlicpera/Effective-footage-processing-Blender-add-on
f3faae3fc56a3ef8f2eabba9af8be718e57f4d35
[ "MIT" ]
1
2020-06-09T11:23:44.000Z
2020-06-09T11:23:44.000Z
src/tracking_module.py
HonzaKlicpera/Effective-footage-processing-Blender
f3faae3fc56a3ef8f2eabba9af8be718e57f4d35
[ "MIT" ]
null
null
null
src/tracking_module.py
HonzaKlicpera/Effective-footage-processing-Blender
f3faae3fc56a3ef8f2eabba9af8be718e57f4d35
[ "MIT" ]
null
null
null
import bpy
import os, glob
from pathlib import Path
from enum import Enum
from abc import ABC, abstractmethod
import csv

from . import keying_module

def export_tracking_data(self, context):
    clip = context.space_data.clip
    clip_name = os.path.splitext(clip.name)[0]
    tracker_name = context.scene.tracking_local.tracker_name

    output_path = os.path.join(keying_module.get_abs_output_path(context),clip_name)
    keying_module.create_directory(output_path)

    file = open(os.path.join(output_path,clip_name+".csv"), "w", newline='')
    writer = csv.writer(file, delimiter=',')

    multiplier = context.scene.tracking_local.tracking_multiplier

    tracker = clip.tracking.tracks.get(tracker_name)
    if tracker is not None:
        prev = tracker.markers[0].co[0]
        for m in tracker.markers:
            writer.writerow([(m.co[0] - prev) * multiplier])
            prev = m.co[0]
        self.report({"INFO"},"TRACKER SUCESSFULLY EXPORTED")
    else:
        self.report({"ERROR"},"TRACKER NOT FOUND")

    file.close()

#----------------------------------------
# PROPERTIES
#----------------------------------------

class TrackingSceneProps(bpy.types.PropertyGroup):
    tracker_name: bpy.props.StringProperty \
    (
        name = "Track name",
        description = "Name of the tracker for data export",
    )

    tracking_multiplier: bpy.props.FloatProperty \
    (
        name = "Distance multiplier",
        description = "The exported tracking distance gets multiplied by this value",
        default = 1,
        min = 0.0001
    )

class TrackingPanel(bpy.types.Panel):
    bl_label = "Tracking Panel"
    bl_idname = "SCENE_PT_tracking_rendering"
    bl_space_type = "CLIP_EDITOR"
    bl_region_type = "UI"
    bl_context = "render"

    def draw(self, context):
        layout = self.layout
        scene = context.scene

        box = layout.box()
        box.row().label(text = "Tracking export")
        box.row().prop(scene.tracking_local, "tracker_name")
        box.row().prop(scene.tracking_local, "tracking_multiplier")
        box.row().operator("tracking.export_data")

class TrackingExportDataOp(bpy.types.Operator):
    bl_idname = "tracking.export_data"
    bl_label = "Export Data"
    bl_description = "Export the tracking data of the chosen tracker"

    def execute(self, context):
        export_tracking_data(self, context)
        return {"FINISHED"}

classes = (
    TrackingExportDataOp,
    TrackingPanel,
    TrackingSceneProps
)

def register():
    for cls in classes:
        bpy.utils.register_class(cls)

    bpy.types.Scene.tracking_local = bpy.props.PointerProperty(type=TrackingSceneProps)

def unregister():
    for cls in reversed(classes):
        bpy.utils.unregister_class(cls)

    del bpy.types.Scene.tracking_local
30.189474
87
0.644003
1,279
0.445955
0
0
0
0
0
0
546
0.190377
53e4b90b1159d838a8edfa7ab52a953ffb4eca72
437
py
Python
nodes/2.x/python/View.ViewTemplate.py
andydandy74/ClockworkForDynamo
bd4ac2c13956a02352a458d01096a35b7258d9f2
[ "MIT" ]
147
2016-02-24T16:37:03.000Z
2022-02-18T12:10:34.000Z
nodes/2.x/python/View.ViewTemplate.py
johnpierson/ClockworkForDynamo
953d3f56b75e99561978925756e527357f9978dd
[ "MIT" ]
269
2016-02-25T14:04:14.000Z
2022-03-26T07:30:53.000Z
nodes/2.x/python/View.ViewTemplate.py
johnpierson/ClockworkForDynamo
953d3f56b75e99561978925756e527357f9978dd
[ "MIT" ]
89
2016-03-16T18:21:56.000Z
2022-02-03T14:34:30.000Z
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *

def GetViewTemplate(view):
    if not view: return None
    elif hasattr(view, "ViewTemplateId"):
        if view.ViewTemplateId.IntegerValue == -1: return None
        else: return view.Document.GetElement(view.ViewTemplateId)
    else: return None

views = UnwrapElement(IN[0])

if isinstance(IN[0], list): OUT = [GetViewTemplate(x) for x in views]
else: OUT = GetViewTemplate(views)
29.133333
69
0.757437
0
0
0
0
0
0
0
0
26
0.059497
53e73c9f153e27f98b4ee8cc325ad02d4ef90185
8,267
py
Python
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
bohdana-kuzmenko/incubator-dlab
d052709450e7916860c7dd191708d5524cf44c1e
[ "Apache-2.0" ]
null
null
null
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
bohdana-kuzmenko/incubator-dlab
d052709450e7916860c7dd191708d5524cf44c1e
[ "Apache-2.0" ]
null
null
null
infrastructure-provisioning/src/general/scripts/gcp/dataengine-service_prepare.py
bohdana-kuzmenko/incubator-dlab
d052709450e7916860c7dd191708d5524cf44c1e
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python

# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************

import json
import time
from fabric.api import *
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import sys
import os
import uuid
import logging
from Crypto.PublicKey import RSA


if __name__ == "__main__":
    local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
                                               os.environ['request_id'])
    local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
    logging.basicConfig(format='%(levelname)-8s [%(asctime)s]  %(message)s',
                        level=logging.INFO,
                        filename=local_log_filepath)
    try:
        os.environ['exploratory_name']
    except:
        os.environ['exploratory_name'] = ''
    if os.path.exists('/response/.dataproc_creating_{}'.format(os.environ['exploratory_name'])):
        time.sleep(30)
    print('Generating infrastructure names and tags')
    dataproc_conf = dict()
    try:
        dataproc_conf['exploratory_name'] = (os.environ['exploratory_name']).lower().replace('_', '-')
    except:
        dataproc_conf['exploratory_name'] = ''
    try:
        dataproc_conf['computational_name'] = (os.environ['computational_name']).lower().replace('_', '-')
    except:
        dataproc_conf['computational_name'] = ''
    dataproc_conf['service_base_name'] = (os.environ['conf_service_base_name']).lower().replace('_', '-')
    dataproc_conf['edge_user_name'] = (os.environ['edge_user_name']).lower().replace('_', '-')
    dataproc_conf['key_name'] = os.environ['conf_key_name']
    dataproc_conf['key_path'] = '{0}{1}.pem'.format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
    dataproc_conf['region'] = os.environ['gcp_region']
    dataproc_conf['zone'] = os.environ['gcp_zone']
    dataproc_conf['subnet'] = '{0}-{1}-subnet'.format(dataproc_conf['service_base_name'],
                                                      dataproc_conf['edge_user_name'])
    dataproc_conf['cluster_name'] = '{0}-{1}-des-{2}-{3}'.format(dataproc_conf['service_base_name'],
                                                                 dataproc_conf['edge_user_name'],
                                                                 dataproc_conf['exploratory_name'],
                                                                 dataproc_conf['computational_name'])
    dataproc_conf['cluster_tag'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
                                                       dataproc_conf['edge_user_name'])
    dataproc_conf['bucket_name'] = '{}-{}-bucket'.format(dataproc_conf['service_base_name'],
                                                         dataproc_conf['edge_user_name'])
    dataproc_conf['release_label'] = os.environ['dataproc_version']
    dataproc_conf['cluster_labels'] = {
        os.environ['notebook_instance_name']: "not-configured",
        "name": dataproc_conf['cluster_name'],
        "sbn": dataproc_conf['service_base_name'],
        "user": dataproc_conf['edge_user_name'],
        "notebook_name": os.environ['notebook_instance_name'],
        "product": "dlab",
        "computational_name": dataproc_conf['computational_name']
    }
    dataproc_conf['dataproc_service_account_name'] = '{0}-{1}-ps'.format(dataproc_conf['service_base_name'],
                                                                         dataproc_conf['edge_user_name'])
    service_account_email = "{}@{}.iam.gserviceaccount.com".format(dataproc_conf['dataproc_service_account_name'],
                                                                   os.environ['gcp_project_id'])
    dataproc_conf['edge_instance_hostname'] = '{0}-{1}-edge'.format(dataproc_conf['service_base_name'],
                                                                    dataproc_conf['edge_user_name'])
    dataproc_conf['dlab_ssh_user'] = os.environ['conf_os_user']

    edge_status = GCPMeta().get_instance_status(dataproc_conf['edge_instance_hostname'])
    if edge_status != 'RUNNING':
        logging.info('ERROR: Edge node is unavailable! Aborting...')
        print('ERROR: Edge node is unavailable! Aborting...')
        ssn_hostname = GCPMeta().get_private_ip_address(dataproc_conf['service_base_name'] + '-ssn')
        put_resource_status('edge', 'Unavailable', os.environ['ssn_dlab_path'], os.environ['conf_os_user'],
                            ssn_hostname)
        append_result("Edge node is unavailable")
        sys.exit(1)

    print("Will create exploratory environment with edge node as access point as following: ".format(
        json.dumps(dataproc_conf, sort_keys=True, indent=4, separators=(',', ': '))))
    logging.info(json.dumps(dataproc_conf))

    local('touch /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))

    local("echo Waiting for changes to propagate; sleep 10")

    dataproc_cluster = json.loads(open('/root/templates/dataengine-service_cluster.json').read().decode('utf-8-sig'))
    dataproc_cluster['projectId'] = os.environ['gcp_project_id']
    dataproc_cluster['clusterName'] = dataproc_conf['cluster_name']
    dataproc_cluster['labels'] = dataproc_conf['cluster_labels']
    dataproc_cluster['config']['configBucket'] = dataproc_conf['bucket_name']
    dataproc_cluster['config']['gceClusterConfig']['serviceAccount'] = service_account_email
    dataproc_cluster['config']['gceClusterConfig']['zoneUri'] = dataproc_conf['zone']
    dataproc_cluster['config']['gceClusterConfig']['subnetworkUri'] = dataproc_conf['subnet']
    dataproc_cluster['config']['masterConfig']['machineTypeUri'] = os.environ['dataproc_master_instance_type']
    dataproc_cluster['config']['workerConfig']['machineTypeUri'] = os.environ['dataproc_slave_instance_type']
    dataproc_cluster['config']['masterConfig']['numInstances'] = int(os.environ['dataproc_master_count'])
    dataproc_cluster['config']['workerConfig']['numInstances'] = int(os.environ['dataproc_slave_count'])
    if int(os.environ['dataproc_preemptible_count']) != 0:
        dataproc_cluster['config']['secondaryWorkerConfig']['numInstances'] = int(os.environ['dataproc_preemptible_count'])
    else:
        del dataproc_cluster['config']['secondaryWorkerConfig']
    dataproc_cluster['config']['softwareConfig']['imageVersion'] = dataproc_conf['release_label']
    ssh_user_pubkey = open(os.environ['conf_key_dir'] + os.environ['edge_user_name'] + '.pub').read()
    key = RSA.importKey(open(dataproc_conf['key_path'], 'rb').read())
    ssh_admin_pubkey = key.publickey().exportKey("OpenSSH")
    dataproc_cluster['config']['gceClusterConfig']['metadata']['ssh-keys'] = '{0}:{1}\n{0}:{2}'.format(
        dataproc_conf['dlab_ssh_user'], ssh_user_pubkey, ssh_admin_pubkey)
    dataproc_cluster['config']['gceClusterConfig']['tags'][0] = dataproc_conf['cluster_tag']

    try:
        logging.info('[Creating Dataproc Cluster]')
        print('[Creating Dataproc Cluster]')
        params = "--region {0} --bucket {1} --params '{2}'".format(dataproc_conf['region'],
                                                                   dataproc_conf['bucket_name'],
                                                                   json.dumps(dataproc_cluster))

        try:
            local("~/scripts/{}.py {}".format('dataengine-service_create', params))
        except:
            traceback.print_exc()
            raise Exception

        keyfile_name = "/root/keys/{}.pem".format(dataproc_conf['key_name'])
        local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
    except Exception as err:
        print('Error: {0}'.format(err))
        append_result("Failed to create Dataproc Cluster.", str(err))
        local('rm /response/.dataproc_creating_{}'.format(os.environ['exploratory_name']))
        sys.exit(1)
57.013793
178
0.670134
0
0
0
0
0
0
0
0
4,042
0.488932
53e7f5b9bbd28821250ea584ab34945cec2c0582
931
py
Python
02.py
mattias-lundell/aoc2021
32bd41446d963c5788d4614106405be65de81bcd
[ "MIT" ]
null
null
null
02.py
mattias-lundell/aoc2021
32bd41446d963c5788d4614106405be65de81bcd
[ "MIT" ]
null
null
null
02.py
mattias-lundell/aoc2021
32bd41446d963c5788d4614106405be65de81bcd
[ "MIT" ]
null
null
null
test = """forward 5 down 5 forward 8 up 3 down 8 forward 2 """ def part1(lines): h = 0 d = 0 for line in lines: direction, delta = line.split() delta = int(delta) if direction == 'forward': h += delta elif direction == 'down': d += delta elif direction == 'up': d -= delta print(h*d) def part2(lines): h = 0 d = 0 a = 0 for line in lines: direction, delta = line.split() delta = int(delta) print(direction, delta) if direction == 'forward': h += delta d += (delta * a) elif direction == 'down': a += delta elif direction == 'up': a -= delta print(h*d) if __name__ == '__main__': part1(test.splitlines()) part1(open('in02.txt').readlines()) part2(test.splitlines()) part2(open('in02.txt').readlines())
19.395833
39
0.493018
0
0
0
0
0
0
0
0
123
0.132116
53e86b46c3285488d7ebc41a01e6a577e706cb66
693
py
Python
associations/migrations/0001_initial.py
ollc-code/django-back
205f3adc61f9e62c88dfcc170999cef495cebed7
[ "MIT" ]
null
null
null
associations/migrations/0001_initial.py
ollc-code/django-back
205f3adc61f9e62c88dfcc170999cef495cebed7
[ "MIT" ]
null
null
null
associations/migrations/0001_initial.py
ollc-code/django-back
205f3adc61f9e62c88dfcc170999cef495cebed7
[ "MIT" ]
null
null
null
# Generated by Django 3.1.3 on 2020-11-09 08:56

from django.db import migrations, models


class Migration(migrations.Migration):

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Associations',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('association_name', models.CharField(max_length=100)),
                ('incharge', models.CharField(max_length=100)),
                ('about', models.CharField(max_length=500)),
                ('contacts', models.CharField(max_length=300)),
            ],
        ),
    ]
27.72
114
0.580087
600
0.865801
0
0
0
0
0
0
114
0.164502
53e96f34f945ecef4aebd95bbb66a14049ee97c2
4,631
py
Python
tests/pds/test_times.py
seignovert/pyvims
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
[ "BSD-3-Clause" ]
4
2019-09-16T15:50:22.000Z
2021-04-08T15:32:48.000Z
tests/pds/test_times.py
seignovert/pyvims
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
[ "BSD-3-Clause" ]
3
2018-05-04T09:28:24.000Z
2018-12-03T09:00:31.000Z
tests/pds/test_times.py
seignovert/pyvims
a70b5b9b8bc5c37fa43b7db4d15407f312a31849
[ "BSD-3-Clause" ]
1
2020-10-12T15:14:17.000Z
2020-10-12T15:14:17.000Z
"""Test PDS times modules.""" from datetime import datetime as dt from pyvims.pds.times import (cassini2utc, cassini_time, dt_date, dt_doy, dt_iso, dyear, pds_folder, pds_time, utc2cassini) from pytest import approx, raises def test_dt_iso(): """Test parsing ISO time pattern.""" assert str(dt_iso('2005-02-14T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_iso('2005-02-14 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_iso('2005-02-14:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_iso('2005-02-14')) == '2005-02-14 00:00:00+00:00' times = dt_iso('from 2005-02-14T18:02:29 to 2005-02-14T18:03') assert len(times) == 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_iso('2005-045') def test_dt_doy(): """Test parsing DOY time pattern.""" assert str(dt_doy('2005-045T18:02:29.123')) == '2005-02-14 18:02:29.123000+00:00' assert str(dt_doy('2005-045 18:02:29')) == '2005-02-14 18:02:29+00:00' assert str(dt_doy('2005-045:18:02')) == '2005-02-14 18:02:00+00:00' assert str(dt_doy('2005-045')) == '2005-02-14 00:00:00+00:00' times = dt_doy('from 2005-045T18:02:29 to 2005-045T18:03') assert len(times) == 2 assert str(times[0]) == '2005-02-14 18:02:29+00:00' assert str(times[1]) == '2005-02-14 18:03:00+00:00' with raises(ValueError): _ = dt_doy('2005-02-14') def test_dt_date(): """Test date pattern.""" assert str(dt_date('Feb 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Febr 14, 2005')) == '2005-02-14 00:00:00+00:00' assert str(dt_date('Feb 14, 2005', eod=True)) == '2005-02-14 23:59:59+00:00' assert str(dt_date('to Feb 14, 2005')) == '2005-02-14 23:59:59+00:00' times = dt_date('from Feb 14, 2005 through March 12, 2006') assert len(times) == 2 assert str(times[0]) == '2005-02-14 00:00:00+00:00' assert str(times[1]) == '2006-03-12 23:59:59+00:00' with raises(ValueError): _ = dt_date('2005-02-14') def test_pds_time(): """Test PDS time parsing.""" assert str(pds_time('May 17, 2007')) == '2007-05-17 00:00:00+00:00' assert str(pds_time('2010-274T00:00:00')) == '2010-10-01 00:00:00+00:00' assert str(pds_time('2011-10-01T00:02:04.244')) == '2011-10-01 00:02:04.244000+00:00' t0, t1 = pds_time('… May 17, 2007 through Jun 30, 2007') assert str(t0) == '2007-05-17 00:00:00+00:00' assert str(t1) == '2007-06-30 23:59:59+00:00' t0, t1 = pds_time('… 2010-274T00:00:00 through 2010-365T23:59:59') assert str(t0) == '2010-10-01 00:00:00+00:00' assert str(t1) == '2010-12-31 23:59:59+00:00' t0, t1 = pds_time('… 2011-10-01T00:02:04.244 through 2011-12-31T12:28:45.128') assert str(t0) == '2011-10-01 00:02:04.244000+00:00' assert str(t1) == '2011-12-31 12:28:45.128000+00:00' t0, t1 = pds_time('2005015T175855_2005016T184233/') assert str(t0) == '2005-01-15 17:58:55+00:00' assert str(t1) == '2005-01-16 18:42:33+00:00' with raises(ValueError): _ = pds_time('No data available') def test_cassini_time(): """Test Cassini time parsing.""" assert cassini_time('v1487096932_1.qub') == 1487096932.0 assert cassini_time(1483230358.172) == 1483230358.172 with raises(ValueError): _ = cassini_time('v123_1') with raises(ValueError): _ = cassini_time(123) def test_cassini2utc(): """Test Cassini time to UTC converter.""" assert str(cassini2utc('v1487096932_1')) == '2005-02-14 18:02:29' assert str(cassini2utc(1483230358.172)) == '2005-01-01 00:00:00' def test_utc2cassini(): """Test UTC to Cassini time converter.""" assert utc2cassini('2005-02-14T18:02:29') == approx(1487096932.068, abs=1e-3) times = 
utc2cassini('May 17, 2007 through Jun 30, 2007') assert len(times) == 2 assert times[0] == approx(1558053238.602, abs=1e-3) assert times[1] == approx(1561941262.879, abs=1e-3) def test_pds_folder(): """Test convert PDS folder as string.""" assert pds_folder('2005015T175855') == '2005-015T17:58:55' assert pds_folder('2005015T175855_2005016T184233/') == \ '2005-015T17:58:55 2005-016T18:42:33' def test_dyear(): """Test decimal year.""" assert dyear('2005-01-01') == 2005.0 assert dyear('2005-12-31') == 2005.9973 assert dyear('2004-12-31') == 2004.9973 assert dyear(dt(2005, 1, 1)) == 2005.0 assert dyear(dt(2005, 12, 31)) == 2005.9973 assert dyear(dt(2004, 12, 31)) == 2004.9973
34.559701
89
0.628374
0
0
0
0
0
0
0
0
2,055
0.443174
53e9f02f64051ff304c3ebef251b469302530c2e
626
py
Python
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py
zhouli121018/nodejsgm
0ccbc8acf61badc812f684dd39253d55c99f08eb
[ "MIT" ]
null
null
null
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py
zhouli121018/nodejsgm
0ccbc8acf61badc812f684dd39253d55c99f08eb
[ "MIT" ]
18
2020-06-05T18:17:40.000Z
2022-03-11T23:25:21.000Z
e/mail-relay/web/apps/mail/migrations/0109_auto_20171130_1047.py
zhouli121018/nodejsgm
0ccbc8acf61badc812f684dd39253d55c99f08eb
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('mail', '0108_auto_20171130_1004'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='relaysenderwhitelist',
            options={'verbose_name': '\u4e2d\u7ee7\u53d1\u4ef6\u4eba\u767d\u540d\u5355'},
        ),
        migrations.AlterModelOptions(
            name='spamrptblacklist',
            options={'verbose_name': '\u7f51\u5173\u9694\u79bb\u62a5\u544a\u6536\u4ef6\u4eba\u9ed1\u540d\u5355'},
        ),
    ]
27.217391
113
0.635783
517
0.825879
0
0
0
0
0
0
246
0.392971
53ea00fc5aec5aef16f52f772300f59c029df625
11,168
py
Python
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
1
2020-01-22T13:11:23.000Z
2020-01-22T13:11:23.000Z
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
12
2020-02-21T07:24:52.000Z
2020-04-14T09:54:32.000Z
venv/lib/python3.6/site-packages/ansible_test/_data/sanity/code-smell/runtime-metadata.py
usegalaxy-no/usegalaxy
75dad095769fe918eb39677f2c887e681a747f3a
[ "MIT" ]
null
null
null
#!/usr/bin/env python """Schema validation of ansible-core's ansible_builtin_runtime.yml and collection's meta/runtime.yml""" from __future__ import (absolute_import, division, print_function) __metaclass__ = type import datetime import os import re import sys from distutils.version import StrictVersion, LooseVersion from functools import partial import yaml from voluptuous import All, Any, MultipleInvalid, PREVENT_EXTRA from voluptuous import Required, Schema, Invalid from voluptuous.humanize import humanize_error from ansible.module_utils.six import string_types from ansible.utils.version import SemanticVersion def isodate(value, check_deprecation_date=False, is_tombstone=False): """Validate a datetime.date or ISO 8601 date string.""" # datetime.date objects come from YAML dates, these are ok if isinstance(value, datetime.date): removal_date = value else: # make sure we have a string msg = 'Expected ISO 8601 date string (YYYY-MM-DD), or YAML date' if not isinstance(value, string_types): raise Invalid(msg) # From Python 3.7 in, there is datetime.date.fromisoformat(). For older versions, # we have to do things manually. if not re.match('^[0-9]{4}-[0-9]{2}-[0-9]{2}$', value): raise Invalid(msg) try: removal_date = datetime.datetime.strptime(value, '%Y-%m-%d').date() except ValueError: raise Invalid(msg) # Make sure date is correct today = datetime.date.today() if is_tombstone: # For a tombstone, the removal date must be in the past if today < removal_date: raise Invalid( 'The tombstone removal_date (%s) must not be after today (%s)' % (removal_date, today)) else: # For a deprecation, the removal date must be in the future. Only test this if # check_deprecation_date is truish, to avoid checks to suddenly start to fail. if check_deprecation_date and today > removal_date: raise Invalid( 'The deprecation removal_date (%s) must be after today (%s)' % (removal_date, today)) return value def removal_version(value, is_ansible, current_version=None, is_tombstone=False): """Validate a removal version string.""" msg = ( 'Removal version must be a string' if is_ansible else 'Removal version must be a semantic version (https://semver.org/)' ) if not isinstance(value, string_types): raise Invalid(msg) try: if is_ansible: version = StrictVersion() version.parse(value) version = LooseVersion(value) # We're storing Ansible's version as a LooseVersion else: version = SemanticVersion() version.parse(value) if version.major != 0 and (version.minor != 0 or version.patch != 0): raise Invalid('removal_version (%r) must be a major release, not a minor or patch release ' '(see specification at https://semver.org/)' % (value, )) if current_version is not None: if is_tombstone: # For a tombstone, the removal version must not be in the future if version > current_version: raise Invalid('The tombstone removal_version (%r) must not be after the ' 'current version (%s)' % (value, current_version)) else: # For a deprecation, the removal version must be in the future if version <= current_version: raise Invalid('The deprecation removal_version (%r) must be after the ' 'current version (%s)' % (value, current_version)) except ValueError: raise Invalid(msg) return value def any_value(value): """Accepts anything.""" return value def get_ansible_version(): """Return current ansible-core version""" from ansible.release import __version__ return LooseVersion('.'.join(__version__.split('.')[:3])) def get_collection_version(): """Return current collection version, or None if it is not available""" import importlib.util 
collection_detail_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'collection_detail.py') collection_detail_spec = importlib.util.spec_from_file_location('collection_detail', collection_detail_path) collection_detail = importlib.util.module_from_spec(collection_detail_spec) sys.modules['collection_detail'] = collection_detail collection_detail_spec.loader.exec_module(collection_detail) # noinspection PyBroadException try: result = collection_detail.read_manifest_json('.') or collection_detail.read_galaxy_yml('.') return SemanticVersion(result['version']) except Exception: # pylint: disable=broad-except # We do not care why it fails, in case we cannot get the version # just return None to indicate "we don't know". return None def validate_metadata_file(path, is_ansible, check_deprecation_dates=False): """Validate explicit runtime metadata file""" try: with open(path, 'r') as f_path: routing = yaml.safe_load(f_path) except yaml.error.MarkedYAMLError as ex: print('%s:%d:%d: YAML load failed: %s' % (path, ex.context_mark.line + 1, ex.context_mark.column + 1, re.sub(r'\s+', ' ', str(ex)))) return except Exception as ex: # pylint: disable=broad-except print('%s:%d:%d: YAML load failed: %s' % (path, 0, 0, re.sub(r'\s+', ' ', str(ex)))) return if is_ansible: current_version = get_ansible_version() else: current_version = get_collection_version() # Updates to schema MUST also be reflected in the documentation # ~https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html # plugin_routing schema avoid_additional_data = Schema( Any( { Required('removal_version'): any_value, 'warning_text': any_value, }, { Required('removal_date'): any_value, 'warning_text': any_value, } ), extra=PREVENT_EXTRA ) deprecation_schema = All( # The first schema validates the input, and the second makes sure no extra keys are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version), 'removal_date': partial(isodate, check_deprecation_date=check_deprecation_dates), 'warning_text': Any(*string_types), } ), avoid_additional_data ) tombstoning_schema = All( # The first schema validates the input, and the second makes sure no extra keys are specified Schema( { 'removal_version': partial(removal_version, is_ansible=is_ansible, current_version=current_version, is_tombstone=True), 'removal_date': partial(isodate, is_tombstone=True), 'warning_text': Any(*string_types), } ), avoid_additional_data ) plugin_routing_schema = Any( Schema({ ('deprecation'): Any(deprecation_schema), ('tombstone'): Any(tombstoning_schema), ('redirect'): Any(*string_types), }, extra=PREVENT_EXTRA), ) list_dict_plugin_routing_schema = [{str_type: plugin_routing_schema} for str_type in string_types] plugin_schema = Schema({ ('action'): Any(None, *list_dict_plugin_routing_schema), ('become'): Any(None, *list_dict_plugin_routing_schema), ('cache'): Any(None, *list_dict_plugin_routing_schema), ('callback'): Any(None, *list_dict_plugin_routing_schema), ('cliconf'): Any(None, *list_dict_plugin_routing_schema), ('connection'): Any(None, *list_dict_plugin_routing_schema), ('doc_fragments'): Any(None, *list_dict_plugin_routing_schema), ('filter'): Any(None, *list_dict_plugin_routing_schema), ('httpapi'): Any(None, *list_dict_plugin_routing_schema), ('inventory'): Any(None, *list_dict_plugin_routing_schema), ('lookup'): Any(None, *list_dict_plugin_routing_schema), ('module_utils'): Any(None, *list_dict_plugin_routing_schema), ('modules'): Any(None, 
*list_dict_plugin_routing_schema), ('netconf'): Any(None, *list_dict_plugin_routing_schema), ('shell'): Any(None, *list_dict_plugin_routing_schema), ('strategy'): Any(None, *list_dict_plugin_routing_schema), ('terminal'): Any(None, *list_dict_plugin_routing_schema), ('test'): Any(None, *list_dict_plugin_routing_schema), ('vars'): Any(None, *list_dict_plugin_routing_schema), }, extra=PREVENT_EXTRA) # import_redirection schema import_redirection_schema = Any( Schema({ ('redirect'): Any(*string_types), # import_redirect doesn't currently support deprecation }, extra=PREVENT_EXTRA) ) list_dict_import_redirection_schema = [{str_type: import_redirection_schema} for str_type in string_types] # top level schema schema = Schema({ # All of these are optional ('plugin_routing'): Any(plugin_schema), ('import_redirection'): Any(None, *list_dict_import_redirection_schema), # requires_ansible: In the future we should validate this with SpecifierSet ('requires_ansible'): Any(*string_types), ('action_groups'): dict, }, extra=PREVENT_EXTRA) # Ensure schema is valid try: schema(routing) except MultipleInvalid as ex: for error in ex.errors: # No way to get line/column numbers print('%s:%d:%d: %s' % (path, 0, 0, humanize_error(routing, error))) def main(): """Validate runtime metadata""" paths = sys.argv[1:] or sys.stdin.read().splitlines() collection_legacy_file = 'meta/routing.yml' collection_runtime_file = 'meta/runtime.yml' # This is currently disabled, because if it is enabled this test can start failing # at a random date. For this to be properly activated, we (a) need to be able to return # codes for this test, and (b) make this error optional. check_deprecation_dates = False for path in paths: if path == collection_legacy_file: print('%s:%d:%d: %s' % (path, 0, 0, ("Should be called '%s'" % collection_runtime_file))) continue validate_metadata_file( path, is_ansible=path not in (collection_legacy_file, collection_runtime_file), check_deprecation_dates=check_deprecation_dates) if __name__ == '__main__': main()
39.885714
112
0.632969
0
0
0
0
0
0
0
0
3,385
0.303098
53eb2f5275fa111e5a11e8a6b19fe5db87a5dc8d
2,160
py
Python
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py
mitdo/o2ac-ur
74c82a54a693bf6a3fc995ff63e7c91ac1fda6fd
[ "MIT" ]
32
2021-09-02T12:29:47.000Z
2022-03-30T21:44:10.000Z
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py
kroglice/o2ac-ur
f684f21fd280a22ec061dc5d503801f6fefb2422
[ "MIT" ]
4
2021-09-22T00:51:14.000Z
2022-01-30T11:54:19.000Z
catkin_ws/src/o2ac_flexbe/o2ac_flexbe_states/src/o2ac_flexbe_states/align_bearing_holes.py
kroglice/o2ac-ur
f684f21fd280a22ec061dc5d503801f6fefb2422
[ "MIT" ]
7
2021-11-02T12:26:09.000Z
2022-02-01T01:45:22.000Z
#!/usr/bin/env python

from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient

# example import of required action
from o2ac_msgs.msg import AlignBearingHolesAction, AlignBearingHolesGoal


class AlignBearingHolesActionState(EventState):
    '''
    Actionlib for aligning the bearing holes

    -- task_name         string  Name of the task

    <= success              AlignBearingHoles completed successfully.
    <= error                AlignBearingHoles failed to execute.

    '''

    def __init__(self, task_name):
        super(
            AlignBearingHolesActionState,
            self).__init__(
            outcomes=[
                'success',
                'error'])

        self._topic = 'o2ac_flexbe/align_bearing_holes'
        # pass required clients as dict (topic: type)
        self._client = ProxyActionClient(
            {self._topic: AlignBearingHolesAction})
        self._task_name = task_name

        self._success = False

    def execute(self, userdata):
        if not self._success:
            return 'error'

        if self._client.has_result(self._topic):
            result = self._client.get_result(self._topic)
            Logger.logwarn('result %s' % str(result))

            if not result:
                Logger.logwarn('Fail to complete AlignBearingHoles')
                self._success = False
                return 'error'
            else:
                Logger.logwarn('Succeed! completed AlignBearingHoles')
                self._success = True
                return 'success'

    def on_enter(self, userdata):
        goal = AlignBearingHolesGoal()
        goal.task_name = self._task_name

        self._success = True
        try:
            self._client.send_goal(self._topic, goal)
        except Exception as e:
            Logger.logwarn(
                'Failed to send the AlignBearingHoles command:\n%s' % str(e))
            self._success = False

    def on_exit(self, userdata):
        if not self._client.has_result(self._topic):
            self._client.cancel(self._topic)
            Logger.loginfo('Cancelled active action goal.')
30.422535
72
0.600463
1,934
0.89537
0
0
0
0
0
0
583
0.269907
53eb9134fe73eaf59759bdec6bb46f044d4317f1
6,710
py
Python
find_unicode_control.py
sebastian-philipp/find-unicode-control
170730aff64d17a4d9c57b0284d862c932e1565c
[ "BSD-3-Clause" ]
null
null
null
find_unicode_control.py
sebastian-philipp/find-unicode-control
170730aff64d17a4d9c57b0284d862c932e1565c
[ "BSD-3-Clause" ]
null
null
null
find_unicode_control.py
sebastian-philipp/find-unicode-control
170730aff64d17a4d9c57b0284d862c932e1565c
[ "BSD-3-Clause" ]
null
null
null
#!/usr/bin/env python3 """Find unicode control characters in source files By default the script takes one or more files or directories and looks for unicode control characters in all text files. To narrow down the files, provide a config file with the -c command line, defining a scan_exclude list, which should be a list of regular expressions matching paths to exclude from the scan. There is a second mode enabled with -p which when set to 'all', prints all control characters and when set to 'bidi', prints only the 9 bidirectional control characters. """ import sys, os, argparse, re, unicodedata, magic import importlib from stat import * scan_exclude = [r'\.git/', r'\.hg/', r'\.desktop$', r'ChangeLog$', r'NEWS$', r'\.ppd$', r'\.txt$', r'\.directory$'] scan_exclude_mime = [r'text/x-po$', r'text/x-tex$', r'text/x-troff$', r'text/html$'] verbose_mode = False # Print to stderr in verbose mode. def eprint(*args, **kwargs): if verbose_mode: print(*args, file=sys.stderr, **kwargs) # Decode a single latin1 line. def decodeline(inf): if isinstance(inf, str): return inf return inf.decode('latin-1') # Make a text string from a file, attempting to decode from latin1 if necessary. # Other non-utf-8 locales are not supported at the moment. def getfiletext(filename): text = None with open(filename) as infile: try: if detailed_mode: return [decodeline(inf) for inf in infile] except Exception as e: eprint('%s: %s' % (filename, e)) return None try: text = ''.join(infile) except UnicodeDecodeError: eprint('%s: Retrying with latin1' % filename) try: text = ''.join([decodeline(inf) for inf in infile]) except Exception as e: eprint('%s: %s' % (filename, e)) if text: return set(text) else: return None def analyze_text_detailed(filename, text, disallowed, msg): line = 0 warned = False for t in text: line = line + 1 subset = [c for c in t if c in disallowed] if subset: print('%s:%d %s: %s' % (filename, line, msg, subset)) warned = True if not warned: eprint('%s: OK' % filename) # Look for disallowed characters in the text. We reduce all characters into a # set to speed up analysis. FIXME: Add a slow mode to get line numbers in files # that have these disallowed chars. def analyze_text(filename, text, disallowed, msg): if detailed_mode: analyze_text_detailed(filename, text, disallowed, msg) return if not text.isdisjoint(disallowed): print('%s: %s: %s' % (filename, msg, text & disallowed)) else: eprint('%s: OK' % filename) def should_read(f): m = magic.detect_from_filename(f) # Fast check, just the file name. if [e for e in scan_exclude if re.search(e, f)]: return False # Slower check, mime type. if not 'text/' in m.mime_type \ or [e for e in scan_exclude_mime if re.search(e, m.mime_type)]: return False return True # Get file text and feed into analyze_text. def analyze_file(f, disallowed, msg): eprint('%s: Reading file' % f) if should_read(f): text = getfiletext(f) if text: analyze_text(f, text, disallowed, msg) else: eprint('%s: SKIPPED' % f) # Actual implementation of the recursive descent into directories. def analyze_any(p, disallowed, msg): mode = os.stat(p).st_mode if S_ISDIR(mode): analyze_dir(p, disallowed, msg) elif S_ISREG(mode): analyze_file(p, disallowed, msg) else: eprint('%s: UNREADABLE' % p) # Recursively analyze files in the directory. def analyze_dir(d, disallowed, msg): for f in os.listdir(d): analyze_any(os.path.join(d, f), disallowed, msg) def analyze_paths(paths, disallowed, msg): for p in paths: analyze_any(p, disallowed, msg) # All control characters. We omit the ascii control characters. 
def nonprint_unicode(c): cat = unicodedata.category(c) if cat.startswith('C') and cat != 'Cc': return True return False if __name__ == '__main__': parser = argparse.ArgumentParser(description="Look for Unicode control characters") parser.add_argument('path', metavar='path', nargs='+', help='Sources to analyze') parser.add_argument('-p', '--nonprint', required=False, type=str, choices=['all', 'bidi'], help='Look for either all non-printable unicode characters or bidirectional control characters.') parser.add_argument('-v', '--verbose', required=False, action='store_true', help='Verbose mode.') parser.add_argument('-d', '--detailed', required=False, action='store_true', help='Print line numbers where characters occur.') parser.add_argument('-t', '--notests', required=False, action='store_true', help='Exclude tests (basically test.* as a component of path).') parser.add_argument('-c', '--config', required=False, type=str, help='Configuration file to read settings from.') args = parser.parse_args() verbose_mode = args.verbose detailed_mode = args.detailed if not args.nonprint: # Formatting control characters in the unicode space. This includes the # bidi control characters. disallowed = set(chr(c) for c in range(sys.maxunicode) if \ unicodedata.category(chr(c)) == 'Cf') msg = 'unicode control characters' elif args.nonprint == 'all': # All control characters. disallowed = set(chr(c) for c in range(sys.maxunicode) if \ nonprint_unicode(chr(c))) msg = 'disallowed characters' else: # Only bidi control characters. disallowed = set([ chr(0x202a), chr(0x202b), chr(0x202c), chr(0x202d), chr(0x202e), chr(0x2066), chr(0x2067), chr(0x2068), chr(0x2069)]) msg = 'bidirectional control characters' if args.config: spec = importlib.util.spec_from_file_location("settings", args.config) settings = importlib.util.module_from_spec(spec) spec.loader.exec_module(settings) if hasattr(settings, 'scan_exclude'): scan_exclude = scan_exclude + settings.scan_exclude if hasattr(settings, 'scan_exclude_mime'): scan_exclude_mime = scan_exclude_mime + settings.scan_exclude_mime if args.notests: scan_exclude = scan_exclude + [r'/test[^/]+/'] analyze_paths(args.path, disallowed, msg)
35.882353
109
0.634426
0
0
0
0
0
0
0
0
2,292
0.34158
53ebe27af2c0c28dac914d098023620cb50fc322
1,529
py
Python
igibson/object_states/aabb.py
mamadbiabon/iGibson
d416a470240eb7ad86e04fee475ae4bd67263a7c
[ "MIT" ]
360
2020-04-02T11:12:09.000Z
2022-03-24T21:46:58.000Z
igibson/object_states/aabb.py
mamadbiabon/iGibson
d416a470240eb7ad86e04fee475ae4bd67263a7c
[ "MIT" ]
169
2020-04-07T21:01:05.000Z
2022-03-31T10:07:39.000Z
igibson/object_states/aabb.py
mamadbiabon/iGibson
d416a470240eb7ad86e04fee475ae4bd67263a7c
[ "MIT" ]
94
2020-04-09T23:22:17.000Z
2022-03-17T21:49:03.000Z
import numpy as np

from igibson.external.pybullet_tools.utils import aabb_union, get_aabb, get_all_links
from igibson.object_states.object_state_base import CachingEnabledObjectState


class AABB(CachingEnabledObjectState):
    def _compute_value(self):
        body_id = self.obj.get_body_id()
        all_links = get_all_links(body_id)
        aabbs = [get_aabb(body_id, link=link) for link in all_links]
        aabb_low, aabb_hi = aabb_union(aabbs)

        if not hasattr(self.obj, "category") or self.obj.category != "floors" or self.obj.room_floor is None:
            return np.array(aabb_low), np.array(aabb_hi)

        # TODO: remove after split floors
        # room_floor will be set to the correct RoomFloor beforehand
        room_instance = self.obj.room_floor.room_instance

        # Get the x-y values from the room segmentation map
        room_aabb_low, room_aabb_hi = self.obj.room_floor.scene.get_aabb_by_room_instance(room_instance)

        if room_aabb_low is None:
            return np.array(aabb_low), np.array(aabb_hi)

        # Use the z values from pybullet
        room_aabb_low[2] = aabb_low[2]
        room_aabb_hi[2] = aabb_hi[2]

        return np.array(room_aabb_low), np.array(room_aabb_hi)

    def _set_value(self, new_value):
        raise NotImplementedError("AABB state currently does not support setting.")

    # Nothing needs to be done to save/load AABB since it will happen due to pose caching.
    def _dump(self):
        return None

    def load(self, data):
        return
36.404762
109
0.699804
1,342
0.877698
0
0
0
0
0
0
328
0.214519
53ed119c9b07bf3b0dd5b8ddf0cc3d573400eed1
34,187
py
Python
vsphere/tests/test_vsphere.py
fujigon/integrations-core
256b1c138fd1bf1c71db63698737e813cfda00f8
[ "BSD-3-Clause" ]
null
null
null
vsphere/tests/test_vsphere.py
fujigon/integrations-core
256b1c138fd1bf1c71db63698737e813cfda00f8
[ "BSD-3-Clause" ]
null
null
null
vsphere/tests/test_vsphere.py
fujigon/integrations-core
256b1c138fd1bf1c71db63698737e813cfda00f8
[ "BSD-3-Clause" ]
1
2019-12-23T13:35:17.000Z
2019-12-23T13:35:17.000Z
# (C) Datadog, Inc. 2010-2017 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) from __future__ import unicode_literals import time from datetime import datetime import mock import pytest from mock import MagicMock from pyVmomi import vim from datadog_checks.vsphere import VSphereCheck from datadog_checks.vsphere.cache_config import CacheConfig from datadog_checks.vsphere.common import SOURCE_TYPE from datadog_checks.vsphere.errors import BadConfigError, ConnectionError from datadog_checks.vsphere.vsphere import ( REFRESH_METRICS_METADATA_INTERVAL, REFRESH_MORLIST_INTERVAL, RESOURCE_TYPE_METRICS, SHORT_ROLLUP, ) from .utils import MockedMOR, assertMOR, disable_thread_pool, get_mocked_server SERVICE_CHECK_TAGS = ["vcenter_server:vsphere_mock", "vcenter_host:None", "foo:bar"] def test__init__(instance): with pytest.raises(BadConfigError): # Must define a unique 'name' per vCenter instance VSphereCheck('vsphere', {}, {}, [{'': ''}]) init_config = { 'clean_morlist_interval': 50, 'refresh_morlist_interval': 42, 'refresh_metrics_metadata_interval': -42, 'batch_property_collector_size': -1, } check = VSphereCheck('vsphere', init_config, {}, [instance]) i_key = check._instance_key(instance) assert check.time_started > 0 assert not check.server_instances assert check.cache_config.get_interval(CacheConfig.Morlist, i_key) == 42 assert check.cache_config.get_interval(CacheConfig.Metadata, i_key) == -42 assert check.clean_morlist_interval == 50 assert len(check.event_config) == 1 assert 'vsphere_mock' in check.event_config assert not check.registry assert not check.latest_event_query assert check.batch_collector_size == 0 assert check.batch_morlist_size == 50 assert check.excluded_host_tags == [] def test_excluded_host_tags(vsphere, instance, aggregator): # Check default value and precedence of instance config over init config check = VSphereCheck('vsphere', {}, {}, [instance]) assert check.excluded_host_tags == [] check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance]) assert check.excluded_host_tags == ["vsphere_host"] instance["excluded_host_tags"] = [] check = VSphereCheck('vsphere', {"excluded_host_tags": ["vsphere_host"]}, {}, [instance]) assert check.excluded_host_tags == [] # Test host tags are excluded from external host metadata, but still stored in the cache for metrics vsphere.excluded_host_tags = ["vsphere_host"] mocked_vm = MockedMOR(spec="VirtualMachine") mocked_host = MockedMOR(spec="HostSystem") mocked_mors_attrs = { mocked_vm: { "name": "mocked_vm", "parent": mocked_host, "runtime.powerState": vim.VirtualMachinePowerState.poweredOn, }, mocked_host: {"name": "mocked_host", "parent": None}, } with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result], entity=mocked_vm)] vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere.check(instance) ext_host_tags = vsphere.get_external_host_tags() # vsphere_host tag not in external metadata for host, source_tags in ext_host_tags: if host == u"mocked_vm": tags = source_tags["vsphere"] for tag in tags: assert "vsphere_host:" not in tag break # vsphere_host tag still in cache for 
sending with metrics aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="mocked_vm", count=1) aggregator.assert_metric_has_tag('vsphere.mymetric', tag="vsphere_host:mocked_host", count=1) def test__is_excluded(): """ * Exclude hosts/vms not compliant with the user's `*_include` configuration. * Exclude "non-labeled" virtual machines when the user configuration instructs to. """ # Sample(s) include_regexes = {'host_include': "f[o]+", 'vm_include': "f[o]+"} # OK included_host = MockedMOR(spec="HostSystem", name="foo") included_vm = MockedMOR(spec="VirtualMachine", name="foo") assert not VSphereCheck._is_excluded(included_host, {"name": included_host.name}, include_regexes, None) assert not VSphereCheck._is_excluded(included_vm, {"name": included_vm.name}, include_regexes, None) # Not OK! excluded_host = MockedMOR(spec="HostSystem", name="bar") excluded_vm = MockedMOR(spec="VirtualMachine", name="bar") assert VSphereCheck._is_excluded(excluded_host, {"name": excluded_host.name}, include_regexes, None) assert VSphereCheck._is_excluded(excluded_vm, {"name": excluded_vm.name}, include_regexes, None) # Sample(s) include_regexes = None include_only_marked = True # OK included_vm = MockedMOR(spec="VirtualMachine", name="foo", label=True) assert not VSphereCheck._is_excluded( included_vm, {"customValue": included_vm.customValue}, include_regexes, include_only_marked ) # Not OK included_vm = MockedMOR(spec="VirtualMachine", name="foo") assert VSphereCheck._is_excluded(included_vm, {"customValue": []}, include_regexes, include_only_marked) def test_vms_in_filtered_host_are_filtered(vsphere, instance): """Test that all vms belonging to a filtered host are also filtered""" server_instance = vsphere._get_server_instance(instance) filtered_host = MockedMOR(spec="HostSystem") filtered_vm = MockedMOR(spec="VirtualMachine") non_filtered_host = MockedMOR(spec="HostSystem") non_filtered_vm = MockedMOR(spec="VirtualMachine") mocked_mors_attrs = { filtered_host: {"name": "filtered_host_number_1", "parent": None}, filtered_vm: { "name": "this_vm_is_filtered", "runtime.powerState": vim.VirtualMachinePowerState.poweredOn, "runtime.host": filtered_host, }, non_filtered_host: {"name": "non_filtered_host_number_1", "parent": None}, non_filtered_vm: { "name": "this_vm_is_not_filtered", "runtime.powerState": vim.VirtualMachinePowerState.poweredOn, "runtime.host": non_filtered_host, }, } regex = {'host_include': '^(?!filtered_.+)'} with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, regex, False, []) assert len(obj_list[vim.VirtualMachine]) == 1 assert len(obj_list[vim.HostSystem]) == 1 assert { "mor_type": "vm", "mor": non_filtered_vm, "hostname": "this_vm_is_not_filtered", "tags": ["vsphere_host:non_filtered_host_number_1", "vsphere_type:vm"], } == obj_list[vim.VirtualMachine][0] assert { "mor_type": "host", "mor": non_filtered_host, "hostname": "non_filtered_host_number_1", "tags": ["vsphere_type:host"], } == obj_list[vim.HostSystem][0] def test__get_all_objs(vsphere, instance): """ Test that we don't raise KeyError if the property collector failed to collect some attributes and that we handle the case were there are missing attributes """ server_instance = vsphere._get_server_instance(instance) vm_no_parent = MockedMOR(spec="VirtualMachine") vm_no_powerstate = MockedMOR(spec="VirtualMachine") vm_host_parent = MockedMOR(spec="VirtualMachine") mocked_host = MockedMOR(spec="HostSystem") 
mocked_datastore = MockedMOR(spec="Datastore") mocked_datacenter = MockedMOR(spec="Datacenter") mocked_cluster = MockedMOR(spec="ClusterComputeResource") mocked_mors_attrs = { vm_no_parent: {"name": "vm_no_parent", "runtime.powerState": vim.VirtualMachinePowerState.poweredOn}, vm_no_powerstate: {"name": "vm_no_powerstate"}, vm_host_parent: {"parent": mocked_host, "runtime.powerState": vim.VirtualMachinePowerState.poweredOn}, mocked_host: {"name": "mocked_host", "parent": None}, mocked_datastore: {}, mocked_cluster: {"name": "cluster"}, mocked_datacenter: {"parent": MockedMOR(spec="Folder", name="unknown folder"), "name": "datacenter"}, } with mock.patch("datadog_checks.vsphere.VSphereCheck._collect_mors_and_attributes", return_value=mocked_mors_attrs): obj_list = vsphere._get_all_objs(server_instance, None, False, []) assert len(obj_list[vim.VirtualMachine]) == 2 assert { "mor_type": "vm", "mor": vm_no_parent, "hostname": "vm_no_parent", "tags": ["vsphere_host:unknown", "vsphere_type:vm"], } in obj_list[vim.VirtualMachine] assert { "mor_type": "vm", "mor": vm_host_parent, "hostname": "unknown", "tags": ["vsphere_host:mocked_host", "vsphere_host:unknown", "vsphere_type:vm"], } in obj_list[vim.VirtualMachine] assert len(obj_list[vim.HostSystem]) == 1 assert { "mor_type": "host", "mor": mocked_host, "hostname": "mocked_host", "tags": ["vsphere_type:host"], } in obj_list[vim.HostSystem] assert len(obj_list[vim.Datastore]) == 1 assert { "mor_type": "datastore", "mor": mocked_datastore, "hostname": None, "tags": ["vsphere_datastore:unknown", "vsphere_type:datastore"], } in obj_list[vim.Datastore] assert len(obj_list[vim.Datacenter]) == 1 assert { "mor_type": "datacenter", "mor": mocked_datacenter, "hostname": None, "tags": ["vsphere_folder:unknown", "vsphere_datacenter:datacenter", "vsphere_type:datacenter"], } in obj_list[vim.Datacenter] assert len(obj_list[vim.ClusterComputeResource]) == 1 assert { "mor_type": "cluster", "mor": mocked_cluster, "hostname": None, "tags": ["vsphere_cluster:cluster", "vsphere_type:cluster"], } in obj_list[vim.ClusterComputeResource] def test__collect_mors_and_attributes(vsphere, instance): """ Test that we check for errors when collecting properties with property collector """ server_instance = vsphere._get_server_instance(instance) with mock.patch("datadog_checks.vsphere.vsphere.vmodl"): obj = MagicMock(missingSet=None, obj="obj") result = MagicMock(token=None, objects=[obj]) server_instance.content.propertyCollector.RetrievePropertiesEx.return_value = result log = MagicMock() vsphere.log = log mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_not_called() assert len(mor_attrs) == 1 obj.missingSet = [MagicMock(path="prop", fault="fault")] mor_attrs = vsphere._collect_mors_and_attributes(server_instance) log.error.assert_called_once_with('Unable to retrieve property %s for object %s: %s', 'prop', 'obj', 'fault') assert len(mor_attrs) == 1 def test__cache_morlist_raw(vsphere, instance): """ Explore the vCenter infrastructure to discover hosts, virtual machines. 
Input topology: ``` rootFolder - datacenter1 - compute_resource1 - host1 # Filtered out - host2 - folder1 - datacenter2 - compute_resource2 - host3 - vm1 # Not labeled - vm2 # Filtered out - vm3 # Powered off - vm4 ``` """ # Samples with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): instance["host_include_only_regex"] = "host[2-9]" instance["vm_include_only_regex"] = "vm[^2]" instance["include_only_marked"] = True # Discover hosts and virtual machines vsphere._cache_morlist_raw(instance) # Assertions: 1 labeled+monitored VM + 2 hosts + 2 datacenters + 2 clusters + 1 datastore. assertMOR(vsphere, instance, count=8) # ...on hosts assertMOR(vsphere, instance, spec="host", count=2) tags = [ "vcenter_server:vsphere_mock", "vsphere_folder:rootFolder", "vsphere_datacenter:datacenter1", "vsphere_compute:compute_resource1", "vsphere_cluster:compute_resource1", "vsphere_type:host", ] assertMOR(vsphere, instance, name="host2", spec="host", tags=tags) tags = [ "vcenter_server:vsphere_mock", "vsphere_folder:rootFolder", "vsphere_folder:folder1", "vsphere_datacenter:datacenter2", "vsphere_compute:compute_resource2", "vsphere_cluster:compute_resource2", "vsphere_type:host", ] assertMOR(vsphere, instance, name="host3", spec="host", tags=tags) # ...on VMs assertMOR(vsphere, instance, spec="vm", count=1) tags = [ "vcenter_server:vsphere_mock", "vsphere_folder:folder1", "vsphere_datacenter:datacenter2", "vsphere_compute:compute_resource2", "vsphere_cluster:compute_resource2", "vsphere_host:host3", "vsphere_type:vm", ] assertMOR(vsphere, instance, name="vm4", spec="vm", subset=True, tags=tags) def test_use_guest_hostname(vsphere, instance): # Default value with mock.patch("datadog_checks.vsphere.VSphereCheck._get_all_objs") as mock_get_all_objs, mock.patch( "datadog_checks.vsphere.vsphere.vmodl" ): vsphere._cache_morlist_raw(instance) # Default value assert not mock_get_all_objs.call_args[1]["use_guest_hostname"] # use guest hostname instance["use_guest_hostname"] = True vsphere._cache_morlist_raw(instance) assert mock_get_all_objs.call_args[1]["use_guest_hostname"] with mock.patch("datadog_checks.vsphere.vsphere.vmodl"): # Discover hosts and virtual machines instance["use_guest_hostname"] = True vsphere._cache_morlist_raw(instance) assertMOR(vsphere, instance, spec="vm", count=3) # Fallback on VM name when guest hostname not available assertMOR(vsphere, instance, name="vm1", spec="vm", subset=True) assertMOR(vsphere, instance, name="vm2_guest", spec="vm", subset=True) assertMOR(vsphere, instance, name="vm4_guest", spec="vm", subset=True) def test__process_mor_objects_queue(vsphere, instance): vsphere.log = MagicMock() vsphere._process_mor_objects_queue_async = MagicMock() vsphere._process_mor_objects_queue(instance) # Queue hasn't been initialized vsphere.log.debug.assert_called_once_with( "Objects queue is not initialized yet for instance %s, skipping processing", vsphere._instance_key(instance) ) vsphere.batch_morlist_size = 1 i_key = vsphere._instance_key(instance) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object queue should be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 0 # realtime only for call_args in 
vsphere._process_mor_objects_queue_async.call_args_list: # query_specs parameter should be a list of size 1 since the batch size is 1 assert len(call_args[0][1]) == 1 instance["collect_realtime_only"] = False vsphere._cache_morlist_raw(instance) assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 11 vsphere._process_mor_objects_queue(instance) # Object queue should be empty after processing assert sum(vsphere.mor_objects_queue.size(i_key, res_type) for res_type in RESOURCE_TYPE_METRICS) == 0 assert vsphere._process_mor_objects_queue_async.call_count == 5 # 2 datacenters, 2 clusters, 1 datastore def test_collect_realtime_only(vsphere, instance): """ Test the collect_realtime_only parameter acts as expected """ vsphere._process_mor_objects_queue_async = MagicMock() instance["collect_realtime_only"] = False with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) # Called once to process the 2 datacenters, then 2 clusters, then the datastore assert vsphere._process_mor_objects_queue_async.call_count == 3 instance["collect_realtime_only"] = True vsphere._process_mor_objects_queue_async.reset_mock() with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) assert vsphere._process_mor_objects_queue_async.call_count == 0 def test__cache_metrics_metadata(vsphere, instance): vsphere.metadata_cache = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere.metadata_cache.init_instance.assert_called_once_with(vsphere._instance_key(instance)) vsphere.metadata_cache.set_metadata.assert_called_once() vsphere.metadata_cache.set_metric_ids.assert_called_once() def test__cache_metrics_metadata_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) i_key = vsphere._instance_key(instance) counter = MagicMock() counter.rollupType = "average" counter.key = 1 vsphere.format_metric_name = MagicMock() # New way instance["collection_level"] = 3 server_instance.content.perfManager.QueryPerfCounterByLevel.return_value = [counter] vsphere._cache_metrics_metadata(instance) server_instance.content.perfManager.QueryPerfCounterByLevel.assert_called_once_with(3) assert len(vsphere.metadata_cache._metric_ids[i_key]) == 1 assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter) # Compatibility mode instance["all_metrics"] = False del instance["collection_level"] vsphere.format_metric_name.reset_mock() server_instance.content.perfManager.perfCounter = [counter] vsphere._cache_metrics_metadata(instance) assert not vsphere.metadata_cache._metric_ids[i_key] assert len(vsphere.metadata_cache._metadata[i_key]) == 1 vsphere.format_metric_name.assert_called_once_with(counter, compatibility=True) def test_in_compatibility_mode(vsphere, instance): vsphere.log = MagicMock() instance["collection_level"] = 2 assert not vsphere.in_compatibility_mode(instance) instance["all_metrics"] = True assert not vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert not vsphere.in_compatibility_mode(instance, log_warning=True) vsphere.log.warning.assert_called_once() del instance["collection_level"] vsphere.log.reset_mock() assert vsphere.in_compatibility_mode(instance) vsphere.log.warning.assert_not_called() assert vsphere.in_compatibility_mode(instance, log_warning=True) 
vsphere.log.warning.assert_called_once() def test_format_metric_name(vsphere): counter = MagicMock() counter.groupInfo.key = "group" counter.nameInfo.key = "name" counter.rollupType = "rollup" assert vsphere.format_metric_name(counter, compatibility=True) == "group.name" for rollup, short_rollup in SHORT_ROLLUP.items(): counter.rollupType = rollup assert vsphere.format_metric_name(counter) == "group.name.{}".format(short_rollup) def test_collect_metrics(vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): vsphere.batch_morlist_size = 1 vsphere._collect_metrics_async = MagicMock() vsphere._cache_metrics_metadata(instance) vsphere._cache_morlist_raw(instance) vsphere._process_mor_objects_queue(instance) vsphere.collect_metrics(instance) assert vsphere._collect_metrics_async.call_count == 6 # One for each VM/host, datacenters are not collected for call_args in vsphere._collect_metrics_async.call_args_list: # query_specs parameter should be a list of size 1 since the batch size is 1 assert len(call_args[0][1]) == 1 def test__collect_metrics_async_compatibility(vsphere, instance): server_instance = vsphere._get_server_instance(instance) server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[MagicMock()])] vsphere.mor_cache = MagicMock() vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {"name": "unknown"} vsphere.in_compatibility_mode = MagicMock() vsphere.log = MagicMock() vsphere.in_compatibility_mode.return_value = True vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_called_with('Skipping unknown `%s` metric.', 'unknown') vsphere.log.reset_mock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) vsphere.log.debug.assert_not_called() def test__collect_metrics_async_hostname(vsphere, instance, aggregator): server_instance = vsphere._get_server_instance(instance) result = MagicMock() result.value = [23.4] server_instance.content.perfManager.QueryPerf.return_value = [MagicMock(value=[result])] mor = {"hostname": "foo"} vsphere.mor_cache = MagicMock() vsphere.mor_cache.get_mor.return_value = mor vsphere.metadata_cache = MagicMock() vsphere.metadata_cache.get_metadata.return_value = {"name": "mymetric", "unit": "kb"} vsphere.in_compatibility_mode = MagicMock() vsphere.in_compatibility_mode.return_value = False vsphere._collect_metrics_async(instance, []) aggregator.assert_metric('vsphere.mymetric', value=23.4, hostname="foo") def test_check(vsphere, instance): """ Test the check() method """ with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch.object(vsphere, 'set_external_tags') as set_external_tags: vsphere.check(instance) set_external_tags.assert_called_once() all_the_tags = dict(set_external_tags.call_args[0][0]) assert all_the_tags['vm4'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] assert all_the_tags['host3'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 
'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_type:host', ] assert all_the_tags['vm2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['vm1'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_folder:folder1', 'vsphere_datacenter:datacenter2', 'vsphere_cluster:compute_resource2', 'vsphere_compute:compute_resource2', 'vsphere_host:host3', 'vsphere_host:host3', 'vsphere_type:vm', ] assert all_the_tags['host2'][SOURCE_TYPE] == [ 'vcenter_server:vsphere_mock', 'vsphere_folder:rootFolder', 'vsphere_datacenter:datacenter1', 'vsphere_cluster:compute_resource1', 'vsphere_compute:compute_resource1', 'vsphere_type:host', ] def test_service_check_ko(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: # SmartConnect fails SmartConnect.side_effect = Exception() with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) aggregator.reset() # SmartConnect succeeds, CurrentTime fails server = MagicMock() server.CurrentTime.side_effect = Exception() SmartConnect.side_effect = None SmartConnect.return_value = server with pytest.raises(ConnectionError): check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.CRITICAL, count=1, tags=SERVICE_CHECK_TAGS ) def test_service_check_ok(aggregator, instance): check = disable_thread_pool(VSphereCheck('disk', {}, {}, [instance])) with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): with mock.patch('datadog_checks.vsphere.vsphere.connect.SmartConnect') as SmartConnect: SmartConnect.return_value = get_mocked_server() check.check(instance) aggregator.assert_service_check( VSphereCheck.SERVICE_CHECK_NAME, status=VSphereCheck.OK, tags=SERVICE_CHECK_TAGS ) def test__instance_key(vsphere, instance): assert vsphere._instance_key(instance) == "vsphere_mock" del instance['name'] with pytest.raises(BadConfigError): vsphere._instance_key(instance) def test__should_cache(instance): now = time.time() # do not use fixtures for the check instance, some params are set at # __init__ time and we need to instantiate the check multiple times check = VSphereCheck('vsphere', {}, {}, [instance]) i_key = check._instance_key(instance) # first run should always cache assert check._should_cache(instance, CacheConfig.Morlist) assert check._should_cache(instance, CacheConfig.Metadata) # explicitly set cache expiration times, don't use defaults so we also test # configuration is properly propagated init_config = { 'refresh_morlist_interval': 2 * REFRESH_MORLIST_INTERVAL, 'refresh_metrics_metadata_interval': 2 * REFRESH_METRICS_METADATA_INTERVAL, } check = VSphereCheck('vsphere', init_config, {}, [instance]) # simulate previous runs, set the last execution time in the past check.cache_config.set_last(CacheConfig.Morlist, i_key, now - (2 * REFRESH_MORLIST_INTERVAL)) check.cache_config.set_last(CacheConfig.Metadata, i_key, now - (2 * REFRESH_METRICS_METADATA_INTERVAL)) with mock.patch("time.time", return_value=now): assert not check._should_cache(instance, CacheConfig.Morlist) 
assert not check._should_cache(instance, CacheConfig.Metadata) def alarm_event(from_status='green', to_status='red', message='Some error'): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine') dc = MockedMOR(spec="Datacenter") dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') alarm = MockedMOR(spec="Alarm") alarm_arg = vim.event.AlarmEventArgument(alarm=alarm, name='alarm1') entity = vim.event.ManagedEntityEventArgument(entity=vm, name='vm1') event = vim.event.AlarmStatusChangedEvent( entity=entity, fullFormattedMessage=message, createdTime=now, to=to_status, datacenter=dc_arg, alarm=alarm_arg ) setattr(event, 'from', from_status) # noqa: B009 return event def migrated_event(): now = datetime.utcnow() vm = MockedMOR(spec='VirtualMachine', name='vm1') vm_arg = vim.event.VmEventArgument(vm=vm) host = MockedMOR(spec='HostSystem') host_arg = vim.event.HostEventArgument(host=host, name='host1') host_dest = MockedMOR(spec='HostSystem') host_dest_arg = vim.event.HostEventArgument(host=host_dest, name='host2') dc = MockedMOR(spec='Datacenter') dc_arg = vim.event.DatacenterEventArgument(datacenter=dc, name='dc1') dc_dest = MockedMOR(spec='Datacenter') dc_dest_arg = vim.event.DatacenterEventArgument(datacenter=dc_dest, name='dc2') ds = MockedMOR(spec='Datastore') ds_arg = vim.event.DatastoreEventArgument(datastore=ds, name='ds1') ds_dest = MockedMOR(spec='Datastore') ds_dest_arg = vim.event.DatastoreEventArgument(datastore=ds_dest, name='ds2') event = vim.event.VmBeingHotMigratedEvent( vm=vm_arg, userName='John', fullFormattedMessage='Some error', createdTime=now, host=host_arg, destHost=host_dest_arg, datacenter=dc_arg, destDatacenter=dc_dest_arg, ds=ds_arg, destDatastore=ds_dest_arg, ) return event def test_events(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( "vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar'] ) def test_events_tags(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [migrated_event()] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( "John has launched a hot migration of this virtual machine", exact_match=False, tags=[ 'foo:bar', 'vsphere_host:host1', 'vsphere_host:host2', 'vsphere_datacenter:dc1', 'vsphere_datacenter:dc2', ], ) server_instance = vsphere._get_server_instance(instance) server_instance.content.eventManager.QueryEvents.return_value = [alarm_event()] vsphere.check(instance) aggregator.assert_event( "vCenter monitor status changed on this alarm, it was green and it's now red.", tags=['foo:bar'] ) def test_events_gray_handled(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', message='Went from Gray to Red') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) aggregator.assert_event( "vCenter monitor status changed on this alarm, 
it was gray and it's now red.", tags=['foo:bar'] ) event = alarm_event(from_status='yellow', to_status='gray', message='Went from Yellow to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) aggregator.assert_event( "vCenter monitor status changed on this alarm, it was yellow and it's now gray.", tags=['foo:bar'], alert_type='info', ) def test_events_gray_ignored(aggregator, vsphere, instance): with mock.patch('datadog_checks.vsphere.vsphere.vmodl'): server_instance = vsphere._get_server_instance(instance) event = alarm_event(from_status='gray', to_status='green', message='Went from Gray to Green') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.event_config['vsphere_mock'] = {'collect_vcenter_alarms': True} vsphere.check(instance) assert not aggregator.events event = alarm_event(from_status='green', to_status='gray', message='Went from Green to Gray') server_instance.content.eventManager.QueryEvents.return_value = [event] vsphere.check(instance) assert not aggregator.events
42.363073
120
0.678796
0
0
0
0
0
0
0
0
9,802
0.286717
53f022c5295afcf5069c62aac2f57d65cf97e719
2,147
py
Python
data_steward/constants/validation/email_notification.py
jp3477/curation
41f98d57c8273d9963ad6d466a237c99b63c74be
[ "MIT" ]
1
2021-04-05T18:06:25.000Z
2021-04-05T18:06:25.000Z
data_steward/constants/validation/email_notification.py
jp3477/curation
41f98d57c8273d9963ad6d466a237c99b63c74be
[ "MIT" ]
null
null
null
data_steward/constants/validation/email_notification.py
jp3477/curation
41f98d57c8273d9963ad6d466a237c99b63c74be
[ "MIT" ]
null
null
null
MANDRILL_API_KEY = 'MANDRILL_API_KEY'
UNSET_MANDRILL_API_KEY_MSG = f"Mandrill API key not set in environment variable {MANDRILL_API_KEY}"

CONTACT_LIST_QUERY = """
SELECT *
FROM `{{project}}.{{dataset}}.{{contact_table}}`
"""

EHR_OPERATIONS = 'EHR Ops'
EHR_OPS_ZENDESK = '[email protected]'
DATA_CURATION_LISTSERV = '[email protected]'
NO_REPLY_ADDRESS = '[email protected]'
NO_DATA_STEWARD = 'no data steward'

# HPO contact list table columns
SITE_NAME = 'site_name'
HPO_ID = 'hpo_id'
SITE_POINT_OF_CONTACT = 'site_point_of_contact'

# Mandrill API constants
MAIL_TO = 'mail_to'
EHR_OPS_SITE_URL = 'https://sites.google.com/view/ehrupload'

# Email content
EMAIL_BODY = """
<p style="font-size:115%;">Hi {{ site_name }},</p>

<p style="font-size:115%;">Your submission <b>{{ folder }}</b>
{% if submission_error %}was NOT successfully loaded on {{ timestamp }}.<br>
{% else %}was successfully loaded on {{ timestamp }}.<br>
{% endif %}
Please review the <code>results.html</code> submission report attached to this email{% if submission_error %}<br>
and resolve the errors before making a new submission{% endif %}.<br>
If any of your files have not been successfully uploaded, please run the
<a href="https://github.com/all-of-us/aou-ehr-file-check">local file check</a> before making your submission.<br>
To view the full set of curation reports, please visit the submission folder in your
GCS bucket <a href="{{ submission_folder_url }}">here</a>.<br>
For more information on the reports and how to download them, please refer to our
<a href="{{ ehr_ops_site_url }}">EHR Ops website</a>.</p>

<p style="font-size:115%;">You are receiving this email because you are listed as a point of contact for HPO Site <em>{{ site_name }}</em>.<br>
If you have additional questions or wish to no longer receive these emails, please reply/send an email to
<a href="mailto:{{ eo_zendesk }}">{{ eo_zendesk }}</a>.</p>

<p style="font-size:115%;">EHR Ops team, DRC<br>
<em>All of Us</em> Research Program<br>
<img src="cid:{{ aou_logo }}"/></p>
"""

AOU_LOGO = 'aou_logo'
AOU_LOGO_PNG = 'all-of-us-logo.png'
39.036364
116
0.726129
0
0
0
0
0
0
0
0
1,841
0.857476
53f15f1ad7b41be043cf58489197157314abeded
2,110
py
Python
clip/clip.py
keshav11/clip
f426dee5c3a6885ddeba20d450d85fc71951c5ca
[ "MIT" ]
1
2018-03-27T05:13:43.000Z
2018-03-27T05:13:43.000Z
clip/clip.py
keshav11/clip
f426dee5c3a6885ddeba20d450d85fc71951c5ca
[ "MIT" ]
1
2018-03-27T14:57:05.000Z
2018-03-27T14:57:05.000Z
clip/clip.py
keshav11/clip
f426dee5c3a6885ddeba20d450d85fc71951c5ca
[ "MIT" ]
null
null
null
import os
import argparse
from pathlib import Path

CLIP_FILE = os.path.join(Path.home(), '.clip')
TEMP_FILE = '.TEMP_FILE'


def add_text(key, text):
    if os.path.exists(CLIP_FILE):
        open_mode = 'a'
    else:
        open_mode = 'w+'
    with open(CLIP_FILE, open_mode) as clip_file:
        clip_file.write(key + ": " + text + "\n")


def list_texts():
    with open(CLIP_FILE, 'r') as clip_file:
        for text in clip_file.read().split('\n'):
            print(text)


def get_text(key):
    with open(CLIP_FILE, 'r') as clip_file:
        for text in clip_file.read().split('\n'):
            key_val = text.split(':')
            if key_val[0].strip() == key:
                print(key_val[1].strip(), end='')


def delete_text(key):
    exists = False
    with open(TEMP_FILE, 'w+') as temp_file:
        with open(CLIP_FILE, 'r') as clip_file:
            for text in clip_file.read().split('\n'):
                if text.strip() == "":
                    continue
                key_val = text.split(':')
                if key_val[0].strip() != key:
                    temp_file.write(text+"\n")
                else:
                    exists = True
    if not exists:
        print("key:", key, "was not found in the clip store")
    try:
        os.rename(TEMP_FILE, CLIP_FILE)
    except Exception as ex:
        os.remove(TEMP_FILE)
        print('remove text failed.', ex)


def main():
    parser = argparse.ArgumentParser(description='clips and saves texts from the command line')
    parser.add_argument('-a', '--add', nargs=2)
    parser.add_argument('-g', '--get', nargs=1)
    parser.add_argument('-d', '--delete', nargs=1)
    parser.add_argument('-l', '--list', action='store_true')
    args = parser.parse_args()
    if args.add:
        key, value = args.add[0], args.add[1]
        add_text(key, value)
    elif args.list:
        list_texts()
    elif args.get:
        key = args.get[0]
        get_text(key)
    elif args.delete:
        key = args.delete[0]
        delete_text(key)
    else:
        parser.print_usage()


if __name__ == '__main__':
    main()
26.708861
95
0.555924
0
0
0
0
0
0
0
0
248
0.117536
53f16f379316b618805c2343722f2905bbfec891
2,383
py
Python
tests/unit/test_nsga2.py
learsi1911/GAMA_pygmo_v4
459807db352dd1c9f9c1e0e322f8c1e9b5abbca0
[ "Apache-2.0" ]
49
2018-10-22T06:05:29.000Z
2021-09-07T20:12:36.000Z
tests/unit/test_nsga2.py
learsi1911/GAMA_pygmo_v4
459807db352dd1c9f9c1e0e322f8c1e9b5abbca0
[ "Apache-2.0" ]
102
2018-10-02T12:00:47.000Z
2021-02-24T14:35:30.000Z
tests/unit/test_nsga2.py
learsi1911/GAMA_pygmo_v4
459807db352dd1c9f9c1e0e322f8c1e9b5abbca0
[ "Apache-2.0" ]
11
2021-06-04T11:56:19.000Z
2022-03-21T20:21:15.000Z
from typing import List, Tuple

from gama.genetic_programming.nsga2 import (
    NSGAMeta,
    fast_non_dominated_sort,
    crowding_distance_assignment,
)


def _tuples_to_NSGAMeta(tuples: List[Tuple]) -> List[NSGAMeta]:
    """ Converts a list of tuples to NSGAMeta objects. """
    # Can't declare it directly in a loop as it does not create a new scope.
    def fetch_value(i):
        return lambda x: x[i]

    metrics = [fetch_value(i) for i in range(len(tuples[0]))]
    return [NSGAMeta(t, metrics) for t in tuples]


def test_nsgameta_value_assignment():
    pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])
    three_five, five_three, four_four = pareto

    assert three_five.values == (3, 5)
    assert five_three.values == (5, 3)
    assert four_four.values == (4, 4)


def test_dominates():
    pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (2, 4)])
    three_five, five_three, two_four = pareto

    assert not three_five.dominates(five_three)
    assert not five_three.dominates(three_five)

    assert three_five.dominates(two_four)
    assert not two_four.dominates(three_five)

    assert not five_three.dominates(two_four)
    assert not two_four.dominates(five_three)


def test_crowding_distance_assignment():
    pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4)])
    three_five, five_three, four_four = pareto

    crowding_distance_assignment(pareto)

    assert three_five.distance == float("inf")
    assert five_three.distance == float("inf")
    assert four_four.distance == 2


def test_crowding_distance_assignment_inf():
    pareto = _tuples_to_NSGAMeta([(3, float("inf")), (5, 3), (4, 4)])
    three_inf, five_three, four_four = pareto

    crowding_distance_assignment(pareto)

    assert three_inf.distance == float("inf")
    assert five_three.distance == float("inf")
    # In our implementation, we ignore 'axis' that contain inf values.
    assert four_four.distance == 1


def test_crowd_compare():
    pareto = _tuples_to_NSGAMeta([(3, 5), (5, 3), (4, 4), (4.01, 3.99), (4.5, 3.5)])
    three_five, five_three, four_four, approx_four_four, half_half = pareto

    fast_non_dominated_sort(pareto)  # assigns rank
    crowding_distance_assignment(pareto)  # assigns distance

    assert all([three_five.crowd_compare(other) == -1 for other in pareto[2:]])
    assert all([five_three.crowd_compare(other) == -1 for other in pareto[2:]])
33.56338
84
0.698699
0
0
0
0
0
0
0
0
250
0.10491
53f1e3a9ae5af85a04a5bf0c18896233f3416fe3
2,738
py
Python
stac_ingest/utils/tds.py
crim-ca/stac-ingest
e4cc2a66fee4b86ec238f139135d78215ec91ea4
[ "Apache-2.0" ]
null
null
null
stac_ingest/utils/tds.py
crim-ca/stac-ingest
e4cc2a66fee4b86ec238f139135d78215ec91ea4
[ "Apache-2.0" ]
null
null
null
stac_ingest/utils/tds.py
crim-ca/stac-ingest
e4cc2a66fee4b86ec238f139135d78215ec91ea4
[ "Apache-2.0" ]
null
null
null
# File taken from https://github.com/Ouranosinc/pavics-vdb/blob/master/catalog/tds.py
"""Utility function to parse metadata from a THREDDS Data Server catalog."""


def walk(cat, depth=1):
    """Return a generator walking a THREDDS data catalog for datasets.

    Parameters
    ----------
    cat : TDSCatalog
      THREDDS catalog.
    depth : int
      Maximum recursive depth. Setting 0 will return only datasets within the top-level catalog.
      If None, depth is set to 1000.
    """
    yield from cat.datasets.items()

    if depth is None:
        depth = 1000

    if depth > 0:
        for name, ref in cat.catalog_refs.items():
            child = ref.follow()
            yield from walk(child, depth=depth-1)


def attrs_from_ds(ds):
    """Extract attributes from TDS Dataset."""
    url = ds.access_urls["NCML"]
    attrs = attrs_from_ncml(url)
    attrs["__services__"] = ds.access_urls

    return attrs


def attrs_from_ncml(url):
    """Extract attributes from NcML file.

    Parameters
    ----------
    url : str
      Link to NcML service of THREDDS server for a dataset.

    Returns
    -------
    dict
      Global attribute values keyed by facet names, with variable attributes in `__variable__` nested dict,
      and additional specialized attributes in `__group__` nested dict.
    """
    import lxml.etree
    import requests

    parser = lxml.etree.XMLParser(encoding='UTF-8')
    ns = {"ncml": "http://www.unidata.ucar.edu/namespaces/netcdf/ncml-2.2"}

    # Parse XML content - UTF-8 encoded documents need to be read as bytes
    xml = requests.get(url).content
    doc = lxml.etree.fromstring(xml, parser=parser)
    nc = doc.xpath("/ncml:netcdf", namespaces=ns)[0]

    # Extract global attributes
    out = _attrib_to_dict(nc.xpath("ncml:attribute", namespaces=ns))

    # Extract group attributes
    gr = {}
    for group in nc.xpath("ncml:group", namespaces=ns):
        gr[group.attrib["name"]] = _attrib_to_dict(group.xpath("ncml:attribute", namespaces=ns))

    # Extract variable attributes
    va = {}
    for variable in nc.xpath("ncml:variable", namespaces=ns):
        if '_CoordinateAxisType' in variable.xpath("ncml:attribute/@name", namespaces=ns):
            continue
        va[variable.attrib["name"]] = _attrib_to_dict(variable.xpath("ncml:attribute", namespaces=ns))

    out["__group__"] = gr
    out["__variable__"] = va

    return out


def _attrib_to_dict(elems):
    """Convert element attributes to dictionary.

    Ignore attributes with names starting with _
    """
    hidden_prefix = "_"
    out = {}
    for e in elems:
        a = e.attrib
        if a["name"].startswith(hidden_prefix):
            continue
        out[a["name"]] = a["value"]
    return out
29.44086
111
0.648283
0
0
559
0.204164
0
0
0
0
1,400
0.511322
53f27d7f999c3ddce62ec7074bca13f18a96eb7b
4,484
py
Python
tact/util.py
brunel-physics/mva_scikit
b0182da89efa466461aaf2cff4387c821df1758b
[ "BSD-3-Clause" ]
null
null
null
tact/util.py
brunel-physics/mva_scikit
b0182da89efa466461aaf2cff4387c821df1758b
[ "BSD-3-Clause" ]
null
null
null
tact/util.py
brunel-physics/mva_scikit
b0182da89efa466461aaf2cff4387c821df1758b
[ "BSD-3-Clause" ]
2
2020-05-18T19:52:32.000Z
2022-01-24T10:07:35.000Z
# -*- coding: utf-8 -*-

"""
Module containing miscellaneous utility functions.
"""

from __future__ import (absolute_import, division, print_function,
                        unicode_literals)

import collections
import itertools

import numpy as np


class BinaryTree(object):
    def __init__(self):
        self.left = None
        self.right = None
        self.val = None


def deep_update(d1, d2):
    """
    Adds key-value pairs in d2 to d1. Conflicts are resolved in favour of d2.

    Recurses into all values in d2 which belong to the collections.Mapping
    abstract base class.

    Parameters
    ----------
    d1 : collections.Mapping
        Base dictionary
    d2 : collections.Mapping
        Dictionary with updated values

    Returns
    -------
    d1 : collections.Mapping
        Updated dictionary
    """

    for k, v in d2.iteritems():
        if isinstance(v, collections.Mapping):
            d1[k] = deep_update(d1.get(k, {}), v)
        else:
            d1[k] = v
    return d1


def nodes(tree):
    """
    Return a list of values at every node of a tree.

    Parameters
    ----------
    tree : BinaryTree
        BinaryTree to extract nodes from.

    Returns
    -------
    nodelist : list
        List of values at tree nodes.
    """

    nodelist = []

    def _get_nodes(tree):
        """
        Build up a list of nodes.

        Parameters
        ----------
        tree : BinaryTree
            BinaryTree to extract nodes from.

        Returns
        -------
        None
        """

        nodelist.append(tree.val)
        try:
            _get_nodes(tree.left)
        except AttributeError:
            nodelist.append(tree.left)
        try:
            _get_nodes(tree.right)
        except AttributeError:
            nodelist.append(tree.right)

    _get_nodes(tree)

    return nodelist


def maenumerate(marr):
    """
    Multidimensional index iterator for masked arrays.

    Return an iterator yielding pairs of array coordinates and values, with
    masked values skipped.

    Parameters
    ----------
    marr : MaskedArray
        Input array.
    """

    for i, m in itertools.izip(np.ndenumerate(marr), ~marr.mask.ravel()):
        if m:
            yield i


def corrcoef(x, y=None, rowvar=True, fweights=None, aweights=None):
    """
    Return Pearson product-moment correlation coefficients.

    This is a copy of the implementation found in numpy, with the removal of
    the deprecated bias and ddof keyword arguments, and the addition of the
    fweights and aweights arguments, which are passed to np.cov.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller
        for observations considered less "important". If ``ddof=0`` the array
        of weights can be used to assign probabilities to observation vectors.

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.
    """

    c = np.cov(x, y, rowvar, fweights=fweights, aweights=aweights)
    try:
        d = np.diag(c)
    except ValueError:
        # scalar covariance
        # nan if incorrect value (nan, inf, 0), 1 otherwise
        return c / c
    stddev = np.sqrt(d.real)
    c /= stddev[:, None]
    c /= stddev[None, :]

    # Clip real and imaginary parts to [-1, 1]. This does not guarantee
    # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without
    # excessive work.
    np.clip(c.real, -1, 1, out=c.real)
    if np.iscomplexobj(c):
        np.clip(c.imag, -1, 1, out=c.imag)

    return c
26.222222
79
0.61686
124
0.027654
379
0.084523
0
0
0
0
2,988
0.666369
53f2926766ffb4a7606e6a1c06800d6ce10ac775
3,893
py
Python
src/stochastic_tour.py
DavidNKraemer/ams553-final-project
fc23fe5f126a8bd9ea593c0b339883ec71820a05
[ "MIT" ]
null
null
null
src/stochastic_tour.py
DavidNKraemer/ams553-final-project
fc23fe5f126a8bd9ea593c0b339883ec71820a05
[ "MIT" ]
null
null
null
src/stochastic_tour.py
DavidNKraemer/ams553-final-project
fc23fe5f126a8bd9ea593c0b339883ec71820a05
[ "MIT" ]
null
null
null
import numpy as np
import random
from collections import namedtuple


def generate_prob_matrix(n):
    matrix = np.random.rand(n, n)

    for i in range(n):
        matrix[i][i] = 0

    for i in range(n):
        matrix[i] = (1/np.sum(matrix[i]))*matrix[i]

    return matrix


def categorical(p):
    return np.random.choice(len(p), 1, p=p)[0]


Drone = namedtuple('Drone', 'speed probability')
Site = namedtuple('Site', 'location')


class System:

    def __init__(self, sites, drones):
        self.sites = {}
        self.drones = {}

        n = len(sites)

        for i, drone in enumerate(drones):
            self.drones[i] = drone

        for i, site in enumerate(sites):
            self.sites[i] = site

        distance = np.zeros([n, n])
        for i in range(n):
            for j in range(n):
                if i < j:
                    x = np.subtract(sites[i], sites[j])
                    d = np.linalg.norm(x)
                    distance[i][j] = d
                    distance[j][i] = d

        self.distance = distance

    def get_site(self, site_id):
        return self.sites[site_id]

    def get_drone(self, drone_id):
        return self.drones[drone_id]

    def compute_path_distance(self, path):
        n = len(path)
        d = 0
        for i in range(n - 1):
            d += self.distance[path[i]][path[i + 1]]
        return d

    def compute_path_time(self, path, drone_id):
        d = self.compute_path_distance(path)
        return d/self.get_drone(drone_id).speed

    def generate_path_of_length(self, length, drone_id):
        path = []
        P = self.get_drone(drone_id).probability
        num_sites = len(self.sites)
        s = categorical([1/num_sites]*num_sites)
        path.append(s)
        site = s
        for i in range(length):
            site = categorical(P[site])
            path.append(site)
        return path

    def generate_path(self, s, t, drone_id):
        path = [s]
        P = self.get_drone(drone_id).probability
        site = categorical(P[s])
        path.append(site)
        while site != t:
            site = categorical(P[site])
            path.append(site)
        return path

    @staticmethod
    def generate_random_system(n, k):
        locations = np.random.rand(n, 2)
        sites = []
        for i in locations:
            sites.append(Site(i))

        drones = []
        for i in range(k):
            speed = abs(random.random())
            probability = generate_prob_matrix(n)
            drones.append(Drone(speed, probability))

        return System(sites, drones)


def _compute_arrival_times(path, drone_id, sites, speed):
    arrival_times = []
    t = 0
    for i in range(len(path) - 1):
        t += system.compute_path_time(path[i:i+2], drone_id=drone_id)
        arrival_times.append((drone_id, path[i], path[i+1], t))
    return arrival_times


def _generate_arrival_times(system, num_drones, length):
    arrival_times = [[] for _ in range(len(system.sites))]

    events = []
    for i in range(system):
        pass
        events.extend(compute_arrival_times(path, i))

    def get_key(item):
        return item[3]

    events = sorted(events, key=get_key)

    for event in events:
        drone_id = event[0]
        site_id = event[2]
        time = event[3]
        arrival_times[site_id].append((drone_id, time))

    return arrival_times


def compute_cost(system, n):
    arrival_times = generate_arrival_times(system, n)
    interarrival_times = [[] for _ in range(len(system.sites))]
    for i in range(len(arrival_times)):
        arrivals = arrival_times[i]
        for j in range(len(arrivals) - 1):
            interarrival_times[i].append(arrivals[j+1][1] - arrivals[j][1])

    interarrival_avgs = [compute_average(i) for i in interarrival_times]

    return max(interarrival_avgs)


def compute_average(data):
    return (1/len(data))*sum(data)
25.781457
75
0.5813
2,145
0.550989
0
0
404
0.103776
0
0
42
0.010789
53f4891624f4d3bc5f0cf1971fce25d204c1cf18
1,325
py
Python
orbit/actions/conditional_action_test.py
mcasanova1445/models
37be0fdb4abccca633bb3199a4e6f3f71cd174d9
[ "Apache-2.0" ]
1
2020-09-14T10:46:07.000Z
2020-09-14T10:46:07.000Z
orbit/actions/conditional_action_test.py
mdsaifhaider/models
7214e17eb425963ec3d0295be215d5d26deaeb32
[ "Apache-2.0" ]
8
2020-05-19T00:52:30.000Z
2020-06-04T23:57:20.000Z
orbit/actions/conditional_action_test.py
mdsaifhaider/models
7214e17eb425963ec3d0295be215d5d26deaeb32
[ "Apache-2.0" ]
2
2021-10-07T04:47:04.000Z
2021-12-18T04:18:19.000Z
# Copyright 2022 The Orbit Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for orbit.actions.conditional_action."""

from orbit import actions

import tensorflow as tf


class ConditionalActionTest(tf.test.TestCase):

  def test_conditional_action(self):
    # Define a function to raise an AssertionError, since we can't in a lambda.
    def raise_assertion(arg):
      raise AssertionError(str(arg))

    conditional_action = actions.ConditionalAction(
        condition=lambda x: x['value'], action=raise_assertion)

    conditional_action({'value': False})  # Nothing is raised.
    with self.assertRaises(AssertionError) as ctx:
      conditional_action({'value': True})
    self.assertEqual(ctx.exception.message, "{'value': True}")


if __name__ == '__main__':
  tf.test.main()
33.125
79
0.739623
570
0.430189
0
0
0
0
0
0
782
0.590189
53f4cffa9d98d6fc50ab66c96fe1f4f487091562
880
py
Python
Customizations/Tagging/show_tags.task.py
phnomcobra/valarie-content
b1f6242605badd2b0b2e53c4320f5d963b5e0b21
[ "MIT" ]
null
null
null
Customizations/Tagging/show_tags.task.py
phnomcobra/valarie-content
b1f6242605badd2b0b2e53c4320f5d963b5e0b21
[ "MIT" ]
null
null
null
Customizations/Tagging/show_tags.task.py
phnomcobra/valarie-content
b1f6242605badd2b0b2e53c4320f5d963b5e0b21
[ "MIT" ]
null
null
null
#!/usr/bin/python
################################################################################
# DOCUMENTS
#
# Justin Dierking
# [email protected]
# 614 692 2050
#
# 04/22/2018 Original Construction
################################################################################

import traceback
import json

class Task:
    def __init__(self):
        self.output = []
        self.status = STATUS_NOT_EXECUTED

    def execute(self, cli):
        try:
            keys = cli.AGTCollections("tags")

            self.status = STATUS_SUCCESS

            for key in keys.find():
                #key.set()
                self.output.append(json.dumps(key.object, indent = 4))
        except Exception:
            self.status = STATUS_EXCEPTION
            self.output.append(traceback.format_exc())

        return self.status
25.882353
80
0.465909
552
0.627273
0
0
0
0
0
0
303
0.344318
53f8fdaf42e35a017e458aac366d4271e4baa22e
1,932
py
Python
examples/python/masked_hist.py
DerThorsten/seglib
4655079e390e301dd93e53f5beed6c9737d6df9f
[ "MIT" ]
null
null
null
examples/python/masked_hist.py
DerThorsten/seglib
4655079e390e301dd93e53f5beed6c9737d6df9f
[ "MIT" ]
null
null
null
examples/python/masked_hist.py
DerThorsten/seglib
4655079e390e301dd93e53f5beed6c9737d6df9f
[ "MIT" ]
null
null
null
import vigra
import numpy
import pylab
from seglib import cgp2d
from seglib.preprocessing import norm01
import seglib.edge_detectors.pixel as edp
import seglib.region_descriptors.pixel as rdp
from seglib.preprocessing import norm01
from seglib.histogram import jointHistogram,histogram
from seglib.region_descriptors.pixel.sift import denseSift

# change me to your path
img = "img/text.jpg"
img = numpy.squeeze(vigra.readImage(img))#[0:75,0:75,:]

binCount = 30
sigma = 1.5
histImg = numpy.zeros(img.shape[0:2]+(binCount*3,))
imgBig = None

sizes = [3,4,5,8,10,15,20,25,40,100]
scalings = [5,10,15]

for size in sizes:
    for scaling in scalings:
        size = int (size)
        scaling = float(scaling)
        print size,scaling
        labels ,nseg= vigra.analysis.slicSuperpixels(vigra.colors.transform_RGB2Lab(img),scaling,size)
        labels = vigra.analysis.labelImage(labels).astype(numpy.uint64)
        cgp,tgrid = cgp2d.cgpFromLabels(labels)
        if imgBig is None:
            imgBig=vigra.sampling.resize(img,cgp.shape)
        #cgp2d.visualize(imgBig,cgp=cgp)
        print "accumulate cell "
        hist = cgp.accumulateCellHistogram(cellType=2,image=img,binCount=binCount,sigma=sigma)
        hist = hist.reshape([cgp.numCells(2),-1])

        for c in range(histImg.shape[2]):
            histImg[:,:,c] += (size)*cgp.featureToImage(cellType=2,features=hist[:,c],ignoreInactive=False,useTopologicalShape=False)

histImg=numpy.require(histImg,dtype=numpy.float32)
histImg=vigra.taggedView(histImg, 'xyc')
histImg = vigra.gaussianSmoothing(histImg,sigma=1.0)

#for c in range(histImg.shape[2]):
#    #print c
#    pylab.imshow( numpy.swapaxes( norm01(histImg[:,:,c]) ,0,1) )
#    pylab.show()
#
#
print "hist",hist.shape

imgdt = rdp.deepDetexturize(srcImg=img,img=histImg,nIteration=10,
    nCluster=10,reductionAlg='pca',nldEdgeThreshold=10.0,nldScale=10.0,distance=None)#'cityblock')
27.6
133
0.70911
0
0
0
0
0
0
0
0
283
0.14648
53fa17d1fb343f99d7928294d83a0d41844594ce
748
py
Python
backup/models.py
helwete/simple-backup
c7dd1a08d398f5b4005c187e274e192b2e024f30
[ "MIT" ]
null
null
null
backup/models.py
helwete/simple-backup
c7dd1a08d398f5b4005c187e274e192b2e024f30
[ "MIT" ]
null
null
null
backup/models.py
helwete/simple-backup
c7dd1a08d398f5b4005c187e274e192b2e024f30
[ "MIT" ]
null
null
null
from datetime import date

from django.conf import settings
from django.db import models


# Create your models here.
def user_directory_path(instance, filename):
    # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
    today = date.today()
    return '{0}/{2}/{1}'.format(instance.user.username, filename, today.strftime("%Y/%m/%d/"))


class Upload(models.Model):
    uploaded_file = models.FileField(null=True, blank=True, upload_to=user_directory_path)
    file_name = models.CharField(max_length=255, null=True)
    date_uploaded = models.DateField(auto_now_add=True, null=True)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return self.uploaded_file.name
35.619048
94
0.743316
400
0.534759
0
0
0
0
0
0
108
0.144385
53fa743e6670e6a8830a736afc87f494f4f511b4
2,713
py
Python
Kmeans Cluster/Kmeans_Compare.py
Jojoxiao/Machine-Learning-for-Beginner-by-Python3
71b91c9cba5803bd78d4d31be6dabb1d3989e968
[ "MIT" ]
397
2018-05-28T02:07:32.000Z
2022-03-30T09:53:37.000Z
Kmeans Cluster/Kmeans_Compare.py
976634681/Machine-Learning-for-Beginner-by-Python3
d9effcbb1b390dc608a0f4c0a28f0ad03892047a
[ "MIT" ]
4
2019-01-14T16:41:02.000Z
2021-03-11T13:23:06.000Z
Kmeans Cluster/Kmeans_Compare.py
976634681/Machine-Learning-for-Beginner-by-Python3
d9effcbb1b390dc608a0f4c0a28f0ad03892047a
[ "MIT" ]
235
2018-06-28T05:31:40.000Z
2022-03-11T03:20:07.000Z
#-*- coding:utf-8 -*-
# &Author  AnFany

# Import the two implementations being compared
import Kmeans_AnFany as K_Af  # AnFany
import Kmeans_Sklearn as K_Sk  # Sklearn
import matplotlib.pyplot as plt
from pylab import mpl  # display Chinese characters in the plots
mpl.rcParams['font.sans-serif'] = ['FangSong']  # set the Chinese font
mpl.rcParams['axes.unicode_minus'] = False
import numpy as np

# Generate the data set with sklearn
from sklearn.datasets import make_blobs
X, Y = make_blobs(n_samples=600, centers=6, n_features=2)

# Scatter plot of the training data
def fig_scatter(exdata, eydata, titl='训练数据散点图', co=['r', 'g', 'k', 'b', 'y', 'm'], marker=['o','^','H','v','d','>']):
    typeclass = sorted(list(set(eydata)))
    for ii in range(len(typeclass)):
        datax = exdata[eydata == typeclass[ii]]
        plt.scatter(datax[:, 0], datax[:, -1], c=co[ii], s=50, marker=marker[ii])
    plt.title(titl)
    #plt.legend(['%d类'%i for i in typeclass], bbox_to_anchor=(1.2, 0.9))
    plt.xlabel('特征1')
    plt.ylabel('特征2')

# Run the two implementations
# AnFany
kresult = K_Af.op_kmeans(X, countcen=6)

# Sklearn
sk = K_Sk.KMeans(init='k-means++', n_clusters=6, n_init=10)
train = sk.fit(X)
result = sk.predict(X)
skru = K_Sk.trans(result)

# Scatter plot of the clusters produced by each algorithm
def sca(Xdata, Center, signdict, co=['r', 'g', 'y', 'b', 'c', 'm'], marker=['o','^','H','s','d','*'], titl = 'AnFany 结果'):
    du = 1
    for jj in signdict:
        xdata = Xdata[signdict[jj]]
        plt.scatter(xdata[:, 0], xdata[:, -1], c=co[jj], s=50, marker=marker[jj], label='%d类' % jj)  # plot the samples
    for ss in Center:
        if du:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8', label='类别中心')  # plot the cluster centers
            du = 0
        else:
            plt.scatter(ss[0], ss[1], c='k', s=100, marker='8')  # plot the cluster centers
    plt.legend(bbox_to_anchor=(1.2, 1))
    plt.title(titl)
    plt.xlabel('特征1')
    plt.ylabel('特征2')

# Euclidean distance
def dis(sample, center):
    cen = np.array([center])
    sample = np.array(sample)
    if len(sample) != 0:
        usb = np.sum((sample - cen) ** 2, axis=1) ** 0.5
        return usb
    else:
        return 0

# Cost value of the final clustering result
def Cost(Xdata, typedict):
    center = {}
    for kk in typedict:
        center[kk] = np.mean(Xdata[typedict[kk]], axis=0)  # cluster mean
    cio = 0
    for cc in typedict:
        cio += np.sum(dis(Xdata[typedict[cc]], center[cc]))
    return cio


# Show the final results
plt.subplot(2, 2, 1)
fig_scatter(X, Y)

plt.subplot(2, 2, 2)
sca(X, kresult[0], kresult[2])

plt.subplot(2, 2, 3)
sca(X, train.cluster_centers_, skru, titl='Sklearn 结果')

plt.subplot(2, 2, 4)
plt.axis('off')
plt.text(0.3, 0.6, 'AnFany 最终的分类成本值为:%.5f'%Cost(X, kresult[2]))
plt.text(0.3, 0.3, 'Sklearn 最终的分类成本值为:%.5f'%Cost(X, skru))
plt.show()
25.59434
123
0.573535
0
0
0
0
0
0
0
0
846
0.281344
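For readers skimming the K-means comparison above, a minimal, self-contained sketch of the within-cluster cost it reports (the sum of Euclidean distances from each sample to its cluster mean); the toy data and cluster assignment below are made up for illustration:

import numpy as np

def within_cluster_cost(X, clusters):
    # clusters maps a cluster label to the list of row indices assigned to it
    total = 0.0
    for label, idx in clusters.items():
        pts = X[idx]
        center = pts.mean(axis=0)
        total += np.sqrt(((pts - center) ** 2).sum(axis=1)).sum()
    return total

X = np.array([[0.0, 0.0], [0.0, 1.0], [5.0, 5.0], [5.0, 6.0]])
clusters = {0: [0, 1], 1: [2, 3]}
print(within_cluster_cost(X, clusters))  # 2.0: each point is 0.5 from its cluster mean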
53faaa8c310593f3046382b5d7e3fa8922d7e1b7
5,544
py
Python
control_panel.py
Stayermax/5dof-bartender-robot
dd04303afd2c252e6f7105e33ba35b01f3915194
[ "MIT" ]
null
null
null
control_panel.py
Stayermax/5dof-bartender-robot
dd04303afd2c252e6f7105e33ba35b01f3915194
[ "MIT" ]
null
null
null
control_panel.py
Stayermax/5dof-bartender-robot
dd04303afd2c252e6f7105e33ba35b01f3915194
[ "MIT" ]
null
null
null
#!/usr/bin/env python """ Control panel file """ import pddl_solver as pddl import ik import rospy from get_object_position import get_object_position import time from constants import * from spawn_models import reset_model_position, reset_all, spawn_model, spawn_all_models from delete_models import delete_all, delete_model def control_panel(): robot = ik.MoveGroupPythonIntefaceTutorial() # robot.go_to_init_state() # robot.open_gripper() bottle = 'bottle_1' # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) # current_bottle_orig_pos[-1] += BZS while(True): print() cmd = raw_input("Enter command:\n open, close, init,\n gtb, hover, gtc, move,\n pour, cb, rb, ra,\n pgr, parm, pj,\n setj, att, box,\n del, dela, spawn, exit:\n") if(cmd == 'open'): # open the gripper robot.open_gripper() elif(cmd == 'close'): # close the gripper goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n")) if(goal==""): goal = -0.075 while(goal > 0 or goal < -0.12): goal = float(raw_input("Enter closing goal in range [-0.12; 0]:\n")) robot.close_gripper(goal) elif(cmd == 'init'): # go to initial pose robot.go_to_init_state() elif(cmd == 'gtb'): # go to bottle x,y,z = current_bottle_orig_pos h = raw_input("Set z level: ") if(h == ""): h = BZS else: h = float(h) robot.go_to_xyz(x, y, z + h) elif(cmd == 'hover'): # hover over the bottle x,y,z = current_bottle_orig_pos robot.go_to_xyz(x, y, BUO) elif(cmd == 'gtc'): # go to cup # simulation x,y,z = get_object_position('cup_1') # real_world # pos, angle = Real_world_PourPos[cup] # x,y,z = pos robot.go_to_xyz(x, y, CUO) elif(cmd == 'move'): # go to cup x,y,z = robot.get_arm_pose() dir = raw_input("Enter coord: x,y or z:\n") while(dir not in ['x','y','z']): dir = raw_input("Enter coord: x,y or z:\n") step = float(raw_input("Enter step size:\n")) if(dir == 'x'): x += step elif(dir == 'y'): y += step elif(dir == 'z'): z += step robot.go_to_xyz(x, y, z) elif(cmd == 'pour'): # turn gripper on pouring angle robot.rotate_gripper(angle = 1) rospy.sleep(1.5) robot.rotate_gripper(angle = 0) elif(cmd == 'cb'): # change bottle b_n = int(raw_input("Enter bottle number from 1 to 6\n")) while(b_n not in [1,2,3,4,5,6]): b_n = int(raw_input("Enter bottle number from 1 to 6\n")) bottle = 'bottle_' + str(b_n) # simulatiuon current_bottle_orig_pos = get_object_position(bottle) # real_world # current_bottle_orig_pos = Real_poses(bottle) elif(cmd == 'rb'): # reset bottle position reset_model_position(bottle) elif(cmd == 'ra'): # reset all models positions reset_all() elif(cmd == 'pgr'): # print gripper postiion pos = robot.get_gripper_pose() print("Current gripper coordinates: " + str(pos)) elif(cmd == 'parm'): # print arm postiion pos = robot.get_arm_pose() print("Current arm coordinates: " + str(pos)) elif(cmd == 'pj'): # print arm joints current_joints = robot.get_arm_joints() print("Current joints poistion: " + str(current_joints)) elif(cmd == 'setj'): # set robot joint angles joints = robot.get_arm_joints() # joints[0] = float(raw_input("Enter theta_0")) # We don't want to change the arm direction t1 = raw_input("Enter theta_1: ") t2 = raw_input("Enter theta_2: ") t3 = raw_input("Enter theta_3: ") if(t1 != ''): joints[1] = float(t1) if(t2 != ''): joints[2] = float(t2) if(t3 != ''): joints[3] = float(t3) joints[4] = 0 robot.set_joints(joints) elif(cmd == 'att'): # attaches object to the gripper robot.attach_object(bottle) attached_objects = robot.scene.get_attached_objects([bottle]) print("Attached 
objects: " + str(attached_objects)) elif(cmd == 'box'): robot.add_box() robot.attach_object('box') attached_objects = robot.scene.get_attached_objects([bottle]) print("Attached objects: " + str(attached_objects)) elif(cmd == 'del'): delete_model(bottle) print("Bottle " + str(bottle.split('_')[1]) + " was deleted") elif(cmd == 'dela'): delete_all() print("All models were deleted") elif(cmd == 'spawn'): spawn_model(bottle) print("Bottle " + str(bottle.split('_')[1]) + " was spawned") elif(cmd == 'exit'): # exit control panel script print('Finish performance') return else: print('Wrong command') if __name__ == '__main__': control_panel()
40.173913
170
0.530483
0
0
0
0
0
0
0
0
1,662
0.299784
53fac3e7275b1080c646a6ed12952be14a9e25f1
1,427
py
Python
Enigma/Enigma.py
archanpatkar/Enigma
dbbc1fda99bf451a0284f051c724ed43915dfe2a
[ "MIT" ]
3
2019-06-25T06:46:50.000Z
2021-07-27T14:14:32.000Z
Enigma/Enigma.py
archanpatkar/Enigma
dbbc1fda99bf451a0284f051c724ed43915dfe2a
[ "MIT" ]
null
null
null
Enigma/Enigma.py
archanpatkar/Enigma
dbbc1fda99bf451a0284f051c724ed43915dfe2a
[ "MIT" ]
1
2021-07-27T14:20:30.000Z
2021-07-27T14:20:30.000Z
from Enigma.Rotor import Rotor from Enigma.Reflector import Reflector from Enigma.Plugboard import Plugboard class Enigma: def __init__(self , rotors = [ Rotor(0,"IC") , Rotor(0,"IIC") , Rotor(0,"IIIC") ] , plugboard = Plugboard() , reflector = Reflector("A")): self.rotors = rotors for i in range(len(rotors)): if i + 1 < len(rotors): rotors[i].on("Sidereal", lambda *args, nxt=i+1: rotors[nxt].step()) self.Plugboard = plugboard; self.Reflector = reflector; def encrypt(self,data): data = data.upper().replace(" ",""); string = ""; for char in data: string += self.each(char,True); return string; def decrypt(self,data): data = data.upper(); string = ""; for char in data: string += self.each(char,False); return string; def each(self,char,flag): self.rotors[0].step() output = self.Plugboard.get(char) for rotor in self.rotors: if flag: output = rotor.scramble(output) else: output = rotor.unscramble(output) output = self.Reflector.get(output) for rotor in self.rotors[::-1]: if flag: output = rotor.scramble(output) else: output = rotor.unscramble(output) return self.Plugboard.get(output);
32.431818
143
0.5459
1,316
0.922214
0
0
0
0
0
0
37
0.025929
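A quick aside on the fix applied in the Enigma constructor above: registering callbacks in a loop with lambda *args: rotors[i+1].step() captures i late, so every callback would see the loop's final value (and here index past the end of the list); binding it as a default argument evaluates i+1 at definition time. A minimal, self-contained demo of the pitfall, no Enigma classes needed:

callbacks_late = [lambda: i for i in range(3)]        # all three close over the same i
callbacks_bound = [lambda i=i: i for i in range(3)]   # default arg freezes i per iteration
print([f() for f in callbacks_late])    # [2, 2, 2]
print([f() for f in callbacks_bound])   # [0, 1, 2]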
53fad9cdfe9f1c4fdba68eaa168284de33fce059
647
py
Python
var/spack/repos/builtin/packages/exiv2/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
9
2018-04-18T07:51:40.000Z
2021-09-10T03:56:57.000Z
var/spack/repos/builtin/packages/exiv2/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
907
2018-04-18T11:17:57.000Z
2022-03-31T13:20:25.000Z
var/spack/repos/builtin/packages/exiv2/package.py
xiki-tempula/spack
9d66c05e93ab8a933fc59915040c0e0c86a4aac4
[ "ECL-2.0", "Apache-2.0", "MIT" ]
29
2018-11-05T16:14:23.000Z
2022-02-03T16:07:09.000Z
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Exiv2(CMakePackage): """Exiv2 is a Cross-platform C++ library and a command line utility to manage image metadata """ homepage = "https://www.exiv2.org/" url = "https://github.com/Exiv2/exiv2/archive/v0.27.2.tar.gz" version('0.27.2', sha256='3dbcaf01fbc5b98d42f091d1ff0d4b6cd9750dc724de3d9c0d113948570b2934') depends_on('zlib', type='link') depends_on('[email protected]:', type='link')
30.809524
96
0.710974
426
0.658423
0
0
0
0
0
0
482
0.744977
53fb4aef0b525310a37b5aa5c278d91c9afe8fd1
2,711
py
Python
magicauth/send_token.py
JMIdeaMaker/django-magicauth
ffca3423c46f8f3d7e49eaf374b33265d4730587
[ "MIT" ]
null
null
null
magicauth/send_token.py
JMIdeaMaker/django-magicauth
ffca3423c46f8f3d7e49eaf374b33265d4730587
[ "MIT" ]
null
null
null
magicauth/send_token.py
JMIdeaMaker/django-magicauth
ffca3423c46f8f3d7e49eaf374b33265d4730587
[ "MIT" ]
null
null
null
import math from django.contrib.auth import get_user_model from django.contrib.sites.shortcuts import get_current_site from django.core.mail import send_mail from django.template import loader from magicauth import settings as magicauth_settings from django.conf import settings as django_settings from magicauth.models import MagicToken import sendgrid from sendgrid import SendGridAPIClient from sendgrid.helpers.mail import Mail sg = sendgrid.SendGridAPIClient(django_settings.SENDGRID_API_KEY) class SendTokenMixin(object): """ Helper for sending an email containing a link containing the MagicToken. """ def create_token(self, user): token = MagicToken.objects.create(user=user) return token def get_user_from_email(self, user_email): """ Query the DB for the user corresponding to the email. - We use get_user_model() instead of User (in case the Django app has customised the User class) - We use magicauth_settings.EMAIL_FIELD, which is the name of the field in the user model. By default "username" but not always. """ user_class = get_user_model() email_field = magicauth_settings.EMAIL_FIELD field_lookup = {f"{email_field}__iexact": user_email} user = user_class.objects.get(**field_lookup) return user def send_email(self, user, user_email, token, extra_context=None): email_subject = magicauth_settings.EMAIL_SUBJECT html_template = magicauth_settings.EMAIL_HTML_TEMPLATE text_template = magicauth_settings.EMAIL_TEXT_TEMPLATE from_email = magicauth_settings.FROM_EMAIL context = { "token": token, "user": user, "site": get_current_site(self.request), "TOKEN_DURATION_MINUTES": math.floor(magicauth_settings.TOKEN_DURATION_SECONDS / 60), "TOKEN_DURATION_SECONDS": magicauth_settings.TOKEN_DURATION_SECONDS, } if extra_context: context.update(extra_context) text_message = loader.render_to_string(text_template, context) html_message = loader.render_to_string(html_template, context) mail = Mail( from_email=( django_settings.MAGICAUTH_FROM_EMAIL, django_settings.MAGICAUTH_SENDER ), to_emails=[user_email], subject=email_subject, html_content=html_message ) sg.send(mail) def send_token(self, user_email, extra_context=None): user = self.get_user_from_email(user_email) token = self.create_token(user) self.send_email(user, user_email, token, extra_context)
36.146667
98
0.69384
2,206
0.813722
0
0
0
0
0
0
516
0.190336
53fbcfdc398532d49a5138646d1108fbc979d12a
2,148
py
Python
qcdb/util/paths.py
loriab/qccddb
d9e156ef8b313ac0633211fc6b841f84a3ddde24
[ "BSD-3-Clause" ]
8
2019-03-28T11:54:59.000Z
2022-03-19T03:31:37.000Z
qcdb/util/paths.py
loriab/qccddb
d9e156ef8b313ac0633211fc6b841f84a3ddde24
[ "BSD-3-Clause" ]
39
2018-10-31T23:02:18.000Z
2021-12-12T22:11:37.000Z
qcdb/util/paths.py
loriab/qccddb
d9e156ef8b313ac0633211fc6b841f84a3ddde24
[ "BSD-3-Clause" ]
9
2018-03-12T20:51:50.000Z
2022-02-28T15:18:34.000Z
import os import sys ## {{{ http://code.activestate.com/recipes/52224/ (r1) def search_file(filename, search_path): """Given an os.pathsep divided `search_path`, find first occurrence of `filename`. Returns full path to file if found or None if unfound. """ file_found = False paths = search_path.split(os.pathsep) # paths = string.split(search_path, os.pathsep) for path in paths: if os.path.exists(os.path.join(path, filename)): file_found = True break if file_found: return os.path.abspath(os.path.join(path, filename)) else: return None ## end of http://code.activestate.com/recipes/52224/ }}} def all_casings(input_string): """Function to return a generator of all lettercase permutations of *input_string*. """ if not input_string: yield "" else: first = input_string[:1] if first.lower() == first.upper(): for sub_casing in all_casings(input_string[1:]): yield first + sub_casing else: for sub_casing in all_casings(input_string[1:]): yield first.lower() + sub_casing yield first.upper() + sub_casing def import_ignorecase(module, lenv=None): """Function to import *module* in any possible lettercase permutation. Returns module object if available, None if not. `lenv` is list (not str) of addl sys.path members to try. """ lenv = [] if lenv is None else lenv with add_path(lenv): modobj = None for per in list(all_casings(module)): try: modobj = __import__(per) except ImportError: pass else: break return modobj class add_path: """https://stackoverflow.com/a/39855753""" def __init__(self, paths): # paths must be list self.paths = paths def __enter__(self): for pth in reversed(self.paths): sys.path.insert(0, pth) def __exit__(self, exc_type, exc_value, traceback): for pth in self.paths: sys.path.remove(pth)
26.85
74
0.603352
374
0.174115
534
0.248603
0
0
0
0
661
0.307728
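As a usage note for the path helpers above, here is a standalone sketch of the add_path context-manager pattern (reimplemented so the snippet runs on its own; the directory name is hypothetical). It temporarily prepends entries to sys.path and removes them on exit, which is what import_ignorecase relies on:

import sys

class add_path:
    def __init__(self, paths):
        self.paths = paths  # list of directories to prepend

    def __enter__(self):
        for pth in reversed(self.paths):
            sys.path.insert(0, pth)

    def __exit__(self, exc_type, exc_value, traceback):
        for pth in self.paths:
            sys.path.remove(pth)

with add_path(["/tmp/extra_modules"]):
    print(sys.path[0])                    # '/tmp/extra_modules'
print("/tmp/extra_modules" in sys.path)   # False again once the block exits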
53fbd095d48c73b6a23ec7ef2c3b6688ff51dfc5
2,380
py
Python
tests/models/DCN_test.py
JiangBowen-master/DeepCTR
291ffb0ff3b8322f64bd839f963d5c7a70e6b358
[ "Apache-2.0" ]
1
2021-09-20T14:12:35.000Z
2021-09-20T14:12:35.000Z
tests/models/DCN_test.py
JiangBowen-master/DeepCTR
291ffb0ff3b8322f64bd839f963d5c7a70e6b358
[ "Apache-2.0" ]
1
2022-02-10T06:29:19.000Z
2022-02-10T06:29:19.000Z
tests/models/DCN_test.py
JiangBowen-master/DeepCTR
291ffb0ff3b8322f64bd839f963d5c7a70e6b358
[ "Apache-2.0" ]
null
null
null
import pytest import tensorflow as tf from deepctr.estimator import DCNEstimator from deepctr.models import DCN from ..utils import check_model, get_test_data, SAMPLE_SIZE, get_test_data_estimator, check_estimator, \ Estimator_TEST_TF1 @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num,cross_parameterization', [(0, (8,), 2, 'vector'), (1, (), 1, 'vector'), (1, (8,), 3, 'vector'), (0, (8,), 2, 'matrix'), (1, (), 1, 'matrix'), (1, (8,), 3, 'matrix'), ] ) def test_DCN(cross_num, hidden_size, sparse_feature_num, cross_parameterization): model_name = "DCN" sample_size = SAMPLE_SIZE x, y, feature_columns = get_test_data(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCN(feature_columns, feature_columns, cross_num=cross_num, cross_parameterization=cross_parameterization, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_model(model, model_name, x, y) @pytest.mark.parametrize( 'cross_num,hidden_size,sparse_feature_num', [(1, (8,), 3) ] ) def test_DCNEstimator(cross_num, hidden_size, sparse_feature_num): if not Estimator_TEST_TF1 and tf.__version__ < "2.2.0": return model_name = "DCN" sample_size = SAMPLE_SIZE linear_feature_columns, dnn_feature_columns, input_fn = get_test_data_estimator(sample_size, sparse_feature_num=sparse_feature_num, dense_feature_num=sparse_feature_num) model = DCNEstimator(linear_feature_columns, dnn_feature_columns, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) check_estimator(model, input_fn) # def test_DCN_invalid(embedding_size=8, cross_num=0, hidden_size=()): # feature_dim_dict = {'sparse': [SparseFeat('sparse_1', 2), SparseFeat('sparse_2', 5), SparseFeat('sparse_3', 10)], # 'dense': [SparseFeat('dense_1', 1), SparseFeat('dense_1', 1), SparseFeat('dense_1', 1)]} # with pytest.raises(ValueError): # _ = DCN(None, embedding_size=embedding_size, cross_num=cross_num, dnn_hidden_units=hidden_size, dnn_dropout=0.5) if __name__ == "__main__": pass
42.5
122
0.654622
0
0
0
0
1,626
0.683193
0
0
644
0.270588
53fbe12da973d06be5b6afaae786b7644d276650
1,309
py
Python
workflows/post_process_run/fv3post/gsutil.py
jacnugent/fv3net
84958651bdd17784fdab98f87ad0d65414c03368
[ "MIT" ]
5
2021-03-20T22:42:40.000Z
2021-06-30T18:39:36.000Z
workflows/post_process_run/fv3post/gsutil.py
jacnugent/fv3net
84958651bdd17784fdab98f87ad0d65414c03368
[ "MIT" ]
195
2021-09-16T05:47:18.000Z
2022-03-31T22:03:15.000Z
workflows/post_process_run/fv3post/gsutil.py
ai2cm/fv3net
e62038aee0a97d6207e66baabd8938467838cf51
[ "MIT" ]
1
2021-06-16T22:04:24.000Z
2021-06-16T22:04:24.000Z
import os import subprocess import backoff class GSUtilResumableUploadException(Exception): pass def _decode_to_str_if_bytes(s, encoding="utf-8"): if isinstance(s, bytes): return s.decode(encoding) else: return s def authenticate(): try: credentials = os.environ["GOOGLE_APPLICATION_CREDENTIALS"] except KeyError: pass else: subprocess.check_call( ["gcloud", "auth", "activate-service-account", "--key-file", credentials] ) @backoff.on_exception(backoff.expo, GSUtilResumableUploadException, max_tries=3) def upload_dir(d, dest): try: # Pipe stderr to stdout because gsutil logs upload progress there. subprocess.check_output( ["gsutil", "-m", "rsync", "-r", "-e", d, dest], stderr=subprocess.STDOUT ) except subprocess.CalledProcessError as e: output = _decode_to_str_if_bytes(e.output) if "ResumableUploadException" in output: raise GSUtilResumableUploadException() else: raise e def download_directory(dir_, dest): os.makedirs(dest, exist_ok=True) subprocess.check_call(["gsutil", "-m", "rsync", "-r", dir_, dest]) def cp(source, destination): subprocess.check_call(["gsutil", "cp", source, destination])
25.666667
85
0.654698
57
0.043545
0
0
549
0.419404
0
0
245
0.187166
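The gsutil helpers above lean on the third-party backoff package; below is a small, self-contained illustration of the same on_exception decorator. The FlakyError class and the attempts counter are invented for the demo, and the retries sleep briefly between attempts:

import backoff

class FlakyError(Exception):
    pass

attempts = {"n": 0}

@backoff.on_exception(backoff.expo, FlakyError, max_tries=3)
def flaky():
    # fails twice, then succeeds on the third call
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise FlakyError("transient failure")
    return "ok after %d attempts" % attempts["n"]

print(flaky())  # retries twice with exponential waits, then prints the message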
53fc42709c54959b0375cdc103e3419eb44ee072
3,012
py
Python
deploy_tix/__main__.py
rpappalax/deploy-tix
a53c7fa7898b9f0c2f530c8abd8bab322a2eb7bc
[ "MIT" ]
null
null
null
deploy_tix/__main__.py
rpappalax/deploy-tix
a53c7fa7898b9f0c2f530c8abd8bab322a2eb7bc
[ "MIT" ]
20
2015-02-24T08:56:47.000Z
2018-07-25T16:35:30.000Z
deploy_tix/__main__.py
rpappalax/deploy-tix
a53c7fa7898b9f0c2f530c8abd8bab322a2eb7bc
[ "MIT" ]
3
2015-04-01T21:39:50.000Z
2020-09-10T19:40:43.000Z
import argparse from deploy_tix.bugzilla_rest_client import BugzillaRESTClient from deploy_tix.release_notes import ReleaseNotes from output_helper import OutputHelper def main(args=None): parser = argparse.ArgumentParser( description='Scripts for creating / updating deployment tickets in \ Bugzilla', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument( '-a', '--application', help='Example: loop-server', required=True) parser.add_argument( '-B', '--bugzilla-mozilla', help='Set this switch to post directly to bugzilla.mozilla.org \ (without switch posts to: bugzilla-dev.allizom.org)', action='store_true', default=False, required=False) subparsers = parser.add_subparsers(help='Ticket action') # parser for ticket - {create} option parser_create = \ subparsers.add_parser('NEW', help='Create a NEW deployment ticket.') parser_create.add_argument( '-o', '--repo-owner', help='Example: mozilla-services', default='mozilla-services', required=False) parser_create.add_argument( '-e', '--environment', help='Enter: STAGE, PROD', default='STAGE', required=False) parser_create.add_argument( '-m', '--cc-mail', help='Example: [email protected] \ NOTE: must be a registered username!', default='', required=False) # parser for ticket - {upate} option parser_update = subparsers.add_parser( 'UPDATE', help='UPDATE an existing deployment ticket' ) parser_update.add_argument( '-i', '--bug-id', help='Example: 1234567', required=False) parser_update.add_argument( '-c', '--comment', help='Enter: <your bug comment>', required=True) args = vars(parser.parse_args()) application = args['application'] bugzilla_mozilla = args['bugzilla_mozilla'] ticket = BugzillaRESTClient(bugzilla_mozilla) if all(key in args for key in ['bug_id', 'comment']): bug_id = args['bug_id'] comment = args['comment'] ticket.bug_update(application, comment, bug_id) if all(key in args for key in ['repo_owner', 'application', 'environment']): # noqa repo_owner = args['repo_owner'] environment = args['environment'].lower() if args['cc_mail']: cc_mail = args['cc_mail'] else: cc_mail = '' status = 'NEW' output = OutputHelper() output.log('Create deployment ticket', True, True) notes = ReleaseNotes(repo_owner, application, environment) description = notes.get_release_notes() release_num = notes.last_tag output.log('Release Notes', True) output.log(description) ticket.bug_create( release_num, application, environment, status, description, cc_mail )
30.12
87
0.625166
0
0
0
0
0
0
0
0
937
0.311089
53fce9990550dc9cdc1a65b09b6de93156132380
2,583
py
Python
site-packages/visual/examples/drape.py
lebarsfa/vpython-wx
38df062e5532b79f632f4f2a1abae86754c264a9
[ "BSL-1.0" ]
68
2015-01-17T05:41:58.000Z
2021-04-24T08:35:24.000Z
site-packages/visual/examples/drape.py
lebarsfa/vpython-wx
38df062e5532b79f632f4f2a1abae86754c264a9
[ "BSL-1.0" ]
16
2015-01-02T19:36:06.000Z
2018-09-09T21:01:25.000Z
site-packages/visual/examples/drape.py
lebarsfa/vpython-wx
38df062e5532b79f632f4f2a1abae86754c264a9
[ "BSL-1.0" ]
37
2015-02-04T04:23:00.000Z
2020-06-07T03:24:41.000Z
from visual import * print(""" Click to place spheres under falling string. Right button drag or Ctrl-drag to rotate view. Middle button drag or Alt-drag to zoom in or out. On a two-button mouse, middle is left + right. """) # David Scherer scene.title = "Drape" restlength = 0.02 m = 0.010 * restlength g = 9.8 dt = 0.002 k = 3 damp = (1-0)**dt nspheres = 3 floor = 0 # Create the stringy thing: band = curve( x = arange(-1,1,restlength), y = 1, radius = 0.02 ) band.p = band.pos * 0 scene.range = 1.5 scene.autoscale = 0 # Let the user position obstacles: spheres = [] for i in range(nspheres): s = sphere( pos = scene.mouse.getclick().pos, #(i*0.6 - 0.7,0.5 + i*0.1,0), radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) while True: rate(1.0 / dt) if scene.mouse.clicked: i = len(spheres) s = sphere( pos = scene.mouse.getclick().pos, radius = 0.25, color = (abs(sin(i)),cos(i)**2,(i%10)/10.0) ) spheres.append( s ) if floor: below = less(band.pos[:,1],-1) band.p[:,1] = where( below, 0, band.p[:,1] ) band.pos[:,1] = where( below, -1, band.pos[:,1] ) # need a more physical way to make 'damped springs' than this! band.p = band.p * damp #band.p[0] = 0 # nail down left endpoint #band.p[-1] = 0 # nail down right endpoint band.pos = band.pos + band.p/m*dt #gravity band.p[:,1] = band.p[:,1] - m * g * dt # force[n] is the force on point n from point n+1 (to the right): length = (band.pos[1:] - band.pos[:-1]) dist = sqrt(sum(length*length,-1)) force = k * ( dist - restlength ) force = length/dist[:,newaxis] * force[:,newaxis] band.p[:-1] = band.p[:-1] + force*dt band.p[1:] = band.p[1:] - force*dt # color based on "stretch": blue -> white -> red c = clip( dist/restlength * 0.5, 0, 2 ) # blue (compressed) -> white (relaxed) -> red (tension) band.red[1:] = where( less(c,1), c, 1 ) band.green[1:] = where( less(c,1), c, 2-c ) band.blue[1:] = where( less(c,1), 1, 2-c ) for s in spheres: dist = mag( band.pos - s.pos )[:,newaxis] inside = less( dist, s.radius ) if sometrue(inside): R = ( band.pos - s.pos ) / dist surface = s.pos + (s.radius)*R band.pos = surface*inside + band.pos*(1-inside) pdotR = sum(asarray(band.p)*asarray(R),-1) band.p = band.p - R*pdotR[:,newaxis]*inside
27.189474
81
0.542005
0
0
0
0
0
0
0
0
652
0.25242
53fd39f8be55af2124122647f83ca83013ed5b72
8,921
py
Python
sdc/utilities/sdc_typing_utils.py
dlee992/sdc
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
[ "BSD-2-Clause" ]
540
2017-06-19T16:29:24.000Z
2019-05-21T09:30:07.000Z
sdc/utilities/sdc_typing_utils.py
dlee992/sdc
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
[ "BSD-2-Clause" ]
389
2019-10-30T18:56:46.000Z
2022-03-09T08:21:36.000Z
sdc/utilities/sdc_typing_utils.py
dlee992/sdc
1ebf55c00ef38dfbd401a70b3945e352a5a38b87
[ "BSD-2-Clause" ]
36
2017-06-19T16:29:15.000Z
2019-04-26T09:22:39.000Z
# ***************************************************************************** # Copyright (c) 2020, Intel Corporation All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ***************************************************************************** """ | This file contains SDC utility functions related to typing compilation phase """ import numpy import numba import sdc from numba import types from numba.core.errors import TypingError from numba.np import numpy_support from sdc.datatypes.indexes import * from sdc.str_arr_type import string_array_type, StringArrayType from sdc.datatypes.categorical.types import Categorical sdc_old_index_types = (types.Array, StringArrayType, ) sdc_pandas_index_types = ( EmptyIndexType, PositionalIndexType, RangeIndexType, Int64IndexType, MultiIndexType, ) + sdc_old_index_types sdc_indexes_range_like = ( PositionalIndexType, RangeIndexType, ) # TO-DO: support caching of data allocated for range indexes at request for .values sdc_indexes_wo_values_cache = ( EmptyIndexType, PositionalIndexType, RangeIndexType, ) sdc_pandas_df_column_types = ( types.Array, StringArrayType, Categorical, ) class TypeChecker: """ Validate object type and raise TypingError if the type is invalid, e.g.: Method nsmallest(). 
The object n given: bool expected: int """ msg_template = '{} The object {}\n given: {}\n expected: {}' def __init__(self, func_name): """ Parameters ---------- func_name: :obj:`str` name of the function where types checking """ self.func_name = func_name def raise_exc(self, data, expected_types, name=''): """ Raise exception with unified message Parameters ---------- data: :obj:`any` real type of the data expected_types: :obj:`str` expected types inserting directly to the exception name: :obj:`str` name of the parameter """ msg = self.msg_template.format(self.func_name, name, data, expected_types) raise TypingError(msg) def check(self, data, accepted_type, name=''): """ Check data type belongs to specified type Parameters ---------- data: :obj:`any` real type of the data accepted_type: :obj:`type` accepted type name: :obj:`str` name of the parameter """ if not isinstance(data, accepted_type): self.raise_exc(data, accepted_type.__name__, name=name) class SDCLimitation(Exception): """Exception to be raised in case of SDC limitation""" pass def kwsparams2list(params): """Convert parameters dict to a list of string of a format 'key=value'""" return ['{}={}'.format(k, v) for k, v in params.items()] def sigparams2list(param_names, defaults): """Creates a list of strings of a format 'key=value' from parameter names and default values""" return [(f'{param}' if param not in defaults else f'{param}={defaults[param]}') for param in param_names] def has_literal_value(var, value): """Used during typing to check that variable var is a Numba literal value equal to value""" if not isinstance(var, types.Literal): return False if value is None: return isinstance(var, types.NoneType) or var.literal_value is value elif isinstance(value, type(bool)): return var.literal_value is value else: return var.literal_value == value def has_python_value(var, value): """Used during typing to check that variable var was resolved as Python type and has specific value""" if not isinstance(var, type(value)): return False if value is None or isinstance(value, type(bool)): return var is value else: return var == value def is_default(var, value): return has_literal_value(var, value) or has_python_value(var, value) or isinstance(var, types.Omitted) def check_is_numeric_array(type_var): """Used during typing to check that type_var is a numeric numpy arrays""" return check_is_array_of_dtype(type_var, types.Number) def check_index_is_numeric(ty_series): """Used during typing to check that series has numeric index""" return isinstance(ty_series.index.dtype, types.Number) def check_types_comparable(ty_left, ty_right): """Used during typing to check that specified types can be compared""" if hasattr(ty_left, 'dtype'): ty_left = ty_left.dtype if hasattr(ty_right, 'dtype'): ty_right = ty_right.dtype # add the rest of supported types here if isinstance(ty_left, types.Number): return isinstance(ty_right, types.Number) if isinstance(ty_left, types.UnicodeType): return isinstance(ty_right, types.UnicodeType) if isinstance(ty_left, types.Boolean): return isinstance(ty_right, types.Boolean) if isinstance(ty_left, (types.Tuple, types.UniTuple)): # FIXME: just for now to unblock compilation return ty_left == ty_right return False def check_arrays_comparable(ty_left, ty_right): """Used during typing to check that underlying arrays of specified types can be compared""" return ((ty_left == string_array_type and ty_right == string_array_type) or (check_is_numeric_array(ty_left) and check_is_numeric_array(ty_right))) def 
check_is_array_of_dtype(type_var, dtype): """Used during typing to check that type_var is a numeric numpy array of specific dtype""" return isinstance(type_var, types.Array) and isinstance(type_var.dtype, dtype) def find_common_dtype_from_numpy_dtypes(array_types, scalar_types): """Used to find common numba dtype for a sequences of numba dtypes each representing some numpy dtype""" np_array_dtypes = [numpy_support.as_dtype(dtype) for dtype in array_types] np_scalar_dtypes = [numpy_support.as_dtype(dtype) for dtype in scalar_types] np_common_dtype = numpy.find_common_type(np_array_dtypes, np_scalar_dtypes) numba_common_dtype = numpy_support.from_dtype(np_common_dtype) return numba_common_dtype def find_index_common_dtype(left, right): """Used to find common dtype for indexes of two series and verify if index dtypes are equal""" left_index_dtype = left.dtype right_index_dtype = right.dtype index_dtypes_match = left_index_dtype == right_index_dtype if not index_dtypes_match: numba_index_common_dtype = find_common_dtype_from_numpy_dtypes( [left_index_dtype, right_index_dtype], []) else: numba_index_common_dtype = left_index_dtype return index_dtypes_match, numba_index_common_dtype def gen_impl_generator(codegen, impl_name): """Generate generator of an implementation""" def _df_impl_generator(*args, **kwargs): func_text, global_vars = codegen(*args, **kwargs) loc_vars = {} exec(func_text, global_vars, loc_vars) _impl = loc_vars[impl_name] return _impl return _df_impl_generator def check_signed_integer(ty): return isinstance(ty, types.Integer) and ty.signed def _check_dtype_param_type(dtype): """ Returns True is dtype is a valid type for dtype parameter and False otherwise. Used in RangeIndex ctor and other methods that take dtype parameter. """ valid_dtype_types = (types.NoneType, types.Omitted, types.UnicodeType, types.NumberClass) return isinstance(dtype, valid_dtype_types) or dtype is None
34.311538
109
0.690954
1,546
0.173299
0
0
0
0
0
0
3,978
0.445914
53fde8ce197812a38b7631459a915158d4d2d39f
1,074
py
Python
Hackerrank/Contests/Project Euler/euler010.py
PROxZIMA/Competitive-Coding
ba6b365ea130b6fcaa15c5537b530ed363bab793
[ "MIT" ]
1
2021-01-10T13:29:21.000Z
2021-01-10T13:29:21.000Z
Hackerrank/Contests/Project Euler/euler010.py
PROxZIMA/Competitive-Coding
ba6b365ea130b6fcaa15c5537b530ed363bab793
[ "MIT" ]
null
null
null
Hackerrank/Contests/Project Euler/euler010.py
PROxZIMA/Competitive-Coding
ba6b365ea130b6fcaa15c5537b530ed363bab793
[ "MIT" ]
null
null
null
from math import sqrt # Naive method: Loop through N and check if every number is prime or not. If prime add to sum. Time complexity is O(√n). Time of execution ~ 8sec for n = 1000000 def prime(n): yield 2 yield 3 for p in range(5, n+1, 2): if p % 3 == 0: continue else: for i in range (5, int(sqrt(p)) + 1, 6): if p % i == 0 or p % (i + 2) == 0: break else: yield p s = set(prime(1000000)) for _ in range(int(input())): n = int(input()) print(sum(i for i in s if i <= n)) # Sieve implementation: Time complexity of O(n*log(log(n))). Time of execution ~ 2sec for n = 1000000 limit = 1000000 sieve = [0] + [1, 0] * 500000 sieve[0], sieve[1], sieve[2] = 0, 0, 2 p = 3 while p <= limit: if sieve[p]: sieve[p] = sieve[p-1] + p for i in range(p*p, limit+1, p): sieve[i] = 0 else: sieve[p] = sieve[p-1] sieve[p+1] = sieve[p] p += 2 for _ in range(int(input())): print(sieve[int(input())])
23.347826
161
0.515829
0
0
298
0.276952
0
0
0
0
264
0.245353
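An alternative, easy-to-verify sketch of what the Project Euler solution above precomputes: a plain sieve of Eratosthenes plus a running prefix sum gives the sum of all primes up to each n. This is an illustration of mine, not the record's code:

def prime_prefix_sums(limit):
    is_prime = [True] * (limit + 1)
    is_prime[0] = is_prime[1] = False
    for p in range(2, int(limit ** 0.5) + 1):
        if is_prime[p]:
            for m in range(p * p, limit + 1, p):
                is_prime[m] = False
    sums, running = [0] * (limit + 1), 0
    for i in range(limit + 1):
        if is_prime[i]:
            running += i
        sums[i] = running
    return sums

sums = prime_prefix_sums(100)
print(sums[10])   # 17  (2 + 3 + 5 + 7)
print(sums[100])  # 1060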
53fe751d15505be94879d0853534a2ee2c6e3129
3,891
py
Python
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
852
2015-01-11T21:03:51.000Z
2022-03-25T21:14:00.000Z
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
30,371
2015-01-02T00:14:40.000Z
2022-03-31T23:26:05.000Z
DQM/L1TMonitorClient/python/L1EmulatorErrorFlagClient_cfi.py
ckamtsikis/cmssw
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
[ "Apache-2.0" ]
3,240
2015-01-02T05:53:18.000Z
2022-03-31T17:24:21.000Z
import FWCore.ParameterSet.Config as cms from DQMServices.Core.DQMEDHarvester import DQMEDHarvester l1EmulatorErrorFlagClient = DQMEDHarvester("L1EmulatorErrorFlagClient", # # for each L1 system, give: # - SystemLabel: system label # - HwValLabel: system label as used in hardware validation package # (the package producing the ErrorFlag histogram) # - SystemMask: system mask: if 1, the system is masked in the summary plot # - SystemFolder: the folder where the ErrorFlag histogram is looked for # # the position in the parameter set gives, in reverse order, the position in the reportSummaryMap # in the emulator column (left column) L1Systems = cms.VPSet( cms.PSet( SystemLabel = cms.string("ECAL"), HwValLabel = cms.string("ETP"), SystemMask = cms.uint32(1), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("HCAL"), HwValLabel = cms.string("HTP"), SystemMask = cms.uint32(1), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("RCT"), HwValLabel = cms.string("RCT"), SystemMask = cms.uint32(0), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("Stage1Layer2"), HwValLabel = cms.string("Stage1Layer2"), SystemMask = cms.uint32(0), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("DTTF"), HwValLabel = cms.string("DTF"), SystemMask = cms.uint32(0), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("DTTPG"), HwValLabel = cms.string("DTP"), SystemMask = cms.uint32(1), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("CSCTF"), HwValLabel = cms.string("CTF"), SystemMask = cms.uint32(1), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("CSCTPG"), HwValLabel = cms.string("CTP"), SystemMask = cms.uint32(1), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("RPC"), HwValLabel = cms.string("RPC"), SystemMask = cms.uint32(0), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("GMT"), HwValLabel = cms.string("GMT"), SystemMask = cms.uint32(0), SystemFolder = cms.string("") ), cms.PSet( SystemLabel = cms.string("GT"), HwValLabel = cms.string("GT"), SystemMask = cms.uint32(1), SystemFolder = cms.string("L1TEMU/Stage1GTexpert") ) ) )
45.776471
101
0.40992
0
0
0
0
0
0
0
0
704
0.18093
53ff445026af64cf9c890da3e25303bb69266c4d
17,382
py
Python
codalab/model/tables.py
jzwang43/codalab-worksheets
b1d4c6cc4b72f4dfa35a15f876e2d0ce9a03d28d
[ "Apache-2.0" ]
null
null
null
codalab/model/tables.py
jzwang43/codalab-worksheets
b1d4c6cc4b72f4dfa35a15f876e2d0ce9a03d28d
[ "Apache-2.0" ]
null
null
null
codalab/model/tables.py
jzwang43/codalab-worksheets
b1d4c6cc4b72f4dfa35a15f876e2d0ce9a03d28d
[ "Apache-2.0" ]
null
null
null
""" The SQLAlchemy table objects for the CodaLab bundle system tables. """ # TODO: Replace String and Text columns with Unicode and UnicodeText as appropriate # This way, SQLAlchemy will automatically perform conversions to and from UTF-8 # encoding, or use appropriate database engine-specific data types for Unicode # data. Currently, only worksheet.title uses the Unicode column type. from sqlalchemy import Column, ForeignKey, Index, MetaData, Table, UniqueConstraint from sqlalchemy.types import ( BigInteger, Boolean, DateTime, Enum, Float, Integer, LargeBinary, String, Text, Unicode, ) from sqlalchemy.sql.schema import ForeignKeyConstraint db_metadata = MetaData() bundle = Table( 'bundle', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('bundle_type', String(63), nullable=False), # The command will be NULL except for run bundles. Column('command', Text, nullable=True), # The data_hash will be NULL if the bundle's value is still being computed. Column('data_hash', String(63), nullable=True), Column('state', String(63), nullable=False), Column('owner_id', String(255), nullable=True), Column('is_anonymous', Boolean, nullable=False, default=False), UniqueConstraint('uuid', name='uix_1'), Index('bundle_data_hash_index', 'data_hash'), Index('state_index', 'state'), # Needed for the bundle manager. ) # Includes things like name, description, etc. bundle_metadata = Table( 'bundle_metadata', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('bundle_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('metadata_key', String(63), nullable=False), Column('metadata_value', Text, nullable=False), Index('metadata_kv_index', 'metadata_key', 'metadata_value', mysql_length=63), ) # For each child_uuid, we have: key = child_path, target = (parent_uuid, parent_path) bundle_dependency = Table( 'bundle_dependency', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('child_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Column('child_path', Text, nullable=False), # Deliberately omit ForeignKey(bundle.c.uuid), because bundles can have # dependencies to bundles not (yet) in the system. Column('parent_uuid', String(63), nullable=False), Column('parent_path', Text, nullable=False), ) # The worksheet table does not have many columns now, but it will eventually # include columns for owner, group, permissions, etc. worksheet = Table( 'worksheet', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('owner_id', String(255), nullable=True), Column( 'title', Unicode(255), nullable=True ), # Short human-readable description of the worksheet Column( 'frozen', DateTime, nullable=True ), # When the worksheet was frozen (forever immutable) if it is. 
Column('is_anonymous', Boolean, nullable=False, default=False), Column( 'date_created', DateTime ), # When the worksheet was created; Set to null if the worksheet created before v0.5.31; Set to current timestamp by default Column( 'date_last_modified', DateTime ), # When the worksheet was last modified; Set to null if the worksheet created before v0.5.31; Set to current_timestamp by default UniqueConstraint('uuid', name='uix_1'), Index('worksheet_name_index', 'name'), Index('worksheet_owner_index', 'owner_id'), ) worksheet_item = Table( 'worksheet_item', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # A worksheet item is either: # - type = bundle (bundle_uuid != null) # - type = worksheet (subworksheet_uuid != null) # - type = markup (value != null) # - type = directive (value != null) # Deliberately omit ForeignKey(bundle.c.uuid), because worksheets can contain # bundles and worksheets not (yet) in the system. Column('bundle_uuid', String(63), nullable=True), Column('subworksheet_uuid', String(63), nullable=True), Column('value', Text, nullable=False), # TODO: make this nullable Column('type', String(20), nullable=False), Column('sort_key', Integer, nullable=True), Index('worksheet_item_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_item_bundle_uuid_index', 'bundle_uuid'), Index('worksheet_item_subworksheet_uuid_index', 'subworksheet_uuid'), ) # Worksheet tags worksheet_tag = Table( 'worksheet_tag', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('worksheet_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), Column('tag', String(63), nullable=False), Index('worksheet_tag_worksheet_uuid_index', 'worksheet_uuid'), Index('worksheet_tag_tag_index', 'tag'), ) group = Table( 'group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('uuid', String(63), nullable=False), Column('name', String(255), nullable=False), Column('user_defined', Boolean), Column('owner_id', String(255), nullable=True), UniqueConstraint('uuid', name='uix_1'), Index('group_name_index', 'name'), Index('group_owner_id_index', 'owner_id'), ) user_group = Table( 'user_group', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), Column('user_id', String(63), ForeignKey("user.user_id"), nullable=False), # Whether a user is able to modify this group. 
Column('is_admin', Boolean), Index('group_uuid_index', 'group_uuid'), Index('user_id_index', 'user_id'), ) # Permissions for bundles group_bundle_permission = Table( 'group_bundle_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a bundle Column('object_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), # Permissions encoded as integer (see below) Column('permission', Integer, nullable=False), ) # Permissions for worksheets group_object_permission = Table( 'group_object_permission', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=False), # Reference to a worksheet object Column('object_uuid', String(63), ForeignKey(worksheet.c.uuid), nullable=False), # Permissions encoded as integer (see below) Column('permission', Integer, nullable=False), ) # A permission value is one of the following: none (0), read (1), or all (2). GROUP_OBJECT_PERMISSION_NONE = 0x00 GROUP_OBJECT_PERMISSION_READ = 0x01 GROUP_OBJECT_PERMISSION_ALL = 0x02 # A notifications value is one of the following: NOTIFICATIONS_NONE = 0x00 # Receive no notifications NOTIFICATIONS_IMPORTANT = 0x01 # Receive only important notifications NOTIFICATIONS_GENERAL = 0x02 # Receive general notifications (new features) # Store information about users. user = Table( 'user', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), # Basic information Column('user_id', String(63), nullable=False), Column('user_name', String(63), nullable=False, unique=True), Column( 'email', String(254), nullable=False, unique=True ), # Length of 254 to be compliant with RFC3696/5321 Column( 'notifications', Integer, nullable=False, default=NOTIFICATIONS_GENERAL ), # Which emails user wants to receive Column('last_login', DateTime), # Null if user has never logged in Column( 'is_active', Boolean, nullable=False, default=True ), # Set to False instead of deleting users to maintain foreign key integrity Column('first_name', String(30, convert_unicode=True)), Column('last_name', String(30, convert_unicode=True)), Column('date_joined', DateTime, nullable=False), Column('has_access', Boolean, default=False, nullable=True), Column('is_verified', Boolean, nullable=False, default=False), Column('is_superuser', Boolean, nullable=False, default=False), Column('password', String(128), nullable=False), # Additional information Column('affiliation', String(255, convert_unicode=True), nullable=True), Column('url', String(255, convert_unicode=True), nullable=True), # Quotas Column('time_quota', Float, nullable=False), # Number of seconds allowed Column('parallel_run_quota', Integer, nullable=False), # Number of parallel jobs allowed Column('time_used', Float, nullable=False), # Number of seconds already used Column('disk_quota', Float, nullable=False), # Number of bytes allowed Column('disk_used', Float, nullable=False), # Number of bytes already used Index('user_user_id_index', 'user_id'), Index('user_user_name_index', 'user_name'), UniqueConstraint('user_id', name='uix_1'), ) # Stores (email) verification keys user_verification = Table( 'user_verification', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, 
autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('date_sent', DateTime, nullable=True), Column('key', String(64), nullable=False), ) # Stores password reset codes user_reset_code = Table( 'user_reset_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('date_created', DateTime, nullable=False), Column('code', String(64), nullable=False), ) # OAuth2 Tables oauth2_client = Table( 'oauth2_client', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), nullable=False), Column('name', String(63), nullable=True), Column('secret', String(255), nullable=True), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=True), Column( 'grant_type', Enum("authorization_code", "password", "client_credentials", "refresh_token"), nullable=False, ), Column('response_type', Enum("code", "token"), nullable=False), Column('scopes', Text, nullable=False), # comma-separated list of allowed scopes Column('redirect_uris', Text, nullable=False), # comma-separated list of allowed redirect URIs UniqueConstraint('client_id', name='uix_1'), ) oauth2_token = Table( 'oauth2_token', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('access_token', String(255), unique=True), Column('refresh_token', String(255), unique=True), Column('expires', DateTime, nullable=False), ) oauth2_auth_code = Table( 'oauth2_auth_code', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), Column('client_id', String(63), ForeignKey(oauth2_client.c.client_id), nullable=False), Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('scopes', Text, nullable=False), Column('code', String(100), nullable=False), Column('expires', DateTime, nullable=False), Column('redirect_uri', String(255), nullable=False), ) # Store information about users' questions or feedback. chat = Table( 'chat', db_metadata, Column( 'id', BigInteger().with_variant(Integer, "sqlite"), primary_key=True, nullable=False, autoincrement=True, ), # Primary key Column('time', DateTime, nullable=False), # When did the user send this query? Column('sender_user_id', String(63), nullable=True), # Who sent it? Column('recipient_user_id', String(63), nullable=True), # Who received it? Column('message', Text, nullable=False), # What's the content of the chat? Column( 'worksheet_uuid', String(63), nullable=True ), # What is the id of the worksheet that the sender is on? Column( 'bundle_uuid', String(63), nullable=True ), # What is the id of the bundle that the sender is on? ) # Store information about workers. 
worker = Table( 'worker', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), Column('group_uuid', String(63), ForeignKey(group.c.uuid), nullable=True), Column('tag', Text, nullable=True), # Tag that allows for scheduling runs on specific workers. Column('cpus', Integer, nullable=False), # Number of CPUs on worker. Column('gpus', Integer, nullable=False), # Number of GPUs on worker. Column('memory_bytes', BigInteger, nullable=False), # Total memory of worker. Column('free_disk_bytes', BigInteger, nullable=True), # Available disk space on worker. Column( 'checkin_time', DateTime, nullable=False ), # When the worker last checked in with the bundle service. Column('socket_id', Integer, nullable=False), # Socket ID worker listens for messages on. Column( 'shared_file_system', Boolean, nullable=False ), # Whether the worker and the server have a shared filesystem. Column( 'tag_exclusive', Boolean, nullable=False ), # Whether worker runs bundles if and only if they match tags. Column( 'exit_after_num_runs', Integer, nullable=False ), # Number of jobs allowed to run on worker. Column('is_terminating', Boolean, nullable=False), ) # Store information about all sockets currently allocated to each worker. worker_socket = Table( 'worker_socket', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), # No foreign key constraint on the worker table so that we can create a socket # for the worker before adding the worker to the worker table. Column('socket_id', Integer, primary_key=True, nullable=False), ) # Store information about the bundles currently running on each worker. worker_run = Table( 'worker_run', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), nullable=False), Column('worker_id', String(127), nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), Column('run_uuid', String(63), ForeignKey(bundle.c.uuid), nullable=False), Index('uuid_index', 'run_uuid'), ) # Store information about the dependencies available on each worker. worker_dependency = Table( 'worker_dependency', db_metadata, Column('user_id', String(63), ForeignKey(user.c.user_id), primary_key=True, nullable=False), Column('worker_id', String(127), primary_key=True, nullable=False), ForeignKeyConstraint(['user_id', 'worker_id'], ['worker.user_id', 'worker.worker_id']), # Serialized list of dependencies for the user/worker combination. # See WorkerModel for the serialization method. Column('dependencies', LargeBinary, nullable=False), )
36.904459
136
0.676159
0
0
0
0
0
0
0
0
6,636
0.381774
53ff8a47a271e5535277c6325b7ff8df26908ae6
31,403
py
Python
grpc/plugins/connection/gnmi.py
hansthienpondt/ansible-networking-collections
278c88fceac297693a31df3cb54c942284823fbd
[ "BSD-3-Clause" ]
null
null
null
grpc/plugins/connection/gnmi.py
hansthienpondt/ansible-networking-collections
278c88fceac297693a31df3cb54c942284823fbd
[ "BSD-3-Clause" ]
null
null
null
grpc/plugins/connection/gnmi.py
hansthienpondt/ansible-networking-collections
278c88fceac297693a31df3cb54c942284823fbd
[ "BSD-3-Clause" ]
null
null
null
# (c) 2020 Nokia # # Licensed under the BSD 3 Clause license # SPDX-License-Identifier: BSD-3-Clause # from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = """ --- author: - "Hans Thienpondt (@HansThienpondt)" - "Sven Wisotzky (@wisotzky)" connection: gnmi short_description: Provides a persistent gRPC connection for gNMI API service description: - This gRPC plugin provides methods to interact with the gNMI service. - OpenConfig gNMI specification https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md - gNMI API https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto - This connection plugin provides a persistent communication channel to remote devices using gRPC including the underlying transport (TLS). - The plugin binds to the gNMI gRPC service. It provide wrappers for gNMI requests (Capabilities, Get, Set, Subscribe) requirements: - grpcio - protobuf options: host: description: - Target host FQDN or IP address to establish gRPC connection. default: inventory_hostname vars: - name: ansible_host port: type: int description: - Specifies the port on the remote device that listens for connections when establishing the gRPC connection. If None only the C(host) part will be used. ini: - section: defaults key: remote_port env: - name: ANSIBLE_REMOTE_PORT vars: - name: ansible_port remote_user: description: - The username used to authenticate to the remote device when the gRPC connection is first established. If the remote_user is not specified, the connection will use the username of the logged in user. - Can be configured from the CLI via the C(--user) or C(-u) options. ini: - section: defaults key: remote_user env: - name: ANSIBLE_REMOTE_USER vars: - name: ansible_user password: description: - Configures the user password used to authenticate to the remote device when first establishing the gRPC connection. vars: - name: ansible_password - name: ansible_ssh_pass private_key_file: description: - The PEM encoded private key file used to authenticate to the remote device when first establishing the grpc connection. ini: - section: grpc_connection key: private_key_file env: - name: ANSIBLE_PRIVATE_KEY_FILE vars: - name: ansible_private_key_file root_certificates_file: description: - The PEM encoded root certificate file used to create a SSL-enabled channel, if the value is None it reads the root certificates from a default location chosen by gRPC at runtime. ini: - section: grpc_connection key: root_certificates_file env: - name: ANSIBLE_ROOT_CERTIFICATES_FILE vars: - name: ansible_root_certificates_file certificate_chain_file: description: - The PEM encoded certificate chain file used to create a SSL-enabled channel. If the value is None, no certificate chain is used. 
ini: - section: grpc_connection key: certificate_chain_file env: - name: ANSIBLE_CERTIFICATE_CHAIN_FILE vars: - name: ansible_certificate_chain_file certificate_path: description: - Folder to search for certificate and key files ini: - section: grpc_connection key: certificate_path env: - name: ANSIBLE_CERTIFICATE_PATH vars: - name: ansible_certificate_path gnmi_encoding: description: - Encoding used for gNMI communication - Must be either JSON or JSON_IETF - If not provided, will run CapabilityRequest for auto-detection ini: - section: grpc_connection key: gnmi_encoding env: - name: ANSIBLE_GNMI_ENCODING vars: - name: ansible_gnmi_encoding grpc_channel_options: description: - Key/Value pairs (dict) to define gRPC channel options to be used - gRPC reference U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html) - Provide the I(ssl_target_name_override) option to override the TLS subject or subjectAltName (only in the case secure connections are used). The option must be provided in cases, when the FQDN or IPv4 address that is used to connect to the device is different from the subject name that is provided in the host certificate. This is needed, because the TLS validates hostname or IP address to avoid man-in-the-middle attacks. vars: - name: ansible_grpc_channel_options grpc_environment: description: - Key/Value pairs (dict) to define environment settings specific to gRPC - The standard mechanism to provide/set the environment in Ansible cannot be used, because those environment settings are not passed to the client process that establishes the gRPC connection. - Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging. Need to add code for log forwarding of gRPC related log messages to the persistent messages log (see below). - Set C(HTTPS_PROXY) to specify your proxy settings (if needed). - Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do not match what is offered by the gRPC server. vars: - name: ansible_grpc_environment persistent_connect_timeout: type: int description: - Configures, in seconds, the amount of time to wait when trying to initially establish a persistent connection. If this value expires before the connection to the remote device is completed, the connection will fail. default: 5 ini: - section: persistent_connection key: connect_timeout env: - name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT vars: - name: ansible_connect_timeout persistent_command_timeout: type: int description: - Configures the default timeout value (in seconds) when awaiting a response after issuing a call to a RPC. If the RPC does not return before the timeout exceed, an error is generated and the connection is closed. default: 300 ini: - section: persistent_connection key: command_timeout env: - name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT vars: - name: ansible_command_timeout persistent_log_messages: type: boolean description: - This flag will enable logging the command executed and response received from target device in the ansible log file. For this option to work the 'log_path' ansible configuration option is required to be set to a file path with write access. - Be sure to fully understand the security implications of enabling this option as it could create a security vulnerability by logging sensitive information in log file. 
default: False ini: - section: persistent_connection key: log_messages env: - name: ANSIBLE_PERSISTENT_LOG_MESSAGES vars: - name: ansible_persistent_log_messages """ import os import re import json import base64 import datetime try: import grpc HAS_GRPC = True except ImportError: HAS_GRPC = False try: from google import protobuf HAS_PROTOBUF = True except ImportError: HAS_PROTOBUF = False from ansible.errors import AnsibleConnectionFailure, AnsibleError from ansible.plugins.connection import NetworkConnectionBase from ansible.plugins.connection import ensure_connect from google.protobuf import json_format from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2 from ansible.module_utils._text import to_text class Connection(NetworkConnectionBase): """ Connection plugin for gRPC To use gRPC connections in Ansible one (or more) sub-plugin(s) for the required gRPC service(s) must be loaded. To load gRPC sub-plugins use the method `register_service()` with the name of the sub-plugin to be registered. After loading the sub-plugin, Ansible modules can call methods provided by that sub-plugin. There is a wrapper available that consumes the attribute name {sub-plugin name}__{method name} to call a specific method of that sub-plugin. """ transport = "nokia.grpc.gnmi" has_pipelining = True def __init__(self, play_context, new_stdin, *args, **kwargs): super(Connection, self).__init__( play_context, new_stdin, *args, **kwargs ) self._task_uuid = to_text(kwargs.get("task_uuid", "")) if not HAS_PROTOBUF: raise AnsibleError( "protobuf is required to use gRPC connection type. " + "Please run 'pip install protobuf'" ) if not HAS_GRPC: raise AnsibleError( "grpcio is required to use gRPC connection type. " + "Please run 'pip install grpcio'" ) self._connected = False def readFile(self, optionName): """ Reads a binary certificate/key file Parameters: optionName(str): used to read filename from options Returns: File content Raises: AnsibleConnectionFailure: file does not exist or read excpetions """ path = self.get_option('certificate_path') if not path: path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates' filename = self.get_option(optionName) if filename: if filename.startswith('~'): filename = os.path.expanduser(filename) if not filename.startswith('/'): for entry in path.split(':'): if os.path.isfile(os.path.join(entry, filename)): filename = os.path.join(entry, filename) break if os.path.isfile(filename): try: with open(filename, 'rb') as f: return f.read() except Exception as exc: raise AnsibleConnectionFailure( 'Failed to read cert/keys file %s: %s' % (filename, exc) ) else: raise AnsibleConnectionFailure( 'Cert/keys file %s does not exist' % filename ) return None def _connect(self): """ Establish gRPC connection to remote node and create gNMI stub. This method will establish the persistent gRPC connection, if not already done. After this, the gNMI stub will be created. To get visibility about gNMI capabilities of the remote device, a gNM CapabilityRequest will be sent and result will be persisted. 
Parameters: None Returns: None """ if self.connected: self.queue_message('v', 'gRPC connection to host %s already exist' % self._target) return grpcEnv = self.get_option('grpc_environment') or {} if not isinstance(grpcEnv, dict): raise AnsibleConnectionFailure("grpc_environment must be a dict") for key in grpcEnv: if grpcEnv[key]: os.environ[key] = str(grpcEnv[key]) else: try: del os.environ[key] except KeyError: # no such setting in current environment, but thats ok pass self._login_credentials = [ ('username', self.get_option('remote_user')), ('password', self.get_option('password')) ] host = self.get_option('host') port = self.get_option('port') self._target = host if port is None else '%s:%d' % (host, port) self._timeout = self.get_option('persistent_command_timeout') certs = {} certs['root_certificates'] = self.readFile('root_certificates_file') certs['certificate_chain'] = self.readFile('certificate_chain_file') certs['private_key'] = self.readFile('private_key_file') options = self.get_option('grpc_channel_options') if options: if not isinstance(options, dict): raise AnsibleConnectionFailure("grpc_channel_options must be a dict") options = options.items() if certs['root_certificates'] or certs['private_key'] or certs['certificate_chain']: self.queue_message('v', 'Starting secure gRPC connection') creds = grpc.ssl_channel_credentials(**certs) self._channel = grpc.secure_channel(self._target, creds, options=options) else: self.queue_message('v', 'Starting insecure gRPC connection') self._channel = grpc.insecure_channel(self._target, options=options) self.queue_message('v', "gRPC connection established for user %s to %s" % (self.get_option('remote_user'), self._target)) self.queue_message('v', 'Creating gNMI stub') self._stub = gnmi_pb2.gNMIStub(self._channel) self._encoding = self.get_option('gnmi_encoding') if not self._encoding: self.queue_message('v', 'Run CapabilityRequest()') request = gnmi_pb2.CapabilityRequest() response = self._stub.Capabilities(request, metadata=self._login_credentials) self.queue_message('v', 'CapabilityRequest() succeeded') self._gnmiVersion = response.gNMI_version self._yangModels = response.supported_models if gnmi_pb2.Encoding.Value('JSON_IETF') in response.supported_encodings: self._encoding = 'JSON_IETF' elif gnmi_pb2.Encoding.Value('JSON') in response.supported_encodings: self._encoding = 'JSON' else: raise AnsibleConnectionFailure("No compatible supported encoding found (JSON or JSON_IETF)") else: if self._encoding not in ['JSON_IETF', 'JSON']: raise AnsibleConnectionFailure("Incompatible encoding '%s' requested (JSON or JSON_IETF)" % self._encoding) self._encoding_value = gnmi_pb2.Encoding.Value(self._encoding) self._connected = True self.queue_message('v', 'gRPC/gNMI connection has established successfully') def close(self): """ Closes the active gRPC connection to the target host Parameters: None Returns: None """ if self._connected: self.queue_message('v', "Closing gRPC connection to target host") self._channel.close() super(Connection, self).close() # ----------------------------------------------------------------------- def _encodeXpath(self, xpath='/'): """ Encodes XPATH to dict representation that allows conversion to gnmi_pb.Path object Parameters: xpath (str): path string using XPATH syntax Returns: (dict): path dict using gnmi_pb2.Path structure for easy conversion """ mypath = [] xpath = xpath.strip('\t\n\r /') if xpath: path_elements = re.split('''/(?=(?:[^\[\]]|\[[^\[\]]+\])*$)''', xpath) for e in path_elements: entry = {'name': 
e.split("[", 1)[0]} eKeys = re.findall('\[(.*?)\]', e) dKeys = dict(x.split('=', 1) for x in eKeys) if dKeys: entry['key'] = dKeys mypath.append(entry) return {'elem': mypath} return {} def _decodeXpath(self, path): """ Decodes XPATH from dict representation converted from gnmi_pb.Path object Parameters: path (dict): decoded gnmi_pb2.Path object Returns: (str): path string using XPATH syntax """ result = [] if 'elem' not in path: return "" for elem in path['elem']: tmp = elem['name'] if 'key' in elem: for k, v in elem['key'].items(): tmp += "[%s=%s]" % (k, v) result.append(tmp) return '/'.join(result) def _encodeVal(self, data): """ Encodes value to dict representation that allows conversion to gnmi_pb.TypedValue object Parameters: data (ANY): data to be encoded as gnmi_pb.TypedValue object Returns: (dict): dict using gnmi_pb.TypedValue structure for easy conversion """ value = base64.b64encode(json.dumps(data).encode()) if self._encoding == 'JSON_IETF': return {'jsonIetfVal': value} else: return {'jsonVal': value} def _decodeVal(self, val): """ Decodes value from dict representation converted from gnmi_pb.TypedValue object Parameters: val (dict): decoded gnmi_pb.TypedValue object Returns: (ANY): extracted data """ if 'jsonIetfVal' in val: return json.loads(base64.b64decode(val['jsonIetfVal'])) elif 'jsonVal' in val: return json.loads(base64.b64decode(val['jsonVal'])) else: raise AnsibleConnectionFailure("Ansible gNMI plugin does not support encoding for value: %s" % json.dumps(val)) def _dictToList(self, aDict): for key in aDict.keys(): if key.startswith('___'): aDict[key[3:]] = [self._dictToList(val) if isinstance(val, dict) else val for val in aDict[key].values()] del aDict[key] else: if isinstance(aDict[key], dict): aDict[key] = self._dictToList(aDict[key]) return aDict def _mergeToSingleDict(self, rawData): result = {} for entry in rawData: if 'syncResponse' in entry and entry['syncResponse']: # Ignore: SyncResponse is sent after initial update break elif 'update' not in entry: # Ignore: entry without updates break elif 'timestamp' not in entry: # Subscribe response, enter update context entry = entry['update'] else: # Get response, keep context pass prfx = result if ('prefix' in entry) and ('elem' in entry['prefix']): prfx_elements = entry['prefix']['elem'] else: prfx_elements = [] for elem in prfx_elements: eleName = elem['name'] if 'key' in elem: eleKey = json.dumps(elem['key']) eleName = '___'+eleName # Path Element has key => must be list() if eleName in prfx: # Path Element exists => Change Context prfx = prfx[eleName] if eleKey not in prfx: # List entry does not exist => Create prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: # Path Element does not exist => Create prfx[eleName] = {} prfx = prfx[eleName] prfx[eleKey] = elem['key'] prfx = prfx[eleKey] else: # Path Element hasn't key => must be dict() if eleName in prfx: # Path Element exists => Change Context prfx = prfx[eleName] else: # Path Element does not exist => Create prfx[eleName] = {} prfx = prfx[eleName] for _upd in entry['update']: if 'val' not in _upd: # requested path without content (no value) => skip continue elif ('path' in _upd) and ('elem' in _upd['path']): path_elements = _upd['path']['elem'] cPath = prfx elif prfx_elements: path_elements = prfx_elements cPath = result else: # No path at all, replace the objecttree with value result = self._decodeVal(_upd['val']) prfx = result continue # If path_elements has more than just a single entry, # we need to create/navigate to the specified subcontext for elem in 
path_elements[:-1]: eleName = elem['name'] if 'key' in elem: eleKey = json.dumps(elem['key']) eleName = '___'+eleName # Path Element has key => must be list() if eleName in cPath: # Path Element exists => Change Context cPath = cPath[eleName] if eleKey not in cPath: # List entry does not exist => Create cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: # Path Element does not exist => Create cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] = elem['key'] cPath = cPath[eleKey] else: # Path Element hasn't key => must be dict() if eleName in cPath: # Path Element exists => Change Context cPath = cPath[eleName] else: # Path Element does not exist => Create cPath[eleName] = {} cPath = cPath[eleName] # The last entry of path_elements is the leaf element # that needs to be created/updated leaf_elem = path_elements[-1] if 'key' in leaf_elem: eleKey = json.dumps(leaf_elem['key']) eleName = '___'+leaf_elem['name'] if eleName not in cPath: cPath[eleName] = {} cPath = cPath[eleName] cPath[eleKey] = self._decodeVal(_upd['val']) else: cPath[leaf_elem['name']] = self._decodeVal(_upd['val']) return self._dictToList(result) def _simplifyUpdates(self, rawData): for msg in rawData: entry = json_format.MessageToDict(msg) if 'syncResponse' in entry: # Ignore: SyncResponse is sent after initial update pass elif 'update' in entry: result = {} update = entry['update'] if 'prefix' in update: result['prefix'] = '/'+self._decodeXpath(update['prefix']) if 'timestamp' in update: result['timestamp'] = datetime.datetime.fromtimestamp(float(update['timestamp'])/1000000000).isoformat() if 'update' in update: result['values'] = {self._decodeXpath(u['path']): self._decodeVal(u['val']) for u in update['update']} yield result else: # Ignore: Invalid message format pass # ----------------------------------------------------------------------- @ensure_connect def gnmiCapabilities(self): """ Executes a gNMI Capabilities request Parameters: None Returns: str: gNMI capabilities converted into JSON format """ request = gnmi_pb2.CapabilityRequest() auth = self._login_credentials try: response = self._stub.Capabilities(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure("%s" % e) return json_format.MessageToJson(response) @ensure_connect def gnmiGet(self, *args, **kwargs): """ Executes a gNMI Get request Encoding that is used for data serialization is automatically determined based on the remote device capabilities. This gNMI plugin has implemented suppport for JSON_IETF (preferred) and JSON (fallback). 
Parameters: type (str): Type of data that is requested: ALL, CONFIG, STATE prefix (str): Path prefix that is added to all paths (XPATH syntax) paths (list): List of paths (str) to be captured Returns: str: GetResponse message converted into JSON format """ # Remove all input parameters from kwargs that are not set input = dict(filter(lambda x: x[1], kwargs.items())) # Adjust input parameters to match specification for gNMI SetRequest if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'path' in input: input['path'] = [self._encodeXpath(path) for path in input['path']] if 'type' in input: input['type'] = input['type'].upper() input['encoding'] = self._encoding_value request = json_format.ParseDict(input, gnmi_pb2.GetRequest()) auth = self._login_credentials try: response = self._stub.Get(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure("%s" % e) output = self._mergeToSingleDict(json_format.MessageToDict(response)['notification']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSet(self, *args, **kwargs): """ Executes a gNMI Set request Encoding that is used for data serialization is automatically determined based on the remote device capabilities. This gNMI plugin has implemented suppport for JSON_IETF (preferred) and JSON (fallback). Parameters: prefix (str): Path prefix that is added to all paths (XPATH syntax) update (list): Path/Value pairs to be updated replace (list): Path/Value pairs to be replaced delete (list): Paths (str) to be deleted Returns: str: SetResponse message converted into JSON format """ # Remove all input parameters from kwargs that are not set input = dict(filter(lambda x: x[1], kwargs.items())) # Backup options are not to be used in gNMI SetRequest if 'backup' in input: del input['backup'] if 'backup_options' in input: del input['backup_options'] # Adjust input parameters to match specification for gNMI SetRequest if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'delete' in input: input['delete'] = [self._encodeXpath(entry) for entry in input['delete']] if 'update' in input: for entry in input['update']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) if 'replace' in input: for entry in input['replace']: entry['path'] = self._encodeXpath(entry['path']) entry['val'] = self._encodeVal(entry['val']) request = json_format.ParseDict(input, gnmi_pb2.SetRequest()) auth = self._login_credentials try: response = self._stub.Set(request, metadata=auth) except grpc.RpcError as e: raise AnsibleConnectionFailure("%s" % e) output = json_format.MessageToDict(response) output['timestamp'] = datetime.datetime.fromtimestamp(float(output['timestamp'])/1000000000).isoformat() if 'prefix' in output: output['prefix'] = self._decodeXpath(output['prefix']) for item in output['response']: item['path'] = self._decodeXpath(item['path']) return json.dumps(output, indent=4).encode() @ensure_connect def gnmiSubscribe(self, *args, **kwargs): """ Executes a gNMI Subscribe request Encoding that is used for data serialization is automatically determined based on the remote device capabilities. This gNMI plugin has implemented suppport for JSON_IETF (preferred) and JSON (fallback). 
Parameters: prefix (str): Path prefix that is added to all paths (XPATH syntax) mode (str): Mode of subscription (STREAM, ONCE) subscription (list of dict): Subscription specification (path, interval, submode) duration (int): timeout, to stop receiving qos (int): DSCP marking that is used updates_only (bool): Send only updates to initial state allow_aggregation (bool): Aggregate elements marked as eligible for aggregation Returns: str: Updates received converted into JSON format """ # Remove all input parameters from kwargs that are not set input = dict(filter(lambda x: x[1], kwargs.items())) # Adjust input parameters to match specification for gNMI SubscribeRequest if 'mode' in input: input['mode'] = input['mode'].upper() input['encoding'] = self._encoding_value if 'prefix' in input: input['prefix'] = self._encodeXpath(input['prefix']) if 'subscription' in input: for item in input['subscription']: item['path'] = self._encodeXpath(item['path']) # Extract duration from input attributes if 'duration' in input: duration = input['duration'] del input['duration'] else: duration = 20 request = json_format.ParseDict({'subscribe': input}, gnmi_pb2.SubscribeRequest()) auth = self._login_credentials try: output = [] responses = self._stub.Subscribe(iter([request]), duration, metadata=auth) if input['mode'] == 'ONCE': responses = [json_format.MessageToDict(response) for response in responses] output = self._mergeToSingleDict(responses) else: for update in self._simplifyUpdates(responses): output.append(update) except grpc.RpcError as e: if e.code() == grpc.StatusCode.DEADLINE_EXCEEDED: if input['mode'] == 'ONCE': raise AnsibleConnectionFailure("gNMI ONCE Subscription timed out") else: # RPC timed out, which is okay pass else: raise AnsibleConnectionFailure("%s" % e) return json.dumps(output, indent=4).encode()
37.74399
124
0.583384
23,542
0.749674
913
0.029074
7,336
0.233608
0
0
16,131
0.513677
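The gNMI connection plugin in the record above converts XPATH strings into the dict layout of gnmi_pb2.Path through _encodeXpath/_decodeXpath. Below is a minimal standalone sketch of that round trip, reusing the plugin's regular expressions so it needs neither gRPC nor protobuf; the sample path is made up for illustration.

import re

def encode_xpath(xpath='/'):
    """Split an XPATH string into the dict layout used for gnmi_pb2.Path."""
    elems = []
    xpath = xpath.strip('\t\n\r /')
    if not xpath:
        return {}
    # Split on '/' only when it is outside of [...] key expressions.
    for part in re.split(r'/(?=(?:[^\[\]]|\[[^\[\]]+\])*$)', xpath):
        entry = {'name': part.split('[', 1)[0]}
        keys = dict(k.split('=', 1) for k in re.findall(r'\[(.*?)\]', part))
        if keys:
            entry['key'] = keys
        elems.append(entry)
    return {'elem': elems}

def decode_xpath(path):
    """Rebuild the XPATH string from the dict representation."""
    out = []
    for elem in path.get('elem', []):
        name = elem['name']
        for k, v in elem.get('key', {}).items():
            name += '[%s=%s]' % (k, v)
        out.append(name)
    return '/'.join(out)

if __name__ == '__main__':
    p = encode_xpath('interfaces/interface[name=eth0]/state/counters')
    print(p)
    print(decode_xpath(p))  # interfaces/interface[name=eth0]/state/counters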
9900a4818a6a2131c9358bacda678af44a4371c0
4,056
py
Python
testcases/cloud_admin/services_up_test.py
tbeckham/eutester
1440187150ce284bd87147e71ac7f0fda194b4d9
[ "BSD-2-Clause" ]
null
null
null
testcases/cloud_admin/services_up_test.py
tbeckham/eutester
1440187150ce284bd87147e71ac7f0fda194b4d9
[ "BSD-2-Clause" ]
null
null
null
testcases/cloud_admin/services_up_test.py
tbeckham/eutester
1440187150ce284bd87147e71ac7f0fda194b4d9
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/python # Software License Agreement (BSD License) # # Copyright (c) 2009-2011, Eucalyptus Systems, Inc. # All rights reserved. # # Redistribution and use of this software in source and binary forms, with or # without modification, are permitted provided that the following conditions # are met: # # Redistributions of source code must retain the above # copyright notice, this list of conditions and the # following disclaimer. # # Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other # materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Author: clarkmatthew import eucaops from eutester.eutestcase import EutesterTestCase import time class MyTestCase(EutesterTestCase): def __init__(self, config_file=None, password=None): self.setuptestcase() self.setup_parser() self.parser.add_argument("--timeout", default=600) self.get_args() def clean_method(self): self.debug('No clean_method defined for this test') pass def wait_for_services_operational(self, timeout=None): """ Definition: Test attempts to query the state of a subset of core services. The test will continue to poll the system until it finds an ENABLED instance of each service. In the HA case it will wait for an ENABLED and DISABLED instance of each. """ timeout= timeout or self.args.timeout last_err = "" elapsed = 0 start = time.time() self.tester = None while (not self.tester and elapsed < timeout): elapsed = int(time.time() - start) self.status('Attempting to create tester object. 
Elapsed:' + str(elapsed)) try: self.tester = eucaops.Eucaops(config_file=self.args.config_file, password=self.args.password) except Exception, e: tb = eucaops.Eucaops.get_traceback() last_err = str(tb) + "\n" + str(e) print 'Services not up because of: ' + last_err + '\n' if not self.tester: raise Exception(str(last_err) + 'Could not create tester object after elapsed:' + str(elapsed)) timeout = timeout - elapsed self.status('starting wait for all services operational, timeout:' + str(timeout)) self.tester.service_manager.wait_for_all_services_operational(timeout) self.status('All services are up') self.tester.service_manager.print_services_list() if __name__ == "__main__": testcase = MyTestCase() ### Use the list of tests passed from config/command line to determine what subset of tests to run ### or use a predefined list "VolumeTagging", "InstanceTagging", "SnapshotTagging", "ImageTagging" list = testcase.args.tests or ["wait_for_services_operational"] ### Convert test suite methods to EutesterUnitTest objects unit_list = [ ] for test in list: unit_list.append( testcase.create_testunit_by_name(test) ) ### Run the EutesterUnitTest objects, dont worry about clean on exit until we need it for this method result = testcase.run_test_case_list(unit_list,clean_on_exit=False) exit(result)
42.25
115
0.704389
1,804
0.444773
0
0
0
0
0
0
2,383
0.587525
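services_up_test.py above keeps re-attempting Eucaops construction until it succeeds or the timeout expires, and only then waits for all services to become operational. The retry pattern generalizes; a small Python 3 sketch of such a poll-until-ready helper (the check() callable is a stand-in for the tester construction, not part of eutester) is:

import time

def wait_until(check, timeout=600, interval=5):
    """Call check() repeatedly until it returns a truthy value or the timeout expires."""
    start = time.time()
    last_err = None
    while time.time() - start < timeout:
        try:
            result = check()
            if result:
                return result
        except Exception as exc:  # remember the last failure for the final report
            last_err = exc
        time.sleep(interval)
    raise TimeoutError('check() never succeeded within %ss (last error: %s)'
                       % (timeout, last_err))

if __name__ == '__main__':
    # Trivial demo: succeeds on the third attempt.
    attempts = {'n': 0}
    def check():
        attempts['n'] += 1
        return attempts['n'] >= 3
    print(wait_until(check, timeout=30, interval=0.1))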
99019a837f86e3b14c54300ab0d06ff51f85071a
173
py
Python
intValues.py
jules552/ProjetISN
20da3572b59af25a166022bc2f5b25d46add2650
[ "Unlicense" ]
null
null
null
intValues.py
jules552/ProjetISN
20da3572b59af25a166022bc2f5b25d46add2650
[ "Unlicense" ]
null
null
null
intValues.py
jules552/ProjetISN
20da3572b59af25a166022bc2f5b25d46add2650
[ "Unlicense" ]
null
null
null
MAP = 1 SPEED = 1.5 VELOCITYRESET = 6 WIDTH = 1280 HEIGHT = 720 X = WIDTH / 2 - 50 Y = HEIGHT / 2 - 50 MOUSER = 325 TICKRATES = 120 nfc = False raspberry = False
14.416667
20
0.606936
0
0
0
0
0
0
0
0
0
0
990280dc9a383a0a37cbb821de57615b46aa6a23
401
py
Python
April/Apr_25_2019/builder.py
while1618/DailyCodingProblem
187909f78281828da543439646cdf52d64c2bd0c
[ "MIT" ]
1
2019-11-17T10:56:28.000Z
2019-11-17T10:56:28.000Z
April/Apr_25_2019/builder.py
while1618/DailyCodingProblem
187909f78281828da543439646cdf52d64c2bd0c
[ "MIT" ]
null
null
null
April/Apr_25_2019/builder.py
while1618/DailyCodingProblem
187909f78281828da543439646cdf52d64c2bd0c
[ "MIT" ]
1
2021-11-02T01:00:37.000Z
2021-11-02T01:00:37.000Z
# This problem was asked by Facebook. # # A builder is looking to build a row of N houses that can be of K different colors. # He has a goal of minimizing cost while ensuring that no two neighboring houses are of the same color. # # Given an N by K matrix where the nth row and kth column represents the cost to build the nth house with kth color, # return the minimum cost which achieves this goal.
44.555556
116
0.763092
0
0
0
0
0
0
0
0
393
0.98005
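builder.py above only states the problem: paint N houses with K colors at minimum total cost while no two neighbors share a color. A standard dynamic-programming sketch that solves it in O(N*K) time by carrying the best and second-best totals of the previous house is given below; the sample cost matrix is invented for illustration.

def min_cost(costs):
    """costs[n][k] = cost of painting house n with color k.
    Returns the minimum total cost such that no two neighbors share a color.
    Assumes K >= 2 whenever there is more than one house."""
    if not costs:
        return 0
    prev = list(costs[0])
    for row in costs[1:]:
        # Best and second-best totals achievable up to the previous house.
        best = min(prev)
        best_idx = prev.index(best)
        second = min(prev[:best_idx] + prev[best_idx + 1:])
        # If we reuse the previous best color, we must fall back to the second best.
        prev = [row[k] + (second if k == best_idx else best)
                for k in range(len(row))]
    return min(prev)

if __name__ == '__main__':
    sample = [[17, 2, 17],
              [16, 16, 5],
              [14, 3, 19]]
    print(min_cost(sample))  # 10 (colors 1, 2, 1 -> costs 2 + 5 + 3)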
99050763178e67f3f1f7faee3c71dfb0a78b6af1
4,521
py
Python
experiments/delaney/plot.py
pfnet-research/bayesgrad
5db613391777b20b7a367c274804f0b736991b0a
[ "MIT" ]
57
2018-06-30T01:47:19.000Z
2022-03-03T17:21:42.000Z
experiments/delaney/plot.py
pfnet-research/bayesgrad
5db613391777b20b7a367c274804f0b736991b0a
[ "MIT" ]
null
null
null
experiments/delaney/plot.py
pfnet-research/bayesgrad
5db613391777b20b7a367c274804f0b736991b0a
[ "MIT" ]
8
2018-07-07T06:18:40.000Z
2021-02-23T21:58:45.000Z
import argparse import numpy as np import os import sys import matplotlib matplotlib.use('agg') import matplotlib.pyplot as plt sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))) from saliency.visualizer.smiles_visualizer import SmilesVisualizer def visualize(dir_path): parent_dir = os.path.dirname(dir_path) saliency_vanilla = np.load(os.path.join(dir_path, "saliency_vanilla.npy")) saliency_smooth = np.load(os.path.join(dir_path, "saliency_smooth.npy")) saliency_bayes = np.load(os.path.join(dir_path, "saliency_bayes.npy")) visualizer = SmilesVisualizer() os.makedirs(os.path.join(parent_dir, "result_vanilla"), exist_ok=True) os.makedirs(os.path.join(parent_dir, "result_smooth"), exist_ok=True) os.makedirs(os.path.join(parent_dir, "result_bayes"), exist_ok=True) test_idx = np.load(os.path.join(dir_path, "test_idx.npy")) answer = np.load(os.path.join(dir_path, "answer.npy")) output = np.load(os.path.join(dir_path, "output.npy")) smiles_all = np.load(os.path.join(parent_dir, "smiles.npy")) def calc_range(saliency): vmax = float('-inf') vmin = float('inf') for v in saliency: vmax = max(vmax, np.max(v)) vmin = min(vmin, np.min(v)) return vmin, vmax v_range_vanilla = calc_range(saliency_vanilla) v_range_smooth = calc_range(saliency_smooth) v_range_bayes = calc_range(saliency_bayes) def get_scaler(v_range): def scaler(saliency_): saliency = np.copy(saliency_) minv, maxv = v_range if maxv == minv: saliency = np.zeros_like(saliency) else: pos = saliency >= 0.0 saliency[pos] = saliency[pos]/maxv nega = saliency < 0.0 saliency[nega] = saliency[nega]/(np.abs(minv)) return saliency return scaler scaler_vanilla = get_scaler(v_range_vanilla) scaler_smooth = get_scaler(v_range_smooth) scaler_bayes = get_scaler(v_range_bayes) def color(x): if x > 0: # Red for positive value return 1., 1. - x, 1. - x else: # Blue for negative value x *= -1 return 1. - x, 1. - x, 1. 
for i, id in enumerate(test_idx): smiles = smiles_all[id] out = output[i] ans = answer[i] # legend = "t:{}, p:{}".format(ans, out) legend = '' ext = '.png' # '.svg' # visualizer.visualize( # saliency_vanilla[id], smiles, save_filepath=os.path.join(parent_dir, "result_vanilla", str(id) + ext), # visualize_ratio=1.0, legend=legend, scaler=scaler_vanilla, color_fn=color) # visualizer.visualize( # saliency_smooth[id], smiles, save_filepath=os.path.join(parent_dir, "result_smooth", str(id) + ext), # visualize_ratio=1.0, legend=legend, scaler=scaler_smooth, color_fn=color) visualizer.visualize( saliency_bayes[id], smiles, save_filepath=os.path.join(parent_dir, "result_bayes", str(id) + ext), visualize_ratio=1.0, legend=legend, scaler=scaler_bayes, color_fn=color) def plot_result(prediction, answer, save_filepath='result.png'): plt.scatter(prediction, answer, marker='.') plt.plot([-100, 100], [-100, 100], c='r') max_v = max(np.max(prediction), np.max(answer)) min_v = min(np.min(prediction), np.min(answer)) plt.xlim([min_v-0.1, max_v+0.1]) plt.xlabel("prediction") plt.ylim([min_v-0.1, max_v+0.1]) plt.ylabel("ground truth") plt.savefig(save_filepath) plt.close() def main(): parser = argparse.ArgumentParser( description='Regression with own dataset.') parser.add_argument('--dirpath', '-d', type=str, default='./results/M_30_3_32_32') args = parser.parse_args() path = args.dirpath n_split = 5 output = [] answer = [] for i in range(n_split): suffix = str(i) + "-" + str(n_split) output.append(np.load(os.path.join(path, suffix, "output.npy"))) answer.append(np.load(os.path.join(path, suffix, "answer.npy"))) output = np.concatenate(output) answer = np.concatenate(answer) plot_result(output, answer, save_filepath=os.path.join(path, "result.png")) for i in range(n_split): suffix = str(i) + "-" + str(n_split) print(suffix) visualize(os.path.join(path, suffix)) if __name__ == '__main__': main()
35.320313
116
0.628622
0
0
0
0
0
0
0
0
877
0.193984
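plot.py above rescales saliency values against a global minimum/maximum and maps positive values to red and negative values to blue. A self-contained sketch of that scaler and color function, applied to a tiny made-up saliency vector:

import numpy as np

def make_scaler(vmin, vmax):
    """Scale positive values by vmax and negative values by |vmin|, as in plot.py."""
    def scaler(saliency):
        s = np.array(saliency, dtype=float, copy=True)
        if vmax == vmin:
            return np.zeros_like(s)
        s[s >= 0] /= vmax
        s[s < 0] /= abs(vmin)
        return s
    return scaler

def color(x):
    """Red for positive, blue for negative scaled saliency in [-1, 1]."""
    if x > 0:
        return 1.0, 1.0 - x, 1.0 - x
    x = -x
    return 1.0 - x, 1.0 - x, 1.0

if __name__ == '__main__':
    raw = np.array([-2.0, -0.5, 0.0, 1.0, 4.0])
    scale = make_scaler(raw.min(), raw.max())
    for v in scale(raw):
        print(round(float(v), 2), color(float(v)))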
99062a5160d0b8327745e2f7901f243a1d23d8b8
853
py
Python
public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py
btybug/main.albumbugs
2343466bae7ee3d8941abc4c9684667cccc3e103
[ "MIT" ]
13
2016-05-25T16:12:49.000Z
2021-04-09T01:49:24.000Z
public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py
btybug/main.albumbugs
2343466bae7ee3d8941abc4c9684667cccc3e103
[ "MIT" ]
265
2015-10-19T02:40:55.000Z
2022-03-28T07:24:49.000Z
public/js/tinymice/plugins/bootstrap/jquery-file-tree/connectors/jqueryFileTree.py
btybug/main.albumbugs
2343466bae7ee3d8941abc4c9684667cccc3e103
[ "MIT" ]
7
2016-02-08T11:41:40.000Z
2021-06-08T18:18:02.000Z
#
# jQuery File Tree
# Python/Django connector script
# By Martin Skou
#
import os
import urllib

from django.http import HttpResponse  # needed for the response returned below


def dirlist(request):
    r = ['<ul class="jqueryFileTree" style="display: none;">']
    try:
        r = ['<ul class="jqueryFileTree" style="display: none;">']
        d = urllib.unquote(request.POST.get('dir', 'c:\\temp'))
        for f in os.listdir(d):
            ff = os.path.join(d, f)
            if os.path.isdir(ff):
                r.append('<li class="directory collapsed"><a href="#" rel="%s/">%s</a></li>' % (ff, f))
            else:
                e = os.path.splitext(f)[1][1:]  # get .ext and remove dot
                r.append('<li class="file ext_%s"><a href="#" rel="%s">%s</a></li>' % (e, ff, f))
        r.append('</ul>')
    except Exception, e:
        r.append('Could not load directory: %s' % str(e))
    r.append('</ul>')
    return HttpResponse(''.join(r))
32.807692
101
0.548652
0
0
0
0
0
0
0
0
383
0.449004
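The connector above renders a directory as jQuery File Tree <ul>/<li> markup. For experimenting with that markup outside Django, a framework-free Python 3 sketch of the same listing logic (with html.escape added, which the original does not do) could look like:

import os
from html import escape

def dirlist_html(d):
    """Return jqueryFileTree markup for the entries of directory d."""
    r = ['<ul class="jqueryFileTree" style="display: none;">']
    try:
        for f in os.listdir(d):
            ff = os.path.join(d, f)
            if os.path.isdir(ff):
                r.append('<li class="directory collapsed">'
                         '<a href="#" rel="%s/">%s</a></li>' % (escape(ff), escape(f)))
            else:
                ext = os.path.splitext(f)[1][1:]  # extension without the dot
                r.append('<li class="file ext_%s">'
                         '<a href="#" rel="%s">%s</a></li>' % (escape(ext), escape(ff), escape(f)))
    except OSError as exc:
        r.append('Could not load directory: %s' % escape(str(exc)))
    r.append('</ul>')
    return ''.join(r)

if __name__ == '__main__':
    print(dirlist_html('.'))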
990961ddde648d8a6e8bdae1002af6b0a3fe992c
1,639
py
Python
gpytorch/lazy/chol_lazy_tensor.py
harvineet/gpytorch
8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e
[ "MIT" ]
null
null
null
gpytorch/lazy/chol_lazy_tensor.py
harvineet/gpytorch
8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e
[ "MIT" ]
null
null
null
gpytorch/lazy/chol_lazy_tensor.py
harvineet/gpytorch
8aa8f1a4298ef61cfea9c4d11c75576a84ffcc3e
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 import torch from .lazy_tensor import LazyTensor from .root_lazy_tensor import RootLazyTensor from .. import settings class CholLazyTensor(RootLazyTensor): def __init__(self, chol): if isinstance(chol, LazyTensor): # Probably is an instance of NonLazyTensor chol = chol.evaluate() # Check that we have a lower triangular matrix if settings.debug.on(): mask = torch.ones(chol.shape[-2:], dtype=chol.dtype, device=chol.device).triu_(1) if torch.max(chol.mul(mask)).item() > 1e-3 and torch.equal(chol, chol): raise RuntimeError("CholLazyVaraiable should take a lower-triangular matrix in the constructor.") # Run super constructor super(CholLazyTensor, self).__init__(chol) @property def _chol(self): if not hasattr(self, "_chol_memo"): self._chol_memo = self.root.evaluate() return self._chol_memo @property def _chol_diag(self): if not hasattr(self, "_chol_diag_memo"): self._chol_diag_memo = self._chol.diagonal(dim1=-2, dim2=-1).clone() return self._chol_diag_memo def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True): inv_quad_term = None logdet_term = None if inv_quad_rhs is not None: inv_quad_term, _ = super(CholLazyTensor, self).inv_quad_logdet( inv_quad_rhs, logdet=False, reduce_inv_quad=reduce_inv_quad ) if logdet: logdet_term = self._chol_diag.pow(2).log().sum(-1) return inv_quad_term, logdet_term
33.44898
113
0.654667
1,493
0.910921
0
0
357
0.217816
0
0
239
0.145821
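CholLazyTensor above derives the log-determinant from the diagonal of the Cholesky factor: logdet_term = chol_diag.pow(2).log().sum(-1). The identity it relies on, logdet(L @ L.T) == sum(log(diag(L)**2)), can be checked with plain NumPy:

import numpy as np

rng = np.random.default_rng(0)

# Build a random lower-triangular factor with a positive diagonal,
# so that A = L @ L.T is symmetric positive definite.
n = 5
L = np.tril(rng.normal(size=(n, n)))
np.fill_diagonal(L, np.abs(np.diag(L)) + 1.0)
A = L @ L.T

logdet_from_chol = np.sum(np.log(np.diag(L) ** 2))
sign, logdet_ref = np.linalg.slogdet(A)

print(np.isclose(logdet_from_chol, logdet_ref))  # True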
9909642cf635ba7b413ffb8f974cd5801c613d72
5,765
py
Python
pirates/audio/AmbientManagerBase.py
ksmit799/POTCO-PS
520d38935ae8df4b452c733a82c94dddac01e275
[ "Apache-2.0" ]
8
2017-01-24T04:33:29.000Z
2020-11-01T08:36:24.000Z
pirates/audio/AmbientManagerBase.py
ksmit799/Pirates-Online-Remake
520d38935ae8df4b452c733a82c94dddac01e275
[ "Apache-2.0" ]
1
2017-03-02T18:05:17.000Z
2017-03-14T06:47:10.000Z
pirates/audio/AmbientManagerBase.py
ksmit799/Pirates-Online-Remake
520d38935ae8df4b452c733a82c94dddac01e275
[ "Apache-2.0" ]
11
2017-03-02T18:46:07.000Z
2020-11-01T08:36:26.000Z
# File: A (Python 2.4) from pandac.PandaModules import AudioSound from direct.directnotify import DirectNotifyGlobal from direct.interval.IntervalGlobal import LerpFunc, Sequence from direct.showbase.DirectObject import DirectObject class AmbientSound: notify = DirectNotifyGlobal.directNotify.newCategory('AmbientSound') def __init__(self, path, masterAmbientVolume, loop = True, isMusic = False): self.isMusic = isMusic if self.isMusic: self.sfx = loader.loadMusic(path) else: self.sfx = loader.loadSfx(path) self.path = path self.loop = loop self.setLoop(loop) self.setVolume(0) self.masterAmbientVolume = masterAmbientVolume self.reloadAttempt = 0 self.curPriority = 0 self.duration = 0 self.finalVolume = 0 self.startVolume = 0 self.activeInterval = None def unload(self): if self.activeInterval: self.activeInterval.finish() del self.activeInterval self.sfx.stop() del self.sfx def play(self): self.sfx.play() def getVolume(self): return self.sfx.getVolume() def setVolume(self, vol): self.sfx.setVolume(vol) def getLoop(self): return self.sfx.getLoop() def setLoop(self, loop): self.sfx.setLoop(loop) def set3dAttributes(self, *args): self.sfx.set3dAttributes(*args) def requestChangeVolume(self, duration, finalVolume, priority): if priority < self.curPriority: return None self.curPriority = priority if not self.sfx.getActive(): if self.reloadAttempt < 1: self.reloadAttempt += 1 if self.isMusic: self.sfx = loader.loadMusic(self.path) else: self.sfx = loader.loadSfx(self.path) if self.sfx: self.sfx.setLoop(self.loop) self.duration = duration self.startVolume = self.getVolume() self.finalVolume = finalVolume if self.activeInterval: self.activeInterval.pause() del self.activeInterval self.activeInterval = Sequence(LerpFunc(self.changeVolumeTask, fromData = self.startVolume, toData = self.finalVolume, duration = self.duration)) self.activeInterval.start() def changeMasterAmbientVolume(self, newMasterAmbientVolume): if not self.masterAmbientVolume == newMasterAmbientVolume: self.masterAmbientVolume = newMasterAmbientVolume if self.activeInterval and self.activeInterval.isPlaying(): pass elif self.sfx.status() == 2: newVol = float(self.finalVolume) * self.masterAmbientVolume self.sfx.setVolume(newVol) def changeVolumeTask(self, t): curVolume = t * self.masterAmbientVolume self.sfx.setVolume(curVolume) if not hasattr(self, 'reportCounter'): self.reportCounter = 0 self.reportCounter += 1 if self.reportCounter % 10 == 0: pass 1 if curVolume > 0 and self.sfx.status() == 1: self.sfx.play() if curVolume <= 0 and self.sfx.status() == 2: self.sfx.stop() self.curPriority = 0 class AmbientManagerBase(DirectObject): notify = DirectNotifyGlobal.directNotify.newCategory('AmbientManagerBase') def __init__(self): self.ambientDict = { } self.masterAmbientVolume = 1.0 def load(self, name, path, looping = True, isMusic = False): retval = False if self.ambientDict.has_key(name): if self.ambientDict[name].path == path: self.notify.warning('ambient name=%s path=%s already loaded' % (name, path)) else: self.notify.warning('ambient name %s is already bound to %s' % self.ambientDict[name].path) else: newAmbient = AmbientSound(path, self.masterAmbientVolume, looping, isMusic) self.ambientDict[name] = newAmbient def unload(self, name): if self.ambientDict.has_key(name): self.ambientDict[name].unload() del self.ambientDict[name] else: self.notify.warning('music: %s not in ambientDict' % name) def requestFadeIn(self, name, duration = 5, finalVolume = 1.0, priority = 0): self.requestChangeVolume(name, duration, finalVolume, priority) 
def requestFadeOut(self, name, duration = 5, finalVolume = 0.0, priority = 0): self.requestChangeVolume(name, duration, finalVolume, priority) def requestChangeVolume(self, name, duration, finalVolume, priority = 0): if self.ambientDict.has_key(name): self.ambientDict[name].requestChangeVolume(duration, finalVolume, priority) def delete(self): for name in self.ambientDict.keys(): self.ambientDict[name].unload() self.ambientDict = { } def silence(self): for name in self.ambientDict.keys(): self.ambientDict[name].requestChangeVolume(0.0, 0.0, priority = 1) def changeMasterAmbientVolume(self, newMasterAmbientVolume): if not newMasterAmbientVolume == self.masterAmbientVolume: self.masterAmbientVolume = newMasterAmbientVolume for name in self.ambientDict.keys(): self.ambientDict[name].changeMasterAmbientVolume(self.masterAmbientVolume)
30.828877
153
0.601214
5,492
0.952645
0
0
0
0
0
0
181
0.031396
99096743e56d22ad0a53c9983c2e48c412dd1c0f
890
py
Python
test/tests/import_test.py
jmgc/pyston
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
[ "BSD-2-Clause", "Apache-2.0" ]
1
2020-02-06T14:28:45.000Z
2020-02-06T14:28:45.000Z
test/tests/import_test.py
jmgc/pyston
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
[ "BSD-2-Clause", "Apache-2.0" ]
null
null
null
test/tests/import_test.py
jmgc/pyston
9f672c1bbb75710ac17dd3d9107da05c8e9e8e8f
[ "BSD-2-Clause", "Apache-2.0" ]
1
2020-02-06T14:29:00.000Z
2020-02-06T14:29:00.000Z
import import_target print import_target.x import import_target import_target.foo() c = import_target.C() print import_target.import_nested_target.y import_target.import_nested_target.bar() d = import_target.import_nested_target.D() print "testing importfrom:" from import_target import x as z print z import_nested_target = 15 from import_nested_target import y print "This should still be 15:",import_nested_target import import_nested_target print import_nested_target.__name__ print import_nested_target.y import_target.import_nested_target.y = import_nested_target.y + 1 print import_nested_target.y print z print y print __name__ print __import__("import_target") is import_target import sys import _multiprocessing del _multiprocessing del sys.modules["_multiprocessing"] import _multiprocessing import time del time del sys.modules["time"] import time print time.sleep(0)
20.227273
65
0.837079
0
0
0
0
0
0
0
0
86
0.096629
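import_test.py above exercises CPython import machinery: __import__(name) hands back the module object cached in sys.modules, and deleting that cache entry makes the next import statement re-execute the module. A short Python 3 illustration of the same behavior using the standard json module:

import sys
import json

print(__import__("json") is json)   # True: same cached module object

first = sys.modules["json"]
del sys.modules["json"]             # drop the cache entry
import json                         # re-executes the module, binds a fresh object
print(json is first)                # False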
99098c029853719101bfb8070fc7fe3e4ddbd2c3
6,801
py
Python
hexrd/ui/matrix_editor.py
HEXRD/hexrdgui
d92915463f237e0521b5830655ae73bc5bcd9f80
[ "BSD-3-Clause" ]
13
2020-02-18T00:23:02.000Z
2022-02-24T20:04:36.000Z
hexrd/ui/matrix_editor.py
HEXRD/hexrdgui
d92915463f237e0521b5830655ae73bc5bcd9f80
[ "BSD-3-Clause" ]
656
2020-01-14T02:33:40.000Z
2022-03-26T15:31:17.000Z
hexrd/ui/matrix_editor.py
HEXRD/hexrdgui
d92915463f237e0521b5830655ae73bc5bcd9f80
[ "BSD-3-Clause" ]
6
2020-01-17T15:02:53.000Z
2020-11-01T22:02:48.000Z
import numpy as np from PySide2.QtCore import QSignalBlocker, Signal from PySide2.QtWidgets import QGridLayout, QWidget from hexrd.ui.scientificspinbox import ScientificDoubleSpinBox DEFAULT_ENABLED_STYLE_SHEET = 'background-color: white' DEFAULT_DISABLED_STYLE_SHEET = 'background-color: #F0F0F0' INVALID_MATRIX_STYLE_SHEET = 'background-color: red' class MatrixEditor(QWidget): data_modified = Signal() def __init__(self, data, parent=None): super().__init__(parent) self._data = data # If this is not None, then only the elements present in the # list (as (i, j) items) will be enabled. self._enabled_elements = None # If this is set, it will be called every time the data updates # to apply equality constraints. self._apply_constraints_func = None # Whether or not the matrix is currently invalid self.matrix_invalid = False # Reason the matrix is currently invalid self.matrix_invalid_reason = '' self.setLayout(QGridLayout()) self.add_spin_boxes() self.update_gui() def add_spin_boxes(self): layout = self.layout() for i in range(self.rows): for j in range(self.cols): sb = self.create_spin_box() layout.addWidget(sb, i, j) def create_spin_box(self): sb = ScientificDoubleSpinBox() sb.setKeyboardTracking(False) sb.valueChanged.connect(self.element_modified) return sb def element_modified(self): self.update_data() @property def data(self): return self._data @data.setter def data(self, v): if not np.array_equal(self._data, v): if self._data.shape != v.shape: msg = (f'Shape {v.shape} does not match original shape ' f'{self._data.shape}') raise AttributeError(msg) self._data = v self.reset_disabled_values() self.update_gui() @property def rows(self): return self.data.shape[0] @property def cols(self): return self.data.shape[1] def update_data(self): self.data[:] = self.gui_data self.apply_constraints() self.data_modified.emit() def update_gui(self): self.gui_data = self.data @property def gui_data(self): row_range = range(self.rows) col_range = range(self.cols) return [[self.gui_value(i, j) for j in col_range] for i in row_range] @gui_data.setter def gui_data(self, v): blockers = [QSignalBlocker(w) for w in self.all_widgets] # noqa: F841 for i in range(self.rows): for j in range(self.cols): self.set_gui_value(i, j, v[i][j]) @property def all_widgets(self): row_range = range(self.rows) col_range = range(self.cols) return [self.widget(i, j) for j in col_range for i in row_range] @property def enabled_widgets(self): widgets = [] for i in range(self.rows): for j in range(self.cols): if (i, j) in self.enabled_elements: widgets.append(self.widget(i, j)) return widgets def widget(self, row, col): return self.layout().itemAtPosition(row, col).widget() def gui_value(self, row, col): return self.widget(row, col).value() def set_gui_value(self, row, col, val): self.widget(row, col).setValue(val) def set_matrix_invalid(self, s): self.matrix_invalid = True self.matrix_invalid_reason = s self.update_tooltips() self.update_enable_states() def set_matrix_valid(self): self.matrix_invalid = False self.matrix_invalid_reason = '' self.update_tooltips() self.update_enable_states() def update_tooltips(self): if self.matrix_invalid: tooltip = self.matrix_invalid_reason else: tooltip = '' for w in self.enabled_widgets: w.setToolTip(tooltip) def update_enable_states(self): enable_all = self.enabled_elements is None for i in range(self.rows): for j in range(self.cols): w = self.widget(i, j) enable = enable_all or (i, j) in self.enabled_elements w.setEnabled(enable) enabled_str = 'enabled' if enable else 'disabled' style_sheet = 
getattr(self, f'{enabled_str}_style_sheet') w.setStyleSheet(style_sheet) def reset_disabled_values(self): # Resets all disabled values to zero, then applies constraints for i in range(self.rows): for j in range(self.cols): if not self.widget(i, j).isEnabled(): self.data[i, j] = 0.0 self.apply_constraints() self.update_gui() @property def enabled_style_sheet(self): if self.matrix_invalid: return INVALID_MATRIX_STYLE_SHEET return DEFAULT_ENABLED_STYLE_SHEET @property def disabled_style_sheet(self): return DEFAULT_DISABLED_STYLE_SHEET @property def enabled_elements(self): return self._enabled_elements @enabled_elements.setter def enabled_elements(self, v): if self._enabled_elements != v: self._enabled_elements = v self.update_enable_states() self.reset_disabled_values() @property def apply_constraints_func(self): return self._apply_constraints_func @apply_constraints_func.setter def apply_constraints_func(self, v): if self._apply_constraints_func != v: self._apply_constraints_func = v self.apply_constraints() def apply_constraints(self): if (func := self.apply_constraints_func) is None: return func(self.data) self.update_gui() if __name__ == '__main__': import sys from PySide2.QtWidgets import QApplication, QDialog, QVBoxLayout if len(sys.argv) < 2: sys.exit('Usage: <script> <matrix_size>') rows, cols = [int(x) for x in sys.argv[1].split('x')] data = np.ones((rows, cols)) app = QApplication(sys.argv) dialog = QDialog() layout = QVBoxLayout() dialog.setLayout(layout) editor = MatrixEditor(data) layout.addWidget(editor) # def constraints(x): # x[2][2] = x[1][1] # editor.enabled_elements = [(1, 1), (3, 4)] # editor.apply_constraints_func = constraints def on_data_modified(): print(f'Data modified: {editor.data}') editor.data_modified.connect(on_data_modified) dialog.finished.connect(app.quit) dialog.show() app.exec_()
27.987654
78
0.617115
5,629
0.827672
0
0
2,288
0.336421
0
0
764
0.112336
990aa6cbf16ed34f5030609c03ab43c0f0ed8c2a
674
py
Python
data/train/python/990aa6cbf16ed34f5030609c03ab43c0f0ed8c2aurls.py
harshp8l/deep-learning-lang-detection
2a54293181c1c2b1a2b840ddee4d4d80177efb33
[ "MIT" ]
84
2017-10-25T15:49:21.000Z
2021-11-28T21:25:54.000Z
data/train/python/990aa6cbf16ed34f5030609c03ab43c0f0ed8c2aurls.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
5
2018-03-29T11:50:46.000Z
2021-04-26T13:33:18.000Z
data/train/python/990aa6cbf16ed34f5030609c03ab43c0f0ed8c2aurls.py
vassalos/deep-learning-lang-detection
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
[ "MIT" ]
24
2017-11-22T08:31:00.000Z
2022-03-27T01:22:31.000Z
from django.conf.urls.defaults import * urlpatterns = patterns('pytorque.views', (r'^$', 'central_dispatch_view'), (r'^browse$', 'central_dispatch_view'), (r'^monitor$', 'central_dispatch_view'), (r'^submit$', 'central_dispatch_view'), (r'^stat$', 'central_dispatch_view'), (r'^login/$', 'login'), (r'^logout/$', 'logout'), # (r'^$', 'central_dispatch_view'), (r'^user/(?P<username>\w{0,50})/$', 'index'), (r'^user/(?P<username>\w{0,50})/browse$', 'browse'), # (r'^user/(?P<username>\w{0,50})/monitor', 'monitor'), # (r'^user/(?P<username>\w{0,50})/submit', 'submit'), # (r'^user/(?P<username>\w{0,50})/stat', 'stat'), )
33.7
58
0.569733
0
0
0
0
0
0
0
0
508
0.753709
990b3873866758deed49ecf19b9f6e265d5bd2a4
3,616
py
Python
checkerpy/types/all/typedtuple.py
yedivanseven/CheckerPy
04612086d25fecdd0b20ca0a050db8620c437b0e
[ "MIT" ]
1
2018-01-12T19:20:51.000Z
2018-01-12T19:20:51.000Z
checkerpy/types/all/typedtuple.py
yedivanseven/CheckerPy
04612086d25fecdd0b20ca0a050db8620c437b0e
[ "MIT" ]
null
null
null
checkerpy/types/all/typedtuple.py
yedivanseven/CheckerPy
04612086d25fecdd0b20ca0a050db8620c437b0e
[ "MIT" ]
null
null
null
from typing import Tuple, Union, Any, Sequence from collections import deque, defaultdict, OrderedDict from ...validators.one import JustLen from ...functional.mixins import CompositionClassMixin from ..one import Just dict_keys = type({}.keys()) odict_keys = type(OrderedDict({}).keys()) dict_values = type({}.values()) odict_values = type(OrderedDict({}).values()) dict_items = type({}.items()) odict_items = type(OrderedDict({}).items()) NAMED_TYPES = (frozenset, slice, range, deque, defaultdict, OrderedDict, dict_keys, dict_values, dict_items, odict_keys, odict_values, odict_items) TypesT = Union[type, Sequence[type]] class TypedTuple(CompositionClassMixin): """Checks for different type(s) of each element in a defined-length tuple. Parameters ---------- value : tuple The tuple to check the length and element types of. name : str, optional The name of the tuple to check the length and the element type(s) of. Defaults to None. types : tuple(type), tuple(tuple(type)) Tuple of the length to check for with either one type for each element of `value` or a tuple of types for each element of `value`. Use the ellipsis literal ... to skip type checking of the tuple element at that position. Returns ------- tuple The tuple passed in. Methods ------- o(callable) : CompositionOf Daisy-chains the tuple length and type checker to another `callable`, returning the functional composition of both. The argument `types` is passed through to the `TypedTuple` checker when when calling the composition. Raises ------ WrongTypeError If `value` is not a tuple or if any of its elements do not have (one of) the permitted type(s). LenError If the tuple passed in does not have the same length as `types` or if the type specification does not have a meaningful length. TypeError If `types` is not a tuple or any of its elements are not of type type. See Also -------- All, JustLen, CompositionOf """ def __new__(cls, value: tuple, name=None, *, types=(), **kwargs) -> tuple: cls.__name = str(name) if name is not None else '' cls.__string = cls.__name or str(value) types, length = cls.__valid(types) value = JustLen.JustTuple(value, name=name, length=length) for index, element in enumerate(value): if not cls.__is_or_contains_ellipsis(types[index]): element_name = f'element {index} in tuple {cls.__string}' _ = Just(types[index])(element, name=element_name) return value @classmethod def __valid(cls, types: Sequence[TypesT]) -> Tuple[TypesT, int]: if type(types) not in (tuple, list, deque): message = cls.__wrong_type_message_for(types) raise TypeError(message) return types, len(types) @staticmethod def __wrong_type_message_for(types: Any) -> str: type_name = type(types).__name__ if isinstance(types, NAMED_TYPES): of_type = type_name else: of_type = f'{type_name} like {types}' return f'Type of types argument must be tuple, not {of_type}!' @staticmethod def __is_or_contains_ellipsis(types: TypesT) -> bool: is_ellipsis = types is ... try: contains_ellipsis = ... in types except TypeError: contains_ellipsis = False return is_ellipsis or contains_ellipsis
35.45098
78
0.641316
2,940
0.813053
0
0
854
0.236173
0
0
1,578
0.436394
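TypedTuple above checks a fixed-length tuple element by element, accepting a single type or a tuple of types per position and using the ellipsis literal to skip a position. Detached from the CheckerPy helpers (Just, JustLen) and their WrongTypeError/LenError, the core check can be sketched with built-in exceptions as follows:

def check_typed_tuple(value, types):
    """Check that value is a tuple of len(types) whose elements match the
    type (or tuple of types) at each position; ... skips a position."""
    if not isinstance(value, tuple):
        raise TypeError('value must be a tuple, not %s' % type(value).__name__)
    if len(value) != len(types):
        raise ValueError('expected length %d, got %d' % (len(types), len(value)))
    for index, (element, spec) in enumerate(zip(value, types)):
        if spec is ... or (isinstance(spec, tuple) and ... in spec):
            continue
        if not isinstance(element, spec):
            raise TypeError('element %d must be %r, not %s'
                            % (index, spec, type(element).__name__))
    return value

if __name__ == '__main__':
    print(check_typed_tuple((1, 'a', 3.5), (int, str, (int, float))))
    print(check_typed_tuple((1, object(), 2), (int, ..., int)))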
54c99a336aaeb2a2bf8fbb1530f743b492eca07a
2,019
py
Python
data/analyzer/linux/lib/common/abstracts.py
iswenhao/Panda-Sandbox
a04069d404cb4326ff459e703f14625dc45759ed
[ "MIT" ]
2
2021-01-12T15:42:05.000Z
2021-01-13T04:59:39.000Z
data/analyzer/linux/lib/common/abstracts.py
iswenhao/Panda-Sandbox
a04069d404cb4326ff459e703f14625dc45759ed
[ "MIT" ]
null
null
null
data/analyzer/linux/lib/common/abstracts.py
iswenhao/Panda-Sandbox
a04069d404cb4326ff459e703f14625dc45759ed
[ "MIT" ]
null
null
null
# Copyright (C) 2014-2016 Cuckoo Foundation. # This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org # See the file 'docs/LICENSE' for copying permission. from lib.api.process import Process from lib.exceptions.exceptions import CuckooPackageError class Package(object): """Base abstract analysis package.""" PATHS = [] def __init__(self, options={}): """@param options: options dict.""" self.options = options self.pids = [] def set_pids(self, pids): """Update list of monitored PIDs in the package context. @param pids: list of pids. """ self.pids = pids def start(self): """Run analysis package. @raise NotImplementedError: this method is abstract. """ raise NotImplementedError def check(self): """Check.""" return True def execute(self, cmd): """Start an executable for analysis. @param path: executable path @param args: executable arguments @return: process pid """ p = Process() if not p.execute(cmd): raise CuckooPackageError("Unable to execute the initial process, " "analysis aborted.") return p.pid def package_files(self): """A list of files to upload to host. The list should be a list of tuples (<path on guest>, <name of file in package_files folder>). (package_files is a folder that will be created in analysis folder). """ return None def finish(self): """Finish run. If specified to do so, this method dumps the memory of all running processes. """ if self.options.get("procmemdump"): for pid in self.pids: p = Process(pid=pid) p.dump_memory() return True def get_pids(self): return [] class Auxiliary(object): priority = 0 def get_pids(self): return []
27.657534
102
0.583952
1,753
0.868252
0
0
0
0
0
0
1,028
0.509163
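Package above is abstract: start() raises NotImplementedError, and concrete analysis packages are expected to launch the sample through execute() and record the resulting pid. A hypothetical subclass sketch is shown below; the class name and the "command" option key are inventions for illustration, and it assumes it runs inside the analyzer tree where lib.common.abstracts is importable.

from lib.common.abstracts import Package


class ShellCommand(Package):
    """Hypothetical package that launches a command taken from the task options."""

    def start(self):
        # "command" is an assumed option key, not defined by the abstract Package.
        cmd = self.options.get("command", "/bin/true")
        pid = self.execute(cmd)   # Package.execute() starts a Process and returns its pid
        self.set_pids([pid])      # register the pid so finish() can dump memory if requested
        return pid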
54ca6e875f242dc42891ee212f00bf7ca42878a5
182
py
Python
rdmo/options/apps.py
Raspeanut/rdmo
9f785010a499c372a2f8368ccf76d2ea4150adcb
[ "Apache-2.0" ]
1
2021-12-13T16:32:25.000Z
2021-12-13T16:32:25.000Z
rdmo/options/apps.py
Raspeanut/rdmo
9f785010a499c372a2f8368ccf76d2ea4150adcb
[ "Apache-2.0" ]
null
null
null
rdmo/options/apps.py
Raspeanut/rdmo
9f785010a499c372a2f8368ccf76d2ea4150adcb
[ "Apache-2.0" ]
1
2021-05-20T09:31:49.000Z
2021-05-20T09:31:49.000Z
from django.apps import AppConfig from django.utils.translation import ugettext_lazy as _ class OptionsConfig(AppConfig): name = 'rdmo.options' verbose_name = _('Options')
22.75
55
0.763736
89
0.489011
0
0
0
0
0
0
23
0.126374
54d0c3f0ae68b706ed041587d739745d17917113
380
py
Python
main/admin.py
sirodoht/mal
82295e1b6a03cd9a7ee1357ca3f5be7a26d0ffe9
[ "MIT" ]
2
2020-03-29T18:47:18.000Z
2020-05-12T07:03:36.000Z
main/admin.py
sirodoht/mal
82295e1b6a03cd9a7ee1357ca3f5be7a26d0ffe9
[ "MIT" ]
null
null
null
main/admin.py
sirodoht/mal
82295e1b6a03cd9a7ee1357ca3f5be7a26d0ffe9
[ "MIT" ]
null
null
null
from django.contrib import admin from django.contrib.auth.admin import UserAdmin from main import models class Admin(UserAdmin): list_display = ("id", "username", "email", "date_joined", "last_login") admin.site.register(models.User, Admin) class DocumentAdmin(admin.ModelAdmin): list_display = ("id", "title") admin.site.register(models.Document, DocumentAdmin)
20
75
0.747368
172
0.452632
0
0
0
0
0
0
57
0.15
54d0c963fcd5c7b6f9c7de58ed61e6d2623f1f5a
3,501
py
Python
cloudshell/cli/configurator.py
QualiSystems/cloudshell-cli
9a38ff37e91e7798511e860603f5a8a79b782472
[ "Apache-2.0" ]
4
2017-01-31T14:05:19.000Z
2019-04-10T16:35:44.000Z
cloudshell/cli/configurator.py
QualiSystems/cloudshell-cli
9a38ff37e91e7798511e860603f5a8a79b782472
[ "Apache-2.0" ]
89
2016-05-25T14:17:38.000Z
2022-03-17T13:09:59.000Z
cloudshell/cli/configurator.py
QualiSystems/cloudshell-cli
9a38ff37e91e7798511e860603f5a8a79b782472
[ "Apache-2.0" ]
6
2016-07-21T12:24:10.000Z
2022-02-21T06:33:18.000Z
#!/usr/bin/python # -*- coding: utf-8 -*- import sys from abc import ABCMeta, abstractmethod from collections import defaultdict from cloudshell.cli.factory.session_factory import ( CloudInfoAccessKeySessionFactory, GenericSessionFactory, SessionFactory, ) from cloudshell.cli.service.cli import CLI from cloudshell.cli.session.ssh_session import SSHSession from cloudshell.cli.session.telnet_session import TelnetSession ABC = ABCMeta("ABC", (object,), {"__slots__": ()}) if sys.version_info >= (3, 0): from functools import lru_cache else: from functools32 import lru_cache class CLIServiceConfigurator(object): REGISTERED_SESSIONS = (CloudInfoAccessKeySessionFactory(SSHSession), TelnetSession) """Using factories instead of """ def __init__( self, resource_config, logger, cli=None, registered_sessions=None, reservation_context=None, ): """Initialize CLI service configurator. :param cloudshell.shell.standards.resource_config_generic_models.GenericCLIConfig resource_config: # noqa: E501 :param logging.Logger logger: :param cloudshell.cli.service.cli.CLI cli: :param registered_sessions: Session types and order :param cloudshell.shell.core.driver_context.ReservationContextDetails reservation_context: """ self._cli = cli or CLI() self._resource_config = resource_config self._logger = logger self._registered_sessions = registered_sessions or self.REGISTERED_SESSIONS self._reservation_context = reservation_context @property def _cli_type(self): """Connection type property [ssh|telnet|console|auto].""" return self._resource_config.cli_connection_type @property @lru_cache() def _session_dict(self): session_dict = defaultdict(list) for sess in self._registered_sessions: session_dict[sess.SESSION_TYPE.lower()].append(sess) return session_dict def initialize_session(self, session): if not isinstance(session, SessionFactory): session = GenericSessionFactory(session) return session.init_session( self._resource_config, self._logger, self._reservation_context ) def _defined_sessions(self): return [ self.initialize_session(sess) for sess in self._session_dict.get( self._cli_type.lower(), self._registered_sessions ) ] def get_cli_service(self, command_mode): """Use cli.get_session to open CLI connection and switch into required mode. :param CommandMode command_mode: operation mode, can be default_mode/enable_mode/config_mode/etc. :return: created session in provided mode :rtype: cloudshell.cli.service.session_pool_context_manager.SessionPoolContextManager # noqa: E501 """ return self._cli.get_session( self._defined_sessions(), command_mode, self._logger ) class AbstractModeConfigurator(ABC, CLIServiceConfigurator): """Used by shells to run enable/config command.""" @property @abstractmethod def enable_mode(self): pass @property @abstractmethod def config_mode(self): pass def enable_mode_service(self): return self.get_cli_service(self.enable_mode) def config_mode_service(self): return self.get_cli_service(self.config_mode)
32.119266
120
0.694087
2,896
0.827192
0
0
531
0.151671
0
0
982
0.280491
54d0e7ae83bd72293871a6d51b4fbe8e0a0e701d
142
py
Python
examples/ingenerator.py
quynhanh-ngx/pytago
de976ad8d85702ae665e97978bc4a75d282c857f
[ "MIT" ]
206
2021-06-24T16:16:13.000Z
2022-03-31T07:44:17.000Z
examples/ingenerator.py
quynhanh-ngx/pytago
de976ad8d85702ae665e97978bc4a75d282c857f
[ "MIT" ]
13
2021-06-24T17:51:36.000Z
2022-02-23T10:07:17.000Z
examples/ingenerator.py
quynhanh-ngx/pytago
de976ad8d85702ae665e97978bc4a75d282c857f
[ "MIT" ]
14
2021-06-26T02:19:45.000Z
2022-03-30T03:02:49.000Z
def main(): n = 111 gen = (n * 7 for x in range(10)) if 777 in gen: print("Yes!") if __name__ == '__main__': main()
14.2
36
0.485915
0
0
0
0
0
0
0
0
16
0.112676
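ingenerator.py above depends on the in operator accepting a generator: with n = 111 every generated value is 777, so membership is found on the first element. Worth noting is that in consumes the generator as it searches, which this short snippet makes visible:

def main():
    gen = (x * 7 for x in range(10))
    print(14 in gen)    # True: consumes 0, 7 and 14
    print(list(gen))    # [21, 28, 35, 42, 49, 56, 63] - the consumed items are gone
    print(14 in gen)    # False: the generator is now exhausted

if __name__ == '__main__':
    main()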
54d2af6cc6ffcbe94ad442887d35faa47a8ec2cd
1,090
py
Python
source/packages/scs-pm-server/src/python-server/app.py
amittkSharma/scs_predictive_maintenance
105a218b47d81d02f7e799287bd1e9279db452ce
[ "MIT" ]
null
null
null
source/packages/scs-pm-server/src/python-server/app.py
amittkSharma/scs_predictive_maintenance
105a218b47d81d02f7e799287bd1e9279db452ce
[ "MIT" ]
1
2022-02-05T17:13:00.000Z
2022-02-05T17:13:00.000Z
source/packages/scs-pm-server/src/python-server/app.py
amittkSharma/scs_predictive_maintenance
105a218b47d81d02f7e799287bd1e9279db452ce
[ "MIT" ]
null
null
null
import json import logging import joblib import pandas as pd from flask import Flask, jsonify, request from flask_cors import CORS, cross_origin app = Flask(__name__) CORS(app) @app.route("/api/machinePrediction", methods=['GET']) def home(): incomingMachineId = request.args.get('machineId') modelPath = request.args.get('modelPath') column_names = request.args.get('columnNames') data_points = request.args.get('dataPoints') app.logger.info('Received machine id is %s', incomingMachineId) app.logger.info('Model path is %s', modelPath) json_object = json.loads(data_points) pairs = json_object.items() vitals_value = [] for key, value in pairs: vitals_value.append(value) modelObj = joblib.load(modelPath) data = [vitals_value] df = pd.DataFrame(data=data, columns = column_names) modelPrediction = modelObj.predict(df) app.logger.info('Model prediction is: %s', modelPrediction) return jsonify(modelPrediction[0]) if __name__ == "__main__": app.run(debug=True) # To start the server # python3 app.py
24.222222
67
0.709174
0
0
0
0
816
0.748624
0
0
193
0.177064
54d3039f58743cfa00e492ea3768046369054479
4,411
py
Python
tests/test_remove_from_dependee_chain.py
ess-dmsc/nexus-constructor
ae0026c48f8f2d4d88d3ff00e45cb6591983853b
[ "BSD-2-Clause" ]
3
2019-05-31T08:38:25.000Z
2022-01-06T09:23:21.000Z
tests/test_remove_from_dependee_chain.py
ess-dmsc/nexus-constructor
ae0026c48f8f2d4d88d3ff00e45cb6591983853b
[ "BSD-2-Clause" ]
709
2019-02-06T08:23:07.000Z
2022-03-29T23:03:37.000Z
tests/test_remove_from_dependee_chain.py
ess-dmsc/nexus-constructor
ae0026c48f8f2d4d88d3ff00e45cb6591983853b
[ "BSD-2-Clause" ]
2
2020-03-06T09:58:56.000Z
2020-08-04T18:32:57.000Z
import pytest from PySide2.QtGui import QVector3D from nexus_constructor.model.component import Component from nexus_constructor.model.dataset import Dataset from nexus_constructor.model.instrument import Instrument from nexus_constructor.model.value_type import ValueTypes values = Dataset( name="scalar_value", type=ValueTypes.DOUBLE, size=[1], values=90.0, parent_node=None, ) @pytest.fixture def instrument(): return Instrument(parent_node=None) def test_remove_from_beginning_1(instrument): component1 = Component("component1", instrument) rot = component1.add_rotation( name="rotation1", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) component1.depends_on = rot assert len(rot.dependents) == 1 rot.remove_from_dependee_chain() assert component1.depends_on is None def test_remove_from_beginning_2(instrument): component1 = Component("component1", instrument) rot1 = component1.add_rotation( name="rotation1", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) rot2 = component1.add_rotation( name="rotation2", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) component1.depends_on = rot1 rot1.depends_on = rot2 assert len(rot2.dependents) == 1 rot1.remove_from_dependee_chain() assert len(rot2.dependents) == 1 assert rot2.dependents[0] == component1 assert component1.depends_on == rot2 def test_remove_from_beginning_3(instrument): component1 = Component("component1", instrument) component2 = Component("component2", instrument) rot1 = component1.add_rotation( name="rotation1", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) rot2 = component2.add_rotation( name="rotation2", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) component1.depends_on = rot1 component2.depends_on = rot2 rot1.depends_on = rot2 assert len(rot2.dependents) == 2 rot1.remove_from_dependee_chain() assert len(rot2.dependents) == 2 assert component2 in rot2.dependents assert component1 in rot2.dependents assert component1.depends_on == rot2 assert component1.transforms.link.linked_component == component2 def test_remove_from_middle(): component1 = Component("component1", instrument) component2 = Component("component2", instrument) component3 = Component("component3", instrument) rot1 = component1.add_rotation( name="rotation1", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) rot2 = component2.add_rotation( name="rotation2", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) rot3 = component3.add_rotation( name="rotation3", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) component1.depends_on = rot1 component2.depends_on = rot2 component3.depends_on = rot3 component1.transforms.link.linked_component = component2 component2.transforms.link.linked_component = component3 rot2.remove_from_dependee_chain() assert rot1.depends_on == rot3 assert component1.transforms.link.linked_component == component3 assert rot1 in rot3.dependents assert component3 in rot3.dependents def test_remove_from_end(): component1 = Component("component1", instrument) rot1 = component1.add_rotation( name="rotation1", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, ) rot2 = component1.add_rotation( name="rotation2", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, depends_on=rot1, ) rot3 = component1.add_rotation( name="rotation3", axis=QVector3D(1.0, 0.0, 0.0), angle=values.values, values=values, depends_on=rot2, ) component1.depends_on = rot3 
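    # Removing the transformation at the very end of the chain (rot1) should detach it while the rest of the chain stays linked through rot3 and rot2.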
rot1.remove_from_dependee_chain() assert rot1.depends_on is None assert not rot1.dependents assert component1.depends_on == rot3 assert rot2.dependents[0] == rot3 assert len(component1.transforms) == 2
28.275641
68
0.670143
0
0
0
0
73
0.01655
0
0
231
0.052369
54d32f6738e6ad2c2884cf8b772cee6a6620a984
11,013
py
Python
fastmvsnet/train1.py
molspace/FastMVS_experiments
b897015d77600687ca2addf99bb6a6f0de524e5f
[ "MIT" ]
null
null
null
fastmvsnet/train1.py
molspace/FastMVS_experiments
b897015d77600687ca2addf99bb6a6f0de524e5f
[ "MIT" ]
null
null
null
fastmvsnet/train1.py
molspace/FastMVS_experiments
b897015d77600687ca2addf99bb6a6f0de524e5f
[ "MIT" ]
null
null
null
#!/usr/bin/env python import argparse import os.path as osp import logging import time import sys sys.path.insert(0, osp.dirname(__file__) + '/..') import torch import torch.nn as nn from fastmvsnet.config import load_cfg_from_file from fastmvsnet.utils.io import mkdir from fastmvsnet.utils.logger import setup_logger from fastmvsnet.utils.torch_utils import set_random_seed from fastmvsnet.model1 import build_pointmvsnet as build_model from fastmvsnet.solver import build_optimizer, build_scheduler from fastmvsnet.utils.checkpoint import Checkpointer from fastmvsnet.dataset1 import build_data_loader from fastmvsnet.utils.tensorboard_logger import TensorboardLogger from fastmvsnet.utils.metric_logger import MetricLogger from fastmvsnet.utils.file_logger import file_logger def parse_args(): parser = argparse.ArgumentParser(description="PyTorch Fast-MVSNet Training") parser.add_argument( "--cfg", dest="config_file", default="", metavar="FILE", help="path to config file", type=str, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) args = parser.parse_args() return args def train_model(model, loss_fn, metric_fn, image_scales, inter_scales, isFlow, data_loader, optimizer, curr_epoch, tensorboard_logger, log_period=1, output_dir="", ): logger = logging.getLogger("fastmvsnet.train") meters = MetricLogger(delimiter=" ") model.train() end = time.time() total_iteration = data_loader.__len__() path_list = [] for iteration, data_batch in enumerate(data_loader): data_time = time.time() - end curr_ref_img_path = data_batch["ref_img_path"] path_list.extend(curr_ref_img_path) data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)} preds = model(data_batch, image_scales, inter_scales, isFlow) optimizer.zero_grad() loss_dict = loss_fn(preds, data_batch, isFlow) metric_dict = metric_fn(preds, data_batch, isFlow) losses = sum(loss_dict.values()) #print("LOSS DICT", loss_dict['coarse_loss']) #print("LOSSES", loss_dict.values()) meters.update(loss=losses, **loss_dict, **metric_dict) losses.backward() # print(poop) optimizer.step() batch_time = time.time() - end end = time.time() meters.update(time=batch_time, data=data_time) if iteration % log_period == 0: logger.info( meters.delimiter.join( [ "EPOCH: {epoch:2d}", "iter: {iter:4d}", "{meters}", "lr: {lr:.2e}", "max mem: {memory:.0f}", ] ).format( epoch=curr_epoch, iter=iteration, meters=str(meters), lr=optimizer.param_groups[0]["lr"], memory=torch.cuda.max_memory_allocated() / (1024.0 ** 2), ) ) tensorboard_logger.add_scalars(loss_dict, curr_epoch * total_iteration + iteration, prefix="train") tensorboard_logger.add_scalars(metric_dict, curr_epoch * total_iteration + iteration, prefix="train") if iteration % (100 * log_period) == 0: file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="train") return meters def validate_model(model, loss_fn, metric_fn, image_scales, inter_scales, isFlow, data_loader, curr_epoch, tensorboard_logger, log_period=1, output_dir="", ): logger = logging.getLogger("fastmvsnet.validate") meters = MetricLogger(delimiter=" ") model.train() end = time.time() total_iteration = data_loader.__len__() with torch.no_grad(): for iteration, data_batch in enumerate(data_loader): data_time = time.time() - end curr_ref_img_path = data_batch["ref_img_path"] data_batch = {k: v.cuda(non_blocking=True) for k, v in data_batch.items() if isinstance(v, torch.Tensor)} preds = model(data_batch, 
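                          # evaluation-only forward pass (runs under torch.no_grad(), so no gradients are stored)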
image_scales, inter_scales, isFlow) loss_dict = loss_fn(preds, data_batch, isFlow) metric_dict = metric_fn(preds, data_batch, isFlow) losses = sum(loss_dict.values()) meters.update(loss=losses, **loss_dict, **metric_dict) batch_time = time.time() - end end = time.time() meters.update(time=batch_time, data=data_time) if iteration % log_period == 0: logger.info( meters.delimiter.join( [ "EPOCH: {epoch:2d}", "iter: {iter:4d}", "{meters}", ] ).format( epoch=curr_epoch, iter=iteration, meters=str(meters), ) ) tensorboard_logger.add_scalars(meters.meters, curr_epoch * total_iteration + iteration, prefix="valid") if iteration % (100 * log_period) == 0: file_logger(data_batch, preds, curr_epoch * total_iteration + iteration, output_dir, prefix="valid") return meters def train(cfg, output_dir=""): logger = logging.getLogger("fastmvsnet.trainer") # build model set_random_seed(cfg.RNG_SEED) model, loss_fn, metric_fn = build_model(cfg) logger.info("Build model:\n{}".format(str(model))) model = nn.DataParallel(model).cuda() # build optimizer optimizer = build_optimizer(cfg, model) # build lr scheduler scheduler = build_scheduler(cfg, optimizer) # build checkpointer checkpointer = Checkpointer(model, optimizer=optimizer, scheduler=scheduler, save_dir=output_dir, logger=logger) checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, resume=cfg.AUTO_RESUME) ckpt_period = cfg.TRAIN.CHECKPOINT_PERIOD # build data loader train_data_loader = build_data_loader(cfg, mode="train") val_period = cfg.TRAIN.VAL_PERIOD val_data_loader = build_data_loader(cfg, mode="val") if val_period > 0 else None # build tensorboard logger (optionally by comment) tensorboard_logger = TensorboardLogger(output_dir) # train max_epoch = cfg.SCHEDULER.MAX_EPOCH start_epoch = checkpoint_data.get("epoch", 0) best_metric_name = "best_{}".format(cfg.TRAIN.VAL_METRIC) best_metric = checkpoint_data.get(best_metric_name, None) logger.info("Start training from epoch {}".format(start_epoch)) for epoch in range(start_epoch, max_epoch): cur_epoch = epoch + 1 scheduler.step() start_time = time.time() train_meters = train_model(model, loss_fn, metric_fn, image_scales=cfg.MODEL.TRAIN.IMG_SCALES, inter_scales=cfg.MODEL.TRAIN.INTER_SCALES, isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH), data_loader=train_data_loader, optimizer=optimizer, curr_epoch=epoch, tensorboard_logger=tensorboard_logger, log_period=cfg.TRAIN.LOG_PERIOD, output_dir=output_dir, ) epoch_time = time.time() - start_time logger.info("Epoch[{}]-Train {} total_time: {:.2f}s".format( cur_epoch, train_meters.summary_str, epoch_time)) # checkpoint if cur_epoch % ckpt_period == 0 or cur_epoch == max_epoch: checkpoint_data["epoch"] = cur_epoch checkpoint_data[best_metric_name] = best_metric checkpointer.save("model_{:03d}".format(cur_epoch), **checkpoint_data) # validate if val_period < 1: continue if cur_epoch % val_period == 0 or cur_epoch == max_epoch: val_meters = validate_model(model, loss_fn, metric_fn, image_scales=cfg.MODEL.VAL.IMG_SCALES, inter_scales=cfg.MODEL.VAL.INTER_SCALES, isFlow=(cur_epoch > cfg.SCHEDULER.INIT_EPOCH), data_loader=val_data_loader, curr_epoch=epoch, tensorboard_logger=tensorboard_logger, log_period=cfg.TEST.LOG_PERIOD, output_dir=output_dir, ) logger.info("Epoch[{}]-Val {}".format(cur_epoch, val_meters.summary_str)) # best validation cur_metric = val_meters.meters[cfg.TRAIN.VAL_METRIC].global_avg if best_metric is None or cur_metric > best_metric: best_metric = cur_metric checkpoint_data["epoch"] = cur_epoch checkpoint_data[best_metric_name] = best_metric 
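                # also keep a dedicated "model_best" checkpoint for the best validation metric seen so far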
checkpointer.save("model_best", **checkpoint_data) logger.info("Best val-{} = {}".format(cfg.TRAIN.VAL_METRIC, best_metric)) return model def main(): args = parse_args() num_gpus = torch.cuda.device_count() cfg = load_cfg_from_file(args.config_file) cfg.merge_from_list(args.opts) cfg.freeze() output_dir = cfg.OUTPUT_DIR if output_dir: config_path = osp.splitext(args.config_file)[0] config_path = config_path.replace("configs", "outputs1") output_dir = output_dir.replace('@', config_path) mkdir(output_dir) logger = setup_logger("fastmvsnet", output_dir, prefix="train") logger.info("Using {} GPUs".format(num_gpus)) logger.info(args) logger.info("Loaded configuration file {}".format(args.config_file)) logger.info("Running with config:\n{}".format(cfg)) train(cfg, output_dir) if __name__ == "__main__": main()
37.080808
119
0.562608
0
0
0
0
0
0
0
0
1,020
0.092618
54d41bf8d53f9ade04da7c58f9daea5fe0658840
857
py
Python
modulo2/3-detectores/3.2-detector/models.py
fossabot/unifacisa-visao-computacional
14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b
[ "MIT" ]
null
null
null
modulo2/3-detectores/3.2-detector/models.py
fossabot/unifacisa-visao-computacional
14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b
[ "MIT" ]
null
null
null
modulo2/3-detectores/3.2-detector/models.py
fossabot/unifacisa-visao-computacional
14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b
[ "MIT" ]
1
2021-02-06T00:49:32.000Z
2021-02-06T00:49:32.000Z
# Basic structure for Machine Learning and Deep Learning projects
# By Adriano Santos.

from torch import nn, relu
import torch.nn.functional as F
import torch.optim as optim
import torch

from torchvision import models


class ResNet(nn.Module):
    def __init__(self, saida, pretreinado=True):
        super(ResNet, self).__init__()

        resnet = models.resnet34(pretrained=pretreinado)
        layers = list(resnet.children())[:8]

        self.features1 = nn.Sequential(*layers[:6])
        self.features2 = nn.Sequential(*layers[6:])
        self.classificador = nn.Sequential(nn.BatchNorm1d(512), nn.Linear(512, saida))

    def forward(self, x):
        x = self.features1(x)
        x = self.features2(x)
        x = F.relu(x)
        x = nn.AdaptiveAvgPool2d((1,1))(x)
        x = x.view(x.shape[0], -1)
        return self.classificador(x)
29.551724
86
0.655776
632
0.736597
0
0
0
0
0
0
90
0.104895
54d5248eff89e3f435c1da7e63250cb5c736a60a
3,231
py
Python
python/setup.py
sbrodeur/evert
c7005ba29576145ab650144f9b9230eaf7bec460
[ "BSD-3-Clause" ]
28
2017-10-04T13:58:43.000Z
2021-11-06T10:46:51.000Z
python/setup.py
sbrodeur/evert
c7005ba29576145ab650144f9b9230eaf7bec460
[ "BSD-3-Clause" ]
7
2017-12-04T17:17:55.000Z
2021-07-29T08:58:26.000Z
python/setup.py
sbrodeur/evert
c7005ba29576145ab650144f9b9230eaf7bec460
[ "BSD-3-Clause" ]
10
2017-11-07T14:51:08.000Z
2019-06-05T04:17:44.000Z
#!/usr/bin/env python # Copyright (c) 2017, Simon Brodeur # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT # NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. """ setup.py file for installing Python bindings using SWIG """ from distutils.core import setup, Extension evert_module = Extension('_evert', define_macros = [('MAJOR_VERSION', '1'), ('MINOR_VERSION', '0')], include_dirs = ['../include'], sources=['../src/elBeam.cpp', '../src/elBSP.cpp', '../src/elGLUT.cpp', '../src/elListener.cpp', '../src/elOrientedPoint.cpp', '../src/elPathSolution.cpp', '../src/elPolygon.cpp', '../src/elRay.cpp', '../src/elRoom.cpp', '../src/elSource.cpp', '../src/elTimer.cpp', '../src/elVector.cpp', '../src/elViewer.cpp', 'evert.i'], libraries = ['GL', 'GLU', 'glut'], library_dirs = [], language='c++', swig_opts=['-c++', '-I../include'], #extra_compile_args=['-std=c++11'], ) setup (name = 'evert', version = '1.0', author = "Samuli Laine", description = """Accelerated beam tracing algorithm""", ext_modules = [evert_module], py_modules = ["evert"], )
46.826087
89
0.556484
0
0
0
0
0
0
0
0
2,110
0.653049
54d6049e6360802df5527ba35f15e6ff291748e2
530
py
Python
somegame/fps_osd.py
kodo-pp/somegame-but-not-that-one
6252d34b84fe7c83ada9e699df17688c50dd7596
[ "MIT" ]
null
null
null
somegame/fps_osd.py
kodo-pp/somegame-but-not-that-one
6252d34b84fe7c83ada9e699df17688c50dd7596
[ "MIT" ]
null
null
null
somegame/fps_osd.py
kodo-pp/somegame-but-not-that-one
6252d34b84fe7c83ada9e699df17688c50dd7596
[ "MIT" ]
null
null
null
import pygame from loguru import logger from somegame.osd import OSD class FpsOSD(OSD): def __init__(self, game): super().__init__(game) logger.info('Loading font') self.font = pygame.font.Font(pygame.font.get_default_font(), 32) def draw(self, surface): fps = self.game.get_average_fps() fps_text = '<unknown>' if fps is None else '{:.1f}'.format(fps) tmp_surf = self.font.render('{} FPS'.format(fps_text), True, (255, 255, 255)) surface.blit(tmp_surf, (0, 0))
29.444444
85
0.635849
457
0.862264
0
0
0
0
0
0
41
0.077358
54d6ce148b09071a1e33198868f6c84a03813ea1
11,846
py
Python
python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py
sgwhat/BigDL
25b402666fbb26b0bc18fc8100e9a00469844778
[ "Apache-2.0" ]
null
null
null
python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py
sgwhat/BigDL
25b402666fbb26b0bc18fc8100e9a00469844778
[ "Apache-2.0" ]
null
null
null
python/chronos/test/bigdl/chronos/data/experimental/test_xshardstsdataset.py
sgwhat/BigDL
25b402666fbb26b0bc18fc8100e9a00469844778
[ "Apache-2.0" ]
null
null
null
# # Copyright 2016 The BigDL Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pytest import numpy as np import pandas as pd import random import os from unittest import TestCase from bigdl.chronos.data import TSDataset from bigdl.chronos.data.experimental import XShardsTSDataset from bigdl.orca.data.pandas import read_csv from bigdl.orca.common import init_orca_context, stop_orca_context, OrcaContext from pandas.testing import assert_frame_equal from numpy.testing import assert_array_almost_equal def generate_spark_df(): init_orca_context(cores=8) sc = OrcaContext.get_spark_context() rdd = sc.range(0, 100) from pyspark.ml.linalg import DenseVector df = rdd.map(lambda x: (DenseVector(np.random.randn(1, ).astype(np.float)), int(np.random.randint(0, 2, size=())), int(x))).toDF(["feature", "id", "date"]) return df def get_ugly_ts_df(): data = np.random.random_sample((100, 5)) mask = np.random.random_sample((100, 5)) newmask = mask.copy() mask[newmask >= 0.4] = 2 mask[newmask < 0.4] = 1 mask[newmask < 0.2] = 0 data[mask == 0] = None data[mask == 1] = np.nan df = pd.DataFrame(data, columns=['a', 'b', 'c', 'd', 'e']) df['a'][0] = np.nan # make sure column 'a' has a N/A df["datetime"] = pd.date_range('1/1/2019', periods=100) df.loc[50:100, "datetime"] = pd.date_range('1/1/2019', periods=50) df["id"] = np.array(['00']*50 + ['01']*50) return df class TestXShardsTSDataset(TestCase): def setUp(self): self.resource_path = os.path.join(os.path.split(__file__)[0], "../../resources/") def tearDown(self): pass @classmethod def tearDownClass(cls): # stop possible active_spark_context from pyspark import SparkContext from bigdl.orca.ray import OrcaRayContext if SparkContext._active_spark_context is not None: print("Stopping spark_orca context") sc = SparkContext.getOrCreate() if sc.getConf().get("spark.master").startswith("spark://"): from bigdl.dllib.nncontext import stop_spark_standalone stop_spark_standalone() sc.stop() def test_xshardstsdataset_initialization(self): shards_single = read_csv(os.path.join(self.resource_path, "single.csv")) tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime", target_col="value", extra_feature_col=["extra feature"], id_col="id") assert tsdata._id_list == [0] assert tsdata.feature_col == ["extra feature"] assert tsdata.target_col == ["value"] assert tsdata.dt_col == "datetime" assert tsdata.shards.num_partitions() == 1 tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime", target_col=["value"], extra_feature_col="extra feature", id_col="id") assert tsdata._id_list == [0] assert tsdata.feature_col == ["extra feature"] assert tsdata.target_col == ["value"] assert tsdata.dt_col == "datetime" assert tsdata.shards.num_partitions() == 1 tsdata = XShardsTSDataset.from_xshards(shards_single, dt_col="datetime", target_col=["value"], extra_feature_col="extra feature") assert tsdata._id_list == ["0"] assert tsdata.feature_col == ["extra feature"] assert tsdata.target_col == ["value"] assert tsdata.dt_col == "datetime" assert 
tsdata.shards.num_partitions() == 1 def test_xshardstsdataset_initialization_multiple(self): shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv")) # legal input tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value", extra_feature_col=["extra feature"], id_col="id") assert tsdata._id_list == [0, 1] assert tsdata.feature_col == ["extra feature"] assert tsdata.target_col == ["value"] assert tsdata.dt_col == "datetime" assert tsdata.shards.num_partitions() == 2 tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col=["value"], extra_feature_col="extra feature", id_col="id") assert tsdata._id_list == [0, 1] assert tsdata.feature_col == ["extra feature"] assert tsdata.target_col == ["value"] assert tsdata.dt_col == "datetime" assert tsdata.shards.num_partitions() == 2 tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col=["value"], extra_feature_col="extra feature") assert tsdata._id_list == ['0'] assert tsdata.feature_col == ["extra feature"] assert tsdata.target_col == ["value"] assert tsdata.dt_col == "datetime" assert tsdata.shards.num_partitions() == 1 def test_xshardstsdataset_split(self): shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv")) # only train and test tsdata_train, tsdata_valid, tsdata_test =\ XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value", extra_feature_col=["extra feature"], id_col="id", with_split=True, val_ratio=0, test_ratio=0.1) # standard split with all three sets tsdata_train, tsdata_valid, tsdata_test =\ XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value", extra_feature_col=["extra feature"], id_col="id", with_split=True, val_ratio=0.1, test_ratio=0.1, largest_look_back=5, largest_horizon=2) tsdata_train.feature_col.append("new extra feature") assert len(tsdata_train.feature_col) == 2 assert len(tsdata_valid.feature_col) == 1 assert len(tsdata_test.feature_col) == 1 tsdata_train.target_col[0] = "new value" assert tsdata_train.target_col[0] == "new value" assert tsdata_valid.target_col[0] != "new value" assert tsdata_test.target_col[0] != "new value" def test_xshardstsdataset_roll_multiple_id(self): shards_multiple = read_csv(os.path.join(self.resource_path, "multiple.csv")) horizon = random.randint(1, 10) lookback = random.randint(1, 20) tsdata = XShardsTSDataset.from_xshards(shards_multiple, dt_col="datetime", target_col="value", extra_feature_col=["extra feature"], id_col="id") with pytest.raises(RuntimeError): tsdata.to_xshards() # roll train tsdata.roll(lookback=lookback, horizon=horizon) shards_numpy = tsdata.to_xshards() collected_numpy = shards_numpy.collect() # collect and valid x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0) y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0) assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2) assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1) tsdata.roll(lookback=lookback, horizon=horizon, feature_col=["extra feature"], target_col="value") shards_numpy = tsdata.to_xshards() collected_numpy = shards_numpy.collect() # collect and valid x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0) y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0) assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2) assert y.shape == ((50-lookback-horizon+1)*2, 
horizon, 1) tsdata.roll(lookback=lookback, horizon=horizon, feature_col=[], target_col="value") shards_numpy = tsdata.to_xshards() collected_numpy = shards_numpy.collect() # collect and valid x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0) y = np.concatenate([collected_numpy[i]['y'] for i in range(len(collected_numpy))], axis=0) assert x.shape == ((50-lookback-horizon+1)*2, lookback, 1) assert y.shape == ((50-lookback-horizon+1)*2, horizon, 1) # roll test horizon = 0 lookback = random.randint(1, 20) tsdata.roll(lookback=lookback, horizon=horizon) shards_numpy = tsdata.to_xshards() collected_numpy = shards_numpy.collect() # collect and valid x = np.concatenate([collected_numpy[i]['x'] for i in range(len(collected_numpy))], axis=0) assert x.shape == ((50-lookback-horizon+1)*2, lookback, 2) def test_xshardstsdataset_impute(self): from tempfile import TemporaryDirectory tmp_df = get_ugly_ts_df() with TemporaryDirectory() as tmpdir: file_name = os.path.join(tmpdir, 'impute.csv') tmp_df.to_csv(file_name, index=False) shards_tmp = read_csv(file_name) for val in ["last", "const", "linear"]: tsdata = XShardsTSDataset.from_xshards(shards_tmp, dt_col="datetime", target_col="e", extra_feature_col=["a", "b", "c", "d"], id_col="id") tsdata.impute(mode=val) collected_df = tsdata.shards.collect() collected_df = pd.concat(collected_df, axis=0) assert collected_df.isna().sum().sum() == 0 assert len(collected_df) == 100 def test_xshardstsdataset_sparkdf(self): df = generate_spark_df() # with id tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date", target_col="feature", id_col="id") tsdata.roll(lookback=4, horizon=2) data = tsdata.to_xshards().collect() assert data[0]['x'].shape[1] == 4 assert data[0]['x'].shape[2] == 1 assert data[0]['y'].shape[1] == 2 assert data[0]['y'].shape[2] == 1 assert tsdata.shards.num_partitions() == 2 # with only 1 id tsdata = XShardsTSDataset.from_sparkdf(df, dt_col="date", target_col="feature") tsdata.roll(lookback=4, horizon=2) data = tsdata.to_xshards().collect() assert data[0]['x'].shape[1] == 4 assert data[0]['x'].shape[2] == 1 assert data[0]['y'].shape[1] == 2 assert data[0]['y'].shape[2] == 1 assert tsdata.shards.num_partitions() == 1
46.454902
100
0.593534
9,829
0.829732
0
0
534
0.045079
0
0
1,794
0.151444
54d7680f93fc7f5f7a46d60f37723337c7dce6f3
2,603
py
Python
zoom_functions.py
WXSD-Sales/ZoomToWebex
16cc663620e2ef2904b0e2857d709aee96b78eb7
[ "MIT" ]
1
2021-10-21T01:36:33.000Z
2021-10-21T01:36:33.000Z
zoom_functions.py
WXSD-Sales/integration-samples
2f18be740329f3c35c78c268a6d4544cae5d313e
[ "MIT" ]
null
null
null
zoom_functions.py
WXSD-Sales/integration-samples
2f18be740329f3c35c78c268a6d4544cae5d313e
[ "MIT" ]
null
null
null
import json import tornado.gen import traceback from base64 import b64encode from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPError from settings import Settings from mongo_db_controller import ZoomUserDB @tornado.gen.coroutine def zoomRefresh(zoom_user): url = "https://zoom.us/oauth/token" payload = "grant_type=refresh_token&" payload += "refresh_token={0}".format(zoom_user.get('refresh_token')) #we need to base 64 encode it #and then decode it to acsii as python 3 stores it as a byte string userAndPass = b64encode("{0}:{1}".format(Settings.zoom_client_id, Settings.zoom_client_secret).encode()).decode("ascii") headers = { 'authorization': 'Basic {0}'.format(userAndPass), 'content-type': "application/x-www-form-urlencoded" } request = HTTPRequest(url, method="POST", headers=headers, body=payload) http_client = AsyncHTTPClient() print(zoom_user) print('making zoomRefresh') print(payload) try: response = yield http_client.fetch(request) resp = json.loads(response.body.decode("utf-8")) print("zoomRefresh /access_token Response: {0}".format(resp)) zoom_user = ZoomUserDB.db.insert_user(zoom_user['person_id'], resp['access_token'], resp['expires_in'], resp['refresh_token'], "zoom") print('new zoom_user:{0}'.format(zoom_user)) except HTTPError as he: print('zoomRefresh HTTPError:') print(he.code) print(he.response.body) if he.code == 401: ZoomUserDB.db.delete_user(zoom_user['person_id'], "zoom") zoom_user = None raise tornado.gen.Return(zoom_user) @tornado.gen.coroutine def zoomGET(endpoint_url, zoom_user): url = "https://api.zoom.us/v2{0}".format(endpoint_url) headers = {"Authorization":"Bearer {0}".format(zoom_user.get('token'))} request = HTTPRequest(url, method="GET", headers=headers) http_client = AsyncHTTPClient() response = None try: response = yield http_client.fetch(request) body = response.body.decode('utf-8') response = json.loads(body) except HTTPError as he: if he.code == 401: print('token may be expired, attempting refresh') zoom_user = yield zoomRefresh(zoom_user) if zoom_user: response, zoom_user = yield zoomGET(endpoint_url, zoom_user) else: try: print(he.response.body) except Exception as e: pass traceback.print_exc() raise tornado.gen.Return((response, zoom_user))
38.850746
142
0.661929
0
0
2,330
0.895121
2,376
0.912793
0
0
584
0.224357
54d83fe60a2207f45c149a5e0cac230756ba7376
1,484
py
Python
crypten/mpc/__init__.py
gmuraru/CrypTen
e39a7aaf65436706321fe4e3fc055308c78b6b92
[ "MIT" ]
null
null
null
crypten/mpc/__init__.py
gmuraru/CrypTen
e39a7aaf65436706321fe4e3fc055308c78b6b92
[ "MIT" ]
null
null
null
crypten/mpc/__init__.py
gmuraru/CrypTen
e39a7aaf65436706321fe4e3fc055308c78b6b92
[ "MIT" ]
null
null
null
#!/usr/bin/env python3

# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os

from crypten.mpc import primitives  # noqa: F401
from crypten.mpc import provider  # noqa: F401

from .context import run_multiprocess
from .mpc import MPCTensor
from .ptype import ptype


__all__ = ["MPCTensor", "primitives", "provider", "ptype", "run_multiprocess"]

# the different private type attributes of an mpc encrypted tensor
arithmetic = ptype.arithmetic
binary = ptype.binary

# Set provider
__SUPPORTED_PROVIDERS = {
    "TFP": provider.TrustedFirstParty,
    "TTP": provider.TrustedThirdParty,
    "HE": provider.HomomorphicProvider,
}
__default_provider = __SUPPORTED_PROVIDERS[
    os.environ.get("CRYPTEN_PROVIDER_NAME", "TFP")
]


def set_default_provider(new_default_provider):
    global __default_provider
    assert_msg = "Provider %s is not supported" % new_default_provider
    if isinstance(new_default_provider, str):
        assert new_default_provider in __SUPPORTED_PROVIDERS.keys(), assert_msg
    else:
        assert new_default_provider in __SUPPORTED_PROVIDERS.values(), assert_msg
    __default_provider = new_default_provider
    os.environ["CRYPTEN_PROVIDER_NAME"] = new_default_provider.NAME


def get_default_provider():
    return __default_provider


def ttp_required():
    return __default_provider == provider.TrustedThirdParty
28.538462
81
0.768194
0
0
0
0
0
0
0
0
451
0.303908
54d943f36b7e93ff9b844e618cfa99e6c35ca662
2,011
py
Python
contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py
lahosken/pants
1b0340987c9b2eab9411416803c75b80736716e4
[ "Apache-2.0" ]
null
null
null
contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py
lahosken/pants
1b0340987c9b2eab9411416803c75b80736716e4
[ "Apache-2.0" ]
null
null
null
contrib/python/src/python/pants/contrib/python/checks/tasks/checkstyle/pyflakes.py
lahosken/pants
1b0340987c9b2eab9411416803c75b80736716e4
[ "Apache-2.0" ]
null
null
null
# coding=utf-8 # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md). # Licensed under the Apache License, Version 2.0 (see LICENSE). from __future__ import (absolute_import, division, generators, nested_scopes, print_function, unicode_literals, with_statement) from pyflakes.checker import Checker as FlakesChecker from pants.contrib.python.checks.tasks.checkstyle.common import CheckstylePlugin, Nit class FlakeError(Nit): # TODO(wickman) There is overlap between this and Flake8 -- consider integrating # checkstyle plug-ins into the PEP8 tool directly so that this can be inherited # by flake8. # Code reference is here: http://flake8.readthedocs.org/en/latest/warnings.html CLASS_ERRORS = { 'DuplicateArgument': 'F831', 'ImportShadowedByLoopVar': 'F402', 'ImportStarUsed': 'F403', 'LateFutureImport': 'F404', 'Redefined': 'F810', 'RedefinedInListComp': 'F812', 'RedefinedWhileUnused': 'F811', 'UndefinedExport': 'F822', 'UndefinedLocal': 'F823', 'UndefinedName': 'F821', 'UnusedImport': 'F401', 'UnusedVariable': 'F841', } def __init__(self, python_file, flake_message): line_range = python_file.line_range(flake_message.lineno) super(FlakeError, self).__init__( self.get_error_code(flake_message), Nit.ERROR, python_file.filename, flake_message.message % flake_message.message_args, line_range, python_file.lines[line_range]) @classmethod def get_error_code(cls, message): return cls.CLASS_ERRORS.get(message.__class__.__name__, 'F999') class PyflakesChecker(CheckstylePlugin): """Detect common coding errors via the pyflakes package.""" def nits(self): checker = FlakesChecker(self.python_file.tree, self.python_file.filename) for message in sorted(checker.messages, key=lambda msg: msg.lineno): if FlakeError.get_error_code(message) not in self.options.ignore: yield FlakeError(self.python_file, message)
35.910714
93
0.721532
1,564
0.777723
290
0.144207
116
0.057683
0
0
740
0.367976
54d95a6a219b638ddca6d85bef7b830f95b22592
2,426
py
Python
pharmrep/forum/models.py
boyombo/pharmrep
2293ceb235dec949c58fa40d1ee43fce172e0ceb
[ "MIT" ]
null
null
null
pharmrep/forum/models.py
boyombo/pharmrep
2293ceb235dec949c58fa40d1ee43fce172e0ceb
[ "MIT" ]
null
null
null
pharmrep/forum/models.py
boyombo/pharmrep
2293ceb235dec949c58fa40d1ee43fce172e0ceb
[ "MIT" ]
null
null
null
from django.db import models from django.contrib.auth.models import User from django.contrib import admin from django.utils.translation import ugettext_lazy as _ class Forum(models.Model): title = models.CharField(max_length=60) description = models.TextField(blank=True, default='') updated = models.DateTimeField(auto_now=True) created = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, blank=True, null=True) def __unicode__(self): return self.title def num_posts(self): return sum([t.num_posts() for t in self.topic_set.all()]) def last_post(self): if self.topic_set.count(): last = None for t in self.topic_set.all(): l = t.last_post() if l: if not last: last = l elif l.created > last.created: last = l return last class Topic(models.Model): title = models.CharField(max_length=60) description = models.TextField(max_length=10000, blank=True, null=True) forum = models.ForeignKey(Forum) created = models.DateTimeField(auto_now=True) creator = models.ForeignKey(User, blank=True, null=True) updated = models.DateTimeField(auto_now=True) closed = models.BooleanField(blank=True, default=False) def num_posts(self): return self.post_set.count() def num_replies(self): return max(0, self.post_set.count() - 1) def last_post(self): if self.post_set.count(): return self.post_set.order_by("created")[0] def __unicode__(self): return unicode(self.creator) + " - " + self.title class Post(models.Model): title = models.CharField(max_length=60) created = models.DateTimeField(auto_now_add=True) creator = models.ForeignKey(User, blank=True, null=True) updated = models.DateTimeField(auto_now=True) topic = models.ForeignKey(Topic) body = models.TextField(max_length=10000) user_ip = models.GenericIPAddressField(blank=True, null=True) def __unicode__(self): return u"%s - %s - %s" % (self.creator, self.topic, self.title) def short(self): return u"%s - %s\n%s" % (self.creator, self.title, self.created.strftime("%b %d, %I:%M %p")) short.allow_tags = True class ProfaneWord(models.Model): word = models.CharField(max_length=60) def __unicode__(self): return self.word
32.783784
100
0.659934
2,255
0.929514
0
0
0
0
0
0
62
0.025556
54d9afa8624f72c8f6f8e3ffc3c4fcb52e42ad11
1,744
py
Python
iri-node/fabfile.py
jinnerbichler/home-automflashion
f93442712322ab819651f453437c11f685640e83
[ "Apache-2.0" ]
8
2018-02-06T15:18:08.000Z
2020-07-12T20:16:22.000Z
iri-node/fabfile.py
jinnerbichler/home-autoflashion
f93442712322ab819651f453437c11f685640e83
[ "Apache-2.0" ]
1
2018-09-02T17:10:57.000Z
2018-10-02T04:14:43.000Z
iri-node/fabfile.py
jinnerbichler/home-autoflashion
f93442712322ab819651f453437c11f685640e83
[ "Apache-2.0" ]
1
2019-08-14T04:39:48.000Z
2019-08-14T04:39:48.000Z
import time from fabric.api import run, env, task, put, cd, local, sudo env.use_ssh_config = True env.hosts = ['iota_node'] @task(default=True) def iri(): run('mkdir -p /srv/private-tangle/') with cd('/srv/private-tangle'): put('.', '.') run('docker-compose --project-name private-tangle pull') run('docker-compose --project-name private-tangle up -d --force-recreate iri') @task def tools(): with cd('/srv/private-tangle'): put('.', '.') run('docker-compose --project-name private-tangle pull') run('docker-compose --project-name private-tangle up -d --no-deps --force-recreate coordinator explorer') run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator explorer') @task def stop(): with cd('/srv/private-tangle'): run('docker-compose --project-name private-tangle stop') @task def stop_coord(): with cd('/srv/private-tangle'): run('docker-compose --project-name private-tangle stop coordinator') @task def down(): with cd('/srv/private-tangle'): run('docker-compose --project-name private-tangle down -v') @task def logs(): with cd('/srv/private-tangle'): run('docker-compose --project-name private-tangle logs -f --tail 100') @task def logs_coord(): with cd('/srv/private-tangle'): run('docker-compose --project-name private-tangle logs -f --tail 100 coordinator') @task def logs_all(): with cd('/srv/private-tangle'): run('docker-compose logs -f') @task def reset(): # stop services and delete database down() time.sleep(1) run('rm -rf /srv/private-tangle/testnet_db/') # restart all services iri() time.sleep(5) tools()
24.222222
113
0.639908
0
0
0
0
1,593
0.913417
0
0
1,014
0.581422
54da3dc2f38e9f403fcf4bc41db3259f59c8f372
1,763
py
Python
features.py
ptorresmanque/MachineLearning_v2.0
795e47b9cfc68f4e0fefb700d43af6c59e2f1d73
[ "MIT" ]
null
null
null
features.py
ptorresmanque/MachineLearning_v2.0
795e47b9cfc68f4e0fefb700d43af6c59e2f1d73
[ "MIT" ]
null
null
null
features.py
ptorresmanque/MachineLearning_v2.0
795e47b9cfc68f4e0fefb700d43af6c59e2f1d73
[ "MIT" ]
null
null
null
import sqlite3
from random import randint, choice
import numpy as np

conn = sqlite3.connect('ej.db')
c = conn.cursor()

#GETTING MAXIMUM, MINIMUM AND AVERAGE SIZES#

c.execute('SELECT MAX(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMax = resultado[0]

c.execute('SELECT MIN(alto) FROM features')
resultado = c.fetchone()
if resultado:
    altoMin = resultado[0]

altoProm = abs((altoMax + altoMin) / 2)

#print altoMax , altoProm , altoMin

arrAlto = [altoMax , altoProm , altoMin]

c.execute('SELECT MAX(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMax = resultado[0]

c.execute('SELECT MIN(ancho) FROM features')
resultado = c.fetchone()
if resultado:
    anchoMin = resultado[0]

anchoProm = abs((anchoMax + anchoMin) / 2)

anchoMaxProm = abs((anchoMax + anchoProm) / 2)
anchoMinProm = abs((anchoMin + anchoProm) / 2)

arrAncho = [anchoMax, anchoMaxProm, anchoProm, anchoMinProm, anchoMin]

#### CREATING NEGATIVE CLASSES

for i in range(0,3):
    for j in range(0,5):
        for _ in range(10):
            negAncho = arrAncho[j]
            negAlto = arrAlto[i]

            rand_alto_max = int(negAlto * 1.5)
            rand_alto_min = int(negAlto * 0.5)
            r3 = rand_alto_max * 2

            rand_ancho_max = int(negAncho*1.5)
            rand_ancho_min = int(negAncho*0.5)
            r33 = rand_ancho_max * 2

            f1 = choice([np.random.randint(1, rand_alto_min), np.random.randint(rand_alto_max, r3)])
            f2 = choice([np.random.randint(1, rand_ancho_min), np.random.randint(rand_ancho_max, r33)])

            c.execute("insert into features (ancho, alto, area, clase) values (?, ?, ?, ?)", (f2, f1, f2*f1, 0))

conn.commit()
conn.close()
23.506667
103
0.640953
0
0
0
0
0
0
0
0
318
0.180374
54da935d3d5cf04aac496677e269b59710d17100
5,503
py
Python
dev/ideas/cython/playing_around.py
achilleas-k/brian2
906563b6b1321585b082f79f74f1b4ab386347ec
[ "BSD-2-Clause" ]
null
null
null
dev/ideas/cython/playing_around.py
achilleas-k/brian2
906563b6b1321585b082f79f74f1b4ab386347ec
[ "BSD-2-Clause" ]
null
null
null
dev/ideas/cython/playing_around.py
achilleas-k/brian2
906563b6b1321585b082f79f74f1b4ab386347ec
[ "BSD-2-Clause" ]
null
null
null
from pylab import * import cython import time, timeit from brian2.codegen.runtime.cython_rt.modified_inline import modified_cython_inline import numpy from scipy import weave import numexpr import theano from theano import tensor as tt tau = 20 * 0.001 N = 1000000 b = 1.2 # constant current mean, the modulation varies freq = 10.0 t = 0.0 dt = 0.0001 _array_neurongroup_a = a = linspace(.05, 0.75, N) _array_neurongroup_v = v = rand(N) ns = {'_array_neurongroup_a': a, '_array_neurongroup_v': v, '_N': N, 'dt': dt, 't': t, 'tau': tau, 'b': b, 'freq': freq,# 'sin': numpy.sin, 'pi': pi, } code = ''' cdef int _idx cdef int _vectorisation_idx cdef int N = <int>_N cdef double a, v, _v #cdef double [:] _cy_array_neurongroup_a = _array_neurongroup_a #cdef double [:] _cy_array_neurongroup_v = _array_neurongroup_v cdef double* _cy_array_neurongroup_a = &(_array_neurongroup_a[0]) cdef double* _cy_array_neurongroup_v = &(_array_neurongroup_v[0]) for _idx in range(N): _vectorisation_idx = _idx a = _cy_array_neurongroup_a[_idx] v = _cy_array_neurongroup_v[_idx] _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau) #_v = a*b+0.0001*sin(v) #_v = a*b+0.0001*v v = _v _cy_array_neurongroup_v[_idx] = v ''' def timefunc_cython_inline(): cython.inline(code, locals=ns) f_mod, f_arg_list = modified_cython_inline(code, locals=ns, globals={}) def timefunc_cython_modified_inline(): f_mod.__invoke(*f_arg_list) #modified_cython_inline(code, locals=ns) def timefunc_python(): for _idx in xrange(N): _vectorisation_idx = _idx a = _array_neurongroup_a[_idx] v = _array_neurongroup_v[_idx] _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau) v = _v _array_neurongroup_v[_idx] = v def timefunc_numpy(): _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau) v[:] = _v def timefunc_numpy_smart(): _sin_term = sin(2.0*freq*pi*t) _exp_term = exp(-dt/tau) _a_term = (_sin_term-_sin_term*_exp_term) _v = v _v *= _exp_term _v += a*_a_term _v += -b*_exp_term + b def timefunc_numpy_blocked(): ext = exp(-dt/tau) sit = sin(2.0*freq*pi*t) bs = 20000 for i in xrange(0, N, bs): ab = a[i:i+bs] vb = v[i:i+bs] absit = ab*sit + b vb *= ext vb += absit vb -= absit*ext def timefunc_numexpr(): v[:] = numexpr.evaluate('a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau)') def timefunc_numexpr_smart(): _sin_term = sin(2.0*freq*pi*t) _exp_term = exp(-dt/tau) _a_term = (_sin_term-_sin_term*_exp_term) _const_term = -b*_exp_term + b #v[:] = numexpr.evaluate('a*_a_term+v*_exp_term+_const_term') numexpr.evaluate('a*_a_term+v*_exp_term+_const_term', out=v) def timefunc_weave(*args): code = ''' // %s int N = _N; for(int _idx=0; _idx<N; _idx++) { double a = _array_neurongroup_a[_idx]; double v = _array_neurongroup_v[_idx]; double _v = a*sin(2.0*freq*pi*t) + b + v*exp(-dt/tau) + (-a*sin(2.0*freq*pi*t) - b)*exp(-dt/tau); v = _v; _array_neurongroup_v[_idx] = v; } ''' % str(args) weave.inline(code, ns.keys(), ns, compiler='gcc', extra_compile_args=list(args)) def timefunc_weave_slow(): timefunc_weave('-O3', '-march=native') def timefunc_weave_fast(): timefunc_weave('-O3', '-march=native', '-ffast-math') def get_theano_func(): a = tt.dvector('a') v = tt.dvector('v') freq = tt.dscalar('freq') t = tt.dscalar('t') dt = tt.dscalar('dt') tau = tt.dscalar('tau') return theano.function([a, v, freq, t, dt, tau], a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau)) # return theano.function([a, v], 
# a*tt.sin(2.0*freq*pi*t) + b + v*tt.exp(-dt/tau) + (-a*tt.sin(2.0*freq*pi*t) - b)*tt.exp(-dt/tau)) theano.config.gcc.cxxflags = '-O3 -ffast-math' theano_func = get_theano_func() #print theano.pp(theano_func.maker.fgraph.outputs[0]) #print #theano.printing.debugprint(theano_func.maker.fgraph.outputs[0]) #theano.printing.pydotprint(theano_func, 'func.png') #exit() def timefunc_theano(): v[:] = theano_func(a, v, freq, t, dt, tau) def dotimeit(f): v[:] = 1 f() print '%s: %.2f' % (f.__name__.replace('timefunc_', ''), timeit.timeit(f.__name__+'()', setup='from __main__ import '+f.__name__, number=100)) def check_values(f): v[:] = 1 v[:5] = linspace(0, 1, 5) f() print '%s: %s' % (f.__name__.replace('timefunc_', ''), v[:5]) if __name__=='__main__': funcs = [#timefunc_cython_inline, timefunc_cython_modified_inline, timefunc_numpy, timefunc_numpy_smart, timefunc_numpy_blocked, timefunc_numexpr, timefunc_numexpr_smart, timefunc_weave_slow, timefunc_weave_fast, timefunc_theano, ] if 1: print 'Values' print '======' for f in funcs: check_values(f) print if 1: print 'Times' print '=====' for f in funcs: dotimeit(f)
30.743017
125
0.589678
0
0
0
0
0
0
0
0
1,898
0.344903
54db106024a4f46cf548821fe280245ccaf57da7
114
py
Python
azbankgateways/views/__init__.py
lordmahyar/az-iranian-bank-gateways
e9eb7101f2b91318847d63d783c22c4a8d430ba3
[ "MIT" ]
196
2020-12-07T11:29:19.000Z
2022-03-23T09:32:56.000Z
azbankgateways/views/__init__.py
lordmahyar/az-iranian-bank-gateways
e9eb7101f2b91318847d63d783c22c4a8d430ba3
[ "MIT" ]
25
2021-01-13T11:56:35.000Z
2022-03-14T19:41:51.000Z
azbankgateways/views/__init__.py
lordmahyar/az-iranian-bank-gateways
e9eb7101f2b91318847d63d783c22c4a8d430ba3
[ "MIT" ]
44
2021-01-08T18:27:47.000Z
2022-03-22T03:36:04.000Z
from .banks import callback_view, go_to_bank_gateway from .samples import sample_payment_view, sample_result_view
38
60
0.877193
0
0
0
0
0
0
0
0
0
0
54db89c835de6895b4c1b46df78297a288ccdb1f
3,254
py
Python
dev/unittest/update.py
PowerDNS/exabgp
bbf69f25853e10432fbe588b5bc2f8d9f1e5dda2
[ "BSD-3-Clause" ]
8
2015-01-11T09:57:26.000Z
2019-07-05T05:57:02.000Z
dev/unittest/update.py
Acidburn0zzz/exabgp
bbf69f25853e10432fbe588b5bc2f8d9f1e5dda2
[ "BSD-3-Clause" ]
1
2018-11-15T22:10:09.000Z
2018-11-15T22:20:31.000Z
dev/unittest/update.py
Acidburn0zzz/exabgp
bbf69f25853e10432fbe588b5bc2f8d9f1e5dda2
[ "BSD-3-Clause" ]
6
2015-09-11T01:51:06.000Z
2020-03-10T19:16:18.000Z
#!/usr/bin/env python # encoding: utf-8 """ update.py Created by Thomas Mangin on 2009-09-06. Copyright (c) 2009-2013 Exa Networks. All rights reserved. """ import unittest from exabgp.configuration.environment import environment env = environment.setup('') from exabgp.bgp.message.update.update import * from exabgp.bgp.message.update.attribute.community import to_Community from exabgp.bgp.message.update.attribute.community import Community, Communities class TestData (unittest.TestCase): def test_2_prefix (self): self.assertEqual(str(to_NLRI('10.0.0.0','24')),'10.0.0.0/24') def test_6_prefix (self): self.assertEqual(to_NLRI('1.2.3.4','0').pack(),''.join([chr(c) for c in [0,]])) def test_7_prefix (self): self.assertEqual(to_NLRI('1.2.3.4','8').pack(),''.join([chr(c) for c in [8,1,]])) def test_8_prefix (self): self.assertEqual(to_NLRI('1.2.3.4','16').pack(),''.join([chr(c) for c in [16,1,2]])) def test_9_prefix (self): self.assertEqual(to_NLRI('1.2.3.4','24').pack(),''.join([chr(c) for c in [24,1,2,3]])) def test_10_prefix (self): self.assertEqual(to_NLRI('1.2.3.4','32').pack(),''.join([chr(c) for c in [32,1,2,3,4]])) def test_1_community (self): self.assertEqual(Community(256),256) def test_2_community (self): self.assertEqual(to_Community('0x100'),256) def test_3_community (self): self.assertEqual(to_Community('1:1'),65537) def test_4_community (self): communities = Communities() community = to_Community('1:1') communities.add(community) self.assertEqual(communities.pack(),''.join([chr(c) for c in [0xc0,0x08,0x04,0x00,0x01,0x00,0x01]])) def test_1_ipv4 (self): header = ''.join([chr(c) for c in [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x22, 0x2]]) message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xb, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x18, 0xa, 0x0, 0x1]]) update = new_Update(message) self.assertEqual(str(update.nlri[0]),'10.0.1.0/24') def test_1_ipv6_1 (self): header = ''.join([chr(c) for c in [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x0, 0x47, 0x2]]) message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0x30, 0x40, 0x1, 0x1, 0x0, 0x50, 0x2, 0x0, 0x4, 0x2, 0x1, 0xff, 0xfe, 0x80, 0x4, 0x4, 0x0, 0x0, 0x0, 0x0, 0x80, 0xe, 0x1a, 0x0, 0x2, 0x1, 0x10, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x20, 0x12, 0x34, 0x56, 0x78]]) update = to_Update([],[to_NLRI('1234:5678::',32)]) self.assertEqual(str(update.nlri[0]),'1234:5678::/32') def test_1_ipv6_2 (self): route = RouteIP('1234:5678::',64) route.next_hop = '8765:4321::1' announced = route.announce(1,1) message = announced[19:] update = new_Update(message) print update.nlri print update.withdraw print update.attributes[MPRNLRI.ID][0] # def test_2_ipv4_broken (self): # header = ''.join([chr(c) for c in h]) # message = ''.join([chr(c) for c in m]) # message = ''.join([chr(c) for c in [0x0, 0x0, 0x0, 0xf, 0x40, 0x1, 0x1, 0x0, 0x40, 0x2, 0x4, 0x2, 0x1, 0xfd, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x18, 0xa, 0x0, 0x1]]) # update = new_Update(message) if __name__ == '__main__': unittest.main()
38.282353
313
0.667486
2,434
0.748002
0
0
0
0
0
0
668
0.205286
54dbf6330b24d0c6aff3e7ee1c31934c49d43385
12,082
py
Python
nuscenes/eval/detection/evaluate.py
WJ-Lai/NightFusion
1555692eceb6b85127d21cd43e6fc780b7f91ffd
[ "Apache-2.0" ]
null
null
null
nuscenes/eval/detection/evaluate.py
WJ-Lai/NightFusion
1555692eceb6b85127d21cd43e6fc780b7f91ffd
[ "Apache-2.0" ]
1
2019-04-24T12:14:59.000Z
2019-04-24T12:14:59.000Z
nuscenes/eval/detection/evaluate.py
WJ-Lai/NightFusion
1555692eceb6b85127d21cd43e6fc780b7f91ffd
[ "Apache-2.0" ]
null
null
null
# nuScenes dev-kit. # Code written by Holger Caesar & Oscar Beijbom, 2018. # Licensed under the Creative Commons [see licence.txt] import argparse import json import os import random import time from typing import Tuple, Dict, Any import numpy as np from nuscenes import NuScenes from nuscenes.eval.detection.algo import accumulate, calc_ap, calc_tp from nuscenes.eval.detection.config import config_factory from nuscenes.eval.detection.constants import TP_METRICS from nuscenes.eval.detection.data_classes import DetectionConfig, MetricDataList, DetectionMetrics, EvalBoxes from nuscenes.eval.detection.loaders import load_prediction, load_gt, add_center_dist, filter_eval_boxes from nuscenes.eval.detection.render import summary_plot, class_pr_curve, class_tp_curve, dist_pr_curve, visualize_sample class NuScenesEval: """ This is the official nuScenes detection evaluation code. Results are written to the provided output_dir. nuScenes uses the following metrics: - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds. - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors. - nuScenes Detection Score (NDS): The weighted sum of the above. Here is an overview of the functions in this method: - init: Loads GT annotations an predictions stored in JSON format and filters the boxes. - run: Performs evaluation and dumps the metric data to disk. - render: Renders various plots and dumps to disk. We assume that: - Every sample_token is given in the results, although there may be not predictions for that sample. Please see https://github.com/nutonomy/nuscenes-devkit for more details. """ def __init__(self, nusc: NuScenes, config: DetectionConfig, result_path: str, eval_set: str, output_dir: str = None, verbose: bool = True): """ Initialize a NuScenesEval object. :param nusc: A NuScenes object. :param config: A DetectionConfig object. :param result_path: Path of the nuScenes JSON result file. :param eval_set: The dataset split to evaluate on, e.g. train or val. :param output_dir: Folder to save plots and results to. :param verbose: Whether to print to stdout. """ self.nusc = nusc self.result_path = result_path self.eval_set = eval_set self.output_dir = output_dir self.verbose = verbose self.cfg = config # Make dirs. self.plot_dir = os.path.join(self.output_dir, 'plots') if not os.path.isdir(self.output_dir): os.makedirs(self.output_dir) if not os.path.isdir(self.plot_dir): os.makedirs(self.plot_dir) # Load data. self.pred_boxes, self.meta = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, verbose=verbose) self.gt_boxes = load_gt(self.nusc, self.eval_set, verbose=verbose) assert set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens), \ "Samples in split doesn't match samples in predictions." # Add center distances. self.pred_boxes = add_center_dist(nusc, self.pred_boxes) self.gt_boxes = add_center_dist(nusc, self.gt_boxes) # Filter boxes (distance, points per box, etc.). if verbose: print('Filtering predictions') self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose) if verbose: print('Filtering ground truth annotations') self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose) self.sample_tokens = self.gt_boxes.sample_tokens def evaluate(self) -> Tuple[DetectionMetrics, MetricDataList]: """ Performs the actual evaluation. :return: A tuple of high-level and the raw metric data. 
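        :rtype: Tuple[DetectionMetrics, MetricDataList]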
""" start_time = time.time() # ----------------------------------- # Step 1: Accumulate metric data for all classes and distance thresholds. # ----------------------------------- if self.verbose: print('Accumulating metric data') metric_data_list = MetricDataList() for class_name in self.cfg.class_names: for dist_th in self.cfg.dist_ths: md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn, dist_th) metric_data_list.set(class_name, dist_th, md) # ----------------------------------- # Step 2: Calculate metrics from the data. # ----------------------------------- if self.verbose: print('Calculating metrics') metrics = DetectionMetrics(self.cfg) for class_name in self.cfg.class_names: for dist_th in self.cfg.dist_ths: metric_data = metric_data_list[(class_name, dist_th)] ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision) metrics.add_label_ap(class_name, dist_th, ap) for metric_name in TP_METRICS: metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)] if class_name in ['traffic_cone'] and metric_name in ['attr_err', 'vel_err', 'orient_err']: tp = np.nan elif class_name in ['barrier'] and metric_name in ['attr_err', 'vel_err']: tp = np.nan else: tp = calc_tp(metric_data, self.cfg.min_recall, metric_name) metrics.add_label_tp(class_name, metric_name, tp) metrics.add_runtime(time.time() - start_time) return metrics, metric_data_list def render(self, metrics: DetectionMetrics, md_list: MetricDataList) -> None: """ Renders various PR and TP curves. :param metrics: DetectionMetrics instance. :param md_list: MetricDataList instance. """ def savepath(name): return os.path.join(self.plot_dir, name + '.pdf') summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall, dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary')) for detection_name in self.cfg.class_names: class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall, savepath=savepath(detection_name + '_pr')) class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp, savepath=savepath(detection_name + '_tp')) for dist_th in self.cfg.dist_ths: dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall, savepath=savepath('dist_pr_' + str(dist_th))) def main(self, plot_examples: int = 0, render_curves: bool = True) -> Dict[str, Any]: """ Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots. :param plot_examples: How many example visualizations to write to disk. :param render_curves: Whether to render PR and TP curves to disk. :return: A dict that stores the high-level metrics and meta data. """ if plot_examples > 0: # Select a random but fixed subset to plot. random.seed(43) sample_tokens = list(self.sample_tokens) random.shuffle(sample_tokens) sample_tokens = sample_tokens[:plot_examples] # Visualize samples. example_dir = os.path.join(self.output_dir, 'examples') if not os.path.isdir(example_dir): os.mkdir(example_dir) for sample_token in sample_tokens: visualize_sample(self.nusc, sample_token, self.gt_boxes if self.eval_set != 'test' else EvalBoxes(), # Don't render test GT. self.pred_boxes, eval_range=max(self.cfg.class_range.values()), savepath=os.path.join(example_dir, '{}.png'.format(sample_token))) # Run evaluation. metrics, metric_data_list = self.evaluate() # Render PR and TP curves. if render_curves: self.render(metrics, metric_data_list) # Dump the metric data, meta and metrics to disk. 
if self.verbose: print('Saving metrics to: %s' % self.output_dir) metrics_summary = metrics.serialize() metrics_summary['meta'] = self.meta.copy() with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f: json.dump(metrics_summary, f, indent=2) with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f: json.dump(metric_data_list.serialize(), f, indent=2) # Print high-level metrics. print('mAP: %.4f' % (metrics_summary['mean_ap'])) err_name_mapping = { 'trans_err': 'mATE', 'scale_err': 'mASE', 'orient_err': 'mAOE', 'vel_err': 'mAVE', 'attr_err': 'mAAE' } for tp_name, tp_val in metrics_summary['tp_errors'].items(): print('%s: %.4f' % (err_name_mapping[tp_name], tp_val)) print('NDS: %.4f' % (metrics_summary['nd_score'])) print('Eval time: %.1fs' % metrics_summary['eval_time']) return metrics_summary if __name__ == "__main__": # Settings. parser = argparse.ArgumentParser(description='Evaluate nuScenes result submission.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('result_path', type=str, help='The submission as a JSON file.') parser.add_argument('--output_dir', type=str, default='~/nuscenes-metrics', help='Folder to store result metrics, graphs and example visualizations.') parser.add_argument('--eval_set', type=str, default='val', help='Which dataset split to evaluate on, train, val or test.') parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes', help='Default nuScenes data directory.') parser.add_argument('--version', type=str, default='v1.0-trainval', help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.') parser.add_argument('--config_name', type=str, default='cvpr_2019', help='Name of the configuration to use for evaluation, e.g. cvpr_2019.') parser.add_argument('--plot_examples', type=int, default=10, help='How many example visualizations to write to disk.') parser.add_argument('--render_curves', type=int, default=1, help='Whether to render PR and TP curves to disk.') parser.add_argument('--verbose', type=int, default=1, help='Whether to print to stdout.') args = parser.parse_args() result_path_ = os.path.expanduser(args.result_path) output_dir_ = os.path.expanduser(args.output_dir) eval_set_ = args.eval_set dataroot_ = args.dataroot version_ = args.version config_name_ = args.config_name plot_examples_ = args.plot_examples render_curves_ = bool(args.render_curves) verbose_ = bool(args.verbose) cfg_ = config_factory(config_name_) nusc_ = NuScenes(version=version_, verbose=verbose_, dataroot=dataroot_) nusc_eval = NuScenesEval(nusc_, config=cfg_, result_path=result_path_, eval_set=eval_set_, output_dir=output_dir_, verbose=verbose_) nusc_eval.main(plot_examples=plot_examples_, render_curves=render_curves_)
45.421053
120
0.630525
8,984
0.743585
0
0
0
0
0
0
3,934
0.325608
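The NuScenesEval record above is normally driven through its argparse entry point, but the same flow can be scripted directly. A minimal sketch follows; the dataset root, result file and output directory are placeholder assumptions, and the import of NuScenesEval depends on where this module sits inside the devkit package.

from nuscenes import NuScenes
from nuscenes.eval.detection.config import config_factory
# NuScenesEval is the class defined in the record above; adjust the import to its module path.

cfg = config_factory('cvpr_2019')
nusc = NuScenes(version='v1.0-trainval', dataroot='/data/sets/nuscenes', verbose=True)  # assumed paths
nusc_eval = NuScenesEval(nusc, config=cfg,
                         result_path='/path/to/results.json',  # hypothetical result file
                         eval_set='val',
                         output_dir='/tmp/nuscenes-metrics',
                         verbose=True)
summary = nusc_eval.main(plot_examples=0, render_curves=False)
print(summary['mean_ap'], summary['nd_score'])  # keys printed by main() in the record above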
54dcf21edb2556756e4c18e431858f02788f9d3a
9,520
py
Python
tests/get_problem_atcoder.py
aberent/api-client
845e5f1daa02cc7fee5a65234a24bb59a7b71083
[ "MIT" ]
null
null
null
tests/get_problem_atcoder.py
aberent/api-client
845e5f1daa02cc7fee5a65234a24bb59a7b71083
[ "MIT" ]
null
null
null
tests/get_problem_atcoder.py
aberent/api-client
845e5f1daa02cc7fee5a65234a24bb59a7b71083
[ "MIT" ]
null
null
null
import unittest from onlinejudge_api.main import main class DownloadAtCoderTest(unittest.TestCase): def test_icpc2013spring_a(self): """This problem contains both words `Input` and `Output` for the headings for sample outputs. """ url = 'http://jag2013spring.contest.atcoder.jp/tasks/icpc2013spring_a' expected = { "status": "ok", "messages": [], "result": { "url": "https://atcoder.jp/contests/jag2013spring/tasks/icpc2013spring_a", "tests": [{ "input": "2 2\n2 \n1 >= 3\n2 <= 5\n2\n1 >= 4\n2 >= 3\n", "output": "Yes\n" }, { "input": "2 2\n2 \n1 >= 5\n2 >= 5\n2\n1 <= 4\n2 <= 3\n", "output": "Yes\n" }, { "input": "2 2\n2 \n1 >= 3\n2 <= 3\n2\n1 <= 2\n2 >= 5\n", "output": "No\n" }, { "input": "1 2\n2\n1 <= 10\n1 >= 15\n", "output": "No\n" }, { "input": "5 5\n3\n2 <= 1\n3 <= 1\n4 <= 1\n4\n2 >= 2\n3 <= 1\n4 <= 1\n5 <= 1\n3\n3 >= 2\n4 <= 1\n5 <= 1\n2\n4 >= 2\n5 <= 1\n1\n5 >= 2 \n", "output": "Yes\n" }], "name": "Everlasting Zero", "context": { "contest": { "name": "Japan Alumni Group Spring Contest 2013", "url": "https://atcoder.jp/contests/jag2013spring" }, "alphabet": "A" }, "memoryLimit": 128, "timeLimit": 5000 }, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_arc035_a(self): """This problem uses <code> tags in the descriptoin text in the sample section. """ url = 'http://arc035.contest.atcoder.jp/tasks/arc035_a' expected = { "status": "ok", "messages": [], "result": { "url": "https://atcoder.jp/contests/arc035/tasks/arc035_a", "tests": [{ "input": "ab*\n", "output": "YES\n" }, { "input": "abc\n", "output": "NO\n" }, { "input": "a*bc*\n", "output": "YES\n" }, { "input": "***\n", "output": "YES\n" }], "name": "\u9ad8\u6a4b\u304f\u3093\u3068\u56de\u6587", "context": { "contest": { "name": "AtCoder Regular Contest 035", "url": "https://atcoder.jp/contests/arc035" }, "alphabet": "A" }, "memoryLimit": 256, "timeLimit": 2000 }, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_abc114_c(self): """This tests a problem which uses a new-style format HTML. """ url = 'https://atcoder.jp/contests/abc114/tasks/abc114_c' expected = { "status": "ok", "messages": [], "result": { "url": "https://atcoder.jp/contests/abc114/tasks/abc114_c", "tests": [{ "input": "575\n", "output": "4\n" }, { "input": "3600\n", "output": "13\n" }, { "input": "999999999\n", "output": "26484\n" }], "name": "755", "context": { "contest": { "name": "AtCoder Beginner Contest 114", "url": "https://atcoder.jp/contests/abc114" }, "alphabet": "C" }, "memoryLimit": 1024, "timeLimit": 2000 }, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_call_download_atcoder_abc003_4(self): """This tests a problem which uses an old-style format HTML. """ url = 'https://atcoder.jp/contests/abc003/tasks/abc003_4' expected = { "status": "ok", "messages": [], "result": { "url": "https://atcoder.jp/contests/abc003/tasks/abc003_4", "tests": [{ "input": "3 2\n2 2\n2 2\n", "output": "12\n" }, { "input": "4 5\n3 1\n3 0\n", "output": "10\n" }, { "input": "23 18\n15 13\n100 95\n", "output": "364527243\n" }, { "input": "30 30\n24 22\n145 132\n", "output": "976668549\n" }], "name": "AtCoder\u793e\u306e\u51ac", "context": { "contest": { "name": "AtCoder Beginner Contest 003", "url": "https://atcoder.jp/contests/abc003" }, "alphabet": "D" }, "memoryLimit": 64, "timeLimit": 2000 }, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_agc036_b(self): """In this problem, a sample output is empty. 
""" url = 'https://atcoder.jp/contests/agc036/tasks/agc036_b' expected = { "status": "ok", "messages": [], "result": { "url": "https://atcoder.jp/contests/agc036/tasks/agc036_b", "tests": [{ "input": "3 2\n1 2 3\n", "output": "2 3\n" }, { "input": "5 10\n1 2 3 2 3\n", "output": "3\n" }, { "input": "6 1000000000000\n1 1 2 2 3 3\n", "output": "\n" }, { "input": "11 97\n3 1 4 1 5 9 2 6 5 3 5\n", "output": "9 2 6\n" }], "name": "Do Not Duplicate", "context": { "contest": { "name": "AtCoder Grand Contest 036", "url": "https://atcoder.jp/contests/agc036" }, "alphabet": "B" }, "memoryLimit": 1024, "timeLimit": 2000 }, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_tenka1_2014_qualA_e(self): """This problem uses an unusual HTML markup. .. seealso:: https://github.com/kmyk/online-judge-tools/issues/618 """ url = 'https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e' expected = { "status": "ok", "messages": [], "result": { "url": "https://atcoder.jp/contests/tenka1-2014-quala/tasks/tenka1_2014_qualA_e", "tests": [{ "input": "5 3\nAAB\nABB\nCDE\nFFH\nGHH\n2\n1 1\n2 3\n", "output": "15\n7\n" }, { "input": "2 2\nAB\nBA\n2\n1 1\n2 1\n", "output": "2\n2\n" }, { "input": "5 5\nAABAA\nACDEA\nAFGHA\nAIJKA\nAAAAA\n1\n3 1\n", "output": "25\n" }], "name": "\u30d1\u30ba\u30eb\u306e\u79fb\u52d5", "context": { "contest": { "name": "\u5929\u4e0b\u4e00\u30d7\u30ed\u30b0\u30e9\u30de\u30fc\u30b3\u30f3\u30c6\u30b9\u30c82014\u4e88\u9078A", "url": "https://atcoder.jp/contests/tenka1-2014-quala" }, "alphabet": "E" }, "memoryLimit": 256, "timeLimit": 5000 }, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_non_existing_problem(self): """This tests an non-existing problem. """ url = 'http://abc001.contest.atcoder.jp/tasks/abc001_100' expected = { "status": "error", "messages": ["requests.exceptions.HTTPError: 404 Client Error: Not Found for url: https://atcoder.jp/contests/abc001/tasks/abc001_100"], "result": None, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual) def test_impossible_problem(self): """This tests a problem impossible to parse sample cases. """ url = 'https://chokudai001.contest.atcoder.jp/tasks/chokudai_001_a' expected = { "status": "error", "messages": ["onlinejudge.type.SampleParseError: failed to parse samples"], "result": None, } actual = main(['get-problem', url], debug=True) self.assertEqual(expected, actual)
36.615385
157
0.411029
9,462
0.993908
0
0
0
0
0
0
4,296
0.451261
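The AtCoder tests above always compare a full expected dictionary. When only the sample structure matters, a narrower check against the same onlinejudge_api interface looks roughly like the sketch below; the URL is one the tests already exercise, and the response shape follows what those tests assert.

from onlinejudge_api.main import main

resp = main(['get-problem', 'https://atcoder.jp/contests/abc114/tasks/abc114_c'], debug=True)
assert resp['status'] == 'ok'
for sample in resp['result']['tests']:
    # every sample shown above ends its input and output with a newline
    assert sample['input'].endswith('\n')
    assert sample['output'].endswith('\n')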
54dcf64898b0684c67b6786b86aa9adc1e8b99c7
681
py
Python
odm/libexec/odm_tenant.py
UMCollab/ODM
95da49939dbcd54318a58a132aa76725fd9c0b5f
[ "MIT" ]
2
2019-04-26T13:26:02.000Z
2019-10-18T10:36:52.000Z
odm/libexec/odm_tenant.py
flowerysong/ODM
95da49939dbcd54318a58a132aa76725fd9c0b5f
[ "MIT" ]
1
2020-10-28T00:38:07.000Z
2020-10-28T00:38:07.000Z
odm/libexec/odm_tenant.py
flowerysong/ODM
95da49939dbcd54318a58a132aa76725fd9c0b5f
[ "MIT" ]
1
2019-02-21T16:41:24.000Z
2019-02-21T16:41:24.000Z
#!/usr/bin/env python3 # This file is part of ODM and distributed under the terms of the # MIT license. See COPYING. import json import sys import odm.cli def main(): cli = odm.cli.CLI(['action']) client = cli.client if cli.args.action == 'list-users': print(json.dumps(client.list_users(), indent=2)) elif cli.args.action == 'list-sites': print(json.dumps(client.list_sites(), indent=2)) elif cli.args.action == 'list-groups': print(json.dumps(client.list_groups(), indent=2)) else: print('Unsupported action {}'.format(cli.args.action), file=sys.stderr) sys.exit(1) if __name__ == '__main__': main()
21.28125
79
0.638767
0
0
0
0
0
0
0
0
192
0.281938
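The odm_tenant script above dispatches on cli.args.action with an if/elif chain. A table-driven variant keeps each new action to a single dictionary entry; the sketch below reuses only the client methods the original already calls.

import json
import sys

def dispatch(cli, client):
    # Map action names to the bound client methods used in the script above.
    actions = {
        'list-users': client.list_users,
        'list-sites': client.list_sites,
        'list-groups': client.list_groups,
    }
    handler = actions.get(cli.args.action)
    if handler is None:
        print('Unsupported action {}'.format(cli.args.action), file=sys.stderr)
        sys.exit(1)
    print(json.dumps(handler(), indent=2))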
54dde115e15519f27b695b4a4ec6e5589e225fb7
17,182
py
Python
tests/test_tag_value_parser.py
quaresmajose/tools-python
53c917a1a2491a373efa23e4ef8570b5e863fabc
[ "Apache-2.0" ]
74
2015-12-25T09:43:18.000Z
2022-03-30T00:23:30.000Z
tests/test_tag_value_parser.py
quaresmajose/tools-python
53c917a1a2491a373efa23e4ef8570b5e863fabc
[ "Apache-2.0" ]
184
2016-11-23T15:57:16.000Z
2022-03-15T05:25:59.000Z
tests/test_tag_value_parser.py
quaresmajose/tools-python
53c917a1a2491a373efa23e4ef8570b5e863fabc
[ "Apache-2.0" ]
98
2015-12-13T12:20:34.000Z
2022-03-18T15:28:35.000Z
# Copyright (c) 2014 Ahmed H. Ismail # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from unittest import TestCase import spdx from spdx.parsers.tagvalue import Parser from spdx.parsers.lexers.tagvalue import Lexer from spdx.parsers.tagvaluebuilders import Builder from spdx.parsers.loggers import StandardLogger from spdx.version import Version class TestLexer(TestCase): maxDiff = None def setUp(self): self.l = Lexer() self.l.build() def test_document(self): data = ''' SPDXVersion: SPDX-2.1 # Comment. DataLicense: CC0-1.0 DocumentName: Sample_Document-V2.1 SPDXID: SPDXRef-DOCUMENT DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301 DocumentComment: <text>This is a sample spreadsheet</text> ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'DOC_VERSION', 'SPDXVersion', 2) self.token_assert_helper(self.l.token(), 'LINE', 'SPDX-2.1', 2) self.token_assert_helper(self.l.token(), 'DOC_LICENSE', 'DataLicense', 4) self.token_assert_helper(self.l.token(), 'LINE', 'CC0-1.0', 4) self.token_assert_helper(self.l.token(), 'DOC_NAME', 'DocumentName', 5) self.token_assert_helper(self.l.token(), 'LINE', 'Sample_Document-V2.1', 5) self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 6) self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DOCUMENT', 6) self.token_assert_helper(self.l.token(), 'DOC_NAMESPACE', 'DocumentNamespace', 7) self.token_assert_helper(self.l.token(), 'LINE', 'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301', 7) self.token_assert_helper(self.l.token(), 'DOC_COMMENT', 'DocumentComment', 8) self.token_assert_helper(self.l.token(), 'TEXT', '<text>This is a sample spreadsheet</text>', 8) def test_external_document_references(self): data = ''' ExternalDocumentRef:DocumentRef-spdx-tool-2.1 http://spdx.org/spdxdocs/spdx-tools-v2.1-3F2504E0-4F89-41D3-9A0C-0305E82C3301 SHA1: d6a770ba38583ed4bb4525bd96e50461655d2759 ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'EXT_DOC_REF', 'ExternalDocumentRef', 2) self.token_assert_helper(self.l.token(), 'DOC_REF_ID', 'DocumentRef-spdx-tool-2.1', 2) self.token_assert_helper(self.l.token(), 'DOC_URI', 'http://spdx.org/spdxdocs/spdx-tools-v2.1-3F25' '04E0-4F89-41D3-9A0C-0305E82C3301', 2) self.token_assert_helper(self.l.token(), 'EXT_DOC_REF_CHKSUM', 'SHA1: ' 'd6a770ba38583ed4bb4525bd96e50461655d2759', 2) def test_creation_info(self): data = ''' ## Creation Information Creator: Person: Gary O'Neall Creator: Organization: Source Auditor Inc. 
Creator: Tool: SourceAuditor-V1.2 Created: 2010-02-03T00:00:00Z CreatorComment: <text>This is an example of an SPDX spreadsheet format</text> ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 3) self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: Gary O'Neall", 3) self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 4) self.token_assert_helper(self.l.token(), 'ORG_VALUE', 'Organization: Source Auditor Inc.', 4) self.token_assert_helper(self.l.token(), 'CREATOR', 'Creator', 5) self.token_assert_helper(self.l.token(), 'TOOL_VALUE', 'Tool: SourceAuditor-V1.2', 5) self.token_assert_helper(self.l.token(), 'CREATED', 'Created', 6) self.token_assert_helper(self.l.token(), 'DATE', '2010-02-03T00:00:00Z', 6) def test_review_info(self): data = ''' Reviewer: Person: Joe Reviewer ReviewDate: 2010-02-10T00:00:00Z ReviewComment: <text>This is just an example. Some of the non-standard licenses look like they are actually BSD 3 clause licenses</text> ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'REVIEWER', 'Reviewer', 2) self.token_assert_helper(self.l.token(), 'PERSON_VALUE', "Person: Joe Reviewer", 2) self.token_assert_helper(self.l.token(), 'REVIEW_DATE', 'ReviewDate', 3) self.token_assert_helper(self.l.token(), 'DATE', '2010-02-10T00:00:00Z', 3) self.token_assert_helper(self.l.token(), 'REVIEW_COMMENT', 'ReviewComment', 4) self.token_assert_helper(self.l.token(), 'TEXT', '''<text>This is just an example. Some of the non-standard licenses look like they are actually BSD 3 clause licenses</text>''', 4) def test_pacakage(self): data = ''' SPDXID: SPDXRef-Package FilesAnalyzed: False PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12 PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt) ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*: ExternalRefComment: <text>Some comment about the package.</text> ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'SPDX_ID', 'SPDXID', 2) self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Package', 2) self.token_assert_helper(self.l.token(), 'PKG_FILES_ANALYZED', 'FilesAnalyzed', 3) self.token_assert_helper(self.l.token(), 'LINE', 'False', 3) self.token_assert_helper(self.l.token(), 'PKG_CHKSUM', 'PackageChecksum', 4) self.token_assert_helper(self.l.token(), 'CHKSUM', 'SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 4) self.token_assert_helper(self.l.token(), 'PKG_VERF_CODE', 'PackageVerificationCode', 5) self.token_assert_helper(self.l.token(), 'LINE', '4e3211c67a2d28fced849ee1bb76e7391b93feba (SpdxTranslatorSpdx.rdf, SpdxTranslatorSpdx.txt)', 5) self.token_assert_helper(self.l.token(), 'PKG_EXT_REF', 'ExternalRef', 6) self.token_assert_helper(self.l.token(), 'LINE', 'SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:', 6) self.token_assert_helper(self.l.token(), 'PKG_EXT_REF_COMMENT', 'ExternalRefComment', 7) self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some comment about the package.</text>', 7) def test_unknown_tag(self): data = ''' SomeUnknownTag: SomeUnknownValue ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'UNKNOWN_TAG', 'SomeUnknownTag', 2) self.token_assert_helper(self.l.token(), 'LINE', 'SomeUnknownValue', 2) def test_snippet(self): data = ''' SnippetSPDXID: SPDXRef-Snippet SnippetLicenseComments: <text>Some lic comment.</text> SnippetCopyrightText: <text>Some cr text.</text> 
SnippetComment: <text>Some snippet comment.</text> SnippetName: from linux kernel SnippetFromFileSPDXID: SPDXRef-DoapSource SnippetLicenseConcluded: Apache-2.0 LicenseInfoInSnippet: Apache-2.0 ''' self.l.input(data) self.token_assert_helper(self.l.token(), 'SNIPPET_SPDX_ID', 'SnippetSPDXID', 2) self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-Snippet', 2) self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_COMMENT', 'SnippetLicenseComments', 3) self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some lic comment.</text>', 3) self.token_assert_helper(self.l.token(), 'SNIPPET_CR_TEXT', 'SnippetCopyrightText', 4) self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some cr text.</text>', 4) self.token_assert_helper(self.l.token(), 'SNIPPET_COMMENT', 'SnippetComment', 5) self.token_assert_helper(self.l.token(), 'TEXT', '<text>Some snippet comment.</text>', 5) self.token_assert_helper(self.l.token(), 'SNIPPET_NAME', 'SnippetName', 6) self.token_assert_helper(self.l.token(), 'LINE', 'from linux kernel', 6) self.token_assert_helper(self.l.token(), 'SNIPPET_FILE_SPDXID', 'SnippetFromFileSPDXID', 7) self.token_assert_helper(self.l.token(), 'LINE', 'SPDXRef-DoapSource', 7) self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_CONC', 'SnippetLicenseConcluded', 8) self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 8) self.token_assert_helper(self.l.token(), 'SNIPPET_LICS_INFO', 'LicenseInfoInSnippet', 9) self.token_assert_helper(self.l.token(), 'LINE', 'Apache-2.0', 9) def token_assert_helper(self, token, ttype, value, line): assert token.type == ttype assert token.value == value assert token.lineno == line class TestParser(TestCase): maxDiff = None document_str = '\n'.join([ 'SPDXVersion: SPDX-2.1', 'DataLicense: CC0-1.0', 'DocumentName: Sample_Document-V2.1', 'SPDXID: SPDXRef-DOCUMENT', 'DocumentComment: <text>Sample Comment</text>', 'DocumentNamespace: https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301' ]) creation_str = '\n'.join([ 'Creator: Person: Bob ([email protected])', 'Creator: Organization: Acme.', 'Created: 2010-02-03T00:00:00Z', 'CreatorComment: <text>Sample Comment</text>' ]) review_str = '\n'.join([ 'Reviewer: Person: Bob the Reviewer', 'ReviewDate: 2010-02-10T00:00:00Z', 'ReviewComment: <text>Bob was Here.</text>', 'Reviewer: Person: Alice the Reviewer', 'ReviewDate: 2011-02-10T00:00:00Z', 'ReviewComment: <text>Alice was also here.</text>' ]) package_str = '\n'.join([ 'PackageName: Test', 'SPDXID: SPDXRef-Package', 'PackageVersion: Version 0.9.2', 'PackageDownloadLocation: http://example.com/test', 'FilesAnalyzed: True', 'PackageSummary: <text>Test package</text>', 'PackageSourceInfo: <text>Version 1.0 of test</text>', 'PackageFileName: test-1.0.zip', 'PackageSupplier: Organization:ACME', 'PackageOriginator: Organization:ACME', 'PackageChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 'PackageVerificationCode: 4e3211c67a2d28fced849ee1bb76e7391b93feba (something.rdf, something.txt)', 'PackageDescription: <text>A package.</text>', 'PackageComment: <text>Comment on the package.</text>', 'PackageCopyrightText: <text> Copyright 2014 Acme Inc.</text>', 'PackageLicenseDeclared: Apache-2.0', 'PackageLicenseConcluded: (LicenseRef-2.0 and Apache-2.0)', 'PackageLicenseInfoFromFiles: Apache-1.0', 'PackageLicenseInfoFromFiles: Apache-2.0', 'PackageLicenseComments: <text>License Comments</text>', 'ExternalRef: SECURITY cpe23Type cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:', 'ExternalRefComment: <text>Some comment 
about the package.</text>' ]) file_str = '\n'.join([ 'FileName: testfile.java', 'SPDXID: SPDXRef-File', 'FileType: SOURCE', 'FileChecksum: SHA1: 2fd4e1c67a2d28fced849ee1bb76e7391b93eb12', 'LicenseConcluded: Apache-2.0', 'LicenseInfoInFile: Apache-2.0', 'FileCopyrightText: <text>Copyright 2014 Acme Inc.</text>', 'ArtifactOfProjectName: AcmeTest', 'ArtifactOfProjectHomePage: http://www.acme.org/', 'ArtifactOfProjectURI: http://www.acme.org/', 'FileComment: <text>Very long file</text>' ]) unknown_tag_str = 'SomeUnknownTag: SomeUnknownValue' snippet_str = '\n'.join([ 'SnippetSPDXID: SPDXRef-Snippet', 'SnippetLicenseComments: <text>Some lic comment.</text>', 'SnippetCopyrightText: <text> Copyright 2008-2010 John Smith </text>', 'SnippetComment: <text>Some snippet comment.</text>', 'SnippetName: from linux kernel', 'SnippetFromFileSPDXID: SPDXRef-DoapSource', 'SnippetLicenseConcluded: Apache-2.0', 'LicenseInfoInSnippet: Apache-2.0', ]) complete_str = '{0}\n{1}\n{2}\n{3}\n{4}\n{5}'.format(document_str, creation_str, review_str, package_str, file_str, snippet_str) def setUp(self): self.p = Parser(Builder(), StandardLogger()) self.p.build() def test_doc(self): document, error = self.p.parse(self.complete_str) assert document is not None assert not error assert document.version == Version(major=2, minor=1) assert document.data_license.identifier == 'CC0-1.0' assert document.name == 'Sample_Document-V2.1' assert document.spdx_id == 'SPDXRef-DOCUMENT' assert document.comment == 'Sample Comment' assert document.namespace == 'https://spdx.org/spdxdocs/spdx-example-444504E0-4F89-41D3-9A0C-0305E82C3301' def test_creation_info(self): document, error = self.p.parse(self.complete_str) assert document is not None assert not error assert len(document.creation_info.creators) == 2 assert document.creation_info.comment == 'Sample Comment' assert (document.creation_info.created_iso_format == '2010-02-03T00:00:00Z') def test_review(self): document, error = self.p.parse(self.complete_str) assert document is not None assert not error assert len(document.reviews) == 2 def test_package(self): document, error = self.p.parse(self.complete_str) assert document is not None assert not error assert document.package.name == 'Test' assert document.package.spdx_id == 'SPDXRef-Package' assert document.package.version == 'Version 0.9.2' assert len(document.package.licenses_from_files) == 2 assert (document.package.conc_lics.identifier == 'LicenseRef-2.0 AND Apache-2.0') assert document.package.files_analyzed == True assert document.package.comment == 'Comment on the package.' assert document.package.pkg_ext_refs[-1].category == 'SECURITY' assert document.package.pkg_ext_refs[-1].pkg_ext_ref_type == 'cpe23Type' assert document.package.pkg_ext_refs[-1].locator == 'cpe:2.3:a:pivotal_software:spring_framework:4.1.0:*:*:*:*:*:*:' assert document.package.pkg_ext_refs[-1].comment == 'Some comment about the package.' 
def test_file(self): document, error = self.p.parse(self.complete_str) assert document is not None assert not error assert len(document.package.files) == 1 spdx_file = document.package.files[0] assert spdx_file.name == 'testfile.java' assert spdx_file.spdx_id == 'SPDXRef-File' assert spdx_file.type == spdx.file.FileType.SOURCE assert len(spdx_file.artifact_of_project_name) == 1 assert len(spdx_file.artifact_of_project_home) == 1 assert len(spdx_file.artifact_of_project_uri) == 1 def test_unknown_tag(self): try: from StringIO import StringIO except ImportError: from io import StringIO saved_out = sys.stdout sys.stdout = StringIO() document, error = self.p.parse(self.unknown_tag_str) self.assertEqual(sys.stdout.getvalue(), 'Found unknown tag : SomeUnknownTag at line: 1\n') sys.stdout = saved_out assert error assert document is not None def test_snippet(self): document, error = self.p.parse(self.complete_str) assert document is not None assert not error assert len(document.snippet) == 1 assert document.snippet[-1].spdx_id == 'SPDXRef-Snippet' assert document.snippet[-1].name == 'from linux kernel' assert document.snippet[-1].comment == 'Some snippet comment.' assert document.snippet[-1].copyright == ' Copyright 2008-2010 John Smith ' assert document.snippet[-1].license_comment == 'Some lic comment.' assert document.snippet[-1].snip_from_file_spdxid == 'SPDXRef-DoapSource' assert document.snippet[-1].conc_lics.identifier == 'Apache-2.0' assert document.snippet[-1].licenses_in_snippet[-1].identifier == 'Apache-2.0'
49.091429
178
0.650506
16,325
0.950122
0
0
0
0
0
0
7,968
0.463741
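The lexer tests above pull tokens one at a time through token_assert_helper. For debugging new tag-value snippets it can help to dump the whole stream; the small sketch below uses only the Lexer calls and token attributes the tests already rely on, and assumes the PLY convention that token() returns None at end of input.

from spdx.parsers.lexers.tagvalue import Lexer

def dump_tokens(text):
    lexer = Lexer()
    lexer.build()
    lexer.input(text)
    tokens = []
    while True:
        tok = lexer.token()
        if tok is None:  # end of input for PLY-style lexers
            break
        tokens.append((tok.type, tok.value, tok.lineno))
    return tokens

print(dump_tokens('SPDXVersion: SPDX-2.1\nDataLicense: CC0-1.0\n'))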
54df90a5374a87e257978dcb4c0e1caa9abfa7f7
2,024
py
Python
mount_drives.py
DT-was-an-ET/fanshim-python-pwm
dd3e6e29251000946e34d80704c040b5bcad7f8e
[ "MIT" ]
null
null
null
mount_drives.py
DT-was-an-ET/fanshim-python-pwm
dd3e6e29251000946e34d80704c040b5bcad7f8e
[ "MIT" ]
null
null
null
mount_drives.py
DT-was-an-ET/fanshim-python-pwm
dd3e6e29251000946e34d80704c040b5bcad7f8e
[ "MIT" ]
3
2020-02-27T13:45:19.000Z
2020-03-26T13:38:17.000Z
# Standard library imports
from subprocess import call as subprocess_call
from utility import fileexists
from time import sleep as time_sleep
from datetime import datetime

mount_try = 1
not_yet = True
done = False
start_time = datetime.now()

if fileexists("/home/rpi4-sftp/usb/drive_present.txt"):
    when_usba = 0
else:
    when_usba = -1
if fileexists("/home/duck-sftp/usb/drive_present.txt"):
    when_usbb = 0
else:
    when_usbb = -1
if fileexists("/home/pi/mycloud/drive_present.txt"):
    when_mycloud = 0
else:
    when_mycloud = -1

while (mount_try < 30) and not_yet:
    try:
        usba_mounted = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted = fileexists("/home/pi/mycloud/drive_present.txt")
        if not (usba_mounted and usbb_mounted and mycloud_mounted):
            print("Something needs mounting; this is try number: ", mount_try)
            subprocess_call(["sudo", "mount", "-a"])
            mount_try += 1
        usba_mounted_after = fileexists("/home/rpi4-sftp/usb/drive_present.txt")
        usbb_mounted_after = fileexists("/home/duck-sftp/usb/drive_present.txt")
        mycloud_mounted_after = fileexists("/home/pi/mycloud/drive_present.txt")
        if not usba_mounted and usba_mounted_after:
            when_usba = round((datetime.now() - start_time).total_seconds(), 2)
        if not usbb_mounted and usbb_mounted_after:
            when_usbb = round((datetime.now() - start_time).total_seconds(), 2)
        if not mycloud_mounted and mycloud_mounted_after:
            when_mycloud = round((datetime.now() - start_time).total_seconds(), 2)
        if usba_mounted_after and usbb_mounted_after and mycloud_mounted_after:
            print("Success at :", when_usba, when_usbb, when_mycloud, " secs from start")
            not_yet = False
            done = True
    except Exception:
        # Report which attempt failed; the mount will be retried on the next pass.
        print("Mount try ", mount_try, " error")
    time_sleep(1)

if done:
    print("Great!")
else:
    print("Failed to do all or drive_present.txt file not present; Times :", when_usba, when_usbb, when_mycloud)
    while True:
        time_sleep(20000)
36.142857
107
0.733202
0
0
0
0
0
0
0
0
556
0.274704
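The mount script above is one concrete instance of a retry loop: check a predicate, run a command, sleep, give up after a bounded number of attempts. A generic helper distilled from that pattern might look like the sketch below; nothing in it is tied to the original mount points.

from subprocess import call
from time import sleep

def retry_until(predicate, command, attempts=30, delay=1.0):
    """Run `command` until `predicate()` becomes true; return the attempt number, or None on failure."""
    for attempt in range(1, attempts + 1):
        if predicate():
            return attempt
        call(command)
        sleep(delay)
    return None

# e.g. retry_until(lambda: os.path.exists('/mnt/usb/drive_present.txt'), ['sudo', 'mount', '-a'])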
54e0817402b9c2ce35c6af23684ce91b4042e10a
5,639
py
Python
home/views.py
Kshitij-Kumar-Singh-Chauhan/docon
bff0547e7bbd030e027217a2ca7800a8da529b56
[ "MIT" ]
null
null
null
home/views.py
Kshitij-Kumar-Singh-Chauhan/docon
bff0547e7bbd030e027217a2ca7800a8da529b56
[ "MIT" ]
null
null
null
home/views.py
Kshitij-Kumar-Singh-Chauhan/docon
bff0547e7bbd030e027217a2ca7800a8da529b56
[ "MIT" ]
2
2021-06-17T05:35:07.000Z
2021-06-17T06:01:23.000Z
from django.http.response import HttpResponse from django.shortcuts import render from django.shortcuts import redirect, render from cryptography.fernet import Fernet from .models import Book, UserDetails from .models import Contact from .models import Book from .models import Report from .models import Diagnostic from datetime import datetime # Create your views here. def homePage(request): if(request.method == 'POST'): email = request.POST.get('email') password = request.POST.get('password') try: object = UserDetails.objects.get(email = email) key1 = object.key key1=key1[2:-1] key1 = bytes(key1,'utf-8') f = Fernet(key1) truepassword = object.password truepassword = truepassword[2:-1] truepassword = bytes(truepassword,'utf-8') truepassword = f.decrypt(truepassword).decode('utf-8') except: object = None if(object==None): context = { 'message': "Email Does Not Exist" } return render(request,"login.html",context) elif(password == truepassword): if object.profession == "PATIENT": object1=UserDetails.objects.filter(profession="DOCTOR") # name=(object.name) # appointment(request,email,name) context1={ 'message':'Welcome '+object.name, 'mail' : object.email, 'doctors':object1 } return render(request,"index.html",context1) else: context2={ 'message':'Welcome '+object.name, 'mail' : object.email } return render(request,"dindex.html",context2) else: return redirect("/") else: return render(request,"login.html",{}) def signUpPage(request): if(request.method == 'POST'): name = request.POST.get('name') email = request.POST.get('email') password = request.POST.get('password') passwordVerif = request.POST.get('passwordVerif') profession = request.POST.get('user') data = request.POST.get('data') if(email ==''): context = { 'message': "Please enter Email ID" } return render(request,"signup.html",context) elif(password == passwordVerif): key = Fernet.generate_key() f = Fernet(key) password = bytes(password,'utf-8') token = f.encrypt(password) key = str(key) print(key) UserDetails.objects.create(email=email, name=name , password=token, key = key, profession=profession, data=data) return redirect("/") else: context = { 'message': "Password doesn't match" } return render(request,"signup.html",context) else: return render(request,"signup.html",{}) # def index(request): # context={ 'alpha': 'This is sent'} # if request.method=='POST': # pass # else: return render(request, 'index.html',context) #HttpResponse('This is homepage') def about(request): return render(request, 'about.html') def services(request): return render(request, 'services.html') def contact(request): if request.method == "POST": email = request.POST.get('email') name = request.POST.get('name') phone = request.POST.get('phone') address = request.POST.get('address') contact = Contact(email=email , name=name, phone=phone,address=address,date=datetime.today()) contact.save() # messages.success(request, 'Your message has been sent !') return render(request,"contact.html") def book(request): if request.method == "POST": email = request.POST.get('email') name = request.POST.get('name') phone = request.POST.get('phone') address = request.POST.get('address') book = Book(email=email , name=name, phone=phone,problem=address,date=datetime.today()) book.save() return render(request,"book.html") def report(request): if request.method == "POST": email = request.POST.get('email') name = request.POST.get('name') phone = request.POST.get('phone') message = request.POST.get('message') report = Report(email=email , name=name, phone=phone, message=message, 
date=datetime.today()) report.save() return render(request,"report.html") def diag(request): if request.method == "POST": email = request.POST.get('email') name = request.POST.get('name') phone = request.POST.get('phone') tests = request.POST.get('drop1') tests = str(tests) if(email ==''): context = { 'message': "Please enter Email ID" } return render(request,"diag.html",context) else: diag = Diagnostic(email=email , name=name, phone=phone, tests=tests, date=datetime.today()) diag.save() # messages.success(request, 'Your message has been sent !') return render(request,"diag.html") # def appointment(request,email,name): # if request.method == "POST": # problem = request.POST.get('problem') # book = Appoint(problem=problem, email=email, name=name) # book.save() # return render(request,"index.html")
33.565476
124
0.567477
0
0
0
0
0
0
0
0
1,272
0.225572
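The signup and login views above keep passwords recoverable by encrypting them with Fernet and decrypting at login; the round-trip they depend on is just the few lines below. A common alternative design is a one-way hash (for example Django's make_password and check_password), which avoids storing anything that can be decrypted at all.

from cryptography.fernet import Fernet

key = Fernet.generate_key()      # must be persisted, as the views above store it per user
f = Fernet(key)
token = f.encrypt(b's3cret')     # bytes in, opaque token out
assert f.decrypt(token) == b's3cret'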
54e0e5e26d187af365758e0bf02412dfcafdb712
1,028
py
Python
hkube_python_wrapper/storage/base_storage_manager.py
kube-HPC/python-wrapper.hkube
74713d9fea6689c116ade7d67b7ab67373a79d3b
[ "MIT" ]
1
2021-03-20T06:18:57.000Z
2021-03-20T06:18:57.000Z
hkube_python_wrapper/storage/base_storage_manager.py
kube-HPC/python-wrapper.hkube
74713d9fea6689c116ade7d67b7ab67373a79d3b
[ "MIT" ]
10
2020-04-24T06:58:59.000Z
2022-03-30T14:42:55.000Z
hkube_python_wrapper/storage/base_storage_manager.py
kube-HPC/python-wrapper.hkube
74713d9fea6689c116ade7d67b7ab67373a79d3b
[ "MIT" ]
null
null
null
class BaseStorageManager(object):
    """Thin wrapper that forwards storage calls to an adapter and normalizes errors."""

    def __init__(self, adapter):
        self.adapter = adapter

    def put(self, options):
        try:
            return self.adapter.put(options)
        except Exception:
            raise Exception('Failed to write data to storage')

    def get(self, options):
        try:
            data = self.adapter.get(options)
            return data
        except Exception as e:
            raise Exception('Failed to read data from storage: ' + str(e))

    def list(self, options):
        try:
            return self.adapter.list(options)
        except Exception:
            raise Exception('Failed to list storage data')

    def listPrefix(self, options):
        try:
            return self.adapter.listPrefix(options)
        except Exception:
            raise Exception('Failed to listPrefix storage data')

    def delete(self, options):
        try:
            self.adapter.delete(options)
        except Exception:
            raise Exception('Failed to delete storage data')
27.783784
72
0.59144
1,026
0.998054
0
0
0
0
0
0
162
0.157588
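BaseStorageManager above only assumes its adapter exposes put/get/list/listPrefix/delete, so it can be exercised with a throwaway in-memory adapter. In the sketch below the options keys ('path', 'data') are my own assumption for illustration; the real hkube adapters define their own option shapes.

class DictAdapter(object):
    """Minimal in-memory stand-in for a storage adapter (illustration only)."""
    def __init__(self):
        self._store = {}
    def put(self, options):
        self._store[options['path']] = options['data']
        return options['path']
    def get(self, options):
        return self._store[options['path']]
    def list(self, options):
        return list(self._store)
    def listPrefix(self, options):
        return [k for k in self._store if k.startswith(options['path'])]
    def delete(self, options):
        self._store.pop(options['path'], None)

manager = BaseStorageManager(DictAdapter())
manager.put({'path': 'jobs/1', 'data': b'payload'})
assert manager.get({'path': 'jobs/1'}) == b'payload'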
54e0ed7eefaaeac2cfcbec8d464ffc806c518afa
9,892
py
Python
compressor/tests/templatetags.py
bigmlcom/django_compressor
66dfda503633018275fdb64ad46ef80dc9a3901d
[ "Apache-2.0" ]
null
null
null
compressor/tests/templatetags.py
bigmlcom/django_compressor
66dfda503633018275fdb64ad46ef80dc9a3901d
[ "Apache-2.0" ]
null
null
null
compressor/tests/templatetags.py
bigmlcom/django_compressor
66dfda503633018275fdb64ad46ef80dc9a3901d
[ "Apache-2.0" ]
null
null
null
from __future__ import with_statement import os import sys from mock import Mock from django.template import Template, Context, TemplateSyntaxError from django.test import TestCase from compressor.conf import settings from compressor.signals import post_compress from compressor.tests.base import css_tag, test_dir def render(template_string, context_dict=None): """ A shortcut for testing template output. """ if context_dict is None: context_dict = {} c = Context(context_dict) t = Template(template_string) return t.render(c).strip() class TemplatetagTestCase(TestCase): def setUp(self): self.old_enabled = settings.COMPRESS_ENABLED settings.COMPRESS_ENABLED = True self.context = {'MEDIA_URL': settings.COMPRESS_URL} def tearDown(self): settings.COMPRESS_ENABLED = self.old_enabled def test_empty_tag(self): template = u"""{% load compress %}{% compress js %}{% block js %} {% endblock %}{% endcompress %}""" self.assertEqual(u'', render(template, self.context)) def test_css_tag(self): template = u"""{% load compress %}{% compress css %} <link rel="stylesheet" href="{{ MEDIA_URL }}css/one.css" type="text/css"> <style type="text/css">p { border:5px solid green;}</style> <link rel="stylesheet" href="{{ MEDIA_URL }}css/two.css" type="text/css"> {% endcompress %}""" out = css_tag("/media/CACHE/css/e41ba2cc6982.css") self.assertEqual(out, render(template, self.context)) def test_uppercase_rel(self): template = u"""{% load compress %}{% compress css %} <link rel="StyleSheet" href="{{ MEDIA_URL }}css/one.css" type="text/css"> <style type="text/css">p { border:5px solid green;}</style> <link rel="StyleSheet" href="{{ MEDIA_URL }}css/two.css" type="text/css"> {% endcompress %}""" out = css_tag("/media/CACHE/css/e41ba2cc6982.css") self.assertEqual(out, render(template, self.context)) def test_nonascii_css_tag(self): template = u"""{% load compress %}{% compress css %} <link rel="stylesheet" href="{{ MEDIA_URL }}css/nonasc.css" type="text/css"> <style type="text/css">p { border:5px solid green;}</style> {% endcompress %} """ out = css_tag("/media/CACHE/css/799f6defe43c.css") self.assertEqual(out, render(template, self.context)) def test_js_tag(self): template = u"""{% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script> <script type="text/javascript">obj.value = "value";</script> {% endcompress %} """ out = u'<script type="text/javascript" src="/media/CACHE/js/066cd253eada.js"></script>' self.assertEqual(out, render(template, self.context)) def test_nonascii_js_tag(self): template = u"""{% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/nonasc.js" type="text/javascript"></script> <script type="text/javascript">var test_value = "\u2014";</script> {% endcompress %} """ out = u'<script type="text/javascript" src="/media/CACHE/js/e214fe629b28.js"></script>' self.assertEqual(out, render(template, self.context)) def test_nonascii_latin1_js_tag(self): template = u"""{% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/nonasc-latin1.js" type="text/javascript" charset="latin-1"></script> <script type="text/javascript">var test_value = "\u2014";</script> {% endcompress %} """ out = u'<script type="text/javascript" src="/media/CACHE/js/be9e078b5ca7.js"></script>' self.assertEqual(out, render(template, self.context)) def test_compress_tag_with_illegal_arguments(self): template = u"""{% load compress %}{% compress pony %} <script type="pony/application">unicorn</script> {% endcompress %}""" self.assertRaises(TemplateSyntaxError, render, 
template, {}) def test_debug_toggle(self): template = u"""{% load compress %}{% compress js %} <script src="{{ MEDIA_URL }}js/one.js" type="text/javascript"></script> <script type="text/javascript">obj.value = "value";</script> {% endcompress %} """ class MockDebugRequest(object): GET = {settings.COMPRESS_DEBUG_TOGGLE: 'true'} context = dict(self.context, request=MockDebugRequest()) out = u"""<script src="/media/js/one.js" type="text/javascript"></script> <script type="text/javascript">obj.value = "value";</script>""" self.assertEqual(out, render(template, context)) def test_named_compress_tag(self): template = u"""{% load compress %}{% compress js inline foo %} <script type="text/javascript">obj.value = "value";</script> {% endcompress %} """ def listener(sender, **kwargs): pass callback = Mock(wraps=listener) post_compress.connect(callback) render(template) args, kwargs = callback.call_args context = kwargs['context'] self.assertEqual('foo', context['compressed']['name']) class PrecompilerTemplatetagTestCase(TestCase): def setUp(self): self.old_enabled = settings.COMPRESS_ENABLED self.old_precompilers = settings.COMPRESS_PRECOMPILERS precompiler = os.path.join(test_dir, 'precompiler.py') python = sys.executable settings.COMPRESS_ENABLED = True settings.COMPRESS_PRECOMPILERS = ( ('text/coffeescript', '%s %s' % (python, precompiler)), ) self.context = {'MEDIA_URL': settings.COMPRESS_URL} def tearDown(self): settings.COMPRESS_ENABLED = self.old_enabled settings.COMPRESS_PRECOMPILERS = self.old_precompilers def test_compress_coffeescript_tag(self): template = u"""{% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> {% endcompress %}""" out = script(src="/media/CACHE/js/e920d58f166d.js") self.assertEqual(out, render(template, self.context)) def test_compress_coffeescript_tag_and_javascript_tag(self): template = u"""{% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> <script type="text/javascript"># this too is a comment.</script> {% endcompress %}""" out = script(src="/media/CACHE/js/ef6b32a54575.js") self.assertEqual(out, render(template, self.context)) def test_coffeescript_and_js_tag_with_compress_enabled_equals_false(self): self.old_enabled = settings.COMPRESS_ENABLED settings.COMPRESS_ENABLED = False try: template = u"""{% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> <script type="text/javascript"># this too is a comment.</script> {% endcompress %}""" out = (script('# this is a comment.\n') + '\n' + script('# this too is a comment.')) self.assertEqual(out, render(template, self.context)) finally: settings.COMPRESS_ENABLED = self.old_enabled def test_compress_coffeescript_tag_compress_enabled_is_false(self): self.old_enabled = settings.COMPRESS_ENABLED settings.COMPRESS_ENABLED = False try: template = u"""{% load compress %}{% compress js %} <script type="text/coffeescript"># this is a comment.</script> {% endcompress %}""" out = script("# this is a comment.\n") self.assertEqual(out, render(template, self.context)) finally: settings.COMPRESS_ENABLED = self.old_enabled def test_compress_coffeescript_file_tag_compress_enabled_is_false(self): self.old_enabled = settings.COMPRESS_ENABLED settings.COMPRESS_ENABLED = False try: template = u""" {% load compress %}{% compress js %} <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee"> </script> {% endcompress %}""" out = script(src="/media/CACHE/js/one.95cfb869eead.js") 
self.assertEqual(out, render(template, self.context)) finally: settings.COMPRESS_ENABLED = self.old_enabled def test_multiple_file_order_conserved(self): self.old_enabled = settings.COMPRESS_ENABLED settings.COMPRESS_ENABLED = False try: template = u""" {% load compress %}{% compress js %} <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.coffee"> </script> <script src="{{ MEDIA_URL }}js/one.js"></script> <script type="text/coffeescript" src="{{ MEDIA_URL }}js/one.js"> </script> {% endcompress %}""" out = '\n'.join([ script(src="/media/CACHE/js/one.95cfb869eead.js"), script(scripttype="", src="/media/js/one.js"), script(src="/media/CACHE/js/one.81a2cd965815.js"),]) self.assertEqual(out, render(template, self.context)) finally: settings.COMPRESS_ENABLED = self.old_enabled def script(content="", src="", scripttype="text/javascript"): """ returns a unicode text html script element. >>> script('#this is a comment', scripttype="text/applescript") '<script type="text/applescript">#this is a comment</script>' """ out_script = u'<script ' if scripttype: out_script += u'type="%s" ' % scripttype if src: out_script += u'src="%s" ' % src return out_script[:-1] + u'>%s</script>' % content
41.563025
107
0.616761
8,840
0.893651
0
0
0
0
0
0
4,634
0.468459
54e0f7ad3e850fa6d21aab5200a2493a26332352
3,324
py
Python
cle/cle/backends/relocations/generic.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
null
null
null
cle/cle/backends/relocations/generic.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
null
null
null
cle/cle/backends/relocations/generic.py
Ruide/angr-dev
964dc80c758e25c698c2cbcc454ef5954c5fa0a0
[ "BSD-2-Clause" ]
null
null
null
from ...address_translator import AT from ...errors import CLEOperationError from . import Relocation import struct import logging l = logging.getLogger('cle.relocations.generic') class GenericAbsoluteReloc(Relocation): @property def value(self): return self.resolvedby.rebased_addr class GenericAbsoluteAddendReloc(Relocation): @property def value(self): return self.resolvedby.rebased_addr + self.addend class GenericPCRelativeAddendReloc(Relocation): @property def value(self): return self.resolvedby.rebased_addr + self.addend - self.rebased_addr class GenericJumpslotReloc(Relocation): @property def value(self): if self.is_rela: return self.resolvedby.rebased_addr + self.addend else: return self.resolvedby.rebased_addr class GenericRelativeReloc(Relocation): @property def value(self): return self.owner_obj.mapped_base + self.addend def resolve_symbol(self, solist, bypass_compatibility=False): self.resolve(None) return True class GenericCopyReloc(Relocation): @property def value(self): return self.resolvedby.owner_obj.memory.read_addr_at(self.resolvedby.relative_addr) class MipsGlobalReloc(GenericAbsoluteReloc): pass class MipsLocalReloc(Relocation): def relocate(self, solist, bypass_compatibility=False): # pylint: disable=unused-argument if self.owner_obj.mapped_base == 0: self.resolve(None) return True # don't touch local relocations on the main bin delta = self.owner_obj.mapped_base - self.owner_obj._dynamic['DT_MIPS_BASE_ADDRESS'] if delta == 0: self.resolve(None) return True val = self.owner_obj.memory.read_addr_at(self.relative_addr) newval = val + delta self.owner_obj.memory.write_addr_at(self.relative_addr, newval) self.resolve(None) return True class RelocTruncate32Mixin(object): """ A mix-in class for relocations that cover a 32-bit field regardless of the architecture's address word length. """ # If True, 32-bit truncated value must equal to its original when zero-extended check_zero_extend = False # If True, 32-bit truncated value must equal to its original when sign-extended check_sign_extend = False def relocate(self, solist, bypass_compatibility=False): # pylint: disable=unused-argument if not self.resolve_symbol(solist): return False arch_bits = self.owner_obj.arch.bits assert arch_bits >= 32 # 16-bit makes no sense here val = self.value % (2**arch_bits) # we must truncate it to native range first if (self.check_zero_extend and val >> 32 != 0 or self.check_sign_extend and val >> 32 != ((1 << (arch_bits - 32)) - 1) if ((val >> 31) & 1) == 1 else 0): raise CLEOperationError("relocation truncated to fit: %s; consider making" " relevant addresses fit in the 32-bit address space." % self.__class__.__name__) by = struct.pack(self.owner_obj.arch.struct_fmt(32), val % (2**32)) self.owner_obj.memory.write_bytes(self.dest_addr, by)
36.130435
117
0.666968
3,125
0.940132
0
0
657
0.197653
0
0
619
0.186221
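The RelocTruncate32Mixin above packs a value into a 32-bit field and refuses values that would not survive zero- or sign-extension back to the native width. Restated as a standalone predicate it reads as below; this follows my reading of the original condition, so treat it as a sketch rather than CLE's authoritative behaviour.

def fits_in_32_bits(value, arch_bits, zero_extend=False, sign_extend=False):
    value %= 2 ** arch_bits            # truncate to the native address width first
    low, high = value % (2 ** 32), value >> 32
    if zero_extend and high != 0:
        return False                   # upper bits would be lost when zero-extending the low word
    if sign_extend:
        expected = (1 << (arch_bits - 32)) - 1 if (low >> 31) & 1 else 0
        return high == expected        # upper bits must match the low word's sign
    return True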
54e179a25d793c478f7e42c99a00025d13aed6d0
1,438
py
Python
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
charlescayno/automation
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
[ "MIT" ]
null
null
null
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
charlescayno/automation
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
[ "MIT" ]
null
null
null
codes/Lib/site-packages/openpyxl/writer/tests/test_style.py
charlescayno/automation
a4a34d87f372d49fd69740ad3ca46ae19bf2612d
[ "MIT" ]
null
null
null
# Copyright (c) 2010-2014 openpyxl import pytest from openpyxl.styles.borders import Border, Side from openpyxl.styles.fills import GradientFill from openpyxl.styles.colors import Color from openpyxl.writer.styles import StyleWriter from openpyxl.tests.helper import get_xml, compare_xml class DummyWorkbook: style_properties = [] def test_write_gradient_fill(): fill = GradientFill(degree=90, stop=[Color(theme=0), Color(theme=4)]) writer = StyleWriter(DummyWorkbook()) writer._write_gradient_fill(writer._root, fill) xml = get_xml(writer._root) expected = """<?xml version="1.0" ?> <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"> <gradientFill degree="90" type="linear"> <stop position="0"> <color theme="0"/> </stop> <stop position="1"> <color theme="4"/> </stop> </gradientFill> </styleSheet> """ diff = compare_xml(xml, expected) assert diff is None, diff def test_write_borders(): borders = Border() writer = StyleWriter(DummyWorkbook()) writer._write_border(writer._root, borders) xml = get_xml(writer._root) expected = """<?xml version="1.0"?> <styleSheet xmlns="http://schemas.openxmlformats.org/spreadsheetml/2006/main"> <border> <left/> <right/> <top/> <bottom/> <diagonal/> </border> </styleSheet> """ diff = compare_xml(xml, expected) assert diff is None, diff
25.678571
78
0.684284
47
0.032684
0
0
0
0
0
0
557
0.387344
54e1fce9e0db363710daf71e66104aba025bc831
477
py
Python
ringapp/migrations/0009_auto_20150116_1759.py
rschwiebert/RingApp
35675b3dd81728d71b7dc70071be3185d7f99bf4
[ "MIT" ]
10
2015-02-02T12:40:05.000Z
2022-01-29T14:11:03.000Z
ringapp/migrations/0009_auto_20150116_1759.py
rschwiebert/RingApp
35675b3dd81728d71b7dc70071be3185d7f99bf4
[ "MIT" ]
22
2015-01-07T21:29:24.000Z
2022-03-19T01:15:13.000Z
ringapp/migrations/0009_auto_20150116_1759.py
rschwiebert/RingApp
35675b3dd81728d71b7dc70071be3185d7f99bf4
[ "MIT" ]
1
2016-08-07T15:41:51.000Z
2016-08-07T15:41:51.000Z
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('ringapp', '0008_auto_20150116_1755'), ] operations = [ migrations.AlterModelTable( name='invariance', table='invariance', ), migrations.AlterModelTable( name='invarianttype', table='invariant_types', ), ]
20.73913
47
0.589099
368
0.771488
0
0
0
0
0
0
113
0.236897
54e218f734c2d85cbff6df8c45d35331a499ae96
654
py
Python
front-end/testsuite-python-lib/Python-3.1/Lib/json/tests/test_dump.py
MalloyPower/parsing-python
b2bca5eed07ea2af7a2001cd4f63becdfb0570be
[ "MIT" ]
1
2020-11-26T18:53:46.000Z
2020-11-26T18:53:46.000Z
Lib/json/tests/test_dump.py
orestis/python
870a82aac7788ffa105e2a3e4480b3715c93bff6
[ "PSF-2.0" ]
null
null
null
Lib/json/tests/test_dump.py
orestis/python
870a82aac7788ffa105e2a3e4480b3715c93bff6
[ "PSF-2.0" ]
2
2018-08-06T04:37:38.000Z
2022-02-27T18:07:12.000Z
from unittest import TestCase from io import StringIO import json class TestDump(TestCase): def test_dump(self): sio = StringIO() json.dump({}, sio) self.assertEquals(sio.getvalue(), '{}') def test_dumps(self): self.assertEquals(json.dumps({}), '{}') def test_encode_truefalse(self): self.assertEquals(json.dumps( {True: False, False: True}, sort_keys=True), '{"false": true, "true": false}') self.assertEquals(json.dumps( {2: 3.0, 4.0: 5, False: 1, 6: True}, sort_keys=True), '{"false": 1, "2": 3.0, "4.0": 5, "6": true}')
29.727273
69
0.547401
585
0.894495
0
0
0
0
0
0
85
0.129969
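The dump tests above already show that booleans and numbers used as keys come out as JSON strings; the small round-trip below makes the practical consequence explicit, namely that loading the dump does not restore the original key types.

import json

original = {True: 1, 2: 'two'}
restored = json.loads(json.dumps(original))
assert restored == {'true': 1, '2': 'two'}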
54e3b8446107d9bccd2d0bc314395d7a3117387b
7,069
py
Python
src/resources/clients/python_client/visitstate.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
226
2018-12-29T01:13:49.000Z
2022-03-30T19:16:31.000Z
src/resources/clients/python_client/visitstate.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
5,100
2019-01-14T18:19:25.000Z
2022-03-31T23:08:36.000Z
src/resources/clients/python_client/visitstate.py
visit-dav/vis
c08bc6e538ecd7d30ddc6399ec3022b9e062127e
[ "BSD-3-Clause" ]
84
2019-01-24T17:41:50.000Z
2022-03-10T10:01:46.000Z
import sys class RPCType(object): CloseRPC = 0 DetachRPC = 1 AddWindowRPC = 2 DeleteWindowRPC = 3 SetWindowLayoutRPC = 4 SetActiveWindowRPC = 5 ClearWindowRPC = 6 ClearAllWindowsRPC = 7 OpenDatabaseRPC = 8 CloseDatabaseRPC = 9 ActivateDatabaseRPC = 10 CheckForNewStatesRPC = 11 CreateDatabaseCorrelationRPC = 12 AlterDatabaseCorrelationRPC = 13 DeleteDatabaseCorrelationRPC = 14 ReOpenDatabaseRPC = 15 ReplaceDatabaseRPC = 16 OverlayDatabaseRPC = 17 OpenComputeEngineRPC = 18 CloseComputeEngineRPC = 19 AnimationSetNFramesRPC = 20 AnimationPlayRPC = 21 AnimationReversePlayRPC = 22 AnimationStopRPC = 23 TimeSliderNextStateRPC = 24 TimeSliderPreviousStateRPC = 25 SetTimeSliderStateRPC = 26 SetActiveTimeSliderRPC = 27 AddPlotRPC = 28 SetPlotFrameRangeRPC = 29 DeletePlotKeyframeRPC = 30 MovePlotKeyframeRPC = 31 DeleteActivePlotsRPC = 32 HideActivePlotsRPC = 33 DrawPlotsRPC = 34 DisableRedrawRPC = 35 RedrawRPC = 36 SetActivePlotsRPC = 37 ChangeActivePlotsVarRPC = 38 AddOperatorRPC = 39 AddInitializedOperatorRPC = 40 PromoteOperatorRPC = 41 DemoteOperatorRPC = 42 RemoveOperatorRPC = 43 RemoveLastOperatorRPC = 44 RemoveAllOperatorsRPC = 45 SaveWindowRPC = 46 SetDefaultPlotOptionsRPC = 47 SetPlotOptionsRPC = 48 SetDefaultOperatorOptionsRPC = 49 SetOperatorOptionsRPC = 50 WriteConfigFileRPC = 51 ConnectToMetaDataServerRPC = 52 IconifyAllWindowsRPC = 53 DeIconifyAllWindowsRPC = 54 ShowAllWindowsRPC = 55 HideAllWindowsRPC = 56 UpdateColorTableRPC = 57 SetAnnotationAttributesRPC = 58 SetDefaultAnnotationAttributesRPC = 59 ResetAnnotationAttributesRPC = 60 SetKeyframeAttributesRPC = 61 SetPlotSILRestrictionRPC = 62 SetViewAxisArrayRPC = 63 SetViewCurveRPC = 64 SetView2DRPC = 65 SetView3DRPC = 66 ResetPlotOptionsRPC = 67 ResetOperatorOptionsRPC = 68 SetAppearanceRPC = 69 ProcessExpressionsRPC = 70 SetLightListRPC = 71 SetDefaultLightListRPC = 72 ResetLightListRPC = 73 SetAnimationAttributesRPC = 74 SetWindowAreaRPC = 75 PrintWindowRPC = 76 ResetViewRPC = 77 RecenterViewRPC = 78 ToggleAllowPopupRPC = 79 ToggleMaintainViewModeRPC = 80 ToggleBoundingBoxModeRPC = 81 ToggleCameraViewModeRPC = 82 TogglePerspectiveViewRPC = 83 ToggleSpinModeRPC = 84 ToggleLockTimeRPC = 85 ToggleLockToolsRPC = 86 ToggleLockViewModeRPC = 87 ToggleFullFrameRPC = 88 UndoViewRPC = 89 RedoViewRPC = 90 InvertBackgroundRPC = 91 ClearPickPointsRPC = 92 SetWindowModeRPC = 93 EnableToolRPC = 94 SetToolUpdateModeRPC = 95 CopyViewToWindowRPC = 96 CopyLightingToWindowRPC = 97 CopyAnnotationsToWindowRPC = 98 CopyPlotsToWindowRPC = 99 ClearCacheRPC = 100 ClearCacheForAllEnginesRPC = 101 SetViewExtentsTypeRPC = 102 ClearRefLinesRPC = 103 SetRenderingAttributesRPC = 104 QueryRPC = 105 CloneWindowRPC = 106 SetMaterialAttributesRPC = 107 SetDefaultMaterialAttributesRPC = 108 ResetMaterialAttributesRPC = 109 SetPlotDatabaseStateRPC = 110 DeletePlotDatabaseKeyframeRPC = 111 MovePlotDatabaseKeyframeRPC = 112 ClearViewKeyframesRPC = 113 DeleteViewKeyframeRPC = 114 MoveViewKeyframeRPC = 115 SetViewKeyframeRPC = 116 OpenMDServerRPC = 117 EnableToolbarRPC = 118 HideToolbarsRPC = 119 HideToolbarsForAllWindowsRPC = 120 ShowToolbarsRPC = 121 ShowToolbarsForAllWindowsRPC = 122 SetToolbarIconSizeRPC = 123 SaveViewRPC = 124 SetGlobalLineoutAttributesRPC = 125 SetPickAttributesRPC = 126 ExportColorTableRPC = 127 ExportEntireStateRPC = 128 ImportEntireStateRPC = 129 ImportEntireStateWithDifferentSourcesRPC = 130 ResetPickAttributesRPC = 131 AddAnnotationObjectRPC = 132 HideActiveAnnotationObjectsRPC = 133 DeleteActiveAnnotationObjectsRPC = 134 
RaiseActiveAnnotationObjectsRPC = 135 LowerActiveAnnotationObjectsRPC = 136 SetAnnotationObjectOptionsRPC = 137 SetDefaultAnnotationObjectListRPC = 138 ResetAnnotationObjectListRPC = 139 ResetPickLetterRPC = 140 SetDefaultPickAttributesRPC = 141 ChooseCenterOfRotationRPC = 142 SetCenterOfRotationRPC = 143 SetQueryOverTimeAttributesRPC = 144 SetDefaultQueryOverTimeAttributesRPC = 145 ResetQueryOverTimeAttributesRPC = 146 ResetLineoutColorRPC = 147 SetInteractorAttributesRPC = 148 SetDefaultInteractorAttributesRPC = 149 ResetInteractorAttributesRPC = 150 GetProcInfoRPC = 151 SendSimulationCommandRPC = 152 UpdateDBPluginInfoRPC = 153 ExportDBRPC = 154 SetTryHarderCyclesTimesRPC = 155 OpenClientRPC = 156 OpenGUIClientRPC = 157 OpenCLIClientRPC = 158 SuppressQueryOutputRPC = 159 SetQueryFloatFormatRPC = 160 SetMeshManagementAttributesRPC = 161 SetDefaultMeshManagementAttributesRPC = 162 ResetMeshManagementAttributesRPC = 163 ResizeWindowRPC = 164 MoveWindowRPC = 165 MoveAndResizeWindowRPC = 166 SetStateLoggingRPC = 167 ConstructDataBinningRPC = 168 RequestMetaDataRPC = 169 SetTreatAllDBsAsTimeVaryingRPC = 170 SetCreateMeshQualityExpressionsRPC = 171 SetCreateTimeDerivativeExpressionsRPC = 172 SetCreateVectorMagnitudeExpressionsRPC = 173 CopyActivePlotsRPC = 174 SetPlotFollowsTimeRPC = 175 TurnOffAllLocksRPC = 176 SetDefaultFileOpenOptionsRPC = 177 SetSuppressMessagesRPC = 178 ApplyNamedSelectionRPC = 179 CreateNamedSelectionRPC = 180 DeleteNamedSelectionRPC = 181 LoadNamedSelectionRPC = 182 SaveNamedSelectionRPC = 183 SetNamedSelectionAutoApplyRPC = 184 UpdateNamedSelectionRPC = 185 InitializeNamedSelectionVariablesRPC = 186 MenuQuitRPC = 187 SetPlotDescriptionRPC = 188 MovePlotOrderTowardFirstRPC = 189 MovePlotOrderTowardLastRPC = 190 SetPlotOrderToFirstRPC = 191 SetPlotOrderToLastRPC = 192 RenamePickLabelRPC = 193 GetQueryParametersRPC = 194 DDTConnectRPC = 195 DDTFocusRPC = 196 ReleaseToDDTRPC = 197 MaxRPC = 198
34.651961
54
0.660914
7,055
0.99802
0
0
0
0
0
0
0
0
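RPCType above is a flat namespace of integer constants, which makes the reverse direction (number back to RPC name) a one-line dictionary build; the sketch below uses only the attributes defined in that class.

RPC_NAMES = {value: name for name, value in vars(RPCType).items()
             if isinstance(value, int) and not name.startswith('_')}

print(RPC_NAMES[105])              # -> 'QueryRPC'
print(RPC_NAMES[RPCType.MaxRPC])   # -> 'MaxRPC'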
54e459da47af69f9dc842497504519a50554986e
774
py
Python
tests/__init__.py
zhangyiming07/QT4C
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
[ "BSD-3-Clause" ]
53
2020-02-20T06:56:03.000Z
2022-03-03T03:09:25.000Z
tests/__init__.py
zhangyiming07/QT4C
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
[ "BSD-3-Clause" ]
6
2020-03-03T03:15:53.000Z
2021-01-29T02:24:06.000Z
tests/__init__.py
zhangyiming07/QT4C
2d8d60efe0a4ad78a2618c5beeb0c456a63da067
[ "BSD-3-Clause" ]
17
2020-02-26T03:51:41.000Z
2022-03-24T02:23:51.000Z
# -*- coding: utf-8 -*-
#
# Tencent is pleased to support the open source community by making QT4C available.
# Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
# QT4C is licensed under the BSD 3-Clause License, except for the third-party components listed below.
# A copy of the BSD 3-Clause License is included in this file.
#

'''Unit tests
'''

import unittest
import os
import sys

test_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(test_dir))


def main():
    runner = unittest.TextTestRunner(verbosity=10 + sys.argv.count('-v'))
    suite = unittest.TestLoader().discover(test_dir, pattern='test_*.py')
    raise SystemExit(not runner.run(suite).wasSuccessful())


if __name__ == '__main__':
    main()
28.666667
103
0.719638
0
0
0
0
0
0
0
0
397
0.507673