Dataset schema (one row per source file; observed value ranges per column):

column                                      dtype           values (min .. max)
hexsha                                      stringlengths   40 .. 40
size                                        int64           5 .. 2.06M
ext                                         stringclasses   10 values
lang                                        stringclasses   1 value
max_stars_repo_path                         stringlengths   3 .. 248
max_stars_repo_name                         stringlengths   5 .. 125
max_stars_repo_head_hexsha                  stringlengths   40 .. 78
max_stars_repo_licenses                     listlengths     1 .. 10
max_stars_count                             int64           1 .. 191k
max_stars_repo_stars_event_min_datetime     stringlengths   24 .. 24
max_stars_repo_stars_event_max_datetime     stringlengths   24 .. 24
max_issues_repo_path                        stringlengths   3 .. 248
max_issues_repo_name                        stringlengths   5 .. 125
max_issues_repo_head_hexsha                 stringlengths   40 .. 78
max_issues_repo_licenses                    listlengths     1 .. 10
max_issues_count                            int64           1 .. 67k
max_issues_repo_issues_event_min_datetime   stringlengths   24 .. 24
max_issues_repo_issues_event_max_datetime   stringlengths   24 .. 24
max_forks_repo_path                         stringlengths   3 .. 248
max_forks_repo_name                         stringlengths   5 .. 125
max_forks_repo_head_hexsha                  stringlengths   40 .. 78
max_forks_repo_licenses                     listlengths     1 .. 10
max_forks_count                             int64           1 .. 105k
max_forks_repo_forks_event_min_datetime     stringlengths   24 .. 24
max_forks_repo_forks_event_max_datetime     stringlengths   24 .. 24
content                                     stringlengths   5 .. 2.06M
avg_line_length                             float64         1 .. 1.02M
max_line_length                             int64           3 .. 1.03M
alphanum_fraction                           float64         0 .. 1
count_classes                               int64           0 .. 1.6M
score_classes                               float64         0 .. 1
count_generators                            int64           0 .. 651k
score_generators                            float64         0 .. 1
count_decorators                            int64           0 .. 990k
score_decorators                            float64         0 .. 1
count_async_functions                       int64           0 .. 235k
score_async_functions                       float64         0 .. 1
count_documentation                         int64           0 .. 1.04M
score_documentation                         float64         0 .. 1
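Each record that follows pairs this repository metadata with the raw file text in the content column and simple per-file statistics (line-length figures, alphanumeric fraction, and count/score pairs for classes, generators, decorators, async functions, and documentation). As a minimal, hypothetical sketch of how rows with this schema could be loaded and filtered, assuming the dump is published as a Hugging Face dataset (the path "example/python-code-metrics" is a placeholder, not the real dataset name):

# Hypothetical sketch: load the table and keep small, well-documented files
# that define at least one class. The dataset path is an assumption.
from datasets import load_dataset

ds = load_dataset("example/python-code-metrics", split="train")

small_documented = ds.filter(
    lambda row: row["size"] < 10_000
    and row["score_documentation"] > 0.2
    and row["count_classes"] > 0
)

row = small_documented[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["max_stars_repo_licenses"])
print(row["content"][:200])  # first 200 characters of the stored source file

Any column in the table above can be filtered the same way, for example thresholding max_stars_count to keep files from widely starred repositories.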
716227dcc03cade8b73786f23f543f0e5e37ee6c
2,516
py
Python
ezeeai/utils/hooks.py
jmarine/ezeeai
091b4ce3bc5794c534084bff3301b15ba8a9be1a
[ "Apache-2.0" ]
19
2019-06-12T03:14:59.000Z
2021-05-31T16:02:53.000Z
ezeeai/utils/hooks.py
jmarine/ezeeai
091b4ce3bc5794c534084bff3301b15ba8a9be1a
[ "Apache-2.0" ]
29
2019-06-27T10:15:38.000Z
2022-03-11T23:46:36.000Z
ezeeai/utils/hooks.py
jmarine/ezeeai
091b4ce3bc5794c534084bff3301b15ba8a9be1a
[ "Apache-2.0" ]
10
2019-05-14T17:45:44.000Z
2020-08-26T13:25:04.000Z
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.training import session_run_hook
from tensorflow.python.training.basic_session_run_hooks import NeverTriggerTimer, SecondOrStepTimer
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.util.tf_export import tf_export

import smtplib
from email.mime.text import MIMEText


@tf_export("train.EmailAtStepHook")
class EmailAtStepHook(session_run_hook.SessionRunHook):
    def __init__(self, user_info, server_info, every_n_iter=None, every_n_secs=None, at_end=False):
        only_log_at_end = (
            at_end and (every_n_iter is None) and (every_n_secs is None))
        if (not only_log_at_end and
                (every_n_iter is None) == (every_n_secs is None)):
            raise ValueError(
                "either at_end and/or exactly one of every_n_iter and every_n_secs "
                "must be provided.")
        if every_n_iter is not None and every_n_iter <= 0:
            raise ValueError("invalid every_n_iter=%s." % every_n_iter)

        self._timer = (
            NeverTriggerTimer() if only_log_at_end else
            SecondOrStepTimer(every_secs=every_n_secs, every_steps=every_n_iter))
        self._log_at_end = at_end
        self._user_info = user_info
        self._server_info = server_info
        self._timer.reset()
        self._iter_count = 0

    def begin(self):
        pass

    def before_run(self, run_context):  # pylint: disable=unused-argument
        self._should_trigger = self._timer.should_trigger_for_step(self._iter_count)

    def after_run(self, run_context, run_values):
        _ = run_context
        if self._should_trigger:
            self._send_email()
        self._iter_count += 1

    def end(self, session):
        if self._log_at_end:
            self._send_email()

    def _send_email(self):
        smtpserver = 'smtp.gmail.com:587'
        header = 'From: %s' % self._server_info['email_address']
        header += 'To: %s' % self._user_info['email_address']
        header += 'Subject: %s' % "Training finished"
        message = header + "Training finished"

        server = smtplib.SMTP(smtpserver)
        server.starttls()
        server.login(self._server_info['login'], self._server_info['password'])
        problems = server.sendmail(self._server_info['email_address'],
                                   self._user_info['email_address'], message)
        server.quit()
37
113
0.68124
2,032
0.807631
0
0
2,068
0.82194
0
0
335
0.133148
71643b0981d730fe3a0cca31ee9497698e110f45
245
py
Python
tests/factory_fixtures/dummy_resource.py
whiletrace/dwellinglybackend
e766b3d612b4c92fd337b82498ab8ef68bd95e1f
[ "MIT" ]
15
2020-07-09T20:51:09.000Z
2021-11-28T21:59:02.000Z
tests/factory_fixtures/dummy_resource.py
codeforpdx/dwellinglybackend
92fee6d19a68ae00750927b8700eaa7195b57668
[ "MIT" ]
148
2020-03-28T22:10:30.000Z
2021-12-19T09:22:59.000Z
tests/factory_fixtures/dummy_resource.py
whiletrace/dwellinglybackend
e766b3d612b4c92fd337b82498ab8ef68bd95e1f
[ "MIT" ]
30
2020-03-12T02:31:27.000Z
2021-07-29T02:40:36.000Z
from flask import request
from flask_restful import Resource

from utils.gatekeeper import allowed_params


class DummyResource(Resource):
    dummy_params = set()

    @allowed_params(dummy_params)
    def put(self):
        return request.json
20.416667
43
0.759184
137
0.559184
0
0
76
0.310204
0
0
0
0
7164421f4b7f16666c296653efa901ece81b5485
3,999
py
Python
quizzes/00.organize.me/hackerrank/sorted_set/server2.py
JiniousChoi/encyclopedia-in-code
77bc551a03a2a3e3808e50016ece14adb5cfbd96
[ "MIT" ]
2
2018-07-20T10:15:49.000Z
2018-07-20T10:16:54.000Z
quizzes/00.organize.me/hackerrank/sorted_set/server2.py
JiniousChoi/encyclopedia-in-code
77bc551a03a2a3e3808e50016ece14adb5cfbd96
[ "MIT" ]
2
2018-06-26T09:12:44.000Z
2019-12-18T00:09:14.000Z
quizzes/00.organize.me/hackerrank/sorted_set/server2.py
JiniousChoi/encyclopedia-in-code
77bc551a03a2a3e3808e50016ece14adb5cfbd96
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import socket, threading
from queue import Queue
import sys, struct

# NOTE: Use this path to create the UDS Server socket
SERVER_SOCKET_PATH = "./socket";


class Result:
    def __init__(self):
        self._evt = threading.Event()
        self._result = None

    def set_result(self, value):
        self._result = value
        self._evt.set()

    def result(self):
        self._evt.wait()
        return self._result


class ActorExit(Exception):
    pass


class Actor(object):
    def __init__(self):
        self._mailbox = Queue()

    def send(self, msg):
        self._mailbox.put(msg)

    def recv(self):
        msg = self._mailbox.get()
        if msg is ActorExit:
            raise ActorExit()
        return msg

    def close(self):
        self.send(ActorExit)

    def start(self):
        self._terminated = threading.Event()
        t = threading.Thread(target=self._bootstrap)
        t.daemon = True
        t.start()

    def _bootstrap(self):
        try:
            self.run()
        except ActorExit:
            pass
        finally:
            self._terminated.set()

    def join(self):
        self._terminated.wait()

    def run(self):
        while True:
            msg = self.recv()


class Worker(Actor):
    def __init__(self):
        super().__init__()
        self.db = {}

    def submit(self, values):
        r = Result()
        self.send((values, r))
        return r

    def run(self):
        while True:
            values, r = self.recv()
            r.set_result(self.execute(values))

    def execute(self, values):
        cmd, *opts = values
        print('[*]', cmd, opts)
        if cmd == 1:  #add
            s, k, v = opts
            self.db.setdefault(s, {})
            self.db[s][k] = v
            return [0]
        elif cmd == 2:  #remove
            s, k = opts
            if s in self.db and k in self.db[s]:
                self.db[s].pop(k)
            return [0]
        elif cmd == 3:  #get size
            s = opts[0]
            size = len(self.db[s]) if s in self.db else 0
            return [1, size]
        elif cmd == 4:  #get value
            s, k = opts
            if s in self.db and k in self.db[s]:
                score = self.db[s][k]
            else:
                score = 0
            return [1, score]
        elif cmd == 5:  #range
            *sets, _, lower, upper = opts
            res = []
            for s in sets:
                if s not in self.db:
                    continue
                for k,v in self.db[s].items():
                    if lower <= v <= upper:
                        res.append((k,v))
            res.sort()
            return [len(res)*2] + [e for kv in res for e in kv]
        elif cmd == 6:  #disconnect
            return None
        else:
            raise Exception("Not supported CMD(%s)" % (cmd))


FMT = "!L"


def read_number_from_socket(connection):
    return struct.unpack(FMT, connection.recv(4))[0]


def write_number_to_socket(connection, number):
    connection.send(struct.pack(FMT, number))


def process_client_connection(connection, worker):
    while True:
        value_num = read_number_from_socket(connection)
        values = []
        for _ in range(value_num):
            values.append(read_number_from_socket(connection))
        res = worker.submit(values)
        if res.result() == None:
            break
        for num in res.result():
            write_number_to_socket(connection, num)
    connection.close()


def main():
    worker = Worker()
    worker.start()

    s = socket.socket(socket.AF_UNIX)
    s.bind(SERVER_SOCKET_PATH)
    s.listen(1)
    while True:
        cl, addr = s.accept()
        t = threading.Thread(target = process_client_connection, args=(cl, worker))
        t.start()

    #worker.close()
    s.close()


if __name__ == '__main__':
    main()
24.838509
83
0.507877
2,745
0.686422
0
0
0
0
0
0
189
0.047262
7166d9cc0195426344d7d645ff648763fd1d1b77
40
py
Python
vnpy/gateway/rohon/__init__.py
funrunskypalace/vnpy
2d87aede685fa46278d8d3392432cc127b797926
[ "MIT" ]
323
2015-11-21T14:45:29.000Z
2022-03-16T08:54:37.000Z
vnpy/gateway/rohon/__init__.py
funrunskypalace/vnpy
2d87aede685fa46278d8d3392432cc127b797926
[ "MIT" ]
9
2017-03-21T08:26:21.000Z
2021-08-23T06:41:17.000Z
vnpy/gateway/rohon/__init__.py
funrunskypalace/vnpy
2d87aede685fa46278d8d3392432cc127b797926
[ "MIT" ]
148
2016-09-26T03:25:39.000Z
2022-02-06T14:43:48.000Z
from .rohon_gateway import RohonGateway
20
39
0.875
0
0
0
0
0
0
0
0
0
0
7169f3d04044834201fc8a2b35d915d5a016859d
1,283
py
Python
dnd/mobile/urls.py
dndtools2/dndtools2
6bd794349b84f3018dd0bd12712535924557c166
[ "MIT" ]
null
null
null
dnd/mobile/urls.py
dndtools2/dndtools2
6bd794349b84f3018dd0bd12712535924557c166
[ "MIT" ]
null
null
null
dnd/mobile/urls.py
dndtools2/dndtools2
6bd794349b84f3018dd0bd12712535924557c166
[ "MIT" ]
null
null
null
from django.conf.urls import patterns, url, include

from .views import force_desktop_version, return_to_mobile_version

app_name = 'mobile'

urlpatterns = [
    # force desktop
    url(r'^force-desktop-version/$', force_desktop_version, name='force_desktop_version'),
    # return to mobile version
    url(r'^return-to-mobile-version/$', return_to_mobile_version, name='return_to_mobile_version'),
    # index
    url(r'^', include('dnd.mobile.index.urls')),
    # character classes
    url(r'^classes/', include('dnd.mobile.character_classes.urls')),
    # feats
    url(r'^feats/', include('dnd.mobile.feats.urls')),
    # items
    url(r'^items/', include('dnd.mobile.items.urls')),
    # languages
    url(r'^languages/', include('dnd.mobile.languages.urls')),
    # monsters
    url(r'^monsters/', include('dnd.mobile.monsters.urls')),
    # races
    url(r'^races/', include('dnd.mobile.races.urls')),
    # rulebooks
    url(r'^rulebooks/', include('dnd.mobile.rulebooks.urls')),
    # rules
    url(r'^rules/', include('dnd.mobile.rules.urls')),
    # skills
    url(r'^skills/', include('dnd.mobile.skills.urls')),
    # spells
    url(r'^spells/', include('dnd.mobile.spells.urls')),
    # deities
    url(r'^deities/', include('dnd.mobile.deities.urls')),
]
26.183673
99
0.653936
0
0
0
0
0
0
0
0
700
0.545596
716d93f8130aaab6f0fe666657a995579882463d
698
py
Python
ros_aruco.py
esteng/guiding-multi-step
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
[ "BSD-2-Clause" ]
69
2019-09-30T13:42:02.000Z
2022-03-28T08:37:51.000Z
ros_aruco.py
esteng/guiding-multi-step
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
[ "BSD-2-Clause" ]
5
2019-10-23T20:03:42.000Z
2021-07-10T09:43:50.000Z
ros_aruco.py
esteng/guiding-multi-step
3f0db0ba70b5851cc83878f4ed48cf82342a2ddf
[ "BSD-2-Clause" ]
18
2019-11-17T20:57:46.000Z
2022-03-15T10:46:25.000Z
"""
Calibrate with the ROS package aruco_detect
"""

import rospy
import roslib
from geometry_msgs.msg import Transform


class ROSArUcoCalibrate:
    def __init__(self, aruco_tag_len=0.0795):
        print("Please roslaunch roslaunch aruco_detect aruco_detect.launch before you run!")
        self.aruco_tf_topic = "/fiducial_transforms"
        self._aruco_tf_info_sub = rospy.Subscriber(self.aruco_tf_topic, Transform, self._tfCb)
        self.aruco_tf = None

    def _tfCb(self, tf_msg):
        if tf_msg is None:
            rospy.logwarn("_tfCb: tf_msg is None!")
        self.aruco_tf = tf_msg

    def get_tf(self):
        aruco_tf = self.aruco_tf
        return aruco_tf
24.928571
94
0.679083
574
0.82235
0
0
0
0
0
0
174
0.249284
716e210884f18d925519c5ee8a6aa1f846b9c04f
3,977
py
Python
utils/utils.py
mmalandra-kb4/service-metrics-gatherer
f9a795a43d491ef59a32121ab4ed5c2c62cb968b
[ "Apache-2.0" ]
null
null
null
utils/utils.py
mmalandra-kb4/service-metrics-gatherer
f9a795a43d491ef59a32121ab4ed5c2c62cb968b
[ "Apache-2.0" ]
null
null
null
utils/utils.py
mmalandra-kb4/service-metrics-gatherer
f9a795a43d491ef59a32121ab4ed5c2c62cb968b
[ "Apache-2.0" ]
2
2022-01-28T18:31:21.000Z
2022-03-03T14:42:48.000Z
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
import re
import os
import json
from urllib.parse import urlparse
import datetime

logger = logging.getLogger("metricsGatherer.utils")


def remove_credentials_from_url(url):
    parsed_url = urlparse(url)
    new_netloc = re.sub("^.+?:.+?@", "", parsed_url.netloc)
    return url.replace(parsed_url.netloc, new_netloc)


def get_credentials_from_url(url):
    parsed_url = urlparse(url)
    new_netloc = re.search("^(.+?):(.+?)@", parsed_url.netloc)
    try:
        username = new_netloc.group(1).strip()
        password = new_netloc.group(2).strip()
        return username, password
    except:  # noqa
        return "", ""


def read_json_file(folder, filename, to_json=False):
    """Read fixture from file"""
    with open(os.path.join(folder, filename), "r") as file:
        return file.read() if not to_json else json.loads(file.read())


def is_the_time_for_task_starting(allowed_start_time, allowed_end_time):
    start = datetime.time(int(allowed_start_time.split(":")[0]), int(allowed_start_time.split(":")[1]))
    end = datetime.time(int(allowed_end_time.split(":")[0]), int(allowed_end_time.split(":")[1]))
    now_time = datetime.datetime.now().time()
    if start > end:
        return (now_time >= start and now_time <= datetime.time(23, 59)) or\
            (now_time >= datetime.time(0, 0) and now_time <= end)
    return now_time >= start and now_time <= end


def take_the_date_to_check():
    now_time = datetime.datetime.now().time()
    if (now_time >= datetime.time(12, 0) and now_time <= datetime.time(23, 59)):
        return datetime.datetime.now()
    return datetime.datetime.now() - datetime.timedelta(days=1)


def build_url(main_url, url_params):
    """Build url by concating url and url_params"""
    return main_url + "/" + "/".join(url_params)


def unite_project_name(project_id, prefix):
    return prefix + project_id


def parse_conditions(conditions):
    parsed_conditions = []
    for condition in conditions.split("|"):
        if not condition.strip():
            continue
        chosen_operator = ""
        for operator in [">=", "<=", "==", "=", "<", ">"]:
            if operator in condition:
                chosen_operator = operator
                break
        condition_changed = condition.replace(chosen_operator, " ").split()
        if len(condition_changed) == 2:
            metric_score = None
            try:
                metric_score = int(condition_changed[1].strip())
            except:  # noqa
                try:
                    metric_score = float(condition_changed[1].strip())
                except:  # noqa
                    pass
            if metric_score is not None:
                parsed_conditions.append(
                    (condition_changed[0].strip(), chosen_operator, metric_score))
    return parsed_conditions


def compare_metrics(cur_metric, metric_threshold, operator):
    if operator == ">=":
        return cur_metric >= metric_threshold
    if operator == ">":
        return cur_metric > metric_threshold
    if operator == "<=":
        return cur_metric <= metric_threshold
    if operator == "<":
        return cur_metric < metric_threshold
    if operator in ["==", "="]:
        return cur_metric == metric_threshold
    return False


def convert_metrics_to_string(cur_metrics):
    return ";".join(["%s:%s" % (metric[0], metric[1]) for metric in cur_metrics])
33.70339
103
0.647473
0
0
0
0
0
0
0
0
808
0.203168
716fc75d575164c084b19d0f3c008a98785ed3a6
20,287
py
Python
OSAnalysisHelper.py
nassermarafi/SRCSWArchetypes
105a5e40ef0ba1951108dc52b382ae0c5457057a
[ "MIT" ]
7
2020-04-29T08:44:12.000Z
2022-03-05T04:00:11.000Z
OSAnalysisHelper.py
nassermarafi/SRCSWArchetypes
105a5e40ef0ba1951108dc52b382ae0c5457057a
[ "MIT" ]
null
null
null
OSAnalysisHelper.py
nassermarafi/SRCSWArchetypes
105a5e40ef0ba1951108dc52b382ae0c5457057a
[ "MIT" ]
4
2019-12-20T04:38:11.000Z
2021-11-21T18:25:34.000Z
from __future__ import absolute_import __author__ = 'marafi' def SolutionAlgorithim(OData, Dt, Tol, Steps): #Insert within the While loop, make sure parameter "ok" is defined import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton()) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def SolutionAlgorithimV2(OData, Dt, Tol, Steps): #Insert within the While loop, make sure parameter "ok" is defined import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %f and Tol: %f ... "'%(Dt,Tol))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = 6)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Bisection... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch Secant... 
"')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying NewtonLineSearch RegulaFalsi... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %f ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def SolutionAlgorithimKrylovOnly(OData, Dt, Tol, Steps, MaxDim = 6): #Insert within the While loop, make sure parameter "ok" is defined import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Lower Dt: %e and Tol: %e ... "'%(Dt,Tol))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Krylov... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 1000, 2)) # OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton(MaxDim = MaxDim)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze %d %e ]'%(Steps,Dt))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def SenSolutionAlgorithim(OData, Dt, Steps, Tol = 1e-12, KrylovMaxDim = 12, MinDt = 1e-12, NoOfIterations=3000): import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('set conv_tol %e'%Tol)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set max_iter %d;'%NoOfIterations)) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, 3000, 0)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('test EnergyIncr $conv_tol $max_iter;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('algorithm Newton;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('integrator Newmark 0.5 0.25;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('analysis Transient;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set dt %e;'%Dt)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set min_dt %e;'%MinDt)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set n_steps %d;'%Steps)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set cur_step 1;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set div 10.0;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set tol 1.0e-12;')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set eigenvalue [eigen 9];')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('modalDamping 0.02;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('while {$cur_step < $n_steps} {')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol, NoOfIterations, 0)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr $conv_tol $max_iter;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm Newton;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr $dt];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> analysis failed to converge at step $cur_step";')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "> trying KrylovNewton";')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' algorithm KrylovNewton -maxDim %d;'%KrylovMaxDim)) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok != 0} {')) 
OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t 0.0;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt/$div/$tol)*$tol];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp 0.0;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' while {$t < $dt} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < $min_dt} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< model did not converge (reason: time step less than $min_dt)";')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "<< exiting safely";')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' wipe;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' exit;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$dt_temp < [expr $dt/pow($div, 2)]} {')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol*10, NoOfIterations, 0)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript(' test EnergyIncr [expr $conv_tol*10.0] $max_iter;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set ok [analyze 1 $dt_temp];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$ok == 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set t [expr round(($t + $dt_temp)/$tol)*$tol];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t [expr round(($mini_t + $dt_temp)/$tol)*$tol];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$mini_t >= $mini_dt_temp} {set dt_temp [expr round($dt_temp*$div/$tol)*$tol]};')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' } else {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_t 0.0;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set mini_dt_temp [expr round($dt_temp/$tol)*$tol];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' set dt_temp [expr round($dt_temp/$div/$tol)*$tol];')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' if {$cur_step % 1 == 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' puts "Running Tim History Step: $cur_step out of %d (Sen Algo.)";'%Steps)) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' };')) OData.AddObject(OpenSeesAPI.TCL.TCLScript(' incr cur_step;')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('};')) def PushOverSolutionAlgorithim(OData, StepSize, Tol, ControlNode): #Insert within the While loop, make sure parameter "ok" is defined import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol))) OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton()) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... 
"')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton with Initial Tangent ... "')) # OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) # OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Newton(Initial=True)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) # # OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Broyden ... "')) # OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) # OData.AddObject(OpenSeesAPI.Analysis.Algorithm.Broyden(8)) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) # OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def PushOverSolutionAlgorithimDispIncr(OData, StepSize, Tol, ControlNode): #Insert within the While loop, make sure parameter "ok" is defined import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol))) OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton()) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... 
"')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def PushOverSolutionAlgorithimConstantAlgorithm(OData, StepSize, Tol, ControlNode, Iter=1000): import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,1000,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton()) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def PushOverSolutionAlgorithimConstantAlgorithmDispIncr(OData, StepSize, Tol, ControlNode, NoOfIterations=1000): import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Smaller Step: %f and Tol: %f ... "'%(StepSize,Tol))) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.Analysis.Integrator.Static.DisplacementControl(ControlNode, 1, StepSize)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.NormDispIncr(Tol,NoOfIterations,2)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton()) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) def PushOverSolutionAlgorithimConstantTol(OData, Tol, Iter=1000): import OpenSeesAPI OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying KrylovNewton ... 
"')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.KrylovNewton()) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch(Tolerance=0.8)) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search BiSection ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Bisection')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search Secant... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('Secant')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('if {$ok != 0} {')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('puts "Trying Newton Line Search RegulaFalsi ... "')) OData.AddObject(OpenSeesAPI.Analysis.Test.EnergyIncr(Tol,Iter,0)) OData.AddObject(OpenSeesAPI.Analysis.Algorithm.NewtonLineSearch('RegulaFalsi')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('set ok [analyze 1]')) OData.AddObject(OpenSeesAPI.TCL.TCLScript('}'))
63.199377
137
0.724257
0
0
0
0
0
0
0
0
5,922
0.291911
717008cf6d0ff4d98caa231046b8d209403318a1
6,193
py
Python
unityparser/commands.py
socialpoint-labs/unity-yaml-parser
91c175140ed32aed301bc34d4311f370da69a8ba
[ "MIT" ]
76
2019-06-17T13:17:59.000Z
2022-03-11T19:39:24.000Z
unityparser/commands.py
socialpoint-labs/unity-yaml-parser
91c175140ed32aed301bc34d4311f370da69a8ba
[ "MIT" ]
17
2019-06-07T09:04:27.000Z
2022-02-16T19:01:38.000Z
unityparser/commands.py
socialpoint-labs/unity-yaml-parser
91c175140ed32aed301bc34d4311f370da69a8ba
[ "MIT" ]
9
2019-10-08T16:07:35.000Z
2021-12-08T15:27:00.000Z
import re from argparse import ArgumentParser from multiprocessing import Pool, Manager, Process from pathlib import Path from .utils import UnityDocument YAML_HEADER = '%YAML' class UnityProjectTester: """ Class to run tests on a given Unity project folder """ AVAILABLE_COMMANDS = ('test_no_yaml_is_modified',) def __init__(self): self.options = None def run(self): top_parser = ArgumentParser() subparser = top_parser.add_subparsers() subparser.required = True for cmd in UnityProjectTester.AVAILABLE_COMMANDS: fn = getattr(self, cmd) parser = subparser.add_parser(cmd, help=fn.__doc__) parser.set_defaults(func=fn) top_parser.add_argument('project_path', help='Path to the Unity project folder') top_parser.add_argument('--exclude', help='Exclude regexp when searching project files. Can be specified multiple times.', default=None, action='append') top_parser.add_argument('--keep-changes', help='If a file changes after serialization, do not revert the changes.', default=False, action='store_true') top_parser.add_argument('--dry-run', help='Dont\'t modify.', default=False, action='store_true') try: self.options = top_parser.parse_args() except TypeError: top_parser.print_help() return 2 # run given function self.options.func() def test_no_yaml_is_modified(self): """ Recurse the whole project folder looking for '.asset' files, load and save them all, and check that there are no modifications """ if self.options.dry_run: print("Dry-run mode enabled: YAMLs won't be dumped.") if self.options.keep_changes: print("Keep changes mode will not have any effect during dry run.") elif self.options.keep_changes: print("Keep changes mode enabled: Changes to files will be kept.") project_path = Path(self.options.project_path) asset_file_paths = [p for p in project_path.rglob('*.asset')] print("Found {} '.asset' files".format(len(asset_file_paths))) def is_path_included(path): # compare regexp against absolute path return not any(rexp.search(str(path.resolve())) for rexp in rexps) if self.options.exclude is not None: rexps = [re.compile(rexp) for rexp in self.options.exclude] valid_file_paths = [p for p in filter(is_path_included, asset_file_paths)] print("Excluded {} '.asset' files".format(len(asset_file_paths) - len(valid_file_paths))) else: valid_file_paths = asset_file_paths file_results = [] with Manager() as manager: print_queue = manager.Queue() diff_list = manager.list() queue_process = Process(target=UnityProjectTester.read_output, args=(print_queue,)) queue_process.start() with Pool() as pool: for f in valid_file_paths: async_res = pool.apply_async(UnityProjectTester.open_and_save, (f, print_queue, diff_list, self.options.keep_changes, self.options.dry_run)) file_results.append((f, async_res)) pool.close() pool.join() # signal end of queue with None token print_queue.put(None) queue_process.join() error_results = list(filter(lambda r: not r[1].successful(), file_results)) if len(error_results): # raise the first exception file_path, result = error_results[0] print("Python process evaluating file {} failed with the following exception:".format( file_path.resolve()), flush=True) result.get() if len(diff_list): print("{} files are different now:".format(len(diff_list))) print('\n'.join([str(f.resolve()) for f in diff_list])) @staticmethod def read_output(print_queue): msg = print_queue.get() while msg is not None: print(msg, flush=True) msg = print_queue.get() @staticmethod def open_and_save(asset_file_path, print_queue, diff_list, keep_changes=False, dry_run=False): # check YAML version header, save 
original content with open(str(asset_file_path), 'rb') as fp: header = fp.read(len(YAML_HEADER)) try: is_yaml_file = header.decode('utf-8') == YAML_HEADER except UnicodeDecodeError: is_yaml_file = False finally: if not is_yaml_file: print_queue.put("Ignoring non-yaml file {}".format(asset_file_path)) return else: fp.seek(0) print_queue.put("Processing {}".format(asset_file_path)) a_file_content = fp.read() doc = UnityDocument.load_yaml(str(asset_file_path)) if dry_run: return try: doc.dump_yaml() with open(str(asset_file_path), 'rb') as fp: b_file_content = fp.read() # compare if a_file_content != b_file_content: diff_list.append(asset_file_path) if not keep_changes: with open(str(asset_file_path), 'wb') as fp: fp.write(a_file_content) except Exception: with open(str(asset_file_path), 'wb') as fp: fp.write(a_file_content) raise if __name__ == '__main__': # None is considered successful code = UnityProjectTester().run() or 0 exit(code)
39.698718
117
0.56467
5,888
0.950751
0
0
1,615
0.260778
0
0
1,157
0.186824
71707cf255fd21e42d8d8ac95ead6668d4d78aed
582
py
Python
DP/Leetcode 221. Maximal Square.py
kaizhengny/LeetCode
67d64536ab80f4966699fe7460d165f2a98d6a82
[ "MIT" ]
31
2020-06-23T00:40:04.000Z
2022-01-08T11:06:24.000Z
DP/Leetcode 221. Maximal Square.py
kaizhengny/LeetCode
67d64536ab80f4966699fe7460d165f2a98d6a82
[ "MIT" ]
null
null
null
DP/Leetcode 221. Maximal Square.py
kaizhengny/LeetCode
67d64536ab80f4966699fe7460d165f2a98d6a82
[ "MIT" ]
7
2020-04-30T08:46:03.000Z
2021-08-28T16:25:54.000Z
class Solution:
    def maximalSquare(self, matrix: List[List[str]]) -> int:
        if not matrix: return 0
        m, n = len(matrix), len(matrix[0])
        dp = [[0]*n for _ in range(m)]
        res = 0
        for i in range(m):
            dp[i][0] = int(matrix[i][0])
        for j in range(n):
            dp[0][j] = int(matrix[0][j])
        for i in range(1, m):
            for j in range(1, n):
                if matrix[i][j] == '1':
                    dp[i][j] = min(dp[i-1][j],dp[i-1][j-1],dp[i][j-1])+1
                    res = max(res, dp[i][j])
        return res**2
36.375
72
0.429553
582
1
0
0
0
0
0
0
3
0.005155
7171d1486ab6a395eb9ff27ecf4115ab48da0237
3,767
py
Python
dokang/harvesters/__init__.py
Polyconseil/dokang
b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08
[ "BSD-3-Clause" ]
6
2016-07-04T17:16:42.000Z
2018-11-13T08:10:21.000Z
dokang/harvesters/__init__.py
Polyconseil/dokang
b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08
[ "BSD-3-Clause" ]
6
2016-02-23T15:08:51.000Z
2017-01-02T11:57:45.000Z
dokang/harvesters/__init__.py
Polyconseil/dokang
b0ab3e4aabfb97adb2a2e877a42fc1896e5fcf08
[ "BSD-3-Clause" ]
5
2015-04-05T14:07:11.000Z
2017-04-13T14:08:02.000Z
# -*- coding: utf-8 -*-
# Copyright (c) Polyconseil SAS. All rights reserved.

import hashlib
import json
import logging
import os
import re

from .html import html_config, HtmlHarvester  # pylint: disable=unused-import
from .sphinx import (  # pylint: disable=unused-import
    sphinx_config,
    sphinx_rtd_config,
    SphinxHarvester,
    ReadTheDocsSphinxHarvester
)

logger = logging.getLogger(__name__)


def _must_process_path(path, include, exclude):
    for exp in include:
        if exp.match(path):
            return True
    for exp in exclude:
        if exp.match(path):
            return False
    return True


def _compute_hash(path):
    h = hashlib.md5()
    with open(path, 'rb') as fp:
        while 1:
            buff = fp.read(8192)
            if not buff:
                break
            h.update(buff)
    return h.hexdigest()


def harvest_set(base_dir, doc_set, config, hashes, force):
    """Harvest a document set and return documents as dictionaries.

    ``config`` is the harvester configuration. It should contain a key
    for each supported file extensions.

    ``hashes`` is a dictionary that links the path of each indexed file
    to its hash. It is used to decide whether the document should be
    indexed again.

    ``force`` indicates whether to reindex a document even if it has not
    ben modified since the last indexation.

    This function is a generator. It yields dictionaries. Each dictionary
    should represent a document and contain the following keys in addition
    to the keys returned by the harvester itself. Each text-like value
    should be a string (in Python 3) or a unicode object (in Python 2).

    path
        The path of the document relative to the root of the document set.

    set
        The id of the document set. It should be ``doc_set``.
    """
    config_copy = config.copy()
    include = [re.compile(exp) for exp in config_copy.pop('include') or ()]
    exclude = [re.compile(exp) for exp in config_copy.pop('exclude') or ()]
    extensions = config_copy
    for dir_path, _dir_names, file_names in os.walk(base_dir):
        for filename in file_names:
            path = os.path.join(dir_path, filename)
            relative_path = os.path.relpath(path, base_dir)
            if not _must_process_path(relative_path, include, exclude):
                logger.debug('Excluded file "%s": include/exclude rules.', relative_path)
                continue
            _, extension = os.path.splitext(filename)
            extension = extension.lstrip('.')  # remove leading dot
            harvester_class = extensions.get(extension)
            if harvester_class is None:
                logger.debug('Excluded file "%s": no harvester found for %s.', relative_path, extension)
                continue
            current_hash = _compute_hash(path)
            indexed_hash = hashes.get(relative_path)
            if not force and (indexed_hash == current_hash):
                logger.debug('Excluded file: "%s": not modified since last indexation.', relative_path)
                continue
            try:
                logger.debug('Indexing file "%s"', relative_path)
                doc = harvester_class().harvest_file(path)
            except Exception:  # pylint: disable=broad-except
                logger.exception("Could not index document %s", path)
            else:
                if doc:
                    if relative_path == 'index.html':
                        with open(os.path.join(base_dir, '.dokang'), 'w') as fp:
                            json.dump({'title': doc['title']}, fp)
                    doc['path'] = relative_path
                    doc['set'] = doc_set
                    doc['hash'] = current_hash
                    yield doc
38.050505
104
0.617733
0
0
2,912
0.773029
0
0
0
0
1,383
0.367136
7171ec803ebbc9d578b8e216bcbe447dfe0af3a6
27
py
Python
__init__.py
semccomas/string-method-gmxapi
fb68dce792d35df739225b1048e0816a4a61d45e
[ "MIT" ]
6
2020-10-15T16:43:19.000Z
2022-01-21T09:09:13.000Z
__init__.py
semccomas/string-method-gmxapi
fb68dce792d35df739225b1048e0816a4a61d45e
[ "MIT" ]
9
2020-07-01T08:36:49.000Z
2021-06-23T07:15:53.000Z
__init__.py
semccomas/string-method-gmxapi
fb68dce792d35df739225b1048e0816a4a61d45e
[ "MIT" ]
5
2020-07-15T06:08:00.000Z
2021-07-02T14:24:59.000Z
__all__ = ["stringmethod"]
13.5
26
0.703704
0
0
0
0
0
0
0
0
14
0.518519
71727855c3b5a49ba770b23fd1b96b453bcf8530
855
py
Python
carPooling/migrations/0018_auto_20190521_1651.py
yangtao4389/pinche
81463761058f67d47cea980f29a061b1e1b2d08a
[ "Apache-2.0" ]
1
2020-09-30T01:27:57.000Z
2020-09-30T01:27:57.000Z
carPooling/migrations/0018_auto_20190521_1651.py
yangtao4389/pinche
81463761058f67d47cea980f29a061b1e1b2d08a
[ "Apache-2.0" ]
9
2020-06-05T19:51:33.000Z
2022-03-11T23:40:25.000Z
carPooling/migrations/0018_auto_20190521_1651.py
yangtao4389/pinche
81463761058f67d47cea980f29a061b1e1b2d08a
[ "Apache-2.0" ]
null
null
null
# Generated by Django 2.0.4 on 2019-05-21 16:51

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('carPooling', '0017_carpoolingrecunbook'),
    ]

    operations = [
        migrations.AlterField(
            model_name='carpoolinguserconf',
            name='c_name',
            field=models.CharField(max_length=128, null=True, verbose_name='真实姓名'),
        ),
        migrations.AlterField(
            model_name='carpoolinguserconf',
            name='c_phone',
            field=models.CharField(db_index=True, max_length=11, verbose_name='电话号码'),
        ),
        migrations.AlterField(
            model_name='carpoolinguserconf',
            name='c_weixin_id',
            field=models.CharField(db_index=True, max_length=128, null=True, verbose_name='微信id'),
        ),
    ]
29.482759
98
0.611696
782
0.893714
0
0
0
0
0
0
213
0.243429
717345e66810546b06a5a6c9cdbe99a57810c275
357
py
Python
src/fuckbot/ticker.py
Zer0-One/fuckbot
02f5a112988e25a9f04a9a941a55f11cf51c3d8f
[ "BSD-2-Clause" ]
null
null
null
src/fuckbot/ticker.py
Zer0-One/fuckbot
02f5a112988e25a9f04a9a941a55f11cf51c3d8f
[ "BSD-2-Clause" ]
null
null
null
src/fuckbot/ticker.py
Zer0-One/fuckbot
02f5a112988e25a9f04a9a941a55f11cf51c3d8f
[ "BSD-2-Clause" ]
1
2022-01-24T21:20:43.000Z
2022-01-24T21:20:43.000Z
import discord
import logging

TRADING_API_URL='https://cloud.iexapis.com/stable/stock/{0}/quote'
TRADING_API_ICON='https://iextrading.com/favicon.ico'


def ticker_embed(symbol):
    ticker = discord.Embed(title=f"{symbol}".upper(), type="rich", color=3029236, url=TRADING_API_URL.format(symbol))
    ticker.set_author(name="IEXTrading")

    return ticker
29.75
117
0.756303
0
0
0
0
0
0
0
0
115
0.322129
7173dccce721752a801b4b3463958745f87a8a0c
9,769
py
Python
minos/lib/util/StateSet.py
johny-c/minos
660e991f44118382f4a3cb7566670c4159d33fe3
[ "MIT" ]
1
2020-02-18T08:19:32.000Z
2020-02-18T08:19:32.000Z
minos/lib/util/StateSet.py
johny-c/minos
660e991f44118382f4a3cb7566670c4159d33fe3
[ "MIT" ]
4
2019-12-27T12:44:58.000Z
2021-05-07T17:41:09.000Z
minos/lib/util/StateSet.py
johny-c/minos
660e991f44118382f4a3cb7566670c4159d33fe3
[ "MIT" ]
1
2019-10-15T00:28:39.000Z
2019-10-15T00:28:39.000Z
import bz2 import csv import collections import math from enum import Enum class Select(Enum): FIRST = 'first' RANGE_KEY = 'range_key' RANGE_VALUE = 'range_value' class SelectPolicy: def __init__(self, policy, field=None): self.policy = policy self.field = field class StateSet: """ Wrapper for set of episode val/test states """ def __init__(self, scenes_file=None, states_files=None, scene_filter=None, episode_filter=None, max_states_per_scene=None, select_policy=SelectPolicy(Select.FIRST)): self.states = [] self.scenes = [] self.scenes_by_id = {} self.states_by_scene = {} self.select_policy = select_policy if scenes_file: self._load_scenes(scenes_file, scene_filter) if states_files: if type(states_files) is str: self._load_states(states_files, max_states_per_scene, episode_filter) elif isinstance(states_files, collections.Iterable): for states_file in states_files: self._load_states(states_file, max_states_per_scene, episode_filter) self._embed_states_in_scenes() def get_splits(self, max_states_per_scene=None): """Get dictionary of StateSets keyed by scene 'set' i.e. dataset split""" scenes_by_split = {} for scene in self.scenes: scenes_by_split.setdefault(scene['set'], []).append(scene) state_sets_dict = {} for split, scenes in scenes_by_split.items(): ss = StateSet() ss._populate_from_lists(scenes, self.states_by_scene, max_states_per_scene) state_sets_dict[split] = ss return state_sets_dict def get_scenes(self): return self.scenes def get_states(self): return self.states def get_states_by_scene_id(self, scene_id): return self.states_by_scene[scene_id] def _select_n_states(self, states, n): # Select n states from big list of states policy = self.select_policy.policy field = self.select_policy.field if n is not None and n < len(states): if policy == Select.FIRST: if field is not None: # sort by field states = sorted(states, key=lambda x: x[field]) return states[:n] elif policy == Select.RANGE_KEY: # sort by field states = sorted(states, key=lambda x: x[field]) # select by evenly dividing indices r = len(states)/float(n) selected = [] for i in range(n): si = int(math.floor(math.ceil(r*i)/2)) selected.append(states[si]) return selected elif policy == Select.RANGE_VALUE: # sort by field and get range (value) states = sorted(states, key=lambda x: x[field]) fmin = states[0][field] fmax = states[-1][field] # print('Range is %f to %f' % (fmin,fmax)) # from range, divide up into n buckets r = (fmax-fmin)/float(n) buckets = [] for i in range(n): buckets.append([]) for state in states: bi = int(min(math.ceil((state[field] - fmin)/r), n-1)) buckets[bi].append(state) # make sure all buckets have something for i, bucket in enumerate(buckets): if len(bucket) == 0: # print('Nothing in bucket %d' % i) # still some from other buckets pi = max(i-1, 0) ni = min(i+1, n-1) nlen = len(buckets[ni]) plen = len(buckets[pi]) if nlen > plen: # take half from bucket[ni] and put in current bucket k = math.floor(nlen/2) buckets[i] = buckets[ni][:k] buckets[ni] = buckets[ni][k:] else: k = math.floor(plen/2) buckets[i] = buckets[pi][:k] buckets[pi] = buckets[pi][k:] selected = [] for bucket in buckets: bii = math.floor(len(bucket)/2) selected.append(bucket[bii]) return selected else: raise ValueError('Unsupported select_policy ' + policy) else: return states def _populate_from_lists(self, my_scenes, my_states_by_scene, max_states_per_scene): self.scenes = my_scenes for scene in my_scenes: scene_id = scene['id'] self.scenes_by_id[scene_id] = scene if scene_id in my_states_by_scene: my_states = 
self._select_n_states(my_states_by_scene[scene_id], max_states_per_scene) self.states_by_scene[scene_id] = my_states self.states += my_states def _load_scenes(self, filename, scene_filter): with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f: reader = csv.DictReader(f) self.scenes = [] for r in reader: for v in ['nrooms', 'nobjects', 'nlevels']: if v in r: r[v] = int(r[v]) for v in ['dimX', 'dimY', 'dimZ', 'floorArea']: if v in r: r[v] = float(r[v]) if scene_filter and not scene_filter(r): continue self.scenes.append(r) self.scenes_by_id[r['id']] = r self.scenes.sort(key=lambda x: x['nobjects']) def _load_states(self, filename, max_states_per_scene, state_filter): with bz2.open(filename, 'rt') if filename.endswith('bz2') else open(filename) as f: reader = csv.DictReader(f) all_states = [r for r in reader] # Convert scene state and group by sceneId counter = 0 for r in all_states: for v in ['startX', 'startY', 'startZ', 'startAngle', 'goalX', 'goalY', 'goalZ', 'dist', 'pathDist']: r[v] = float(r[v]) if v in r else None for v in ['episodeId', 'pathNumDoors', 'pathNumRooms', 'level']: r[v] = int(r[v]) if v in r else None scene_id = r['sceneId'] scene_states = self.states_by_scene.setdefault(scene_id, []) rec = { 'episode_id': counter, 'scene_id': r['sceneId'], 'room_id': r['roomId'], 'start': {'position': [r['startX'], r['startY'], r['startZ']], 'angle': r['startAngle']}, 'goal': {'id': r['goalObjectId'], 'position': [r['goalX'], r['goalY'], r['goalZ']]}, 'dist': r['dist'] } for k in ['pathDist', 'pathNumRooms', 'pathRoomIds', 'pathNumDoors', 'pathDoorIds', 'level']: if k in r: rec[k] = r[k] if not state_filter or state_filter(rec): scene_states.append(rec) counter = counter + 1 # Filter down to states per scene and create big list of all scenes states = [] for scene_id, scene_states in self.states_by_scene.items(): self.states_by_scene[scene_id] = self._select_n_states(scene_states, max_states_per_scene) states += self.states_by_scene[scene_id] self.states = states def _embed_states_in_scenes(self): for state in self.states: scene_id = state['scene_id'] if scene_id in self.scenes_by_id: self.scenes_by_id[scene_id].setdefault('states', []).append(state) scenes_with_no_states = [] for i, scene in enumerate(self.scenes): if 'states' not in scene or len(scene['states']) == 0: scenes_with_no_states.append(scene['id']) del self.scenes_by_id[scene['id']] self.scenes = [s for s in self.scenes if s['id'] not in scenes_with_no_states] #print('Removed scenes with no episode states: ' + ','.join(scenes_with_no_states)) def main(): import argparse # Argument processing parser = argparse.ArgumentParser(description='Load state set') parser.add_argument('-n', '--limit', type=int, help='Number of states per scene') parser.add_argument('--select', default=Select.FIRST, type=Select, help='Number of states per scene') parser.add_argument('--field', default=None, help='Field to use for selection') parser.add_argument('--scenes', type=str, default=None, help='Scenes file to load') parser.add_argument('input', help='Input file to load') args = parser.parse_args() state_set = StateSet(scenes_file=args.scenes, states_files=args.input, max_states_per_scene=args.limit, select_policy=SelectPolicy(args.select, args.field)) for state in state_set.states: print(state) if __name__ == "__main__": main()
41.747863
117
0.522469
8,505
0.870611
0
0
0
0
0
0
1,492
0.152728
7174375d0908f71b2864b3f93d7df2286d52caea
29
py
Python
pagetags/configuration/development.py
pmatigakis/pagetags
5e81d01493548edc2677453819c32de3cf75d159
[ "MIT" ]
null
null
null
pagetags/configuration/development.py
pmatigakis/pagetags
5e81d01493548edc2677453819c32de3cf75d159
[ "MIT" ]
null
null
null
pagetags/configuration/development.py
pmatigakis/pagetags
5e81d01493548edc2677453819c32de3cf75d159
[ "MIT" ]
null
null
null
DEBUG = True
TESTING = False
9.666667
15
0.724138
0
0
0
0
0
0
0
0
0
0
71757b823cf5dc703ab76426a5d125f92b1f4a70
665
py
Python
hpcrocket/pyfilesystem/factory.py
SvenMarcus/hpc-rocket
b28917e7afe6e2e839d1ae58f2e21fba6e3eb61c
[ "MIT" ]
7
2022-01-03T13:52:40.000Z
2022-03-10T16:26:04.000Z
hpcrocket/pyfilesystem/factory.py
SvenMarcus/ssh-slurm-runner
91ea1a052a0362b5b8676b6e429aa3c890359e73
[ "MIT" ]
18
2021-04-16T15:53:55.000Z
2021-09-13T17:38:44.000Z
hpcrocket/pyfilesystem/factory.py
SvenMarcus/hpclaunch
1a0459167bf5d7b26b1d7e46a1b1d073a4a55650
[ "MIT" ]
null
null
null
from hpcrocket.core.filesystem import Filesystem, FilesystemFactory
from hpcrocket.core.launchoptions import Options
from hpcrocket.pyfilesystem.localfilesystem import LocalFilesystem
from hpcrocket.pyfilesystem.sshfilesystem import SSHFilesystem


class PyFilesystemFactory(FilesystemFactory):
    def __init__(self, options: Options) -> None:
        self._options = options

    def create_local_filesystem(self) -> Filesystem:
        return LocalFilesystem(".")

    def create_ssh_filesystem(self) -> Filesystem:
        connection = self._options.connection
        proxyjumps = self._options.proxyjumps
        return SSHFilesystem(connection, proxyjumps)
35
67
0.77594
415
0.62406
0
0
0
0
0
0
3
0.004511
7175fb970f1844dacf40b20065573654fbebe36d
4,053
py
Python
cqlsh_tests/cqlsh_tools.py
vincewhite/cassandra-dtest
a01dce6af73a8656e8740227a811fe63025fb3f4
[ "Apache-2.0" ]
null
null
null
cqlsh_tests/cqlsh_tools.py
vincewhite/cassandra-dtest
a01dce6af73a8656e8740227a811fe63025fb3f4
[ "Apache-2.0" ]
null
null
null
cqlsh_tests/cqlsh_tools.py
vincewhite/cassandra-dtest
a01dce6af73a8656e8740227a811fe63025fb3f4
[ "Apache-2.0" ]
null
null
null
import csv import random import cassandra from cassandra.cluster import ResultSet from typing import List class DummyColorMap(object): def __getitem__(self, *args): return '' def csv_rows(filename, delimiter=None): """ Given a filename, opens a csv file and yields it line by line. """ reader_opts = {} if delimiter is not None: reader_opts['delimiter'] = delimiter with open(filename, 'rb') as csvfile: for row in csv.reader(csvfile, **reader_opts): yield row def assert_csvs_items_equal(filename1, filename2): with open(filename1, 'r') as x, open(filename2, 'r') as y: assert list(x.readlines()) == list(y.readlines()) def random_list(gen=None, n=None): if gen is None: def gen(): return random.randint(-1000, 1000) if n is None: def length(): return random.randint(1, 5) else: def length(): return n return [gen() for _ in range(length())] def write_rows_to_csv(filename, data): with open(filename, 'wb') as csvfile: writer = csv.writer(csvfile) for row in data: writer.writerow(row) csvfile.close def deserialize_date_fallback_int(byts, protocol_version): timestamp_ms = cassandra.marshal.int64_unpack(byts) try: return cassandra.util.datetime_from_timestamp(timestamp_ms / 1000.0) except OverflowError: return timestamp_ms def monkeypatch_driver(): """ Monkeypatches the `cassandra` driver module in the same way that clqsh does. Returns a dictionary containing the original values of the monkeypatched names. """ cache = {'BytesType_deserialize': cassandra.cqltypes.BytesType.deserialize, 'DateType_deserialize': cassandra.cqltypes.DateType.deserialize, 'support_empty_values': cassandra.cqltypes.CassandraType.support_empty_values} cassandra.cqltypes.BytesType.deserialize = staticmethod(lambda byts, protocol_version: bytearray(byts)) cassandra.cqltypes.DateType.deserialize = staticmethod(deserialize_date_fallback_int) cassandra.cqltypes.CassandraType.support_empty_values = True if hasattr(cassandra, 'deserializers'): cache['DesDateType'] = cassandra.deserializers.DesDateType del cassandra.deserializers.DesDateType return cache def unmonkeypatch_driver(cache): """ Given a dictionary that was used to cache parts of `cassandra` for monkeypatching, restore those values to the `cassandra` module. """ cassandra.cqltypes.BytesType.deserialize = staticmethod(cache['BytesType_deserialize']) cassandra.cqltypes.DateType.deserialize = staticmethod(cache['DateType_deserialize']) cassandra.cqltypes.CassandraType.support_empty_values = cache['support_empty_values'] if hasattr(cassandra, 'deserializers'): cassandra.deserializers.DesDateType = cache['DesDateType'] def assert_resultset_contains(got: ResultSet, expected: List[tuple]) -> None: """ So this is slow. I would hope a ResultSet has the capability of pulling data by PK or clustering, however I'm not finding it atm. As such, this method isn't intended for use with large datasets. :param got: ResultSet, expect schema of [a, b] :param expected: list of tuples with 2 members corresponding with a/b schema of ResultSet """ # Adding a touch of sanity check so people don't mis-use this. n^2 is bad. assert len(expected) <= 1000, 'This is a slow comparison method. Don\'t use for > 1000 tuples.' # First quick check: if we have a different count, we can just die. assert len(got.current_rows) == len(expected) for t in expected: assert len(t) == 2, 'Got unexpected tuple len. Expected 2, got tuple: {}'.format(t) found = False for row in got.current_rows: if found: break if row.a == t[0] and row.b == t[1]: found = True assert found, 'Failed to find expected row: {}'.format(t)
33.495868
107
0.683691
81
0.019985
337
0.083148
0
0
0
0
1,276
0.314829
71764b0e93fc239b103c34e487ed538048a2ed7d
5,394
py
Python
tests/unit/sagemaker/tensorflow/test_estimator_init.py
LastRemote/sagemaker-python-sdk
fddf29d9e4383cd3f939253eef47ee79a464dd37
[ "Apache-2.0" ]
1,690
2017-11-29T20:13:37.000Z
2022-03-31T12:58:11.000Z
tests/unit/sagemaker/tensorflow/test_estimator_init.py
LastRemote/sagemaker-python-sdk
fddf29d9e4383cd3f939253eef47ee79a464dd37
[ "Apache-2.0" ]
2,762
2017-12-04T05:18:03.000Z
2022-03-31T23:40:11.000Z
tests/unit/sagemaker/tensorflow/test_estimator_init.py
LastRemote/sagemaker-python-sdk
fddf29d9e4383cd3f939253eef47ee79a464dd37
[ "Apache-2.0" ]
961
2017-11-30T16:44:03.000Z
2022-03-30T23:12:09.000Z
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. from __future__ import absolute_import from mock import Mock, patch from packaging import version import pytest from sagemaker.tensorflow import TensorFlow REGION = "us-west-2" ENV_INPUT = {"env_key1": "env_val1", "env_key2": "env_val2", "env_key3": "env_val3"} @pytest.fixture() def sagemaker_session(): return Mock(name="sagemaker_session", boto_region_name=REGION) def _build_tf(sagemaker_session, **kwargs): return TensorFlow( sagemaker_session=sagemaker_session, entry_point="dummy.py", role="dummy-role", instance_count=1, instance_type="ml.c4.xlarge", **kwargs, ) @patch("sagemaker.fw_utils.python_deprecation_warning") def test_estimator_py2_deprecation_warning(warning, sagemaker_session): estimator = _build_tf(sagemaker_session, framework_version="2.1.1", py_version="py2") assert estimator.py_version == "py2" warning.assert_called_with("tensorflow", "2.1.1") def test_py2_version_deprecated(sagemaker_session): with pytest.raises(AttributeError) as e: _build_tf(sagemaker_session, framework_version="2.1.2", py_version="py2") msg = ( "Python 2 containers are only available with 2.1.1 and lower versions. " "Please use a Python 3 container." ) assert msg in str(e.value) def test_py2_version_is_not_deprecated(sagemaker_session): estimator = _build_tf(sagemaker_session, framework_version="1.15.0", py_version="py2") assert estimator.py_version == "py2" estimator = _build_tf(sagemaker_session, framework_version="2.0.0", py_version="py2") assert estimator.py_version == "py2" def test_framework_name(sagemaker_session): tf = _build_tf(sagemaker_session, framework_version="1.15.2", py_version="py3") assert tf._framework_name == "tensorflow" def test_tf_add_environment_variables(sagemaker_session): tf = _build_tf( sagemaker_session, framework_version="1.15.2", py_version="py3", environment=ENV_INPUT, ) assert tf.environment == ENV_INPUT def test_tf_miss_environment_variables(sagemaker_session): tf = _build_tf( sagemaker_session, framework_version="1.15.2", py_version="py3", environment=None, ) assert not tf.environment def test_enable_sm_metrics(sagemaker_session): tf = _build_tf( sagemaker_session, framework_version="1.15.2", py_version="py3", enable_sagemaker_metrics=True, ) assert tf.enable_sagemaker_metrics def test_disable_sm_metrics(sagemaker_session): tf = _build_tf( sagemaker_session, framework_version="1.15.2", py_version="py3", enable_sagemaker_metrics=False, ) assert not tf.enable_sagemaker_metrics def test_disable_sm_metrics_if_fw_ver_is_less_than_1_15( sagemaker_session, tensorflow_training_version, tensorflow_training_py_version ): if version.Version(tensorflow_training_version) > version.Version("1.14"): pytest.skip("This test is for TF 1.14 and lower.") tf = _build_tf( sagemaker_session, framework_version=tensorflow_training_version, py_version=tensorflow_training_py_version, image_uri="old-image", ) assert tf.enable_sagemaker_metrics is None def test_enable_sm_metrics_if_fw_ver_is_at_least_1_15( sagemaker_session, 
tensorflow_training_version, tensorflow_training_py_version ): if version.Version(tensorflow_training_version) < version.Version("1.15"): pytest.skip("This test is for TF 1.15 and higher.") tf = _build_tf( sagemaker_session, framework_version=tensorflow_training_version, py_version=tensorflow_training_py_version, ) assert tf.enable_sagemaker_metrics def test_require_image_uri_if_fw_ver_is_less_than_1_11( sagemaker_session, tensorflow_training_version, tensorflow_training_py_version ): if version.Version(tensorflow_training_version) > version.Version("1.10"): pytest.skip("This test is for TF 1.10 and lower.") with pytest.raises(ValueError) as e: _build_tf( sagemaker_session, framework_version=tensorflow_training_version, py_version=tensorflow_training_py_version, ) expected_msg = ( "TF {version} supports only legacy mode. Please supply the image URI directly with " "'image_uri=520713654638.dkr.ecr.{region}.amazonaws.com/" "sagemaker-tensorflow:{version}-cpu-py2' and set 'model_dir=False'. If you are using any " "legacy parameters (training_steps, evaluation_steps, checkpoint_path, requirements_file), " "make sure to pass them directly as hyperparameters instead." ).format(version=tensorflow_training_version, region=REGION) assert expected_msg in str(e.value)
32.890244
100
0.725436
0
0
0
0
422
0.078235
0
0
1,512
0.280311
71770ce551bdcd43974b0f18b616fb25201796c0
827
py
Python
testing.py
sofwerx/mycroft-articlekeyword-skill
7cab109db512d3a6465db241b18018e9415f4a9f
[ "Unlicense" ]
null
null
null
testing.py
sofwerx/mycroft-articlekeyword-skill
7cab109db512d3a6465db241b18018e9415f4a9f
[ "Unlicense" ]
null
null
null
testing.py
sofwerx/mycroft-articlekeyword-skill
7cab109db512d3a6465db241b18018e9415f4a9f
[ "Unlicense" ]
null
null
null
import subprocess

proc = subprocess.Popen(['python3', 'articlekeywords.py', 'aih.txt' , '5'], stdout=subprocess.PIPE )
#print(type(proc.communicate()[0]))
# path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/'

text = proc.stdout.read()
rows = text.splitlines()
#print(text.splitlines())

count = 0
s = ""
for row in rows:
    divide = row.split()
    wordCount = len(divide)
    if wordCount > 1:
        count = count + 1
        s += str(count)
        s += " "
        s += str(divide[1])
        s += " "

print(s)

# with open(path + 'out.csv', 'r') as content_file:
#     text = content_file.read()

# self.speak_dialog("bitcoin.price", data={'price': str(text)})

#file_path = '/opt/mycroft/skills/mycroft-bitcoinprice-skill/out.csv'
#wordCount = 10
#
# text = Path(file_path).read_text()
#
#print(exit_code)
21.205128
101
0.622733
0
0
0
0
0
0
0
0
453
0.547763
717864c0c5586a731d9e7b34b779d6af81159c7a
4,509
py
Python
slcyGeneral.py
mirrorcoloured/slcypi
c47975b3523f770d12a521c82e2dfca181e3f35b
[ "MIT" ]
null
null
null
slcyGeneral.py
mirrorcoloured/slcypi
c47975b3523f770d12a521c82e2dfca181e3f35b
[ "MIT" ]
null
null
null
slcyGeneral.py
mirrorcoloured/slcypi
c47975b3523f770d12a521c82e2dfca181e3f35b
[ "MIT" ]
null
null
null
# Python 2.7.1 import RPi.GPIO as GPIO from twython import Twython import time import sys import os import pygame APP_KEY='zmmlyAJzMDIntLpDYmSH98gbw' APP_SECRET='ksfSVa2hxvTQKYy4UR9tjpb57CAynMJDsygz9qOyzlH24NVwpW' OAUTH_TOKEN='794094183841566720-BagrHW91yH8C3Mdh9SOlBfpL6wrSVRW' OAUTH_TOKEN_SECRET='d0Uucq2dkSHrFHZGLM1X8Hw05d80ajKYGl1zTRxZQSKTm' applepislcy = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) ### GENERAL ### def Cleanup(): GPIO.cleanup() def Sleep(seconds): """Puts the program to sleep""" time.sleep(seconds) def Alert(channel): """Simple alert function for testing event interrupts""" print('Alert on channel',channel) def TimeString(): """Returns the current time""" t = time.localtime() return str(t[0])+'.'+str(t[1])+'.'+str(t[2])+'.'+str(t[3])+'.'+str(t[4])+'.'+str(t[5]) def LoadPins(mapping,inp): """Organizes an input into a pin mapping dict mapping <list>, ['IA','IB'] inp <dict>, <list>, <int> {'IA':1,'IB':2}, [1,2] """ if type(inp) is int and len(mapping) == 1: return {mapping[0]:inp} elif type(inp) is list and len(mapping) == len(inp): o = {} for i in range(len(inp)): o[mapping[i]] = inp[i] return o elif type(inp) is dict: return inp else: print('Invalid input for pins:',inp,type(inp)) print('Expected:',mapping) return {} def BoolToSign(inp): """Converts boolean bits into signed bits 0 -> -1 1 -> 1""" return (inp * 2) - 1 def SignToBool(inp): """Converts signed bits into boolean bits -1 -> 0 1 -> 1""" return (inp + 1) / 2 ### PYGAME ### def WindowSetup(size=(300,50),caption='',text='',background=(0,0,0),foreground=(255,255,255)): """Sets up a pygame window to take keyboard input size <tuple>, width by height caption <str>, window title bar text <str>, text to display in window, accepts \n background <tuple>, foreground <tuple>, (r,g,b) color """ pygame.init() screen = pygame.display.set_mode(size,0,32) pygame.display.set_caption(caption) myfont = pygame.font.SysFont('Monospace',15) labels = [] lines = text.split('\n') for line in lines: labels.append(myfont.render(line,1,foreground)) screen.fill(background) y = 0 for label in labels: screen.blit(label, (0,y)) y += 15 pygame.display.update() def InputLoop(eventmap): """Begins a pygame loop, mapping key inputs to functions eventmap <dict>, {pygame.K_t:myfunction} """ index = 0 while True: events = pygame.event.get() for event in events: if event.type == pygame.KEYDOWN: #print("{0}: You pressed {1:c}".format ( index , event.key )) if event.key in eventmap: eventmap[event.key]() elif event.type == pygame.QUIT: pygame.quit() sys.exit() def InputLoopDemo(): def dog(): print('woof') def cat(): print('meow') def fish(): print('blub') WindowSetup(caption='pet simulator',text='d for dog\nc for cat\nf for fish') InputLoop({pygame.K_d:dog, pygame.K_c:cat, pygame.K_f:fish}) ### TWITTER ### def Tweet(twit,statustext): """Tweets a message twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) statustext <str>, must be <= 140 characters """ if len(statustext) > 140: print('ERROR: Character limit 140 exceeded:',len(statustext)) else: twit.update_status(status=statustext) def TweetPicture(twit,file,statustext): """Tweets a message with a picture twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) file <str>, path and filename to picture statustext <str>, must be <= 140 characters """ photo = open(file, 'rb') response = twitter.upload_media(media=photo) twit.update_status(status=statustext, media_ids=[response['media_id']]) def 
TweetVideo(twit,file,statustext): """Tweets a message with a video twit <Twython>, create with Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET) file <str>, path and filename to video statustext <str>, must be <= 140 characters """ video = open(file, 'rb') response = twitter.upload_video(media=video, media_type='video/mp4') twit.update_status(status=statustext, media_ids=[response['media_id']])
30.883562
94
0.635174
0
0
0
0
0
0
0
0
1,889
0.41894
717a01e3e2c90ae46a5bad6b2a2010bbac8dace6
1,856
py
Python
python/pyarmnn/scripts/generate_docs.py
PetervdPerk-NXP/pyarmnn-release
2008c270f7c7c84a930842c845138628c8b95713
[ "MIT" ]
7
2020-02-27T07:45:14.000Z
2021-01-25T12:07:12.000Z
python/pyarmnn/scripts/generate_docs.py
MitchellTesla/PyArmNN
cbe37a0364b00f32ac2a8ced74eed5d576a0d52c
[ "MIT" ]
5
2020-07-28T15:01:12.000Z
2022-02-04T18:24:02.000Z
python/pyarmnn/scripts/generate_docs.py
MitchellTesla/PyArmNN
cbe37a0364b00f32ac2a8ced74eed5d576a0d52c
[ "MIT" ]
3
2020-07-31T11:41:24.000Z
2021-06-06T07:58:39.000Z
# Copyright © 2019 Arm Ltd. All rights reserved.
# Copyright 2020 NXP
# SPDX-License-Identifier: MIT
import os
import tarfile
import pyarmnn as ann
import shutil
from typing import List, Union
from pdoc.cli import main

package_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')


def __copy_file_to_dir(file_paths: Union[List[str], str], target_dir_path: str):
    """Copies multiple files to a directory.

    Args:
        file_paths (Union[List(str)]): List of files to copy
        target_dir_path (str): Target directory.

    Returns:
        None
    """
    file_paths = [] + file_paths

    if not (os.path.exists(target_dir_path) and os.path.isdir(target_dir_path)):
        os.makedirs(target_dir_path)

    for file_path in file_paths:
        if not (os.path.exists(file_path) and os.path.isfile(file_path)):
            raise RuntimeError('Not a file: {}'.format(file_path))
        file_name = os.path.basename(file_path)
        shutil.copyfile(file_path, os.path.join(str(target_dir_path), file_name))


def archive_docs(path: str, version: str):
    """Creates an archive.

    Args:
        path (str): Path which will be archived.
        version (str): Version of Arm NN.

    Returns:
        None
    """
    output_filename = f'pyarmnn_docs-{version}.tar'

    with tarfile.open(os.path.join(package_dir, output_filename), "w") as tar:
        tar.add(path)


if __name__ == "__main__":
    readme_filename = os.path.join(package_dir, '..', '..', 'README.md')

    with open(readme_filename, 'r') as readme_file:
        top_level_pyarmnn_doc = ''.join(readme_file.readlines())
        ann.__doc__ = top_level_pyarmnn_doc

    main()

    target_path = os.path.join(package_dir, 'docs')
    archive_docs(target_path, ann.__version__)
27.701493
82
0.644935
0
0
0
0
0
0
0
0
565
0.304254
717a53af9750d33e9be1b7de3f152d83339bf874
969
py
Python
tests/gejun_sum.py
jeffzhengye/pylearn
a140d0fca8a371faada194cb0126192675cc2045
[ "Unlicense" ]
2
2016-02-17T06:00:35.000Z
2020-11-23T13:34:00.000Z
tests/gejun_sum.py
jeffzhengye/pylearn
a140d0fca8a371faada194cb0126192675cc2045
[ "Unlicense" ]
null
null
null
tests/gejun_sum.py
jeffzhengye/pylearn
a140d0fca8a371faada194cb0126192675cc2045
[ "Unlicense" ]
null
null
null
__author__ = 'jeffye'


def sum_consecutives(s):
    i = 1
    li = []
    if i < len(s):
        n = 1
        while s[i] != s[i + 1] and s[i] != s[i - 1]:
            sum = s[i]
            i = i + 1
            return sum
        while s[i] == s[i + 1]:
            n = n + 1
            sum = s[i] * n
            i = i + 1
            return sum
        li.append(sum)
    return li


def sum_consecutives_corrected(s):
    start = 0
    li = []
    n = 1
    while start < len(s):
        if start == len(s) - 1:  # last element
            li.append(s[start])
            break
        elif s[start] == s[start + n]:  # equal, just record the length
            n += 1
        else:  # first not equal, sum all previous equal elements and append to li
            li.append(sum(s[start: start + n]))
            start += n
            n = 1
    return li


if __name__ == '__main__':
    test_li = [-5, -5, 7, 7, 12, 0]  # should return [-10,14,12,0]
    print sum_consecutives_corrected(test_li)
20.617021
82
0.472652
0
0
0
0
0
0
0
0
159
0.164087
717d2436cb249576851958abd14c5cdda155ab5a
565
py
Python
PySDDP/term.py
tscher/PySDDP
ece69b77c951cbb1f046ac184f6fe4fc025ad690
[ "MIT" ]
9
2021-01-07T13:35:47.000Z
2022-02-06T14:30:33.000Z
PySDDP/term.py
AndreMarcato/PySDDP
e6b1e60df6a5598c30552be61b07ed642e46399c
[ "MIT" ]
null
null
null
PySDDP/term.py
AndreMarcato/PySDDP
e6b1e60df6a5598c30552be61b07ed642e46399c
[ "MIT" ]
3
2021-01-08T11:37:23.000Z
2021-04-19T15:07:26.000Z
class term(object):

    # Dados de cadastro das usinas termeletrica (presentes no TERM.DAT)
    Codigo = None
    Nome = None
    Potencia = None
    FCMax = None
    TEIF = None
    IP = None
    GTMin = None

    # Dados Adicionais Especificados no arquivo de configuracao termica (CONFT)
    Sist = None
    Status = None
    Classe = None

    # Dados Adicionais Especificados no arquivo de classe termica (CLAST)
    Custo = None
    NomeClasse = None
    TipoComb = None

    def insere(self, custo, gmax):
        self.custo = custo
        self.gmax = gmax
23.541667
79
0.640708
564
0.99823
0
0
0
0
0
0
211
0.373451
717f58f0b458c75ac48a3f2890221b9b52dcce70
88
py
Python
plerr/__main__.py
b2bs-team/pylint-errors
f1362c8afbe6b7075f805560d7699f63ad35a10b
[ "MIT" ]
2
2020-10-28T23:53:59.000Z
2020-10-29T03:31:20.000Z
plerr/__main__.py
b2bs-team/pylint-errors
f1362c8afbe6b7075f805560d7699f63ad35a10b
[ "MIT" ]
null
null
null
plerr/__main__.py
b2bs-team/pylint-errors
f1362c8afbe6b7075f805560d7699f63ad35a10b
[ "MIT" ]
1
2020-10-28T23:53:47.000Z
2020-10-28T23:53:47.000Z
"""plerr entrypoint""" from plerr import cli if __name__ == '__main__': cli.main()
14.666667
26
0.659091
0
0
0
0
0
0
0
0
32
0.363636
71801cfc804d913976cbde0f2c680802285aa66d
817
py
Python
code/send.py
CamouOkau/messenger_new_years_bot
38f3c26b6c5b4dae7fe48f8b61680ec903c0deac
[ "MIT" ]
null
null
null
code/send.py
CamouOkau/messenger_new_years_bot
38f3c26b6c5b4dae7fe48f8b61680ec903c0deac
[ "MIT" ]
null
null
null
code/send.py
CamouOkau/messenger_new_years_bot
38f3c26b6c5b4dae7fe48f8b61680ec903c0deac
[ "MIT" ]
null
null
null
import sys
import time
from datetime import datetime

from bot import FbMessengerBot

if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("No email or password provided")
    else:
        bot = FbMessengerBot(sys.argv[1], sys.argv[2])

        with open("users.txt", "r") as file:
            users = dict.fromkeys(file.read().split("\n"))

        for user in users:
            users[user] = bot.uid(user)

        with open("message.txt", "r") as file:
            message = file.read()

        time_now = datetime.now()
        send_time = datetime(time_now.year + 1, 1, 1)
        wait_time = (send_time - time_now).total_seconds()

        print("Waiting...")
        time.sleep(wait_time)

        for uid in users.values():
            bot.send_message(message, uid)

        bot.logout()
29.178571
58
0.575275
0
0
0
0
0
0
0
0
87
0.106487
71803fa300d2ccbae9efe9edab91921379251431
4,361
py
Python
senlin_tempest_plugin/api/policies/test_policy_update_negative.py
ghanshyammann/senlin-tempest-plugin
9f33bbe723eb381f93c2248a6a277efef3d92ec3
[ "Apache-2.0" ]
null
null
null
senlin_tempest_plugin/api/policies/test_policy_update_negative.py
ghanshyammann/senlin-tempest-plugin
9f33bbe723eb381f93c2248a6a277efef3d92ec3
[ "Apache-2.0" ]
null
null
null
senlin_tempest_plugin/api/policies/test_policy_update_negative.py
ghanshyammann/senlin-tempest-plugin
9f33bbe723eb381f93c2248a6a277efef3d92ec3
[ "Apache-2.0" ]
null
null
null
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.lib import decorators from tempest.lib import exceptions from senlin_tempest_plugin.api import base from senlin_tempest_plugin.common import utils class TestPolicyUpdateNegativeNotFound(base.BaseSenlinAPITest): @decorators.attr(type=['negative']) @decorators.idempotent_id('5df90d82-9889-4c6f-824c-30272bcfa767') def test_policy_update_policy_not_found(self): ex = self.assertRaises(exceptions.NotFound, self.client.update_obj, 'policies', '5df90d82-9889-4c6f-824c-30272bcfa767', {'policy': {'name': 'new-name'}}) message = ex.resp_body['error']['message'] self.assertEqual( "The policy '5df90d82-9889-4c6f-824c-30272bcfa767' " "could not be found.", str(message)) @decorators.attr(type=['negative']) @decorators.idempotent_id('29414add-9cba-4b72-a7bb-36718671dcab') def test_policy_update_policy_invalid_param(self): ex = self.assertRaises(exceptions.BadRequest, self.client.update_obj, 'policies', '5df90d82-9889-4c6f-824c-30272bcfa767', {'policy': {'boo': 'foo'}}) message = ex.resp_body['error']['message'] self.assertEqual( "Additional properties are not allowed (u'boo' was " "unexpected)", str(message)) @decorators.attr(type=['negative']) @decorators.idempotent_id('bf26ed1e-1d26-4472-b4c8-0bcca1c0a838') def test_policy_update_policy_empty_param(self): ex = self.assertRaises(exceptions.BadRequest, self.client.update_obj, 'policies', '5df90d82-9889-4c6f-824c-30272bcfa767', {}) message = ex.resp_body['error']['message'] self.assertEqual( "Malformed request data, missing 'policy' key in " "request body.", str(message)) class TestPolicyUpdateNegativeBadRequest(base.BaseSenlinAPITest): def setUp(self): super(TestPolicyUpdateNegativeBadRequest, self).setUp() # Create a policy policy_id = utils.create_a_policy(self) self.addCleanup(utils.delete_a_policy, self, policy_id) self.policy_id = policy_id @decorators.attr(type=['negative']) @decorators.idempotent_id('31242de5-55ac-4589-87a1-a9940e4beca2') def test_policy_update_no_property_updated(self): # No property is updated. params = { 'policy': {} } # Verify badrequest exception(400) is raised. ex = self.assertRaises(exceptions.BadRequest, self.client.update_obj, 'policies', self.policy_id, params) message = ex.resp_body['error']['message'] self.assertEqual( "'name' is a required property", str(message)) @decorators.attr(type=['negative']) @decorators.idempotent_id('d2ca7de6-0069-48c9-b3de-ee975a2428dc') def test_policy_update_spec_not_updatable(self): # Try to update spec of policy. # Note: name is the only property that can be updated # after policy is created. params = { 'policy': { 'name': 'new-name', 'spec': {'k1': 'v1'} } } # Verify badrequest exception(400) is raised. ex = self.assertRaises(exceptions.BadRequest, self.client.update_obj, 'policies', self.policy_id, params) message = ex.resp_body['error']['message'] self.assertEqual( "Additional properties are not allowed (u'spec' was " "unexpected)", str(message))
40.37963
75
0.616372
3,648
0.836505
0
0
3,231
0.740885
0
0
1,648
0.377895
718204a2b383cce840b6e0f7101b4542d7502bc6
136
py
Python
boa3_test/test_sc/interop_test/contract/DestroyContract.py
hal0x2328/neo3-boa
6825a3533384cb01660773050719402a9703065b
[ "Apache-2.0" ]
25
2020-07-22T19:37:43.000Z
2022-03-08T03:23:55.000Z
boa3_test/test_sc/interop_test/contract/DestroyContract.py
hal0x2328/neo3-boa
6825a3533384cb01660773050719402a9703065b
[ "Apache-2.0" ]
419
2020-04-23T17:48:14.000Z
2022-03-31T13:17:45.000Z
boa3_test/test_sc/interop_test/contract/DestroyContract.py
hal0x2328/neo3-boa
6825a3533384cb01660773050719402a9703065b
[ "Apache-2.0" ]
15
2020-05-21T21:54:24.000Z
2021-11-18T06:17:24.000Z
from boa3.builtin import public
from boa3.builtin.interop.contract import destroy_contract


@public
def Main():
    destroy_contract()
17
58
0.794118
0
0
0
0
42
0.308824
0
0
0
0
71820cfe7864a17de8d5ffb455a24ec586958eca
4,363
py
Python
tests/test_vmax.py
qinfeng2011/wltp
317ad38fb96599a29d22e40f69b6aeb4d205611d
[ "Apache-2.0" ]
null
null
null
tests/test_vmax.py
qinfeng2011/wltp
317ad38fb96599a29d22e40f69b6aeb4d205611d
[ "Apache-2.0" ]
null
null
null
tests/test_vmax.py
qinfeng2011/wltp
317ad38fb96599a29d22e40f69b6aeb4d205611d
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2019 European Commission (JRC); # Licensed under the EUPL (the 'Licence'); # You may not use this work except in compliance with the Licence. # You may obtain a copy of the Licence at: http://ec.europa.eu/idabc/eupl import functools as fnt import logging import random import numpy as np import numpy.testing as npt import pandas as pd import pytest from pandas import IndexSlice as _ix from wltp import engine, vehicle, downscale, vmax from wltp.io import gear_names, veh_names from . import vehdb logging.basicConfig(level=logging.DEBUG) log = logging.getLogger(__name__) def test_v_max(h5_accdb): from . import conftest veh_samples = None # DEBUG: to reduce clutter in the console. # veh_samples = 12 # DEBUG: to study buggy cars. # veh_samples = [76] # diff det_by_nlim # veh_samples = [3, 21, 22, 104, ] # diff gear # veh_samples = [38] # diff vmax order higher 1st # veh_samples = [31] # [23] def make_v_maxes(vehnum): props, wot, n2vs = vehdb.load_vehicle_accdb(h5_accdb, vehnum) wot = wot.rename({"Pwot": "p"}, axis=1) wot["n"] = wot.index gwots = engine.interpolate_wot_on_v_grid(wot, n2vs) gwots = engine.calc_p_avail_in_gwots(gwots, SM=0.1) gwots["p_resist"] = vehicle.calc_road_load_power( gwots.index, props.f0, props.f1, props.f2 ) rec = vmax.calc_v_max(gwots) return (props["v_max"], rec.v_max, props["gear_v_max"], rec.g_vmax, rec.wot) def _package_wots_df(gear_wot_dfs): assert gear_wot_dfs ## Merge all index values into the index of the 1st DF, # or else, themerged-df contains n-gear dupes in each index-value. # # first_df, *rest_dfs = gear_wot_dfs.values() # full_index = np.unique(np.hstack(df.index for df in gear_wot_dfs)) # first_df = first_df.reindex(full_index) wots_df = pd.concat( # [first_df] + rest_dfs, gear_wot_dfs.values(), axis=1, # join="inner", keys=gear_names(gear_wot_dfs.keys()), names=["item", "gear"], verify_integrity=True, ) return wots_df veh_nums = vehdb.all_vehnums(h5_accdb) if not isinstance(veh_samples, (list, tuple)): veh_samples = random.sample(veh_nums, veh_samples) if veh_samples else veh_nums recs = [make_v_maxes(vehnum) for vehnum in veh_samples] vehres = pd.DataFrame( recs, columns="vmax_accdb vmax_python gmax_accdb gmax_python wot".split(), index=veh_names(veh_samples), ).astype({"gmax_accdb": "Int64", "gmax_python": "Int64"}) wots_df = pd.concat( vehres["wot"].values, keys=veh_names(veh_samples), names=["vehicle"] ) vehres = vehres.drop("wot", axis=1) vehres["vmax_diff"] = (vehres["vmax_python"] - vehres["vmax_accdb"]).abs() vehres["gmax_diff"] = (vehres["gmax_python"] - vehres["gmax_accdb"]).abs() with pd.option_context( "display.max_rows", 130, "display.max_columns", 20, "display.width", 120, # "display.precision", # 4, # "display.chop_threshold", # 1e-8, "display.float_format", "{:0.2f}".format, ): print( f"++ nones: {vehres.vmax_python.sum()} (out of {len(veh_samples)})" f"\n++++\n{vehres}" # f"\n++++\n{wots_df.sample(80, axis=0)}" ) with pd.option_context( "display.max_columns", 20, "display.width", 120, "display.float_format", "{:0.4f}".format, ): print(f"\n++++\n{vehres.describe().T}") vehres = vehres.dropna(axis=1) # npt.assert_array_equal(vmaxes["vmax_python"], vmaxes["vmax_accdb"]) aggregate_tol = 1e-4 # The digits copied from terminal. 
assert ( vehres["vmax_diff"].describe() - [125.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000] < aggregate_tol ).all() assert ( vehres["gmax_diff"].describe() - [125.0000, 0.1040, 0.3552, 0.0000, 0.0000, 0.0000, 0.0000, 2.0000] < aggregate_tol ).all() assert (vehres["vmax_diff"] == 0).sum() == 125 and ( vehres["gmax_diff"] == 0 ).sum() == 125
32.080882
87
0.603942
0
0
0
0
0
0
0
0
1,596
0.365803
718275b3e8d58cfc1c69bd90b16b90b94fc076c8
881
py
Python
util/canonicaljson.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
2,027
2019-11-12T18:05:48.000Z
2022-03-31T22:25:04.000Z
util/canonicaljson.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
496
2019-11-12T18:13:37.000Z
2022-03-31T10:43:45.000Z
util/canonicaljson.py
giuseppe/quay
a1b7e4b51974edfe86f66788621011eef2667e6a
[ "Apache-2.0" ]
249
2019-11-12T18:02:27.000Z
2022-03-22T12:19:19.000Z
import collections


def canonicalize(json_obj, preserve_sequence_order=True):
    """
    This function canonicalizes a Python object that will be serialized as JSON.

    Example usage: json.dumps(canonicalize(my_obj))

    Args:
        json_obj (object): the Python object that will later be serialized as JSON.

    Returns:
        object: json_obj now sorted to its canonical form.
    """
    if isinstance(json_obj, collections.MutableMapping):
        sorted_obj = sorted(
            {
                key: canonicalize(val, preserve_sequence_order)
                for key, val in json_obj.items()
            }.items()
        )
        return collections.OrderedDict(sorted_obj)
    elif isinstance(json_obj, (list, tuple)):
        seq = [canonicalize(val, preserve_sequence_order) for val in json_obj]
        return seq if preserve_sequence_order else sorted(seq)

    return json_obj
32.62963
96
0.681044
0
0
0
0
0
0
0
0
308
0.349603
71829ce0488364233ac4688992792bd2903978d0
1,170
py
Python
datasette_plugin_geo/inspect.py
russss/datasette-geo
d4cecc020848bbde91e9e17bf352f7c70bc3dccf
[ "Apache-2.0" ]
9
2019-05-02T14:44:57.000Z
2022-01-19T20:56:50.000Z
datasette_plugin_geo/inspect.py
russss/datasette-geo
d4cecc020848bbde91e9e17bf352f7c70bc3dccf
[ "Apache-2.0" ]
5
2019-04-30T12:22:03.000Z
2021-05-29T20:08:42.000Z
datasette_plugin_geo/inspect.py
russss/datasette-geo
d4cecc020848bbde91e9e17bf352f7c70bc3dccf
[ "Apache-2.0" ]
2
2019-07-31T19:16:43.000Z
2021-05-28T20:12:36.000Z
from datasette import hookimpl
from datasette.utils import detect_spatialite
from shapely import wkt


def get_spatial_tables(conn):
    if not detect_spatialite(conn):
        return {}

    spatial_tables = {}

    c = conn.cursor()
    c.execute(
        """SELECT f_table_name, f_geometry_column, srid, spatial_index_enabled
        FROM geometry_columns"""
    )

    for row in c.fetchall():
        if row[3] != 1:
            print(
                "Column {column} in table {table} has no spatial index; datasette-geo will ignore it.".format(
                    column=row[1], table=row[0]
                )
            )
            continue
        spatial_tables[row[0]] = row[1]

    return spatial_tables


def get_bounds(conn, spatial_tables):
    c = conn.cursor()
    res = {}
    for table, column in spatial_tables.items():
        c.execute(
            "SELECT AsText(Envelope(GUnion({column}))) FROM {table}".format(
                table=table, column=column
            )
        )
        data = c.fetchone()[0]
        if data is None:
            continue
        bbox = wkt.loads(data)
        res[table] = bbox.bounds
    return res
26.590909
110
0.564957
0
0
0
0
0
0
0
0
254
0.217094
7182cc9e1a275d7846a31a780b10f6ed97021067
1,440
py
Python
microcosm_pubsub/context.py
Sinon/microcosm-pubsub
c98a188fcd5b3f358c7171dae0c39a33c5774a4e
[ "Apache-2.0" ]
5
2016-07-23T21:20:50.000Z
2021-07-15T00:27:47.000Z
microcosm_pubsub/context.py
Sinon/microcosm-pubsub
c98a188fcd5b3f358c7171dae0c39a33c5774a4e
[ "Apache-2.0" ]
76
2016-03-22T23:41:21.000Z
2020-07-27T17:35:36.000Z
microcosm_pubsub/context.py
Sinon/microcosm-pubsub
c98a188fcd5b3f358c7171dae0c39a33c5774a4e
[ "Apache-2.0" ]
8
2016-06-01T18:43:41.000Z
2021-04-27T20:22:15.000Z
""" Message context. """ from typing import Dict from microcosm.api import defaults, typed from microcosm.config.types import boolean from microcosm_logging.decorators import logger from microcosm_pubsub.constants import TTL_KEY, URI_KEY from microcosm_pubsub.message import SQSMessage @defaults( enable_ttl=typed(boolean, default_value=True), initial_ttl=typed(int, default_value=32), ) @logger class SQSMessageContext: """ Factory for per-message contexts. """ def __init__(self, graph): self.enable_ttl = graph.config.sqs_message_context.enable_ttl self.initial_ttl = graph.config.sqs_message_context.initial_ttl def __call__(self, context: SQSMessage, **kwargs) -> Dict[str, str]: """ Create a new context from a message. """ return self.from_sqs_message(context, **kwargs) def from_sqs_message(self, message: SQSMessage, **kwargs): context: Dict = dict(message.opaque_data) context.update( # include the message id message_id=message.message_id, **kwargs, ) # include the TTL (if enabled) if self.enable_ttl: ttl = message.ttl if message.ttl is not None else self.initial_ttl context[TTL_KEY] = str(ttl - 1) # include the URI (if there is one) if message.uri: context[URI_KEY] = message.uri return context
26.181818
78
0.6625
1,030
0.715278
0
0
1,148
0.797222
0
0
225
0.15625
71860bda1bd4506337b0b07e0b43aaca3e5c2511
2,185
py
Python
azure_ml/pytorch_classifier/train_parameterized.py
murdockcrc/python-tricks
57f7ad9c00a045c1f9f18f89bed6e73be6c85b69
[ "MIT" ]
null
null
null
azure_ml/pytorch_classifier/train_parameterized.py
murdockcrc/python-tricks
57f7ad9c00a045c1f9f18f89bed6e73be6c85b69
[ "MIT" ]
null
null
null
azure_ml/pytorch_classifier/train_parameterized.py
murdockcrc/python-tricks
57f7ad9c00a045c1f9f18f89bed6e73be6c85b69
[ "MIT" ]
null
null
null
import os
import argparse

import torch
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms

from model import Net

from azureml.core import Run
run = Run.get_context()

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_path',
        type=str,
        help='Path to the training data'
    )
    parser.add_argument(
        '--learning_rate',
        type=float,
        default=0.001,
        help='Learning rate for SGD'
    )
    parser.add_argument(
        '--momentum',
        type=float,
        default=0.9,
        help='Momentum for SGD'
    )
    args = parser.parse_args()

    print("===== DATA =====")
    print("DATA PATH: " + args.data_path)
    print("LIST FILES IN DATA PATH...")
    print(os.listdir(args.data_path))
    print("================")

    # prepare DataLoader for CIFAR10 data
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    trainset = torchvision.datasets.CIFAR10(
        root=args.data_path,
        train=True,
        download=False,
        transform=transform,
    )
    trainloader = torch.utils.data.DataLoader(
        trainset,
        batch_size=4,
        shuffle=True,
        num_workers=2
    )

    # define convolutional network
    net = Net()

    # set up pytorch loss / optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.SGD(
        net.parameters(),
        lr=args.learning_rate,
        momentum=args.momentum,
    )

    # train the network
    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # unpack the data
            inputs, labels = data

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:
                loss = running_loss / 2000
                run.log('loss', loss)  # log loss metric to AML
                print(f'epoch={epoch + 1}, batch={i + 1:5}: loss {loss:.2f}')
                running_loss = 0.0

    print('Finished Training')
23
69
0.622426
0
0
0
0
0
0
0
0
516
0.236156
71866e54c9be9ceced231705351ad07d4dec3246
244
py
Python
src/tests/test_app_db.py
kazqvaizer/arq-sqlalchemy-boilerplate
c14596ed358a061e6eb2a380f4bd962242b123f3
[ "MIT" ]
6
2021-12-20T14:49:14.000Z
2022-03-21T14:32:49.000Z
src/tests/test_app_db.py
kazqvaizer/arq-sqlalchemy-boilerplate
c14596ed358a061e6eb2a380f4bd962242b123f3
[ "MIT" ]
null
null
null
src/tests/test_app_db.py
kazqvaizer/arq-sqlalchemy-boilerplate
c14596ed358a061e6eb2a380f4bd962242b123f3
[ "MIT" ]
null
null
null
import pytest

from app.db import session_scope

pytestmark = pytest.mark.asyncio


async def test_engine_configured(env):
    async with session_scope() as session:
        assert str(session.bind.engine.url) == env("SQLALCHEMY_DATABASE_URI")
22.181818
77
0.762295
0
0
0
0
0
0
159
0.651639
25
0.102459
7187ac8a1ef00393974831033262a38cc227b4e0
3,063
py
Python
catalyst/core/callbacks/formatters.py
cgarciae/catalyst
391ff89ab0d9a1961b88719e894f917ac0fb7fc3
[ "Apache-2.0" ]
1
2019-11-26T06:41:33.000Z
2019-11-26T06:41:33.000Z
catalyst/core/callbacks/formatters.py
cgarciae/catalyst
391ff89ab0d9a1961b88719e894f917ac0fb7fc3
[ "Apache-2.0" ]
null
null
null
catalyst/core/callbacks/formatters.py
cgarciae/catalyst
391ff89ab0d9a1961b88719e894f917ac0fb7fc3
[ "Apache-2.0" ]
null
null
null
from abc import ABC, abstractmethod from datetime import datetime import json import logging from catalyst import utils from catalyst.core import _State class MetricsFormatter(ABC, logging.Formatter): """ Abstract metrics formatter """ def __init__(self, message_prefix): """ Args: message_prefix: logging format string that will be prepended to message """ super().__init__(f"{message_prefix}{{message}}", style="{") @abstractmethod def _format_message(self, state: _State): pass def format(self, record: logging.LogRecord): """ Format message string """ # noinspection PyUnresolvedReferences state = record.state record.msg = self._format_message(state) return super().format(record) class TxtMetricsFormatter(MetricsFormatter): """ Translate batch metrics in human-readable format. This class is used by ``logging.Logger`` to make a string from record. For details refer to official docs for 'logging' module. Note: This is inner class used by Logger callback, no need to use it directly! """ def __init__(self): """ Initializes the ``TxtMetricsFormatter`` """ super().__init__("[{asctime}] ") def _format_metrics(self, metrics): # metrics : dict[str: dict[str: float]] metrics_formatted = {} for key, value in metrics.items(): metrics_formatted_ = [ utils.format_metric(m_name, m_value) for m_name, m_value in sorted(value.items()) ] metrics_formatted_ = " | ".join(metrics_formatted_) metrics_formatted[key] = metrics_formatted_ return metrics_formatted def _format_message(self, state: _State): message = [""] metrics = self._format_metrics(state.metric_manager.epoch_values) for key, value in metrics.items(): message.append( f"{state.stage_epoch_log}/{state.num_epochs} " f"* Epoch {state.epoch_log} ({key}): {value}" ) message = "\n".join(message) return message class JsonMetricsFormatter(MetricsFormatter): """ Translate batch metrics in json format. This class is used by ``logging.Logger`` to make a string from record. For details refer to official docs for 'logging' module. Note: This is inner class used by Logger callback, no need to use it directly! """ def __init__(self): """ Initializes the ``JsonMetricsFormatter`` """ super().__init__("") def _format_message(self, state: _State): res = dict( metirics=state.metric_manager.epoch_values.copy(), epoch=state.epoch, time=datetime.now().isoformat() ) return json.dumps(res, indent=True, ensure_ascii=False) __all__ = ["MetricsFormatter", "TxtMetricsFormatter", "JsonMetricsFormatter"]
27.845455
77
0.615083
2,820
0.920666
0
0
74
0.024159
0
0
1,225
0.399935
7187ff57f53912dbb2c2ffb581f78542068a9ec6
7,612
py
Python
fuzzy/fuzzy.py
Suraj1127/fuzzy-matcher
a3a6ecc6954d79ca65e2517f93db44cc432e7a90
[ "MIT" ]
null
null
null
fuzzy/fuzzy.py
Suraj1127/fuzzy-matcher
a3a6ecc6954d79ca65e2517f93db44cc432e7a90
[ "MIT" ]
null
null
null
fuzzy/fuzzy.py
Suraj1127/fuzzy-matcher
a3a6ecc6954d79ca65e2517f93db44cc432e7a90
[ "MIT" ]
null
null
null
#!/usr/bin/env python3 """ Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching. """ import pip def import_or_install(package): try: __import__(package) except ImportError: pip.main(['install', package]) import os import sys import argparse import_or_install('numpy') import_or_install('pandas') import_or_install('fuzzywuzzy') import numpy as np import pandas as pd from fuzzywuzzy import process, fuzz class FuzzyMatcher: """ FuzzyMatcher class to perform the fuzzy matching. """ def __init__(self, df_1, df_2, columns_1, columns_2, append_in='second'): """ The constructor takes five arguments. The last argument 'append_in' is optional. Parameters: df_1: the first table in pandas.DataFrame format or the name of the CSV file for the first table df_2: the second table in pandas.DataFrame format or the name of the CSV file for the second table columns_1: list of common columns in the first table columns_2: list of common columns in the second table append_in (optional): 'first' if the common columns are to be appended in the first table 'second' if the common columns are to be appended in the second table """ if type(df_1) == str: df_1 = pd.read_csv(df_1) if type(df_2) == str: df_2 = pd.read_csv(df_2) df_1.columns = df_1.columns.str.lower().str.strip() df_2.columns = df_2.columns.str.lower().str.strip() columns_1 = [i.lower().strip() for i in columns_1] columns_2 = [i.lower().strip() for i in columns_2] if append_in == 'first': temp = df_1 df_1 = df_2 df_2 = temp temp = columns_1 columns_1 = columns_2 columns_2 = temp self.df_1 = df_1.rename(columns=dict(zip(columns_1, columns_2))) self.columns = columns_2 self.df_2 = self._fuzzy_match(self.df_1, df_2, self.columns[0]) @staticmethod def _string_matching(name, collection, mapping_): """ Returns similar name using fuzzy matching. """ if name in collection: return name if name in mapping_: return mapping_[name] similar = process.extractOne(name, collection, scorer=fuzz.ratio)[0] mapping_[name] = similar return similar def _fuzzy_match(self, df_1_t, df_2_t, common_column_t): """ Returns dataframe with the common column appended. Notice that the appended columns end with '_t'. """ collection = set(df_1_t[common_column_t]) mapping_ = {} df_2_t[common_column_t + '_t'] = df_2_t[common_column_t].apply(self._string_matching, args=(collection, mapping_)) return df_2_t @property def fuzzy_match(self): """ Returns the dataframe consisting of all the appended columns. """ for i_t, common_column in enumerate(self.columns[1:], start=1): self.df_2[common_column + '_t'] = np.nan group_1 = self.df_1.groupby(self.columns[:i_t]) group_2 = self.df_2.groupby([i + '_t' for i in self.columns[:i_t]]) for key, df_slice_2 in group_2: df_slice_1 = group_1.get_group(key) df_slice_2 = self._fuzzy_match(df_slice_1, df_slice_2, common_column) self.df_2.loc[df_slice_2.index, common_column + '_t'] = df_slice_2.loc[:, common_column + '_t'] return self.df_2 def save(self, filename): """ Saves the result dataframe to a CSV file, filename. """ self.df_2.to_csv(filename) def parse_args(parser): """ Parsing and configuration of the command line arguments. 
""" parser = argparse.ArgumentParser() parser.add_argument('--firstcsv', type=str, required=True, help='CSV file for first table.') parser.add_argument('--secondcsv', type=str, required=True, help='CSV file for second table.') parser.add_argument('--destination', type=str, default='output.csv', help='Destination filename.') parser.add_argument('--commoncolumns1', type=str, required=True, help='Common columns for first table.') parser.add_argument('--commoncolumns2', type=str, required=True, help='Common columns for second table in the same order.') parser.add_argument("--in", dest="_in", default='second', choices=['second', 'first'], help='Table to append the columns. ') return check_args(parser.parse_args()) def check_args(args): """ Checking the arguments if they are entered properly. Validations performed: 1. Compulsory arguments are entered. 2. The entered filenames are present in the current folder. 3. The entered column names are present in the corresponding files. 4. If the destination filename is already present in the directory, ask the user if it can be overwritten. """ # for --firstcsv and --secondcsv for filename in [args.firstcsv, args.secondcsv]: if not os.path.isfile(filename): raise Exception("File {} is not present in the currrent folder.".format(filename)) # --commoncolumns1 commoncolumns1 = [i.strip().lower() for i in args.commoncolumns1.split(',')] temp = set(commoncolumns1) - set(pd.read_csv(args.firstcsv, nrows=1).columns.str.lower().str.strip()) if temp: raise Exception("The following columns are not present in the file, {}:\n{}".format(args.firstcsv, temp)) # --commoncolumns2 commoncolumns2 = [i.strip().lower() for i in args.commoncolumns2.split(',')] temp = set(commoncolumns2) - set(pd.read_csv(args.secondcsv, nrows=1).columns.str.lower().str.strip()) if temp: raise Exception("The following columns are not present in the file, {}:\n{}".format(args.secondcsv, temp)) # --destination if os.path.isfile(args.destination): print("The file {} already exists. Do you want to overwrite it? y/n".format(args.destination)) ans = input().strip().lower() if ans == 'n': print("Please enter different destination filename and run the script again.") sys.exit() return args if __name__ == "__main__": # instantiate the ArgumentParser class and parse the arguments parser = argparse.ArgumentParser() arguments = parse_args(parser) # save the arguments as some variables which later would be passed to FuzzyMatcher class filename_1 = arguments.firstcsv filename_2 = arguments.secondcsv result_filename = arguments.destination # clean and lowercase-ize the columns names common_columns_1 = [i.strip().lower() for i in arguments.commoncolumns1.split(',')] common_columns_2 = [i.strip().lower() for i in arguments.commoncolumns2.split(',')] # instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file fuzzy_matcher = FuzzyMatcher(filename_1, filename_2, common_columns_1, common_columns_2, append_in=arguments._in) fuzzy_matcher.fuzzy_match fuzzy_matcher.save(result_filename)
35.078341
128
0.626379
3,584
0.470836
0
0
1,219
0.160142
0
0
2,806
0.368628
718a2a5b0f6feb828e1a124e9a30a273db18a144
9,770
py
Python
exoatlas/visualizations/panels/BubblePanel.py
zkbt/exopop
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
[ "MIT" ]
4
2020-06-24T16:38:27.000Z
2022-01-23T01:57:19.000Z
exoatlas/visualizations/panels/BubblePanel.py
zkbt/exopop
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
[ "MIT" ]
4
2018-09-20T23:12:30.000Z
2019-05-15T15:31:58.000Z
exoatlas/visualizations/panels/BubblePanel.py
zkbt/exopop
5e8b9d391fe9e2d39c623d7ccd7eca8fd0f0f3f8
[ "MIT" ]
null
null
null
from .Panel import * __all__ = ['BubblePanel'] default_size = plt.matplotlib.rcParams['lines.markersize']**2 class BubblePanel(Panel): ''' BubblePanel is a general wrapper for making scatter plots where planets are represented as bubbles that can have informative sizes and/or colors. ''' def __init__(self, xaxis=None, yaxis=None, size=None, size_normalization=None, color=None, cmap='plasma', vmin=None, vmax=None, color_normalization=None, **kw): ''' Initialize a plotting panel. Parameters ---------- size : PlottableAxis, str, float, None What should the sizes of points be or encode? size_normalization : float If sizes depend on quantities, how should they be normalized? color : PlottableAxis, str, float, None What should the colors of points be or encode? cmap : str, cmap from plt.matplotlib.cm If the colors depend on quantities, what cmap should be used for them? vmin : float, astropy.units.quantity.Quantity If the colors depend on quantities, what should the bottom of the cmap be? vmax : float, astropy.units.quantity.Quantity If the colors depend on quantities, what should the top of the cmap be? color_normalization : matplotlib.colors.Normalize If color depend on quantities, how should the values be normalized. If color_normalization is defined, any values provided here for vmin and vmax will be ignored. **kw : dict Other keywords will be passed on to *all* Panel/Plottable initializations (which may include x, y, size, and color). If you need more fine-grained control over which axis gets which keyword, consider initializing those panels one-by-one. ''' # initialize the basics of the panel with the plottable axes Panel.__init__(self, xaxis=xaxis, yaxis=yaxis, **kw) # set up how we should scale the sizes of points size = clean_axis(size) try: # try to make a variable size axis self.plottable['size'] = size(panel=self, **kw) default_size_normalization = self.plottable['size'].size_normalization except TypeError: # otherwise, use a single size for all points self.plottable['size'] = size default_size_normalization = 1 #self.plottable['x'].panel = self #self.plottable['y'].panel = self # make sure a size normalization has been defined self.size_normalization = size_normalization or default_size_normalization # set up how we should set the colors of points color = clean_axis(color) try: # try to make a variable color axis self.plottable['color'] = color(panel=self, **kw) default_lim = self.plottable['color'].lim except TypeError: # otherwise, use a single color for all points self.plottable['color'] = color default_lim = [None, None] # if an actual cmap was provided, use it if isinstance(cmap, plt.matplotlib.colors.Colormap): self.cmap = cmap # otherwise, treat the cmap as a string key else: self.cmap = plt.matplotlib.cm.cmap_d[cmap] # make sure the color map limits are set self.vmin = vmin or default_lim[0] self.vmax = vmax or default_lim[1] # if a custom normalization is used, reset vmin + vmax self.color_normalization = color_normalization if isinstance(self.color_normalization, plt.matplotlib.colors.Normalize): # pull the normalization's min/max for information self.vmin = color_normalization.vmin self.vmax = color_normalization.vmax # apply (x,y) axis labels, scales, limits appropriately for axis in 'xy': for attribute in ['label', 'scale', 'lim']: setattr(self, f'{axis}{attribute}', getattr(self.plottable[axis], attribute)) #DEBUG self.summarize() def get_sizes(self): ''' The sizes of the bubbles. 
Returns ------- s : an input for plt.scatter Either a single scalar, or an array with variable sizes for each bubble according to some quantity. ''' # should we ignore any variable size instructions? if self.pop.respond_to_size == False: size = self.pop.plotkw.get('s', None) # if desired, set variable sizes elif isinstance(self.plottable['size'], PlottableAxis): # get the raw values for the sizes x = self.plottable['size'].value() # calculate the normalized size size = default_size*x/self.size_normalization # otherwise, set a single size else: # get default, first from pop and then from panel size = self.pop.plotkw.get('s', self.plottable['size']) # return a valid input to plt.scatter(s=...) return size def get_colors(self): ''' The colors of the bubbles. Returns ------- c : an input for plt.scatter Either a single color, or an array with variable colors for each bubble according to some quantity. ''' # should we ignore any variable color instructions? if self.pop.respond_to_color == False: color = self.pop.color # should we use a variable color? elif isinstance(self.plottable['color'], PlottableAxis): # get the raw values to go into the color x = self.plottable['color'].value() # FIXME - make sure to check vmin/vmax are valid #if (self.vmin is None) or (self.vmax is None): # raise AtlasError(f''' # It looks like you're trying to use # {self.plottable['color']} to set variable # colors for bubbles. To do so, please make # sure it has finite values defined for its # .vmin and .vmax attributes. # ''') # make sure we have *some* normalizer defined f = plt.matplotlib.colors.Normalize self.color_normalization = (self.color_normalization or f(vmin=self.vmin, vmax=self.vmax)) normalized = self.color_normalization(x) color = self.cmap(normalized) # finally, should we just use a default color? else: # get default, first from pop and then from panel color = self.pop.color if color is None: color = self.plottable['color'] # return a valid input to any one of the following: # plt.scatter(c=...) # plt.scatter(edgecolors=...) # plt.scatter(facecolors=...) return color def kw(self, key=None, **kwargs): ''' Do a little decision-making about the plotting keyword arguments, pulling defaults from each population where needed. Parameter --------- key : str The population for which we should pull keywords. If None, go with the current population. **kwargs : dict All other keywords will be directed toward overwriting individual population defaults. ''' # identify the population we're working with if key is None: key = self.key #else: self.point_at(key) # define some default keywords, which can be over-written default = dict(s=self.get_sizes(), marker=self.pop.marker, linewidth=self.pop.linewidth, alpha=self.pop.alpha, zorder=self.pop.zorder, label=self.pop.label) # sort out whether faces and/or edges should get color c=self.get_colors() if self.pop.filled: default['facecolors'] = c else: default['facecolors'] = 'none' if self.pop.outlined: default['edgecolors'] = c else: default['edgecolors'] = 'none' # if any other keywords are provided, overwrite these defaults for k, v in kwargs.items(): default[k] = v return default def plot(self, key, ax=None, labelkw={}, **kwargs): ''' Add the points for a particular population to this panel. Parameters ---------- key : str The population (as an item in the self.pops dictionary) to add. ax : Into what ax should we place this plot? If None, use default. labelkw : dict Keywords for labeling the planet names. 
**kwargs : dict Any extra keywords will be passed on to `scatter` ''' # focus attention on that population self.point_at(key) # make sure we're plotting into the appropriate axes try: plt.sca(self.ax) except AttributeError: self.setup(ax=ax) # add the scattered points self.scattered[key] = self.ax.scatter(self.x, self.y, **self.kw(key,**kwargs)) # set the scales, limits, labels self.finish_plot(labelkw=labelkw)
35.787546
91
0.570624
9,657
0.988434
0
0
0
0
0
0
5,383
0.550972
718a929c80bd8d634b1687ba5560ac7c6a4f6fe7
264
py
Python
venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py
pengwu/scapy_env
3db9c5dea2e219048a2387649d6d89be342903d9
[ "MIT" ]
null
null
null
venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py
pengwu/scapy_env
3db9c5dea2e219048a2387649d6d89be342903d9
[ "MIT" ]
null
null
null
venv/lib/python2.7/dist-packages/landscape/sysinfo/load.py
pengwu/scapy_env
3db9c5dea2e219048a2387649d6d89be342903d9
[ "MIT" ]
null
null
null
import os

from twisted.internet.defer import succeed


class Load(object):

    def register(self, sysinfo):
        self._sysinfo = sysinfo

    def run(self):
        self._sysinfo.add_header("System load", str(os.getloadavg()[0]))
        return succeed(None)
18.857143
72
0.666667
207
0.784091
0
0
0
0
0
0
13
0.049242
718c6a96017a844d29bf1f77cede2d377a4c970c
675
py
Python
src/boh_api/viewsets.py
dougmorato/bag-of-holding
8a7bc45ced8837bdb00da60dcfb496bb0271f161
[ "Apache-2.0" ]
null
null
null
src/boh_api/viewsets.py
dougmorato/bag-of-holding
8a7bc45ced8837bdb00da60dcfb496bb0271f161
[ "Apache-2.0" ]
1
2021-06-10T23:58:45.000Z
2021-06-10T23:58:45.000Z
src/boh_api/viewsets.py
dougmorato/bag-of-holding
8a7bc45ced8837bdb00da60dcfb496bb0271f161
[ "Apache-2.0" ]
null
null
null
from rest_framework import viewsets

from boh import models

from . import serializers


class OrganizationViewSet(viewsets.ModelViewSet):
    queryset = models.Organization.objects.all()
    serializer_class = serializers.OrganizationSerializer


class ApplicationViewSet(viewsets.ModelViewSet):
    queryset = models.Application.objects.all()
    serializer_class = serializers.ApplicationSerializer


class TagViewSet(viewsets.ModelViewSet):
    queryset = models.Tag.objects.all()
    serializer_class = serializers.TagSerializer


class PersonViewSet(viewsets.ModelViewSet):
    queryset = models.Person.objects.all()
    serializer_class = serializers.PersonSerializer
25.961538
57
0.8
576
0.853333
0
0
0
0
0
0
0
0
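ModelViewSets such as the ones above are normally exposed through a DRF router, which generates the list and detail routes automatically. A hedged sketch of that wiring; the URL prefixes and the placement in a urls module are assumptions, not taken from the repository:

from rest_framework import routers

from . import viewsets  # the module shown in the record above

router = routers.DefaultRouter()
router.register(r'organizations', viewsets.OrganizationViewSet)
router.register(r'applications', viewsets.ApplicationViewSet)
router.register(r'tags', viewsets.TagViewSet)
router.register(r'people', viewsets.PersonViewSet)

# a project urls.py would then include these generated routes
urlpatterns = router.urls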
718d447c90c45e89882aa6196cb3c3ab761ce174
2,207
py
Python
githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py
TatendaNoreen/Python
df9799bbea84af03c1fb3b29fada1e16c04bab80
[ "MIT" ]
null
null
null
githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py
TatendaNoreen/Python
df9799bbea84af03c1fb3b29fada1e16c04bab80
[ "MIT" ]
null
null
null
githubintro-fe2d832af2bad7d6b27d036c205cc9d8414b2183/CommunicationAnimation.py
TatendaNoreen/Python
df9799bbea84af03c1fb3b29fada1e16c04bab80
[ "MIT" ]
null
null
null
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot
import agentframework
import csv
import matplotlib.animation

#create environment in which agents will operate
environment=[]

#read csv downloaded file
f = open('in.txt', newline='')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader:
    rowlist=[]                  # A list of rows
    environment.append(rowlist)
    for value in row:           # A list of value
        #print(value)           # Floats
        rowlist.append(value)
f.close()   # Don't close until you are done with the reader;
            # the data is read on request.

#def distance_between(agents_row_a, agents_row_b):
#    return (((agents_row_a.x - agents_row_b.x)**2) +
#        ((agents_row_a.y - agents_row_b.y)**2))**0.5

num_of_agents = 10
num_of_iterations = 10
neighbourhood = 20

fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])

# Make the agents and connecting with the environment.
agents = []

def update(frame_number):
    fig.clear()
    for i in range(num_of_agents):
        agents.append(agentframework.Agent(environment,agents))

    # Move and eat agents with every move or iteration.
    for j in range(num_of_iterations):
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share_with_neighbours(neighbourhood)
            # Loop through the agents in self.agents .
            # Calculate the distance between self and the current other agent:
            # distance = self.distance_between(agent)
            # If distance is less than or equal to the neighbourhood
            # Sum self.store and agent.store .
            # Divide sum by two to calculate average.
            # self.store = average
            # agent.store = average
            # End if
            # End loop

    # plot
    matplotlib.pyplot.xlim(0, 299)
    matplotlib.pyplot.ylim(0, 299)
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i].x,agents[i].y)
    matplotlib.pyplot.imshow(environment)

animation = matplotlib.animation.FuncAnimation(fig, update, interval=1)
matplotlib.pyplot.show()
24.797753
71
0.645673
0
0
0
0
0
0
0
0
850
0.385138
718e41b1051f8c81e49363a47885bbfedb81564d
2,027
py
Python
external/model-preparation-algorithm/tests/conftest.py
opencv/openvino_training_extensions
f5d809741e192a2345558efc75899a475019cf98
[ "Apache-2.0" ]
775
2019-03-01T02:13:33.000Z
2020-09-07T22:49:15.000Z
external/model-preparation-algorithm/tests/conftest.py
opencv/openvino_training_extensions
f5d809741e192a2345558efc75899a475019cf98
[ "Apache-2.0" ]
229
2019-02-28T21:37:08.000Z
2020-09-07T15:11:49.000Z
external/model-preparation-algorithm/tests/conftest.py
opencv/openvino_training_extensions
f5d809741e192a2345558efc75899a475019cf98
[ "Apache-2.0" ]
290
2019-02-28T20:32:11.000Z
2020-09-07T05:51:41.000Z
# Copyright (C) 2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#

try:
    import e2e.fixtures

    from e2e.conftest_utils import * # noqa
    from e2e.conftest_utils import pytest_addoption as _e2e_pytest_addoption # noqa
    from e2e import config # noqa
    from e2e.utils import get_plugins_from_packages
    pytest_plugins = get_plugins_from_packages([e2e])
except ImportError:
    _e2e_pytest_addoption = None
    pass

import config
import pytest

from ote_sdk.test_suite.pytest_insertions import *
from ote_sdk.test_suite.training_tests_common import REALLIFE_USECASE_CONSTANT

pytest_plugins = get_pytest_plugins_from_ote()

ote_conftest_insertion(default_repository_name='ote/training_extensions/external/model-preparation-algorithm')


@pytest.fixture
def ote_test_domain_fx():
    return 'model-preparation-algorithm'


@pytest.fixture
def ote_test_scenario_fx(current_test_parameters_fx):
    assert isinstance(current_test_parameters_fx, dict)
    if current_test_parameters_fx.get('usecase') == REALLIFE_USECASE_CONSTANT:
        return 'performance'
    else:
        return 'integration'


@pytest.fixture(scope='session')
def ote_templates_root_dir_fx():
    import os.path as osp
    import logging
    logger = logging.getLogger(__name__)
    root = osp.dirname(osp.dirname(osp.realpath(__file__)))
    root = f'{root}/configs/'
    logger.debug(f'overloaded ote_templates_root_dir_fx: return {root}')
    return root


@pytest.fixture(scope='session')
def ote_reference_root_dir_fx():
    import os.path as osp
    import logging
    logger = logging.getLogger(__name__)
    root = osp.dirname(osp.dirname(osp.realpath(__file__)))
    root = f'{root}/tests/reference/'
    logger.debug(f'overloaded ote_reference_root_dir_fx: return {root}')
    return root


# pytest magic
def pytest_generate_tests(metafunc):
    ote_pytest_generate_tests_insertion(metafunc)


def pytest_addoption(parser):
    ote_pytest_addoption_insertion(parser)
32.174603
111
0.750863
0
0
0
0
1,046
0.516034
0
0
411
0.202763
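The conftest above exposes its values through pytest fixtures, which test functions consume simply by naming them as parameters. A hypothetical test using ote_test_domain_fx (the test name and assertion are illustrative, not from the repository):

def test_domain_is_model_preparation_algorithm(ote_test_domain_fx):
    # pytest injects the fixture's return value by matching the parameter name
    assert ote_test_domain_fx == 'model-preparation-algorithm'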
718e43027722775db4c64b0811dfc59a1835349b
2,418
py
Python
ibis/udf/validate.py
rtpsw/ibis
d7318fdf87121cd8fadbcf0369a2b217aab3053a
[ "Apache-2.0" ]
986
2017-06-07T07:33:01.000Z
2022-03-31T13:00:46.000Z
ibis/udf/validate.py
marlenezw/ibis
14b9baf3e1021e8698e7f0ae3c0ae5747543431c
[ "Apache-2.0" ]
2,623
2017-06-07T18:29:11.000Z
2022-03-31T20:27:31.000Z
ibis/udf/validate.py
marlenezw/ibis
14b9baf3e1021e8698e7f0ae3c0ae5747543431c
[ "Apache-2.0" ]
238
2017-06-26T19:02:58.000Z
2022-03-31T15:18:29.000Z
"""Validation for UDFs.

Warning: This is an experimental module and API here can change without notice.

DO NOT USE DIRECTLY.
"""

from inspect import Parameter, Signature, signature
from typing import Any, Callable, List

import ibis.common.exceptions as com
from ibis.expr.datatypes import DataType


def _parameter_count(funcsig: Signature) -> int:
    """Get the number of positional-or-keyword or position-only parameters
    in a function signature.

    Parameters
    ----------
    funcsig : inspect.Signature
        A UDF signature

    Returns
    -------
    int
        The number of parameters
    """
    return sum(
        param.kind in {param.POSITIONAL_OR_KEYWORD, param.POSITIONAL_ONLY}
        for param in funcsig.parameters.values()
        if param.default is Parameter.empty
    )


def validate_input_type(
    input_type: List[DataType], func: Callable
) -> Signature:
    """Check that the declared number of inputs (the length of `input_type`)
    and the number of inputs to `func` are equal.

    If the signature of `func` uses *args, then no check is done (since no
    check can be done).

    Parameters
    ----------
    input_type : List[DataType]
    func : callable

    Returns
    -------
    inspect.Signature
    """
    funcsig = signature(func)
    params = funcsig.parameters.values()

    # We can only do validation if all the positional arguments are explicit
    # (i.e. no *args)
    if not any(param.kind is Parameter.VAR_POSITIONAL for param in params):
        declared_parameter_count = len(input_type)
        function_parameter_count = _parameter_count(funcsig)

        if declared_parameter_count != function_parameter_count:
            raise TypeError(
                'Function signature {!r} has {:d} parameters, '
                'input_type has {:d}. These must match. Non-column '
                'parameters must be defined as keyword only, i.e., '
                'def foo(col, *, function_param).'.format(
                    func.__name__,
                    function_parameter_count,
                    declared_parameter_count,
                )
            )

    return funcsig


def validate_output_type(output_type: Any) -> None:
    """Check that the output type is a single datatype."""
    if isinstance(output_type, list):
        raise com.IbisTypeError(
            'The output type of a UDF must be a single datatype.'
        )
28.447059
79
0.639371
0
0
0
0
0
0
0
0
1,131
0.467742
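To make the arity check in validate_input_type above concrete, here is a small sketch of one call that passes and one that raises. The dt.double types are standard ibis datatypes and the import path follows the record's file path, but the add function itself is made up for illustration:

import ibis.expr.datatypes as dt

from ibis.udf.validate import validate_input_type


def add(a, b):
    return a + b


# two declared inputs for a two-parameter function: returns the signature
sig = validate_input_type([dt.double, dt.double], add)
print(sig)  # (a, b)

# three declared inputs for a two-parameter function: raises TypeError
try:
    validate_input_type([dt.double, dt.double, dt.double], add)
except TypeError as exc:
    print(exc)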
71915f8963ebf873674df05ecd7d2ac82cadfb43
5,629
py
Python
packages/stattik/stattik/schema/schema.py
stattikcms/stattik
5c96d600d105461edb95a11d8050dee3c32edd1e
[ "MIT" ]
1
2021-11-05T06:24:28.000Z
2021-11-05T06:24:28.000Z
packages/stattik/stattik/schema/schema.py
stattikcms/stattik
5c96d600d105461edb95a11d8050dee3c32edd1e
[ "MIT" ]
null
null
null
packages/stattik/stattik/schema/schema.py
stattikcms/stattik
5c96d600d105461edb95a11d8050dee3c32edd1e
[ "MIT" ]
null
null
null
import inspect from ariadne import make_executable_schema, QueryType, MutationType, SubscriptionType from .resolver import * # # Schema # class GrammarError(Exception): pass keywords = ['query', 'mutation', 'subscription', 'source'] class SchemaMetaDict(dict): ''' Dictionary that allows decorated schema entry functions to be overloaded ''' def __setitem__(self, key, value): if key in self and callable(value) and hasattr(value, 'name'): value.next_func = self[key] if not hasattr(value.next_func, 'name'): raise GrammarError(f'Redefinition of {key}. Perhaps an earlier {key} is missing @_') super().__setitem__(key, value) def __getitem__(self, key): #if key not in self and key.isupper() and key[:1] != '_': if key not in self and key.isupper() and not key[:1] in keywords: return key.upper() else: return super().__getitem__(key) def _query_decorator(name): def decorate(func): func.tag = 'query' func.name = name return func return decorate def _mutation_decorator(name): def decorate(func): func.tag = 'mutation' func.name = name return func return decorate def _subscription_decorator(name): def decorate(func): func.tag = 'subscription' func.name = name return func return decorate def _source_decorator(name): def decorate(func): func.tag = 'source' func.name = name return func return decorate class SchemaMeta(type): @classmethod def __prepare__(meta, *args, **kwargs): d = SchemaMetaDict() d['query'] = _query_decorator d['mutation'] = _mutation_decorator d['subscription'] = _subscription_decorator d['source'] = _source_decorator return d def __new__(meta, selfname, bases, attributes): #del attributes['_'] for key in keywords: del attributes[key] self = super().__new__(meta, selfname, bases, attributes) self._build(list(attributes.items())) return self class Schema(metaclass=SchemaMeta): def __init__(self, parent=None): self.parent = parent self.children = [] if parent: parent.add_child(self) self.db = parent.db else: self.db = self self.entries = self.__class__.entries @classmethod def produce(self, parent=None): schema = self(parent) return schema def add_child(self, schema): self.children.append(schema) def get_gql(self): gql = [inspect.getdoc(self)] for child in self.children: gql.append(child.get_gql()) return "\n".join(gql) def register(self): for entry in self.entries: entry.register(self) for child in self.children: child.register() def add(self, r): self.entries.append(r) @classmethod def __collect_functions(self, definitions): ''' Collect all of the tagged grammar entries ''' entries = [ (name, value) for name, value in definitions if callable(value) and hasattr(value, 'name') ] return entries @classmethod def _build(self, definitions): if vars(self).get('_build', False): return # Collect all of the entry functions from the class definition functions = self.__collect_functions(definitions) self.entries = self.__build_entries(functions) @classmethod def __build_entries(self, functions): entries = [] errors = '' for name, func in functions: entry = self._build_entry(func) entries.append(entry) return entries @classmethod def _build_entry(self, func): tag = func.tag name = func.name prodname = func.__name__ unwrapped = inspect.unwrap(func) filename = unwrapped.__code__.co_filename lineno = unwrapped.__code__.co_firstlineno logger.debug(f"_build_entry:tag: {tag}") logger.debug(f"_build_entry:name: {name}") logger.debug(f"_build_entry:prodname: {prodname}") logger.debug(f"_build_entry:unwrapped: {unwrapped}") #entry = Resolver(name, func, prodname=prodname, filename=filename, lineno=lineno) 
entry = entry_factories[tag](self, name, func, prodname=prodname, filename=filename, lineno=lineno) logger.debug(f"_build_entry:entry: {entry}") return entry # This is for testing or in case you don't want a database as the root schema class RootSchema(Schema): """ type Query { dummy: Int! } type Mutation { setDummy(val: Int!): Int } type Subscription { dummy: Int } """ instance = None def __init__(self, parent=None): super().__init__(parent) Schema.instance = self self.query_type = QueryType() self.mutation_type = MutationType() self.subscription_type = SubscriptionType() @classmethod def produce(self): if self.instance: return self.instance self.instance = schema = self() return schema def make_executable(self): self.register() #return make_executable_schema(type_defs, self.query) return make_executable_schema( self.get_gql(), self.query_type, self.mutation_type, self.subscription_type )
28.004975
107
0.607035
4,718
0.83816
0
0
2,120
0.376621
0
0
1,052
0.186889
7193df3e00cf1bbbc7e779239b2adfcf9b4f4173
78,616
py
Python
toontown/battle/DistributedBattleBaseAI.py
DankMickey/Project-Altis-Educational-Source
0a74999fb52d4e690a41b984703119f63c372d20
[ "Apache-2.0" ]
1
2021-06-25T02:56:32.000Z
2021-06-25T02:56:32.000Z
toontown/battle/DistributedBattleBaseAI.py
kool601/Project-Altis-Educational-Source
0a74999fb52d4e690a41b984703119f63c372d20
[ "Apache-2.0" ]
null
null
null
toontown/battle/DistributedBattleBaseAI.py
kool601/Project-Altis-Educational-Source
0a74999fb52d4e690a41b984703119f63c372d20
[ "Apache-2.0" ]
2
2017-12-20T17:46:56.000Z
2021-06-25T02:56:36.000Z
import random from otp.ai.AIBase import * from direct.distributed.ClockDelta import * from toontown.battle.BattleBase import * from toontown.battle.BattleCalculatorAI import * from toontown.toonbase.ToontownBattleGlobals import * from toontown.battle.SuitBattleGlobals import * from pandac.PandaModules import * from toontown.battle import BattleExperienceAI from direct.distributed import DistributedObjectAI from direct.fsm import ClassicFSM, State from direct.fsm import State from direct.task import Task from direct.directnotify import DirectNotifyGlobal from toontown.ai import DatabaseObject from toontown.toon import DistributedToonAI from toontown.toon import InventoryBase from toontown.toonbase import ToontownGlobals from toontown.toon import NPCToons from otp.ai.MagicWordGlobal import * from toontown.pets import DistributedPetProxyAI class DistributedBattleBaseAI(DistributedObjectAI.DistributedObjectAI, BattleBase): notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBaseAI') def __init__(self, air, zoneId, finishCallback = None, maxSuits = 4, bossBattle = 0, tutorialFlag = 0, interactivePropTrackBonus = -1): DistributedObjectAI.DistributedObjectAI.__init__(self, air) self.serialNum = 0 self.zoneId = zoneId self.maxSuits = maxSuits self.setBossBattle(bossBattle) self.tutorialFlag = tutorialFlag self.interactivePropTrackBonus = interactivePropTrackBonus self.finishCallback = finishCallback self.avatarExitEvents = [] self.responses = {} self.adjustingResponses = {} self.joinResponses = {} self.adjustingSuits = [] self.adjustingToons = [] self.numSuitsEver = 0 BattleBase.__init__(self) self.streetBattle = 1 self.pos = Point3(0, 0, 0) self.initialSuitPos = Point3(0, 0, 0) self.toonExp = {} self.toonOrigQuests = {} self.toonItems = {} self.toonOrigMerits = {} self.toonMerits = {} self.toonParts = {} self.battleCalc = BattleCalculatorAI(self, tutorialFlag) if self.air.suitInvasionManager.getInvading(): mult = getInvasionMultiplier() self.battleCalc.setSkillCreditMultiplier(mult) if self.air.holidayManager.isMoreXpHolidayRunning(): mult = getMoreXpHolidayMultiplier() self.battleCalc.setSkillCreditMultiplier(mult) self.fsm = None self.clearAttacks() self.ignoreFaceOffDone = 0 self.needAdjust = 0 self.movieHasBeenMade = 0 self.movieHasPlayed = 0 self.rewardHasPlayed = 0 self.movieRequested = 0 self.ignoreResponses = 0 self.ignoreAdjustingResponses = 0 self.taskNames = [] self.exitedToons = [] self.suitsKilled = [] self.suitsKilledThisBattle = [] self.suitsKilledPerFloor = [] self.suitsEncountered = [] self.newToons = [] self.newSuits = [] self.numNPCAttacks = 0 self.npcAttacks = {} self.pets = {} self.fireCount = 0 self.fsm = ClassicFSM.ClassicFSM('DistributedBattleAI', [State.State('FaceOff', self.enterFaceOff, self.exitFaceOff, ['WaitForInput', 'Resume']), State.State('WaitForJoin', self.enterWaitForJoin, self.exitWaitForJoin, ['WaitForInput', 'Resume']), State.State('WaitForInput', self.enterWaitForInput, self.exitWaitForInput, ['MakeMovie', 'Resume']), State.State('MakeMovie', self.enterMakeMovie, self.exitMakeMovie, ['PlayMovie', 'Resume']), State.State('PlayMovie', self.enterPlayMovie, self.exitPlayMovie, ['WaitForJoin', 'Reward', 'Resume']), State.State('Reward', self.enterReward, self.exitReward, ['Resume']), State.State('Resume', self.enterResume, self.exitResume, []), State.State('Off', self.enterOff, self.exitOff, ['FaceOff', 'WaitForJoin'])], 'Off', 'Off') self.joinableFsm = ClassicFSM.ClassicFSM('Joinable', [State.State('Joinable', self.enterJoinable, 
self.exitJoinable, ['Unjoinable']), State.State('Unjoinable', self.enterUnjoinable, self.exitUnjoinable, ['Joinable'])], 'Unjoinable', 'Unjoinable') self.joinableFsm.enterInitialState() self.runableFsm = ClassicFSM.ClassicFSM('Runable', [State.State('Runable', self.enterRunable, self.exitRunable, ['Unrunable']), State.State('Unrunable', self.enterUnrunable, self.exitUnrunable, ['Runable'])], 'Unrunable', 'Unrunable') self.runableFsm.enterInitialState() self.adjustFsm = ClassicFSM.ClassicFSM('Adjust', [State.State('Adjusting', self.enterAdjusting, self.exitAdjusting, ['NotAdjusting', 'Adjusting']), State.State('NotAdjusting', self.enterNotAdjusting, self.exitNotAdjusting, ['Adjusting'])], 'NotAdjusting', 'NotAdjusting') self.adjustFsm.enterInitialState() self.fsm.enterInitialState() self.startTime = globalClock.getRealTime() self.adjustingTimer = Timer() def clearAttacks(self): self.toonAttacks = {} self.suitAttacks = getDefaultSuitAttacks() def requestDelete(self): if hasattr(self, 'fsm'): self.fsm.request('Off') self.__removeTaskName(self.uniqueName('make-movie')) DistributedObjectAI.DistributedObjectAI.requestDelete(self) def delete(self): self.notify.debug('deleting battle') self.fsm.request('Off') self.ignoreAll() self.__removeAllTasks() del self.fsm del self.joinableFsm del self.runableFsm del self.adjustFsm self.__cleanupJoinResponses() self.timer.stop() del self.timer self.adjustingTimer.stop() del self.adjustingTimer self.battleCalc.cleanup() del self.battleCalc for suit in self.suits: del suit.battleTrap del self.finishCallback for petProxy in self.pets.values(): petProxy.requestDelete() DistributedObjectAI.DistributedObjectAI.delete(self) def pause(self): self.timer.stop() self.adjustingTimer.stop() def unpause(self): self.timer.resume() self.adjustingTimer.resume() def abortBattle(self): self.notify.debug('%s.abortBattle() called.' 
% self.doId) toonsCopy = self.toons[:] for toonId in toonsCopy: self.__removeToon(toonId) if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie': self.exitedToons.append(toonId) self.d_setMembers() self.b_setState('Resume') self.__removeAllTasks() self.timer.stop() self.adjustingTimer.stop() def __removeSuit(self, suit): self.notify.debug('__removeSuit(%d)' % suit.doId) self.suits.remove(suit) self.activeSuits.remove(suit) if self.luredSuits.count(suit) == 1: self.luredSuits.remove(suit) self.suitGone = 1 del suit.battleTrap def findSuit(self, id): for s in self.suits: if s.doId == id: return s return None def __removeTaskName(self, name): if self.taskNames.count(name): self.taskNames.remove(name) self.notify.debug('removeTaskName() - %s' % name) taskMgr.remove(name) def __removeAllTasks(self): for n in self.taskNames: self.notify.debug('removeAllTasks() - %s' % n) taskMgr.remove(n) self.taskNames = [] def __removeToonTasks(self, toonId): name = self.taskName('running-toon-%d' % toonId) self.__removeTaskName(name) name = self.taskName('to-pending-av-%d' % toonId) self.__removeTaskName(name) def getLevelDoId(self): return 0 def getBattleCellId(self): return 0 def getPosition(self): self.notify.debug('getPosition() - %s' % self.pos) return [self.pos[0], self.pos[1], self.pos[2]] def getInitialSuitPos(self): p = [] p.append(self.initialSuitPos[0]) p.append(self.initialSuitPos[1]) p.append(self.initialSuitPos[2]) return p def setBossBattle(self, bossBattle): self.bossBattle = bossBattle def getBossBattle(self): return self.bossBattle def b_setState(self, state): self.notify.debug('network:setState(%s)' % state) stime = globalClock.getRealTime() + SERVER_BUFFER_TIME self.sendUpdate('setState', [state, globalClockDelta.localToNetworkTime(stime)]) self.setState(state) def setState(self, state): self.fsm.request(state) def getState(self): return [self.fsm.getCurrentState().getName(), globalClockDelta.getRealNetworkTime()] def d_setMembers(self): self.notify.debug('network:setMembers()') self.sendUpdate('setMembers', self.getMembers()) def getMembers(self): suits = [] for s in self.suits: suits.append(s.doId) joiningSuits = '' for s in self.joiningSuits: joiningSuits += str(suits.index(s.doId)) pendingSuits = '' for s in self.pendingSuits: pendingSuits += str(suits.index(s.doId)) activeSuits = '' for s in self.activeSuits: activeSuits += str(suits.index(s.doId)) luredSuits = '' for s in self.luredSuits: luredSuits += str(suits.index(s.doId)) suitTraps = '' for s in self.suits: if s.battleTrap == NO_TRAP: suitTraps += '9' elif s.battleTrap == BattleCalculatorAI.TRAP_CONFLICT: suitTraps += '9' else: suitTraps += str(s.battleTrap) toons = [] for t in self.toons: toons.append(t) joiningToons = '' for t in self.joiningToons: joiningToons += str(toons.index(t)) pendingToons = '' for t in self.pendingToons: pendingToons += str(toons.index(t)) activeToons = '' for t in self.activeToons: activeToons += str(toons.index(t)) runningToons = '' for t in self.runningToons: runningToons += str(toons.index(t)) self.notify.debug('getMembers() - suits: %s joiningSuits: %s pendingSuits: %s activeSuits: %s luredSuits: %s suitTraps: %s toons: %s joiningToons: %s pendingToons: %s activeToons: %s runningToons: %s' % (suits, joiningSuits, pendingSuits, activeSuits, luredSuits, suitTraps, toons, joiningToons, pendingToons, activeToons, runningToons)) return [suits, joiningSuits, pendingSuits, activeSuits, luredSuits, suitTraps, toons, joiningToons, pendingToons, 
activeToons, runningToons, globalClockDelta.getRealNetworkTime()] def d_adjust(self): self.notify.debug('network:adjust()') self.sendUpdate('adjust', [globalClockDelta.getRealNetworkTime()]) def getInteractivePropTrackBonus(self): return self.interactivePropTrackBonus def getZoneId(self): return self.zoneId def getTaskZoneId(self): return self.zoneId def d_setMovie(self): self.notify.debug('network:setMovie()') self.sendUpdate('setMovie', self.getMovie()) self.__updateEncounteredCogs() def getMovie(self): suitIds = [] for s in self.activeSuits: suitIds.append(s.doId) p = [self.movieHasBeenMade] p.append(self.activeToons) p.append(suitIds) for t in self.activeToons: if t in self.toonAttacks: ta = self.toonAttacks[t] index = -1 id = ta[TOON_ID_COL] if id != -1: index = self.activeToons.index(id) track = ta[TOON_TRACK_COL] if (track == NO_ATTACK or attackAffectsGroup(track, ta[TOON_LVL_COL])) and track != NPCSOS and track != PETSOS: target = -1 if track == HEAL: if ta[TOON_LVL_COL] == 1: ta[TOON_HPBONUS_COL] = random.randint(0, 10000) elif track == SOS or track == NPCSOS or track == PETSOS: target = ta[TOON_TGT_COL] elif track == HEAL: if self.activeToons.count(ta[TOON_TGT_COL]) != 0: target = self.activeToons.index(ta[TOON_TGT_COL]) else: target = -1 elif suitIds.count(ta[TOON_TGT_COL]) != 0: target = suitIds.index(ta[TOON_TGT_COL]) else: target = -1 p = p + [index, track, ta[TOON_LVL_COL], target] p = p + ta[4:] else: index = self.activeToons.index(t) attack = getToonAttack(index) p = p + attack for i in range(4 - len(self.activeToons)): p = p + getToonAttack(-1) for sa in self.suitAttacks: index = -1 id = sa[SUIT_ID_COL] if id != -1: index = suitIds.index(id) if sa[SUIT_ATK_COL] == -1: targetIndex = -1 else: targetIndex = sa[SUIT_TGT_COL] if targetIndex == -1: self.notify.debug('suit attack: %d must be group' % sa[SUIT_ATK_COL]) else: toonId = self.activeToons[targetIndex] p = p + [index, sa[SUIT_ATK_COL], targetIndex] sa[SUIT_TAUNT_COL] = 0 if sa[SUIT_ATK_COL] != -1: suit = self.findSuit(id) sa[SUIT_TAUNT_COL] = getAttackTauntIndexFromIndex(suit, sa[SUIT_ATK_COL]) p = p + sa[3:] return p def d_setChosenToonAttacks(self): self.notify.debug('network:setChosenToonAttacks()') self.sendUpdate('setChosenToonAttacks', self.getChosenToonAttacks()) def getChosenToonAttacks(self): ids = [] tracks = [] levels = [] targets = [] for t in self.activeToons: if t in self.toonAttacks: ta = self.toonAttacks[t] else: ta = getToonAttack(t) ids.append(t) tracks.append(ta[TOON_TRACK_COL]) levels.append(ta[TOON_LVL_COL]) targets.append(ta[TOON_TGT_COL]) return [ids, tracks, levels, targets] def d_setBattleExperience(self): self.notify.debug('network:setBattleExperience()') self.sendUpdate('setBattleExperience', self.getBattleExperience()) def getBattleExperience(self): returnValue = BattleExperienceAI.getBattleExperience(4, self.activeToons, self.toonExp, self.battleCalc.toonSkillPtsGained, self.toonOrigQuests, self.toonItems, self.toonOrigMerits, self.toonMerits, self.toonParts, self.suitsKilled, self.helpfulToons) return returnValue def getToonUberStatus(self): fieldList = [] uberIndex = LAST_REGULAR_GAG_LEVEL + 1 for toon in self.activeToons: toonList = [] for trackIndex in range(MAX_TRACK_INDEX): toonList.append(toon.inventory.numItem(track, uberIndex)) fieldList.append(encodeUber(toonList)) return fieldList def addSuit(self, suit): self.notify.debug('addSuit(%d)' % suit.doId) self.newSuits.append(suit) self.suits.append(suit) suit.battleTrap = NO_TRAP self.numSuitsEver += 1 def __joinSuit(self, suit): 
self.joiningSuits.append(suit) toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME taskName = self.taskName('to-pending-av-%d' % suit.doId) self.__addJoinResponse(suit.doId, taskName) self.taskNames.append(taskName) taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(suit.doId, taskName)) def __serverJoinDone(self, avId, taskName): self.notify.debug('join for av: %d timed out on server' % avId) self.__removeTaskName(taskName) self.__makeAvPending(avId) return Task.done def __makeAvPending(self, avId): self.notify.debug('__makeAvPending(%d)' % avId) self.__removeJoinResponse(avId) self.__removeTaskName(self.taskName('to-pending-av-%d' % avId)) if self.toons.count(avId) > 0: self.joiningToons.remove(avId) self.pendingToons.append(avId) else: suit = self.findSuit(avId) if suit != None: if not suit.isEmpty(): if not self.joiningSuits.count(suit) == 1: self.notify.warning('__makeAvPending(%d) in zone: %d' % (avId, self.zoneId)) self.notify.warning('toons: %s' % self.toons) self.notify.warning('joining toons: %s' % self.joiningToons) self.notify.warning('pending toons: %s' % self.pendingToons) self.notify.warning('suits: %s' % self.suits) self.notify.warning('joining suits: %s' % self.joiningSuits) self.notify.warning('pending suits: %s' % self.pendingSuits) self.joiningSuits.remove(suit) self.pendingSuits.append(suit) else: self.notify.warning('makeAvPending() %d not in toons or suits' % avId) return self.d_setMembers() self.needAdjust = 1 self.__requestAdjust() def suitRequestJoin(self, suit): self.notify.debug('suitRequestJoin(%d)' % suit.getDoId()) if self.suitCanJoin(): self.addSuit(suit) self.__joinSuit(suit) self.d_setMembers() suit.prepareToJoinBattle() return 1 else: self.notify.warning('suitRequestJoin() - not joinable - joinable state: %s max suits: %d' % (self.joinableFsm.getCurrentState().getName(), self.maxSuits)) return 0 def addToon(self, avId): self.notify.debug('addToon(%d)' % avId) toon = self.getToon(avId) if toon == None: return 0 toon.stopToonUp() event = simbase.air.getAvatarExitEvent(avId) self.avatarExitEvents.append(event) self.accept(event, self.__handleUnexpectedExit, extraArgs=[avId]) event = 'inSafezone-%s' % avId self.avatarExitEvents.append(event) self.accept(event, self.__handleSuddenExit, extraArgs=[avId, 0]) self.newToons.append(avId) self.toons.append(avId) toon = simbase.air.doId2do.get(avId) if toon: if hasattr(self, 'doId'): toon.b_setBattleId(self.doId) else: toon.b_setBattleId(-1) messageToonAdded = 'Battle adding toon %s' % avId messenger.send(messageToonAdded, [avId]) if self.fsm != None and self.fsm.getCurrentState().getName() == 'PlayMovie': self.responses[avId] = 1 else: self.responses[avId] = 0 self.adjustingResponses[avId] = 0 if avId not in self.toonExp: p = [] for t in Tracks: p.append(toon.experience.getExp(t)) self.toonExp[avId] = p if avId not in self.toonOrigMerits: self.toonOrigMerits[avId] = toon.cogMerits[:] if avId not in self.toonMerits: self.toonMerits[avId] = [0, 0, 0, 0, 0] if avId not in self.toonOrigQuests: flattenedQuests = [] for quest in toon.quests: flattenedQuests.extend(quest) self.toonOrigQuests[avId] = flattenedQuests if avId not in self.toonItems: self.toonItems[avId] = ([], []) return 1 def __joinToon(self, avId, pos): self.joiningToons.append(avId) toPendingTime = MAX_JOIN_T + SERVER_BUFFER_TIME taskName = self.taskName('to-pending-av-%d' % avId) self.__addJoinResponse(avId, taskName, toon=1) taskMgr.doMethodLater(toPendingTime, self.__serverJoinDone, taskName, extraArgs=(avId, taskName)) 
self.taskNames.append(taskName) def __updateEncounteredCogs(self): for toon in self.activeToons: if toon in self.newToons: for suit in self.activeSuits: if hasattr(suit, 'dna'): self.suitsEncountered.append({'type': suit.dna.name, 'activeToons': self.activeToons[:]}) else: self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons)) return self.newToons.remove(toon) for suit in self.activeSuits: if suit in self.newSuits: if hasattr(suit, 'dna'): self.suitsEncountered.append({'type': suit.dna.name, 'activeToons': self.activeToons[:]}) else: self.notify.warning('Suit has no DNA in zone %s: toons involved = %s' % (self.zoneId, self.activeToons)) return self.newSuits.remove(suit) def __makeToonRun(self, toonId, updateAttacks): self.activeToons.remove(toonId) self.toonGone = 1 self.runningToons.append(toonId) taskName = self.taskName('running-toon-%d' % toonId) taskMgr.doMethodLater(TOON_RUN_T, self.__serverRunDone, taskName, extraArgs=(toonId, updateAttacks, taskName)) self.taskNames.append(taskName) def __serverRunDone(self, toonId, updateAttacks, taskName): self.notify.debug('run for toon: %d timed out on server' % toonId) self.__removeTaskName(taskName) self.__removeToon(toonId) self.d_setMembers() if len(self.toons) == 0: self.notify.debug('last toon is gone - battle is finished') self.b_setState('Resume') else: if updateAttacks == 1: self.d_setChosenToonAttacks() self.needAdjust = 1 self.__requestAdjust() return Task.done def __requestAdjust(self): if not self.fsm: return cstate = self.fsm.getCurrentState().getName() if cstate == 'WaitForInput' or cstate == 'WaitForJoin': if self.adjustFsm.getCurrentState().getName() == 'NotAdjusting': if self.needAdjust == 1: self.d_adjust() self.adjustingSuits = [] for s in self.pendingSuits: self.adjustingSuits.append(s) self.adjustingToons = [] for t in self.pendingToons: self.adjustingToons.append(t) self.adjustFsm.request('Adjusting') else: self.notify.debug('requestAdjust() - dont need to') else: self.notify.debug('requestAdjust() - already adjusting') else: self.notify.debug('requestAdjust() - in state: %s' % cstate) def __handleUnexpectedExit(self, avId): #TODO: fixme #disconnectCode = self.air.getAvatarDisconnectReason(avId) disconnectCode = "placeHolder dc code, need self.air.getAvatarDisconnectReason(avId)" self.notify.warning('toon: %d exited unexpectedly, reason %s' % (avId, disconnectCode)) #userAborted = disconnectCode == ToontownGlobals.DisconnectCloseWindow #TODO: fixme userAborted = False self.__handleSuddenExit(avId, userAborted) def __handleSuddenExit(self, avId, userAborted): self.__removeToon(avId, userAborted=userAborted) if self.fsm.getCurrentState().getName() == 'PlayMovie' or self.fsm.getCurrentState().getName() == 'MakeMovie': self.exitedToons.append(avId) self.d_setMembers() if len(self.toons) == 0: self.notify.debug('last toon is gone - battle is finished') self.__removeAllTasks() self.timer.stop() self.adjustingTimer.stop() self.b_setState('Resume') else: self.needAdjust = 1 self.__requestAdjust() def __removeSuit(self, suit): self.notify.debug('__removeSuit(%d)' % suit.doId) self.suits.remove(suit) self.activeSuits.remove(suit) if self.luredSuits.count(suit) == 1: self.luredSuits.remove(suit) self.suitGone = 1 del suit.battleTrap def __removeToon(self, toonId, userAborted = 0): self.notify.debug('__removeToon(%d)' % toonId) if self.toons.count(toonId) == 0: return self.battleCalc.toonLeftBattle(toonId) self.__removeToonTasks(toonId) self.toons.remove(toonId) if 
self.joiningToons.count(toonId) == 1: self.joiningToons.remove(toonId) if self.pendingToons.count(toonId) == 1: self.pendingToons.remove(toonId) if self.activeToons.count(toonId) == 1: activeToonIdx = self.activeToons.index(toonId) self.notify.debug('removing activeToons[%d], updating suitAttacks SUIT_HP_COL to match' % activeToonIdx) for i in range(len(self.suitAttacks)): if activeToonIdx < len(self.suitAttacks[i][SUIT_HP_COL]): del self.suitAttacks[i][SUIT_HP_COL][activeToonIdx] else: self.notify.warning("suitAttacks %d doesn't have an HP column for active toon index %d" % (i, activeToonIdx)) self.activeToons.remove(toonId) if self.runningToons.count(toonId) == 1: self.runningToons.remove(toonId) if self.adjustingToons.count(toonId) == 1: self.notify.warning('removeToon() - toon: %d was adjusting!' % toonId) self.adjustingToons.remove(toonId) self.toonGone = 1 if toonId in self.pets: self.pets[toonId].requestDelete() del self.pets[toonId] self.__removeResponse(toonId) self.__removeAdjustingResponse(toonId) self.__removeJoinResponses(toonId) event = simbase.air.getAvatarExitEvent(toonId) self.avatarExitEvents.remove(event) self.ignore(event) event = 'inSafezone-%s' % toonId self.avatarExitEvents.remove(event) self.ignore(event) toon = simbase.air.doId2do.get(toonId) if toon: toon.b_setBattleId(0) messageToonReleased = 'Battle releasing toon %s' % toon.doId messenger.send(messageToonReleased, [toon.doId]) if not userAborted: toon = self.getToon(toonId) if toon != None: toon.hpOwnedByBattle = 0 toon.d_setHp(toon.hp) toon.d_setInventory(toon.inventory.makeNetString()) self.air.cogPageManager.toonEncounteredCogs(toon, self.suitsEncountered, self.getTaskZoneId()) elif len(self.suits) > 0 and not self.streetBattle: self.notify.info('toon %d aborted non-street battle; clearing inventory and hp.' % toonId) toon = DistributedToonAI.DistributedToonAI(self.air) toon.doId = toonId empty = InventoryBase.InventoryBase(toon) toon.b_setInventory(empty.makeNetString()) toon.b_setHp(0) db = DatabaseObject.DatabaseObject(self.air, toonId) db.storeObject(toon, ['setInventory', 'setHp']) self.notify.info('killing mem leak from temporary DistributedToonAI %d' % toonId) toon.deleteDummy() def getToon(self, toonId): if toonId in self.air.doId2do: return self.air.doId2do[toonId] else: self.notify.warning('getToon() - toon: %d not in repository!' 
% toonId) return def toonRequestRun(self): toonId = self.air.getAvatarIdFromSender() if self.ignoreResponses == 1: self.notify.debug('ignoring response from toon: %d' % toonId) return self.notify.debug('toonRequestRun(%d)' % toonId) if not self.isRunable(): self.notify.warning('toonRequestRun() - not runable') return updateAttacks = 0 if self.activeToons.count(toonId) == 0: self.notify.warning('toon tried to run, but not found in activeToons: %d' % toonId) return for toon in self.activeToons: if toon in self.toonAttacks: ta = self.toonAttacks[toon] track = ta[TOON_TRACK_COL] level = ta[TOON_LVL_COL] if ta[TOON_TGT_COL] == toonId or track == HEAL and attackAffectsGroup(track, level) and len(self.activeToons) <= 2: healerId = ta[TOON_ID_COL] self.notify.debug('resetting toon: %ds attack' % healerId) self.toonAttacks[toon] = getToonAttack(toon, track=UN_ATTACK) self.responses[healerId] = 0 updateAttacks = 1 self.__makeToonRun(toonId, updateAttacks) self.d_setMembers() self.needAdjust = 1 self.__requestAdjust() def toonRequestJoin(self, x, y, z): toonId = self.air.getAvatarIdFromSender() self.notify.debug('toonRequestJoin(%d)' % toonId) self.signupToon(toonId, x, y, z) def toonDied(self): toonId = self.air.getAvatarIdFromSender() self.notify.debug('toonDied(%d)' % toonId) if toonId in self.toons: toon = self.getToon(toonId) if toon: toon.hp = -1 toon.inventory.zeroInv(1) self.__handleSuddenExit(toonId, 0) def signupToon(self, toonId, x, y, z): if self.toons.count(toonId): return if self.toonCanJoin(): if self.addToon(toonId): self.__joinToon(toonId, Point3(x, y, z)) self.d_setMembers() else: self.notify.warning('toonRequestJoin() - not joinable') self.d_denyLocalToonJoin(toonId) def d_denyLocalToonJoin(self, toonId): self.notify.debug('network: denyLocalToonJoin(%d)' % toonId) self.sendUpdateToAvatarId(toonId, 'denyLocalToonJoin', []) def resetResponses(self): self.responses = {} for t in self.toons: self.responses[t] = 0 self.ignoreResponses = 0 def allToonsResponded(self): for t in self.toons: if self.responses[t] == 0: return 0 self.ignoreResponses = 1 return 1 def __allPendingActiveToonsResponded(self): for t in self.pendingToons + self.activeToons: if self.responses[t] == 0: return 0 self.ignoreResponses = 1 return 1 def __allActiveToonsResponded(self): for t in self.activeToons: if self.responses[t] == 0: return 0 self.ignoreResponses = 1 return 1 def __removeResponse(self, toonId): del self.responses[toonId] if self.ignoreResponses == 0 and len(self.toons) > 0: currStateName = self.fsm.getCurrentState().getName() if currStateName == 'WaitForInput': if self.__allActiveToonsResponded(): self.notify.debug('removeResponse() - dont wait for movie') self.__requestMovie() elif currStateName == 'PlayMovie': if self.__allPendingActiveToonsResponded(): self.notify.debug('removeResponse() - surprise movie done') self.__movieDone() elif currStateName == 'Reward' or currStateName == 'BuildingReward': if self.__allActiveToonsResponded(): self.notify.debug('removeResponse() - surprise reward done') self.handleRewardDone() def __resetAdjustingResponses(self): self.adjustingResponses = {} for t in self.toons: self.adjustingResponses[t] = 0 self.ignoreAdjustingResponses = 0 def __allAdjustingToonsResponded(self): for t in self.toons: if self.adjustingResponses[t] == 0: return 0 self.ignoreAdjustingResponses = 1 return 1 def __removeAdjustingResponse(self, toonId): if toonId in self.adjustingResponses: del self.adjustingResponses[toonId] if self.ignoreAdjustingResponses == 0 and len(self.toons) > 0: if 
self.__allAdjustingToonsResponded(): self.__adjustDone() def __addJoinResponse(self, avId, taskName, toon = 0): if toon == 1: for jr in self.joinResponses.values(): jr[avId] = 0 self.joinResponses[avId] = {} for t in self.toons: self.joinResponses[avId][t] = 0 self.joinResponses[avId]['taskName'] = taskName def __removeJoinResponses(self, avId): self.__removeJoinResponse(avId) removedOne = 0 for j in self.joinResponses.values(): if avId in j: del j[avId] removedOne = 1 if removedOne == 1: for t in self.joiningToons: if self.__allToonsRespondedJoin(t): self.__makeAvPending(t) def __removeJoinResponse(self, avId): if avId in self.joinResponses: taskMgr.remove(self.joinResponses[avId]['taskName']) del self.joinResponses[avId] def __allToonsRespondedJoin(self, avId): jr = self.joinResponses[avId] for t in self.toons: if jr[t] == 0: return 0 return 1 def __cleanupJoinResponses(self): for jr in self.joinResponses.values(): taskMgr.remove(jr['taskName']) del jr def adjustDone(self): toonId = self.air.getAvatarIdFromSender() if self.ignoreAdjustingResponses == 1: self.notify.debug('adjustDone() - ignoring toon: %d' % toonId) return elif self.adjustFsm.getCurrentState().getName() != 'Adjusting': self.notify.warning('adjustDone() - in state %s' % self.fsm.getCurrentState().getName()) return elif self.toons.count(toonId) == 0: self.notify.warning('adjustDone() - toon: %d not in toon list' % toonId) return self.adjustingResponses[toonId] += 1 self.notify.debug('toon: %d done adjusting' % toonId) if self.__allAdjustingToonsResponded(): self.__adjustDone() def timeout(self): toonId = self.air.getAvatarIdFromSender() if self.ignoreResponses == 1: self.notify.debug('timeout() - ignoring toon: %d' % toonId) return elif self.fsm.getCurrentState().getName() != 'WaitForInput': self.notify.warning('timeout() - in state: %s' % self.fsm.getCurrentState().getName()) return elif self.toons.count(toonId) == 0: self.notify.warning('timeout() - toon: %d not in toon list' % toonId) return self.toonAttacks[toonId] = getToonAttack(toonId) self.d_setChosenToonAttacks() self.responses[toonId] += 1 self.notify.debug('toon: %d timed out' % toonId) if self.__allActiveToonsResponded(): self.__requestMovie(timeout=1) def movieDone(self): toonId = self.air.getAvatarIdFromSender() if self.ignoreResponses == 1: self.notify.debug('movieDone() - ignoring toon: %d' % toonId) return elif self.fsm.getCurrentState().getName() != 'PlayMovie': self.notify.warning('movieDone() - in state %s' % self.fsm.getCurrentState().getName()) return elif self.toons.count(toonId) == 0: self.notify.warning('movieDone() - toon: %d not in toon list' % toonId) return self.responses[toonId] += 1 self.notify.debug('toon: %d done with movie' % toonId) if self.__allPendingActiveToonsResponded(): self.__movieDone() else: self.timer.stop() self.timer.startCallback(TIMEOUT_PER_USER, self.__serverMovieDone) def rewardDone(self): toonId = self.air.getAvatarIdFromSender() stateName = self.fsm.getCurrentState().getName() if self.ignoreResponses == 1: self.notify.debug('rewardDone() - ignoring toon: %d' % toonId) return elif stateName not in ('Reward', 'BuildingReward', 'FactoryReward', 'MintReward', 'StageReward', 'CountryClubReward'): self.notify.warning('rewardDone() - in state %s' % stateName) return elif self.toons.count(toonId) == 0: self.notify.warning('rewardDone() - toon: %d not in toon list' % toonId) return self.responses[toonId] += 1 self.notify.debug('toon: %d done with reward' % toonId) if self.__allActiveToonsResponded(): self.handleRewardDone() else: 
self.timer.stop() self.timer.startCallback(TIMEOUT_PER_USER, self.serverRewardDone) def assignRewards(self): if self.rewardHasPlayed == 1: self.notify.debug('handleRewardDone() - reward has already played') return self.rewardHasPlayed = 1 BattleExperienceAI.assignRewards(self.activeToons, self.battleCalc.toonSkillPtsGained, self.suitsKilled, self.getTaskZoneId(), self.helpfulToons) def joinDone(self, avId): toonId = self.air.getAvatarIdFromSender() if self.toons.count(toonId) == 0: self.notify.warning('joinDone() - toon: %d not in toon list' % toonId) return if avId not in self.joinResponses: self.notify.debug('joinDone() - no entry for: %d - ignoring: %d' % (avId, toonId)) return jr = self.joinResponses[avId] if toonId in jr: jr[toonId] += 1 self.notify.debug('client with localToon: %d done joining av: %d' % (toonId, avId)) if self.__allToonsRespondedJoin(avId): self.__makeAvPending(avId) def requestAttack(self, track, level, av): toonId = self.air.getAvatarIdFromSender() if self.ignoreResponses == 1: self.notify.debug('requestAttack() - ignoring toon: %d' % toonId) return elif self.fsm.getCurrentState().getName() != 'WaitForInput': self.notify.warning('requestAttack() - in state: %s' % self.fsm.getCurrentState().getName()) return elif self.activeToons.count(toonId) == 0: self.notify.warning('requestAttack() - toon: %d not in toon list' % toonId) return self.notify.debug('requestAttack(%d, %d, %d, %d)' % (toonId, track, level, av)) toon = self.getToon(toonId) if toon == None: self.notify.warning('requestAttack() - no toon: %d' % toonId) return validResponse = 1 if track == SOS: self.notify.debug('toon: %d calls for help' % toonId) self.air.writeServerEvent('friendSOS', toonId, '%s' % av) self.toonAttacks[toonId] = getToonAttack(toonId, track=SOS, target=av) elif track == NPCSOS: self.notify.debug('toon: %d calls for help' % toonId) self.air.writeServerEvent('NPCSOS', toonId, '%s' % av) toon = self.getToon(toonId) if toon == None: return if av in toon.NPCFriendsDict: npcCollision = 0 if av in self.npcAttacks: callingToon = self.npcAttacks[av] if self.activeToons.count(callingToon) == 1: self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS) npcCollision = 1 if npcCollision == 0: self.toonAttacks[toonId] = getToonAttack(toonId, track=NPCSOS, level=5, target=av) self.numNPCAttacks += 1 self.npcAttacks[av] = toonId elif track == PETSOS: self.notify.debug('toon: %d calls for pet: %d' % (toonId, av)) self.air.writeServerEvent('PETSOS', toonId, '%s' % av) toon = self.getToon(toonId) if toon == None: return if not self.validate(toonId, level in toon.petTrickPhrases, 'requestAttack: invalid pet trickId: %s' % level): return self.toonAttacks[toonId] = getToonAttack(toonId, track=PETSOS, level=level, target=av) elif track == UN_ATTACK: self.notify.debug('toon: %d changed its mind' % toonId) self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK) if toonId in self.responses: self.responses[toonId] = 0 validResponse = 0 elif track == PASS: self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS) elif track == FIRE: if simbase.air.doId2do[toonId].getPinkSlips() < self.getFireCount() + 1: #Not allowed to fire, force them to pass >:D self.toonAttacks[toonId] = getToonAttack(toonId, track=PASS) else: #Allowed to fire self.setFireCount(self.fireCount + 1) self.toonAttacks[toonId] = getToonAttack(toonId, track=FIRE, target=av) else: if not self.validate(toonId, track >= 0 and track <= MAX_TRACK_INDEX, 'requestAttack: invalid track %s' % track): return if not self.validate(toonId, level 
>= 0 and level <= MAX_LEVEL_INDEX, 'requestAttack: invalid level %s' % level): return if toon.inventory.numItem(track, level) == 0: self.notify.warning('requestAttack() - toon has no item track: %d level: %d' % (track, level)) self.toonAttacks[toonId] = getToonAttack(toonId) return if track == HEAL: if self.runningToons.count(av) == 1 or attackAffectsGroup(track, level) and len(self.activeToons) < 2: self.toonAttacks[toonId] = getToonAttack(toonId, track=UN_ATTACK) validResponse = 0 else: self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av) else: self.toonAttacks[toonId] = getToonAttack(toonId, track=track, level=level, target=av) if av == -1 and not attackAffectsGroup(track, level): validResponse = 0 self.d_setChosenToonAttacks() if validResponse == 1: self.responses[toonId] += 1 self.notify.debug('toon: %d chose an attack' % toonId) if self.__allActiveToonsResponded(): self.__requestMovie() def requestPetProxy(self, av): toonId = self.air.getAvatarIdFromSender() if self.ignoreResponses == 1: self.notify.debug('requestPetProxy() - ignoring toon: %d' % toonId) return elif self.fsm.getCurrentState().getName() != 'WaitForInput': self.notify.warning('requestPetProxy() - in state: %s' % self.fsm.getCurrentState().getName()) return elif self.activeToons.count(toonId) == 0: self.notify.warning('requestPetProxy() - toon: %d not in toon list' % toonId) return self.notify.debug('requestPetProxy(%s, %s)' % (toonId, av)) toon = self.getToon(toonId) if toon == None: self.notify.warning('requestPetProxy() - no toon: %d' % toonId) return petId = toon.getPetId() zoneId = self.zoneId if petId == av: if not toonId in self.pets: def handleGetPetProxy(success, pet, petId = petId, zoneId = zoneId, toonId = toonId): if success: petProxy = DistributedPetProxyAI.DistributedPetProxyAI(self.air) petProxy.setOwnerId(pet.getOwnerId()) petProxy.setPetName(pet.getPetName()) petProxy.setTraitSeed(pet.getTraitSeed()) petProxy.setSafeZone(pet.getSafeZone()) petProxy.setForgetfulness(pet.getForgetfulness()) petProxy.setBoredomThreshold(pet.getBoredomThreshold()) petProxy.setRestlessnessThreshold(pet.getRestlessnessThreshold()) petProxy.setPlayfulnessThreshold(pet.getPlayfulnessThreshold()) petProxy.setLonelinessThreshold(pet.getLonelinessThreshold()) petProxy.setSadnessThreshold(pet.getSadnessThreshold()) petProxy.setFatigueThreshold(pet.getFatigueThreshold()) petProxy.setHungerThreshold(pet.getHungerThreshold()) petProxy.setConfusionThreshold(pet.getConfusionThreshold()) petProxy.setExcitementThreshold(pet.getExcitementThreshold()) petProxy.setAngerThreshold(pet.getAngerThreshold()) petProxy.setSurpriseThreshold(pet.getSurpriseThreshold()) petProxy.setAffectionThreshold(pet.getAffectionThreshold()) petProxy.setHead(pet.getHead()) petProxy.setEars(pet.getEars()) petProxy.setNose(pet.getNose()) petProxy.setTail(pet.getTail()) petProxy.setBodyTexture(pet.getBodyTexture()) petProxy.setColor(pet.getColor()) petProxy.setColorScale(pet.getColorScale()) petProxy.setEyeColor(pet.getEyeColor()) petProxy.setGender(pet.getGender()) petProxy.setLastSeenTimestamp(pet.getLastSeenTimestamp()) petProxy.setBoredom(pet.getBoredom()) petProxy.setRestlessness(pet.getRestlessness()) petProxy.setPlayfulness(pet.getPlayfulness()) petProxy.setLoneliness(pet.getLoneliness()) petProxy.setSadness(pet.getSadness()) petProxy.setAffection(pet.getAffection()) petProxy.setHunger(pet.getHunger()) petProxy.setConfusion(pet.getConfusion()) petProxy.setExcitement(pet.getExcitement()) petProxy.setFatigue(pet.getFatigue()) 
petProxy.setAnger(pet.getAnger()) petProxy.setSurprise(pet.getSurprise()) petProxy.setTrickAptitudes(pet.getTrickAptitudes()) pet.requestDelete() def deleted(task): petProxy.doNotDeallocateChannel = True petProxy.generateWithRequiredAndId(petId, self.air.districtId, self.zoneId) petProxy.broadcastDominantMood() self.pets[toonId] = petProxy return task.done self.acceptOnce(self.air.getAvatarExitEvent(petId), lambda: taskMgr.doMethodLater(0, deleted, self.uniqueName('petdel-%d' % petId))) else: self.notify.warning('error generating petProxy: %s' % petId) self.getPetProxyObject(petId, handleGetPetProxy) def suitCanJoin(self): return len(self.suits) < self.maxSuits and self.isJoinable() def toonCanJoin(self): return len(self.toons) < 4 and self.isJoinable() def __requestMovie(self, timeout = 0): if self.adjustFsm.getCurrentState().getName() == 'Adjusting': self.notify.debug('__requestMovie() - in Adjusting') self.movieRequested = 1 else: movieDelay = 0 if len(self.activeToons) == 0: self.notify.warning('only pending toons left in battle %s, toons = %s' % (self.doId, self.toons)) elif len(self.activeSuits) == 0: self.notify.warning('only pending suits left in battle %s, suits = %s' % (self.doId, self.suits)) elif len(self.activeToons) > 1 and not timeout: movieDelay = 1 self.fsm.request('MakeMovie') if movieDelay: taskMgr.doMethodLater(0.8, self.__makeMovie, self.uniqueName('make-movie')) self.taskNames.append(self.uniqueName('make-movie')) else: self.__makeMovie() def __makeMovie(self, task = None): self.notify.debug('makeMovie()') if self._DOAI_requestedDelete: self.notify.warning('battle %s requested delete, then __makeMovie was called!' % self.doId) if hasattr(self, 'levelDoId'): self.notify.warning('battle %s in level %s' % (self.doId, self.levelDoId)) return self.__removeTaskName(self.uniqueName('make-movie')) if self.movieHasBeenMade == 1: self.notify.debug('__makeMovie() - movie has already been made') return self.movieRequested = 0 self.movieHasBeenMade = 1 self.movieHasPlayed = 0 self.rewardHasPlayed = 0 for t in self.activeToons: if t not in self.toonAttacks: self.toonAttacks[t] = getToonAttack(t) attack = self.toonAttacks[t] if attack[TOON_TRACK_COL] == PASS or attack[TOON_TRACK_COL] == UN_ATTACK: self.toonAttacks[t] = getToonAttack(t) if self.toonAttacks[t][TOON_TRACK_COL] != NO_ATTACK: self.addHelpfulToon(t) self.battleCalc.calculateRound() for t in self.activeToons: self.sendEarnedExperience(t) toon = self.getToon(t) if toon != None: toon.hpOwnedByBattle = 1 if toon.immortalMode: toon.toonUp(toon.maxHp) self.d_setMovie() self.b_setState('PlayMovie') return Task.done def sendEarnedExperience(self, toonId): toon = self.getToon(toonId) if toon != None: expList = self.battleCalc.toonSkillPtsGained.get(toonId, None) if expList == None: toon.d_setEarnedExperience([]) else: roundList = [] for exp in expList: roundList.append(int(exp + 0.5)) toon.d_setEarnedExperience(roundList) def enterOff(self): return def exitOff(self): return def enterFaceOff(self): return def exitFaceOff(self): return def enterWaitForJoin(self): self.notify.debug('enterWaitForJoin()') if len(self.activeSuits) > 0: self.b_setState('WaitForInput') else: self.notify.debug('enterWaitForJoin() - no active suits') self.runableFsm.request('Runable') self.resetResponses() self.__requestAdjust() def exitWaitForJoin(self): pass def enterWaitForInput(self): self.notify.debug('enterWaitForInput()') self.joinableFsm.request('Joinable') self.runableFsm.request('Runable') self.resetResponses() self.__requestAdjust() if not 
self.tutorialFlag: self.timer.startCallback(SERVER_INPUT_TIMEOUT, self.__serverTimedOut) self.npcAttacks = {} for toonId in self.toons: if bboard.get('autoRestock-%s' % toonId, False): toon = self.air.doId2do.get(toonId) if toon is not None: toon.doRestock(0) def exitWaitForInput(self): self.npcAttacks = {} self.timer.stop() def __serverTimedOut(self): self.notify.debug('wait for input timed out on server') self.ignoreResponses = 1 self.__requestMovie(timeout=1) def enterMakeMovie(self): self.notify.debug('enterMakeMovie()') self.runableFsm.request('Unrunable') self.resetResponses() def exitMakeMovie(self): pass def enterPlayMovie(self): self.notify.debug('enterPlayMovie()') self.joinableFsm.request('Joinable') self.runableFsm.request('Unrunable') self.resetResponses() movieTime = TOON_ATTACK_TIME * (len(self.activeToons) + self.numNPCAttacks) + SUIT_ATTACK_TIME * len(self.activeSuits) + SERVER_BUFFER_TIME self.numNPCAttacks = 0 self.notify.debug('estimated upper bound of movie time: %f' % movieTime) self.timer.startCallback(movieTime, self.__serverMovieDone) def __serverMovieDone(self): self.notify.debug('movie timed out on server') self.ignoreResponses = 1 self.__movieDone() def serverRewardDone(self): self.notify.debug('reward timed out on server') self.ignoreResponses = 1 self.handleRewardDone() def handleRewardDone(self): self.b_setState('Resume') def exitPlayMovie(self): self.timer.stop() def __movieDone(self): self.notify.debug('__movieDone() - movie is finished') if self.movieHasPlayed == 1: self.notify.debug('__movieDone() - movie had already finished') return self.movieHasBeenMade = 0 self.movieHasPlayed = 1 self.ignoreResponses = 1 needUpdate = 0 toonHpDict = {} for toon in self.activeToons: toonHpDict[toon] = [0, 0, 0] actualToon = self.getToon(toon) self.notify.debug('BEFORE ROUND: toon: %d hp: %d' % (toon, actualToon.hp)) deadSuits = [] trapDict = {} suitsLuredOntoTraps = [] npcTrapAttacks = [] for activeToon in self.activeToons + self.exitedToons: if activeToon in self.toonAttacks: attack = self.toonAttacks[activeToon] track = attack[TOON_TRACK_COL] npc_level = None if track == NPCSOS: track, npc_level, npc_hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL]) if track == None: track = NPCSOS elif track == TRAP: npcTrapAttacks.append(attack) toon = self.getToon(attack[TOON_ID_COL]) av = attack[TOON_TGT_COL] if toon != None and av in toon.NPCFriendsDict: toon.NPCFriendsDict[av] -= 1 if toon.NPCFriendsDict[av] <= 0: del toon.NPCFriendsDict[av] toon.d_setNPCFriendsDict(toon.NPCFriendsDict) continue if track != NO_ATTACK: toonId = attack[TOON_ID_COL] level = attack[TOON_LVL_COL] if npc_level != None: level = npc_level if attack[TOON_TRACK_COL] == NPCSOS: toon = self.getToon(toonId) av = attack[TOON_TGT_COL] if toon != None and av in toon.NPCFriendsDict: toon.NPCFriendsDict[av] -= 1 if toon.NPCFriendsDict[av] <= 0: del toon.NPCFriendsDict[av] toon.d_setNPCFriendsDict(toon.NPCFriendsDict) elif track == PETSOS: pass elif track == FIRE: pass elif track != SOS: toon = self.getToon(toonId) if toon != None: check = toon.inventory.useItem(track, level) if check == -1: self.air.writeServerEvent('suspicious', toonId, 'Toon generating movie for non-existant gag track %s level %s' % (track, level)) self.notify.warning('generating movie for non-existant gag track %s level %s! 
avId: %s' % (track, level, toonId)) toon.d_setInventory(toon.inventory.makeNetString()) hps = attack[TOON_HP_COL] if track == SOS: self.notify.debug('toon: %d called for help' % toonId) elif track == NPCSOS: self.notify.debug('toon: %d called for help' % toonId) elif track == PETSOS: self.notify.debug('toon: %d called for pet' % toonId) for i in range(len(self.activeToons)): toon = self.getToon(self.activeToons[i]) if toon != None: if i < len(hps): hp = hps[i] if hp > 0: toonHpDict[toon.doId][0] += hp self.notify.debug('pet heal: toon: %d healed for hp: %d' % (toon.doId, hp)) else: self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps)) elif track == NPC_RESTOCK_GAGS: for at in self.activeToons: toon = self.getToon(at) if toon != None: toon.inventory.NPCMaxOutInv(npc_level) toon.d_setInventory(toon.inventory.makeNetString()) elif track == HEAL: if levelAffectsGroup(HEAL, level): for i in range(len(self.activeToons)): at = self.activeToons[i] if at != toonId or attack[TOON_TRACK_COL] == NPCSOS: toon = self.getToon(at) if toon != None: if i < len(hps): hp = hps[i] else: self.notify.warning('Invalid targetIndex %s in hps %s.' % (i, hps)) hp = 0 toonHpDict[toon.doId][0] += hp self.notify.debug('HEAL: toon: %d healed for hp: %d' % (toon.doId, hp)) else: targetId = attack[TOON_TGT_COL] toon = self.getToon(targetId) if toon != None and targetId in self.activeToons: targetIndex = self.activeToons.index(targetId) if targetIndex < len(hps): hp = hps[targetIndex] else: self.notify.warning('Invalid targetIndex %s in hps %s.' % (targetIndex, hps)) hp = 0 toonHpDict[toon.doId][0] += hp elif attackAffectsGroup(track, level, attack[TOON_TRACK_COL]): for suit in self.activeSuits: targetIndex = self.activeSuits.index(suit) if targetIndex < 0 or targetIndex >= len(hps): self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track, level, targetIndex, len(hps), hps)) else: hp = hps[targetIndex] if hp > 0 and track == LURE: if suit.battleTrap == UBER_GAG_LEVEL_INDEX: pass suit.battleTrap = NO_TRAP needUpdate = 1 if suit.doId in trapDict: del trapDict[suit.doId] if suitsLuredOntoTraps.count(suit) == 0: suitsLuredOntoTraps.append(suit) if track == TRAP: targetId = suit.doId if targetId in trapDict: trapDict[targetId].append(attack) else: trapDict[targetId] = [attack] needUpdate = 1 died = attack[SUIT_DIED_COL] & 1 << targetIndex if died != 0: if deadSuits.count(suit) == 0: deadSuits.append(suit) else: targetId = attack[TOON_TGT_COL] target = self.findSuit(targetId) if target != None: targetIndex = self.activeSuits.index(target) if targetIndex < 0 or targetIndex >= len(hps): self.notify.warning('Got attack (%s, %s) on target suit %s, but hps has only %s entries: %s' % (track, level, targetIndex, len(hps), hps)) else: hp = hps[targetIndex] if track == TRAP: if targetId in trapDict: trapDict[targetId].append(attack) else: trapDict[targetId] = [attack] if hp > 0 and track == LURE: oldBattleTrap = target.battleTrap if oldBattleTrap == UBER_GAG_LEVEL_INDEX: pass target.battleTrap = NO_TRAP needUpdate = 1 if target.doId in trapDict: del trapDict[target.doId] if suitsLuredOntoTraps.count(target) == 0: suitsLuredOntoTraps.append(target) if oldBattleTrap == UBER_GAG_LEVEL_INDEX: for otherSuit in self.activeSuits: if not otherSuit == target: otherSuit.battleTrap = NO_TRAP if otherSuit.doId in trapDict: del trapDict[otherSuit.doId] died = attack[SUIT_DIED_COL] & 1 << targetIndex if died != 0: if deadSuits.count(target) == 0: deadSuits.append(target) self.exitedToons = [] 
for suitKey in trapDict.keys(): attackList = trapDict[suitKey] attack = attackList[0] target = self.findSuit(attack[TOON_TGT_COL]) if attack[TOON_LVL_COL] == UBER_GAG_LEVEL_INDEX: targetId = suitKey target = self.findSuit(targetId) if len(attackList) == 1: if suitsLuredOntoTraps.count(target) == 0: self.notify.debug('movieDone() - trap set') target.battleTrap = attack[TOON_LVL_COL] needUpdate = 1 else: target.battleTrap = NO_TRAP else: self.notify.debug('movieDone() - traps collided') if target != None: target.battleTrap = NO_TRAP if self.battleCalc.trainTrapTriggered: self.notify.debug('Train trap triggered, clearing all traps') for otherSuit in self.activeSuits: self.notify.debug('suit =%d, oldBattleTrap=%d' % (otherSuit.doId, otherSuit.battleTrap)) otherSuit.battleTrap = NO_TRAP currLuredSuits = self.battleCalc.getLuredSuits() if len(self.luredSuits) == len(currLuredSuits): for suit in self.luredSuits: if currLuredSuits.count(suit.doId) == 0: needUpdate = 1 break else: needUpdate = 1 self.luredSuits = [] for i in currLuredSuits: suit = self.air.doId2do[i] self.luredSuits.append(suit) self.notify.debug('movieDone() - suit: %d is lured' % i) for attack in npcTrapAttacks: track, level, hp = NPCToons.getNPCTrackLevelHp(attack[TOON_TGT_COL]) for suit in self.activeSuits: if self.luredSuits.count(suit) == 0 and suit.battleTrap == NO_TRAP: suit.battleTrap = level needUpdate = 1 for suit in deadSuits: self.notify.debug('removing dead suit: %d' % suit.doId) if suit.isDeleted(): self.notify.debug('whoops, suit %d is deleted.' % suit.doId) else: self.notify.debug('suit had revives? %d' % suit.getMaxSkeleRevives()) encounter = {'type': suit.dna.name, 'level': suit.getActualLevel(), 'track': suit.dna.dept, 'isSkelecog': suit.getSkelecog(), 'isForeman': suit.isForeman(), 'isVP': 0, 'isCFO': 0, 'isSupervisor': suit.isSupervisor(), 'isVirtual': suit.isVirtual(), 'hasRevives': suit.getMaxSkeleRevives(), 'activeToons': self.activeToons[:]} self.suitsKilled.append(encounter) self.suitsKilledThisBattle.append(encounter) self.air.suitInvasionManager.handleSuitDefeated() self.__removeSuit(suit) needUpdate = 1 suit.resume() lastActiveSuitDied = 0 if len(self.activeSuits) == 0 and len(self.pendingSuits) == 0: lastActiveSuitDied = 1 for i in range(4): attack = self.suitAttacks[i][SUIT_ATK_COL] if attack != NO_ATTACK: suitId = self.suitAttacks[i][SUIT_ID_COL] suit = self.findSuit(suitId) if suit == None: self.notify.warning('movieDone() - suit: %d is gone!' % suitId) continue if not (hasattr(suit, 'dna') and suit.dna): toonId = self.air.getAvatarIdFromSender() self.notify.warning('_movieDone avoiding crash, sender=%s but suit has no dna' % toonId) self.air.writeServerEvent('suspicious', toonId, '_movieDone avoiding crash, suit has no dna') continue adict = getSuitAttack(suit.getStyleName(), suit.getLevel(), attack) hps = self.suitAttacks[i][SUIT_HP_COL] if adict['group'] == ATK_TGT_GROUP: for activeToon in self.activeToons: toon = self.getToon(activeToon) if toon != None: targetIndex = self.activeToons.index(activeToon) toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex if targetIndex >= len(hps): self.notify.warning('DAMAGE: toon %s is no longer in battle!' 
% activeToon) else: hp = hps[targetIndex] if hp > 0: self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (activeToon, hp)) if toonDied != 0: toonHpDict[toon.doId][2] = 1 toonHpDict[toon.doId][1] += hp elif adict['group'] == ATK_TGT_SINGLE: targetIndex = self.suitAttacks[i][SUIT_TGT_COL] if targetIndex >= len(self.activeToons): self.notify.warning('movieDone() - toon: %d gone!' % targetIndex) break toonId = self.activeToons[targetIndex] toon = self.getToon(toonId) toonDied = self.suitAttacks[i][TOON_DIED_COL] & 1 << targetIndex if targetIndex >= len(hps): self.notify.warning('DAMAGE: toon %s is no longer in battle!' % toonId) else: hp = hps[targetIndex] if hp > 0: self.notify.debug('DAMAGE: toon: %d hit for dmg: %d' % (toonId, hp)) if toonDied != 0: toonHpDict[toon.doId][2] = 1 toonHpDict[toon.doId][1] += hp deadToons = [] for activeToon in self.activeToons: hp = toonHpDict[activeToon] toon = self.getToon(activeToon) if toon != None: self.notify.debug('AFTER ROUND: currtoonHP: %d toonMAX: %d hheal: %d damage: %d' % (toon.hp, toon.maxHp, hp[0], hp[1])) toon.hpOwnedByBattle = 0 hpDelta = hp[0] - hp[1] if hpDelta >= 0: toon.toonUp(hpDelta, quietly=1) else: toon.takeDamage(-hpDelta, quietly=1) if toon.hp <= 0: self.notify.debug('movieDone() - toon: %d was killed' % activeToon) toon.inventory.zeroInv(1) deadToons.append(activeToon) self.notify.debug('AFTER ROUND: toon: %d setHp: %d' % (toon.doId, toon.hp)) if toon.unlimitedGags: toon.doRestock(noUber=0, noPaid=0) for deadToon in deadToons: self.__removeToon(deadToon) needUpdate = 1 self.clearAttacks() self.d_setMovie() self.d_setChosenToonAttacks() self.localMovieDone(needUpdate, deadToons, deadSuits, lastActiveSuitDied) def enterResume(self): for suit in self.suits: self.notify.info('battle done, resuming suit: %d' % suit.doId) if suit.isDeleted(): self.notify.info('whoops, suit %d is deleted.' 
% suit.doId) else: suit.resume() self.suits = [] self.joiningSuits = [] self.pendingSuits = [] self.adjustingSuits = [] self.activeSuits = [] self.luredSuits = [] for toonId in self.toons: toon = simbase.air.doId2do.get(toonId) if toon: toon.b_setBattleId(0) messageToonReleased = 'Battle releasing toon %s' % toon.doId messenger.send(messageToonReleased, [toon.doId]) for exitEvent in self.avatarExitEvents: self.ignore(exitEvent) eventMsg = {} for encounter in self.suitsKilledThisBattle: cog = encounter['type'] level = encounter['level'] msgName = '%s%s' % (cog, level) if encounter['isSkelecog']: msgName += '+' if msgName in eventMsg: eventMsg[msgName] += 1 else: eventMsg[msgName] = 1 msgText = '' for msgName, count in eventMsg.items(): if msgText != '': msgText += ',' msgText += '%s%s' % (count, msgName) self.air.writeServerEvent('battleCogsDefeated', self.doId, '%s|%s' % (msgText, self.getTaskZoneId())) def exitResume(self): pass def isJoinable(self): return self.joinableFsm.getCurrentState().getName() == 'Joinable' def enterJoinable(self): self.notify.debug('enterJoinable()') def exitJoinable(self): pass def enterUnjoinable(self): self.notify.debug('enterUnjoinable()') def exitUnjoinable(self): pass def isRunable(self): return self.runableFsm.getCurrentState().getName() == 'Runable' def enterRunable(self): self.notify.debug('enterRunable()') def exitRunable(self): pass def enterUnrunable(self): self.notify.debug('enterUnrunable()') def exitUnrunable(self): pass def __estimateAdjustTime(self): self.needAdjust = 0 adjustTime = 0 if len(self.pendingSuits) > 0 or self.suitGone == 1: self.suitGone = 0 pos0 = self.suitPendingPoints[0][0] pos1 = self.suitPoints[0][0][0] adjustTime = self.calcSuitMoveTime(pos0, pos1) if len(self.pendingToons) > 0 or self.toonGone == 1: self.toonGone = 0 if adjustTime == 0: pos0 = self.toonPendingPoints[0][0] pos1 = self.toonPoints[0][0][0] adjustTime = self.calcToonMoveTime(pos0, pos1) return adjustTime def enterAdjusting(self): self.notify.debug('enterAdjusting()') self.timer.stop() self.__resetAdjustingResponses() self.adjustingTimer.startCallback(self.__estimateAdjustTime() + SERVER_BUFFER_TIME, self.__serverAdjustingDone) def __serverAdjustingDone(self): if self.needAdjust == 1: self.adjustFsm.request('NotAdjusting') self.__requestAdjust() else: self.notify.debug('adjusting timed out on the server') self.ignoreAdjustingResponses = 1 self.__adjustDone() def exitAdjusting(self): currStateName = self.fsm.getCurrentState().getName() if currStateName == 'WaitForInput': self.timer.restart() elif currStateName == 'WaitForJoin': self.b_setState('WaitForInput') self.adjustingTimer.stop() def __addTrainTrapForNewSuits(self): hasTrainTrap = False trapInfo = None for otherSuit in self.activeSuits: if otherSuit.battleTrap == UBER_GAG_LEVEL_INDEX: hasTrainTrap = True if hasTrainTrap: for curSuit in self.activeSuits: if not curSuit.battleTrap == UBER_GAG_LEVEL_INDEX: oldBattleTrap = curSuit.battleTrap curSuit.battleTrap = UBER_GAG_LEVEL_INDEX self.battleCalc.addTrainTrapForJoiningSuit(curSuit.doId) self.notify.debug('setting traintrack trap for joining suit %d oldTrap=%s' % (curSuit.doId, oldBattleTrap)) def __adjustDone(self): for s in self.adjustingSuits: self.pendingSuits.remove(s) self.activeSuits.append(s) self.adjustingSuits = [] for toon in self.adjustingToons: if self.pendingToons.count(toon) == 1: self.pendingToons.remove(toon) else: self.notify.warning('adjustDone() - toon: %d not pending!' 
% toon.doId) if self.activeToons.count(toon) == 0: self.activeToons.append(toon) self.ignoreResponses = 0 self.sendEarnedExperience(toon) else: self.notify.warning('adjustDone() - toon: %d already active!' % toon.doId) self.adjustingToons = [] self.__addTrainTrapForNewSuits() self.d_setMembers() self.adjustFsm.request('NotAdjusting') if self.needAdjust == 1: self.notify.debug('__adjustDone() - need to adjust again') self.__requestAdjust() def enterNotAdjusting(self): self.notify.debug('enterNotAdjusting()') if self.movieRequested == 1: if len(self.activeToons) > 0 and self.__allActiveToonsResponded(): self.__requestMovie() def exitNotAdjusting(self): pass def getPetProxyObject(self, petId, callback): doneEvent = 'generate-%d' % petId def handlePetProxyRead(pet): callback(1, pet) self.air.sendActivate(petId, self.air.districtId, 0) self.acceptOnce(doneEvent, handlePetProxyRead) def _getNextSerialNum(self): num = self.serialNum self.serialNum += 1 return num def setFireCount(self, amount): self.fireCount = amount def getFireCount(self): return self.fireCount @magicWord(category=CATEGORY_PROGRAMMER) def skipMovie(): invoker = spellbook.getInvoker() battleId = invoker.getBattleId() if not battleId: return 'You are not currently in a battle!' battle = simbase.air.doId2do.get(battleId) battle._DistributedBattleBaseAI__movieDone() return 'Battle movie skipped.'
42.221267
279
0.551567
77,428
0.984889
0
0
335
0.004261
0
0
7,751
0.098593
719475f300d53be54d446d8d9cab1b9a95946543
371
py
Python
tracking_test.py
HsunGong/Augmented-Advertisement
ae9d0f5796c13e837a1a547d888647aeb61f0b04
[ "MIT" ]
5
2020-07-10T03:16:24.000Z
2022-01-14T01:12:23.000Z
tracking_test.py
HsunGong/Augmented-Advertisement
ae9d0f5796c13e837a1a547d888647aeb61f0b04
[ "MIT" ]
4
2021-08-25T16:13:24.000Z
2022-02-10T03:34:06.000Z
tracking_test.py
HsunGong/Augmented-Advertisement
ae9d0f5796c13e837a1a547d888647aeb61f0b04
[ "MIT" ]
1
2021-10-22T02:53:39.000Z
2021-10-22T02:53:39.000Z
# Copyright (c) Group Three-Forest SJTU. All Rights Reserved.

from tracking.tracking import *

# a = tracking_video_rectangle("video/","1.mp4",[[273,352],[266,616],[412,620],[416,369]])
a = tracking_video_rectangle_tovideo("video/", "1.mp4", "1.png", [[273,352],[266,616],[412,620],[416,369]], result = 'result__.avi', method_num = 5, edge = 4, middle_halt = 250)
53
177
0.668464
0
0
0
0
0
0
0
0
190
0.512129
7195924eb07d641386ea892a9ee9a4835feb2275
11,102
py
Python
gym_flock/envs/old/flocking_position.py
katetolstaya/gym-flock
3236d1dafcb1b9be0cf78b471672e8becb2d37af
[ "MIT" ]
19
2019-07-29T22:19:58.000Z
2022-01-27T04:38:38.000Z
gym_flock/envs/old/flocking_position.py
henghenghahei849/gym-flock
b09bdfbbe4a96fe052958d1f9e1e9dd314f58419
[ "MIT" ]
null
null
null
gym_flock/envs/old/flocking_position.py
henghenghahei849/gym-flock
b09bdfbbe4a96fe052958d1f9e1e9dd314f58419
[ "MIT" ]
5
2019-10-03T14:44:49.000Z
2021-12-09T20:39:39.000Z
import gym from gym import spaces, error, utils from gym.utils import seeding import numpy as np from scipy.spatial.distance import pdist, squareform import configparser from os import path import matplotlib.pyplot as plt from matplotlib.pyplot import gca font = {'family' : 'sans-serif', 'weight' : 'bold', 'size' : 14} class FlockingEnv(gym.Env): def __init__(self): config_file = path.join(path.dirname(__file__), "params_flock.cfg") config = configparser.ConfigParser() config.read(config_file) config = config['flock'] self.fig = None self.line1 = None self.filter_len = int(config['filter_length']) self.nx_system = 4 self.n_nodes = int(config['network_size']) self.comm_radius = float(config['comm_radius']) self.dt = float(config['system_dt']) self.v_max = float(config['max_vel_init']) self.v_bias = self.v_max # 0.5 * self.v_max self.r_max = float(config['max_rad_init']) self.std_dev = float(config['std_dev']) * self.dt self.pooling = [] if config.getboolean('sum_pooling'): self.pooling.append(np.nansum) if config.getboolean('min_pooling'): self.pooling.append(np.nanmin) if config.getboolean('max_pooling'): self.pooling.append(np.nanmax) self.n_pools = len(self.pooling) # number of features and outputs self.n_features = int(config['N_features']) self.nx = int(self.n_features / self.n_pools / self.filter_len) self.nu = int(config['N_outputs']) # outputs self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools)) self.x = np.zeros((self.n_nodes, self.nx_system)) self.u = np.zeros((self.n_nodes, self.nu)) self.mean_vel = np.zeros((self.n_nodes, self.nu)) # TODO self.max_accel = 40 self.max_z = 200 # self.b = np.ones((self.n_nodes,1)) # self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(self.n_nodes, 2), dtype=np.float32 ) # self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=( # self.n_nodes, self.nx * self.filter_len * self.n_pools) , dtype=np.float32) self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2,) , dtype=np.float32 ) self.observation_space = spaces.Box(low=-self.max_z, high=self.max_z, shape=(self.n_features, ), dtype=np.float32) self.seed() def render(self, mode='human'): if self.fig is None: plt.ion() fig = plt.figure() ax = fig.add_subplot(111) line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma ax.plot([0], [0], 'kx') plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max) plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max) a = gca() a.set_xticklabels(a.get_xticks(), font) a.set_yticklabels(a.get_yticks(), font) plt.title('GNN Controller') self.fig = fig self.line1 = line1 self.line1.set_xdata(self.x[:, 0]) self.line1.set_ydata(self.x[:, 1]) self.fig.canvas.draw() self.fig.canvas.flush_events() def seed(self, seed=None): self.np_random, seed = seeding.np_random(seed) return [seed] def step(self, u): x = self.x x_ = np.zeros((self.n_nodes, self.nx_system)) #u = np.vstack((np.zeros((self.n_leaders, 2)), u)) # x position x_[:, 0] = x[:, 0] + x[:, 2] * self.dt # y position x_[:, 1] = x[:, 1] + x[:, 3] * self.dt # x velocity x_[:, 2] = x[:, 2] + 0.1 * u[:, 0] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,)) # y velocity x_[:, 3] = x[:, 3] + 0.1 * u[:, 1] * self.dt + np.random.normal(0, self.std_dev,(self.n_nodes,)) # TODO - check the 0.1 self.x = x_ self.x_agg = self.aggregate(self.x, self.x_agg) self.u = u return self._get_obs(), -self.instant_cost(), False, {} def instant_cost(self): # sum of differences in velocities return 
np.sum(np.var(self.x[:, 2:4], axis=0)) #+ np.sum(np.square(self.u)) * 0.00001 #return np.sum(np.square(self.x[:,2:4] - self.mean_vel)) def _get_obs(self): reshaped = self.x_agg.reshape((self.n_nodes, self.n_features)) clipped = np.clip(reshaped, a_min=-self.max_z, a_max=self.max_z) return clipped #[self.n_leaders:, :] def reset(self): x = np.zeros((self.n_nodes, self.nx_system)) degree = 0 min_dist = 0 while degree < 2 or min_dist < 0.1: # < 0.25: # 0.25: #0.5: #min_dist < 0.25: # randomly initialize the state of all agents length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_nodes,))) angle = np.pi * np.random.uniform(0, 2, size=(self.n_nodes,)) x[:, 0] = length * np.cos(angle) x[:, 1] = length * np.sin(angle) bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,)) x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[0] x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_nodes,)) + bias[1] # compute distances between agents x_t_loc = x[:, 0:2] # x,y location determines connectivity a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean')) # no self loops a_net = a_net + 2 * self.comm_radius * np.eye(self.n_nodes) # compute minimum distance between agents and degree of network min_dist = np.min(np.min(a_net)) a_net = a_net < self.comm_radius degree = np.min(np.sum(a_net.astype(int), axis=1)) self.mean_vel = np.mean(x[:,2:4],axis=0) self.x = x self.x_agg = np.zeros((self.n_nodes, self.nx * self.filter_len, self.n_pools)) self.x_agg = self.aggregate(self.x, self.x_agg) return self._get_obs() # def render(self, mode='human'): # pass def close(self): pass def aggregate(self, xt, x_agg): """ Perform aggegration operation for all possible pooling operations using helper functions get_pool and get_comms Args: x_agg (): Last time step's aggregated info xt (): Current state of all agents Returns: Aggregated state values """ x_features = self.get_x_features(xt) a_net = self.get_connectivity(xt) for k in range(0, self.n_pools): comm_data = self.get_comms(np.dstack((x_features, self.get_features(x_agg[:, :, k]))), a_net) x_agg[:, :, k] = self.get_pool(comm_data, self.pooling[k]) return x_agg def get_connectivity(self, x): """ Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist Args: x (): current states of all agents Returns: adjacency matrix of network """ x_t_loc = x[:, 0:2] # x,y location determines connectivity a_net = squareform(pdist(x_t_loc.reshape((self.n_nodes, 2)), 'euclidean')) a_net = (a_net < self.comm_radius).astype(float) np.fill_diagonal(a_net, 0) return a_net def get_x_features(self, xt): # TODO """ Compute the non-linear features necessary for implementing Turner 2003 Args: xt (): current state of all agents Returns: matrix of features for each agent """ diff = xt.reshape((self.n_nodes, 1, self.nx_system)) - xt.reshape((1, self.n_nodes, self.nx_system)) r2 = np.multiply(diff[:, :, 0], diff[:, :, 0]) + np.multiply(diff[:, :, 1], diff[:, :, 1]) + np.eye( self.n_nodes) return np.dstack((diff[:, :, 2], np.divide(diff[:, :, 0], np.multiply(r2, r2)), np.divide(diff[:, :, 0], r2), diff[:, :, 3], np.divide(diff[:, :, 1], np.multiply(r2, r2)), np.divide(diff[:, :, 1], r2))) def get_features(self, agg): """ Matrix of Args: agg (): the aggregated matrix from the last time step Returns: matrix of aggregated features from all nodes at current time """ return np.tile(agg[:, :-self.nx].reshape((self.n_nodes, 1, -1)), (1, self.n_nodes, 1)) # 
TODO check indexing def get_comms(self, mat, a_net): """ Enforces that agents who are not connected in the network cannot observe each others' states Args: mat (): matrix of state information for the whole graph a_net (): adjacency matrix for flock network (weighted networks unsupported for now) Returns: mat (): sparse matrix with NaN values where agents can't communicate """ a_net[a_net == 0] = np.nan return mat * a_net.reshape(self.n_nodes, self.n_nodes, 1) def get_pool(self, mat, func): """ Perform pooling operations on the matrix of state information. The replacement of values with NaNs for agents who can't communicate must already be enforced. Args: mat (): matrix of state information func (): pooling function (np.nansum(), np.nanmin() or np.nanmax()). Must ignore NaNs. Returns: information pooled from neighbors for each agent """ return func(mat, axis=1).reshape((self.n_nodes, self.n_features)) # TODO check this axis = 1 def controller(self): """ The controller for flocking from Turner 2003. Args: x (): the current state Returns: the optimal action """ x = self.x s_diff = x.reshape((self.n_nodes, 1, self.nx_system)) - x.reshape((1, self.n_nodes, self.nx_system)) r2 = np.multiply(s_diff[:, :, 0], s_diff[:, :, 0]) + np.multiply(s_diff[:, :, 1], s_diff[:, :, 1]) + np.eye( self.n_nodes) p = np.dstack((s_diff, self.potential_grad(s_diff[:, :, 0], r2), self.potential_grad(s_diff[:, :, 1], r2))) p_sum = np.nansum(p, axis=1).reshape((self.n_nodes, self.nx_system + 2)) return np.hstack(((- p_sum[:, 4] - p_sum[:, 2]).reshape((-1, 1)), (- p_sum[:, 3] - p_sum[:, 5]).reshape(-1, 1))) def potential_grad(self, pos_diff, r2): """ Computes the gradient of the potential function for flocking proposed in Turner 2003. Args: pos_diff (): difference in a component of position among all agents r2 (): distance squared between agents Returns: corresponding component of the gradient of the potential """ grad = -2.0 * np.divide(pos_diff, np.multiply(r2, r2)) + 2 * np.divide(pos_diff, r2) grad[r2 > self.comm_radius] = 0 return grad
38.682927
126
0.582418
10,759
0.969105
0
0
0
0
0
0
3,591
0.323455
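The FlockingEnv record above exposes reset(), step(u) and a baseline controller(); the short driver below is a sketch of how it could be rolled forward. It assumes the package installs as gym_flock (matching the repo path gym_flock/envs/old/flocking_position.py) and that params_flock.cfg ships next to the module, since __init__ reads it from the package directory.

# Minimal sketch: roll the env forward with its built-in flocking controller.
# Assumes gym_flock is importable and params_flock.cfg is present alongside the module.
import numpy as np
from gym_flock.envs.old.flocking_position import FlockingEnv

env = FlockingEnv()
obs = env.reset()                          # random initial positions/velocities
for _ in range(10):
    u = env.controller()                   # gradient-based controller from Turner 2003
    u = np.clip(u, -env.max_accel, env.max_accel)
    obs, reward, done, info = env.step(u)
print(obs.shape, reward)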
719665fcbb1b48dc2e95347865f8f0d20166bbd8
2,127
py
Python
conf/constants.py
codingwangfeng/GoodGoodName
02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb
[ "MIT" ]
null
null
null
conf/constants.py
codingwangfeng/GoodGoodName
02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb
[ "MIT" ]
null
null
null
conf/constants.py
codingwangfeng/GoodGoodName
02bfeb3ae65fd9ba0354f5b67237fcad4c0e11cb
[ "MIT" ]
null
null
null
# -*-coding:utf-8-*-
# from functools import reduce
from functools import reduce

SANCAI_jixiang = [1, 3, 5, 7, 8, 11, 13, 15, 16, 18, 21, 23, 24, 25, 31, 32, 33, 35, 37, 39, 41, 45, 47, 48, 52, 57,
                  61, 63, 65, 67, 68, 81]  # 吉祥运暗示数(代表健全,幸福,名誉等)
SANCAI_xiaoji = [6, 17, 26, 27, 29, 30, 38, 49, 51, 55, 58, 71, 73, 75]  # 次吉祥运暗示数(代表多少有些障碍,但能获得吉运)
SANCAI_xiong = [2, 4, 9, 10, 12, 14, 19, 20, 22, 28, 34, 36, 40, 42, 43, 44, 46, 50, 53, 54, 56, 59, 60, 62, 64, 66,
                69, 70, 72, 74, 76, 77, 78, 79, 80]  # 凶数运暗示数(代表逆境,沉浮,薄弱,病难,困难,多灾等)
SANCAI_wise = [3, 13, 16, 21, 23, 29, 31, 37, 39, 41, 45, 47]  # 首领运暗示数(智慧 )仁勇全备,立上位,能领导众人)
SANCAI_wealth = [15, 16, 24, 29, 32, 33, 41, 52]  # 财富运暗示数(多钱财,富贵,白手可获巨财)
SANCAI_artist = [13, 14, 18, 26, 29, 33, 35, 38, 48]  # 艺能运暗示数(富有艺术天才,对审美,艺术,演艺,体育有通达之能)
SANCAI_goodwife = [5, 6, 11, 13, 15, 16, 24, 32, 35]  # 女德运暗示数(具有妇德,品性温良,助夫爱子)
SANCAI_death = [21, 23, 26, 28, 29, 33, 39]  # 女性孤寡运暗示数(难觅夫君,家庭不和,夫妻两虎相斗,离婚,严重者夫妻一方早亡)
SANCAI_alone = [4, 10, 12, 14, 22, 28, 34]  # 孤独运暗示数(妻凌夫或夫克妻)
SANCAI_merry = [5, 6, 15, 16, 32, 39, 41]  # 双妻运暗示数
SANCAI_stubbon = [7, 17, 18, 25, 27, 28, 37, 47]  # 刚情运暗示数(性刚固执,意气用事)
SANCAI_gentle = [5, 6, 11, 15, 16, 24, 31, 32, 35]  # 温和运暗示数(性情平和,能得上下信望)

# 可以自己配置觉得好的数字
# 参考好的搭配
refer_good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
                       SANCAI_merry, SANCAI_gentle]
# 自己设定的好的搭配
good_num_list = [SANCAI_jixiang, SANCAI_xiaoji, SANCAI_wise, SANCAI_wealth, SANCAI_artist, SANCAI_goodwife,
                 SANCAI_merry, SANCAI_gentle]
# 参考坏的搭配
refer_bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone, SANCAI_stubbon]
# 自己设定的坏的搭配
bad_num_list = [SANCAI_xiong, SANCAI_death, SANCAI_alone]

good_num_set = set(reduce((lambda x, y: x + y), good_num_list, []))
bad_num_set = set(reduce((lambda x, y: x + y), bad_num_list, []))
print('五格好分值:', good_num_set)
print('五格差分值:', bad_num_set)
# 筛选出有好没坏的三才五格
best_num_set = [x for x in good_num_set if x not in bad_num_set]
print('想要的三才五格数字:', best_num_set)

RESULT_UNKNOWN = '结果未知'
49.465116
120
0.640809
0
0
0
0
0
0
0
0
1,098
0.394538
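Since conf/constants.py above only precomputes good_num_set, bad_num_set and best_num_set, a small helper such as the following sketch (the function name is illustrative, not part of the repo) shows how downstream code could classify a single stroke-count number against those sets:

# Hypothetical helper built on the sets defined in conf/constants.py above.
from conf.constants import good_num_set, bad_num_set, RESULT_UNKNOWN

def classify_number(n):
    """Return 'good', 'bad', or the unknown marker for a stroke-count number."""
    if n in bad_num_set:
        return 'bad'
    if n in good_num_set:
        return 'good'
    return RESULT_UNKNOWN

print(classify_number(16), classify_number(34))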
7196a7afa44165b6070e17839c160c5651229421
406
py
Python
main/migrations/0006_labourer_allproj.py
kevinmuturi5/farm-Management-system
61929d7998d92d56daac67c2f8ace3cc76b6ee8b
[ "MIT" ]
1
2020-11-24T14:39:54.000Z
2020-11-24T14:39:54.000Z
main/migrations/0006_labourer_allproj.py
kevinmuturi5/farm-Management-system
61929d7998d92d56daac67c2f8ace3cc76b6ee8b
[ "MIT" ]
null
null
null
main/migrations/0006_labourer_allproj.py
kevinmuturi5/farm-Management-system
61929d7998d92d56daac67c2f8ace3cc76b6ee8b
[ "MIT" ]
null
null
null
# Generated by Django 3.1.2 on 2020-10-18 16:07

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('main', '0005_auto_20201018_1902'),
    ]

    operations = [
        migrations.AddField(
            model_name='labourer',
            name='allproj',
            field=models.ManyToManyField(blank=True, to='main.Listing'),
        ),
    ]
21.368421
72
0.603448
313
0.770936
0
0
0
0
0
0
111
0.273399
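For context, the AddField operation in that migration implies main/models.py gains a many-to-many relation from Labourer to Listing. The sketch below reconstructs only that field; the rest of both models is not shown in this record and is a guessed placeholder, to be read within a configured Django project.

# Sketch of what main/models.py implies after migration 0006_labourer_allproj
# (only the new field is grounded in the record above; the rest is placeholder).
from django.db import models

class Listing(models.Model):
    pass  # existing model; its fields are not shown in this record

class Labourer(models.Model):
    # field added by 0006_labourer_allproj
    allproj = models.ManyToManyField(Listing, blank=True)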
7196e863b7922259efe8d454892b5eb76fb7593e
27,897
py
Python
bzt/modules/blazemeter/blazemeter_reporter.py
beachwood23/taurus
698ac747bae5d4940a879a8526add67c11ef42da
[ "Apache-2.0" ]
null
null
null
bzt/modules/blazemeter/blazemeter_reporter.py
beachwood23/taurus
698ac747bae5d4940a879a8526add67c11ef42da
[ "Apache-2.0" ]
34
2017-08-31T22:54:12.000Z
2022-03-16T00:39:48.000Z
bzt/modules/blazemeter/blazemeter_reporter.py
beachwood23/taurus
698ac747bae5d4940a879a8526add67c11ef42da
[ "Apache-2.0" ]
null
null
null
""" Module for reporting into http://www.blazemeter.com/ service Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import copy import logging import os import platform import sys import time import traceback import zipfile from collections import defaultdict, OrderedDict from io import BytesIO from urllib.error import HTTPError import requests from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError from bzt.bza import User, Session, Test from bzt.engine import Reporter, Singletone from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time from bzt.modules.aggregator import AggregatorListener, DataPoint, KPISet, ResultsProvider, ConsolidatingAggregator from bzt.modules.monitoring import Monitoring, MonitoringListener from bzt.modules.blazemeter.project_finder import ProjectFinder from bzt.modules.blazemeter.const import NOTE_SIZE_LIMIT class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone): """ Reporter class :type _test: bzt.bza.Test :type _master: bzt.bza.Master :type _session: bzt.bza.Session """ def __init__(self): super(BlazeMeterUploader, self).__init__() self.browser_open = 'start' self.kpi_buffer = [] self.send_interval = 30 self._last_status_check = time.time() self.send_data = True self.upload_artifacts = True self.send_monitoring = True self.monitoring_buffer = None self.public_report = False self.last_dispatch = 0 self.results_url = None self._user = User() self._test = None self._master = None self._session = None self.first_ts = sys.maxsize self.last_ts = 0 self.report_name = None self._dpoint_serializer = DatapointSerializer(self) def prepare(self): """ Read options for uploading, check that they're sane """ super(BlazeMeterUploader, self).prepare() self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval)) self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring) monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500) self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log) self.browser_open = self.settings.get("browser-open", self.browser_open) self.public_report = self.settings.get("public-report", self.public_report) self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts) self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi) token = self.settings.get("token", "") if not token: self.log.warning("No BlazeMeter API key provided, will upload anonymously") self._user.token = token # usual fields self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit) self._user.address = self.settings.get("address", self._user.address).rstrip("/") self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/") self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout)) if isinstance(self._user.http_session, requests.Session): 
self.log.debug("Installing http client") self._user.http_session = self.engine.get_http_client() self._user.http_request = self._user.http_session.request # direct data feeding case sess_id = self.parameters.get("session-id") if sess_id: self._session = Session(self._user, {'id': sess_id}) self._session['userId'] = self.parameters.get("user-id", None) self._session['testId'] = self.parameters.get("test-id", None) self._test = Test(self._user, {'id': self._session['testId']}) exc = TaurusConfigError("Need signature for session") self._session.data_signature = self.parameters.get("signature", exc) self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target) self.send_data = self.parameters.get("send-data", self.send_data) else: try: self._user.ping() # to check connectivity and auth except HTTPError: self.log.error("Cannot reach online results storage, maybe the address/token is wrong") raise if token: wsp = self._user.accounts().workspaces() if not wsp: raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support") finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log) self._test = finder.resolve_external_test() else: self._test = Test(self._user, {'id': None}) self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name)) if self.report_name == 'ask' and sys.stdin.isatty(): self.report_name = input("Please enter report-name: ") if isinstance(self.engine.aggregator, ResultsProvider): self.engine.aggregator.add_listener(self) for service in self.engine.services: if isinstance(service, Monitoring): service.add_listener(self) def startup(self): """ Initiate online test """ super(BlazeMeterUploader, self).startup() self._user.log = self.log.getChild(self.__class__.__name__) if not self._session: url = self._start_online() self.log.info("Started data feeding: %s", url) if self.browser_open in ('start', 'both'): open_browser(url) if self._user.token and self.public_report: report_link = self._master.make_report_public() self.log.info("Public report link: %s", report_link) def _start_online(self): """ Start online test """ self.log.info("Initiating data feeding...") if self._test['id']: self._session, self._master = self._test.start_external() else: self._session, self._master, self.results_url = self._test.start_anonymous_external_test() self._test['id'] = self._session['testId'] if self._test.token: self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id'] if self.report_name: self._session.set({"name": str(self.report_name)}) return self.results_url def __get_jtls_and_more(self): """ Compress all files in artifacts dir to single zipfile :rtype: (io.BytesIO,dict) """ mfile = BytesIO() listing = {} logs = set() for handler in self.engine.log.parent.handlers: if isinstance(handler, logging.FileHandler): logs.add(handler.baseFilename) max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024 # 10MB with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh: for root, _, files in os.walk(self.engine.artifacts_dir): for filename in files: full_path = os.path.join(root, filename) if full_path in logs: logs.remove(full_path) fsize = os.path.getsize(full_path) if fsize <= max_file_size: zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename)) listing[full_path] = fsize else: msg = "File %s exceeds maximum size quota of %s and won't be included into 
upload" self.log.warning(msg, filename, max_file_size) for filename in logs: # upload logs unconditionally zfh.write(filename, os.path.basename(filename)) listing[filename] = os.path.getsize(filename) return mfile, listing def __upload_artifacts(self): """ If token provided, upload artifacts folder contents and bzt.log """ if not self._session.token: return worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL') if worker_index: suffix = '-%s' % worker_index else: suffix = '' artifacts_zip = "artifacts%s.zip" % suffix mfile, zip_listing = self.__get_jtls_and_more() self.log.info("Uploading all artifacts as %s ...", artifacts_zip) self._session.upload_file(artifacts_zip, mfile.getvalue()) self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing)) handlers = self.engine.log.parent.handlers for handler in handlers: if isinstance(handler, logging.FileHandler): fname = handler.baseFilename self.log.info("Uploading %s", fname) fhead, ftail = os.path.splitext(os.path.split(fname)[-1]) modified_name = fhead + suffix + ftail with open(fname, 'rb') as _file: self._session.upload_file(modified_name, _file.read()) _file.seek(-4096, 2) tail = _file.read() tail = tail[tail.index(b("\n")) + 1:] self._session.upload_file(modified_name + ".tail.bz", tail) def post_process(self): """ Upload results if possible """ if not self._session: self.log.debug("No feeding session obtained, nothing to finalize") return self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer)) try: self.log.info("Sending remaining KPI data to server...") if self.send_data: self.__send_data(self.kpi_buffer, False, True) self.kpi_buffer = [] if self.send_monitoring: self.__send_monitoring() finally: self._postproc_phase2() if self.results_url: if self.browser_open in ('end', 'both'): open_browser(self.results_url) self.log.info("Online report link: %s", self.results_url) def _postproc_phase2(self): try: if self.upload_artifacts: self.__upload_artifacts() except (IOError, TaurusNetworkError): self.log.warning("Failed artifact upload: %s", traceback.format_exc()) finally: self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check) self.log.debug("Set last check time to: %s", self._last_status_check) tries = self.send_interval # NOTE: you dirty one... 
while not self._last_status_check and tries > 0: self.log.info("Waiting for ping...") time.sleep(self.send_interval) tries -= 1 self._postproc_phase3() def _postproc_phase3(self): try: if self.send_data: self.end_online() if self._user.token and self.engine.stopping_reason: exc_class = self.engine.stopping_reason.__class__.__name__ note = "%s: %s" % (exc_class, str(self.engine.stopping_reason)) self.append_note_to_session(note) if self._master: self.append_note_to_master(note) except KeyboardInterrupt: raise except BaseException as exc: self.log.debug("Failed to finish online: %s", traceback.format_exc()) self.log.warning("Failed to finish online: %s", exc) def end_online(self): """ Finish online test """ if not self._session: self.log.debug("Feeding not started, so not stopping") else: self.log.info("Ending data feeding...") if self._user.token: self._session.stop() else: self._session.stop_anonymous() def append_note_to_session(self, note): self._session.fetch() if 'note' in self._session: note = self._session['note'] + '\n' + note note = note.strip() if note: self._session.set({'note': note[:NOTE_SIZE_LIMIT]}) def append_note_to_master(self, note): self._master.fetch() if 'note' in self._master: note = self._master['note'] + '\n' + note note = note.strip() if note: self._master.set({'note': note[:NOTE_SIZE_LIMIT]}) def check(self): """ Send data if any in buffer """ self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer)) if self.last_dispatch < (time.time() - self.send_interval): self.last_dispatch = time.time() if self.send_data and len(self.kpi_buffer): self.__send_data(self.kpi_buffer) self.kpi_buffer = [] if self.send_monitoring: self.__send_monitoring() return super(BlazeMeterUploader, self).check() def __send_data(self, data, do_check=True, is_final=False): """ :type data: list[bzt.modules.aggregator.DataPoint] """ if not self._session: return self.engine.aggregator.converter(data) serialized = self._dpoint_serializer.get_kpi_body(data, is_final) self._session.send_kpi_data(serialized, do_check) def aggregated_second(self, data): """ Send online data :param data: DataPoint """ if self.send_data: self.kpi_buffer.append(data) def monitoring_data(self, data): if self.send_monitoring: self.monitoring_buffer.record_data(data) def __send_monitoring(self): engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '') if not engine_id: engine_id = "0" data = self.monitoring_buffer.get_monitoring_json(self._session) self._session.send_monitoring_data(engine_id, data) def __format_listing(self, zip_listing): lines = [] for fname in sorted(zip_listing.keys()): bytestr = humanize_bytes(zip_listing[fname]) if fname.startswith(self.engine.artifacts_dir): fname = fname[len(self.engine.artifacts_dir) + 1:] lines.append(bytestr + " " + fname) return "\n".join(lines) class MonitoringBuffer(object): def __init__(self, size_limit, parent_log): self.size_limit = size_limit self.data = defaultdict(OrderedDict) self.log = parent_log.getChild(self.__class__.__name__) # data :: dict(datasource -> dict(interval -> datapoint)) # datapoint :: dict(metric -> value) def record_data(self, data): for monitoring_item in data: item = copy.deepcopy(monitoring_item) source = item.pop('source') timestamp = int(item['ts']) item['interval'] = 1 buff = self.data[source] if timestamp in buff: buff[timestamp].update(item) else: buff[timestamp] = item sources = list(self.data) for source in sources: if len(self.data[source]) > self.size_limit: 
self._downsample(self.data[source]) self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source])) def _downsample(self, buff): size = 1 while len(buff) > self.size_limit: self._merge_small_intervals(buff, size) size += 1 def _merge_small_intervals(self, buff, size): timestamps = list(buff) merged_already = set() for left, right in zip(timestamps, timestamps[1:]): if left in merged_already: continue if buff[left]['interval'] <= size: self._merge_datapoints(buff[left], buff[right]) buff.pop(right) merged_already.add(left) merged_already.add(right) @staticmethod def _merge_datapoints(left, right): sum_size = float(left['interval'] + right['interval']) for metric in set(right): if metric in ('ts', 'interval'): continue if metric in left: left[metric] = (left[metric] * left['interval'] + right[metric] * right['interval']) / sum_size else: left[metric] = right[metric] left['interval'] = sum_size def get_monitoring_json(self, session): """ :type session: Session """ results = {} hosts = [] kpis = {} for source, buff in iteritems(self.data): for timestamp, item in iteritems(buff): if source == 'local': source = platform.node() if source not in results: results[source] = { "name": source, "intervals": OrderedDict() } if source not in hosts: hosts.append(source) src = results[source] tstmp = timestamp * 1000 tstmp_key = '%d' % tstmp if tstmp_key not in src['intervals']: src['intervals'][tstmp_key] = { "start": tstmp, "duration": item['interval'] * 1000, "indicators": {} } for field, value in iteritems(item): if field.lower().startswith('conn-all'): field = 'Connections' elif field.lower().startswith('cpu'): field = 'CPU' elif field.lower().startswith('mem'): field = 'Memory' value *= 100 elif field == 'bytes-recv' or field.lower().startswith('net'): field = 'Network I/O' elif field == 'engine-loop': field = 'Busy Taurus' else: continue # maybe one day BZA will accept all other metrics... if field not in kpis: kpis[field] = field src['intervals'][tstmp_key]['indicators'][field] = { "value": value, "name": field, "std": 0, "mean": 0, "sum": 0, "min": 0, "max": 0, "sumOfSquares": 0, "n": 1 } kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"} return { "reportInfo": { "sessionId": session['id'], "timestamp": time.time(), "userId": session['userId'], "testId": session['testId'], "type": "MONITOR", "testName": "" }, "kpis": kpis, "hosts": hosts, "results": results } class DatapointSerializer(object): def __init__(self, owner): """ :type owner: BlazeMeterUploader """ super(DatapointSerializer, self).__init__() self.owner = owner self.multi = 1000 # miltiplier factor for reporting def get_kpi_body(self, data_buffer, is_final): # - reporting format: # {labels: <data>, # see below # sourceID: <id of BlazeMeterClient object>, # [is_final: True]} # for last report # # - elements of 'data' are described in __get_label() # # - elements of 'intervals' are described in __get_interval() # every interval contains info about response codes that were received on it. 
report_items = BetterDict() if data_buffer: self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP]) self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP]) # following data is received in the cumulative way for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]): report_item = self.__get_label(label, kpi_set) self.__add_errors(report_item, kpi_set) # 'Errors' tab report_items[label] = report_item # fill 'Timeline Report' tab with intervals data # intervals are received in the additive way if report_items: for dpoint in data_buffer: time_stamp = dpoint[DataPoint.TIMESTAMP] for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]): exc = TaurusInternalException('Cumulative KPISet is non-consistent') report_item = report_items.get(label, exc) report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp)) report_items = [report_items[key] for key in sorted(report_items.keys())] # convert dict to list data = {"labels": report_items, "sourceID": id(self.owner)} if is_final: data['final'] = True return to_json(data) @staticmethod def __add_errors(report_item, kpi_set): errors = kpi_set[KPISet.ERRORS] for error in errors: if error["type"] == KPISet.ERRTYPE_ERROR: report_item['errors'].append({ 'm': error['msg'], "rc": error['rc'], "count": error['cnt'], }) elif error["type"] == KPISet.ERRTYPE_SUBSAMPLE: report_item['failedEmbeddedResources'].append({ "count": error['cnt'], "rm": error['msg'], "rc": error['rc'], "url": list(error['urls'])[0] if error['urls'] else None, }) else: report_item['assertions'].append({ 'failureMessage': error['msg'], 'name': error['tag'] if error['tag'] else 'All Assertions', 'failures': error['cnt'] }) def __get_label(self, name, cumul): return { "n": cumul[KPISet.SAMPLE_COUNT], # total count of samples "name": name if name else 'ALL', # label "interval": 1, # not used "intervals": [], # list of intervals, fill later "samplesNotCounted": 0, # not used "assertionsNotCounted": 0, # not used "failedEmbeddedResources": [], # not used "failedEmbeddedResourcesSpilloverCount": 0, # not used "otherErrorsCount": 0, # not used "errors": [], # list of errors, fill later "assertions": [], # list of assertions, fill later "percentileHistogram": [], # not used "percentileHistogramLatency": [], # not used "percentileHistogramBytes": [], # not used "empty": False, # not used "summary": self.__get_summary(cumul) # summary info } def __get_summary(self, cumul): return { "first": self.owner.first_ts, "last": self.owner.last_ts, "duration": self.owner.last_ts - self.owner.first_ts, "failed": cumul[KPISet.FAILURES], "hits": cumul[KPISet.SAMPLE_COUNT], "avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]), "min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0, "max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0, "std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]), "tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0, "tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0, "tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0, "latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]), "latencyMax": 0, "latencyMin": 0, "latencySTD": 0, "bytes": cumul[KPISet.BYTE_COUNT], "bytesMax": 0, "bytesMin": 0, "bytesAvg": int(cumul[KPISet.BYTE_COUNT] / 
float(cumul[KPISet.SAMPLE_COUNT])), "bytesSTD": 0, "otherErrorsSpillcount": 0, } def __get_interval(self, item, time_stamp): # rc_list - list of info about response codes: # {'n': <number of code encounters>, # 'f': <number of failed request (e.q. important for assertions)> # 'rc': <string value of response code>} rc_list = [] for r_code, cnt in iteritems(item[KPISet.RESP_CODES]): fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code] rc_list.append({"n": cnt, 'f': fails, "rc": r_code}) return { "ec": item[KPISet.FAILURES], "ts": time_stamp, "na": item[KPISet.CONCURRENCY], "n": item[KPISet.SAMPLE_COUNT], "failed": item[KPISet.FAILURES], "rc": rc_list, "t": { "min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0, "max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[ KPISet.PERCENTILES] else 0, "sum": self.multi * item[KPISet.AVG_RESP_TIME] * item[KPISet.SAMPLE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": self.multi * item[KPISet.STDEV_RESP_TIME], "avg": self.multi * item[KPISet.AVG_RESP_TIME] }, "lt": { "min": 0, "max": 0, "sum": self.multi * item[KPISet.AVG_LATENCY] * item[KPISet.SAMPLE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": 0, "avg": self.multi * item[KPISet.AVG_LATENCY] }, "by": { "min": 0, "max": 0, "sum": item[KPISet.BYTE_COUNT], "n": item[KPISet.SAMPLE_COUNT], "std": 0, "avg": item[KPISet.BYTE_COUNT] / float(item[KPISet.SAMPLE_COUNT]) }, }
40.547965
120
0.566979
26,462
0.948561
0
0
1,437
0.051511
0
0
5,834
0.209126
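The MonitoringBuffer defined in the record above is self-contained enough to exercise on its own. The sketch below (assuming the bzt package and its dependencies are installed, with the import path taken from the repo path shown) feeds it more samples than its size limit to trigger the interval-merging downsampler:

# Sketch: exercise MonitoringBuffer's downsampling, per the class defined above.
import logging
from bzt.modules.blazemeter.blazemeter_reporter import MonitoringBuffer

buf = MonitoringBuffer(size_limit=4, parent_log=logging.getLogger('demo'))
for ts in range(10):
    # each sample is a dict with a 'source', a timestamp and arbitrary metrics
    buf.record_data([{'source': 'local', 'ts': ts, 'cpu': float(ts)}])

# adjacent intervals were merged so the buffer stays within size_limit
print(len(buf.data['local']))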
71975dd9b4598f0884460876d889b91d528834d3
20,434
py
Python
nitorch/nn/losses/_spatial.py
wyli/nitorch
3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac
[ "MIT" ]
1
2021-04-09T21:24:47.000Z
2021-04-09T21:24:47.000Z
nitorch/nn/losses/_spatial.py
wyli/nitorch
3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac
[ "MIT" ]
null
null
null
nitorch/nn/losses/_spatial.py
wyli/nitorch
3ecd18944cf45fb9193c4c6ffc32953c4d1c71ac
[ "MIT" ]
null
null
null
""" Losses that assume an underlying spatial organization (gradients, curvature, etc.) """ import torch import torch.nn as tnn from nitorch.core.pyutils import make_list, prod from nitorch.core.utils import slice_tensor from nitorch.spatial import diff1d from ._base import Loss class LocalFeatures(tnn.Module): """Base class for feature extractors. Is it really useful? """ def __init__(self, bound='dct2', voxel_size=1, *args, **kwargs): """ Parameters ---------- bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. voxel_size : float or list[float], default=1 Voxel size """ super().__init__(*args, **kwargs) self.bound = bound self.voxel_size = voxel_size class Diff(LocalFeatures): """Finite differences.""" def __init__(self, order=1, side='c', dim=None, *args, **kwargs): """ Parameters ---------- order : int, default=1 Finite differences order side : {'c', 'f', 'b'} or list[{'c', 'f', 'b'}], default='c' Type of finite-differencesto extract about each voxel: * 'c' : central -> `g[i] = (x[i+1] - x[i-1])/2` * 'f' : forward -> `g[i] = (x[i+1] - x[i])` * 'b' : backward -> `g[i] = (x[i] - x[i-1])` dim : int or list[int], optional Dimensions along which to compute the finite differences. By default, all except the first two (batch and channel). bound : BoundType or list[BoundType], default='dct2' Boundary conditions, used to compute derivatives at the edges. voxel_size : float or list[float], default=1 Voxel size reduction : {'mean', 'sum'} or callable, default='mean' Type of reduction to apply. """ super().__init__(*args, **kwargs) self.order = order self.side = side self.dim = dim def forward(self, x, **overload): """ Parameters ---------- x : tensor Input tensor with shape (batch, channel, *spatial) overload : dict All parameters defined at build time can be overridden at call time. Returns ------- g : tensor Finite differences with shape (batch, channel, *spatial, len(dim), len(side)) If `dim` or `side` are scalars, not lists, their respective dimension is dropped in the output tensor. E.g., if `side='c'`, the output shape is (batch, channel, *spatial, len(dim)) """ order = overload.get('order', self.order) side = make_list(overload.get('side', self.side)) drop_side_dim = not isinstance(side, (tuple, list)) side = make_list(side) dim = overload.get('dim', self.dim) dim = list(range(2, x.dim())) if dim is None else dim drop_dim_dim = not isinstance(dim, (tuple, list)) dim = make_list(dim) nb_dim = len(dim) voxel_size = overload.get('voxel_size', self.voxel_size) voxel_size = make_list(voxel_size, nb_dim) bound = make_list(overload.get('bound', self.bound), nb_dim) diffs = [] for d, vx, bnd in zip(dim, voxel_size, bound): sides = [] for s in side: grad = diff1d(x, order=order, dim=d, voxel_size=vx, side=s, bound=bnd) sides.append(grad) sides = torch.stack(sides, dim=-1) diffs.append(sides) diffs = torch.stack(diffs, dim=-2) if drop_dim_dim: diffs = slice_tensor(diffs, 0, dim=-2) if drop_side_dim: diffs = slice_tensor(diffs, 0, dim=-1) return diffs class MembraneLoss(Loss): """Compute the membrane energy (squared gradients) of a tensor. The membrane energy of a field is the integral of its squared gradient magnitude (l2 norm). This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, if we name "f" the unit of the field and "m" the spatial unit of a voxel, the output loss has unit `(f/m)**2`. 
If `factor` is used to weight each voxel by its volume (as should be done in a proper integration) the unit becomes `(f/m)**2 * m**d = f**2 * m**(d-2)`. In the l1 case, it is `f/m` in the absence of weighting and `f * m**(d-1)` with volume weighting. """ def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None, *args, **kwargs): """ Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, direction, side) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) """ super().__init__(*args, **kwargs) self.voxel_size = voxel_size self.factor = factor self.bound = bound self.l1 = l1 def forward(self, x, **overload): """ Parameters ---------- x : tensor Input tensor overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. """ nb_dim = x.dim() - 2 voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim) factor = make_list(overload.get('factor', self.factor), nb_dim) bound = make_list(overload.get('bound', self.bound), nb_dim) l1 = overload.get('l1', self.l1) # Compute spatial gradients # # TODO: when penalty == 'l2', for some boundary conditions, there's no # need to compute both forward and backward gradients as they are # the same (but shifted). For now, to avoid having to detect which # cases can be accelerated, I always compute both (more general). loss = Diff(side=['f', 'b'], bound=bound, voxel_size=voxel_size)(x) loss = loss.square() # Apply l1 if l1 not in (None, False): if l1 is True: loss = loss.sqrt() else: l1 = make_list(l1) loss = loss.sum(dim=l1).sqrt() # TODO: use self.reduction instead of sum? # Reduce loss = super().forward(loss) # Scale factor = prod(factor) if factor != 1: loss = loss * factor return loss class BendingLoss(Loss): """Compute the bending energy (squared gradients) of a tensor. The bending energy of a field is the integral of its squared second-order derivatives magnitude (l2 norm). This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, if we name "f" the unit of the field and "m" the spatial unit of a voxel, the output loss has unit `(f/m**2)**2`. If `factor` is used to weight each voxel by its volume (as should be done in a proper integration) the unit becomes `(f/m**2)**2 * m**d = f**2 * m**(d-4)`. In the l1 case, it is `f/m**2` in the absence of weighting and `f * m**(d-2)` with volume weighting. """ def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None, *args, **kwargs): """ Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. 
Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, direction) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) """ super().__init__(*args, **kwargs) self.voxel_size = voxel_size self.factor = factor self.bound = bound self.l1 = l1 def forward(self, x, **overload): """ Parameters ---------- x : tensor Input tensor overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. """ nb_dim = x.dim() - 2 voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim) factor = make_list(overload.get('factor', self.factor), nb_dim) bound = make_list(overload.get('bound', self.bound), nb_dim) l1 = overload.get('l1', self.l1) # Compute spatial gradients loss = Diff(order=2, side='c', bound=bound, voxel_size=voxel_size)(x) loss = loss.square() # Apply l1 if l1 not in (None, False): if l1 is True: loss = loss.sqrt() else: l1 = make_list(l1) loss = loss.sum(dim=l1).sqrt() # Reduce loss = super().forward(loss) # Scale factor = prod(factor) if factor != 1: loss = loss * factor return loss class LameShearLoss(Loss): """Strain-part of the (Linear)-Elastic energy (penalty on shears). = second Lame constant = shear modulus The shear energy of a deformation field is the integral of the square magnitude (l2 norm) of the symetric part diagonal terms of its Jacobian. This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, E = sum_{i != j} (dv[i]/dx[j]) ** 2. """ def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None, exclude_zooms=False, *args, **kwargs): """ Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, side) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) Here, `channel` map to elements of the Jacobian matrix, while `side` map to the combination of sides (forward/backward) used when extracting finite differences. Therefore, the number of channels is dim*(dim+1)//2 and the number of sides is 4. 
exclude_zooms : bool, default=False Do not include diagonal elements of the Jacobian in the penalty (i.e., penalize only shears) """ super().__init__(*args, **kwargs) self.voxel_size = voxel_size self.factor = factor self.bound = bound self.l1 = l1 self.exclude_zooms = exclude_zooms def forward(self, x, **overload): """ Parameters ---------- x : (batch, ndim, *spatial) tensor Input displacement tensor (in channel first order) overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. """ nb_dim = x.dim() - 2 voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim) factor = make_list(overload.get('factor', self.factor), nb_dim) bound = make_list(overload.get('bound', self.bound), nb_dim) l1 = overload.get('l1', self.l1) exclude_zooms = overload.get('exclude_zooms', self.exclude_zooms) # Compute spatial gradients loss_diag = [] # diagonal elements of the Jacobian loss_offdiag = [] # off-diagonal elements of hte (symmetric) Jacobian for i in range(nb_dim): # symmetric part x_i = x[:, i:i+1, ...] subloss_diag = [] subloss_offdiag = [] for j in range(nb_dim): for side_i in ('f', 'b'): diff = Diff(dim=[j+2], side=side_i, bound=bound, voxel_size=voxel_size) diff_ij = diff(x_i) if i == j: # diagonal elements if not exclude_zooms: subloss_diag.append(diff_ij) else: # off diagonal elements x_j = x[:, j:j+1, ...] for side_j in ('f', 'b'): diff = Diff(dim=[i+2], side=side_j, bound=bound, voxel_size=voxel_size) diff_ji = diff(x_j) subloss_offdiag.append((diff_ij + diff_ji)/2) if not exclude_zooms: loss_diag.append(torch.stack(subloss_diag, dim=-1)) loss_offdiag.append(torch.stack(subloss_offdiag, dim=-1)) if not exclude_zooms: loss_diag = torch.cat(loss_diag, dim=1) loss_offdiag = torch.cat(loss_offdiag, dim=1) if l1 not in (None, False): # Apply l1 reduction if l1 is True: if not exclude_zooms: loss_diag = loss_diag.abs() loss_offdiag = loss_offdiag.abs() else: l1 = make_list(l1) if not exclude_zooms: loss_diag = loss_diag.square().sum(dim=l1, keepdim=True).sqrt() loss_offdiag = loss_offdiag.square().sum(dim=l1, keepdim=True).sqrt() else: # Apply l2 reduction if not exclude_zooms: loss_diag = loss_diag.square() loss_offdiag = loss_offdiag.square() # Mean reduction across sides if not exclude_zooms: loss_diag = loss_diag.mean(dim=-1) loss_offdiag = loss_offdiag.mean(dim=-1) # Weighted reduction across elements if not exclude_zooms: if loss_diag.shape[1] == 1: # element dimension already reduced -> we need a small hack loss = (loss_diag.square() + 2*loss_offdiag.square()) / (nb_dim**2) loss = loss.sum(dim=1, keepdim=True).sqrt() else: # simple weighted average loss = (loss_diag.sum(dim=1, keepdim=True) + loss_offdiag.sum(dim=1, keepdim=True)*2) / (nb_dim**2) else: loss = loss_offdiag.sum(dim=1, keepdim=True)*2 / (nb_dim**2) # Reduce loss = super().forward(loss) # Scale factor = prod(factor) if factor != 1: loss = loss * factor return loss class LameZoomLoss(Loss): """Compression-part of the (Linear)-Elastic energy (penalty on volume change). = first Lame constant The compression energy of a deformation field is the integral of the square magnitude (l2 norm) of the trace its Jacobian. This class extends this concept to other norms of the gradient (l1, l{1,2}). In the l2 case, E = sum_{ij} (dv[i]/dx[j] + dv[j]/dx[i]) ** 2. 
""" def __init__(self, voxel_size=1, factor=1, bound='dct2', l1=None, *args, **kwargs): """ Parameters ---------- voxel_size : float or list[float], default=1 Voxel size. Useful for anisotropic tensors (where the sampling rate is higher in some directions than others). factor : float or list[float], default=1 Scale the loss by a per-dimension factor. Useful when working with resized tensor to compensate for different number of voxels. bound : BoundType, default='dct2' Boundary conditions, used to compute derivatives at the edges. l1 : bool or int or list[int], default=None Dimensions along which to apply a square root reduction ('l1 norm'), after taking the square. Dimensions are those of the gradient map with shape (batch, channel, *spatial, direction, side) * False: nowhere == (squared) l2 norm * True: everywhere == l1 norm * Otherwise: l_{1,2} norm (group sparsity) """ super().__init__(*args, **kwargs) self.voxel_size = voxel_size self.factor = factor self.bound = bound self.l1 = l1 def forward(self, x, **overload): """ Parameters ---------- x : tensor Input tensor overload : dict All parameters defined at build time can be overridden at call time. Returns ------- loss : scalar or tensor The output shape depends on the type of reduction used. If 'mean' or 'sum', this function returns a scalar. """ nb_dim = x.dim() - 2 voxel_size = make_list(overload.get('voxel_size', self.voxel_size), nb_dim) factor = make_list(overload.get('factor', self.factor), nb_dim) bound = make_list(overload.get('bound', self.bound), nb_dim) l1 = overload.get('l1', self.l1) # Compute spatial gradients loss = [] for i in range(nb_dim): x_i = x[:, i:i+1, ...] diff = Diff(dim=[i], side=['f', 'b'], bound=bound, voxel_size=voxel_size) loss.append(diff(x_i)) loss = torch.cat(loss, dim=1) loss = loss.square() # Apply l1 if l1 not in (None, False): if l1 is True: loss = loss.sqrt() else: l1 = make_list(l1) loss = loss.sum(dim=l1, keepdim=True).sqrt() # Mean reduction across sides loss = loss.mean(dim=-1) # Reduce loss = super().forward(loss) # Scale factor = prod(factor) if factor != 1: loss = loss * factor return loss
35.414211
90
0.553049
20,136
0.985416
0
0
0
0
0
0
11,542
0.564843
7197c87f66af380e5e98dd30c64711ce25f12d71
607
py
Python
items/models.py
roberthtamayose/digitalmenu
19c6633844934fd95f861674946da386411a19c9
[ "MIT" ]
null
null
null
items/models.py
roberthtamayose/digitalmenu
19c6633844934fd95f861674946da386411a19c9
[ "MIT" ]
null
null
null
items/models.py
roberthtamayose/digitalmenu
19c6633844934fd95f861674946da386411a19c9
[ "MIT" ]
null
null
null
from django.db import models
from django.utils import timezone


class Categoria(models.Model):
    nome = models.CharField(max_length=255)

    def __str__(self):
        return self.nome


class Item(models.Model):
    nome = models.CharField(max_length=255)
    data_criacao = models.DateTimeField(default=timezone.now)
    descricao = models.TextField(blank=True)
    categoria = models.ForeignKey(Categoria, on_delete=models.DO_NOTHING)
    ocultar = models.BooleanField(default=False)
    foto = models.ImageField(blank=True, upload_to='fotos/%y/%m/')

    def __str__(self):
        return self.nome
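A hypothetical interactive session showing how these models might be used once the app is installed and migrated; the object names and values are invented for the example.

# python manage.py shell
from items.models import Categoria, Item

bebidas = Categoria.objects.create(nome="Bebidas")
Item.objects.create(nome="Suco de laranja", descricao="300 ml", categoria=bebidas)
print(Item.objects.filter(categoria=bebidas, ocultar=False).count())   # -> 1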
27.590909
73
0.726524
538
0.886326
0
0
0
0
0
0
14
0.023064
719810055bee113941d00e469e5cff1dcf6bfa92
114
py
Python
app/services/__init__.py
zeroday0619/XenXenXenSe
5af079e5edde3a6e4a1f5868052480d7b140d87c
[ "MIT" ]
1
2021-04-23T08:56:05.000Z
2021-04-23T08:56:05.000Z
app/services/__init__.py
Alex4386/XenXenXenSe
c60e50f26a7c3b306ee3cbb140b3ad7f39c21d93
[ "MIT" ]
null
null
null
app/services/__init__.py
Alex4386/XenXenXenSe
c60e50f26a7c3b306ee3cbb140b3ad7f39c21d93
[ "MIT" ]
null
null
null
from app.services.console import Console
from app.services.server import Server

__main__ = ["server", "console"]
22.8
40
0.780702
0
0
0
0
0
0
0
0
17
0.149123
719876b6e33d3caa67b41082a88c72293d4411b5
2,801
py
Python
launch/twist_mux_launch.py
nuclearsandwich-ros/twist_mux-release
d92dcda0255e727b899d3bac62ef3d89c19cb38e
[ "Apache-2.0" ]
31
2017-11-25T17:13:00.000Z
2022-01-20T14:39:12.000Z
launch/twist_mux_launch.py
nuclearsandwich-ros/twist_mux-release
d92dcda0255e727b899d3bac62ef3d89c19cb38e
[ "Apache-2.0" ]
27
2015-05-22T13:35:04.000Z
2021-12-29T07:26:02.000Z
launch/twist_mux_launch.py
nuclearsandwich-ros/twist_mux-release
d92dcda0255e727b899d3bac62ef3d89c19cb38e
[ "Apache-2.0" ]
51
2015-10-16T11:41:24.000Z
2022-03-28T07:33:24.000Z
#!/usr/bin/env python3 # Copyright 2020 Gaitech Korea Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Author: Brighten Lee import os from ament_index_python.packages import get_package_share_directory from launch import LaunchDescription from launch.actions import DeclareLaunchArgument from launch.substitutions import LaunchConfiguration from launch_ros.actions import Node def generate_launch_description(): default_config_locks = os.path.join(get_package_share_directory('twist_mux'), 'config', 'twist_mux_locks.yaml') default_config_topics = os.path.join(get_package_share_directory('twist_mux'), 'config', 'twist_mux_topics.yaml') default_config_joystick = os.path.join(get_package_share_directory('twist_mux'), 'config', 'joystick.yaml') return LaunchDescription([ DeclareLaunchArgument( 'config_locks', default_value=default_config_locks, description='Default locks config file'), DeclareLaunchArgument( 'config_topics', default_value=default_config_topics, description='Default topics config file'), DeclareLaunchArgument( 'config_joy', default_value=default_config_joystick, description='Default joystick config file'), DeclareLaunchArgument( 'cmd_vel_out', default_value='twist_mux/cmd_vel', description='cmd vel output topic'), Node( package='twist_mux', executable='twist_mux', output='screen', remappings={('/cmd_vel_out', LaunchConfiguration('cmd_vel_out'))}, parameters=[ LaunchConfiguration('config_locks'), LaunchConfiguration('config_topics'), LaunchConfiguration('config_joy')] ), Node( package='twist_mux', executable='twist_marker', output='screen', remappings={('/twist', LaunchConfiguration('cmd_vel_out'))}, parameters=[{ 'frame_id': 'base_link', 'scale': 1.0, 'vertical_position': 2.0}]) ])
38.902778
84
0.63513
0
0
0
0
0
0
0
0
1,114
0.397715
7199385be37350560f528085cc7c3bcbd212b172
5,298
py
Python
Tests/testLiveService.py
psu-capstone-teamD/ElementalAuth
d896efad5a3e4cb453c324afc456aa82f82da239
[ "MIT" ]
2
2017-08-21T00:52:35.000Z
2018-10-31T17:38:42.000Z
Tests/testLiveService.py
psu-capstone-teamD/ElementalAuth
d896efad5a3e4cb453c324afc456aa82f82da239
[ "MIT" ]
27
2017-07-27T21:10:35.000Z
2017-08-24T21:19:23.000Z
Tests/testLiveService.py
psu-capstone-teamD/ElementalAuth
d896efad5a3e4cb453c324afc456aa82f82da239
[ "MIT" ]
2
2017-07-08T00:57:08.000Z
2017-07-24T19:21:12.000Z
import sys import unittest import requests_mock from mock import patch sys.path.append('services/LiveService') from LiveService import LiveService L = LiveService() baseURL = "https://yanexx65s8e1.live.elementalclouddev.com/api" class LiveServiceTest(unittest.TestCase): '''@patch('services.LiveService.LiveService.time', return_value=1502345833) def testSetHeaders(self, mock_time): headers = L.setHeaders("/schedules") self.assertEqual(headers, {'X-Auth-Expires': '1502345863', 'X-Auth-Key': '9c9a72cd3a8feec48539f1943afbef8d', 'Content-type': 'application/xml', 'X-Auth-User': '', 'Accept': 'application/xml'})''' @requests_mock.Mocker() def testGetStatus(self, m): m.get(baseURL + "/live_events/150/status", status_code=200) resp = L.getLiveEventStatus(150) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testGetEvents(self, m): m.get(baseURL + "/live_events", status_code=200) m.get(baseURL + "/live_events?filter=running", status_code=200) resp = L.getLiveEvents(None) self.assertEqual(resp.status_code, 200) resp = L.getLiveEvents("running") self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testGetEvent(self, m): m.get(baseURL + "/live_events/164", status_code=200) resp = L.getLiveEvent(164) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testGetSchedules(self, m): m.get(baseURL + "/schedules", status_code=200) resp = L.getSchedules() self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testGetLiveProfiles(self, m): m.get(baseURL + "/live_event_profiles", status_code=200) resp = L.getLiveProfiles() self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testGetLiveProfile(self, m): m.get(baseURL + "/live_event_profiles/11", status_code=200) resp = L.getLiveProfile(11) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testCreateLiveEvent(self, m): with open('Tests/test_XML/live_event.xml', 'r') as infile: xml = infile.read() m.post(baseURL + "/live_events", status_code=201) resp = L.createEvent(xml) self.assertEqual(resp.status_code, 201) @requests_mock.Mocker() def testCreateSchedule(self, m): with open('Tests/test_XML/schedule.xml', 'r') as infile: xml = infile.read() m.post(baseURL + "/schedules", status_code=201) resp = L.createSchedule(xml) self.assertEqual(resp.status_code, 201) @requests_mock.Mocker() def testCreateProfile(self, m): with open('Tests/test_XML/schedule.xml', 'r') as infile: xml = infile.read() m.post(baseURL + "/schedules", status_code=201) resp = L.createSchedule(xml) self.assertEqual(resp.status_code, 201) @requests_mock.Mocker() def testUpdateEvent(self, m): with open('Tests/test_XML/live_event.xml', 'r') as infile: xml = infile.read() m.put(baseURL + "/live_events/50", status_code=200) resp = L.updateLiveEvent(50, xml) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testUpdatePlaylist(self, m): with open('Tests/test_XML/live_event.xml', 'r') as infile: xml = infile.read() m.post(baseURL + "/live_events/92/playlist", status_code=200) resp = L.updatePlaylist(92, xml) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testUpdateSchedule(self, m): with open('Tests/test_XML/schedule.xml', 'r') as infile: xml = infile.read() m.put(baseURL + "/schedules/13", status_code=200) resp = L.updateSchedule(13, xml) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testUpdateProfile(self, m): with open('Tests/test_XML/live_profile.xml', 'r') as infile: xml = infile.read() m.put(baseURL + "/live_event_profiles/33", status_code=200) 
resp = L.updateProfile(33, xml) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testRemoveLiveEvent(self, m): m.delete(baseURL + "/live_events/191", status_code=200) resp = L.removeEvent(191) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testRemoveSchedule(self, m): m.delete(baseURL + "/schedules/13", status_code=200) resp = L.removeSchedule(13) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testRemoveProfile(self, m): m.delete(baseURL + "/live_event_profiles/33", status_code=200) resp = L.removeProfile(33) self.assertEqual(resp.status_code, 200) @requests_mock.Mocker() def testStartEvent(self, m): m.post(baseURL + "/live_events/50/start", status_code=200) resp = L.startLiveEvent(50) self.assertEqual(resp.status_code, 200) if __name__ == '__main__': unittest.main()
35.557047
85
0.634957
5,013
0.946206
0
0
4,354
0.82182
0
0
1,184
0.223481
719a07f87262fe8ff8cbef8ec2795807ff5db531
10,005
py
Python
tests/models/test_stacking.py
LionelMassoulard/aikit
98b2abaa3bf47ab46f2fd3c270010293de06dba9
[ "BSD-2-Clause" ]
null
null
null
tests/models/test_stacking.py
LionelMassoulard/aikit
98b2abaa3bf47ab46f2fd3c270010293de06dba9
[ "BSD-2-Clause" ]
null
null
null
tests/models/test_stacking.py
LionelMassoulard/aikit
98b2abaa3bf47ab46f2fd3c270010293de06dba9
[ "BSD-2-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """ Created on Fri Sep 14 11:49:10 2018 @author: Lionel Massoulard """ import pytest import numpy as np import pandas as pd from sklearn.base import is_regressor, is_classifier from sklearn.exceptions import NotFittedError from sklearn.model_selection import KFold from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import RandomForestRegressor from sklearn.linear_model import LogisticRegression, Ridge from sklearn.dummy import DummyRegressor from aikit.models.stacking import OutSamplerTransformer, StackerClassifier, StackerRegressor def test_OutSamplerTransformer_classifier(): np.random.seed(123) X = np.random.randn(100, 10) y = 1 * (np.random.randn(100) > 0) model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123)) model.fit(X, y) p1 = model.model.predict_proba(X) p2 = model.transform(X) assert not is_classifier(model) assert not is_regressor(model) assert np.abs(p1[:, 1] - p2[:, 0]).max() <= 10 ** (-10) assert p2.shape == (100, 1) assert model.get_feature_names() == ["RandomForestClassifier__1"] y = np.array(["a", "b", "c"])[np.random.randint(0, 3, 100)] model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123)) model.fit(X, y) p1 = model.model.predict_proba(X) p2 = model.transform(X) assert p1.shape == (100, 3) assert p2.shape == (100, 3) assert np.abs(p1 - p2).max() <= 10 ** (-10) assert model.get_feature_names() == [ "RandomForestClassifier__a", "RandomForestClassifier__b", "RandomForestClassifier__c", ] def test_OutSampleTransformer_classifier_unbalanced(): np.random.seed(123) X = np.random.randn(100, 2) y = np.array(["AA"] * 33 + ["BB"] * 33 + ["CC"] * 33 + ["DD"]) model = OutSamplerTransformer(RandomForestClassifier(n_estimators=10, random_state=123)) p3 = model.fit_transform(X, y) assert (p3.max(axis=1) > 0).all() def test_OutSamplerTransformer_classifier_fit_transform(): X = np.random.randn(100, 10) y = 1 * (np.random.randn(100) > 0) cv = KFold(n_splits=10, shuffle=True, random_state=123) model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv) model.fit(X, y) y1 = model.transform(X) model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv) y2 = model.fit_transform(X, y) assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different def test_OutSamplerTransformer_regressor(): np.random.seed(123) X = np.random.randn(100, 10) y = np.random.randn(100) model = OutSamplerTransformer(RandomForestRegressor(n_estimators=10,random_state=123), cv=10) model.fit(X, y) y1 = model.model.predict(X) y2 = model.transform(X) assert not is_classifier(model) assert not is_regressor(model) assert np.abs(y1 - y2[:, 0]).max() <= 10 ** (-10) assert y2.shape == (100, 1) assert model.get_feature_names() == ["RandomForestRegressor__target"] def test_OutSamplerTransformer_regressor_fit_transform(): np.random.seed(123) X = np.random.randn(100, 10) y = np.random.randn(100) cv = KFold(n_splits=10, shuffle=True, random_state=123) model = OutSamplerTransformer(DummyRegressor(), cv=cv) model.fit(X, y) y1 = model.transform(X) model = OutSamplerTransformer(DummyRegressor(), cv=cv) y2 = model.fit_transform(X, y) assert np.abs(y1 - y2).flatten().max() >= 0.01 # vector should be different def test_approx_cross_validation_OutSamplerTransformer_regressor(): np.random.seed(123) X = np.random.randn(100, 10) y = np.random.randn(100) model = OutSamplerTransformer(RandomForestRegressor(random_state=123), cv=10) cv_res, yhat = model.approx_cross_validation(X, y, 
cv=10, method="transform", no_scoring=True) assert cv_res is None assert yhat.ndim == 2 assert yhat.shape == (y.shape[0], 1) with pytest.raises(NotFittedError): model.transform(X) cv = KFold(n_splits=10, shuffle=True, random_state=123) model = OutSamplerTransformer(DummyRegressor(), cv=cv) yhat1 = model.fit_transform(X, y) cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True) # Approx cross val and fit transform should return the same thing here assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5) yhat3 = np.zeros((y.shape[0], 1)) for train, test in cv.split(X, y): model = DummyRegressor() model.fit(X[train, :], y[train]) yhat3[test, 0] = model.predict(X[test, :]) assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5) assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5) def test_approx_cross_validation_OutSamplerTransformer_classifier(): np.random.seed(123) X = np.random.randn(100, 10) y = 1 * (np.random.randn(100) > 0) model = OutSamplerTransformer(RandomForestClassifier(random_state=123), cv=10) cv_res, yhat = model.approx_cross_validation(X, y, cv=10, method="transform", no_scoring=True) assert cv_res is None assert yhat.ndim == 2 assert yhat.shape == (y.shape[0], 1) with pytest.raises(NotFittedError): model.transform(X) with pytest.raises(NotFittedError): model.model.predict(X) cv = KFold(n_splits=10, shuffle=True, random_state=123) model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv) yhat1 = model.fit_transform(X, y) model = OutSamplerTransformer(LogisticRegression(C=1,random_state=123), cv=cv) cv_res, yhat2 = model.approx_cross_validation(X, y, cv=cv, method="transform", no_scoring=True, return_predict=True) # Approx cross val and fit transform should return the same thing here assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5) yhat3 = np.zeros((y.shape[0], 1)) for train, test in cv.split(X, y): model = LogisticRegression() model.fit(X[train, :], y[train]) yhat3[test, 0] = model.predict_proba(X[test, :])[:, 1] assert np.abs((yhat1 - yhat3).flatten()).max() <= 10 ** (-5) assert np.abs((yhat1 - yhat2).flatten()).max() <= 10 ** (-5) def test_StackerRegressor(): np.random.seed(123) X = np.random.randn(100, 10) y = np.random.randn(100) stacker = StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123)) stacker.fit(X, y) yhat = stacker.predict(X) assert yhat.ndim == 1 assert yhat.shape[0] == X.shape[0] assert is_regressor(stacker) assert not is_classifier(stacker) with pytest.raises(AttributeError): stacker.predict_proba(X) with pytest.raises(AttributeError): stacker.classes_ def test_StackerClassifier(): np.random.seed(123) X = np.random.randn(100, 10) y = 1 * (np.random.randn(100) > 0) stacker = StackerClassifier( models=[RandomForestClassifier(random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123) ) stacker.fit(X, y) yhat = stacker.predict(X) assert yhat.ndim == 1 assert yhat.shape[0] == X.shape[0] assert list(set(yhat)) == [0, 1] assert list(stacker.classes_) == [0, 1] yhat_proba = stacker.predict_proba(X) assert yhat_proba.shape == (y.shape[0], 2) assert not is_regressor(stacker) assert is_classifier(stacker) def test_approx_cross_validation_StackerRegressor(): np.random.seed(123) X = np.random.randn(100, 10) y = np.random.randn(100) stacker = 
StackerRegressor(models=[RandomForestRegressor(n_estimators=10,random_state=123), Ridge(random_state=123)], cv=10, blender=Ridge(random_state=123)) cv_res, yhat = stacker.approx_cross_validation( X, y, cv=10, method="predict", scoring=["neg_mean_squared_error"], return_predict=True, verbose=False ) assert cv_res is not None assert isinstance(cv_res, pd.DataFrame) assert cv_res.shape[0] == 10 assert "test_neg_mean_squared_error" in cv_res assert "train_neg_mean_squared_error" in cv_res assert yhat.ndim == 1 assert yhat.shape[0] == y.shape[0] with pytest.raises(NotFittedError): stacker.predict(X) for m in stacker.models: with pytest.raises(NotFittedError): m.predict(X) def test_approx_cross_validation_StackerClassifier(): np.random.seed(123) X = np.random.randn(100, 10) y = 1 * (np.random.randn(100) > 0) stacker = StackerClassifier( models=[RandomForestClassifier(n_estimators=10,random_state=123), LogisticRegression(C=1,random_state=123)], cv=10, blender=LogisticRegression(C=1,random_state=123) ) cv_res, yhat = stacker.approx_cross_validation( X, y, cv=10, method="predict_proba", scoring=["accuracy"], return_predict=True, verbose=False ) assert cv_res is not None assert isinstance(cv_res, pd.DataFrame) assert cv_res.shape[0] == 10 assert "test_accuracy" in cv_res assert "train_accuracy" in cv_res assert yhat.ndim == 2 assert yhat.shape == (y.shape[0], 2) with pytest.raises(NotFittedError): stacker.predict(X) for m in stacker.models: with pytest.raises(NotFittedError): m.predict(X) def _verif_all(): test_OutSamplerTransformer_classifier() test_OutSamplerTransformer_regressor() test_OutSamplerTransformer_classifier_fit_transform() test_OutSamplerTransformer_regressor_fit_transform() test_approx_cross_validation_OutSamplerTransformer_regressor() test_approx_cross_validation_OutSamplerTransformer_classifier() test_StackerClassifier() test_StackerRegressor() test_approx_cross_validation_StackerClassifier() test_approx_cross_validation_StackerRegressor()
29.254386
172
0.687856
0
0
0
0
0
0
0
0
646
0.064568
719a305b1e0f6ee4015df4fc0e1d42b61d553b49
1,611
py
Python
employee/views/check_rental.py
odrolliv13/Hex-Photos
d1b42b63394783164f843fe6343491f04fe11e0c
[ "Apache-2.0" ]
null
null
null
employee/views/check_rental.py
odrolliv13/Hex-Photos
d1b42b63394783164f843fe6343491f04fe11e0c
[ "Apache-2.0" ]
null
null
null
employee/views/check_rental.py
odrolliv13/Hex-Photos
d1b42b63394783164f843fe6343491f04fe11e0c
[ "Apache-2.0" ]
null
null
null
from django import forms
from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect, Http404
from manager import models as pmod
from . import templater
from django.conf import settings
import decimal, datetime


# This view will display all users and then on a new page display all the current rentals for a given user
def process_request(request):
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/shop')
    if request.user.is_staff == False:
        return HttpResponseRedirect('/shop')

    if request.urlparams[0] == "":
        # This form will display all users
        form = CheckRentalForm(initial={
            'user': "",
        })
        if request.method == 'POST':
            form = CheckRentalForm(request.POST)
            if form.is_valid():
                # From here the page will redirect to show all the current rentals for the user picked
                complete = "/employee/customer_rentals/" + str(form.cleaned_data['user'].id)
                return HttpResponseRedirect(complete)
        tvars = {
            'form': form,
        }
        return templater.render_to_response(request, 'return_rental.html', tvars)
    else:
        try:
            complete_rental = pmod.Rental.objects.get(id=request.urlparams[0])
            form = CheckRentalForm(initial={
                'user': "",
            })
        except:
            pass
        form = "dfd"
        tvars = {
            'form': form,
        }
        return templater.render_to_response(request, 'return_rental.html', tvars)


class CheckRentalForm(forms.Form):
    user = forms.ModelChoiceField(queryset=pmod.User.objects.exclude(is_active=False), label="User", widget=forms.Select(attrs={'class': 'form-control'}))
30.980769
150
0.703911
186
0.115456
0
0
0
0
0
0
384
0.238361
719bca03a01e24f7c868ad83a281e40679838ca7
1,521
py
Python
jupyter/settings.py
nguyenngtt/GSE---TEAM-A
4f78c1ace051d4f2ff30a039aa481aa9b79d3242
[ "MIT" ]
3
2021-11-21T08:47:18.000Z
2021-11-28T10:35:10.000Z
jupyter/settings.py
nguyenngtt/GSE---TEAM-A
4f78c1ace051d4f2ff30a039aa481aa9b79d3242
[ "MIT" ]
6
2021-11-29T02:00:49.000Z
2022-02-08T09:21:38.000Z
jupyter/settings.py
nguyenngtt/GSE---TEAM-A
4f78c1ace051d4f2ff30a039aa481aa9b79d3242
[ "MIT" ]
3
2021-12-11T08:11:08.000Z
2022-01-10T12:51:48.000Z
import pandas as pd
import numpy as np
import os
import logging

# suppress warnings
import warnings; warnings.filterwarnings('ignore');

from tqdm.autonotebook import tqdm
# register `pandas.progress_apply` and `pandas.Series.map_apply` with `tqdm`
tqdm.pandas()

# https://pandas.pydata.org/pandas-docs/stable/user_guide/options.html#available-options
# adjust pandas display
pd.options.display.max_columns = 30  # default 20
pd.options.display.max_rows = 200  # default 60
pd.options.display.float_format = '{:.2f}'.format
# pd.options.display.precision = 2
pd.options.display.max_colwidth = 200  # default 50; None = all

# Number of array items in summary at beginning and end of each dimension
# np.set_printoptions(edgeitems=3)  # default 3
np.set_printoptions(suppress=True)  # no scientific notation for small numbers

# IPython (Jupyter) setting:
# Print out every value instead of just "last_expr" (default)
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"

import matplotlib as mpl
from matplotlib import pyplot as plt

# defaults: mpl.rcParamsDefault
rc_params = {'figure.figsize': (8, 4),
             'axes.labelsize': 'large',
             'axes.titlesize': 'large',
             'xtick.labelsize': 'large',
             'ytick.labelsize': 'large',
             'savefig.dpi': 100,
             'figure.dpi': 100}

# adjust matplotlib defaults
mpl.rcParams.update(rc_params)

import seaborn as sns
sns.set_style("darkgrid")
# sns.set()
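Since tqdm.pandas() is registered above, pandas objects gain a progress_apply method. A small toy illustration follows; the DataFrame and column names are invented for the example.

import pandas as pd
from tqdm.autonotebook import tqdm
tqdm.pandas()

df = pd.DataFrame({'x': range(10_000)})
df['x_squared'] = df['x'].progress_apply(lambda v: v ** 2)   # same as .apply, plus a progress bar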
30.42
88
0.724523
0
0
0
0
0
0
0
0
775
0.509533
719d88c236122420bab454b120302ded66f22838
828
py
Python
var/spack/repos/builtin/packages/py-cyvcf2/package.py
jeanbez/spack
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/py-cyvcf2/package.py
jeanbez/spack
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
8
2021-11-09T20:28:40.000Z
2022-03-15T03:26:33.000Z
var/spack/repos/builtin/packages/py-cyvcf2/package.py
jeanbez/spack
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2019-02-08T20:37:20.000Z
2019-03-31T15:19:26.000Z
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *


class PyCyvcf2(PythonPackage):
    """fast vcf parsing with cython + htslib"""

    homepage = "https://github.com/brentp/cyvcf2"
    pypi = "cyvcf2/cyvcf2-0.11.7.tar.gz"

    version('0.11.7', sha256='a4b6229b89a0a1043684c65cbdd702c366a8800dc3591fb44c4b5a08640cbeec')

    depends_on('[email protected]:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    depends_on('[email protected]:', type='build')
    depends_on('py-numpy', type=('build', 'run'))
    depends_on('py-coloredlogs', type=('build', 'run'))
    depends_on('py-click', type=('build', 'run'))
    depends_on('curl')
31.846154
96
0.689614
599
0.72343
0
0
0
0
0
0
524
0.63285
719e5a0939a4c90bfd66956e7385e51aac9d612e
340
py
Python
pset_functions/db_search/p1.py
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
5
2019-04-08T20:05:37.000Z
2019-12-04T20:48:45.000Z
pset_functions/db_search/p1.py
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
8
2019-04-15T15:16:05.000Z
2022-02-12T10:33:32.000Z
pset_functions/db_search/p1.py
mottaquikarim/pydev-psets
9749e0d216ee0a5c586d0d3013ef481cc21dee27
[ "MIT" ]
2
2019-04-10T00:14:42.000Z
2020-02-26T20:35:21.000Z
""" GPA Calculator """ # Write a function called "simple_gpa" to find GPA when student enters a letter grade as a string. Assign the result to a variable called "gpa". """ Use these conversions: A+ --> 4.0 A --> 4.0 A- --> 3.7 B+ --> 3.3 B --> 3.0 B- --> 2.7 C+ --> 2.3 C --> 2.0 C- --> 1.7 D+ --> 1.3 D --> 1.0 D- --> 0.7 F --> 0.0 """
14.166667
144
0.538235
0
0
0
0
0
0
0
0
334
0.982353
719e7932fde71fc017391588fcca49763cf61208
5,283
py
Python
test_soundcard.py
flying-sheep/SoundCard
b476c8142b460fc8161d374b282fe846d72a0780
[ "BSD-3-Clause" ]
1
2020-01-27T00:59:12.000Z
2020-01-27T00:59:12.000Z
test_soundcard.py
flying-sheep/SoundCard
b476c8142b460fc8161d374b282fe846d72a0780
[ "BSD-3-Clause" ]
null
null
null
test_soundcard.py
flying-sheep/SoundCard
b476c8142b460fc8161d374b282fe846d72a0780
[ "BSD-3-Clause" ]
null
null
null
import sys import soundcard import numpy import pytest ones = numpy.ones(1024) signal = numpy.concatenate([[ones], [-ones]]).T def test_speakers(): for speaker in soundcard.all_speakers(): assert isinstance(speaker.name, str) assert hasattr(speaker, 'id') assert isinstance(speaker.channels, int) assert speaker.channels > 0 def test_microphones(): for microphone in soundcard.all_microphones(): assert isinstance(microphone.name, str) assert hasattr(microphone, 'id') assert isinstance(microphone.channels, int) assert microphone.channels > 0 def test_default_playback(): soundcard.default_speaker().play(signal, 44100, channels=2) def test_default_record(): recording = soundcard.default_microphone().record(1024, 44100) assert len(recording == 1024) def test_default_blockless_record(): recording = soundcard.default_microphone().record(None, 44100) @pytest.fixture def loopback_speaker(): import sys if sys.platform == 'win32': # must install https://www.vb-audio.com/Cable/index.htm return soundcard.get_speaker('Cable') elif sys.platform == 'darwin': # must install soundflower return soundcard.get_speaker('Soundflower64') elif sys.platform == 'linux': # pacmd load-module module-null-sink channels=6 rate=48000 return soundcard.get_speaker('Null') else: raise RuntimeError('Unknown platform {}'.format(sys.platform)) @pytest.fixture def loopback_player(loopback_speaker): with loopback_speaker.player(48000, channels=2, blocksize=512) as player: yield player @pytest.fixture def loopback_microphone(): if sys.platform == 'win32': # must install https://www.vb-audio.com/Cable/index.htm return soundcard.get_microphone('Cable') elif sys.platform == 'darwin': # must install soundflower return soundcard.get_microphone('Soundflower64') elif sys.platform == 'linux': return soundcard.get_microphone('Null', include_loopback=True) else: raise RuntimeError('Unknown platform {}'.format(sys.platform)) @pytest.fixture def loopback_recorder(loopback_microphone): with loopback_microphone.recorder(48000, channels=2, blocksize=512) as recorder: yield recorder def test_loopback_playback(loopback_player, loopback_recorder): loopback_player.play(signal) recording = loopback_recorder.record(1024*10) assert recording.shape[1] == 2 left, right = recording.T assert left.mean() > 0 assert right.mean() < 0 assert (left > 0.5).sum() == len(signal) assert (right < -0.5).sum() == len(signal) def test_loopback_reverse_recorder_channelmap(loopback_player, loopback_microphone): with loopback_microphone.recorder(48000, channels=[1, 0], blocksize=512) as loopback_recorder: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert recording.shape[1] == 2 left, right = recording.T assert right.mean() > 0 assert left.mean() < 0 assert (right > 0.5).sum() == len(signal) assert (left < -0.5).sum() == len(signal) def test_loopback_reverse_player_channelmap(loopback_speaker, loopback_recorder): with loopback_speaker.player(48000, channels=[1, 0], blocksize=512) as loopback_player: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert recording.shape[1] == 2 left, right = recording.T assert right.mean() > 0 assert left.mean() < 0 assert (right > 0.5).sum() == len(signal) assert (left < -0.5).sum() == len(signal) def test_loopback_mono_player_channelmap(loopback_speaker, loopback_recorder): with loopback_speaker.player(48000, channels=[0], blocksize=512) as loopback_player: loopback_player.play(signal[:,0]) recording = loopback_recorder.record(1024*12) assert recording.shape[1] == 2 left, right = 
recording.T assert left.mean() > 0 if sys.platform == 'linux': # unmapped channels on linux are filled with the mean of other channels assert right.mean() < left.mean() else: assert abs(right.mean()) < 0.01 # something like zero assert (left > 0.5).sum() == len(signal) def test_loopback_mono_recorder_channelmap(loopback_player, loopback_microphone): with loopback_microphone.recorder(48000, channels=[0], blocksize=512) as loopback_recorder: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert len(recording.shape) == 1 or recording.shape[1] == 1 assert recording.mean() > 0 assert (recording > 0.5).sum() == len(signal) def test_loopback_multichannel_channelmap(loopback_speaker, loopback_microphone): with loopback_speaker.player(48000, channels=[2, 0], blocksize=512) as loopback_player: with loopback_microphone.recorder(48000, channels=[2, 0], blocksize=512) as loopback_recorder: loopback_player.play(signal) recording = loopback_recorder.record(1024*12) assert len(recording.shape) == 2 left, right = recording.T assert left.mean() > 0 assert right.mean() < 0 assert (left > 0.5).sum() == len(signal) assert (right < -0.5).sum() == len(signal)
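The loopback fixtures above expect a specific virtual device per platform. When porting these tests, the device names actually present can be listed first with the same top-level functions the tests already use; this snippet is illustrative and not part of the test module.

import soundcard

for speaker in soundcard.all_speakers():
    print("speaker:", speaker.name)
for microphone in soundcard.all_microphones():
    print("microphone:", microphone.name)
# pick the entry matching 'Cable', 'Soundflower64' or 'Null' for the loopback fixtures;
# loopback capture devices may only show up with include_loopback=True, as used in the fixtures.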
38.845588
102
0.696952
0
0
288
0.054514
1,367
0.258754
0
0
469
0.088775
719fb32d418ed1529b6d751555ff2385cebf2266
623
py
Python
Last 3 digits of 11^x.py
jaiveergill/Last-Three-Digits-of-11-x
def4519b9b46e41b4c4f2b3a5dbe5566316dd83e
[ "MIT" ]
null
null
null
Last 3 digits of 11^x.py
jaiveergill/Last-Three-Digits-of-11-x
def4519b9b46e41b4c4f2b3a5dbe5566316dd83e
[ "MIT" ]
null
null
null
Last 3 digits of 11^x.py
jaiveergill/Last-Three-Digits-of-11-x
def4519b9b46e41b4c4f2b3a5dbe5566316dd83e
[ "MIT" ]
null
null
null
# This is a simple program to find the last three digits of 11 raised to any given number.
# The main algorithm that does the work is on line 10


def trim_num(num):
    if len(str(num)) > 3:  # no need to trim if the number is 3 or less digits long
        return str(num)[(len(str(num)) - 3):]  # trims the number
    return num


def main(exp):
    # The main algorithm which needs to be cleaned (only the last three digits should be shown).
    # Integer division (//) is used so no float digits leak into the assembled string.
    init_val = str((((exp - 1) * exp) // 2) % 10 + (exp % 100) // 10) + str(exp % 10) + "1"
    return "{}".format(trim_num(init_val))

# To use it, simply copy the code and run the function
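As a cross-check on the digit manipulation above, modular exponentiation gives the last three digits directly. This snippet is illustrative and not part of the original file.

def last_three_digits_pow(exp):
    # pow with a modulus computes 11**exp % 1000 without building the huge power
    return "{:03d}".format(pow(11, exp, 1000))

for exp in (3, 12, 49):
    print(exp, last_three_digits_pow(exp), main(exp))  # the two columns should agree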
44.5
179
0.662921
0
0
0
0
0
0
0
0
369
0.592295
719fd87192b7b49949a8b70a475fd96677b03575
6,137
py
Python
osr_odometry/scripts/osr_odom_ackerman2.py
ljb2208/osr-rover-code
f4791d835cd760446777a226d37bb3114256affd
[ "Apache-2.0" ]
null
null
null
osr_odometry/scripts/osr_odom_ackerman2.py
ljb2208/osr-rover-code
f4791d835cd760446777a226d37bb3114256affd
[ "Apache-2.0" ]
null
null
null
osr_odometry/scripts/osr_odom_ackerman2.py
ljb2208/osr-rover-code
f4791d835cd760446777a226d37bb3114256affd
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python import time from osr_msgs.msg import Joystick, Commands, Encoder, RunStop from nav_msgs.msg import Odometry from geometry_msgs.msg import Point, Pose, Quaternion, Twist, Vector3 import rospy import tf import math import numpy class Odometry2(): def __init__(self, baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=False): self.encValid = False self.priorTime = rospy.Time.now() self.priorEncs = [0,0,0,0,0,0] self.mpt = mpt self.pubTF = pubTF # distance between wheels self.wheelTrack = wheelTrack self.d4 = d4 self.baseFrame = baseFrame self.maxTickPerSec = maxTickPerSec self.x = 0.0 self.y = 0.0 self.th = 0.0 self.odomPub = rospy.Publisher("/odom", Odometry, queue_size = 1) if self.pubTF: self.odomBroadcaster = tf.TransformBroadcaster() self.twistCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel() self.poseCovar = numpy.diag([0.001, 0.001, 0.001, 0.1, 0.1, 0.1]).ravel() def onEncoderMessage(self, message): self.calculateOdometry(message) def isValid(self, message): dencLeft = abs(message.rel_enc[1] - self.priorEncs[1]) dencRight = abs(message.rel_enc[4] - self.priorEncs[4]) dt = self.getElapsedTime(message.header.stamp) if (dencLeft/dt) > self.maxTickPerSec: rospy.logwarn("Invalid relative encoder value on left wheel. No odom calculated") return False if (dencRight/dt) > self.maxTickPerSec: rospy.logwarn("Invalid relative encoder value on right wheel. No odom calculated") return False return True def publishTransform(self, x, y, quaternion, timestamp): self.odomBroadcaster.sendTransform( (x, y, 0), (quaternion.x, quaternion.y, quaternion.z, quaternion.w), timestamp, self.baseFrame, "odom") def publishOdomMessage(self, x, y, vx, vy, vth, quaternion, timestamp): odom = Odometry() odom.header.frame_id = "odom" odom.child_frame_id = self.baseFrame odom.header.stamp = timestamp odom.pose.pose.position.x = x odom.pose.pose.position.y = y odom.pose.pose.position.z = 0 odom.pose.covariance = self.poseCovar odom.pose.pose.orientation = quaternion odom.twist.twist.linear.x = vx odom.twist.twist.linear.y = vy odom.twist.twist.linear.z = 0 odom.twist.twist.angular.z = vth odom.twist.covariance = self.twistCovar self.odomPub.publish(odom) def getElapsedTime(self, timestamp, save=False): dt = (timestamp - self.priorTime).to_sec() if save: self.priorTime = timestamp return dt def calculateTurnRadius(self, dLeft, dRight): dlr = dLeft - dRight # calculate radius of turn if dlr != 0 and dLeft != 0 and dRight != 0: lv = self.d4 + dLeft / dRight * self.d4 # print ("lv: " + str(lv)) r = lv / (1 - (dLeft / dRight)) else: r = 0 dist = (dLeft + dRight) / 2 # calculate angle change if (r != 0): dTheta = dist / -r else: dTheta = 0 return r, dTheta def calculateOdometry(self, message): currentTime = message.header.stamp encs = message.rel_enc if not self.isValid(message): return dt = self.getElapsedTime(currentTime, save=True) dLeft = self.mpt * (encs[1] - self.priorEncs[1]) dRight = self.mpt * (encs[4] - self.priorEncs[4]) # dth = (dRight - dLeft) / self.wheelTrack radius, dTheta = self.calculateTurnRadius(dLeft, dRight) # calculate centre of turn circle xOrig = self.x + radius * math.cos(self.th) yOrig = self.y + radius * math.sin(self.th) # calculate new co-ordinates xNew = xOrig + (self.x - xOrig) * math.cos(dTheta) - (self.y - yOrig) * math.sin(dTheta) yNew = yOrig + (self.x - xOrig) * math.sin(dTheta) + (self.y - yOrig) * math.cos(dTheta) #calculate change in x,y values dx = xNew - self.x dy = yNew - self.y self.th += dTheta if (self.th > (math.pi * 2)): self.th -= (math.pi * 
2) elif (self.th < (-math.pi * 2)): self.th += (math.pi * 2) self.x = xNew self.y = yNew # convert to ros co-ords xRos = self.y yRos = -self.x vxRos = dy / dt vyRos = -dx / dt vth = dTheta /dt quaternion = self.getQuaternion(self.th) if self.pubTF: self.publishTransform(xRos, yRos, quaternion, currentTime) self.publishOdomMessage(xRos, yRos, vxRos, vyRos, vth, quaternion, currentTime) self.priorEncs = encs def getQuaternion(self, th): quaternion = Quaternion() quaternion.x = 0.0 quaternion.y = 0.0 quaternion.z = math.sin(th / 2.0) quaternion.w = math.cos(th / 2.0) return quaternion if __name__ == '__main__': rospy.init_node('osr_odometry2') rospy.loginfo("Starting the osr odometry2 node") baseFrame = rospy.get_param("/odometry/base_frame_id", "base_link") # mpt = rospy.get_param("/odometry/mpt", 0.000026322) mpt = rospy.get_param("/odometry/mpt", 0.000100708) wheelTrack = rospy.get_param("/odometry/wheel_track", 0.455) d4 = rospy.get_param("/odometry/d4", 0.2559) maxTickPerSec = rospy.get_param("/odometry/maxTickPerSec", 8000) publishTF = rospy.get_param("~publishTF", False) odom = Odometry2(baseFrame, wheelTrack, mpt, d4, maxTickPerSec, pubTF=publishTF) encSub = rospy.Subscriber("/encoder", Encoder, odom.onEncoderMessage) rate = rospy.Rate(20) while not rospy.is_shutdown(): rate.sleep()
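getQuaternion above encodes a pure yaw rotation. A standalone numeric sanity check of that construction follows (plain Python, no ROS required; the helper name is chosen for the example).

import math

def yaw_quaternion(th):
    # same construction as Odometry2.getQuaternion: rotation about the z axis only
    return (0.0, 0.0, math.sin(th / 2.0), math.cos(th / 2.0))

x, y, z, w = yaw_quaternion(math.pi / 2)      # a 90-degree left turn
print(round(z, 3), round(w, 3))               # 0.707 0.707
print(round(2 * math.atan2(z, w), 3))         # recovers the yaw: 1.571 (= pi/2)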
30.532338
96
0.579599
5,064
0.825159
0
0
0
0
0
0
686
0.111781
71a0b40b2d964c1cdacc2a99529ad40612493ff0
4,199
py
Python
src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py
alisiahkoohi/importance-of-transfer-learning
bb4c7943f4ff64a2f1785503328b4cbb4f5111aa
[ "MIT" ]
null
null
null
src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py
alisiahkoohi/importance-of-transfer-learning
bb4c7943f4ff64a2f1785503328b4cbb4f5111aa
[ "MIT" ]
4
2020-09-25T22:32:41.000Z
2022-02-09T23:36:02.000Z
src/simulation-conditioning/utilities/data-generation-scripts/Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.py
slimgroup/Software.siahkoohi2019itl
bb4c7943f4ff64a2f1785503328b4cbb4f5111aa
[ "MIT" ]
null
null
null
import numpy as np import h5py import os from devito.logger import info from devito import TimeFunction, clear_cache from examples.seismic.acoustic import AcousticWaveSolver from examples.seismic import Model, RickerSource, Receiver, TimeAxis from math import floor from scipy.interpolate import griddata import argparse parser = argparse.ArgumentParser(description='') parser.add_argument('--data_path', dest='data_path', type=str, default='/home/ec2-user/data', help='raw data path') parser.add_argument('--save_dir', dest='save_dir', type=str, default='/home/ec2-user/data', help='saving directory') args = parser.parse_args() data_path = args.data_path save_dir = args.save_dir origin = (0., 0.) spacing=(7.5, 7.5) tn=1100. nbpml=40 # Define your vp in km/sec (x, z) vp = np.fromfile(os.path.join(data_path, 'vp_marmousi_bi'), dtype='float32', sep="") vp = np.reshape(vp, (1601, 401)) # vp = vp[400:1401, 0:401] shape=[401, 301] values = np.zeros([vp.shape[0]*vp.shape[1], ]) points = np.zeros([vp.shape[0]*vp.shape[1], 2]) k = 0 for indx in range(0, vp.shape[0]): for indy in range(0, vp.shape[1]): values[k] = vp[indx, indy] points[k, 0] = indx points[k, 1] = indy k = k + 1 # nx, ny = shape[0], shape[1] X, Y = np.meshgrid(np.array(np.linspace(1000, 1287, shape[0])), np.array(np.linspace(120, 232, shape[1]))) int_vp = griddata(points, values, (X, Y), method='cubic') int_vp = np.transpose(int_vp) vp = int_vp # create model model = Model(origin, spacing, shape, 2, vp, nbpml=nbpml) # Derive timestepping from model spacing dt = model.critical_dt t0 = 0.0 nt = int(1 + (tn-t0) / dt) # Number of timesteps time = np.linspace(t0, tn, nt) # Discretized time axis datasize0 = int(np.shape(range(0, shape[0], 4))[0]) datasize1 = int(np.shape(range(100, nt, 20))[0]) datasize = datasize0*datasize1 strTrainA = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_A_train.hdf5') strTrainB = os.path.join(save_dir, 'Wavefield_Marmousi_pml_401x301_1000-1287_120-232_4k_20kp100_B_train.hdf5') dataset_train = "train_dataset" file_trainA = h5py.File(strTrainA, 'w-') datasetA = file_trainA.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml)) file_trainB = h5py.File(strTrainB, 'w-') datasetB = file_trainB.create_dataset(dataset_train, (datasize, shape[0]+2*nbpml, shape[1]+2*nbpml)) num_rec = 601 rec_samp = np.linspace(0., model.domain_size[0], num=num_rec); rec_samp = rec_samp[1]-rec_samp[0] time_range = TimeAxis(start=t0, stop=tn, step=dt) src = RickerSource(name='src', grid=model.grid, f0=0.025, time_range=time_range, space_order=1, npoint=1) src.coordinates.data[0, :] = np.array([1*spacing[0], 2*spacing[1]]).astype(np.float32) rec = Receiver(name='rec', grid=model.grid, time_range=time_range, npoint=num_rec) rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec) rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:] solverbad = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True, space_order=2, freesurface=False) solvergood = AcousticWaveSolver(model, source=src, receiver=rec, kernel='OT2', isic=True, space_order=20, freesurface=False) ulocgood = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=20, save=nt) ulocbad = TimeFunction(name="u", grid=model.grid, time_order=2, space_order=2, save=nt) kk = 0 for xsrc in range(0, shape[0], 4): clear_cache() ulocgood.data.fill(0.) ulocbad.data.fill(0.) 
src.coordinates.data[0, :] = np.array([xsrc*spacing[0], 2*spacing[1]]).astype(np.float32) rec.coordinates.data[:, 0] = np.linspace(0., model.domain_size[0], num=num_rec) rec.coordinates.data[:, 1:] = src.coordinates.data[0, 1:] _, ulocgood, _ = solvergood.forward(m=model.m, src=src, time=nt-1, save=True) _, ulocbad, _ = solverbad.forward(m=model.m, src=src, time=nt-1, save=True) datasetA[kk:(kk+datasize1), :, :] = np.array(ulocgood.data[range(100, nt, 20), :, :]) datasetB[kk:(kk+datasize1), :, :] = np.array(ulocbad.data[range(100, nt, 20), :, :]) kk = kk + datasize1 file_trainA.close() file_trainB.close()
34.702479
116
0.700881
0
0
0
0
0
0
0
0
540
0.128602
71a155a137fa83ef0306a441e11bd003d9b6a750
154
py
Python
facto.py
divine-coder/CODECHEF-PYTHON
a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543
[ "MIT" ]
null
null
null
facto.py
divine-coder/CODECHEF-PYTHON
a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543
[ "MIT" ]
4
2020-10-04T07:49:30.000Z
2021-10-02T05:24:40.000Z
facto.py
divine-coder/CODECHEF-PYTHON
a1e34d6f9f75cf7b9497f1ef2f937cb4f64f1543
[ "MIT" ]
7
2020-10-04T07:46:55.000Z
2021-11-05T14:30:00.000Z
import math

if __name__ == '__main__':
    n = int(input())
    for abc in range(n):
        t = int(input())
        print(math.factorial(t))
17.111111
31
0.512987
0
0
0
0
0
0
0
0
10
0.064935
71a1f9b966a655c142f90e8f1814eebae105ba9e
373
py
Python
setup.py
johnmartingodo/pyKinematicsKineticsToolbox
4ffc99885f3c637b8c33914a4e50ccb4595fc844
[ "MIT" ]
null
null
null
setup.py
johnmartingodo/pyKinematicsKineticsToolbox
4ffc99885f3c637b8c33914a4e50ccb4595fc844
[ "MIT" ]
null
null
null
setup.py
johnmartingodo/pyKinematicsKineticsToolbox
4ffc99885f3c637b8c33914a4e50ccb4595fc844
[ "MIT" ]
null
null
null
from setuptools import setup

setup(name="pykinematicskineticstoolbox",
      version="0.0",
      description="Installable python package which collects useful kinematics and kinetics functions",
      author="John Martin K. Godø",
      author_email="[email protected]",
      license="MIT",
      packages=["pykinematicskineticstoolbox"],
      install_requires=["numpy"],
      )
31.083333
100
0.753351
0
0
0
0
0
0
0
0
216
0.57754
71a33e281903173f09972e5b14ecf88c5dd711ba
1,251
py
Python
summary/summary_avail.py
bit0fun/plugins
1f6f701bf1e60882b8fa61cb735e7033c8c29e3c
[ "BSD-3-Clause" ]
173
2019-01-17T12:40:47.000Z
2022-03-27T12:14:00.000Z
summary/summary_avail.py
bit0fun/plugins
1f6f701bf1e60882b8fa61cb735e7033c8c29e3c
[ "BSD-3-Clause" ]
284
2019-03-01T17:54:14.000Z
2022-03-29T13:27:51.000Z
summary/summary_avail.py
bit0fun/plugins
1f6f701bf1e60882b8fa61cb735e7033c8c29e3c
[ "BSD-3-Clause" ]
92
2019-02-26T03:45:40.000Z
2022-03-28T03:23:50.000Z
from datetime import datetime


# ensure an rpc peer is added
def addpeer(p, rpcpeer):
    pid = rpcpeer['id']
    if pid not in p.persist['peerstate']:
        p.persist['peerstate'][pid] = {
            'connected': rpcpeer['connected'],
            'last_seen': datetime.now() if rpcpeer['connected'] else None,
            'avail': 1.0 if rpcpeer['connected'] else 0.0
        }


# exponentially smooth online/offline states of peers
def trace_availability(p, rpcpeers):
    p.persist['availcount'] += 1
    leadwin = max(min(p.avail_window, p.persist['availcount'] * p.avail_interval), p.avail_interval)
    samples = leadwin / p.avail_interval
    alpha = 1.0 / samples
    beta = 1.0 - alpha

    for rpcpeer in rpcpeers['peers']:
        pid = rpcpeer['id']
        addpeer(p, rpcpeer)

        if rpcpeer['connected']:
            p.persist['peerstate'][pid]['last_seen'] = datetime.now()
            p.persist['peerstate'][pid]['connected'] = True
            p.persist['peerstate'][pid]['avail'] = 1.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
        else:
            p.persist['peerstate'][pid]['connected'] = False
            p.persist['peerstate'][pid]['avail'] = 0.0 * alpha + p.persist['peerstate'][pid]['avail'] * beta
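The update in trace_availability is an exponential moving average over roughly `samples` polls. A tiny standalone illustration follows; alpha is an arbitrary example value, not taken from the plugin's configuration.

alpha = 0.1              # e.g. 1.0 / samples for a 10-sample lead window
beta = 1.0 - alpha

avail = 0.0              # peer starts fully 'offline'
for _ in range(30):      # 30 consecutive polls where the peer is connected
    avail = 1.0 * alpha + avail * beta   # same update as the 'connected' branch above
print(round(avail, 3))   # tends towards 1.0 the longer the peer stays online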
36.794118
108
0.60032
0
0
0
0
0
0
0
0
353
0.282174
71a38554040095f344a4dbd4dbed0540a3d29b06
505
py
Python
terrascript/dns/r.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
4
2022-02-07T21:08:14.000Z
2022-03-03T04:41:28.000Z
terrascript/dns/r.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
null
null
null
terrascript/dns/r.py
hugovk/python-terrascript
08fe185904a70246822f5cfbdc9e64e9769ec494
[ "BSD-2-Clause" ]
2
2022-02-06T01:49:42.000Z
2022-02-08T14:15:00.000Z
# terrascript/dns/r.py

import terrascript


class dns_a_record_set(terrascript.Resource):
    pass


class dns_aaaa_record_set(terrascript.Resource):
    pass


class dns_cname_record(terrascript.Resource):
    pass


class dns_mx_record_set(terrascript.Resource):
    pass


class dns_ns_record_set(terrascript.Resource):
    pass


class dns_ptr_record(terrascript.Resource):
    pass


class dns_srv_record_set(terrascript.Resource):
    pass


class dns_txt_record_set(terrascript.Resource):
    pass
14.428571
48
0.778218
439
0.869307
0
0
0
0
0
0
22
0.043564
71a3ec3949c4d0b824f364cf880c163e7d4093ec
749
py
Python
JumpscaleCore/clients/tcprouter/TCPRouterFactory.py
gneumann333/jumpscaleX_core
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
[ "Apache-2.0" ]
1
2020-06-21T11:18:52.000Z
2020-06-21T11:18:52.000Z
JumpscaleCore/clients/tcprouter/TCPRouterFactory.py
gneumann333/jumpscaleX_core
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
[ "Apache-2.0" ]
644
2019-08-25T10:19:56.000Z
2020-12-23T09:41:04.000Z
JumpscaleCore/clients/tcprouter/TCPRouterFactory.py
gneumann333/jumpscaleX_core
777d249fa3668c6e802c2f765f4b82fb39c3e5fa
[ "Apache-2.0" ]
11
2019-08-29T21:38:50.000Z
2020-06-21T11:18:55.000Z
from Jumpscale import j

from .TCPRouterClient import TCPRouterClient

JSConfigs = j.baseclasses.object_config_collection


class TCPRouterFactory(JSConfigs):
    __jslocation__ = "j.clients.tcp_router"
    _CHILDCLASS = TCPRouterClient

    def test(self):
        """
        kosmos 'j.clients.tcp_router.test()'
        """
        # get a client instance (TO CHECK: secret is already assigned to backend)
        cl = self.get(
            "test_instance",
            local_ip="0.0.0.0",
            local_port=18000,
            remote_url="127.0.0.1",
            remote_port=6379,
            secret="test",
        )

        # connect to backend
        cl.connect()

        # stop connection
        cl.stop()

        print("TEST OK")
22.029412
81
0.580774
625
0.834446
0
0
0
0
0
0
242
0.323097
71a54794818c1c14503bf2853a8ad157b14a963f
8,837
py
Python
nmrglue/fileio/spinsolve.py
miguelarbesu/nmrglue
6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc
[ "BSD-3-Clause" ]
null
null
null
nmrglue/fileio/spinsolve.py
miguelarbesu/nmrglue
6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc
[ "BSD-3-Clause" ]
null
null
null
nmrglue/fileio/spinsolve.py
miguelarbesu/nmrglue
6ca36de7af1a2cf109f40bf5afe9c1ce73c9dcdc
[ "BSD-3-Clause" ]
null
null
null
""" Functions for reading Magritek Spinsolve binary (dx/1d) files and parameter (acqu.par/proc.par) files. """ import os from warnings import warn import numpy as np from . import fileiobase from . import jcampdx __developer_info__ = """ Spinsolve is the software used on the Magritek benchtop NMR devices. A spectrum is saved in a folder with several files. The spectral data is stored in these files: 'data.1d' (FID), 'spectrum.1d' (Fourier transformed) and 'spectrum_processed.1d' (FT + processed by spinsolve) Optional spectral data (System->Prefs->Setup->Global data storage): 'nmr_fid.dx' (FID stored in `JCAMP-DX standard <http://www.jcamp-dx.org/>`), 'spectrum.csv' and 'spectrum_processed.csv' (FT + processed by Spinsovle with ppm for each point and intensity delimited by ';') Other files: 'acqu.par' - all parameters that are used for acquisition 'Protocol.par' - text file used to reload data back into the Spinsolve software 'processing.script' - text file to transfer Spinsolve software protocol settings into MNOVA The Spinsolve Expert software has a slightly different output: [Needs to be double checked as I do not have access to this software -LCageman] - Output into JCAMP-DX is not possible - 'spectrum_processed.1d' is not generated - (new) 'fid.1d' - seems to be the same as 'data.1d' - (new) 'proc.par' - contains processing parameters in the same style as 'acqu.par' - (new) .pt1 files - seem to be plot files specific for the expert software, cannot be read by NMRglue """ def read(dir='.', specfile=None, acqupar="acqu.par", procpar="proc.par"): """ Reads spinsolve files from a directory When no spectrum filename is given (specfile), the following list is tried, in that specific order ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"] To use the resolution enhanced spectrum use the './Enhanced' folder as input. Note that spectrum.1d and spectrum_processed.1d contain only data in the frequency domain, so no Fourier transformation is needed. Also, use dic["spectrum"]["xaxis"] to plot the x-axis Parameters ---------- dir : str Directory to read from specfile : str, optional Filename to import spectral data from. None uses standard filename from: ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d"] acqupar : str, optional Filename for acquisition parameters. None uses standard name. procpar : str, optional Filename for processing parameters. None uses standard name. 
Returns ------- dic : dict All parameters that can be present in the data folder: dic["spectrum"] - First bytes of spectrum(_processed).1d dic["acqu"] - Parameters present in acqu.par dic["proc"] - Parameters present in proc.par dic["dx"] - - Parameters present in the header of nmr_fid.dx data : ndarray Array of NMR data """ if os.path.isdir(dir) is not True: raise IOError("directory %s does not exist" % (dir)) # Create empty dic dic = {"spectrum": {}, "acqu": {}, "proc":{}, "dx":{}} # Read in acqu.par and write to dic acqupar = os.path.join(dir, acqupar) if os.path.isfile(acqupar): with open(acqupar, "r") as f: info = f.readlines() for line in info: line = line.replace("\n", "") k, v = line.split("=") dic["acqu"][k.strip()] = v.strip() # Read in proc.par and write to dic procpar = os.path.join(dir,procpar) if os.path.isfile(procpar): with open(procpar, "r") as f: info = f.readlines() for line in info: line = line.replace("\n", "") k, v = line.split("=") dic["proc"][k.strip()] = v.strip() # Define which spectrumfile to take, using 'specfile' when defined, otherwise # the files in 'priority_list' are tried, in that particular order priority_list = ["nmr_fid.dx", "data.1d", "fid.1d", "spectrum.1d", "spectrum_processed.1d", None] if specfile: inputfile = os.path.join(dir, specfile) if not os.path.isfile(inputfile): raise IOError("File %s does not exist" % (inputfile)) else: for priority in priority_list: if priority == None: raise IOError("directory %s does not contain spectral data" % (dir)) inputfile = os.path.join(dir, priority) if os.path.isfile(inputfile): break # Detect which file we are dealing with from the extension and read in the spectral data # Reading .dx file using existing nmrglue.fileio.jcampdx module if inputfile.split('.')[-1] == "dx": dic["dx"], raw_data = jcampdx.read(inputfile) data = np.empty((int(dic["dx"]["$TD"][0]), ), dtype='complex128') data = raw_data[0][:] + 1j * raw_data[1][:] # Reading .1d files elif inputfile.split('.')[-1] == "1d": with open(inputfile, "rb") as f: raw_data = f.read() # Write out parameters from the first 32 bytes into dic["spectrum"] keys = ["owner", "format", "version", "dataType", "xDim", "yDim", "zDim", "qDim"] for i, k in enumerate(keys): start = i * 4 end = start + 4 value = int.from_bytes( raw_data[start:end], "little") dic["spectrum"][k] = value data = np.frombuffer(raw_data[end:], "<f") # The first 1/3 of the file is xaxis data (s or ppm) split = data.shape[-1] // 3 xscale = data[0 : split] dic["spectrum"]["xaxis"] = xscale # The rest is real and imaginary data points interleaved data = data[split : : 2] + 1j * data[split + 1 : : 2] else: raise IOError("File %s cannot be interpreted, use .dx or .1d instead" % (inputfile)) return dic,data def guess_udic(dic,data): """ Guess parameters of universal dictionary from dic, data pair. Parameters ---------- dic : dict Dictionary of JCAMP-DX, acqu, proc and spectrum parameters. data : ndarray Array of NMR data. Returns ------- udic : dict Universal dictionary of spectral parameters. 
""" # Create an empty universal dictionary udic = fileiobase.create_blank_udic(1) # Update defalt parameters, first acqu.par parameters in dic are tried, then JCAMP-DX header parameters # size if data is not None: udic[0]["size"] = len(data) else: warn('No data, cannot set udic size') # sw try: udic[0]['sw'] = float(dic['acqu']['bandwidth']) * 1000 except KeyError: try: udic[0]['sw'] = float(dic['dx']['$SW'][0]) * float(dic['dx']['$BF1'][0]) except KeyError: try: if dic["spectrum"]["freqdata"]: udic[0]['sw'] = dic["spectrum"]["xaxis"][-1] - dic["spectrum"]["xaxis"][0] elif data is not None: udic[0]['sw'] = len(data) / dic["spectrum"]["xaxis"][-1] else: warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz") except KeyError: warn("Cannot set spectral width - set manually using: 'udic[0]['sw'] = x' where x is the spectral width in Hz") # obs try: udic[0]['obs'] = float(dic['acqu']['b1Freq']) except KeyError: try: udic[0]['obs'] = float(dic['dx']['$BF1'][0]) except KeyError: warn("Cannot set observe frequency - set manually using: 'udic[0]['obs'] = x' where x is magnetic field in MHz") # car try: udic[0]['car'] = float(dic['acqu']['lowestFrequency']) + (float(dic['acqu']['bandwidth']) * 1000 / 2) except KeyError: try: udic[0]['car'] = (float(dic['dx']['$REFERENCEPOINT'][0]) * -1 ) + (float(dic['dx']['$SW'][0]) * udic[0]['obs'] / 2) except KeyError: try: udic[0]['car'] = (float(dic['dx']['$BF1'][0]) - float(dic['dx']['$SF'][0])) * 1000000 except KeyError: warn("Cannot set carrier - try: 'udic[0]['car'] = x * udic[0]['obs']' where x is the center of the spectrum in ppm") # label try: udic[0]['label'] = dic['acqu']['rxChannel'] except KeyError: try: label_value = dic['dx'][".OBSERVENUCLEUS"][0].replace("^", "") udic[0]["label"] = label_value except KeyError: warn("Cannot set observed nucleus label") #keys left to default # udic[0]['complex'] # udic[0]['encoding'] # udic[0]['time'] = True # udic[0]['freq'] = False return udic
37.764957
132
0.593188
0
0
0
0
0
0
0
0
5,214
0.590019
71a6a1b4c00b5723fdf1d5cebd6d02a67810c5fb
21,781
py
Python
src/navigation_analytics/navigation_data.py
mielgosez/navigation_analytics
3c382e8200afe4d37fa0880f155bf1bb2f48b83f
[ "MIT" ]
null
null
null
src/navigation_analytics/navigation_data.py
mielgosez/navigation_analytics
3c382e8200afe4d37fa0880f155bf1bb2f48b83f
[ "MIT" ]
null
null
null
src/navigation_analytics/navigation_data.py
mielgosez/navigation_analytics
3c382e8200afe4d37fa0880f155bf1bb2f48b83f
[ "MIT" ]
null
null
null
import logging import copy import pickle import pandas as pd class BaseClass: def __init__(self, input_data: pd.DataFrame, logger: logging.Logger, metadata: dict): self.__input_data = input_data self.__logger = logger self.__metadata = metadata @property def logger(self): return self.__logger @property def metadata(self): return self.__metadata @property def input_data(self): return self.__input_data @input_data.setter def input_data(self, new_input_data: pd.DataFrame): self.__input_data = new_input_data @property def events_id(self): return self.__metadata['metadata']['primary_keys']['events'] @property def session_id(self): return self.__metadata['metadata']['primary_keys']['sessions'] @property def page_id(self): return self.__metadata['metadata']['primary_keys']['pages'] @property def group_id(self): return self.metadata['metadata']['valid_values']['groups']['group_id'] @property def valid_groups(self): return self.metadata['metadata']['valid_values']['groups']['valid'] @property def action_id(self): return self.metadata['metadata']['valid_values']['actions']['action_id'] @property def valid_actions(self): return self.metadata['metadata']['valid_values']['actions']['valid'] @property def search_action(self): return self.metadata['metadata']['valid_values']['actions']['search_action'] @property def visit_action(self): return self.metadata['metadata']['valid_values']['actions']['visit_action'] @property def timestamp_id(self): return self.metadata['metadata']['datetime'] @property def kpi_duration(self): return self.metadata['metadata']['valid_values']['kpis']['duration_page'] @property def kpi_position(self): return self.metadata['metadata']['valid_values']['kpis']['result_position'] @property def kpi_number_results(self): return self.metadata['metadata']['valid_values']['kpis']['number_results'] class DataValidator(BaseClass): def __init__(self, logger: logging.Logger, metadata: dict, input_data: pd.DataFrame): super().__init__(logger=logger, metadata=metadata, input_data=input_data) self.default_pipeline() # Pipelines def default_pipeline(self): self.check_events_are_unique() self.check_groups_are_valid() self.check_one_group_per_session() # Validation Rules def check_events_are_unique(self): """ Verifies that event identifier is primary key of input data. :return: Validation """ number_rows = self.input_data.shape[0] events_id = self.metadata['metadata']['primary_keys']['events'] number_events = len(self.input_data[events_id].unique()) if number_rows == number_events: self.logger.info(f'Validation - Events are unique: {number_rows} rows and {number_events} events.') else: self.logger.error(f'Validation - Events are not unique: {number_rows} rows and {number_events} events.') def check_groups_are_valid(self): """ Verifies that groups matches with those declared in metadata. :return: Validation """ group_id = self.metadata['metadata']['valid_values']['groups']['group_id'] groups_in_data = list(self.input_data[group_id].unique()) group_valid_names = list(self.metadata['metadata']['valid_values']['groups']['valid']) if set(groups_in_data) == set(group_valid_names): self.logger.info(f'Validation - Groups are valid: {", ".join(group_valid_names)}.') else: self.logger.error(f'Validation - Group names are not valid: ' f'Names in data are {", ".join(groups_in_data)}. ' f'Names in metadata are {", ".join(group_valid_names)}.') def check_one_group_per_session(self): """ Verifies that there's at most one group per session. 
:return: Validation """ group_id = self.metadata['metadata']['valid_values']['groups']['group_id'] session_id = self.metadata['metadata']['primary_keys']['sessions'] max_num_groups = self.input_data.groupby(session_id)[group_id].apply(lambda x: len(set(x))).max() if max_num_groups == 1: self.logger.info(f'Validation - Just one group per session.') else: self.logger.error(f'Validation - Groups per session is different to one. ' f'Maximum number of groups per session detected in data set is: {max_num_groups}') class SessionAnalyzer(BaseClass): def __init__(self, input_data: pd.DataFrame, metadata: dict, logger: logging.Logger): super().__init__(logger=logger, metadata=metadata, input_data=input_data) self.__results = dict() self.__session_data = self.create_session_look_up() self.__page_data = self.create_page_look_up() self.__page_data_out = self.create_page_look_up_out() self.__search_table = self.create_search_table() self.__duration_table = self.create_duration_table() def filter_session_by_group(self, group_id: str): """ Filter session by group id provided in the input. This is expected to be a recurrent operation. :param group_id: :return: """ if group_id not in self.valid_groups: self.logger.error(f'{group_id} is not a valid group.') return self.session_data.loc[self.session_data[self.group_id] == group_id, :] # Metrics def compute_click_through_rate(self, group_id: str = None): """ This function computes the click through rate, understanding this quantity as the ratio of searches ending up in a session landing in a page. Session Attribute. :param group_id: :return: """ result = None if group_id is None: key = 'click_through_rate' sub_key = 'all' # Merging sessions with page ids df = copy.deepcopy(self.session_data.merge(self.page_data, on=self.session_id, how='left')) # Computing boolean vector: True means session has a visit, False otherwise. result = df.groupby(by=self.session_id)[self.action_id].apply(lambda x: self.visit_action in set(x)) else: key = 'click_through_rate' sub_key = group_id if group_id in self.valid_groups: # Filtering sessions by required group. filtered_sessions = self.filter_session_by_group(group_id=group_id) df = copy.deepcopy(filtered_sessions.merge(self.page_data, on=self.session_id, how='left')) result = df.groupby(by='session_id').action.apply(lambda x: 'visitPage' in set(x)) else: self.logger.error(f'{group_id} is not a valid group.') # Computing ctr ctr = sum(result) / len(result) self.logger.info(f'Click Through Rate is equal to: {ctr}') # Storing results update_result = self.kpi_results try: update_result[key][key].append(ctr) update_result[key]['group'].append(sub_key) except KeyError: update_result[key] = dict() update_result[key][key] = [ctr] update_result[key]['group'] = [sub_key] self.kpi_results = update_result return ctr def compute_search_frequency(self, group_id: str = None, number_ranking: int = 10): """ Get the most common first result per session. This is a Session Attribute. :param number_ranking: Number of results to visualize. :param group_id: :return: """ if group_id is None: key = 'search_frequency' sub_key = 'all' df_sessions = self.session_data.copy() else: key = 'search_frequency' sub_key = group_id df_sessions = self.filter_session_by_group(group_id=group_id) df = df_sessions.merge(self.page_data, on=self.session_id, how='left') # Merge with duration table to retrieve datestamp data. 
df_all = df.merge(self.duration_table, on=self.page_id, how='left') df_all.dropna(inplace=True) # Most common first result df_all = df_all.groupby('session_id').apply(lambda x: x.loc[x[self.timestamp_id] == min(x[self.timestamp_id]), [self.kpi_position, self.timestamp_id]]) # Result result = df_all[self.kpi_position].value_counts(normalize=True)[:number_ranking] self.logger.info(f'Most common result is {result.index[0]}') # Store result updated_results = self.kpi_results try: updated_results[key][key].extend(list(result.values)) updated_results[key]['position'].extend(list(result.index)) updated_results[key]['group'].extend([sub_key]*len(result.index)) except KeyError: updated_results[key] = dict() updated_results[key][key] = list(result.values) updated_results[key]['position'] = list(result.index) updated_results[key]['group'] = [sub_key]*len(result.index) self.kpi_results = updated_results return result def compute_zero_result_rate(self, group_id: str = None): """ Computes the proportion of searches that end up in no results. :param group_id: :return: """ df = self.search_table.copy() # Compute number of searches resulting in found elements. df['success'] = [True if item == 0 else False for item in df[self.kpi_number_results]] if group_id is None: key = 'zero_result_rate' sub_key = 'all' result = df['success'] else: key = 'zero_result_rate' sub_key = group_id df_sessions = self.filter_session_by_group(group_id=group_id) df_pages = df_sessions.merge(self.page_data, on=self.session_id, how='left') df = df.merge(df_pages, on=self.page_id, how='left') df.dropna(inplace=True) result = df['success'] # Computing result value = sum(result) / len(result) self.logger.info(f'Zero result rate is: {value}') # Storing result. updated_results = self.kpi_results try: updated_results[key][key].append(value) updated_results[key]['group'].append(sub_key) except KeyError: updated_results[key] = dict() updated_results[key][key] = [value] updated_results[key]['group'] = [sub_key] self.kpi_results = updated_results return value def compute_session_length(self, group_id: str = None): """ Compute session's length :param group_id: :return: """ if group_id is None: key = 'session_length' sub_key = 'all' df = self.input_data else: key = 'session_length' sub_key = group_id df = self.filter_session_by_group(group_id=group_id) df = df.merge(self.input_data, on=self.session_id, how='left') # Compute results value = df.groupby(self.session_id)[self.timestamp_id].apply(lambda x: (max(x) - min(x)).total_seconds()) time_value = df.groupby(self.session_id)[self.timestamp_id].min() # Store results updated_results = self.kpi_results try: updated_results[key][key].extend(list(value.values)) updated_results[key]['session_date'].extend(list(time_value.values)) updated_results[key]['session_id'].extend(list(value.index)) updated_results[key]['group'].extend([sub_key]*len(value.index)) except KeyError: updated_results[key] = dict() updated_results[key][key] = list(value.values) updated_results[key]['session_date'] = list(time_value.values) updated_results[key]['session_id'] = list(value.index) updated_results[key]['group'] = [sub_key]*len(value.index) self.kpi_results = updated_results return value # Instantiation def update_data(self): self.page_data = self.create_page_look_up() self.page_data_out = self.create_page_look_up_out() self.session_data = self.create_session_look_up() self.duration_table = self.create_duration_table() self.search_table = self.create_search_table() def create_session_look_up(self): return 
self.input_data[[self.session_id, self.group_id]].drop_duplicates() def create_page_look_up_out(self): return self.input_data[[self.session_id, self.page_id]].drop_duplicates() def create_page_look_up(self): return self.input_data[[self.session_id, self.page_id, self.action_id]].drop_duplicates() def create_search_table(self): """ Preserves just search results from original dataset. :return: Information relevant only to searches """ local_df = self.input_data.copy() local_df = local_df.loc[local_df[self.action_id] == self.search_action, [self.events_id, self.timestamp_id, self.page_id, self.kpi_number_results]] return local_df def create_duration_table(self): """ Preserves just search results from original dataset. :return: Information relevant only to searches """ local_df = self.input_data.copy() local_df = local_df.loc[local_df[self.action_id] != self.search_action, [self.timestamp_id, self.page_id, self.kpi_position, self.kpi_duration]] # Remove redundant information on position and duration local_df = local_df.groupby(self.page_id).max() no_duration_info = local_df[self.kpi_duration].isna() no_position_info = local_df[self.kpi_position].isna() self.logger.warning(f'{no_position_info.sum()} NA values for {self.kpi_position}.') self.logger.warning(f'{no_duration_info.sum()} NA values for {self.kpi_duration}.') # Remove those observations where position of results do not exist while there is duration no_position_but_duration = [(2 * item[1] - item[0]) != 2 for item in zip(no_duration_info, no_position_info)] position_but_duration = [(2 * item[1] - item[0]) == 2 for item in zip(no_duration_info, no_position_info)] kpi_results = self.kpi_results kpi_results['invalid_results'] = local_df.loc[position_but_duration, :].copy() self.kpi_results = kpi_results self.logger.warning(f'{sum([not item for item in no_position_but_duration])} ' f'NA values for position with duration.') local_df = local_df.loc[no_position_but_duration, :] # The rest of cases fill 0 local_df.fillna(0, inplace=True) local_df.reset_index(inplace=True) local_df.sort_values(by=[self.timestamp_id, self.page_id], inplace=True) return local_df # Getters and setters @property def session_data(self): return self.__session_data @session_data.setter def session_data(self, new_session_data: pd.DataFrame): self.__session_data = new_session_data @property def page_data(self): return self.__page_data @page_data.setter def page_data(self, new_page_data: pd.DataFrame): self.__page_data = new_page_data @property def page_data_out(self): return self.__page_data_out @page_data_out.setter def page_data_out(self, new_page_data_out: pd.DataFrame): self.__page_data_out = new_page_data_out @property def number_sessions(self): return self.session_data.shape[0] @property def number_pages(self): return self.page_data.shape[0] @property def duration_table(self): return self.__duration_table @duration_table.setter def duration_table(self, new_duration_table: pd.DataFrame): self.__duration_table = new_duration_table @property def search_table(self): return self.__search_table @search_table.setter def search_table(self, new_search_table: pd.DataFrame): self.__search_table = new_search_table @property def kpi_results(self): return self.__results @kpi_results.setter def kpi_results(self, results: dict): self.__results = results class NavigationDataAnalyzer: def __init__(self, input_data: pd.DataFrame, metadata: dict, logger_level: int = logging.WARNING): self.__logger = logging.Logger(name='default_logger', level=logger_level) self.__input_data = 
input_data self.__metadata = metadata self.__data_validator = DataValidator(input_data=input_data, metadata=metadata, logger=self.logger) self.__session_analyzer = SessionAnalyzer(input_data=input_data, metadata=metadata, logger=self.logger) def get_number_events(self, group_name: str = None): """ Method used to retrieve the number of events in the dataset. It can be also be filtered by group name. This function assumes that events are the primary key of the dataset. :param group_name: Name of the study groups as defined in metadata (['valid_values']['groups']['valid']) :return: Number of events in the dataset (in total or per group) """ groups_id = self.metadata['metadata']['valid_values']['groups']['group_id'] valid_groups = self.metadata['metadata']['valid_values']['groups']['valid'] if group_name is None: return self.input_data.shape[0] else: if group_name in valid_groups: return self.input_data.loc[self.input_data[groups_id] == group_name].shape[0] else: self.logger.error(f'{group_name} is not a valid group name. ' f'Please select among those listed here: {", ".join(valid_groups)}') def save(self, name: str = 'navigation_data_analyzer.pickle'): objects_to_store = dict() objects_to_store['metadata'] = self.metadata objects_to_store['input_data'] = self.input_data objects_to_store['kpi_results'] = self.session_analyzer.kpi_results with open(name, 'wb') as fp: pickle.dump(objects_to_store, fp) @staticmethod def load(filepath: str): with open(filepath, 'rb') as fp: existing_object = pickle.load(fp) instance_object = NavigationDataAnalyzer(input_data=existing_object['input_data'], metadata=existing_object['metadata']) instance_object.session_analyzer.kpi_results = existing_object['kpi_results'] return instance_object def to_excel(self, filename: str): excel_writer = pd.ExcelWriter(filename) self.session_analyzer.session_data.to_excel(excel_writer, sheet_name='session_data', index=False) self.session_analyzer.page_data_out.to_excel(excel_writer, sheet_name='page_data', index=False) self.session_analyzer.duration_table.to_excel(excel_writer, sheet_name='duration_table', index=False) self.session_analyzer.search_table.to_excel(excel_writer, sheet_name='search_table', index=False) for key, value in self.session_analyzer.kpi_results.items(): results = pd.DataFrame(value) results.to_excel(excel_writer, sheet_name=f'kpi_{key}', index=False) groups_df = pd.DataFrame({'group': self.session_analyzer.valid_groups}) groups_df.to_excel(excel_writer, sheet_name='groups', index=False) excel_writer.save() excel_writer.close() # Getters and Setters @property def session_analyzer(self): return self.__session_analyzer @property def data_validator(self): return self.__data_validator @property def input_data(self): return self.__input_data @input_data.setter def input_data(self, new_input_data: pd.DataFrame): self.data_validator.input_data = new_input_data self.data_validator.default_pipeline() self.__input_data = new_input_data @property def metadata(self): return self.__metadata @metadata.setter def metadata(self, new_metadata: dict): self.__input_data = new_metadata @property def logger(self): return self.__logger @logger.setter def logger(self, new_logger): self.__logger = new_logger
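A hedged sketch of the metadata layout these classes expect, reconstructed only from the property accessors above; every column name and group label here is illustrative, not taken from the original project.

# Hypothetical metadata shape; keys mirror the lookups in the BaseClass properties.
metadata = {
    "metadata": {
        "primary_keys": {"events": "event_id", "sessions": "session_id", "pages": "page_id"},
        "datetime": "timestamp",
        "valid_values": {
            "groups": {"group_id": "group", "valid": ["a", "b"]},
            "actions": {
                "action_id": "action",
                "valid": ["searchResultPage", "visitPage", "checkin"],
                "search_action": "searchResultPage",
                "visit_action": "visitPage",
            },
            "kpis": {
                "duration_page": "checkin",
                "result_position": "result_position",
                "number_results": "n_results",
            },
        },
    }
}
# analyzer = NavigationDataAnalyzer(input_data=events_df, metadata=metadata)
# analyzer.session_analyzer.compute_click_through_rate(group_id="a")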
40.186347
120
0.620724
21,708
0.996648
0
0
4,274
0.196226
0
0
4,951
0.227308
71a73e1712465a4bec511db6faf72a21ab1c2e2c
946
py
Python
openskill/statistics.py
CalColson/openskill.py
ab61ca57fa6e60140d0a292c73440f22ceabd9a2
[ "MIT" ]
120
2021-09-03T03:06:11.000Z
2022-03-28T05:54:54.000Z
openskill/statistics.py
CalColson/openskill.py
ab61ca57fa6e60140d0a292c73440f22ceabd9a2
[ "MIT" ]
48
2021-09-23T07:15:13.000Z
2022-03-31T14:47:25.000Z
openskill/statistics.py
CalColson/openskill.py
ab61ca57fa6e60140d0a292c73440f22ceabd9a2
[ "MIT" ]
6
2022-01-20T16:45:28.000Z
2022-03-28T23:48:07.000Z
import sys

import scipy.stats

normal = scipy.stats.norm(0, 1)


def phi_major(x):
    return normal.cdf(x)


def phi_minor(x):
    return normal.pdf(x)


def v(x, t):
    xt = x - t
    denom = phi_major(xt)
    return -xt if (denom < sys.float_info.epsilon) else phi_minor(xt) / denom


def w(x, t):
    xt = x - t
    denom = phi_major(xt)
    if denom < sys.float_info.epsilon:
        return 1 if (x < 0) else 0
    return v(x, t) * (v(x, t) + xt)


def vt(x, t):
    xx = abs(x)
    b = phi_major(t - xx) - phi_major(-t - xx)
    if b < 1e-5:
        if x < 0:
            return -x - t
        return -x + t
    a = phi_minor(-t - xx) - phi_minor(t - xx)
    return (-a if x < 0 else a) / b


def wt(x, t):
    xx = abs(x)
    b = phi_major(t - xx) - phi_major(-t - xx)
    if b < sys.float_info.epsilon:
        return 1.0
    return ((t - xx) * phi_minor(t - xx) + (t + xx) * phi_minor(-t - xx)) / b + vt(
        x, t
    ) * vt(x, t)
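The helpers above are the usual truncated-normal correction terms used in TrueSkill-style rating updates (v/w for a win, vt/wt for a draw margin). A small sanity check, assuming the module is importable as openskill.statistics:

from openskill.statistics import v, w, vt

print(round(v(0.0, 0.0), 4))   # ~0.7979, the mean of a standard half-normal
print(round(w(0.0, 0.0), 4))   # ~0.6366, always between 0 and 1
print(round(vt(0.5, 1.0), 4))  # two-sided (draw) variant of v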
19.306122
83
0.516913
0
0
0
0
0
0
0
0
0
0
71abaaff24dc05f9c229f77e4b27cc8d68a5b7f5
14,189
py
Python
src/openalea/container/graph.py
revesansparole/oacontainer
066a15b8b1b22f857bf25ed443c5f39f4cbefb3e
[ "MIT" ]
null
null
null
src/openalea/container/graph.py
revesansparole/oacontainer
066a15b8b1b22f857bf25ed443c5f39f4cbefb3e
[ "MIT" ]
null
null
null
src/openalea/container/graph.py
revesansparole/oacontainer
066a15b8b1b22f857bf25ed443c5f39f4cbefb3e
[ "MIT" ]
null
null
null
# -*- coding: utf-8 -*- # # Graph : graph package # # Copyright or Copr. 2006 INRIA - CIRAD - INRA # # File author(s): Jerome Chopard <[email protected]> # # Distributed under the Cecill-C License. # See accompanying file LICENSE.txt or copy at # http://www.cecill.info/licences/Licence_CeCILL-C_V1-en.html # # VPlants WebSite : https://gforge.inria.fr/projects/vplants/ # """This module provide a simple pure python implementation for a graph interface does not implement copy concept """ from id_dict import IdDict class GraphError(Exception): """ base class of all graph exceptions """ class InvalidEdge(GraphError, KeyError): """ exception raised when a wrong edge id is provided """ class InvalidVertex(GraphError, KeyError): """ exception raised when a wrong vertex id is provided """ class Graph(object): """Directed graph with multiple links in this implementation : - vertices are tuple of edge_in,edge_out - edges are tuple of source,target """ def __init__(self, graph=None, idgenerator="set"): """constructor if graph is not none make a copy of the topological structure of graph (i.e. don't use the same id) args: - graph (Graph): the graph to copy, default=None - idgenerator (str): type of idgenerator to use, default 'set' """ self._vertices = IdDict(idgenerator=idgenerator) self._edges = IdDict(idgenerator=idgenerator) if graph is not None: self.extend(graph) # ########################################################## # # Graph concept # # ########################################################## def source(self, eid): """Retrieve the source vertex of an edge args: - eid (int): edge id return: - (int): vertex id """ try: return self._edges[eid][0] except KeyError: raise InvalidEdge(eid) def target(self, eid): """Retrieve the target vertex of an edge args: - eid (int): edge id return: - (int): vertex id """ try: return self._edges[eid][1] except KeyError: raise InvalidEdge(eid) def edge_vertices(self, eid): """Retrieve both source and target vertex of an edge args: - eid (int): edge id return: - (int, int): source id, target id """ try: return self._edges[eid] except KeyError: raise InvalidEdge(eid) def edge(self, source, target): """Find the matching edge with same source and same target return None if it don't succeed args: - source (int): source vertex - target (int): target vertex return: - (int): edge id with same source and target - (None): if search is unsuccessful """ if target not in self: raise InvalidVertex(target) for eid in self.out_edges(source): if self.target(eid) == target: return eid return None def __contains__(self, vid): """magic alias for `has_vertex` """ return self.has_vertex(vid) def has_vertex(self, vid): """test whether a vertex belong to the graph args: - vid (int): id of vertex return: - (bool) """ return vid in self._vertices def has_edge(self, eid): """test whether an edge belong to the graph args: - eid (int): id of edge return: - (bool) """ return eid in self._edges def is_valid(self): """Test the validity of the graph return: - (bool) """ return True # ########################################################## # # Vertex List Graph Concept # # ########################################################## def vertices(self): """Iterator on all vertices return: - (iter of int) """ return iter(self._vertices) def __iter__(self): """Magic alias for `vertices` """ return iter(self._vertices) def nb_vertices(self): """Total number of vertices in the graph return: - (int) """ return len(self._vertices) def __len__(self): """Magic alias for `nb_vertices` """ return 
self.nb_vertices() def in_neighbors(self, vid): """Iterator on the neighbors of vid where edges are directed from neighbor to vid args: - vid (int): vertex id return: - (iter of int): iter of vertex id """ if vid not in self: raise InvalidVertex(vid) neighbors_list = [self.source(eid) for eid in self._vertices[vid][0]] return iter(set(neighbors_list)) def out_neighbors(self, vid): """Iterator on the neighbors of vid where edges are directed from vid to neighbor args: - vid (int): vertex id return: - (iter of int): iter of vertex id """ if vid not in self: raise InvalidVertex(vid) neighbors_list = [self.target(eid) for eid in self._vertices[vid][1]] return iter(set(neighbors_list)) def neighbors(self, vid): """Iterator on all neighbors of vid both in and out args: - vid (int): vertex id return: - (iter of int): iter of vertex id """ neighbors_list = list(self.in_neighbors(vid)) neighbors_list.extend(self.out_neighbors(vid)) return iter(set(neighbors_list)) def nb_in_neighbors(self, vid): """Number of in neighbors of vid where edges are directed from neighbor to vid args: - vid (int): vertex id return: - (int) """ neighbors_set = list(self.in_neighbors(vid)) return len(neighbors_set) def nb_out_neighbors(self, vid): """Number of out neighbors of vid where edges are directed from vid to neighbor args: - vid (int): vertex id return: - (int) """ neighbors_set = list(self.out_neighbors(vid)) return len(neighbors_set) def nb_neighbors(self, vid): """Total number of both in and out neighbors of vid args: - vid (int): vertex id return: - (int) """ neighbors_set = list(self.neighbors(vid)) return len(neighbors_set) # ########################################################## # # Edge List Graph Concept # # ########################################################## def _iter_edges(self, vid): """ internal function that perform 'edges' with vid not None """ link_in, link_out = self._vertices[vid] for eid in link_in: yield eid for eid in link_out: yield eid def edges(self, vid=None): """Iterate on all edges connected to a given vertex. If vid is None (default), iterate on all edges in the graph args: - vid (int): vertex holdings edges, default (None) return: - (iter of int): iterator on edge ids """ if vid is None: return iter(self._edges) if vid not in self: raise InvalidVertex(vid) return self._iter_edges(vid) def nb_edges(self, vid=None): """Number of edges connected to a given vertex. If vid is None (default), total number of edges in the graph args: - vid (int): vertex holdings edges, default (None) return: - (int) """ if vid is None: return len(self._edges) if vid not in self: raise InvalidVertex(vid) return len(self._vertices[vid][0]) + len(self._vertices[vid][1]) def in_edges(self, vid): """Iterate on all edges pointing to a given vertex. args: - vid (int): vertex target of edges return: - (iter of int): iterator on edge ids """ if vid not in self: raise InvalidVertex(vid) for eid in self._vertices[vid][0]: yield eid def out_edges(self, vid): """Iterate on all edges away from a given vertex. args: - vid (int): vertex source of edges return: - (iter of int): iterator on edge ids """ if vid not in self: raise InvalidVertex(vid) for eid in self._vertices[vid][1]: yield eid def nb_in_edges(self, vid): """Number of edges pointing to a given vertex. args: - vid (int): vertex target of edges return: - (int) """ if vid not in self: raise InvalidVertex(vid) return len(self._vertices[vid][0]) def nb_out_edges(self, vid): """Number of edges away from a given vertex. 
args: - vid (int): vertex source of edges return: - (int) """ if vid not in self: raise InvalidVertex(vid) return len(self._vertices[vid][1]) # ########################################################## # # Mutable Vertex Graph concept # # ########################################################## def add_vertex(self, vid=None): """Add a vertex to the graph. If vid is not provided create a new vid args: - vid (int): id to use. If None (default) will generate a new one return: - vid (int): id used for the new vertex """ try: return self._vertices.add((set(), set()), vid) except KeyError: raise InvalidVertex(vid) def remove_vertex(self, vid): """Remove a specified vertex of the graph. Also remove all edge attached to it. args: - vid (int): id of vertex to remove """ if vid not in self: raise InvalidVertex(vid) link_in, link_out = self._vertices[vid] for edge in list(link_in): self.remove_edge(edge) for edge in list(link_out): self.remove_edge(edge) del self._vertices[vid] def clear(self): """Remove all vertices and edges don't change references to objects """ self._edges.clear() self._vertices.clear() # ########################################################## # # Mutable Edge Graph concept # # ########################################################## def add_edge(self, sid, tid, eid=None): """Add an edge to the graph. If eid is not provided generate a new one. args: - sid (int): id of source vertex - tid (int): id of target vertex - eid (int): id to use. If None (default) will generate a new one return: - eid (int): id used for new edge """ if sid not in self: raise InvalidVertex(sid) if tid not in self: raise InvalidVertex(tid) try: eid = self._edges.add((sid, tid), eid) except KeyError: raise InvalidEdge(eid) self._vertices[sid][1].add(eid) self._vertices[tid][0].add(eid) return eid def remove_edge(self, eid): """Remove a specified edge from the graph. 
args: - eid (int): id of edge to remove """ if not self.has_edge(eid): raise InvalidEdge(eid) sid, tid = self._edges[eid] self._vertices[sid][1].remove(eid) self._vertices[tid][0].remove(eid) del self._edges[eid] def clear_edges(self): """Remove all the edges of the graph don't change references to objects """ self._edges.clear() for vid, (in_set, out_set) in self._vertices.iteritems(): in_set.clear() out_set.clear() # ########################################################## # # Extend Graph concept # # ########################################################## def extend(self, graph): """Add the specified graph to self, create new vid and eid args: - graph (Graph): the graph to add return: - (dict of (int, int)): mapping between vertex id in graph and vertex id in extended self - (dict of (int, int)): mapping between edge id in graph and edge id in extended self """ # vertex adding trans_vid = {} for vid in list(graph.vertices()): trans_vid[vid] = self.add_vertex() # edge adding trans_eid = {} for eid in list(graph.edges()): sid = trans_vid[graph.source(eid)] tid = trans_vid[graph.target(eid)] trans_eid[eid] = self.add_edge(sid, tid) return trans_vid, trans_eid def sub_graph(self, vids): """ """ raise NotImplemented # from copy import deepcopy # vids = set(vids) # # result = deepcopy(self) # result._vertices.clear() # result._edges.clear() # # for key, edges in self._vertices.items(): # if key in vids: # inedges, outedges = edges # sortedinedges = set( # [eid for eid in inedges if self.source(eid) in vids]) # sortedoutedges = set( # [eid for eid in outedges if self.target(eid) in vids]) # result._vertices.add((sortedinedges, sortedoutedges), key) # for eid in sortedoutedges: # result._edges.add(self._edges[eid], eid) # # return result
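A short usage sketch of the Graph class above; it assumes the module and its IdDict dependency are importable.

# Build a two-vertex graph, connect it, then remove a vertex and its attached edge.
g = Graph()
v1 = g.add_vertex()
v2 = g.add_vertex()
e = g.add_edge(v1, v2)

assert g.source(e) == v1 and g.target(e) == v2
assert set(g.out_neighbors(v1)) == {v2}
assert g.nb_edges() == 1

g.remove_vertex(v2)   # also removes the attached edge
assert g.nb_edges() == 0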
26.571161
78
0.512651
13,599
0.958418
964
0.06794
0
0
0
0
7,904
0.557051
71acaf064514ffdbe1a52492a693bd272d32dbf5
8,439
py
Python
nets/mobilenet_v2_ssd.py
GT-AcerZhang/PaddlePaddle-SSD
3833afe3470b7dc811409b3d8111b98dc31c6d0e
[ "Apache-2.0" ]
47
2020-03-25T01:42:45.000Z
2022-03-23T12:03:46.000Z
nets/mobilenet_v2_ssd.py
tianxiehu/PaddlePaddle-SSD
ae2ec69b65cc181fdb4275b295f145dc22e71ddb
[ "Apache-2.0" ]
1
2021-06-30T13:02:59.000Z
2022-01-13T09:48:07.000Z
nets/mobilenet_v2_ssd.py
tianxiehu/PaddlePaddle-SSD
ae2ec69b65cc181fdb4275b295f145dc22e71ddb
[ "Apache-2.0" ]
9
2020-06-01T13:28:44.000Z
2021-06-17T02:42:55.000Z
import paddle.fluid as fluid from paddle.fluid.initializer import MSRA from paddle.fluid.param_attr import ParamAttr class MobileNetV2SSD: def __init__(self, img, num_classes, img_shape): self.img = img self.num_classes = num_classes self.img_shape = img_shape def ssd_net(self, scale=1.0): # 300x300 bottleneck_params_list = [(1, 16, 1, 1), (6, 24, 2, 2), (6, 32, 3, 2), (6, 64, 4, 2), (6, 96, 3, 1)] # conv1 input = self.conv_bn_layer(input=self.img, num_filters=int(32 * scale), filter_size=3, stride=2, padding=1, if_act=True) # bottleneck sequences in_c = int(32 * scale) for layer_setting in bottleneck_params_list: t, c, n, s = layer_setting input = self.invresi_blocks(input=input, in_c=in_c, t=t, c=int(c * scale), n=n, s=s) in_c = int(c * scale) # 19x19 module11 = input tmp = self.invresi_blocks(input=input, in_c=in_c, t=6, c=int(160 * scale), n=3, s=2) # 10x10 module13 = self.invresi_blocks(input=tmp, in_c=int(160 * scale), t=6, c=int(320 * scale), n=1, s=1) module14 = self.extra_block(module13, 256, 512, 1) # 5x5 module15 = self.extra_block(module14, 128, 256, 1) # 3x3 module16 = self.extra_block(module15, 128, 256, 1) # 2x2 module17 = self.extra_block(module16, 64, 128, 1) mbox_locs, mbox_confs, box, box_var = fluid.layers.multi_box_head( inputs=[module11, module13, module14, module15, module16, module17], image=self.img, num_classes=self.num_classes, min_ratio=20, max_ratio=90, min_sizes=[60.0, 105.0, 150.0, 195.0, 240.0, 285.0], max_sizes=[[], 150.0, 195.0, 240.0, 285.0, 300.0], aspect_ratios=[[2.], [2., 3.], [2., 3.], [2., 3.], [2., 3.], [2., 3.]], base_size=self.img_shape[2], offset=0.5, flip=True) return mbox_locs, mbox_confs, box, box_var def conv_bn_layer(self, input, filter_size, num_filters, stride, padding, num_groups=1, if_act=True, use_cudnn=True): parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA()) conv = fluid.layers.conv2d(input=input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=padding, groups=num_groups, use_cudnn=use_cudnn, param_attr=parameter_attr, bias_attr=False) bn = fluid.layers.batch_norm(input=conv) if if_act: return fluid.layers.relu6(bn) else: return bn def shortcut(self, input, data_residual): return fluid.layers.elementwise_add(input, data_residual) def inverted_residual_unit(self, input, num_in_filter, num_filters, ifshortcut, stride, filter_size, padding, expansion_factor): num_expfilter = int(round(num_in_filter * expansion_factor)) channel_expand = self.conv_bn_layer(input=input, num_filters=num_expfilter, filter_size=1, stride=1, padding=0, num_groups=1, if_act=True) bottleneck_conv = self.conv_bn_layer(input=channel_expand, num_filters=num_expfilter, filter_size=filter_size, stride=stride, padding=padding, num_groups=num_expfilter, if_act=True, use_cudnn=False) linear_out = self.conv_bn_layer(input=bottleneck_conv, num_filters=num_filters, filter_size=1, stride=1, padding=0, num_groups=1, if_act=False) if ifshortcut: out = self.shortcut(input=input, data_residual=linear_out) return out else: return linear_out def invresi_blocks(self, input, in_c, t, c, n, s): first_block = self.inverted_residual_unit(input=input, num_in_filter=in_c, num_filters=c, ifshortcut=False, stride=s, filter_size=3, padding=1, expansion_factor=t) last_residual_block = first_block last_c = c for i in range(1, n): last_residual_block = self.inverted_residual_unit(input=last_residual_block, num_in_filter=last_c, num_filters=c, ifshortcut=True, stride=1, filter_size=3, padding=1, expansion_factor=t) return last_residual_block def 
conv_bn(self, input, filter_size, num_filters, stride, padding, num_groups=1, act='relu', use_cudnn=True): parameter_attr = ParamAttr(learning_rate=0.1, initializer=MSRA()) conv = fluid.layers.conv2d(input=input, num_filters=num_filters, filter_size=filter_size, stride=stride, padding=padding, groups=num_groups, use_cudnn=use_cudnn, param_attr=parameter_attr, bias_attr=False) return fluid.layers.batch_norm(input=conv, act=act) def extra_block(self, input, num_filters1, num_filters2, num_groups): # 1x1 conv pointwise_conv = self.conv_bn(input=input, filter_size=1, num_filters=int(num_filters1), stride=1, num_groups=int(num_groups), padding=0) # 3x3 conv normal_conv = self.conv_bn(input=pointwise_conv, filter_size=3, num_filters=int(num_filters2), stride=2, num_groups=int(num_groups), padding=1) return normal_conv def build_ssd(img, num_classes, img_shape): ssd_model = MobileNetV2SSD(img, num_classes, img_shape) return ssd_model.ssd_net() if __name__ == '__main__': data = fluid.data(name='data', shape=[None, 3, 300, 300]) build_ssd(data, 21, img_shape=[3, 300, 300])
44.650794
114
0.422799
8,042
0.952957
0
0
0
0
0
0
109
0.012916
71ad91d94d2021895fed2197ad1e1027179c068d
5,844
py
Python
oneflow/python/test/ops/test_object_bbox_scale.py
caishenghang/oneflow
db239cc9f98e551823bf6ce2d4395bd5c339b1c5
[ "Apache-2.0" ]
2
2021-09-10T00:19:49.000Z
2021-11-16T11:27:20.000Z
oneflow/python/test/ops/test_object_bbox_scale.py
duijiudanggecl/oneflow
d2096ae14cf847509394a3b717021e2bd1d72f62
[ "Apache-2.0" ]
null
null
null
oneflow/python/test/ops/test_object_bbox_scale.py
duijiudanggecl/oneflow
d2096ae14cf847509394a3b717021e2bd1d72f62
[ "Apache-2.0" ]
1
2021-11-10T07:57:01.000Z
2021-11-10T07:57:01.000Z
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import os import random import cv2 import numpy as np import oneflow as flow import oneflow.typing as oft def _random_sample_images(anno_file, image_dir, batch_size): from pycocotools.coco import COCO image_files = [] image_ids = [] batch_group_id = -1 coco = COCO(anno_file) img_ids = coco.getImgIds() while len(image_files) < batch_size: rand_img_id = random.choice(img_ids) img_h = coco.imgs[rand_img_id]["height"] img_w = coco.imgs[rand_img_id]["width"] group_id = int(img_h / img_w) if batch_group_id == -1: batch_group_id = group_id if group_id != batch_group_id: continue anno_ids = coco.getAnnIds(imgIds=[rand_img_id]) if len(anno_ids) == 0: continue image_files.append(os.path.join(image_dir, coco.imgs[rand_img_id]["file_name"])) image_ids.append(rand_img_id) assert len(image_files) == len(image_ids) images = [cv2.imread(image_file).astype(np.single) for image_file in image_files] bbox_list = _get_images_bbox_list(coco, image_ids) return images, bbox_list def _get_images_bbox_list(coco, image_ids): bbox_list = [] for img_id in image_ids: anno_ids = coco.getAnnIds(imgIds=[img_id]) anno_ids = list( filter(lambda anno_id: coco.anns[anno_id]["iscrowd"] == 0, anno_ids) ) bbox_array = np.array( [coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single ) bbox_list.append(bbox_array) return bbox_list def _get_images_static_shape(images): image_shapes = [image.shape for image in images] image_static_shape = np.amax(image_shapes, axis=0) assert isinstance( image_static_shape, np.ndarray ), "image_shapes: {}, image_static_shape: {}".format( str(image_shapes), str(image_static_shape) ) image_static_shape = image_static_shape.tolist() image_static_shape.insert(0, len(image_shapes)) return image_static_shape def _get_bbox_static_shape(bbox_list): bbox_shapes = [bbox.shape for bbox in bbox_list] bbox_static_shape = np.amax(bbox_shapes, axis=0) assert isinstance( bbox_static_shape, np.ndarray ), "bbox_shapes: {}, bbox_static_shape: {}".format( str(bbox_shapes), str(bbox_static_shape) ) bbox_static_shape = bbox_static_shape.tolist() bbox_static_shape.insert(0, len(bbox_list)) return bbox_static_shape def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size): image_shape = _get_images_static_shape(images) bbox_shape = _get_bbox_static_shape(bbox_list) flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float) func_config.default_logical_view(flow.scope.mirrored_view()) @flow.global_function(function_config=func_config) def target_resize_bbox_scale_job( image_def: oft.ListListNumpy.Placeholder( shape=tuple(image_shape), dtype=flow.float ), bbox_def: oft.ListListNumpy.Placeholder( shape=tuple(bbox_shape), dtype=flow.float ), ): images_buffer = flow.tensor_list_to_tensor_buffer(image_def) resized_images_buffer, new_size, scale = flow.image_target_resize( images_buffer, target_size=target_size, max_size=max_size ) bbox_buffer = 
flow.tensor_list_to_tensor_buffer(bbox_def) scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale) scaled_bbox_list = flow.tensor_buffer_to_tensor_list( scaled_bbox, shape=bbox_shape[1:], dtype=flow.float ) return scaled_bbox_list, new_size input_image_list = [np.expand_dims(image, axis=0) for image in images] input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list] output_bbox_list, output_image_size = target_resize_bbox_scale_job( [input_image_list], [input_bbox_list] ).get() return output_bbox_list.numpy_lists()[0], output_image_size.numpy_list()[0] def _compare_bbox_scale( test_case, anno_file, image_dir, batch_size, target_size, max_size, print_debug_info=False, ): images, bbox_list = _random_sample_images(anno_file, image_dir, batch_size) of_bbox_list, image_size_list = _of_target_resize_bbox_scale( images, bbox_list, target_size, max_size ) for image, bbox, of_bbox, image_size in zip( images, bbox_list, of_bbox_list, image_size_list ): w, h = image_size oh, ow = image.shape[0:2] scale_h = h / oh scale_w = w / ow bbox[:, 0] *= scale_w bbox[:, 1] *= scale_h bbox[:, 2] *= scale_w bbox[:, 3] *= scale_h test_case.assertTrue(np.allclose(bbox, of_bbox)) @flow.unittest.skip_unless_1n1d() class TestObjectBboxScale(flow.unittest.TestCase): def test_object_bbox_scale(test_case): _compare_bbox_scale( test_case, "/dataset/mscoco_2017/annotations/instances_val2017.json", "/dataset/mscoco_2017/val2017", 4, 800, 1333, ) if __name__ == "__main__": unittest.main()
32.287293
88
0.688912
320
0.054757
0
0
1,212
0.207392
0
0
809
0.138433
71ae6ca7d57af38b1b86f8540325942204357879
1,767
py
Python
vagrant/kafka/bin/init.py
BertRaeymaekers/scrapbook
3c8483d4594356fbc84deb8d6496db3d856492c1
[ "MIT" ]
null
null
null
vagrant/kafka/bin/init.py
BertRaeymaekers/scrapbook
3c8483d4594356fbc84deb8d6496db3d856492c1
[ "MIT" ]
null
null
null
vagrant/kafka/bin/init.py
BertRaeymaekers/scrapbook
3c8483d4594356fbc84deb8d6496db3d856492c1
[ "MIT" ]
null
null
null
#! /usr/bin/env python3

import json
import os.path

import jinja2


DEFAULT_PARAMS = {
    "ansible_user": "vagrant"
}


if __name__ == "__main__":
    # Reading configuration
    here = os.path.dirname(os.path.realpath(__file__ + "/../"))
    with open(here + "/config.json", "r") as rf:
        config = json.load(rf)
    print(json.dumps(config, sort_keys=True, indent=4))

    # Generating an inventory file
    with open(here + "/playbook/inventory/hosts", "w") as inventory:
        inventory.write("[kafka]\n")
        for host in config["hosts"]:
            # Setting default values and updating them when more specific.
            params = dict()
            params.update(DEFAULT_PARAMS)
            params.update(config["params"])
            params.update(config["hosts"][host])
            # Setting some extra ansible parameters.
            params["ansible_ssh_host"] = params["ip"]
            inventory.write("%s\t%s\n" % (host, " ".join(("%s=%s" % (k, v) for k, v in params.items()))))

    # Generating the Vagrantfile
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(here + "/templates/"))
    template = env.get_template('Vagrantfile.j2')
    template.stream(**config).dump(here + '/vagrant/Vagrantfile')

    # Generating group vars for kafka
    with open(here + "/playbook/group_vars/kafka.yml", "w") as gv:
        gv.write("---\n")
        gv.write("hosts:\n")
        for (host, params) in config["hosts"].items():
            gv.write(" %s: '%s.%s'\n" % (params["ip"], params["hostname"], config["params"]["domain"]))
        gv.write("kafka:\n")
        gv.write(" hosts:\n")
        for (host, params) in config["hosts"].items():
            gv.write(" - %s.%s\n" % (params["hostname"], config["params"]["domain"]))
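A hypothetical example of the config.json this script reads; the key names follow the lookups above (config["params"]["domain"], config["hosts"][host]["ip"], ...), but every value is made up.

import json

example_config = {
    "params": {"domain": "example.local", "ansible_user": "vagrant"},
    "hosts": {
        "kafka1": {"ip": "192.168.56.11", "hostname": "kafka1"},
        "kafka2": {"ip": "192.168.56.12", "hostname": "kafka2"},
    },
}
print(json.dumps(example_config, sort_keys=True, indent=4))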
35.34
107
0.589134
0
0
0
0
0
0
0
0
628
0.355405
71aed94e4374b265d7146087fcd15cb6a8415441
883
py
Python
harvest/models/beastsimulator.py
lmaurits/harvest
df6b549096da8ae2f4ed38aa2be19c7e82fa60e3
[ "BSD-2-Clause" ]
1
2016-10-23T13:24:44.000Z
2016-10-23T13:24:44.000Z
harvest/models/beastsimulator.py
lmaurits/harvest
df6b549096da8ae2f4ed38aa2be19c7e82fa60e3
[ "BSD-2-Clause" ]
null
null
null
harvest/models/beastsimulator.py
lmaurits/harvest
df6b549096da8ae2f4ed38aa2be19c7e82fa60e3
[ "BSD-2-Clause" ]
null
null
null
import os

import harvest.dataframe
from harvest.models.simulator import Simulator


class BeastSimulator(Simulator):

    def __init__(self, tree, n_features):
        Simulator.__init__(self, tree, n_features)

    def generate_beast_xml(self):
        # Subclasses should implement this
        return None

    def generate_data(self):
        # Generate BEAST XML file to do simulation
        xml = self.generate_beast_xml()
        temp_filename = xml.write_file(overwrite=True)
        # Run BEAST simulation
        os.system("beast %s > /dev/null" % temp_filename)
        # Delete BEAST XML file
        os.remove(temp_filename)
        # Read simulated data
        data = harvest.dataframe.read_from_beast_xml(xml.output_filename)
        # Delete simulated data
        os.remove(xml.output_filename)
        self.data = data
        self.data.datatype = self.datatype
30.448276
73
0.673839
798
0.903737
0
0
0
0
0
0
187
0.211778
71af526fe8ec36b7ab5df62ce53a7484137b158f
770
py
Python
assimilator.py
DutChen18/slime-clusters-cuda
186d198665a017cf0eacde33765b6cb3cb4aecb5
[ "MIT" ]
null
null
null
assimilator.py
DutChen18/slime-clusters-cuda
186d198665a017cf0eacde33765b6cb3cb4aecb5
[ "MIT" ]
null
null
null
assimilator.py
DutChen18/slime-clusters-cuda
186d198665a017cf0eacde33765b6cb3cb4aecb5
[ "MIT" ]
null
null
null
# pylint: skip-file
import os

from assimilator import *
from Boinc import boinc_project_path


class SlimeClustersAssimilator(Assimilator):
    def __init__(self):
        Assimilator.__init__(self)

    def assimilate_handler(self, wu, results, canonical_result):
        if canonical_result == None:
            return

        src_file = self.get_file_path(canonical_result)
        dst_dir = boinc_project_path.project_path('slime-clusters')
        dst_file = os.path.join(dst_dir, 'results.txt')

        if not os.path.exists(dst_dir):
            os.makedirs(dst_dir)

        with open(src_file, 'r') as src, open(dst_file, 'a') as dst:
            dst.writelines(src.readlines())


if __name__ == "__main__":
    SlimeClustersAssimilator().run()
29.615385
68
0.661039
610
0.792208
0
0
0
0
0
0
64
0.083117
71af9d8ca1143528cfcbc75651debdacf07e53c4
12,343
py
Python
modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py
Rubtsowa/modin
6550939753c76e896ef2bfd65bb9468d6ad161d7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py
Rubtsowa/modin
6550939753c76e896ef2bfd65bb9468d6ad161d7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
modin/core/execution/ray/implementations/cudf_on_ray/dataframe/dataframe.py
Rubtsowa/modin
6550939753c76e896ef2bfd65bb9468d6ad161d7
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
# Licensed to Modin Development Team under one or more contributor license agreements. # See the NOTICE file distributed with this work for additional information regarding # copyright ownership. The Modin Development Team licenses this file to you under the # Apache License, Version 2.0 (the "License"); you may not use this file except in # compliance with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software distributed under # the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. """Module houses class that implements ``PandasOnRayDataframe`` class using cuDF.""" import numpy as np import ray from ..partitioning.partition import cuDFOnRayDataframePartition from ..partitioning.partition_manager import cuDFOnRayDataframePartitionManager from modin.core.execution.ray.implementations.pandas_on_ray.dataframe.dataframe import ( PandasOnRayDataframe, ) from modin.error_message import ErrorMessage class cuDFOnRayDataframe(PandasOnRayDataframe): """ The class implements the interface in ``PandasOnRayDataframe`` using cuDF. Parameters ---------- partitions : np.ndarray A 2D NumPy array of partitions. index : sequence The index for the dataframe. Converted to a ``pandas.Index``. columns : sequence The columns object for the dataframe. Converted to a ``pandas.Index``. row_lengths : list, optional The length of each partition in the rows. The "height" of each of the block partitions. Is computed if not provided. column_widths : list, optional The width of each partition in the columns. The "width" of each of the block partitions. Is computed if not provided. dtypes : pandas.Series, optional The data types for the dataframe columns. """ _partition_mgr_cls = cuDFOnRayDataframePartitionManager def synchronize_labels(self, axis=None): """ Synchronize labels by applying the index object (Index or Columns) to the partitions eagerly. Parameters ---------- axis : {0, 1, None}, default: None The axis to apply to. If None, it applies to both axes. """ ErrorMessage.catch_bugs_and_request_email( axis is not None and axis not in [0, 1] ) cum_row_lengths = np.cumsum([0] + self._row_lengths) cum_col_widths = np.cumsum([0] + self._column_widths) def apply_idx_objs(df, idx, cols, axis): # cudf does not support set_axis. It only supports rename with 1-to-1 mapping. # Therefore, we need to create the dictionary that have the relationship between # current index and new ones. 
idx = {df.index[i]: idx[i] for i in range(len(idx))} cols = {df.index[i]: cols[i] for i in range(len(cols))} if axis == 0: return df.rename(index=idx) elif axis == 1: return df.rename(columns=cols) else: return df.rename(index=idx, columns=cols) keys = np.array( [ [ self._partitions[i][j].apply( apply_idx_objs, idx=self.index[ slice(cum_row_lengths[i], cum_row_lengths[i + 1]) ], cols=self.columns[ slice(cum_col_widths[j], cum_col_widths[j + 1]) ], axis=axis, ) for j in range(len(self._partitions[i])) ] for i in range(len(self._partitions)) ] ) self._partitions = np.array( [ [ cuDFOnRayDataframePartition( self._partitions[i][j].get_gpu_manager(), keys[i][j], self._partitions[i][j]._length_cache, self._partitions[i][j]._width_cache, ) for j in range(len(keys[i])) ] for i in range(len(keys)) ] ) def mask( self, row_indices=None, row_numeric_idx=None, col_indices=None, col_numeric_idx=None, ): """ Lazily select columns or rows from given indices. Parameters ---------- row_indices : list of hashable, optional The row labels to extract. row_numeric_idx : list of int, optional The row indices to extract. col_indices : list of hashable, optional The column labels to extract. col_numeric_idx : list of int, optional The column indices to extract. Returns ------- cuDFOnRayDataframe A new ``cuDFOnRayDataframe`` from the mask provided. Notes ----- If both `row_indices` and `row_numeric_idx` are set, `row_indices` will be used. The same rule applied to `col_indices` and `col_numeric_idx`. """ if isinstance(row_numeric_idx, slice) and ( row_numeric_idx == slice(None) or row_numeric_idx == slice(0, None) ): row_numeric_idx = None if isinstance(col_numeric_idx, slice) and ( col_numeric_idx == slice(None) or col_numeric_idx == slice(0, None) ): col_numeric_idx = None if ( row_indices is None and row_numeric_idx is None and col_indices is None and col_numeric_idx is None ): return self.copy() if row_indices is not None: row_numeric_idx = self.index.get_indexer_for(row_indices) if row_numeric_idx is not None: row_partitions_list = self._get_dict_of_block_index(0, row_numeric_idx) if isinstance(row_numeric_idx, slice): # Row lengths for slice are calculated as the length of the slice # on the partition. Often this will be the same length as the current # length, but sometimes it is different, thus the extra calculation. new_row_lengths = [ len(range(*idx.indices(self._row_lengths[p]))) for p, idx in row_partitions_list.items() ] # Use the slice to calculate the new row index new_index = self.index[row_numeric_idx] else: new_row_lengths = [len(idx) for _, idx in row_partitions_list.items()] new_index = self.index[sorted(row_numeric_idx)] else: row_partitions_list = { i: slice(None) for i in range(len(self._row_lengths)) } new_row_lengths = self._row_lengths new_index = self.index if col_indices is not None: col_numeric_idx = self.columns.get_indexer_for(col_indices) if col_numeric_idx is not None: col_partitions_list = self._get_dict_of_block_index(1, col_numeric_idx) if isinstance(col_numeric_idx, slice): # Column widths for slice are calculated as the length of the slice # on the partition. Often this will be the same length as the current # length, but sometimes it is different, thus the extra calculation. 
new_col_widths = [ len(range(*idx.indices(self._column_widths[p]))) for p, idx in col_partitions_list.items() ] # Use the slice to calculate the new columns new_columns = self.columns[col_numeric_idx] assert sum(new_col_widths) == len( new_columns ), "{} != {}.\n{}\n{}\n{}".format( sum(new_col_widths), len(new_columns), col_numeric_idx, self._column_widths, col_partitions_list, ) if self._dtypes is not None: new_dtypes = self.dtypes[col_numeric_idx] else: new_dtypes = None else: new_col_widths = [len(idx) for _, idx in col_partitions_list.items()] new_columns = self.columns[sorted(col_numeric_idx)] if self._dtypes is not None: new_dtypes = self.dtypes.iloc[sorted(col_numeric_idx)] else: new_dtypes = None else: col_partitions_list = { i: slice(None) for i in range(len(self._column_widths)) } new_col_widths = self._column_widths new_columns = self.columns if self._dtypes is not None: new_dtypes = self.dtypes else: new_dtypes = None key_and_gpus = np.array( [ [ [ self._partitions[row_idx][col_idx].mask( row_internal_indices, col_internal_indices ), self._partitions[row_idx][col_idx].get_gpu_manager(), ] for col_idx, col_internal_indices in col_partitions_list.items() if isinstance(col_internal_indices, slice) or len(col_internal_indices) > 0 ] for row_idx, row_internal_indices in row_partitions_list.items() if isinstance(row_internal_indices, slice) or len(row_internal_indices) > 0 ] ) shape = key_and_gpus.shape[:2] keys = ray.get(key_and_gpus[:, :, 0].flatten().tolist()) gpu_managers = key_and_gpus[:, :, 1].flatten().tolist() new_partitions = self._partition_mgr_cls._create_partitions( keys, gpu_managers ).reshape(shape) intermediate = self.__constructor__( new_partitions, new_index, new_columns, new_row_lengths, new_col_widths, new_dtypes, ) # Check if monotonically increasing, return if it is. Fast track code path for # common case to keep it fast. if ( row_numeric_idx is None or isinstance(row_numeric_idx, slice) or len(row_numeric_idx) == 1 or np.all(row_numeric_idx[1:] >= row_numeric_idx[:-1]) ) and ( col_numeric_idx is None or isinstance(col_numeric_idx, slice) or len(col_numeric_idx) == 1 or np.all(col_numeric_idx[1:] >= col_numeric_idx[:-1]) ): return intermediate # The new labels are often smaller than the old labels, so we can't reuse the # original order values because those were mapped to the original data. We have # to reorder here based on the expected order from within the data. # We create a dictionary mapping the position of the numeric index with respect # to all others, then recreate that order by mapping the new order values from # the old. This information is sent to `_reorder_labels`. if row_numeric_idx is not None: row_order_mapping = dict( zip(sorted(row_numeric_idx), range(len(row_numeric_idx))) ) new_row_order = [row_order_mapping[idx] for idx in row_numeric_idx] else: new_row_order = None if col_numeric_idx is not None: col_order_mapping = dict( zip(sorted(col_numeric_idx), range(len(col_numeric_idx))) ) new_col_order = [col_order_mapping[idx] for idx in col_numeric_idx] else: new_col_order = None return intermediate._reorder_labels( row_numeric_idx=new_row_order, col_numeric_idx=new_col_order )
41.006645
101
0.573767
11,132
0.901888
0
0
0
0
0
0
3,960
0.32083
71afcdef0e0e86f29155c36a2d10beb1ffdab1ce
1,527
py
Python
Exoplanet_Population.py
mw5868/University
076c9b001dbfe3765607877be4f89ccf86a88331
[ "MIT" ]
null
null
null
Exoplanet_Population.py
mw5868/University
076c9b001dbfe3765607877be4f89ccf86a88331
[ "MIT" ]
null
null
null
Exoplanet_Population.py
mw5868/University
076c9b001dbfe3765607877be4f89ccf86a88331
[ "MIT" ]
null
null
null
from astropy.table import Table, Column
import matplotlib.pyplot as plt

#url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=pl_hostname,ra,dec&order=dec&format=csv"
url = "https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets"
# This API returns Hostname, RA and Dec
t = Table.read(url, format="csv")

t_b = t[t["pl_letter"] == "b"]
t_c = t[t["pl_letter"] == "c"]
t_d = t[t["pl_letter"] == "d"]
t_e = t[t["pl_letter"] == "e"]
t_f = t[t["pl_letter"] == "f"]
t_g = t[t["pl_letter"] == "g"]
t_h = t[t["pl_letter"] == "h"]
t_i = t[t["pl_letter"] == "i"]

fig = plt.figure()
ax = fig.add_subplot(1,1,1,aspect="equal")
ax.scatter(t_b["ra"],t_b["dec"],color="Black",label = "2 Planets")
ax.scatter(t_c["ra"],t_c["dec"],color="red", label = "3 Planets")
ax.scatter(t_d["ra"],t_d["dec"],color="blue", label = "4 Planets")
ax.scatter(t_e["ra"],t_e["dec"],color="green", label = "5 Planets")
ax.scatter(t_f["ra"],t_f["dec"],color="yellow", label = "6 Planets")
ax.scatter(t_g["ra"],t_g["dec"],color="purple", label = "7 Planets")
ax.scatter(t_h["ra"],t_h["dec"],color="orange", label = "8 Planets")
ax.scatter(t_i["ra"],t_i["dec"],color="cyan", label = "9 Planets")

ax.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
ax.set_xlim(360,0)
ax.set_ylim(-90,90)
ax.set_ylabel("DEC")
ax.set_xlabel("RA")
ax.set_title("Positions of exoplanets by number of planets in system")

plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
42.416667
144
0.668631
0
0
0
0
0
0
0
0
676
0.442698
71b09341fe6822cfcd03800b867e3d43b02d5db2
242
py
Python
pykuna/errors.py
marthoc/pykuna
f5bf02f26e1931b35becde6e1da58fb8bb0cc2d8
[ "MIT" ]
4
2018-12-16T18:05:14.000Z
2019-12-31T17:12:48.000Z
pykuna/errors.py
marthoc/pykuna
f5bf02f26e1931b35becde6e1da58fb8bb0cc2d8
[ "MIT" ]
2
2019-05-08T17:22:25.000Z
2020-03-10T20:52:42.000Z
pykuna/errors.py
marthoc/pykuna
f5bf02f26e1931b35becde6e1da58fb8bb0cc2d8
[ "MIT" ]
null
null
null
class KunaError(Exception):
    pass


class AuthenticationError(KunaError):
    """Raised when authentication fails."""

    pass


class UnauthorizedError(KunaError):
    """Raised when an API call fails as unauthorized (401)."""

    pass
16.133333
62
0.698347
235
0.971074
0
0
0
0
0
0
97
0.400826
71b199d12891c79153389fe28f6188e598ac7c21
792
py
Python
src/pe_problem74.py
henrimitte/Project-Euler
77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb
[ "MIT" ]
null
null
null
src/pe_problem74.py
henrimitte/Project-Euler
77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb
[ "MIT" ]
null
null
null
src/pe_problem74.py
henrimitte/Project-Euler
77fd9f5b076d1ca2e5ed4ef94bf8d32d9ed611eb
[ "MIT" ]
null
null
null
from tools import factorial


def solve():
    fa = tuple(factorial(x) for x in range(10))

    def _sum_factorial_of_digits(n: int) -> int:
        s = 0
        while n > 0:
            s += fa[n % 10]
            n //= 10
        return s

    limit = 1000000
    loops = [0 for x in range(limit)]
    for i in range(limit):
        if not loops[i]:
            loop_not_found = True
            chain = [i]
            n = i
            while loop_not_found:
                n = _sum_factorial_of_digits(n)
                if n in chain:
                    loop_not_found = False
                else:
                    chain.append(n)
            loops[i] = len(chain)

    sixty = sum(filter(lambda v: v == 60, loops)) // 60
    print(sixty)


if __name__ == '__main__':
    solve()
22.628571
55
0.474747
0
0
0
0
0
0
0
0
10
0.012626
71b28ef18b75d4bcb886bea855f0ba76dd2bc9f2
27,966
py
Python
thingsboard_gateway/connectors/modbus/modbus_connector.py
ferguscan/thingsboard-gateway
bc20fdb8e46f840b8538a010db2714ec6071fa5b
[ "Apache-2.0" ]
null
null
null
thingsboard_gateway/connectors/modbus/modbus_connector.py
ferguscan/thingsboard-gateway
bc20fdb8e46f840b8538a010db2714ec6071fa5b
[ "Apache-2.0" ]
null
null
null
thingsboard_gateway/connectors/modbus/modbus_connector.py
ferguscan/thingsboard-gateway
bc20fdb8e46f840b8538a010db2714ec6071fa5b
[ "Apache-2.0" ]
null
null
null
# Copyright 2022. ThingsBoard # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from threading import Thread from time import sleep, time from queue import Queue from random import choice from string import ascii_lowercase from thingsboard_gateway.tb_utility.tb_utility import TBUtility # Try import Pymodbus library or install it and import try: from pymodbus.constants import Defaults except ImportError: print("Modbus library not found - installing...") TBUtility.install_package("pymodbus", ">=2.3.0") TBUtility.install_package('pyserial') from pymodbus.constants import Defaults try: from twisted.internet import reactor except ImportError: TBUtility.install_package('twisted') from twisted.internet import reactor from twisted.internet import reactor from pymodbus.bit_write_message import WriteSingleCoilResponse, WriteMultipleCoilsResponse from pymodbus.register_write_message import WriteMultipleRegistersResponse, WriteSingleRegisterResponse from pymodbus.register_read_message import ReadRegistersResponseBase from pymodbus.bit_read_message import ReadBitsResponseBase from pymodbus.client.sync import ModbusTcpClient, ModbusUdpClient, ModbusSerialClient from pymodbus.client.sync import ModbusRtuFramer, ModbusSocketFramer, ModbusAsciiFramer from pymodbus.exceptions import ConnectionException from pymodbus.server.asynchronous import StartTcpServer, StartUdpServer, StartSerialServer, StopServer from pymodbus.device import ModbusDeviceIdentification from pymodbus.version import version from pymodbus.datastore import ModbusSlaveContext, ModbusServerContext from pymodbus.datastore import ModbusSparseDataBlock from thingsboard_gateway.connectors.connector import Connector, log from thingsboard_gateway.connectors.modbus.constants import * from thingsboard_gateway.connectors.modbus.slave import Slave from thingsboard_gateway.connectors.modbus.backward_compability_adapter import BackwardCompatibilityAdapter from thingsboard_gateway.connectors.modbus.bytes_modbus_downlink_converter import BytesModbusDownlinkConverter CONVERTED_DATA_SECTIONS = [ATTRIBUTES_PARAMETER, TELEMETRY_PARAMETER] FRAMER_TYPE = { 'rtu': ModbusRtuFramer, 'socket': ModbusSocketFramer, 'ascii': ModbusAsciiFramer } SLAVE_TYPE = { 'tcp': StartTcpServer, 'udp': StartUdpServer, 'serial': StartSerialServer } FUNCTION_TYPE = { 'coils_initializer': 'co', 'holding_registers': 'hr', 'input_registers': 'ir', 'discrete_inputs': 'di' } FUNCTION_CODE_WRITE = { 'holding_registers': (6, 16), 'coils_initializer': (5, 15) } FUNCTION_CODE_READ = { 'holding_registers': 3, 'coils_initializer': 1, 'input_registers': 4, 'discrete_inputs': 2 } class ModbusConnector(Connector, Thread): process_requests = Queue(-1) def __init__(self, gateway, config, connector_type): self.statistics = {STATISTIC_MESSAGE_RECEIVED_PARAMETER: 0, STATISTIC_MESSAGE_SENT_PARAMETER: 0} super().__init__() self.__gateway = gateway self._connector_type = connector_type self.__backward_compatibility_adapter = BackwardCompatibilityAdapter(config, gateway.get_config_path()) self.__config = 
self.__backward_compatibility_adapter.convert() self.setName(self.__config.get("name", 'Modbus Default ' + ''.join(choice(ascii_lowercase) for _ in range(5)))) self.__connected = False self.__stopped = False self.daemon = True if self.__config.get('slave'): self.__slave_thread = Thread(target=self.__configure_and_run_slave, args=(self.__config['slave'],), daemon=True, name='Gateway as a slave') self.__slave_thread.start() if config['slave'].get('sendDataToThingsBoard', False): self.__modify_main_config() self.__slaves = [] self.__load_slaves() def is_connected(self): return self.__connected def open(self): self.__stopped = False self.start() def run(self): self.__connected = True while True: if not self.__stopped and not ModbusConnector.process_requests.empty(): thread = Thread(target=self.__process_slaves, daemon=True) thread.start() if self.__stopped: break sleep(.2) @staticmethod def __configure_and_run_slave(config): identity = None if config.get('identity'): identity = ModbusDeviceIdentification() identity.VendorName = config['identity'].get('vendorName', '') identity.ProductCode = config['identity'].get('productCode', '') identity.VendorUrl = config['identity'].get('vendorUrl', '') identity.ProductName = config['identity'].get('productName', '') identity.ModelName = config['identity'].get('ModelName', '') identity.MajorMinorRevision = version.short() blocks = {} for (key, value) in config.get('values').items(): values = {} converter = BytesModbusDownlinkConverter({}) for item in value: for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'): for val in item.get(section, []): function_code = FUNCTION_CODE_WRITE[key][0] if val['objectsCount'] <= 1 else \ FUNCTION_CODE_WRITE[key][1] converted_value = converter.convert( {**val, 'device': config.get('deviceName', 'Gateway'), 'functionCode': function_code, 'byteOrder': config['byteOrder'], 'wordOrder': config['wordOrder']}, {'data': {'params': val['value']}}) values[val['address'] + 1] = converted_value blocks[FUNCTION_TYPE[key]] = ModbusSparseDataBlock(values) context = ModbusServerContext(slaves=ModbusSlaveContext(**blocks), single=True) SLAVE_TYPE[config['type']](context, identity=identity, address=(config.get('host'), config.get('port')) if ( config['type'] == 'tcp' or 'udp') else None, port=config.get('port') if config['type'] == 'serial' else None, framer=FRAMER_TYPE[config['method']]) def __modify_main_config(self): config = self.__config['slave'] values = config.pop('values') device = config for (register, reg_values) in values.items(): for value in reg_values: for section in ('attributes', 'timeseries', 'attributeUpdates', 'rpc'): if not device.get(section): device[section] = [] for item in value.get(section, []): device[section].append({**item, 'functionCode': FUNCTION_CODE_READ[ register] if section not in ('attributeUpdates', 'rpc') else item['functionCode']}) self.__config['master']['slaves'].append(device) def __load_slaves(self): self.__slaves = [ Slave(**{**device, 'connector': self, 'gateway': self.__gateway, 'callback': ModbusConnector.callback}) for device in self.__config.get('master', {'slaves': []}).get('slaves', [])] @classmethod def callback(cls, slave): cls.process_requests.put(slave) @property def connector_type(self): return self._connector_type def __convert_and_save_data(self, config_tuple): device, current_device_config, config, device_responses = config_tuple converted_data = {} try: converted_data = device.config[UPLINK_PREFIX + CONVERTER_PARAMETER].convert( config=config, 
data=device_responses) except Exception as e: log.error(e) to_send = {DEVICE_NAME_PARAMETER: converted_data[DEVICE_NAME_PARAMETER], DEVICE_TYPE_PARAMETER: converted_data[DEVICE_TYPE_PARAMETER], TELEMETRY_PARAMETER: [], ATTRIBUTES_PARAMETER: [] } if current_device_config.get('sendDataOnlyOnChange'): self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1 for converted_data_section in CONVERTED_DATA_SECTIONS: for current_section_dict in converted_data[converted_data_section]: for key, value in current_section_dict.items(): if device.config[LAST_PREFIX + converted_data_section].get(key) is None or \ device.config[LAST_PREFIX + converted_data_section][key] != value: device.config[LAST_PREFIX + converted_data_section][key] = value to_send[converted_data_section].append({key: value}) elif converted_data and current_device_config.get('sendDataOnlyOnChange') is None or \ not current_device_config.get('sendDataOnlyOnChange'): self.statistics[STATISTIC_MESSAGE_RECEIVED_PARAMETER] += 1 for converted_data_section in CONVERTED_DATA_SECTIONS: device.config[LAST_PREFIX + converted_data_section] = converted_data[ converted_data_section] to_send[converted_data_section] = converted_data[converted_data_section] if to_send.get(ATTRIBUTES_PARAMETER) or to_send.get(TELEMETRY_PARAMETER): self.__gateway.send_to_storage(self.get_name(), to_send) self.statistics[STATISTIC_MESSAGE_SENT_PARAMETER] += 1 def close(self): self.__stopped = True self.__stop_connections_to_masters() if reactor.running: StopServer() log.info('%s has been stopped.', self.get_name()) def get_name(self): return self.name def __process_slaves(self): # TODO: write documentation device = ModbusConnector.process_requests.get() device_responses = {'timeseries': {}, 'attributes': {}} current_device_config = {} try: for config_section in device_responses: if device.config.get(config_section) is not None: current_device_config = device.config self.__connect_to_current_master(device) if not device.config['master'].is_socket_open() or not len( current_device_config[config_section]): continue # Reading data from device for interested_data in range(len(current_device_config[config_section])): current_data = current_device_config[config_section][interested_data] current_data[DEVICE_NAME_PARAMETER] = device input_data = self.__function_to_device(device, current_data) device_responses[config_section][current_data[TAG_PARAMETER]] = { "data_sent": current_data, "input_data": input_data} log.debug("Checking %s for device %s", config_section, device) log.debug('Device response: ', device_responses) if device_responses.get('timeseries') or device_responses.get('attributes'): self.__convert_and_save_data((device, current_device_config, { **current_device_config, BYTE_ORDER_PARAMETER: current_device_config.get(BYTE_ORDER_PARAMETER, device.byte_order), WORD_ORDER_PARAMETER: current_device_config.get(WORD_ORDER_PARAMETER, device.word_order) }, device_responses)) except ConnectionException: sleep(5) log.error("Connection lost! 
Reconnecting...") except Exception as e: log.exception(e) def __connect_to_current_master(self, device=None): # TODO: write documentation connect_attempt_count = 5 connect_attempt_time_ms = 100 wait_after_failed_attempts_ms = 300000 if device.config.get('master') is None: device.config['master'], device.config['available_functions'] = self.__configure_master(device.config) if connect_attempt_count < 1: connect_attempt_count = 1 connect_attempt_time_ms = device.config.get('connectAttemptTimeMs', connect_attempt_time_ms) if connect_attempt_time_ms < 500: connect_attempt_time_ms = 500 wait_after_failed_attempts_ms = device.config.get('waitAfterFailedAttemptsMs', wait_after_failed_attempts_ms) if wait_after_failed_attempts_ms < 1000: wait_after_failed_attempts_ms = 1000 current_time = time() * 1000 if not device.config['master'].is_socket_open(): if device.config['connection_attempt'] >= connect_attempt_count and current_time - device.config[ 'last_connection_attempt_time'] >= wait_after_failed_attempts_ms: device.config['connection_attempt'] = 0 while not device.config['master'].is_socket_open() \ and device.config['connection_attempt'] < connect_attempt_count \ and current_time - device.config.get('last_connection_attempt_time', 0) >= connect_attempt_time_ms: device.config['connection_attempt'] = device.config[ 'connection_attempt'] + 1 device.config['last_connection_attempt_time'] = current_time log.debug("Modbus trying connect to %s", device) device.config['master'].connect() if device.config['connection_attempt'] == connect_attempt_count: log.warn("Maximum attempt count (%i) for device \"%s\" - encountered.", connect_attempt_count, device) if device.config['connection_attempt'] >= 0 and device.config['master'].is_socket_open(): device.config['connection_attempt'] = 0 device.config['last_connection_attempt_time'] = current_time @staticmethod def __configure_master(config): current_config = config current_config["rtu"] = FRAMER_TYPE[current_config['method']] if current_config.get('type') == 'tcp': master = ModbusTcpClient(current_config["host"], current_config["port"], current_config["rtu"], timeout=current_config["timeout"], retry_on_empty=current_config["retry_on_empty"], retry_on_invalid=current_config["retry_on_invalid"], retries=current_config["retries"]) elif current_config.get(TYPE_PARAMETER) == 'udp': master = ModbusUdpClient(current_config["host"], current_config["port"], current_config["rtu"], timeout=current_config["timeout"], retry_on_empty=current_config["retry_on_empty"], retry_on_invalid=current_config["retry_on_invalid"], retries=current_config["retries"]) elif current_config.get(TYPE_PARAMETER) == 'serial': master = ModbusSerialClient(method=current_config["method"], port=current_config["port"], timeout=current_config["timeout"], retry_on_empty=current_config["retry_on_empty"], retry_on_invalid=current_config["retry_on_invalid"], retries=current_config["retries"], baudrate=current_config["baudrate"], stopbits=current_config["stopbits"], bytesize=current_config["bytesize"], parity=current_config["parity"], strict=current_config["strict"]) else: raise Exception("Invalid Modbus transport type.") available_functions = { 1: master.read_coils, 2: master.read_discrete_inputs, 3: master.read_holding_registers, 4: master.read_input_registers, 5: master.write_coil, 6: master.write_register, 15: master.write_coils, 16: master.write_registers, } return master, available_functions def __stop_connections_to_masters(self): for slave in self.__slaves: if slave.config.get('master') is not 
None and slave.config.get('master').is_socket_open(): slave.config['master'].close() @staticmethod def __function_to_device(device, config): function_code = config.get('functionCode') result = None if function_code == 1: result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER], count=config.get(OBJECTS_COUNT_PARAMETER, config.get("registersCount", config.get( "registerCount", 1))) * 8, unit=device.config['unitId']) elif function_code in (2, 3, 4): result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER], count=config.get(OBJECTS_COUNT_PARAMETER, config.get("registersCount", config.get( "registerCount", 1))), unit=device.config['unitId']) elif function_code in (5, 15): result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER], value=config[PAYLOAD_PARAMETER], unit=device.config['unitId'] * 8) elif function_code in (6, 16): result = device.config['available_functions'][function_code](address=config[ADDRESS_PARAMETER], values=config[PAYLOAD_PARAMETER], unit=device.config['unitId']) else: log.error("Unknown Modbus function with code: %s", function_code) log.debug("With result %s", str(result)) if "Exception" in str(result): log.exception(result) return result def on_attributes_update(self, content): try: device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0] for attribute_updates_command_config in device.config['attributeUpdates']: for attribute_updated in content[DATA_PARAMETER]: if attribute_updates_command_config[TAG_PARAMETER] == attribute_updated: to_process = { DEVICE_SECTION_PARAMETER: content[DEVICE_SECTION_PARAMETER], DATA_PARAMETER: { RPC_METHOD_PARAMETER: attribute_updated, RPC_PARAMS_PARAMETER: content[DATA_PARAMETER][attribute_updated] } } attribute_updates_command_config['byteOrder'] = device.byte_order or 'LITTLE' attribute_updates_command_config['wordOrder'] = device.word_order or 'LITTLE' self.__process_request(to_process, attribute_updates_command_config, request_type='attributeUpdates') except Exception as e: log.exception(e) def server_side_rpc_handler(self, server_rpc_request): try: if server_rpc_request.get(DEVICE_SECTION_PARAMETER) is not None: log.debug("Modbus connector received rpc request for %s with server_rpc_request: %s", server_rpc_request[DEVICE_SECTION_PARAMETER], server_rpc_request) device = tuple( filter( lambda slave: slave.name == server_rpc_request[DEVICE_SECTION_PARAMETER], self.__slaves ) )[0] if isinstance(device.config[RPC_SECTION], dict): rpc_command_config = device.config[RPC_SECTION].get( server_rpc_request[DATA_PARAMETER][RPC_METHOD_PARAMETER]) if rpc_command_config is not None: self.__process_request(server_rpc_request, rpc_command_config) elif isinstance(device.config[RPC_SECTION], list): for rpc_command_config in device.config[RPC_SECTION]: if rpc_command_config[TAG_PARAMETER] == server_rpc_request[DATA_PARAMETER][ RPC_METHOD_PARAMETER]: self.__process_request(server_rpc_request, rpc_command_config) break else: log.error("Received rpc request, but method %s not found in config for %s.", server_rpc_request[DATA_PARAMETER].get(RPC_METHOD_PARAMETER), self.get_name()) self.__gateway.send_rpc_reply(server_rpc_request[DEVICE_SECTION_PARAMETER], server_rpc_request[DATA_PARAMETER][RPC_ID_PARAMETER], {server_rpc_request[DATA_PARAMETER][ RPC_METHOD_PARAMETER]: "METHOD NOT FOUND!"}) else: log.debug("Received RPC to connector: %r", server_rpc_request) except Exception as e: log.exception(e) def 
__process_request(self, content, rpc_command_config, request_type='RPC'): log.debug('Processing %s request', request_type) if rpc_command_config is not None: device = tuple(filter(lambda slave: slave.name == content[DEVICE_SECTION_PARAMETER], self.__slaves))[0] rpc_command_config[UNIT_ID_PARAMETER] = device.config['unitId'] rpc_command_config[BYTE_ORDER_PARAMETER] = device.config.get("byteOrder", "LITTLE") rpc_command_config[WORD_ORDER_PARAMETER] = device.config.get("wordOrder", "LITTLE") self.__connect_to_current_master(device) if rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (6, 16): converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config, content) try: rpc_command_config[PAYLOAD_PARAMETER] = converted_data[0] except IndexError and TypeError: rpc_command_config[PAYLOAD_PARAMETER] = converted_data elif rpc_command_config.get(FUNCTION_CODE_PARAMETER) in (5, 15): converted_data = device.config[DOWNLINK_PREFIX + CONVERTER_PARAMETER].convert(rpc_command_config, content) rpc_command_config[PAYLOAD_PARAMETER] = converted_data try: response = self.__function_to_device(device, rpc_command_config) except Exception as e: log.exception(e) response = e if isinstance(response, (ReadRegistersResponseBase, ReadBitsResponseBase)): to_converter = { RPC_SECTION: {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: {"data_sent": rpc_command_config, "input_data": response}}} response = device.config[ UPLINK_PREFIX + CONVERTER_PARAMETER].convert( config={**device.config, BYTE_ORDER_PARAMETER: device.byte_order, WORD_ORDER_PARAMETER: device.word_order }, data=to_converter) log.debug("Received %s method: %s, result: %r", request_type, content[DATA_PARAMETER][RPC_METHOD_PARAMETER], response) elif isinstance(response, (WriteMultipleRegistersResponse, WriteMultipleCoilsResponse, WriteSingleCoilResponse, WriteSingleRegisterResponse)): log.debug("Write %r", str(response)) response = {"success": True} if content.get(RPC_ID_PARAMETER) or ( content.get(DATA_PARAMETER) is not None and content[DATA_PARAMETER].get(RPC_ID_PARAMETER)): if isinstance(response, Exception): self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER], content[DATA_PARAMETER][RPC_ID_PARAMETER], {content[DATA_PARAMETER][RPC_METHOD_PARAMETER]: str(response)}) else: self.__gateway.send_rpc_reply(content[DEVICE_SECTION_PARAMETER], content[DATA_PARAMETER][RPC_ID_PARAMETER], response) log.debug("%r", response)
50.389189
121
0.567582
24,704
0.883358
0
0
7,656
0.273761
0
0
3,458
0.12365
71b2acdd2d92ff5dd5a3e30aa5f776064be270a0
966
py
Python
specs/test_gru_on_flat_babyai.py
xwu20/wmg_agent
25378c8fc54eb6e0e8c9d969760a72e843572f09
[ "MIT" ]
23
2020-07-08T15:58:51.000Z
2022-01-13T04:22:03.000Z
specs/test_gru_on_flat_babyai.py
xwu20/wmg_agent
25378c8fc54eb6e0e8c9d969760a72e843572f09
[ "MIT" ]
3
2021-06-08T21:58:37.000Z
2022-01-13T03:00:32.000Z
specs/test_gru_on_flat_babyai.py
xwu20/wmg_agent
25378c8fc54eb6e0e8c9d969760a72e843572f09
[ "MIT" ]
11
2020-07-31T11:13:29.000Z
2021-11-10T08:37:12.000Z
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

### CONTROLS (non-tunable) ###

# general
TYPE_OF_RUN = test_episodes  # train, test, test_episodes, render
NUM_EPISODES_TO_TEST = 1000
MIN_FINAL_REWARD_FOR_SUCCESS = 1.0
LOAD_MODEL_FROM = models/gru_flat_babyai.pth
SAVE_MODELS_TO = None

# worker.py
ENV = BabyAI_Env
ENV_RANDOM_SEED = 1
AGENT_RANDOM_SEED = 1
REPORTING_INTERVAL = 1
TOTAL_STEPS = 1
ANNEAL_LR = False

# A3cAgent
AGENT_NET = GRU_Network

# BabyAI_Env
BABYAI_ENV_LEVEL = BabyAI-GoToLocal-v0
USE_SUCCESS_RATE = True
SUCCESS_RATE_THRESHOLD = 0.99
HELDOUT_TESTING = False
NUM_TEST_EPISODES = 10000
OBS_ENCODER = Flat
BINARY_REWARD = True

### HYPERPARAMETERS (tunable) ###

# A3cAgent
A3C_T_MAX = 4
LEARNING_RATE = 4e-05
DISCOUNT_FACTOR = 0.9
GRADIENT_CLIP = 512.0
ENTROPY_TERM_STRENGTH = 0.02
ADAM_EPS = 1e-12
REWARD_SCALE = 2.0
WEIGHT_DECAY = 0.

# RNNs
NUM_RNN_UNITS = 96
OBS_EMBED_SIZE = 512
AC_HIDDEN_LAYER_SIZE = 4096
19.714286
65
0.774327
0
0
0
0
0
0
0
0
234
0.242236
71b31d76fcd9783bbf00ab94b135126e5908e931
3,474
bzl
Python
haskell/private/actions/runghc.bzl
meisterT/rules_haskell
7c0a867fc23da104ea8cbff26864894abcf137bc
[ "Apache-2.0" ]
null
null
null
haskell/private/actions/runghc.bzl
meisterT/rules_haskell
7c0a867fc23da104ea8cbff26864894abcf137bc
[ "Apache-2.0" ]
null
null
null
haskell/private/actions/runghc.bzl
meisterT/rules_haskell
7c0a867fc23da104ea8cbff26864894abcf137bc
[ "Apache-2.0" ]
null
null
null
"""runghc support""" load(":private/context.bzl", "render_env") load(":private/packages.bzl", "expose_packages", "pkg_info_to_compile_flags") load( ":private/path_utils.bzl", "link_libraries", "ln", "target_unique_name", ) load( ":private/set.bzl", "set", ) load(":providers.bzl", "get_ghci_extra_libs") load("@bazel_skylib//lib:shell.bzl", "shell") def build_haskell_runghc( hs, runghc_wrapper, user_compile_flags, extra_args, hs_info, cc_info, output, package_databases, version, lib_info = None): """Build runghc script. Args: hs: Haskell context. hs_info: HaskellInfo. package_databases: package caches excluding the cache file of the package we're creating a runghc for. lib_info: If we're building runghc for a library target, pass HaskellLibraryInfo here, otherwise it should be None. Returns: None. """ (pkg_info_inputs, args) = pkg_info_to_compile_flags( hs, pkg_info = expose_packages( package_ids = hs.package_ids, package_databases = package_databases, version = version, ), prefix = "runghc-", ) if lib_info != None: for idir in set.to_list(hs_info.import_dirs): args += ["-i{0}".format(idir)] (ghci_extra_libs, ghc_env) = get_ghci_extra_libs( hs, cc_info, path_prefix = "$RULES_HASKELL_EXEC_ROOT", ) link_libraries(ghci_extra_libs, args) runghc_file = hs.actions.declare_file(target_unique_name(hs, "runghc")) # Extra arguments. # `compiler flags` is the default set of arguments for runghc, # augmented by `extra_args`. # The ordering is important, first compiler flags (from toolchain # and local rule), then from `extra_args`. This way the more # specific arguments are listed last, and then have more priority in # GHC. # Note that most flags for GHCI do have their negative value, so a # negative flag in `extra_args` can disable a positive flag set # in `user_compile_flags`, such as `-XNoOverloadedStrings` will disable # `-XOverloadedStrings`. args += hs.toolchain.compiler_flags + user_compile_flags + hs.toolchain.repl_ghci_args # ghc args need to be wrapped up in "--ghc-arg=" when passing to runghc runcompile_flags = ["--ghc-arg=%s" % a for a in args] runcompile_flags += extra_args hs.actions.expand_template( template = runghc_wrapper, output = runghc_file, substitutions = { "{ENV}": render_env(ghc_env), "{TOOL}": hs.tools.runghc.path, "{CC}": hs.toolchain.cc_wrapper.executable.path, "{ARGS}": " ".join([shell.quote(a) for a in runcompile_flags]), }, is_executable = True, ) # XXX We create a symlink here because we need to force # hs.tools.runghc and the best way to do that is # to use hs.actions.run. That action, in turn must produce # a result, so using ln seems to be the only sane choice. extra_inputs = depset(transitive = [ depset([ hs.tools.runghc, runghc_file, ]), package_databases, pkg_info_inputs, ghci_extra_libs, hs_info.source_files, hs.toolchain.cc_wrapper.runfiles.files, ]) ln(hs, runghc_file, output, extra_inputs)
31.017857
90
0.627231
0
0
0
0
0
0
0
0
1,592
0.458261
71b4b6265ccad83e3c8c7743ef9150f9f16b46b0
8,456
py
Python
tests/dicom/test_header_tweaks.py
pymedphys/pymedphys-archive-2019
6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e
[ "Apache-2.0" ]
1
2020-12-20T14:13:56.000Z
2020-12-20T14:13:56.000Z
tests/dicom/test_header_tweaks.py
pymedphys/pymedphys-archive-2019
6bb7c8d0da2e93ff56469bb47e65b15ece2ea25e
[ "Apache-2.0" ]
6
2020-10-06T15:36:46.000Z
2022-02-27T05:15:17.000Z
tests/dicom/test_header_tweaks.py
cpbhatt/pymedphys
177b3db8e2a6e83c44835d0007d1d5c7a420fd99
[ "Apache-2.0" ]
1
2020-12-20T14:14:00.000Z
2020-12-20T14:14:00.000Z
# Copyright (C) 2019 Cancer Care Associates # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import subprocess import uuid import numpy as np import pydicom from pymedphys._dicom.create import dicom_dataset_from_dict from pymedphys._dicom.header import ( RED_adjustment_map_from_structure_names, adjust_machine_name, adjust_RED_by_structure_name, adjust_rel_elec_density, ) from pymedphys._dicom.utilities import remove_file HERE = os.path.dirname(__file__) ORIGINAL_DICOM_FILENAME = os.path.join( HERE, "scratch", "original-{}.dcm".format(str(uuid.uuid4())) ) ADJUSTED_DICOM_FILENAME = os.path.join( HERE, "scratch", "adjusted-{}.dcm".format(str(uuid.uuid4())) ) def compare_dicom_cli(command, original, expected): pydicom.write_file(ORIGINAL_DICOM_FILENAME, original) try: subprocess.check_call(command) cli_adjusted_ds = pydicom.read_file(ADJUSTED_DICOM_FILENAME, force=True) assert str(cli_adjusted_ds) == str(expected) finally: remove_file(ORIGINAL_DICOM_FILENAME) remove_file(ADJUSTED_DICOM_FILENAME) def test_adjust_machine_name(): new_name = "new_name" original_ds = dicom_dataset_from_dict( { "BeamSequence": [ {"TreatmentMachineName": "hello"}, {"TreatmentMachineName": "george"}, ] } ) expected_ds = dicom_dataset_from_dict( { "BeamSequence": [ {"TreatmentMachineName": new_name}, {"TreatmentMachineName": new_name}, ] } ) adjusted_ds = adjust_machine_name(original_ds, new_name) assert adjusted_ds != original_ds assert adjusted_ds == expected_ds command = "pymedphys dicom adjust-machine-name".split() + [ ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME, new_name, ] compare_dicom_cli(command, original_ds, expected_ds) def test_electron_density_append(): adjustment_map = { "to_be_changed 1": 1.0, "to_be_changed 2": 0.5, "to_be_changed 3": 1.5, } excess_adjustment_map = {**adjustment_map, **{"this_structure_doesnt_exist": 1.0}} original_ds = dicom_dataset_from_dict( { "StructureSetROISequence": [ {"ROINumber": 1, "ROIName": "to_be_changed 1"}, {"ROINumber": 2, "ROIName": "dont_change_me"}, {"ROINumber": 10, "ROIName": "to_be_changed 2"}, {"ROINumber": 99, "ROIName": "to_be_changed 3"}, ], "RTROIObservationsSequence": [ { "ReferencedROINumber": 1, "ROIPhysicalPropertiesSequence": [ { "ROIPhysicalProperty": "EFFECTIVE_Z", "ROIPhysicalPropertyValue": 6, } ], }, {"ReferencedROINumber": 2}, {"ReferencedROINumber": 10}, { "ReferencedROINumber": 99, "ROIPhysicalPropertiesSequence": [ { "ROIPhysicalProperty": "REL_ELEC_DENSITY", "ROIPhysicalPropertyValue": 0, } ], }, ], } ) expected_ds = dicom_dataset_from_dict( { "RTROIObservationsSequence": [ { "ReferencedROINumber": 1, "ROIPhysicalPropertiesSequence": [ { "ROIPhysicalProperty": "EFFECTIVE_Z", "ROIPhysicalPropertyValue": 6, }, { "ROIPhysicalProperty": "REL_ELEC_DENSITY", "ROIPhysicalPropertyValue": adjustment_map[ "to_be_changed 1" ], }, ], }, {"ReferencedROINumber": 2}, { "ReferencedROINumber": 10, "ROIPhysicalPropertiesSequence": [ { "ROIPhysicalProperty": "REL_ELEC_DENSITY", "ROIPhysicalPropertyValue": adjustment_map[ "to_be_changed 2" ], } ], }, { 
"ReferencedROINumber": 99, "ROIPhysicalPropertiesSequence": [ { "ROIPhysicalProperty": "REL_ELEC_DENSITY", "ROIPhysicalPropertyValue": adjustment_map[ "to_be_changed 3" ], } ], }, ] }, template_ds=original_ds, ) adjusted_ds = adjust_rel_elec_density(original_ds, adjustment_map) assert adjusted_ds != original_ds assert str(expected_ds) == str(adjusted_ds) adjusted_with_excess_ds = adjust_rel_elec_density( original_ds, excess_adjustment_map, ignore_missing_structure=True ) assert adjusted_with_excess_ds != original_ds assert str(expected_ds) == str(adjusted_with_excess_ds) excess_adjustment_map_as_list = [ ["{}".format(key), item] for key, item in excess_adjustment_map.items() ] excess_adjustment_map_flat = np.concatenate(excess_adjustment_map_as_list).tolist() command = ( "pymedphys dicom adjust-RED -i ".split() + [ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME] + excess_adjustment_map_flat ) compare_dicom_cli(command, original_ds, expected_ds) def test_structure_name_parse(): structure_names = [ "a RED=1", "b", "c", "d RED=2.2", "e red = 3", "f", "g Red: 4.7", "h RED=0.5 ", ] expected_adjustment_map = { "a RED=1": 1, "d RED=2.2": 2.2, "e red = 3": 3, "g Red: 4.7": 4.7, "h RED=0.5 ": 0.5, } adjustment_map = RED_adjustment_map_from_structure_names(structure_names) assert expected_adjustment_map == adjustment_map def test_structure_name_based_RED_append(): electron_density_to_use = 0.5 original_ds = dicom_dataset_from_dict( { "StructureSetROISequence": [ { "ROINumber": 1, "ROIName": "a_structure RED={}".format(electron_density_to_use), }, {"ROINumber": 2, "ROIName": "dont_change_me"}, ], "RTROIObservationsSequence": [ {"ReferencedROINumber": 1}, {"ReferencedROINumber": 2}, ], } ) expected_ds = dicom_dataset_from_dict( { "RTROIObservationsSequence": [ { "ReferencedROINumber": 1, "ROIPhysicalPropertiesSequence": [ { "ROIPhysicalProperty": "REL_ELEC_DENSITY", "ROIPhysicalPropertyValue": electron_density_to_use, } ], }, {"ReferencedROINumber": 2}, ] }, template_ds=original_ds, ) adjusted_ds = adjust_RED_by_structure_name(original_ds) assert adjusted_ds != original_ds assert str(expected_ds) == str(adjusted_ds) command = "pymedphys dicom adjust-RED-by-structure-name".split() + [ ORIGINAL_DICOM_FILENAME, ADJUSTED_DICOM_FILENAME, ] compare_dicom_cli(command, original_ds, expected_ds)
30.637681
87
0.534532
0
0
0
0
0
0
0
0
2,404
0.284295
71b4b95cd8eac603e64cc2b55ede32f9146ce21d
1,929
py
Python
tests/components/http/test_data_validator.py
itewk/home-assistant
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
[ "Apache-2.0" ]
23
2017-11-15T21:03:53.000Z
2021-03-29T21:33:48.000Z
tests/components/http/test_data_validator.py
itewk/home-assistant
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
[ "Apache-2.0" ]
39
2016-12-16T12:40:34.000Z
2017-02-13T17:53:42.000Z
tests/components/http/test_data_validator.py
itewk/home-assistant
769cf19052f8c9ef374d8ba8ae7705ccc7bf4cf4
[ "Apache-2.0" ]
10
2018-01-01T00:12:51.000Z
2021-12-21T23:08:05.000Z
"""Test data validator decorator.""" from unittest.mock import Mock from aiohttp import web import voluptuous as vol from homeassistant.components.http import HomeAssistantView from homeassistant.components.http.data_validator import RequestDataValidator async def get_client(aiohttp_client, validator): """Generate a client that hits a view decorated with validator.""" app = web.Application() app["hass"] = Mock(is_running=True) class TestView(HomeAssistantView): url = "/" name = "test" requires_auth = False @validator async def post(self, request, data): """Test method.""" return b"" TestView().register(app, app.router) client = await aiohttp_client(app) return client async def test_validator(aiohttp_client): """Test the validator.""" client = await get_client( aiohttp_client, RequestDataValidator(vol.Schema({vol.Required("test"): str})) ) resp = await client.post("/", json={"test": "bla"}) assert resp.status == 200 resp = await client.post("/", json={"test": 100}) assert resp.status == 400 resp = await client.post("/") assert resp.status == 400 async def test_validator_allow_empty(aiohttp_client): """Test the validator with empty data.""" client = await get_client( aiohttp_client, RequestDataValidator( vol.Schema( { # Although we allow empty, our schema should still be able # to validate an empty dict. vol.Optional("test"): str } ), allow_empty=True, ), ) resp = await client.post("/", json={"test": "bla"}) assert resp.status == 200 resp = await client.post("/", json={"test": 100}) assert resp.status == 400 resp = await client.post("/") assert resp.status == 200
27.169014
85
0.610679
223
0.115604
0
0
109
0.056506
1,663
0.862105
354
0.183515
71b4ce87227b2fcaa01e098fed2fec676e7173d5
7,410
py
Python
Conversely_Frontend/app/Server/ukjp/templates.py
sam-aldis/Conversley
1fc30d6b768cc03f727229a52e0879fac3af1e3a
[ "MIT" ]
null
null
null
Conversely_Frontend/app/Server/ukjp/templates.py
sam-aldis/Conversley
1fc30d6b768cc03f727229a52e0879fac3af1e3a
[ "MIT" ]
null
null
null
Conversely_Frontend/app/Server/ukjp/templates.py
sam-aldis/Conversley
1fc30d6b768cc03f727229a52e0879fac3af1e3a
[ "MIT" ]
null
null
null
import days STAGE_INIT = 0 STAGE_CHALLENGE_INIT = 1 STAGE_BOOKED = 2 def createJSONTemplate(data): pass messages = [ "Hey {{first_name}}, thankyou for your enquiry to be one of our Transformation Challengers", "We have 2 Challenges available for you:\n\nThe 8 Week Bikini Challenge which helps you shed 3-9kg of unwanted body fat, flattens your tummy and tones your arms, abs, legs and butt.\n\nOr our 9in6 Challenge which helps you drop 9+kgs of pure fat in just 6 Weeks.", "Please choose which challenge information you would like below..." ] callbacks = { "INIT_8WBC" : [ { "type": "message", "text" : "Thank you {{first_name}},\n\ The FREE 8 Week Bikini Challenge is a done for you - step by step PROVEN program that helps you lose the 3-7kg of unwanted body fat, flatten your tummy and tone your arms, legs and butt.\n\ \n\ This is your chance to transform your body in just 8 weeks for FREE" }, { "type" : "message", "text" : "In exchange for the program being FREE....we ask that you allow us to share your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. \n\ (Please note, a small refundable deposit applies to keep you motivated throughout the 8 weeks)" }, { "type": "message", "text": "The challenge is starting Monday 12th of June and to start your 8 Week Bikini Challenge, we just require you to attend the upcoming information meeting at the facility to quickly go over the program in person. \n\ \n\ There is absolutely no high pressure sales or obligation to join. Simply a meet and chat.\n\ \n\ To RSVP to the meeting click a suitable date below" }, { "type" : "json", "template" : "init_8wbc" } ], "INIT_9IN6" : [ { "type" : "message", "text" : "Thank you {{first_name}},\n\ The 9in6 Transformation Challenge is a done for you - step by step PROVEN program that helps you lose 9kg kilos of unwanted body fat, flatten your tummy and tone your arms, legs and butt in just 6 weeks.\n\ \ \nThis is your chance to transform your body in just 6 weeks for FREE!" }, { "type" : "message", "text" : "In exchange for the program, we ask that you allow us to showcase your transformation story on our Facebook fan page for marketing purposes to help motivate and inspire the ladies of Perth. When you complete the program its FREE. \n\ Please note, a small refundable \"incentive deposit\" applies to keep you motivated throughout the 6 weeks." }, { "type" : "message", "text" : "The challenge is starting Monday 12th of June and to start your 9kg 6-week challenge, we require you to attend the upcoming information meeting where we explain the program in person. \n\ \n\ There is absolutely no high pressure sales or obligation to join at the end, just an opportunity for you learn about the program and how you can lose 9kg in 6 weeks for FREE\n\ \n\ To RSVP to the meeting click a suitable date below" }, { "type" : "json", "template" : "init_9in6" } ], "TIME_TABLE_8WBC" : [ { "type" : "message", "text" : "Sure here's our lesson time table.." 
}, { "type" : "file", "url" : "http://thetransformationcentre.com.au/img/timetable.pdf" }, { "type" : "json", "template" : "init_8wbc" } ] } def build_json_templates(): JSON_TEMPLATES = { "init" :{ "template_type" : "generic", "elements" : [ { "title" : "The Transformation Centre", "image_url" : "http://thetransformationcentre.com.au/img/spinner/1.png", "subtitle":"Choose one of our Challenges below", "buttons":[ { "type":"postback", "payload":"INIT_8WBC", "title":"8 Week Bikini Challenge" },{ "type":"postback", "title":"9kg 6 Week Challenge", "payload":"INIT_9IN6" } ] } ] }, "init_8wbc" : { "template_type" : "generic", "elements" : [ { "title" : "8 Week Bikini Challenge Meeting", "subtitle":"RSVP by clicking a suitable data below", "buttons":[ # { # "type":"postback", # "payload":"BOOK_CONSULT_8WBC_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1], # "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1] # } # }, { "type":"postback", "title": "Sat 10th June 09.45", "payload":"BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945" } ] } ] }, "init_9in6" : { "template_type" : "generic", "elements" : [ { "title" : "9kg 6 Week Challenge Info Meeting", "subtitle":"RSVP by clicking a suitable date below", "buttons":[ # { # "type":"postback", # "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(1)[2] + "_DAY_" + days.getAppointmentDates(1)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[1], # "title":days.getAppointmentDates(1)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(1)[0])[0] + " " + days.getAppointmentDates(1)[1] # } { "type":"postback", "title": "Sat 10th June 09.45", "payload":"BOOK_CONSULT_8WBC_DATE_10.05.2017_DAY_SATURDAY_TIME_0945" } # ,{ # "type":"postback", # "title": days.getAppointmentDates(2)[0].title() + " " + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[0] + " " + days.getAppointmentDates(2)[1], # "payload":"BOOK_CONSULT_9KG6WK_DATE_" + days.getAppointmentDates(2)[2] + "_DAY_" + days.getAppointmentDates(2)[0] + "_TIME_" + days.getAppointmentTimesForDay(days.getAppointmentDates(2)[0])[1] # } ] } ] } } return JSON_TEMPLATES
47.197452
276
0.523752
0
0
0
0
0
0
0
0
4,923
0.664372
71b54a23f9d4b30c276bd6f326098f146a43547e
1,349
py
Python
var/spack/repos/builtin/packages/pagmo2/package.py
jeanbez/spack
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
null
null
null
var/spack/repos/builtin/packages/pagmo2/package.py
jeanbez/spack
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
8
2021-11-09T20:28:40.000Z
2022-03-15T03:26:33.000Z
var/spack/repos/builtin/packages/pagmo2/package.py
jeanbez/spack
f4e51ce8f366c85bf5aa0eafe078677b42dae1ba
[ "ECL-2.0", "Apache-2.0", "MIT-0", "MIT" ]
2
2019-02-08T20:37:20.000Z
2019-03-31T15:19:26.000Z
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)

from spack.package import *


class Pagmo2(CMakePackage):
    """Parallel Global Multiobjective Optimizer (and its Python alter ego
    PyGMO) is a C++ / Python platform to perform parallel computations of
    optimisation tasks (global and local) via the asynchronous generalized
    island model."""

    homepage = "https://esa.github.io/pagmo2/"
    url = "https://github.com/esa/pagmo2/archive/v2.18.0.tar.gz"
    git = "https://github.com/esa/pagmo2.git"

    maintainers = ['liuyangzhuan']

    version('master', branch='master')
    version('2.18.0', sha256='5ad40bf3aa91857a808d6b632d9e1020341a33f1a4115d7a2b78b78fd063ae31')

    depends_on('boost+system+serialization+thread')
    depends_on('intel-tbb')
    depends_on('mpi')
    depends_on('[email protected]:', type='build')

    variant('shared', default=True, description='Build shared libraries')

    def cmake_args(self):
        spec = self.spec

        args = [
            '-DCMAKE_C_COMPILER=%s' % spec['mpi'].mpicc,
            '-DCMAKE_CXX_COMPILER=%s' % spec['mpi'].mpicxx,
            self.define_from_variant('BUILD_SHARED_LIBS', 'shared'),
        ]

        return args
33.725
96
0.679021
1,120
0.830245
0
0
0
0
0
0
843
0.624907
71b725d9d3a609a2e8415f6bcdfe99ef3f2dd580
4,984
py
Python
interferogram/sentinel/fetchCalES.py
earthobservatory/ariamh-pub
f33731e127f38ff33b02e02c07b16793c07651a6
[ "Apache-2.0" ]
4
2019-11-19T03:35:35.000Z
2020-12-07T18:43:11.000Z
interferogram/sentinel/fetchCalES.py
earthobservatory/ariamh-pub
f33731e127f38ff33b02e02c07b16793c07651a6
[ "Apache-2.0" ]
3
2019-06-05T03:35:55.000Z
2020-04-09T14:16:08.000Z
interferogram/sentinel/fetchCalES.py
earthobservatory/ariamh-pub
f33731e127f38ff33b02e02c07b16793c07651a6
[ "Apache-2.0" ]
6
2019-08-23T22:53:11.000Z
2021-11-06T15:15:30.000Z
#!/usr/bin/env python3
import os, sys, re, json, requests, datetime, tarfile, argparse
from pprint import pprint
import numpy as np
from utils.UrlUtils import UrlUtils


server = 'https://qc.sentinel1.eo.esa.int/'

cal_re = re.compile(r'S1\w_AUX_CAL')


def cmdLineParse():
    '''
    Command line parser.
    '''
    parser = argparse.ArgumentParser(description='Fetch calibration auxiliary files ingested into HySDS')
    parser.add_argument('-o', '--output', dest='outdir', type=str, default='.',
                        help='Path to output directory')
    parser.add_argument('-d', '--dry-run', dest='dry_run', action='store_true',
                        help="Don't download anything; just output the URLs")
    return parser.parse_args()


def download_file(url, outdir='.', session=None):
    '''
    Download file to specified directory.
    '''
    if session is None:
        session = requests.session()
    path = "%s.tgz" % os.path.join(outdir, os.path.basename(url))
    print('Downloading URL: ', url)
    request = session.get(url, stream=True, verify=False)
    request.raise_for_status()
    with open(path, 'wb') as f:
        for chunk in request.iter_content(chunk_size=1024):
            if chunk:
                f.write(chunk)
                f.flush()
    return path


def untar_file(path, outdir):
    '''
    Extract aux cal files.
    '''
    if not tarfile.is_tarfile(path):
        raise RuntimeError("%s is not a tarfile." % path)
    with tarfile.open(path) as f:
        f.extractall(outdir)


def get_active_ids(es_url):
    """Query for the active calibration IDs."""
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"_id": "S1_AUX_CAL_ACTIVE"}},
                ]
            }
        },
        "sort": [
            {"starttime": {"order": "desc"}}
        ]
    }
    es_index = "grq_*_s1-aux_cal_active"
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code == 200:
        result = r.json()
        #pprint(result)
        total = result['hits']['total']
        if total == 0:
            raise RuntimeError("Failed to find S1_AUX_CAL_ACTIVE at %s." % search_url)
        return result['hits']['hits'][0]['_source']['metadata']['active_ids']
    else:
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()


def get_cal_url(id, es_url):
    """Query for the active calibration url."""
    query = {
        "query": {
            "bool": {
                "must": [
                    {"term": {"_id": id}},
                ]
            }
        },
        "fields": ["urls", "metadata.archive_filename"]
    }
    es_index = "grq_*_s1-aux_cal"
    if es_url.endswith('/'):
        search_url = '%s%s/_search' % (es_url, es_index)
    else:
        search_url = '%s/%s/_search' % (es_url, es_index)
    r = requests.post(search_url, data=json.dumps(query))
    if r.status_code == 200:
        result = r.json()
        pprint(result)
        total = result['hits']['total']
        if total == 0:
            raise RuntimeError("Failed to find %s at %s." % (id, search_url))
        urls = result['hits']['hits'][0]['fields']['urls']
        archive_fname = result['hits']['hits'][0]['fields']['metadata.archive_filename'][0]
        url = [x for x in urls if x.startswith('http')][0]
        #print(urls)
        #print(url)
        #print(archive_fname)
        return os.path.join(url, archive_fname)
    else:
        print("Failed to query %s:\n%s" % (es_url, r.text), file=sys.stderr)
        print("query: %s" % json.dumps(query, indent=2), file=sys.stderr)
        print("returned: %s" % r.text, file=sys.stderr)
        r.raise_for_status()


def fetch(outdir, dry_run):
    # get endpoint configurations
    uu = UrlUtils()
    es_url = uu.rest_url

    # get active calibration ids
    active_ids = get_active_ids(es_url)
    print(active_ids)

    # get urls for active calibration files
    cal_urls = [get_cal_url(i, es_url) for i in active_ids]
    print(cal_urls)
    if len(cal_urls) == 0:
        print('Failed to find calibration auxiliary files')

    if dry_run:
        print('\n'.join(cal_urls))
    else:
        if not os.path.isdir(outdir):
            os.makedirs(outdir)
        for cal_url in cal_urls:
            try:
                cal_file = download_file(cal_url, outdir)
            except:
                print('Failed to download URL: ', cal_url)
                raise
            try:
                cal_dir = untar_file(cal_file, outdir)
            except:
                print('Failed to untar: ', cal_file)
                raise
            os.unlink(cal_file)


if __name__ == '__main__':
    inps = cmdLineParse()
    fetch(inps.outdir, inps.dry_run)
29.491124
105
0.573234
0
0
0
0
0
0
0
0
1,359
0.272673
71b74d81702689c7914ede59827af8b7196bc18b
2,590
py
Python
www/conservancy/urls.py
stain/conservancy-website
9e41ddff766fe517a99198d60701193e8b68415e
[ "0BSD" ]
null
null
null
www/conservancy/urls.py
stain/conservancy-website
9e41ddff766fe517a99198d60701193e8b68415e
[ "0BSD" ]
null
null
null
www/conservancy/urls.py
stain/conservancy-website
9e41ddff766fe517a99198d60701193e8b68415e
[ "0BSD" ]
null
null
null
# Copyright 2005-2008, James Garrison
# Copyright 2010, 2012 Bradley M. Kuhn

# This software's license gives you freedom; you can copy, convey,
# propagate, redistribute, modify and/or redistribute modified versions of
# this program under the terms of the GNU Affero General Public License
# (AGPL) as published by the Free Software Foundation (FSF), either
# version 3 of the License, or (at your option) any later version of the
# AGPL published by the FSF.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Affero
# General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program in a file in the toplevel directory called
# "AGPLv3".  If not, see <http://www.gnu.org/licenses/>.

from django.conf.urls import url, include
from django.contrib import admin, admindocs

from conservancy import feeds, frontpage, sponsors
import conservancy.apps.fundgoal.views as fundgoal_views
import conservancy.static.views as static_views

admin.autodiscover()

urlpatterns = [
    url(r'^$', frontpage.view),
    url(r'^sponsors$', frontpage.view),
    url(r'^sponsors/$', sponsors.view),
    url(r'^sponsors/index.html$', sponsors.view),
    url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    url(r'^admin/', admin.site.urls),
    url(r'^feeds/blog/?$', feeds.BlogFeed()),
    url(r'^feeds/news/?$', feeds.PressReleaseFeed()),
    url(r'^feeds/omnibus/?$', feeds.OmnibusFeed()),
    url(r'^feeds/?$', feeds.view),
    url(r'^news(/|$)', include('conservancy.apps.news.urls')),
    url(r'^blog(/|$)', include('conservancy.apps.blog.urls')),
    # formerly static templated things... (dirs with templates)
    url(r'^error/(40[134]|500)(?:/index\.html|/|)$', static_views.handler),
    url(r'^error', static_views.index),
    url(r'^about', static_views.index),
    url(r'^donate', static_views.index),
    url(r'^copyleft-compliance', static_views.index,
        {'fundraiser_sought' : 'vmware-match-0'}),
    url(r'^projects', static_views.index),
    url(r'^npoacct', static_views.index,
        {'fundraiser_sought' : 'npoacct'}),
    url(r'^contractpatch', include('conservancy.apps.contractpatch.urls')),
    url(r'^overview', static_views.index),
    url(r'^privacy-policy', static_views.index),
    url(r'^supporter', include('conservancy.apps.supporter.urls')),
    url(r'^fundraiser_data', fundgoal_views.view),
]
44.655172
75
0.699614
0
0
0
0
0
0
0
0
1,545
0.596525
71b901299fb22334462ebfb480d8b6d820375ea4
1,430
py
Python
graphene_spike_tests/acceptances/test_query.py
FabienArcellier/spike-graphene-flask
bc7bce571a21826c3da852eb1c2e1904bbab99b4
[ "MIT" ]
1
2021-03-18T00:19:53.000Z
2021-03-18T00:19:53.000Z
graphene_spike_tests/acceptances/test_query.py
FabienArcellier/spike-graphene-flask
bc7bce571a21826c3da852eb1c2e1904bbab99b4
[ "MIT" ]
null
null
null
graphene_spike_tests/acceptances/test_query.py
FabienArcellier/spike-graphene-flask
bc7bce571a21826c3da852eb1c2e1904bbab99b4
[ "MIT" ]
null
null
null
import unittest
from unittest.mock import Mock

from graphene import Schema
from graphene.test import Client

from graphene_spike.query import Query


class MainTest(unittest.TestCase):

    def setUp(self):
        self.schema = Schema(query=Query)
        self.client = client = Client(self.schema)

    def test_hello_should_work_without_argument(self):
        # Assign
        query_string = '{ hello }'

        # Acts
        executed = self.client.execute(query_string)

        # Assert
        self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 18 !"})

    def test_hello_should_write_the_giving_name(self):
        # Assign
        query_string = '{ hello(name: "Fabien") }'

        # Acts
        executed = self.client.execute(query_string)

        # Assert
        self.assertEqual(executed['data'], {"hello": "Hello Fabien, you have 18 !"})

    def test_hello_should_write_the_giving_age(self):
        # Assign
        query_string = '{ hello(age: 24) }'

        # Acts
        executed = self.client.execute(query_string)

        # Assert
        self.assertEqual(executed['data'], {"hello": "Hello stranger, you have 24 !"})

    def test_goodbye_should_giving_a_response(self):
        # Assign
        query_string = '{ goodbye }'

        # Acts
        executed = self.client.execute(query_string)

        # Assert
        self.assertEqual(executed['data'], {"goodbye": "See ya!"})
26.481481
86
0.630769
1,278
0.893706
0
0
0
0
0
0
313
0.218881
71b9373dfb805ca37a8bda9472585bd77a94fc2f
10,028
py
Python
clikan.py
davidventasmarin/clikan
401fe4053a14873872bb246739d55c55f8f6dcfa
[ "MIT" ]
null
null
null
clikan.py
davidventasmarin/clikan
401fe4053a14873872bb246739d55c55f8f6dcfa
[ "MIT" ]
null
null
null
clikan.py
davidventasmarin/clikan
401fe4053a14873872bb246739d55c55f8f6dcfa
[ "MIT" ]
null
null
null
from rich import print from rich.console import Console from rich.table import Table import click from click_default_group import DefaultGroup import yaml import os ##from terminaltables import SingleTable import sys from textwrap import wrap import collections import datetime import configparser import pkg_resources # part of setuptools VERSION = pkg_resources.require("clikan")[0].version class Config(object): """The config in this example only holds aliases.""" def __init__(self): self.path = os.getcwd() self.aliases = {} def read_config(self, filename): parser = configparser.RawConfigParser() parser.read([filename]) try: self.aliases.update(parser.items('aliases')) except configparser.NoSectionError: pass pass_config = click.make_pass_decorator(Config, ensure=True) class AliasedGroup(DefaultGroup): """This subclass of a group supports looking up aliases in a config file and with a bit of magic. """ def get_command(self, ctx, cmd_name): # Step one: bulitin commands as normal rv = click.Group.get_command(self, ctx, cmd_name) if rv is not None: return rv # Step two: find the config object and ensure it's there. This # will create the config object is missing. cfg = ctx.ensure_object(Config) # Step three: lookup an explicit command aliase in the config if cmd_name in cfg.aliases: actual_cmd = cfg.aliases[cmd_name] return click.Group.get_command(self, ctx, actual_cmd) # Alternative option: if we did not find an explicit alias we # allow automatic abbreviation of the command. "status" for # instance will match "st". We only allow that however if # there is only one command. matches = [x for x in self.list_commands(ctx) if x.lower().startswith(cmd_name.lower())] if not matches: return None elif len(matches) == 1: return click.Group.get_command(self, ctx, matches[0]) ctx.fail('Too many matches: %s' % ', '.join(sorted(matches))) def read_config(ctx, param, value): """Callback that is used whenever --config is passed. We use this to always load the correct config. This means that the config is loaded even if the group itself never executes so our aliases stay always available. """ cfg = ctx.ensure_object(Config) if value is None: value = os.path.join(os.path.dirname(__file__), 'aliases.ini') cfg.read_config(value) return value @click.version_option(VERSION) @click.command(cls=AliasedGroup, default='show', default_if_no_args=True) def clikan(): """clikan: CLI personal kanban """ @clikan.command() def configure(): """Place default config file in CLIKAN_HOME or HOME""" home = get_clikan_home() data_path = os.path.join(home, ".clikan.dat") config_path = os.path.join(home, ".clikan.yaml") if (os.path.exists(config_path) and not click.confirm('Config file exists. Do you want to overwrite?')): return with open(config_path, 'w') as outfile: conf = {'clikan_data': data_path} yaml.dump(conf, outfile, default_flow_style=False) click.echo("Creating %s" % config_path) @clikan.command() @click.argument('task') def add(task): """Add a task in todo""" if len(task) > 40: click.echo('Task must be shorter than 40 chars. 
Brevity counts.') else: config = read_config_yaml() dd = read_data(config) todos, inprogs, dones = split_items(config, dd) if ('limits' in config and 'todo' in config['limits'] and int(config['limits']['todo']) <= len(todos)): click.echo('No new todos, limit reached already.') else: od = collections.OrderedDict(sorted(dd['data'].items())) new_id = 1 if bool(od): new_id = next(reversed(od)) + 1 entry = ['todo', task, timestamp(), timestamp()] dd['data'].update({new_id: entry}) click.echo("Creating new task w/ id: %d -> %s" % (new_id, task)) write_data(config, dd) @clikan.command() @click.argument('id') def delete(id): """Delete task""" config = read_config_yaml() dd = read_data(config) item = dd['data'].get(int(id)) if item is None: click.echo('No existing task with that id.') else: item[0] = 'deleted' item[2] = timestamp() dd['deleted'].update({int(id): item}) dd['data'].pop(int(id)) write_data(config, dd) click.echo('Removed task %d.' % int(id)) @clikan.command() @click.argument('id') def promote(id): """Promote task""" config = read_config_yaml() dd = read_data(config) todos, inprogs, dones = split_items(config, dd) item = dd['data'].get(int(id)) if item[0] == 'todo': if ('limits' in config and 'wip' in config['limits'] and int(config['limits']['wip']) <= len(inprogs)): click.echo('No new tasks, limit reached already.') else: click.echo('Promoting task %s to in-progress.' % id) dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]] write_data(config, dd) elif item[0] == 'inprogress': click.echo('Promoting task %s to done.' % id) dd['data'][int(id)] = ['done', item[1], timestamp(), item[3]] write_data(config, dd) else: click.echo('Already done, can not promote %s' % id) @clikan.command() @click.argument('id') def regress(id): """Regress task""" config = read_config_yaml() dd = read_data(config) item = dd['data'].get(int(id)) if item[0] == 'done': click.echo('Regressing task %s to in-progress.' % id) dd['data'][int(id)] = ['inprogress', item[1], timestamp(), item[3]] write_data(config, dd) elif item[0] == 'inprogress': click.echo('Regressing task %s to todo.' 
% id) dd['data'][int(id)] = ['todo', item[1], timestamp(), item[3]] write_data(config, dd) else: click.echo('Already in todo, can not regress %s' % id) @clikan.command() def show(): console = Console() """Show tasks in clikan""" config = read_config_yaml() dd = read_data(config) todos, inprogs, dones = split_items(config, dd) if 'limits' in config and 'done' in config['limits']: dones = dones[0:int(config['limits']['done'])] else: dones = dones[0:10] todos = '\n'.join([str(x) for x in todos]) inprogs = '\n'.join([str(x) for x in inprogs]) dones = '\n'.join([str(x) for x in dones]) # td = [ # ['todo', 'in-progress', '[bold magenta]done[/bold magenta]'], # ['', '', ''], # ] #table = SingleTable(td, 'clikan v.{}'.format(VERSION)) # table.inner_heading_row_border = False # table.inner_row_border = True # table.justify_columns = {0: 'center', 1: 'center', 2: 'center'} table = Table(show_header=True, show_footer=True) table.add_column("[bold yellow]todo[/bold yellow]", no_wrap=True, footer="clikan") table.add_column('[bold green]in-progress[/bold green]', no_wrap=True) table.add_column('[bold magenta]done[/bold magenta]', no_wrap=True, footer="v.{}".format(VERSION)) # def wrap_lines(lines, column_index): # max_width = table.column_max_width(column_index) # packed = [line for line in lines if line.strip() != ''] # wrapped = [wrap(line, max_width, break_long_words=False, # replace_whitespace=False) for line in packed] # return '\n'.join(['\n'.join(w) for w in wrapped]) # for index, section in enumerate((todos, inprogs, dones)): # table.table_data[1][index] = wrap_lines(section.splitlines(), index) table.add_row(todos, inprogs, dones) console.print(table) #print(table.table) def read_data(config): """Read the existing data from the config datasource""" try: with open(config["clikan_data"], 'r') as stream: try: return yaml.load(stream, Loader=yaml.FullLoader) except yaml.YAMLError as exc: print("Ensure %s exists, as you specified it " "as the clikan data file." % config['clikan_data']) print(exc) except IOError: click.echo("No data, initializing data file.") write_data(config, {"data": {}, "deleted": {}}) with open(config["clikan_data"], 'r') as stream: return yaml.load(stream, Loader=yaml.FullLoader) def write_data(config, data): """Write the data to the config datasource""" with open(config["clikan_data"], 'w') as outfile: yaml.dump(data, outfile, default_flow_style=False) def get_clikan_home(): home = os.environ.get('CLIKAN_HOME') if not home: home = os.path.expanduser('~') return home def read_config_yaml(): """Read the app config from ~/.clikan.yaml""" try: home = get_clikan_home() with open(home + "/.clikan.yaml", 'r') as stream: try: return yaml.load(stream, Loader=yaml.FullLoader) except yaml.YAMLError: print("Ensure %s/.clikan.yaml is valid, expected YAML." % home) sys.exit() except IOError: print("Ensure %s/.clikan.yaml exists and is valid." % home) sys.exit() def split_items(config, dd): todos = [] inprogs = [] dones = [] for key, value in dd['data'].items(): if value[0] == 'todo': todos.append("[%d] %s" % (key, value[1])) elif value[0] == 'inprogress': inprogs.append("[%d] %s" % (key, value[1])) else: dones.insert(0, "[%d] %s" % (key, value[1])) return todos, inprogs, dones def timestamp(): return '{:%Y-%b-%d %H:%M:%S}'.format(datetime.datetime.now())
33.315615
102
0.603311
1,715
0.171021
0
0
5,356
0.534105
0
0
3,350
0.334065
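A short usage sketch for the clikan CLI in the record above (not part of the original file): it drives the configure, add, and show commands through click's test runner, assuming the package is installed and importable as `clikan` and that CLIKAN_HOME points the tool at a throwaway directory.

# hedged example; the `clikan` import path and the sample task text are assumptions
from click.testing import CliRunner
from clikan import clikan

runner = CliRunner()
with runner.isolated_filesystem() as tmp_dir:
    env = {"CLIKAN_HOME": tmp_dir}                 # keep config and data out of $HOME
    runner.invoke(clikan, ["configure"], env=env)  # writes .clikan.yaml and the data path
    runner.invoke(clikan, ["add", "write docs"], env=env)
    result = runner.invoke(clikan, ["show"], env=env)  # "show" is also the default command
    print(result.output)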
71b98f59428322523fe15276f1dd95e05126903b
1,330
py
Python
social_auth_ragtag_id/backends.py
RagtagOpen/python-social-auth-ragtag-id
8d8e005231c09535098136213347934e9da7b3f2
[ "MIT" ]
null
null
null
social_auth_ragtag_id/backends.py
RagtagOpen/python-social-auth-ragtag-id
8d8e005231c09535098136213347934e9da7b3f2
[ "MIT" ]
3
2020-03-24T16:26:22.000Z
2021-02-02T21:55:45.000Z
social_auth_ragtag_id/backends.py
RagtagOpen/python-social-auth-ragtag-id
8d8e005231c09535098136213347934e9da7b3f2
[ "MIT" ]
null
null
null
from social_core.backends.oauth import BaseOAuth2 class RagtagOAuth2(BaseOAuth2): """Ragtag ID OAuth authentication backend""" name = "ragtag" AUTHORIZATION_URL = "https://id.ragtag.org/oauth/authorize/" ACCESS_TOKEN_URL = "https://id.ragtag.org/oauth/token/" ACCESS_TOKEN_METHOD = "POST" REVOKE_TOKEN_URL = "https://id.ragtag.org/oauth/revoke_token/" SCOPE_SEPARATOR = " " ID_KEY = "id" def get_user_details(self, response): """Return user details from Ragtag ID account""" return { "username": response.get("username"), "email": response.get("email"), "first_name": response.get("first_name"), "last_name": response.get("last_name"), } def user_data(self, access_token, *args, **kwargs): """Fetches user data from id.ragtag.org""" return self.get_json( "https://id.ragtag.org/api/me/", headers={"Authorization": "Bearer {}".format(access_token)}, ) def auth_params(self, state=None): params = super(RagtagOAuth2, self).auth_params(state=state) approval_prompt = self.setting("APPROVAL_PROMPT", "auto") if not approval_prompt == "auto": params["approval_prompt"] = self.setting("APPROVAL_PROMPT", "") return params
35.945946
75
0.627068
1,277
0.96015
0
0
0
0
0
0
476
0.357895
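A hedged sketch of how a social-core backend like RagtagOAuth2 is typically enabled in a Django project; the SOCIAL_AUTH_RAGTAG_* names follow social-core's convention of deriving setting names from the backend's `name` attribute ("ragtag") and should be checked against the installed social-auth version.

# Django settings.py fragment (illustrative; credentials are placeholders)
AUTHENTICATION_BACKENDS = (
    "social_auth_ragtag_id.backends.RagtagOAuth2",
    "django.contrib.auth.backends.ModelBackend",
)
SOCIAL_AUTH_RAGTAG_KEY = "your-client-id"
SOCIAL_AUTH_RAGTAG_SECRET = "your-client-secret"
# optional: controls the approval_prompt parameter read in auth_params()
SOCIAL_AUTH_RAGTAG_APPROVAL_PROMPT = "force"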
71b9f1ca619e6a3da629a83c1ba692653be95c14
409
py
Python
panel/api/models/provider.py
angeelgarr/DCPanel
1901a0f4b1b4273b60d3a218797fb6614d05b4c0
[ "MIT" ]
7
2016-01-06T13:28:35.000Z
2020-11-30T07:35:59.000Z
panel/api/models/provider.py
angeelgarr/DCPanel
1901a0f4b1b4273b60d3a218797fb6614d05b4c0
[ "MIT" ]
null
null
null
panel/api/models/provider.py
angeelgarr/DCPanel
1901a0f4b1b4273b60d3a218797fb6614d05b4c0
[ "MIT" ]
6
2017-07-18T06:41:56.000Z
2022-01-17T07:04:44.000Z
from django.db import models from django.contrib import admin class Provider(models.Model): name = models.CharField(max_length=50) domain = models.CharField(max_length=50) class Meta: ordering = ['name'] app_label = 'api' def __str__(self): return self.domain @admin.register(Provider) class ProviderAdmin(admin.ModelAdmin): list_display = ('name', 'domain')
20.45
44
0.682152
315
0.770171
0
0
102
0.249389
0
0
25
0.061125
71b9f7585fb3ca8d7750b533bdb679556becb780
853
py
Python
trial/src/sender.py
siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling
4e8d991d55ae7da91b3c90773c679f3369a4dafa
[ "MIT" ]
9
2021-06-01T12:19:58.000Z
2022-02-28T12:30:09.000Z
trial/src/sender.py
siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling
4e8d991d55ae7da91b3c90773c679f3369a4dafa
[ "MIT" ]
1
2021-09-27T12:24:50.000Z
2021-09-27T12:24:50.000Z
trial/src/sender.py
siddharthumakarthikeyan/Cable-Driven-Parallel-Robots-CDPR-Modelling
4e8d991d55ae7da91b3c90773c679f3369a4dafa
[ "MIT" ]
1
2021-08-02T00:48:11.000Z
2021-08-02T00:48:11.000Z
#!/usr/bin/env python
# license removed for brevity
import rospy
from std_msgs.msg import String
from gazebo_msgs.msg import LinkState

def talker():
    pub = rospy.Publisher('/gazebo/set_link_state', LinkState, queue_size=10)
    ppp = LinkState()
    rospy.init_node('talker', anonymous=True)
    rate = rospy.Rate(100)  # 100 Hz loop rate
    i = 1
    while not rospy.is_shutdown():
        # publish a fixed pose for the "platform" link on every iteration
        ppp.link_name = "platform"
        ppp.pose.position.x = 0.1
        ppp.pose.position.y = 0.1
        ppp.pose.position.z = 1
        ppp.pose.orientation.x = 0
        ppp.pose.orientation.y = 0
        ppp.pose.orientation.z = 0
        ppp.pose.orientation.w = 1  # identity orientation; a valid quaternion needs w = 1, not all zeros
        i = i+1
        rospy.loginfo(ppp)
        pub.publish(ppp)
        rate.sleep()

if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
24.371429
77
0.614302
0
0
0
0
0
0
0
0
108
0.126612
71bb038e552d16449011833ef1582532136fc5b7
1,073
py
Python
discriminator_dataset.py
kimmokal/CC-Art-Critics
af83762a5f22043f279c167cbd58e16737e3ec87
[ "MIT" ]
null
null
null
discriminator_dataset.py
kimmokal/CC-Art-Critics
af83762a5f22043f279c167cbd58e16737e3ec87
[ "MIT" ]
null
null
null
discriminator_dataset.py
kimmokal/CC-Art-Critics
af83762a5f22043f279c167cbd58e16737e3ec87
[ "MIT" ]
null
null
null
import torch from os import listdir, path from PIL import Image import torchvision class DiscriminatorDataset(torch.utils.data.Dataset): def __init__(self): super(DiscriminatorDataset, self).__init__() currentDir = path.dirname(__file__) abstractDir = path.join(currentDir, 'image_data/abstract') realisticDir = path.join(currentDir, 'image_data/realistic') abstractFiles = [path.join(abstractDir, f) for f in listdir( abstractDir) if path.isfile(path.join(abstractDir, f))] realisticFiles = [path.join(realisticDir, f) for f in listdir( realisticDir) if path.isfile(path.join(realisticDir, f))] self.abstractFilesLen = len(abstractFiles) self.allFiles = abstractFiles + realisticFiles def __len__(self): return len(self.allFiles) def __getitem__(self, index): filename = self.allFiles[index] pilImage = Image.open(filename).convert("RGB") return (torchvision.transforms.ToTensor()(pilImage), 1 if index < self.abstractFilesLen else 0)
38.321429
103
0.692451
987
0.919851
0
0
0
0
0
0
48
0.044734
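A minimal consumption sketch for the dataset class above (not from the repo): it assumes the two image_data subdirectories exist next to the module and that the images share a common size, otherwise a Resize transform would be needed before batching.

import torch
from discriminator_dataset import DiscriminatorDataset  # assumed import path

dataset = DiscriminatorDataset()
loader = torch.utils.data.DataLoader(dataset, batch_size=16, shuffle=True)
images, labels = next(iter(loader))  # labels: 1 = abstract, 0 = realistic
print(images.shape, labels[:5])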
71bcf0be9208fd0fbb5c709b03c8fca5ba790724
951
py
Python
emailmeld/sender.py
ionata/django-emailmeld
28326933d22957f8737ab8a9564daa9cbfca6d06
[ "BSD-2-Clause" ]
null
null
null
emailmeld/sender.py
ionata/django-emailmeld
28326933d22957f8737ab8a9564daa9cbfca6d06
[ "BSD-2-Clause" ]
1
2017-11-21T22:11:04.000Z
2017-11-22T00:37:49.000Z
emailmeld/sender.py
ionata/django-emailmeld
28326933d22957f8737ab8a9564daa9cbfca6d06
[ "BSD-2-Clause" ]
null
null
null
from django.core.mail.message import EmailMessage, EmailMultiAlternatives from django.utils.translation import ugettext_lazy as _ from django.template.loader import render_to_string from django.utils.safestring import mark_safe def send_mail_task(subject, message, from_email, recipient_list): message = EmailMessage("Discover Special Value - {0}".format(subject), message, from_email, recipient_list) message.send() def send_html_mail_task(subject, text_message, html_message, from_email, recipient_list, template='email/email_base.html'): if template is not None: html_message = render_to_string(template, {'content': mark_safe(html_message)}) # render html into an email template message = EmailMultiAlternatives("Discover Special Value - {0}".format(subject), html_message, from_email, recipient_list) message.content_subtype = "html" message.attach_alternative(text_message, "text/plain") message.send()
47.55
126
0.785489
0
0
0
0
0
0
0
0
146
0.153523
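An illustrative call of the helper above, assuming Django's email settings are configured and the 'email/email_base.html' template exists; with the console email backend the message is printed instead of sent.

# e.g. in settings.py for local testing:
# EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
from emailmeld.sender import send_html_mail_task  # assumed import path

send_html_mail_task(
    subject="Welcome",
    text_message="Plain-text fallback body.",
    html_message="<p>HTML body.</p>",
    from_email="noreply@example.com",
    recipient_list=["user@example.com"],
)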
71be4424294b2ee2dc156eab695f7198203426e0
1,506
py
Python
tests/test_hap_server.py
sander-vd/HAP-python
991761ceadfd7796d454d61c87be7f5d4b75d432
[ "Apache-2.0" ]
3
2019-12-07T22:42:38.000Z
2022-01-20T08:44:46.000Z
tests/test_hap_server.py
sander-vd/HAP-python
991761ceadfd7796d454d61c87be7f5d4b75d432
[ "Apache-2.0" ]
null
null
null
tests/test_hap_server.py
sander-vd/HAP-python
991761ceadfd7796d454d61c87be7f5d4b75d432
[ "Apache-2.0" ]
1
2021-05-15T22:34:52.000Z
2021-05-15T22:34:52.000Z
"""Tests for the HAPServer.""" from socket import timeout from unittest.mock import Mock, MagicMock, patch import pytest from pyhap import hap_server @patch('pyhap.hap_server.HAPServer.server_bind', new=MagicMock()) @patch('pyhap.hap_server.HAPServer.server_activate', new=MagicMock()) def test_finish_request_pops_socket(): """Test that ``finish_request`` always clears the connection after a request.""" amock = Mock() client_addr = ('192.168.1.1', 55555) server_addr = ('', 51826) # Positive case: The request is handled server = hap_server.HAPServer(server_addr, amock, handler_type=lambda *args: MagicMock()) server.connections[client_addr] = amock server.finish_request(amock, client_addr) assert len(server.connections) == 0 # Negative case: The request fails with a timeout def raises(*args): raise timeout() server = hap_server.HAPServer(server_addr, amock, handler_type=raises) server.connections[client_addr] = amock server.finish_request(amock, client_addr) assert len(server.connections) == 0 # Negative case: The request raises some other exception server = hap_server.HAPServer(server_addr, amock, handler_type=lambda *args: 1 / 0) server.connections[client_addr] = amock with pytest.raises(Exception): server.finish_request(amock, client_addr) assert len(server.connections) == 0
32.73913
84
0.677955
0
0
0
0
1,351
0.897078
0
0
353
0.234396
71bf1e11839857da419f894d58ec4b485c55ada9
1,604
py
Python
app/views/main.py
charlesashby/marketvault-front-end
758cf8ba1d8486f45eac093ded78a15fc82df3dc
[ "MIT" ]
null
null
null
app/views/main.py
charlesashby/marketvault-front-end
758cf8ba1d8486f45eac093ded78a15fc82df3dc
[ "MIT" ]
null
null
null
app/views/main.py
charlesashby/marketvault-front-end
758cf8ba1d8486f45eac093ded78a15fc82df3dc
[ "MIT" ]
null
null
null
from flask import render_template, Blueprint, request

from app.utils.search import MySQLClient
from app.utils.preprocessor import TextPreprocessor

mainbp = Blueprint("main", __name__)


@mainbp.route("/search", methods=["GET"])
@mainbp.route("/", methods=["GET"])
def home():
    stores_by_page = 10
    topic = request.args.get("topic")
    category = request.args.get("category")
    daily_visitors = request.args.get("dailyvisitors")
    alexa_rank = request.args.get("alexarank")
    # query-string values arrive as strings; coerce to int before computing the offset
    page = int(request.args.get("page") or 0)

    if all([topic is None, category is None, daily_visitors is None, alexa_rank is None]):
        stores = MySQLClient.random_stores(page * stores_by_page, stores_by_page)
    else:
        stores = MySQLClient.search_stores(category, daily_visitors, alexa_rank, topic, page * stores_by_page, stores_by_page)
    stores = [
        {
            "url": store.url,
            "description": TextPreprocessor.clean_str(store.description),
            "title": TextPreprocessor.clean_str(store.title),
            "alexa_rank": store.alexa_rank,
            "category": store.category,
            "average_product_price": store.average_product_price,
            "daily_visitors": store.daily_visitors
        } for store in stores
    ]
    return render_template("search/index.html", stores=stores)


@mainbp.route("/search/topics", methods=["GET"])
def search_topics():
    substring = request.args.get("q")
    return [
        {
            "id": topic.id,
            "text": topic.text
        } for topic in MySQLClient.search_topic_by_substring(substring)
    ]
31.45098
126
0.663342
0
0
0
0
1,409
0.878429
0
0
216
0.134663
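A small sketch (not from the repo) of mounting the blueprint above on an application object, following the app/views/main.py layout shown in the record; the application entry point itself is an assumption.

from flask import Flask
from app.views.main import mainbp

app = Flask(__name__)
app.register_blueprint(mainbp)

if __name__ == "__main__":
    app.run(debug=True)  # e.g. GET /search?topic=shoes&page=0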
71bf83bddad54a592ea34fa0a46b33394f925a8d
31,770
py
Python
bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py
tinapiao/Software-IC-Automation
74b23cd94aa6e4658b110e93b5deb635e014f3a6
[ "BSD-3-Clause" ]
null
null
null
bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py
tinapiao/Software-IC-Automation
74b23cd94aa6e4658b110e93b5deb635e014f3a6
[ "BSD-3-Clause" ]
null
null
null
bag_testbenches/ckt_dsn/analog/amplifier/opamp_two_stage.py
tinapiao/Software-IC-Automation
74b23cd94aa6e4658b110e93b5deb635e014f3a6
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*- """This module contains design algorithm for a traditional two stage operational amplifier.""" from typing import TYPE_CHECKING, List, Optional, Dict, Any, Tuple, Sequence from copy import deepcopy import numpy as np import scipy.optimize as sciopt from bag.math import gcd from bag.data.lti import LTICircuit, get_stability_margins, get_w_crossings, get_w_3db from bag.util.search import FloatBinaryIterator, BinaryIterator, minimize_cost_golden from bag.simulation.core import MeasurementManager from verification.mos.query import MOSDBDiscrete from .components import LoadDiodePFB, InputGm if TYPE_CHECKING: from verification.ac.core import ACTB class TailStage1(object): """Tail transistor of the first stage op amp. Due to layout restrictions, the tail transistor needs to have the same number of fingers and stack number as the input transistor. This method finds the optimal width/intent. """ def __init__(self, mos_db): # type: (MOSDBDiscrete) -> None self._db = mos_db self._intent_list = mos_db.get_dsn_param_values('intent') self._valid_widths = mos_db.width_list self._best_op = None def design(self, itarg_list, # type: List[float] vd_list, # type: List[float] vout_amp_list, # type: List[float] vb, # type: float l, # type: float seg, # type: int stack, # type: int ): # type: (...) -> None vgs_idx = self._db.get_fun_arg_index('vgs') self._best_op = best_score = None for intent in self._intent_list: for w in self._valid_widths: self._db.set_dsn_params(l=l, w=w, intent=intent, stack=stack) ib = self._db.get_function_list('ibias') gds = self._db.get_function_list('gds') vgs_min, vgs_max = ib[0].get_input_range(vgs_idx) vg_min = vgs_min + vb vg_max = vgs_max + vb # find vgs for each corner vgs_list, gds1_list, gds2_list = self._solve_vgs(itarg_list, vout_amp_list, vd_list, ib, gds, seg, vb, vg_min, vg_max) if vgs_list is not None: cur_score = max(gds2_list) if self._best_op is None or cur_score < best_score: best_score = cur_score self._best_op = (w, intent, seg, stack, vb, vgs_list, vout_amp_list, gds1_list, gds2_list) def _solve_vgs(self, itarg_list, vout_list, vd_list, ib_list, gds_list, seg, vb, vg_min, vg_max): vgs_list, gds1_list, gds2_list = [], [], [] for itarg, vout, vd, ibf, gdsf in zip(itarg_list, vout_list, vd_list, ib_list, gds_list): def zero_fun(vg): farg = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vg - vb) return seg * ibf(farg) - itarg v1, v2 = zero_fun(vg_min), zero_fun(vg_max) if v1 < 0 and v2 < 0 or v1 > 0 and v2 > 0: # no solution return None, None, None vg_sol = sciopt.brentq(zero_fun, vg_min, vg_max) # type: float vgs_opt = vg_sol - vb arg1 = self._db.get_fun_arg(vbs=vb - vd, vds=vd - vb, vgs=vgs_opt) arg2 = self._db.get_fun_arg(vbs=vb - vd, vds=vout - vb, vgs=vgs_opt) vgs_list.append(vgs_opt) gds1_list.append(seg * gdsf(arg1)) gds2_list.append(seg * gdsf(arg2)) return vgs_list, gds1_list, gds2_list def get_dsn_info(self): # type: () -> Optional[Dict[str, Any]] if self._best_op is None: return None w, intent, seg, stack, vb, vgs_list, vout_list, gds1_list, gds2_list = self._best_op self._db.set_dsn_params(w=w, intent=intent, stack=stack) cdd = self._db.get_function_list('cdd') cdd2_list = [] for vgs, vout, cddf in zip(vgs_list, vout_list, cdd): arg = self._db.get_fun_arg(vbs=0, vds=vout - vb, vgs=vgs) cur_cdd = cddf(arg) # type: float cdd2_list.append(seg * cur_cdd) return dict( w=w, intent=intent, vgs=vgs_list, gds1=gds1_list, gds2=gds2_list, cdd2=cdd2_list, ) class StageOneCurrentError(Exception): pass class OpAmpTwoStage(object): """A two 
stage fully differential operational amplifier. The first stage is a differential amplifier with diode + positive feedback load, the second stage is a psuedo-differential common source amplifier. This topology has the following advantages: 1. large output swing. 2. Common mode feedback is only required for the second stage. """ def __init__(self, nch_db, pch_db): # type: (MOSDBDiscrete, MOSDBDiscrete) -> None self._nch_db = nch_db self._pch_db = pch_db self._amp_info = None def design(self, i1_unit, # type: List[float] i1_min_size, # type: int vg_list, # type: List[float] vout_list, # type: List[float] cpar1, # type: float cload, # type: float f_unit, # type: float phase_margin, # type: float res_var, # type: float l, # type: float vstar_gm_min, # type: float ft_load_scale, # type: float vds_tail_min, # type: float seg_gm_min, # type: int vdd, # type: float pmos_input=True, # type: bool max_ref_ratio=20, # type: int load_stack_list=None, # type: Optional[List[int]] ): # type: (...) -> None # binary search for minimum stage 1 current, i1_size_iter = BinaryIterator(i1_min_size, None) i1_size_opt, opt_info = None, None while i1_size_iter.has_next(): i1_size = i1_size_iter.get_next() print('trying i1_size = %d' % i1_size) try: self._design_with_itarg(i1_size, i1_unit, vg_list, vout_list, cpar1, cload, f_unit, phase_margin, res_var, l, vstar_gm_min, ft_load_scale, vds_tail_min, seg_gm_min, vdd, pmos_input, max_ref_ratio, load_stack_list) success = True except StageOneCurrentError as err: print(err) success = False if success: print('success') opt_info = self._amp_info i1_size_opt = i1_size i1_size_iter.down() else: i1_size_iter.up() # linear search to find optimal scale2 scale2_int_max = int(opt_info['scale2']) if scale2_int_max == opt_info['scale2']: scale2_int_max -= 1 last_i1_size = i1_size_opt print('i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2'])) for scale2_test in range(scale2_int_max, 0, -1): i1_size_test = int(np.floor(i1_size_opt * (1 + opt_info['scale2']) / (1 + scale2_test))) if i1_size_test <= last_i1_size or scale2_test == opt_info['scale2']: continue print('testing i1_size = %d, scale2 = %.4g' % (i1_size_test, scale2_test)) try: self._design_with_itarg(i1_size_test, i1_unit, vg_list, vout_list, cpar1, cload, f_unit, phase_margin, res_var, l, vstar_gm_min, ft_load_scale, vds_tail_min, seg_gm_min, vdd, pmos_input, max_ref_ratio, load_stack_list) except StageOneCurrentError as err: print(err) continue if self._amp_info['scale2'] <= scale2_test: # found new minimum. 
close in to find optimal i1 size opt_info = self._amp_info i1_size_opt = i1_size_test print('update: i1_size = %d, scale2 = %.4g' % (i1_size_opt, opt_info['scale2'])) i1_size_iter = BinaryIterator(last_i1_size + 1, i1_size_test) while i1_size_iter.has_next(): i1_size_cur_opt = i1_size_iter.get_next() print('testing i1_size = %d' % i1_size_cur_opt) try: self._design_with_itarg(i1_size_cur_opt, i1_unit, vg_list, vout_list, cpar1, cload, f_unit, phase_margin, res_var, l, vstar_gm_min, ft_load_scale, vds_tail_min, seg_gm_min, vdd, pmos_input, max_ref_ratio, load_stack_list) if self._amp_info['scale2'] <= opt_info['scale2']: opt_info = self._amp_info i1_size_opt = i1_size_cur_opt print('update: i1_size = %d, ' 'scale2 = %.4g' % (i1_size_opt, opt_info['scale2'])) i1_size_iter.down() else: i1_size_iter.up() except StageOneCurrentError as err: print(err) i1_size_iter.up() last_i1_size = i1_size_test self._amp_info = opt_info def _design_with_itarg(self, i1_size, # type: int i1_unit, # type: List[float] vg_list, # type: List[float] vout_list, # type: List[float] cpar1, # type: float cload, # type: float f_unit, # type: float phase_margin, # type: float res_var, # type: float l, # type: float vstar_gm_min, # type: float ft_load_scale, # type: float vds_tail_min, # type: float seg_gm_min, # type: int vdd, # type: float pmos_input, # type: bool max_ref_ratio, # type: int load_stack_list, # type: Optional[List[int]] ): # type: (...) -> None itarg_list = [i1 * i1_size for i1 in i1_unit] if pmos_input: load_db = self._nch_db gm_db = self._pch_db vds2_list = vout_list vb_gm = vdd vb_load = 0 else: load_db = self._pch_db gm_db = self._nch_db vds2_list = [vo - vdd for vo in vout_list] vb_gm = 0 vb_load = vdd load = LoadDiodePFB(load_db) gm = InputGm(gm_db) tail1 = TailStage1(gm_db) # design load print('designing load') load.design(itarg_list, vds2_list, ft_load_scale * f_unit, stack_list=load_stack_list) load_info = load.get_dsn_info() vgs_load_list = load_info['vgs'] gds_load_list = load_info['gds1'] gm2_list = load_info['gm2'] stack_diode = load_info['stack_diode'] stack_ngm = load_info['stack_ngm'] seg_diode = load_info['seg_diode'] seg_ngm = load_info['seg_ngm'] if pmos_input: vmid_list = vgs_load_list else: vmid_list = [vdd - vgs for vgs in vgs_load_list] # design input gm print('designing input gm') gm.design(itarg_list, vg_list, vmid_list, gds_load_list, vb_gm, vstar_gm_min, vds_tail_min, seg_min=seg_gm_min, stack_list=[stack_ngm]) gm_info = gm.get_dsn_info() gm1_list = gm_info['gm'] gds_in_list = gm_info['gds'] vtail_list = gm_info['vs'] seg_gm = gm_info['seg'] stack_gm = gm_info['stack'] gds1_list = [gds_in + gds_load for gds_in, gds_load in zip(gds_in_list, gds_load_list)] gain1_list = [gm1 / gds1 for gm1, gds1 in zip(gm1_list, gds1_list)] # design stage 1 tail print('designing tail') tail1.design(itarg_list, vtail_list, vout_list, vb_gm, l, seg_gm, stack_gm) tail1_info = tail1.get_dsn_info() vbias_list = [vgs_tail + vb_gm for vgs_tail in tail1_info['vgs']] # design stage 2 gm w_dict = {'load': load_info['w'], 'in': gm_info['w'], 'tail': tail1_info['w']} th_dict = {'load': load_info['intent'], 'in': gm_info['intent'], 'tail': tail1_info['intent']} stack_dict = {'tail': stack_gm, 'in': stack_gm, 'diode': stack_diode, 'ngm': stack_ngm} seg_dict = {'tail1': seg_gm, 'in': seg_gm, 'diode1': seg_diode, 'ngm1': seg_ngm, } print('designing stage 2') stage2_results = self._design_stage2(gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, 
stack_dict, seg_dict, gm2_list, res_var, phase_margin, f_unit, max_ref_ratio) scale2 = seg_dict['diode2'] / seg_dict['diode1'] scaler = seg_dict['ref'] / seg_dict['tail1'] itot_list = [(2 * (1 + scale2) + scaler) * itarg for itarg in itarg_list] layout_info = dict( w_dict=w_dict, th_dict=th_dict, stack_dict=stack_dict, seg_dict=seg_dict, ) self._amp_info = dict( i1_size=i1_size, scale2=scale2, scaler=scaler, vtail=vtail_list, vmid=vmid_list, vbias=vbias_list, itot=itot_list, vstar=gm_info['vstar'], cin=gm_info['cgg'], gm1=gm1_list, gds1=gds1_list, gain1=gain1_list, rfb=stage2_results['rz'], cfb=stage2_results['cf'], gain_tot=stage2_results['gain'], f_3db=stage2_results['f_3db'], f_unit=stage2_results['f_unity'], phase_margin=stage2_results['phase_margin'], layout_info=layout_info, ) print('done') def get_dsn_info(self): # type: () -> Optional[Dict[str, Any]] return self._amp_info def get_specs_verification(self, top_specs): # type: (Dict[str, Any]) -> Dict[str, Any] top_specs = deepcopy(top_specs) dsn_specs = top_specs['dsn_specs'] ibias = dsn_specs['i1_unit'][0] * self._amp_info['i1_size'] * self._amp_info['scaler'] vdd = dsn_specs['vdd'] vindc = dsn_specs['vg_list'][0] voutdc = dsn_specs['vout_list'][0] f_unit = dsn_specs['f_unit'] gain_max = max(self._amp_info['gain_tot']) f_bw_log = int(np.floor(np.log10(f_unit / gain_max))) f_unit_log = int(np.ceil(np.log10(f_unit))) top_specs['layout_params'].update(self._amp_info['layout_info']) meas = top_specs['measurements'][0] meas['cfb'] = self._amp_info['cfb'] meas['rfb'] = self._amp_info['rfb'] ac_tb = meas['testbenches']['ac'] ac_tb['fstart'] = 10 ** (f_bw_log - 1) ac_tb['fstop'] = 10 ** (f_unit_log + 1) ac_sim_vars = ac_tb['sim_vars'] ac_sim_vars['vdd'] = vdd ac_sim_vars['cload'] = dsn_specs['cload'] ac_sim_vars['vincm'] = vindc ac_sim_vars['voutcm'] = voutdc ac_sim_vars['ibias'] = ibias ac_sim_vars['vdd'] = vdd ac_sim_vars['vinac'] = 1.0 ac_sim_vars['vindc'] = 0.0 """ top_specs['tb_dc']['tb_params']['vimax'] = vdd top_specs['tb_dc']['tb_params']['vimin'] = -vdd top_specs['tb_dc']['tb_params']['vindc'] = vindc top_specs['tb_dc']['tb_params']['voutcm'] = voutdc top_specs['tb_dc']['tb_params']['ibias'] = ibias top_specs['tb_dc']['tb_params']['vdd'] = vdd top_specs['tb_dc']['tb_params']['voutref'] = voutdc top_specs['tb_dc']['tb_params']['vout_start'] = -vdd + 0.15 top_specs['tb_dc']['tb_params']['vout_stop'] = vdd - 0.15 """ return top_specs def _design_stage2(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gm2_list, res_var, phase_margin, f_unit, max_ref_ratio): seg_tail1 = seg_dict['tail1'] seg_diode1 = seg_dict['diode1'] seg_ngm1 = seg_dict['ngm1'] # step 1: find stage 2 unit size seg_gcd = gcd(gcd(seg_tail1, seg_diode1), seg_ngm1) if seg_gcd % 2 != 0: raise ValueError('All segment numbers must be even.') # divide seg_gcd by 2 to make sure all generated segment numbers are even seg_gcd //= 2 # make sure we have enough tail fingers for common mode feedback min_size = 2 if seg_tail1 // seg_gcd == 2 else 1 def ac_results_fun(cur_size): seg_dict['tail2'] = seg_tail1 // seg_gcd * cur_size seg_dict['diode2'] = seg_diode1 // seg_gcd * cur_size seg_dict['ngm2'] = seg_ngm1 // seg_gcd * cur_size cur_scale2 = cur_size / seg_gcd cur_gm2_list = [gm2 * cur_scale2 for gm2 in gm2_list] ac_results = self._find_rz_cf(gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, 
cur_gm2_list, res_var, phase_margin) return ac_results def funity_fun(cur_size): ac_results_tmp = ac_results_fun(cur_size) fu_list = ac_results_tmp[0] if fu_list is None: return -1 # noinspection PyTypeChecker ans = min(fu_list) return ans # find min_size such that amplifier is stable min_bin_iter = BinaryIterator(min_size, None) while min_bin_iter.has_next(): test_size = min_bin_iter.get_next() test_fu = funity_fun(test_size) if test_fu >= 0: min_bin_iter.save() min_bin_iter.down() else: min_bin_iter.up() min_result = minimize_cost_golden(funity_fun, f_unit, offset=min_bin_iter.get_last_save()) if min_result.x is None: msg = 'Insufficient stage 1 current. funity_max=%.4g' raise StageOneCurrentError(msg % min_result.vmax) funity_list, rz_nom, cf_min, gain_list, f3db_list, pm_list = ac_results_fun(min_result.x) seg_tail2_tot = seg_dict['tail2'] seg_tail2 = (seg_tail2_tot // 4) * 2 seg_tailcm = seg_tail2_tot - seg_tail2 seg_tail_tot = 2 * (seg_dict['tail1'] + seg_tail2) seg_dict['tail2'] = seg_tail2 seg_dict['tailcm'] = seg_tailcm seg_dict['ref'] = max(2, -((-seg_tail_tot // max_ref_ratio) // 2) * 2) return dict( rz=rz_nom, cf=cf_min, gain=gain_list, f_3db=f3db_list, f_unity=funity_list, phase_margin=pm_list, ) @classmethod def _get_stage2_ss(cls, gm2_list, gds2_list, c2_list, cg2_list, cload, seg_gcd, cur_size): cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list = [], [], [], [] for gm2, gds2, c2, cg2 in zip(gm2_list, gds2_list, c2_list, cg2_list): cur_gm2_list.append(gm2 * cur_size / seg_gcd) cur_gds2_list.append(gds2 * cur_size / seg_gcd) cur_c2_list.append(cload + c2 * cur_size / seg_gcd) cur_cg2_list.append(cg2 * cur_size / seg_gcd) return cur_gm2_list, cur_gds2_list, cur_c2_list, cur_cg2_list def _find_rz_cf(self, gm_db, load_db, vtail_list, vg_list, vmid_list, vout_list, vbias_list, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gm2_list, res_var, phase_margin, cap_tol=1e-15, cap_step=10e-15, cap_min=1e-15, cap_max=1e-9): """Find minimum miller cap that stabilizes the system. NOTE: This function assume phase of system for any miller cap value will not loop around 360, otherwise it may get the phase margin wrong. This assumption should be valid for this op amp. 
""" gz_worst = float(min(gm2_list)) gz_nom = gz_worst * (1 - res_var) # find maximum Cf needed to stabilize all corners cf_min = cap_min for env_idx, (vtail, vg, vmid, vout, vbias) in \ enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)): cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gz_worst) bin_iter = FloatBinaryIterator(cf_min, None, cap_tol, search_step=cap_step) while bin_iter.has_next(): cur_cf = bin_iter.get_next() cir.add_cap(cur_cf, 'outp', 'xp') cir.add_cap(cur_cf, 'outn', 'xn') num, den = cir.get_num_den('in', 'out') cur_pm, _ = get_stability_margins(num, den) if cur_pm < phase_margin: if cur_cf > cap_max: # no way to make amplifier stable, just return return None, None, None, None, None, None bin_iter.up() else: bin_iter.save() bin_iter.down() cir.add_cap(-cur_cf, 'outp', 'xp') cir.add_cap(-cur_cf, 'outn', 'xn') # bin_iter is guaranteed to save at least one value, so don't need to worry about # cf_min being None cf_min = bin_iter.get_last_save() # find gain, unity gain bandwidth, and phase margin across corners gain_list, f3db_list, funity_list, pm_list = [], [], [], [] for env_idx, (vtail, vg, vmid, vout, vbias) in \ enumerate(zip(vtail_list, vg_list, vmid_list, vout_list, vbias_list)): cir = self._make_circuit(env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gz_nom) cir.add_cap(cf_min, 'outp', 'xp') cir.add_cap(cf_min, 'outn', 'xn') num, den = cir.get_num_den('in', 'out') pn = np.poly1d(num) pd = np.poly1d(den) gain_list.append(abs(pn(0) / pd(0))) f3db_list.append(get_w_3db(num, den) / 2 / np.pi) funity_list.append(get_w_crossings(num, den)[0] / 2 / np.pi) pm_list.append(get_stability_margins(num, den)[0]) return funity_list, 1 / gz_nom, cf_min, gain_list, f3db_list, pm_list @classmethod def _make_circuit(cls, env_idx, gm_db, load_db, vtail, vg, vmid, vout, vbias, vb_gm, vb_load, cload, cpar1, w_dict, th_dict, stack_dict, seg_dict, gz, neg_cap=False, no_fb=False): cur_env = gm_db.env_list[env_idx] gm_db.set_dsn_params(w=w_dict['tail'], intent=th_dict['tail'], stack=stack_dict['tail']) tail1_params = gm_db.query(env=cur_env, vbs=0, vds=vtail - vb_gm, vgs=vbias - vb_gm) tail2_params = gm_db.query(env=cur_env, vbs=0, vds=vout - vb_gm, vgs=vbias - vb_gm) gm_db.set_dsn_params(w=w_dict['in'], intent=th_dict['in'], stack=stack_dict['in']) gm1_params = gm_db.query(env=cur_env, vbs=vb_gm - vtail, vds=vmid - vtail, vgs=vg - vtail) load_db.set_dsn_params(w=w_dict['load'], intent=th_dict['load'], stack=stack_dict['diode']) diode1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load) diode2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load) load_db.set_dsn_params(stack=stack_dict['ngm']) ngm1_params = load_db.query(env=cur_env, vbs=0, vds=vmid - vb_load, vgs=vmid - vb_load) ngm2_params = load_db.query(env=cur_env, vbs=0, vds=vout - vb_load, vgs=vmid - vb_load) cir = LTICircuit() # stage 1 cir.add_transistor(tail1_params, 'tail', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail1'], neg_cap=neg_cap) cir.add_transistor(gm1_params, 'midp', 'inn', 'tail', 'gnd', fg=seg_dict['in'], neg_cap=neg_cap) cir.add_transistor(gm1_params, 'midn', 'inp', 'tail', 'gnd', fg=seg_dict['in'], neg_cap=neg_cap) cir.add_transistor(diode1_params, 'midp', 'midp', 'gnd', 'gnd', fg=seg_dict['diode1'], neg_cap=neg_cap) cir.add_transistor(diode1_params, 'midn', 'midn', 'gnd', 
'gnd', fg=seg_dict['diode1'], neg_cap=neg_cap) cir.add_transistor(ngm1_params, 'midn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm1'], neg_cap=neg_cap) cir.add_transistor(ngm1_params, 'midp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm1'], neg_cap=neg_cap) # stage 2 cir.add_transistor(tail2_params, 'outp', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'], neg_cap=neg_cap) cir.add_transistor(tail2_params, 'outn', 'gnd', 'gnd', 'gnd', fg=seg_dict['tail2'], neg_cap=neg_cap) cir.add_transistor(diode2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['diode2'], neg_cap=neg_cap) cir.add_transistor(diode2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['diode2'], neg_cap=neg_cap) cir.add_transistor(ngm2_params, 'outp', 'midn', 'gnd', 'gnd', fg=seg_dict['ngm2'], neg_cap=neg_cap) cir.add_transistor(ngm2_params, 'outn', 'midp', 'gnd', 'gnd', fg=seg_dict['ngm2'], neg_cap=neg_cap) # parasitic cap cir.add_cap(cpar1, 'midp', 'gnd') cir.add_cap(cpar1, 'midn', 'gnd') # load cap cir.add_cap(cload, 'outp', 'gnd') cir.add_cap(cload, 'outn', 'gnd') # feedback resistors if not no_fb: cir.add_conductance(gz, 'xp', 'midn') cir.add_conductance(gz, 'xn', 'midp') # diff-to-single conversion cir.add_vcvs(0.5, 'inp', 'gnd', 'in', 'gnd') cir.add_vcvs(-0.5, 'inn', 'gnd', 'in', 'gnd') cir.add_vcvs(1, 'out', 'gnd', 'outp', 'outn') return cir class OpAmpTwoStageChar(MeasurementManager): def __init__(self, data_dir, # type: str meas_name, # type: str impl_lib, # type: str specs, # type: Dict[str, Any] wrapper_lookup, # type: Dict[str, str] sim_view_list, # type: Sequence[Tuple[str, str]] env_list, # type: Sequence[str] ): MeasurementManager.__init__(self, data_dir, meas_name, impl_lib, specs, wrapper_lookup, sim_view_list, env_list) def get_initial_state(self): # type: () -> str """Returns the initial FSM state.""" return 'ac0' def get_testbench_info(self, state, prev_output): rfb0 = self.specs['rfb'] cfb0 = self.specs['cfb'] find_cfb = self.specs.get('find_cfb', True) res_var = self.specs['res_var'] cmin_scale = self.specs['cmin_scale'] cmax_scale = self.specs['cmax_scale'] num_pts = self.specs['num_pts'] tmp = super(OpAmpTwoStageChar, self).get_testbench_info('ac', prev_output) tb_name, tb_type, tb_specs, tb_params = tmp if state == 'ac0' and find_cfb: cfb_list = np.linspace(cfb0 * cmin_scale, cfb0 * cmax_scale, num_pts).tolist() tb_specs['sim_vars']['rfb'] = rfb0 * (1 - res_var) tb_specs['sim_vars']['cfb'] = cfb_list else: if find_cfb: cfb = self.get_state_output('ac0')['cfb'] else: cfb = cfb0 tb_specs['sim_vars']['rfb'] = rfb0 tb_specs['sim_vars']['cfb'] = cfb return tb_name, tb_type, tb_specs, tb_params def process_output(self, state, data, tb_manager): # type: (str, Dict[str, Any], ACTB) -> Tuple[bool, str, Dict[str, Any]] phase_margin = self.specs['phase_margin'] find_cfb = self.specs.get('find_cfb', True) output_list = ['vout'] results = tb_manager.get_ugb_and_pm(data, output_list) if state == 'ac0' and find_cfb: done = False next_state = 'ac1' cfb = self._find_min_cfb(phase_margin, results) output = dict(cfb=cfb) else: done = True next_state = '' if find_cfb: cfb = self.get_state_output('ac0')['cfb'] else: cfb = self.specs['cfb'] gain_results = tb_manager.get_gain_and_w3db(data, output_list, output_dict=results) corner_list = results['corner'].tolist() gain_list = gain_results['gain_vout'].tolist() bw_list = gain_results['w3db_vout'].tolist() funity_list = results['funity_vout'].tolist() pm_list = results['pm_vout'].tolist() output = dict(cfb=cfb, corners=corner_list, gain=gain_list, bw=bw_list, funity=funity_list, pm=pm_list) 
return done, next_state, output @classmethod def _find_min_cfb(cls, phase_margin, results): axis_names = ['corner', 'cfb'] corner_list = results['corner'] corner_sort_arg = np.argsort(corner_list) # type: Sequence[int] # rearrange array axis sweep_vars = results['sweep_params']['pm_vout'] order = [sweep_vars.index(name) for name in axis_names] pm_data = np.transpose(results['pm_vout'], axes=order) # determine minimum cfb cfb_vec = results['cfb'] cfb_idx_min = 0 for corner_idx in corner_sort_arg: bin_iter = BinaryIterator(cfb_idx_min, cfb_vec.size) while bin_iter.has_next(): cur_cfb_idx = bin_iter.get_next() pm = pm_data[corner_idx, cur_cfb_idx] if pm >= phase_margin: bin_iter.save() bin_iter.down() else: bin_iter.up() cfb_idx_min = bin_iter.get_last_save() if cfb_idx_min is None: # No solution; cannot make amplifier stable break if cfb_idx_min is None: raise ValueError('Cannot determine cfb.') else: cfb = cfb_vec[cfb_idx_min] return cfb.item()
42.53012
100
0.549292
31,073
0.978061
0
0
5,564
0.175134
0
0
5,684
0.178911