Dataset columns (name: dtype, observed range or number of distinct values):

hexsha: stringlengths (40 to 40)
size: int64 (5 to 2.06M)
ext: stringclasses (10 values)
lang: stringclasses (1 value)
max_stars_repo_path: stringlengths (3 to 248)
max_stars_repo_name: stringlengths (5 to 125)
max_stars_repo_head_hexsha: stringlengths (40 to 78)
max_stars_repo_licenses: sequencelengths (1 to 10)
max_stars_count: int64 (1 to 191k)
max_stars_repo_stars_event_min_datetime: stringlengths (24 to 24)
max_stars_repo_stars_event_max_datetime: stringlengths (24 to 24)
max_issues_repo_path: stringlengths (3 to 248)
max_issues_repo_name: stringlengths (5 to 125)
max_issues_repo_head_hexsha: stringlengths (40 to 78)
max_issues_repo_licenses: sequencelengths (1 to 10)
max_issues_count: int64 (1 to 67k)
max_issues_repo_issues_event_min_datetime: stringlengths (24 to 24)
max_issues_repo_issues_event_max_datetime: stringlengths (24 to 24)
max_forks_repo_path: stringlengths (3 to 248)
max_forks_repo_name: stringlengths (5 to 125)
max_forks_repo_head_hexsha: stringlengths (40 to 78)
max_forks_repo_licenses: sequencelengths (1 to 10)
max_forks_count: int64 (1 to 105k)
max_forks_repo_forks_event_min_datetime: stringlengths (24 to 24)
max_forks_repo_forks_event_max_datetime: stringlengths (24 to 24)
content: stringlengths (5 to 2.06M)
avg_line_length: float64 (1 to 1.02M)
max_line_length: int64 (3 to 1.03M)
alphanum_fraction: float64 (0 to 1)
count_classes: int64 (0 to 1.6M)
score_classes: float64 (0 to 1)
count_generators: int64 (0 to 651k)
score_generators: float64 (0 to 1)
count_decorators: int64 (0 to 990k)
score_decorators: float64 (0 to 1)
count_async_functions: int64 (0 to 235k)
score_async_functions: float64 (0 to 1)
count_documentation: int64 (0 to 1.04M)
score_documentation: float64 (0 to 1)
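As a minimal, illustrative sketch of how rows with this schema might be loaded and filtered: the shard filename "data.parquet" and the filter thresholds below are assumptions for illustration only, not part of this dataset.

import pandas as pd

# Hypothetical Parquet shard holding rows with the columns listed above.
df = pd.read_parquet("data.parquet")

# Keep Python files under 1 MB that are documentation- and class-heavy
# (thresholds are arbitrary examples).
subset = df[
    (df["lang"] == "Python")
    & (df["size"] < 1_000_000)
    & (df["score_documentation"] > 0.2)
    & (df["score_classes"] > 0.5)
]

for _, row in subset.iterrows():
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])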
hexsha: 13f3a6d9012ba4c4473a1ffb1f1db1418326ee1f
size: 7,566
ext: py
lang: Python
max_stars_repo_path: src/autonomous/purepursuit.py
max_stars_repo_name: Sloomey/DeepSpace2019
max_stars_repo_head_hexsha: dda035c0ac100209b03a2ff04d86df09c6de9a85
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: src/autonomous/purepursuit.py
max_issues_repo_name: Sloomey/DeepSpace2019
max_issues_repo_head_hexsha: dda035c0ac100209b03a2ff04d86df09c6de9a85
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: src/autonomous/purepursuit.py
max_forks_repo_name: Sloomey/DeepSpace2019
max_forks_repo_head_hexsha: dda035c0ac100209b03a2ff04d86df09c6de9a85
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import math

from constants import Constants
from utils import vector2d
from wpilib import SmartDashboard as Dash

from autonomous import pursuitpoint


class PurePursuit():
    """An implementation of the Pure Pursuit path tracking algorithm."""

    def __init__(self, path):
        self.path = path
        self.pursuit_points = [pursuitpoint.PursuitPoint(p, c) for p, c in zip(
            self.path.getPoints(), self.path.getCurvatures())]
        self.last_lookahead_index = 0
        self.cur_curvature = 0
        self.target_velocities = vector2d.Vector2D()
        self.closest_point_index = 0

    def computeVelocities(self):
        """Compute the velocities along the path."""
        # Compute the velocities along the path using the curvature and Constants.CURVE_VELOCITY
        for ppoint in self.pursuit_points:
            if abs(ppoint.curvature) <= Constants.CURVATURE_THRESHOLD:
                velocity = Constants.MAX_VELOCITY
            else:
                velocity = min(Constants.MAX_VELOCITY,
                               Constants.CURVE_VELOCITY/ppoint.curvature)
            ppoint.velocity = velocity
        # Limit the acceleration of the velocities
        for i in reversed(range(0, len(self.pursuit_points)-1)):
            distance = self.pursuit_points[i].point.getDistance(
                self.pursuit_points[i+1].point)
            new_velocity = math.sqrt(
                self.pursuit_points[i+1].velocity**2 + (2 * Constants.MAX_ACCELERATION * distance))
            new_velocity = min(self.pursuit_points[i].velocity, new_velocity)
            self.pursuit_points[i].velocity = new_velocity

    def updateLookaheadPointIndex2(self, state):
        """Update the lookahead point given the current robot state.
        Uses the minimum distance point if the state is more than Constants.LOOKAHEAD_DIST
        from all points, otherwise uses the closest point to the lookahead distance."""
        # Compute point distances to state and differences from those distances to Constants.LOOKAHEAD_DIST
        distances = [math.hypot(state.x - ppoint.point.x,
                                state.y - ppoint.point.y) for ppoint in self.pursuit_points]
        differences = [abs(d-Constants.LOOKAHEAD_DIST) for d in distances]
        min_distance = min(distances)
        # Get new lookahead index
        if min_distance <= Constants.LOOKAHEAD_DIST:
            self.last_lookahead_index = differences.index(min(differences))
        else:
            self.last_lookahead_index = distances.index(min_distance)

    def updateLookaheadPointIndex(self, state):
        """Loop over the points in the path to get the lookahead point given the current robot state."""
        for i in range(self.last_lookahead_index, len(self.pursuit_points)-1):
            lookahead = self.computeLookaheadPoint(
                self.pursuit_points[i].point, self.pursuit_points[i+1].point, state)
            if lookahead is not None:
                self.last_lookahead_index = i

    def computeLookaheadPoint(self, start, end, state):
        """Compute the lookahead point given the current robot state.
        Returns a point if the current state is Constants.LOOKAHEAD_DIST from between
        start and end, otherwise returns None."""
        # Algorithm for circle line segment intersection found here:
        # https://stackoverflow.com/questions/1073336/circle-line-segment-collision-detection-algorithm/1084899#1084899
        segment_direction = end - start
        center_to_start = start - state
        a = segment_direction * segment_direction
        b = 2 * (center_to_start * segment_direction)
        c = (center_to_start * center_to_start) - Constants.LOOKAHEAD_DIST ** 2
        discriminant = b**2 - (4 * a * c)
        if discriminant < 0:
            return None
        else:
            discriminant = math.sqrt(discriminant)
            t0 = (-b - discriminant) / (2 * a)
            t1 = (-b + discriminant) / (2 * a)
            if t0 >= 0 and t0 <= 1:
                return start + t0 * segment_direction
            if t1 >= 0 and t1 <= 1:
                return start + t1 * segment_direction
            return None

    def updateCurvature(self, state):
        """Update the curvature from the current lookahead point to the current robot position."""
        lookahead = self.pursuit_points[self.last_lookahead_index].point
        # Transform the lookahead and state.pos to get an aligned vector
        transform = lookahead - state.pos
        transform = transform.getRotated(-state.angle)
        # Use the transformed vector to calculate the curvature (derived from
        # https://www.ri.cmu.edu/pub_files/pub3/coulter_r_craig_1992_1/coulter_r_craig_1992_1.pdf#page=12)
        self.cur_curvature = (2 * transform.x) / Constants.LOOKAHEAD_DIST**2

    def updateClosestPointIndex(self, state):
        """Update the index of the closest point to the current robot position."""
        index = self.closest_point_index
        smallest_distance = self.pursuit_points[index].point.getDistance(state)
        for i in range(0, len(self.pursuit_points)):
            distance = self.pursuit_points[i].point.getDistance(state)
            if smallest_distance > distance:
                smallest_distance = distance
                index = i
        self.closest_point_index = index

    def updateTargetVelocities(self, state):
        """Update the target velocities of the left and right wheels."""
        robot_velocity = self.pursuit_points[self.closest_point_index].velocity
        # Use kinematics (http://robotsforroboticists.com/drive-kinematics/) and algebra
        # to find wheel target velocities
        l_velocity = robot_velocity * \
            (2 + self.cur_curvature * Constants.TRACK_WIDTH) / \
            2 / Constants.PURE_PURSUIT_KV
        r_velocity = robot_velocity * \
            (2 - self.cur_curvature * Constants.TRACK_WIDTH) / \
            2 / Constants.PURE_PURSUIT_KV
        scale = max(abs(l_velocity), abs(r_velocity))
        if scale > 1:
            l_velocity /= scale
            r_velocity /= scale
        self.target_velocities = vector2d.Vector2D(l_velocity, r_velocity)

    def update(self, state):
        """Update the pure pursuit follower (runs all update functions)."""
        # TODO which lookahead function to use
        self.updateLookaheadPointIndex(state.pos)
        # self.updateLookaheadPointIndex2(state.pos)
        self.updateCurvature(state)
        self.updateClosestPointIndex(state.pos)
        self.updateTargetVelocities(state.pos)

    def outputToSmartDashboard(self):
        """Output values to the smart dashboard."""
        lookahead = self.pursuit_points[self.last_lookahead_index].point
        closest = self.pursuit_points[self.closest_point_index].point
        Dash.putNumberArray("Lookahead Point", [lookahead.x, lookahead.y])
        Dash.putNumber("Curvature", self.cur_curvature)
        Dash.putNumberArray("Closest Point", [closest.x, closest.y])
        Dash.putNumberArray("Target Velocities", [
            self.target_velocities.x, self.target_velocities.y])
        # print("Lookahead Point - {}".format(lookahead))
        # print("Curvature - {}".format(self.cur_curvature))
        # print("Closest Point - {}".format(closest))
        # print("Target Velocities - {}".format(self.target_velocities))
        # print("------------------------------")

    def isDone(self):
        """Check if the path is done being followed."""
        return (len(self.pursuit_points) - self.closest_point_index) <= 1
avg_line_length: 50.10596
max_line_length: 178
alphanum_fraction: 0.655432
count_classes: 7,414
score_classes: 0.97991
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 2,202
score_documentation: 0.291039
hexsha: 13f4c5d6b839fc74a59e3720afa044833541c6ea
size: 8,661
ext: py
lang: Python
max_stars_repo_path: esphome/voluptuous_schema.py
max_stars_repo_name: TheEggi/esphomeyaml
max_stars_repo_head_hexsha: 98e8cc1edc7b29891e8100eb484922e5c2d4fc33
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: esphome/voluptuous_schema.py
max_issues_repo_name: TheEggi/esphomeyaml
max_issues_repo_head_hexsha: 98e8cc1edc7b29891e8100eb484922e5c2d4fc33
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: esphome/voluptuous_schema.py
max_forks_repo_name: TheEggi/esphomeyaml
max_forks_repo_head_hexsha: 98e8cc1edc7b29891e8100eb484922e5c2d4fc33
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
import difflib import itertools import voluptuous as vol from esphome.py_compat import string_types class ExtraKeysInvalid(vol.Invalid): def __init__(self, *arg, **kwargs): self.candidates = kwargs.pop('candidates') vol.Invalid.__init__(self, *arg, **kwargs) def ensure_multiple_invalid(err): if isinstance(err, vol.MultipleInvalid): return err return vol.MultipleInvalid(err) # pylint: disable=protected-access, unidiomatic-typecheck class _Schema(vol.Schema): """Custom cv.Schema that prints similar keys on error.""" def __init__(self, schema, extra=vol.PREVENT_EXTRA, extra_schemas=None): super(_Schema, self).__init__(schema, extra=extra) # List of extra schemas to apply after validation # Should be used sparingly, as it's not a very voluptuous-way/clean way of # doing things. self._extra_schemas = extra_schemas or [] def __call__(self, data): res = super(_Schema, self).__call__(data) for extra in self._extra_schemas: try: res = extra(res) except vol.Invalid as err: raise ensure_multiple_invalid(err) return res def _compile_mapping(self, schema, invalid_msg=None): invalid_msg = invalid_msg or 'mapping value' # Check some things that ESPHome's schemas do not allow # mostly to keep the logic in this method sane (so these may be re-added if needed). for key in schema: if key is vol.Extra: raise ValueError("ESPHome does not allow vol.Extra") if isinstance(key, vol.Remove): raise ValueError("ESPHome does not allow vol.Remove") if isinstance(key, vol.primitive_types): raise ValueError("All schema keys must be wrapped in cv.Required or cv.Optional") # Keys that may be required all_required_keys = set(key for key in schema if isinstance(key, vol.Required)) # Keys that may have defaults all_default_keys = set(key for key in schema if isinstance(key, vol.Optional)) # Recursively compile schema _compiled_schema = {} for skey, svalue in vol.iteritems(schema): new_key = self._compile(skey) new_value = self._compile(svalue) _compiled_schema[skey] = (new_key, new_value) # Sort compiled schema (probably not necessary for esphome, but leave it here just in case) candidates = list(vol.schema_builder._iterate_mapping_candidates(_compiled_schema)) # After we have the list of candidates in the correct order, we want to apply some # optimization so that each # key in the data being validated will be matched against the relevant schema keys only. # No point in matching against different keys additional_candidates = [] candidates_by_key = {} for skey, (ckey, cvalue) in candidates: if type(skey) in vol.primitive_types: candidates_by_key.setdefault(skey, []).append((skey, (ckey, cvalue))) elif isinstance(skey, vol.Marker) and type(skey.schema) in vol.primitive_types: candidates_by_key.setdefault(skey.schema, []).append((skey, (ckey, cvalue))) else: # These are wildcards such as 'int', 'str', 'Remove' and others which should be # applied to all keys additional_candidates.append((skey, (ckey, cvalue))) key_names = [] for skey in schema: if isinstance(skey, string_types): key_names.append(skey) elif isinstance(skey, vol.Marker) and isinstance(skey.schema, string_types): key_names.append(skey.schema) def validate_mapping(path, iterable, out): required_keys = all_required_keys.copy() # Build a map of all provided key-value pairs. # The type(out) is used to retain ordering in case a ordered # map type is provided as input. key_value_map = type(out)() for key, value in iterable: key_value_map[key] = value # Insert default values for non-existing keys. 
for key in all_default_keys: if not isinstance(key.default, vol.Undefined) and key.schema not in key_value_map: # A default value has been specified for this missing key, insert it. key_value_map[key.schema] = key.default() error = None errors = [] for key, value in key_value_map.items(): key_path = path + [key] # Optimization. Validate against the matching key first, then fallback to the rest relevant_candidates = itertools.chain(candidates_by_key.get(key, []), additional_candidates) # compare each given key/value against all compiled key/values # schema key, (compiled key, compiled value) for skey, (ckey, cvalue) in relevant_candidates: try: new_key = ckey(key_path, key) except vol.Invalid as e: if len(e.path) > len(key_path): raise if not error or len(e.path) > len(error.path): error = e continue # Backtracking is not performed once a key is selected, so if # the value is invalid we immediately throw an exception. exception_errors = [] try: cval = cvalue(key_path, value) out[new_key] = cval except vol.MultipleInvalid as e: exception_errors.extend(e.errors) except vol.Invalid as e: exception_errors.append(e) if exception_errors: for err in exception_errors: if len(err.path) <= len(key_path): err.error_type = invalid_msg errors.append(err) # If there is a validation error for a required # key, this means that the key was provided. # Discard the required key so it does not # create an additional, noisy exception. required_keys.discard(skey) break # Key and value okay, mark as found in case it was # a Required() field. required_keys.discard(skey) break else: if self.extra == vol.ALLOW_EXTRA: out[key] = value elif self.extra != vol.REMOVE_EXTRA: if isinstance(key, string_types) and key_names: matches = difflib.get_close_matches(key, key_names) errors.append(ExtraKeysInvalid('extra keys not allowed', key_path, candidates=matches)) else: errors.append(vol.Invalid('extra keys not allowed', key_path)) # for any required keys left that weren't found and don't have defaults: for key in required_keys: msg = getattr(key, 'msg', None) or 'required key not provided' errors.append(vol.RequiredFieldInvalid(msg, path + [key])) if errors: raise vol.MultipleInvalid(errors) return out return validate_mapping def add_extra(self, validator): validator = _Schema(validator) self._extra_schemas.append(validator) return self # pylint: disable=arguments-differ def extend(self, *schemas, **kwargs): extra = kwargs.pop('extra', None) if kwargs: raise ValueError if not schemas: return self.extend({}) if len(schemas) != 1: ret = self for schema in schemas: ret = ret.extend(schema) return ret schema = schemas[0] if isinstance(schema, vol.Schema): schema = schema.schema ret = super(_Schema, self).extend(schema, extra=extra) return _Schema(ret.schema, extra=ret.extra, extra_schemas=self._extra_schemas)
avg_line_length: 43.305
max_line_length: 99
alphanum_fraction: 0.564831
count_classes: 8,359
score_classes: 0.965131
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 2,062
score_documentation: 0.238079
hexsha: 13f5928fe05ccf64858c18af5eff2188153c32e0
size: 20,738
ext: py
lang: Python
max_stars_repo_path: semisupervised/DensityPeaks.py
max_stars_repo_name: dpr1005/Semisupervised-learning-and-instance-selection-methods
max_stars_repo_head_hexsha: 646d9e729c85322e859928e71a3241f2aec6d93d
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2021-12-10T09:04:18.000Z
max_stars_repo_stars_event_max_datetime: 2022-01-22T15:03:19.000Z
max_issues_repo_path: semisupervised/DensityPeaks.py
max_issues_repo_name: dpr1005/Semisupervised-learning-and-instance-selection-methods
max_issues_repo_head_hexsha: 646d9e729c85322e859928e71a3241f2aec6d93d
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 107
max_issues_repo_issues_event_min_datetime: 2021-12-02T07:43:11.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-31T11:02:46.000Z
max_forks_repo_path: semisupervised/DensityPeaks.py
max_forks_repo_name: dpr1005/Semisupervised-learning-and-instance-selection-methods
max_forks_repo_head_hexsha: 646d9e729c85322e859928e71a3241f2aec6d93d
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
#!/usr/bin/env python # -*- coding:utf-8 -*- # @Filename: DensityPeaks.py # @Author: Daniel Puente Ramírez # @Time: 5/3/22 09:55 # @Version: 4.0 import math from collections import defaultdict import numpy as np import pandas as pd from sklearn.neighbors import KNeighborsClassifier, NearestNeighbors from sklearn.preprocessing import LabelEncoder from sklearn.semi_supervised import SelfTrainingClassifier from sklearn.svm import SVC from instance_selection import ENN from .utils import split class STDPNF: """ Li, J., Zhu, Q., & Wu, Q. (2019). A self-training method based on density peaks and an extended parameter-free local noise filter for k nearest neighbor. Knowledge-Based Systems, 184, 104895. Wu, D., Shang, M., Luo, X., Xu, J., Yan, H., Deng, W., & Wang, G. (2018). Self-training semi-supervised classification based on density peaks of data. Neurocomputing, 275, 180-191. """ def __init__( self, dc=None, distance_metric="euclidean", k=3, gauss_cutoff=True, percent=2.0, density_threshold=None, distance_threshold=None, anormal=True, filtering=False, classifier=None, classifier_params=None, filter_method=None, ): """Semi Supervised Algorithm based on Density Peaks.""" self.dc = dc self.distance_metric = distance_metric self.k = k self.gauss_cutoff = gauss_cutoff self.percent = percent self.density_threshold = density_threshold self.distance_threshold = distance_threshold self.anormal = anormal self.filtering = filtering if classifier is not None: if isinstance(classifier_params, dict): self.classifier = classifier(**classifier_params) else: self.classifier = classifier() else: self.classifier = None if filter_method is not None and filter_method != "ENANE": self.filter = filter_method() elif isinstance(filter_method, str) and filter_method == "ENANE": self.filter = filter_method else: self.filter = None self.y = None self.low = None self.u = None self.classifier_stdpnf = None self.order = None self.structure = None self.structure_stdnpf = None self.n_id = None self.distances = None self.max_dis = None self.min_dis = None self.rho = None self.delta = None self.nneigh = None self.data = None def __build_distance(self): """ Calculate distance dict. :return: distance dict, max distance, min distance """ from scipy.spatial.distance import pdist, squareform distance_matrix = pdist(self.data, metric=self.distance_metric) distance_matrix = squareform(distance_matrix) triangle_upper = np.triu_indices(self.data.shape[0], 1) triangle_upper = distance_matrix[triangle_upper] distance = {} for i in range(self.n_id): for j in range(i + 1, self.n_id): distance[(i, j)] = distance_matrix[i, j] distance[(j, i)] = distance_matrix[i, j] max_dis, min_dis = np.max(triangle_upper), np.min(triangle_upper) return distance, max_dis, min_dis def __auto_select_dc(self): """ Auto select the local density threshold that let average neighbor is 1-2 percent of all nodes. :return: dc that local density threshold """ max_dis, min_dis = self.max_dis, self.min_dis dc = (max_dis + min_dis) / 2 while True: nneighs = ( sum([1 for v in self.distances.values() if v < dc]) / self.n_id**2 ) if 0.01 <= nneighs <= 0.02: break # binary search if nneighs < 0.01: min_dis = dc else: max_dis = dc dc = (max_dis + min_dis) / 2 if max_dis - min_dis < 0.0001: break return dc def __select_dc(self): """ Select the local density threshold, default is the method used in paper, 'auto' is auto select. 
:return: dc that local density threshold """ if self.dc == "auto": dc = self.__auto_select_dc() else: position = int(self.n_id * (self.n_id + 1) / 2 * self.percent / 100) dc = np.sort(list(self.distances.values()))[ position * 2 + self.n_id] return dc def __local_density(self): """ Compute all points' local density. :return: local density vector that index is the point index """ def gauss_func(dij, dc): """ > The function takes in a distance value and a cutoff value, and returns the value of the Gaussian function at that point :param dij: distance between two nodes :param dc: The cutoff distance :return: the value of the gaussian function. """ return math.exp(-((dij / dc) ** 2)) def cutoff_func(dij, dc): """ If the distance between two atoms is less than the cutoff distance, return 1, otherwise return 0 :param dij: distance between atoms i and j :param dc: cutoff distance :return: 1 if dij < dc, else 0 """ return 1 if dij < dc else 0 func = gauss_func if self.gauss_cutoff else cutoff_func rho = [0] * self.n_id for i in range(self.n_id): for j in range(i + 1, self.n_id): temp = func(self.distances[(i, j)], self.dc) rho[i] += temp rho[j] += temp return np.array(rho, np.float32) def __min_neighbor_and_distance(self): """ Compute all points' min util to the higher local density point(which is the nearest neighbor). :return: distance vector, nearest neighbor vector """ if self.rho is None: raise ValueError("Encountered rho as None.") sort_rho_idx = np.argsort(-self.rho) delta, nneigh = [float(self.max_dis)] * self.n_id, [0] * self.n_id delta[sort_rho_idx[0]] = -1.0 for i in range(self.n_id): for j in range(0, i): old_i, old_j = sort_rho_idx[i], sort_rho_idx[j] if self.distances[(old_i, old_j)] < delta[old_i]: delta[old_i] = self.distances[(old_i, old_j)] nneigh[old_i] = old_j delta[sort_rho_idx[0]] = max(delta) return np.array(delta, np.float32), np.array(nneigh, np.float32) def __structure(self): """ The function takes the data and the nearest neighbor indices and creates a dataframe with the following columns: - sample: the data point - next: the index of the nearest neighbor - previous: the index of the nearest neighbor of the nearest neighbor - label: the label of the data point The function also creates a copy of the dataframe called structure_stdnpf """ self.structure = dict.fromkeys(range(self.n_id)) for index, sample in enumerate(self.data): self.structure[index] = [ sample, int(self.nneigh[index]), None, self.y[index] if index < len(self.y) else -1, ] for index in range(self.n_id): if self.structure[self.structure[index][1]][2] is None: self.structure[self.structure[index][1]][2] = index self.structure = pd.DataFrame( self.structure, index=["sample", "next", "previous", "label"] ).transpose() self.structure_stdnpf = self.structure.copy(deep=True) def __step_a(self): """ > The function takes the labeled samples and trains the classifier on them :return: The samples that have been labeled. """ samples_labeled = self.structure.loc[self.structure["label"] != -1] sam_lab = samples_labeled["sample"].to_list() y_without = samples_labeled["label"].to_list() self.classifier.fit(sam_lab, y_without) return samples_labeled def __discover_structure(self): """Discovers the under laying structure.""" self._fit_without() def __nan_search(self): """ For each point, find the set of points that are within a distance of r, and the set of points that are within a distance of r+1. The set of points that are within a distance of r+1 is a superset of the set of points that are within a distance of r. 
The set of points that are within a distance of r+1 is also a superset of the set of points that are within a distance of r+2. The set of points that are within a distance of r+2 is also a superset of the set of points that are within a distance of r+3. And so on. The set of points that are within a distance of r+1 is also a superset of the set of points that are within a distance of r+2. The set of points that are within a distance of r+2 is :return: nan, r """ r = 1 nan = defaultdict(set) nb = dict.fromkeys(range(self.n_id), 0) knn = defaultdict(set) rnn = defaultdict(set) cnt = defaultdict(int) while True: search = NearestNeighbors(n_neighbors=r + 1, algorithm="kd_tree") search.fit(self.data) for index, sample in enumerate(self.data): r_neighs = search.kneighbors( [sample], return_distance=False)[0][1:] knn[index].update(list(r_neighs)) for neigh in r_neighs: nb[neigh] += 1 rnn[neigh].add(index) cnt[r] = np.count_nonzero((np.array(list(nb.values())) == 0)) if r > 2 and cnt[r] == cnt[r - 1]: r -= 1 break r += 1 for index in range(self.n_id): nan[index] = knn[index].intersection(rnn[index]) return nan, r def __enane(self, fx, nan, r): """ > The function takes in the dataframe, the list of indices of the unlabeled data, the list of indices of the neighbors of the unlabeled data, and the number of neighbors to use in the KNN classifier. It then creates a new dataframe with the labeled data and the unlabeled data, and uses the KNN classifier to predict the labels of the unlabeled data. It then checks if the predicted label is the same as the label of the majority of the neighbors of the unlabeled data. If it is, then it adds the index of the unlabeled data to the list of indices of the data to be labeled :param fx: the indexes of the unlabeled data :param nan: a list of lists, where each list contains the indices of the neighbors of a sample :param r: the number of neighbors to consider :return: The indexes of the samples that are going to be labeled and the labels that are going to be assigned to them. 
""" es = [] es_pred = [] local_structure = self.structure_stdnpf.copy(deep=True) base_estimator = KNeighborsClassifier( n_neighbors=r, metric=self.distance_metric ) labeled_data = local_structure.loc[local_structure["label"] != -1] nan_unlabeled = local_structure.loc[fx] data = pd.concat([labeled_data, nan_unlabeled], join="inner") enane_model = SelfTrainingClassifier(base_estimator) enane_model.fit(data["sample"].tolist(), data["label"].tolist()) enane_pred = enane_model.predict(nan_unlabeled["sample"].tolist()) for (row_index, _), pred in zip(nan_unlabeled.iterrows(), enane_pred): usefulness = 0 harmfulness = 0 for neigh in nan[row_index]: if local_structure.loc[neigh, "label"] == pred: usefulness += 1 else: harmfulness += 1 if usefulness >= harmfulness: es.append(row_index) es_pred.append(pred) return es, es_pred def __init_values(self, low, u, y): """ It takes in the lower and upper bounds of the data, and the data itself, and then calculates the distances between the data points, the maximum distance, the minimum distance, the dc value, the rho value, the delta value, the number of neighbors, and the structure of the data :param low: lower bound of the data :param u: upper bound of the data :param y: the labels of the data """ self.y = y self.low = low self.u = u self.data = np.concatenate((low, u), axis=0) self.n_id = self.data.shape[0] self.distances, self.max_dis, self.min_dis = self.__build_distance() self.dc = self.__select_dc() self.rho = self.__local_density() self.delta, self.nneigh = self.__min_neighbor_and_distance() self.__structure() def _fit_without(self): """ The function takes in a classifier, and then labels the next point, and then labels the previous points, without filtering. """ if self.classifier is None: self.classifier = SVC() count = 1 self.order = dict.fromkeys(range(self.n_id), 0) count = self._label_next_point(count) self._label_previous_points(count) def _label_previous_points(self, count): """ > The function takes the samples labeled in the previous step and finds the previous samples of those samples. It then labels those samples and repeats the process until there are no more samples to label :param count: the number of the current iteration """ while True: samples_labeled = self.__step_a() prev_rows = samples_labeled["previous"].to_numpy() prev_unlabeled = [] samples_labeled_index = samples_labeled.index.to_list() for prev_row in prev_rows: if prev_row not in samples_labeled_index and prev_row is not None: prev_unlabeled.append(prev_row) self.order[prev_row] = count if len(prev_unlabeled) == 0: break unlabeled_prev_of_labeled = self.structure.loc[prev_unlabeled] lu = unlabeled_prev_of_labeled["sample"].to_list() y_pred = self.classifier.predict(lu) for new_label, pos in zip(y_pred, prev_unlabeled): self.structure.at[pos, "label"] = new_label count += 1 def _label_next_point(self, count): """ > The function takes the samples labeled in the previous step and finds the next samples in the structure. If the next samples are not labeled, it labels them and updates the order of the samples :param count: the number of the next point to be labeled :return: The number of labeled samples. 
""" while True: samples_labeled = self.__step_a() next_rows = samples_labeled["next"].to_numpy() next_unlabeled = [] samples_labeled_index = samples_labeled.index.to_list() for next_row in next_rows: if next_row not in samples_labeled_index: next_unlabeled.append(next_row) self.order[next_row] = count if len(next_unlabeled) == 0: break unlabeled_next_of_labeled = self.structure.loc[next_unlabeled] lu = unlabeled_next_of_labeled["sample"].to_list() y_pred = self.classifier.predict(lu) for new_label, pos in zip(y_pred, next_unlabeled): self.structure.at[pos, "label"] = new_label count += 1 return count def _fit_stdpnf(self): """ Self Training based on Density Peaks and a parameter-free noise filter. """ self.__discover_structure() nan, lambda_param = self.__nan_search() self.classifier_stdpnf = KNeighborsClassifier( n_neighbors=self.k, metric=self.distance_metric ) self.classifier_stdpnf.fit(self.low, self.y) count = 1 while count <= max(self.order.values()): unlabeled_rows = self.structure_stdnpf.loc[ self.structure_stdnpf["label"] == -1 ].index.to_list() unlabeled_indexes = [] for row in unlabeled_rows: if self.order[row] == count: unlabeled_indexes.append(row) if isinstance(self.filter, str) and self.filter == "ENANE": filtered_indexes, filtered_labels = self.__enane( unlabeled_indexes, nan, lambda_param ) self.structure_stdnpf.at[filtered_indexes, "label"] = filtered_labels else: labeled_data = self.structure_stdnpf.loc[ self.structure_stdnpf["label"] != -1 ] complete = labeled_data["sample"] complete_y = labeled_data["label"] result = self._if_filter(complete, complete_y) self._results_to_structure(complete, result) labeled_data = self.structure_stdnpf.loc[ self.structure_stdnpf["label"] != -1 ] self.classifier_stdpnf.fit( labeled_data["sample"].tolist(), labeled_data["label"].tolist() ) count += 1 labeled_data = self.structure_stdnpf.loc[self.structure_stdnpf["label"] != -1] self.classifier_stdpnf.fit( labeled_data["sample"].tolist(), labeled_data["label"].tolist() ) def _results_to_structure(self, complete, result): """ > This function takes the results of the model and compares them to the complete data set. If the result is not in the complete data set, it is added to the structure data set. :param complete: the complete dataset :param result: the result of the clustering """ results_to_unlabeled = [] for r in result.to_numpy(): is_in = False for c in complete: if np.array_equal(r, c): is_in = True if not is_in: results_to_unlabeled.append(r) for r in results_to_unlabeled: self.structure_stdnpf.at[np.array(self.structure_stdnpf["sample"], r)][ "label" ] = -1 def _if_filter(self, complete, complete_y): """ If the filter is an ENN, then filter the original data, otherwise filter the complete data :param complete: the complete dataframe :param complete_y: the complete y values :return: The result is a dataframe with the filtered data. """ if isinstance(self.filter, ENN): original = pd.DataFrame(self.low) original_y = pd.DataFrame(self.y) result, _ = self.filter.filter_original_complete( original, original_y, complete, complete_y ) else: result, _ = self.filter.filter(complete, complete_y) return result def fit(self, samples, y): """Fit method.""" try: l, u, y = split(samples, y) except IndexError: raise ValueError("Dimensions do not match.") le = LabelEncoder() le.fit(y) y = le.transform(y) self.__init_values(l, u, y) if self.filtering: self._fit_stdpnf() else: self._fit_without() def predict(self, src): """ Predict based on a trained classifier. 
:param src: The source image :return: The classifier is being returned. """ if self.classifier is None: raise AssertionError("The model needs to be fitted first.") return self.classifier.predict(src)
avg_line_length: 35.268707
max_line_length: 86
alphanum_fraction: 0.580384
count_classes: 20,217
score_classes: 0.97483
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 7,026
score_documentation: 0.338782
hexsha: 13f713d62e74a1cd787ec98b134812d16f5287ea
size: 933
ext: py
lang: Python
max_stars_repo_path: N-aryTreeLevelOrderTraversal429.py
max_stars_repo_name: Bit64L/LeetCode-Python-
max_stars_repo_head_hexsha: 64847cbb1adcaca4561b949e8acc52e8e031a6cb
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: N-aryTreeLevelOrderTraversal429.py
max_issues_repo_name: Bit64L/LeetCode-Python-
max_issues_repo_head_hexsha: 64847cbb1adcaca4561b949e8acc52e8e031a6cb
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: N-aryTreeLevelOrderTraversal429.py
max_forks_repo_name: Bit64L/LeetCode-Python-
max_forks_repo_head_hexsha: 64847cbb1adcaca4561b949e8acc52e8e031a6cb
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
""" # Definition for a Node. """ class TreeNode(object): def __init__(self, val, children): self.val = val self.children = children class Solution(object): def levelOrder(self, root): """ :type root: Node :rtype: List[List[int]] """ if root is None: return [] from Queue import Queue que = Queue() que.put(root) ans, tmp, k = [], [], 1 while que.qsize() != 0: node = que.get() tmp.append(node.val) k -= 1 for child in node.children: que.put(child) if k == 0: k = que.qsize() ans.append(list(tmp)) tmp = [] return ans node2 = TreeNode(2, []) node3 = TreeNode(3, []) children = [node2, node3] node1 = TreeNode(1, children) solution = Solution() print(solution.levelOrder(node1))
avg_line_length: 20.733333
max_line_length: 39
alphanum_fraction: 0.485531
count_classes: 733
score_classes: 0.785638
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 104
score_documentation: 0.111468
hexsha: 13f7593938a4204f0e27844ca0c493ca0b47ec5f
size: 16,444
ext: py
lang: Python
max_stars_repo_path: plugin.video.team.milhanos/websocket/_core.py
max_stars_repo_name: akuala/REPO.KUALA
max_stars_repo_head_hexsha: ea9a157025530d2ce8fa0d88431c46c5352e89d4
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2018-11-02T19:55:30.000Z
max_stars_repo_stars_event_max_datetime: 2020-08-14T02:22:20.000Z
max_issues_repo_path: venv/lib/python3.5/site-packages/websocket/_core.py
max_issues_repo_name: dukakisxyz/wifiportal21-map
max_issues_repo_head_hexsha: 1f1917c2f3c2987f7a88cc537d7c50449d144ea0
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: venv/lib/python3.5/site-packages/websocket/_core.py
max_forks_repo_name: dukakisxyz/wifiportal21-map
max_forks_repo_head_hexsha: 1f1917c2f3c2987f7a88cc537d7c50449d144ea0
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 3
max_forks_repo_forks_event_min_datetime: 2019-12-17T20:47:00.000Z
max_forks_repo_forks_event_max_datetime: 2021-02-11T19:03:59.000Z
content:
""" websocket - WebSocket client library for Python Copyright (C) 2010 Hiroki Ohtani(liris) This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. This library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with this library; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA """ from __future__ import print_function import six import socket if six.PY3: from base64 import encodebytes as base64encode else: from base64 import encodestring as base64encode import struct import threading # websocket modules from ._exceptions import * from ._abnf import * from ._socket import * from ._utils import * from ._url import * from ._logging import * from ._http import * from ._handshake import * from ._ssl_compat import * """ websocket python client. ========================= This version support only hybi-13. Please see http://tools.ietf.org/html/rfc6455 for protocol. """ class WebSocket(object): """ Low level WebSocket interface. This class is based on The WebSocket protocol draft-hixie-thewebsocketprotocol-76 http://tools.ietf.org/html/draft-hixie-thewebsocketprotocol-76 We can connect to the websocket server and send/receive data. The following example is an echo client. >>> import websocket >>> ws = websocket.WebSocket() >>> ws.connect("ws://echo.websocket.org") >>> ws.send("Hello, Server") >>> ws.recv() 'Hello, Server' >>> ws.close() get_mask_key: a callable to produce new mask keys, see the set_mask_key function's docstring for more details sockopt: values for socket.setsockopt. sockopt must be tuple and each element is argument of sock.setsockopt. sslopt: dict object for ssl socket option. fire_cont_frame: fire recv event for each cont frame. default is False enable_multithread: if set to True, lock send method. skip_utf8_validation: skip utf8 validation. """ def __init__(self, get_mask_key=None, sockopt=None, sslopt=None, fire_cont_frame=False, enable_multithread=False, skip_utf8_validation=False, **options): """ Initialize WebSocket object. """ self.sock_opt = sock_opt(sockopt, sslopt) self.handshake_response = None self.sock = None self.connected = False self.get_mask_key = get_mask_key # These buffer over the build-up of a single frame. self.frame_buffer = frame_buffer(self._recv, skip_utf8_validation) self.cont_frame = continuous_frame(fire_cont_frame, skip_utf8_validation) if enable_multithread: self.lock = threading.Lock() else: self.lock = NoLock() def __iter__(self): """ Allow iteration over websocket, implying sequential `recv` executions. """ while True: yield self.recv() def __next__(self): return self.recv() def next(self): return self.__next__() def fileno(self): return self.sock.fileno() def set_mask_key(self, func): """ set function to create musk key. You can customize mask key generator. Mainly, this is for testing purpose. func: callable object. the func takes 1 argument as integer. The argument means length of mask key. This func must return string(byte array), which length is argument specified. 
""" self.get_mask_key = func def gettimeout(self): """ Get the websocket timeout(second). """ return self.sock_opt.timeout def settimeout(self, timeout): """ Set the timeout to the websocket. timeout: timeout time(second). """ self.sock_opt.timeout = timeout if self.sock: self.sock.settimeout(timeout) timeout = property(gettimeout, settimeout) def getsubprotocol(self): """ get subprotocol """ if self.handshake_response: return self.handshake_response.subprotocol else: return None subprotocol = property(getsubprotocol) def getstatus(self): """ get handshake status """ if self.handshake_response: return self.handshake_response.status else: return None status = property(getstatus) def getheaders(self): """ get handshake response header """ if self.handshake_response: return self.handshake_response.headers else: return None headers = property(getheaders) def connect(self, url, **options): """ Connect to url. url is websocket url scheme. ie. ws://host:port/resource You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> ws = WebSocket() >>> ws.connect("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "subprotocols" - array of available sub protocols. default is None. "socket" - pre-initialized stream socket. """ self.sock, addrs = connect(url, self.sock_opt, proxy_info(**options), options.pop('socket', None)) try: self.handshake_response = handshake(self.sock, *addrs, **options) self.connected = True except: if self.sock: self.sock.close() self.sock = None raise def send(self, payload, opcode=ABNF.OPCODE_TEXT): """ Send the data as string. payload: Payload must be utf-8 string or unicode, if the opcode is OPCODE_TEXT. Otherwise, it must be string(byte array) opcode: operation code to send. Please see OPCODE_XXX. """ frame = ABNF.create_frame(payload, opcode) return self.send_frame(frame) def send_frame(self, frame): """ Send the data frame. frame: frame data created by ABNF.create_frame >>> ws = create_connection("ws://echo.websocket.org/") >>> frame = ABNF.create_frame("Hello", ABNF.OPCODE_TEXT) >>> ws.send_frame(frame) >>> cont_frame = ABNF.create_frame("My name is ", ABNF.OPCODE_CONT, 0) >>> ws.send_frame(frame) >>> cont_frame = ABNF.create_frame("Foo Bar", ABNF.OPCODE_CONT, 1) >>> ws.send_frame(frame) """ if self.get_mask_key: frame.get_mask_key = self.get_mask_key data = frame.format() length = len(data) trace("send: " + repr(data)) with self.lock: while data: l = self._send(data) data = data[l:] return length def send_binary(self, payload): return self.send(payload, ABNF.OPCODE_BINARY) def ping(self, payload=""): """ send ping data. payload: data payload to send server. """ if isinstance(payload, six.text_type): payload = payload.encode("utf-8") self.send(payload, ABNF.OPCODE_PING) def pong(self, payload): """ send pong data. payload: data payload to send server. 
""" if isinstance(payload, six.text_type): payload = payload.encode("utf-8") self.send(payload, ABNF.OPCODE_PONG) def recv(self): """ Receive string data(byte array) from the server. return value: string(byte array) value. """ opcode, data = self.recv_data() if six.PY3 and opcode == ABNF.OPCODE_TEXT: return data.decode("utf-8") elif opcode == ABNF.OPCODE_TEXT or opcode == ABNF.OPCODE_BINARY: return data else: return '' def recv_data(self, control_frame=False): """ Receive data with operation code. control_frame: a boolean flag indicating whether to return control frame data, defaults to False return value: tuple of operation code and string(byte array) value. """ opcode, frame = self.recv_data_frame(control_frame) return opcode, frame.data def recv_data_frame(self, control_frame=False): """ Receive data with operation code. control_frame: a boolean flag indicating whether to return control frame data, defaults to False return value: tuple of operation code and string(byte array) value. """ while True: frame = self.recv_frame() if not frame: # handle error: # 'NoneType' object has no attribute 'opcode' raise WebSocketProtocolException("Not a valid frame %s" % frame) elif frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY, ABNF.OPCODE_CONT): self.cont_frame.validate(frame) self.cont_frame.add(frame) if self.cont_frame.is_fire(frame): return self.cont_frame.extract(frame) elif frame.opcode == ABNF.OPCODE_CLOSE: self.send_close() return (frame.opcode, frame) elif frame.opcode == ABNF.OPCODE_PING: if len(frame.data) < 126: self.pong(frame.data) else: raise WebSocketProtocolException("Ping message is too long") if control_frame: return (frame.opcode, frame) elif frame.opcode == ABNF.OPCODE_PONG: if control_frame: return (frame.opcode, frame) def recv_frame(self): """ receive data as frame from server. return value: ABNF frame object. """ return self.frame_buffer.recv_frame() def send_close(self, status=STATUS_NORMAL, reason=six.b("")): """ send close data to the server. status: status code to send. see STATUS_XXX. reason: the reason to close. This must be string or bytes. """ if status < 0 or status >= ABNF.LENGTH_16: raise ValueError("code is invalid range") self.connected = False self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE) def close(self, status=STATUS_NORMAL, reason=six.b(""), timeout=3): """ Close Websocket object status: status code to send. see STATUS_XXX. reason: the reason to close. This must be string. timeout: timeout until receive a close frame. If None, it will wait forever until receive a close frame. """ if self.connected: if status < 0 or status >= ABNF.LENGTH_16: raise ValueError("code is invalid range") try: self.connected = False self.send(struct.pack('!H', status) + reason, ABNF.OPCODE_CLOSE) sock_timeout = self.sock.gettimeout() self.sock.settimeout(timeout) try: frame = self.recv_frame() if isEnabledForError(): recv_status = struct.unpack("!H", frame.data)[0] if recv_status != STATUS_NORMAL: error("close status: " + repr(recv_status)) except: pass self.sock.settimeout(sock_timeout) self.sock.shutdown(socket.SHUT_RDWR) except: pass self.shutdown() def abort(self): """ Low-level asynchronous abort, wakes up other threads that are waiting in recv_* """ if self.connected: self.sock.shutdown(socket.SHUT_RDWR) def shutdown(self): "close socket, immediately." 
if self.sock: self.sock.close() self.sock = None self.connected = False def _send(self, data): return send(self.sock, data) def _recv(self, bufsize): try: return recv(self.sock, bufsize) except WebSocketConnectionClosedException: if self.sock: self.sock.close() self.sock = None self.connected = False raise def create_connection(url, timeout=None, class_=WebSocket, **options): """ connect to url and return websocket object. Connect to url and return the WebSocket object. Passing optional timeout parameter will set the timeout on the socket. If no timeout is supplied, the global default timeout setting returned by getdefauttimeout() is used. You can customize using 'options'. If you set "header" list object, you can set your own custom header. >>> conn = create_connection("ws://echo.websocket.org/", ... header=["User-Agent: MyProgram", ... "x-custom: header"]) timeout: socket timeout time. This value is integer. if you set None for this value, it means "use default_timeout value" class_: class to instantiate when creating the connection. It has to implement settimeout and connect. It's __init__ should be compatible with WebSocket.__init__, i.e. accept all of it's kwargs. options: "header" -> custom http header list or dict. "cookie" -> cookie value. "origin" -> custom origin url. "host" -> custom host header string. "http_proxy_host" - http proxy host name. "http_proxy_port" - http proxy port. If not set, set to 80. "http_no_proxy" - host names, which doesn't use proxy. "http_proxy_auth" - http proxy auth information. tuple of username and password. default is None "enable_multithread" -> enable lock for multithread. "sockopt" -> socket options "sslopt" -> ssl option "subprotocols" - array of available sub protocols. default is None. "skip_utf8_validation" - skip utf8 validation. "socket" - pre-initialized stream socket. """ sockopt = options.pop("sockopt", []) sslopt = options.pop("sslopt", {}) fire_cont_frame = options.pop("fire_cont_frame", False) enable_multithread = options.pop("enable_multithread", False) skip_utf8_validation = options.pop("skip_utf8_validation", False) websock = class_(sockopt=sockopt, sslopt=sslopt, fire_cont_frame=fire_cont_frame, enable_multithread=enable_multithread, skip_utf8_validation=skip_utf8_validation, **options) websock.settimeout(timeout if timeout is not None else getdefaulttimeout()) websock.connect(url, **options) return websock
avg_line_length: 33.490835
max_line_length: 90
alphanum_fraction: 0.589698
count_classes: 12,365
score_classes: 0.751946
count_generators: 172
score_generators: 0.01046
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 8,668
score_documentation: 0.527122
hexsha: 13f7ddb9a41846fb5799958db3fabf2ed4eeb64a
size: 3,105
ext: py
lang: Python
max_stars_repo_path: vaccine_card/logistic/models.py
max_stars_repo_name: Unanimad/lais_046_2020_etapa_2
max_stars_repo_head_hexsha: 630efc6b25a580be44b6cd50be6744a01221a2c4
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: vaccine_card/logistic/models.py
max_issues_repo_name: Unanimad/lais_046_2020_etapa_2
max_issues_repo_head_hexsha: 630efc6b25a580be44b6cd50be6744a01221a2c4
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: vaccine_card/logistic/models.py
max_forks_repo_name: Unanimad/lais_046_2020_etapa_2
max_forks_repo_head_hexsha: 630efc6b25a580be44b6cd50be6744a01221a2c4
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
from django.db import models

from vaccine_card.vaccination.models import Vaccine


class State(models.Model):
    name = models.CharField(max_length=20, verbose_name='Nome')

    class Meta:
        verbose_name = 'Unidade Federativa'

    def __str__(self):
        return self.name


class City(models.Model):
    name = models.CharField(max_length=50, verbose_name='Nome')
    state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)

    class Meta:
        verbose_name = 'Município'

    def __str__(self):
        return self.name


class Address(models.Model):
    logradouro = models.CharField(max_length=150, verbose_name='Logradouro')
    numero = models.CharField(max_length=4, verbose_name='Número')
    complemento = models.CharField(max_length=50, null=True, blank=True, verbose_name='Complemento')
    bairro = models.CharField(max_length=150, verbose_name='Bairro')
    cep = models.CharField(max_length=8, verbose_name='CEP')

    # state = models.ForeignKey(State, on_delete=models.CASCADE, verbose_name=State._meta.verbose_name)
    city = models.ForeignKey(City, on_delete=models.CASCADE, verbose_name=City._meta.verbose_name)

    class Meta:
        verbose_name = 'Endereço'


class HealthCenter(models.Model):
    cnes = models.CharField(max_length=7, verbose_name='CNES')
    cnpj = models.CharField(max_length=14, verbose_name='CNPJ')
    name = models.CharField(max_length=255, verbose_name='Razão Social')

    created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
    updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')

    address = models.ManyToManyField(Address, verbose_name=Address._meta.verbose_name)

    class Meta:
        verbose_name = 'Estabelecimento de Saúde'
        verbose_name_plural = 'Estabelecimentos de Saúde'

    def __str__(self):
        return self.name


class Stock(models.Model):
    lot = models.PositiveSmallIntegerField(verbose_name='Lote')

    created_at = models.DateTimeField(auto_now_add=True, auto_now=False, verbose_name='Criado em:')
    updated_at = models.DateTimeField(auto_now_add=False, auto_now=True, verbose_name='Atualizado em:')

    health_center = models.ForeignKey(HealthCenter, on_delete=models.CASCADE,
                                      verbose_name=HealthCenter._meta.verbose_name)
    vaccines = models.ManyToManyField(Vaccine, through='VaccineStock', verbose_name=Vaccine._meta.verbose_name)

    class Meta:
        verbose_name = 'Estoque'


class VaccineStock(models.Model):
    amount = models.PositiveSmallIntegerField(verbose_name='Quantidade recebida')
    remaining = models.PositiveSmallIntegerField(verbose_name='Quantidade restante')

    vaccine = models.ForeignKey(Vaccine, on_delete=models.DO_NOTHING, verbose_name=Vaccine._meta.verbose_name)
    stock = models.ForeignKey(Stock, on_delete=models.DO_NOTHING, verbose_name=Stock._meta.verbose_name)

    class Meta:
        verbose_name = 'Estoque de Vacina'

    def __str__(self):
        return self.vaccine.name
avg_line_length: 36.104651
max_line_length: 111
alphanum_fraction: 0.738808
count_classes: 3,011
score_classes: 0.967856
count_generators: 0
score_generators: 0
count_decorators: 0
score_decorators: 0
count_async_functions: 0
score_async_functions: 0
count_documentation: 429
score_documentation: 0.137898
hexsha: 13f954a55ebaa879400311cfe5c32a3993b29137
size: 12,933
ext: py
lang: Python
max_stars_repo_path: test/test_rimuhosting.py
max_stars_repo_name: shenoyn/libcloud
max_stars_repo_head_hexsha: bd902992a658b6a99193d69323e051ffa7388253
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2015-11-08T12:59:27.000Z
max_stars_repo_stars_event_max_datetime: 2015-11-08T12:59:27.000Z
max_issues_repo_path: test/test_rimuhosting.py
max_issues_repo_name: shenoyn/libcloud
max_issues_repo_head_hexsha: bd902992a658b6a99193d69323e051ffa7388253
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: test/test_rimuhosting.py
max_forks_repo_name: shenoyn/libcloud
max_forks_repo_head_hexsha: bd902992a658b6a99193d69323e051ffa7388253
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # libcloud.org licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Copyright 2009 RedRata Ltd from libcloud.drivers.rimuhosting import RimuHostingNodeDriver from test import MockHttp from test import MockHttp, TestCaseMixin import unittest import httplib class RimuHostingTest(unittest.TestCase, TestCaseMixin): def setUp(self): RimuHostingNodeDriver.connectionCls.conn_classes = (None, RimuHostingMockHttp) self.driver = RimuHostingNodeDriver('foo') def test_list_nodes(self): nodes = self.driver.list_nodes() self.assertEqual(len(nodes),1) node = nodes[0] self.assertEqual(node.public_ip[0], "1.2.3.4") self.assertEqual(node.public_ip[1], "1.2.3.5") self.assertEqual(node.extra['order_oid'], 88833465) self.assertEqual(node.id, "order-88833465-api-ivan-net-nz") def test_list_sizes(self): sizes = self.driver.list_sizes() self.assertEqual(len(sizes),1) size = sizes[0] self.assertEqual(size.ram,950) self.assertEqual(size.disk,20) self.assertEqual(size.bandwidth,75) self.assertEqual(size.price,32.54) def test_list_images(self): images = self.driver.list_images() self.assertEqual(len(images),6) image = images[0] self.assertEqual(image.name,"Debian 5.0 (aka Lenny, RimuHosting"\ " recommended distro)") self.assertEqual(image.id, "lenny") def test_reboot_node(self): # Raises exception on failure node = self.driver.list_nodes()[0] self.driver.reboot_node(node) def test_destroy_node(self): # Raises exception on failure node = self.driver.list_nodes()[0] self.driver.destroy_node(node) def test_create_node(self): # Raises exception on failure size = self.driver.list_sizes()[0] image = self.driver.list_images()[0] self.driver.create_node(name="api.ivan.net.nz", image=image, size=size) class RimuHostingMockHttp(MockHttp): def _r_orders(self,method,url,body,headers): body = """ { "get_orders_response" : { "status_message" : null , "status_code" : 200 , "error_info" : null , "response_type" : "OK" , "human_readable_message" : "Found 15 orders" , "response_display_duration_type" : "REGULAR", "about_orders" : [{ "order_oid" : 88833465 , "domain_name" : "api.ivan.net.nz" , "slug" : "order-88833465-api-ivan-net-nz" , "billing_oid" : 96122465 , "is_on_customers_own_physical_server" : false , "vps_parameters" : { "memory_mb" : 160 , "disk_space_mb" : 4096 , "disk_space_2_mb" : 0} , "host_server_oid" : "764" , "server_type" : "VPS" , "data_transfer_allowance" : { "data_transfer_gb" : 30 , "data_transfer" : "30"} , "billing_info" : { } , "allocated_ips" : { "primary_ip" : "1.2.3.4" , "secondary_ips" : ["1.2.3.5","1.2.3.6"]} , "running_state" : "RUNNING"}]}}""" return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _r_pricing_plans(self,method,url,body,headers): body = """ {"get_pricing_plans_response" : { "status_message" : null , "status_code" : 200 , "error_info" : null , "response_type" : "OK" , 
"human_readable_message" : "Here some pricing plans we are offering on new orders.&nbsp; Note we offer most disk and memory sizes.&nbsp; So if you setup a new server feel free to vary these (e.g. different memory, disk, etc) and we will just adjust the pricing to suit.&nbsp; Pricing is in USD.&nbsp; If you are an NZ-based customer then we would need to add GST." , "response_display_duration_type" : "REGULAR" , "pricing_plan_infos" : [{ "pricing_plan_code" : "MiroVPSLowContention" , "pricing_plan_description" : "MiroVPS Semi-Dedicated Server (Dallas)" , "monthly_recurring_fee" : 32.54 , "monthly_recurring_amt" : { "amt" : 35.0 , "currency" : "CUR_AUD" ,"amt_usd" : 32.54} , "minimum_memory_mb" : 950 , "minimum_disk_gb" : 20 , "minimum_data_transfer_allowance_gb" : 75 , "see_also_url" : "http://rimuhosting.com/order/serverdetails.jsp?plan=MiroVPSLowContention" , "server_type" : "VPS" , "offered_at_data_center" : { "data_center_location_code" : "DCDALLAS" , "data_center_location_name" : "Dallas"}} ]}} """ return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _r_distributions(self, method, url, body, headers): body = """ { "get_distros_response" : { "status_message" : null , "status_code" : 200 , "error_info" : null , "response_type" : "OK" , "human_readable_message" : "Here are the distros we are offering on new orders." , "response_display_duration_type" : "REGULAR" , "distro_infos" : [{ "distro_code" : "lenny" , "distro_description" : "Debian 5.0 (aka Lenny, RimuHosting recommended distro)"} , { "distro_code" : "centos5" , "distro_description" : "Centos5"} , { "distro_code" : "ubuntu904" , "distro_description" : "Ubuntu 9.04 (Jaunty Jackalope, from 2009-04)"} , { "distro_code" : "ubuntu804" , "distro_description" : "Ubuntu 8.04 (Hardy Heron, 5 yr long term support (LTS))"} , { "distro_code" : "ubuntu810" , "distro_description" : "Ubuntu 8.10 (Intrepid Ibex, from 2008-10)"} , { "distro_code" : "fedora10" , "distro_description" : "Fedora 10"}]}} """ return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _r_orders_new_vps(self, method, url, body, headers): body = """ { "post_new_vps_response" : { "status_message" : null , "status_code" : 200 , "error_info" : null , "response_type" : "OK" , "human_readable_message" : null , "response_display_duration_type" : "REGULAR" , "setup_messages" : ["Using user-specified billing data: Wire Transfer" , "Selected user as the owner of the billing details: Ivan Meredith" , "No VPS paramters provided, using default values."] , "about_order" : { "order_oid" : 52255865 , "domain_name" : "api.ivan.net.nz" , "slug" : "order-52255865-api-ivan-net-nz" , "billing_oid" : 96122465 , "is_on_customers_own_physical_server" : false , "vps_parameters" : { "memory_mb" : 160 , "disk_space_mb" : 4096 , "disk_space_2_mb" : 0} , "host_server_oid" : "764" , "server_type" : "VPS" , "data_transfer_allowance" : { "data_transfer_gb" : 30 , "data_transfer" : "30"} , "billing_info" : { } , "allocated_ips" : { "primary_ip" : "74.50.57.80", "secondary_ips" : []} , "running_state" : "RUNNING"} , "new_order_request" : { "billing_oid" : 96122465 , "user_oid" : 0 , "host_server_oid" : null , "vps_order_oid_to_clone" : 0 , "ip_request" : { "num_ips" : 1, "extra_ip_reason" : ""} , "vps_parameters" : { "memory_mb" : 160 , "disk_space_mb" : 4096 , "disk_space_2_mb" : 0} , "pricing_plan_code" : "MIRO1B" , "instantiation_options" : { "control_panel" : "webmin" , "domain_name" : "api.ivan.net.nz" , "password" : "aruxauce27" , "distro" : "lenny"}} , "running_vps_info" : { 
"pings_ok" : true , "current_kernel" : "default" , "current_kernel_canonical" : "2.6.30.5-xenU.i386" , "last_backup_message" : "" , "is_console_login_enabled" : false , "console_public_authorized_keys" : null , "is_backup_running" : false , "is_backups_enabled" : true , "next_backup_time" : { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} , "vps_uptime_s" : 31 , "vps_cpu_time_s" : 6 , "running_state" : "RUNNING" , "is_suspended" : false}}} """ return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _r_orders_order_88833465_api_ivan_net_nz_vps(self, method, url, body, headers): body = """ { "delete_server_response" : { "status_message" : null , "status_code" : 200 , "error_info" : null , "response_type" : "OK" , "human_readable_message" : "Server removed" , "response_display_duration_type" : "REGULAR" , "cancel_messages" : ["api.ivan.net.nz is being shut down." , "A $7.98 credit has been added to your account." , "If you need to un-cancel the server please contact our support team."] } } """ return (httplib.OK, body, {}, httplib.responses[httplib.OK]) def _r_orders_order_88833465_api_ivan_net_nz_vps_running_state(self, method, url, body, headers): body = """ { "put_running_state_response" : { "status_message" : null , "status_code" : 200 , "error_info" : null , "response_type" : "OK" , "human_readable_message" : "api.ivan.net.nz restarted. After the reboot api.ivan.net.nz is pinging OK." , "response_display_duration_type" : "REGULAR" , "is_restarted" : true , "is_pinging" : true , "running_vps_info" : { "pings_ok" : true , "current_kernel" : "default" , "current_kernel_canonical" : "2.6.30.5-xenU.i386" , "last_backup_message" : "" , "is_console_login_enabled" : false , "console_public_authorized_keys" : null , "is_backup_running" : false , "is_backups_enabled" : true , "next_backup_time" : { "ms_since_epoch": 1256446800000, "iso_format" : "2009-10-25T05:00:00Z", "users_tz_offset_ms" : 46800000} , "vps_uptime_s" : 19 , "vps_cpu_time_s" : 5 , "running_state" : "RUNNING" , "is_suspended" : false} , "host_server_info" : { "is_host64_bit_capable" : true , "default_kernel_i386" : "2.6.30.5-xenU.i386" , "default_kernel_x86_64" : "2.6.30.5-xenU.x86_64" , "cpu_model_name" : "Intel(R) Xeon(R) CPU E5506 @ 2.13GHz" , "host_num_cores" : 1 , "host_xen_version" : "3.4.1" , "hostload" : [1.45 , 0.56 , 0.28] , "host_uptime_s" : 3378276 , "host_mem_mb_free" : 51825 , "host_mem_mb_total" : 73719 , "running_vpss" : 34} , "running_state_messages" : null}} """ return (httplib.OK, body, {}, httplib.responses[httplib.OK])
45.22028
380
0.540478
11,950
0.923993
0
0
0
0
0
0
10,062
0.77801
13f9663c3671ee791e1374fc1c550b7438edff48
1,033
py
Python
tests/base_tests/polygon_tests/test_contains.py
lycantropos/gon
b3f811ece5989d1623b17d633a84071fbff6dd69
[ "MIT" ]
10
2020-07-18T12:55:52.000Z
2022-03-20T07:09:10.000Z
tests/base_tests/polygon_tests/test_contains.py
lycantropos/gon
b3f811ece5989d1623b17d633a84071fbff6dd69
[ "MIT" ]
52
2019-07-11T16:59:01.000Z
2022-03-29T19:41:59.000Z
tests/base_tests/polygon_tests/test_contains.py
lycantropos/gon
b3f811ece5989d1623b17d633a84071fbff6dd69
[ "MIT" ]
1
2020-03-22T12:56:07.000Z
2020-03-22T12:56:07.000Z
from typing import Tuple

from hypothesis import given

from gon.base import (Point,
                      Polygon)
from tests.utils import (equivalence,
                         implication)
from . import strategies


@given(strategies.polygons)
def test_vertices(polygon: Polygon) -> None:
    assert all(vertex in polygon for vertex in polygon.border.vertices)
    assert all(vertex in polygon
               for hole in polygon.holes
               for vertex in hole.vertices)


@given(strategies.polygons_with_points)
def test_convex_hull(polygon_with_point: Tuple[Polygon, Point]) -> None:
    polygon, point = polygon_with_point

    assert implication(point in polygon, point in polygon.convex_hull)


@given(strategies.polygons_with_points)
def test_indexing(polygon_with_point: Tuple[Polygon, Point]) -> None:
    polygon, point = polygon_with_point

    before_indexing = point in polygon

    polygon.index()

    after_indexing = point in polygon

    assert equivalence(before_indexing, after_indexing)
26.487179
72
0.708616
0
0
0
0
807
0.78122
0
0
0
0
b913259774170b0ae117752589cf379fac40286c
4,139
py
Python
easyidp/core/tests/test_class_reconsproject.py
HowcanoeWang/EasyIDP
0d0a0df1287e3c15cda17e8e4cdcbe05f21f7272
[ "MIT" ]
null
null
null
easyidp/core/tests/test_class_reconsproject.py
HowcanoeWang/EasyIDP
0d0a0df1287e3c15cda17e8e4cdcbe05f21f7272
[ "MIT" ]
null
null
null
easyidp/core/tests/test_class_reconsproject.py
HowcanoeWang/EasyIDP
0d0a0df1287e3c15cda17e8e4cdcbe05f21f7272
[ "MIT" ]
null
null
null
import os import numpy as np import pytest import easyidp from easyidp.core.objects import ReconsProject, Points from easyidp.io import metashape module_path = os.path.join(easyidp.__path__[0], "io/tests") def test_init_reconsproject(): attempt1 = ReconsProject("agisoft") assert attempt1.software == "metashape" attempt2 = ReconsProject("Metashape") assert attempt2.software == "metashape" with pytest.raises(LookupError): attempt3 = ReconsProject("not_supported_sfm") def test_local2world2local(): attempt1 = ReconsProject("agisoft") attempt1.transform.matrix = np.asarray([[-0.86573098, -0.01489186, 0.08977677, 7.65034123], [0.06972335, 0.44334391, 0.74589315, 1.85910928], [-0.05848325, 0.74899678, -0.43972184, -0.1835615], [0., 0., 0., 1.]], dtype=np.float) w_pos = Points([0.5, 1, 1.5]) l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965]) w_pos_ans = Points([0.4999999999999978, 0.9999999999999993, 1.5]) world_pos = attempt1.local2world(l_pos) np.testing.assert_array_almost_equal(w_pos_ans.values, world_pos.values, decimal=6) local_pos = attempt1.world2local(w_pos) np.testing.assert_array_almost_equal(l_pos.values, local_pos.values, decimal=6) def test_metashape_project_local_points_on_raw(): test_project_folder = easyidp.test_full_path("data/metashape/goya_test.psx") chunks = metashape.open_project(test_project_folder) chunk = chunks[0] # test for single point l_pos = Points([7.960064093299587, 1.3019528769064523, -2.6697181763370965]) p_dis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=False) p_undis_out = chunk.project_local_points_on_raw(l_pos, 0, distortion_correct=True) # pro_api_out = np.asarray([2218.883386793118, 1991.4709388015149]) my_undistort_out = Points([2220.854889556147, 1992.6933680261686]) my_distort_out = Points([2218.47960556, 1992.46356322]) np.testing.assert_array_almost_equal(p_dis_out.values, my_distort_out.values) np.testing.assert_array_almost_equal(p_undis_out.values, my_undistort_out.values) # test for multiple points l_pos_points = Points([[7.960064093299587, 1.3019528769064523, -2.6697181763370965], [7.960064093299587, 1.3019528769064523, -2.6697181763370965]]) p_dis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=False) p_undis_outs = chunk.project_local_points_on_raw(l_pos_points, 0, distortion_correct=True) my_undistort_outs = Points([[2220.854889556147, 1992.6933680261686], [2220.854889556147, 1992.6933680261686]]) my_distort_outs = Points([[2218.47960556, 1992.46356322], [2218.47960556, 1992.46356322]]) np.testing.assert_array_almost_equal(p_dis_outs.values, my_distort_outs.values) np.testing.assert_array_almost_equal(p_undis_outs.values, my_undistort_outs.values) def test_world2crs_and_on_raw_images(): test_project_folder = easyidp.test_full_path("data/metashape/wheat_tanashi.psx") chunks = metashape.open_project(test_project_folder) chunk = chunks[0] local = Points([11.870130675203006, 0.858098777517136, -12.987136541275]) geocentric = Points([-3943658.7087006606, 3363404.124223561, 3704651.3067566575]) geodetic = Points([139.54033578028609, 35.73756358928734, 96.87827569602781], columns=['lon', 'lat', 'alt']) idp_world = chunk.local2world(local) np.testing.assert_array_almost_equal(idp_world.values, geocentric.values, decimal=1) idp_crs = chunk.world2crs(idp_world) np.testing.assert_array_almost_equal(idp_crs.values, geodetic.values) camera_id = 56 # camera_label = 'DJI_0057' camera_pix_ans = Points([2391.7104647010146, 1481.8987733175165]) idp_cam_pix = 
chunk.project_local_points_on_raw(local, camera_id, distortion_correct=True) np.testing.assert_array_almost_equal(camera_pix_ans.values, idp_cam_pix.values)
42.234694
112
0.723846
0
0
0
0
0
0
0
0
302
0.072964
b9139bb412d2bf193e04d2282744a0d621a61a94
2,413
py
Python
withings_api/const.py
tiloc/python_withings_api
64c9706ab70c93e4c54cc843a778ecd3f9960980
[ "MIT" ]
null
null
null
withings_api/const.py
tiloc/python_withings_api
64c9706ab70c93e4c54cc843a778ecd3f9960980
[ "MIT" ]
null
null
null
withings_api/const.py
tiloc/python_withings_api
64c9706ab70c93e4c54cc843a778ecd3f9960980
[ "MIT" ]
null
null
null
"""Constant values.""" STATUS_SUCCESS = (0,) STATUS_AUTH_FAILED = (100, 101, 102, 200, 401) STATUS_INVALID_PARAMS = ( 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 216, 217, 218, 220, 221, 223, 225, 227, 228, 229, 230, 234, 235, 236, 238, 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 254, 260, 261, 262, 263, 264, 265, 266, 267, 271, 272, 275, 276, 283, 284, 285, 286, 287, 288, 290, 293, 294, 295, 297, 300, 301, 302, 303, 304, 321, 323, 324, 325, 326, 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, 347, 348, 349, 350, 351, 352, 353, 380, 381, 382, 400, 501, 502, 503, 504, 505, 506, 509, 510, 511, 523, 532, 3017, 3018, 3019, ) STATUS_UNAUTHORIZED = (214, 277, 2553, 2554, 2555) STATUS_ERROR_OCCURRED = ( 215, 219, 222, 224, 226, 231, 233, 237, 253, 255, 256, 257, 258, 259, 268, 269, 270, 273, 274, 278, 279, 280, 281, 282, 289, 291, 292, 296, 298, 305, 306, 308, 309, 310, 311, 312, 313, 314, 315, 316, 317, 318, 319, 320, 322, 370, 371, 372, 373, 374, 375, 383, 391, 402, 516, 517, 518, 519, 520, 521, 525, 526, 527, 528, 529, 530, 531, 533, 602, 700, 1051, 1052, 1053, 1054, 2551, 2552, 2556, 2557, 2558, 2559, 3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015, 3016, 3020, 3021, 3022, 3023, 3024, 5000, 5001, 5005, 5006, 6000, 6010, 6011, 9000, 10000, ) STATUS_TIMEOUT = (522,) STATUS_BAD_STATE = (524,) STATUS_TOO_MANY_REQUESTS = (601,)
9.690763
50
0.390385
0
0
0
0
0
0
0
0
22
0.009117
b9141fcf42d65abf107a484255f641db4d6e639b
3,249
py
Python
examples/canvas/bezier.py
sirpercival/kivy
29ef854a200e6764aae60ea29324379c69d271a3
[ "MIT" ]
2
2015-10-26T12:35:37.000Z
2020-11-26T12:06:09.000Z
examples/canvas/bezier.py
sirpercival/kivy
29ef854a200e6764aae60ea29324379c69d271a3
[ "MIT" ]
null
null
null
examples/canvas/bezier.py
sirpercival/kivy
29ef854a200e6764aae60ea29324379c69d271a3
[ "MIT" ]
3
2015-07-18T11:03:59.000Z
2018-03-17T01:32:42.000Z
#!/usr/bin/env python
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.slider import Slider
from kivy.graphics import Color, Bezier, Line


class BezierTest(FloatLayout):

    def __init__(self, points=[], loop=False, *args, **kwargs):
        super(BezierTest, self).__init__(*args, **kwargs)
        self.d = 10
        self.points = points
        self.loop = loop
        self.current_point = None

        with self.canvas:
            Color(1.0, 0.0, 0.0)
            self.bezier = Bezier(
                points=self.points,
                segments=150,
                loop=self.loop,
                dash_length=100,
                dash_offset=10)

            Color(1.0, 0.0, 1.0)
            self.line = Line(
                points=self.points + self.points[:2],
                dash_offset=10,
                dash_length=100)

        s = Slider(y=0, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
        s.bind(value=self._set_bezier_dash_offset)
        self.add_widget(s)

        s = Slider(y=50, pos_hint={'x': .3}, size_hint=(.7, None), height=50)
        s.bind(value=self._set_line_dash_offset)
        self.add_widget(s)

    def _set_bezier_dash_offset(self, instance, value):
        # effect to reduce length while increase offset
        self.bezier.dash_length = 100 - value
        self.bezier.dash_offset = value

    def _set_line_dash_offset(self, instance, value):
        # effect to reduce length while increase offset
        self.line.dash_length = 100 - value
        self.line.dash_offset = value

    def on_touch_down(self, touch):
        if self.collide_point(touch.pos[0], touch.pos[1]):
            for i, p in enumerate(list(zip(self.points[::2],
                                           self.points[1::2]))):
                if (abs(touch.pos[0] - self.pos[0] - p[0]) < self.d and
                        abs(touch.pos[1] - self.pos[1] - p[1]) < self.d):
                    self.current_point = i + 1
                    return True
            return super(BezierTest, self).on_touch_down(touch)

    def on_touch_up(self, touch):
        if self.collide_point(touch.pos[0], touch.pos[1]):
            if self.current_point:
                self.current_point = None
                return True
            return super(BezierTest, self).on_touch_up(touch)

    def on_touch_move(self, touch):
        if self.collide_point(touch.pos[0], touch.pos[1]):
            c = self.current_point
            if c:
                self.points[(c - 1) * 2] = touch.pos[0] - self.pos[0]
                self.points[(c - 1) * 2 + 1] = touch.pos[1] - self.pos[1]
                self.bezier.points = self.points
                self.line.points = self.points + self.points[:2]
                return True
            return super(BezierTest, self).on_touch_move(touch)


class Main(App):

    def build(self):
        from math import cos, sin, radians
        x = y = 150
        l = 100
        # Pacman !
        points = [x, y]
        for i in range(45, 360, 45):
            i = radians(i)
            points.extend([x + cos(i) * l, y + sin(i) * l])
        return BezierTest(points=points, loop=True)


if __name__ == '__main__':
    Main().run()
33.84375
82
0.544783
3,024
0.930748
0
0
0
0
0
0
141
0.043398
b915c0a9f384ec67869ab91a6425fc1e66fbe2a2
2,698
py
Python
tests/bugs/core_6489_test.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
tests/bugs/core_6489_test.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
tests/bugs/core_6489_test.py
reevespaul/firebird-qa
98f16f425aa9ab8ee63b86172f959d63a2d76f21
[ "MIT" ]
null
null
null
#coding:utf-8
#
# id:           bugs.core_6489
# title:        User without ALTER ANY ROLE privilege can use COMMENT ON ROLE
# decription:
#                  Test creates two users: one of them has no any rights, second is granted with 'alter any role' privilege.
#                  First user ('junior') must not have ability to add comment to rdb$admin role, but second ('senior') must
#                  be able to set comment to any string and make it null.
#
#                  Confirmed bug on 4.0.0.2384, 3.0.8.33425
#                  Checked on: 4.0.0.2387, 3.0.8.33426 -- all OK.
#
#                  NOTE:
#                  phrase '-Effective user is ...' presents only in FB 4.x and is suppressed here.
#
# tracker_id:   CORE-6489
# min_versions: ['3.0.8']
# versions:     3.0.8
# qmid:         None

import pytest
from firebird.qa import db_factory, isql_act, Action

# version: 3.0.8
# resources: None

substitutions_1 = [('ROLE_DESCR_BLOB_ID .*', ''), ('[\t ]+', ' '), ('(-)?Effective user is.*', '')]

init_script_1 = """"""

db_1 = db_factory(sql_dialect=3, init=init_script_1)

test_script_1 = """
    create or alter user tmp$c6489_junior password '123' using plugin Srp;
    create or alter user tmp$c6489_senior password '456' using plugin Srp;
    commit;
    grant alter any role to user tmp$c6489_senior;
    commit;

    connect '$(DSN)' user tmp$c6489_junior password '123';
    comment on role rdb$admin is 'Comment by tmp$c6489_junior';
    commit;

    connect '$(DSN)' user tmp$c6489_senior password '456';
    comment on role rdb$admin is 'Comment by tmp$c6489_senior';
    commit;

    set list on;
    select r.rdb$description as role_descr_blob_id from rdb$roles r where r.rdb$role_name = upper('rdb$admin');
    commit;

    comment on role rdb$admin is null;
    commit;

    connect '$(DSN)' user 'SYSDBA' password 'masterkey';
    drop user tmp$c6489_junior using plugin Srp;
    drop user tmp$c6489_senior using plugin Srp;
    commit;
"""

act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)

expected_stdout_1 = """
    Comment by tmp$c6489_senior
"""

expected_stderr_1 = """
    Statement failed, SQLSTATE = 28000
    unsuccessful metadata update
    -COMMENT ON RDB$ADMIN failed
    -no permission for ALTER access to ROLE RDB$ADMIN
    -Effective user is TMP$C6489_JUNIOR
"""

@pytest.mark.version('>=3.0.8')
def test_1(act_1: Action):
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.execute()
    assert act_1.clean_expected_stderr == act_1.clean_stderr
    assert act_1.clean_expected_stdout == act_1.clean_stdout
32.902439
124
0.648258
0
0
0
0
292
0.108228
0
0
2,085
0.772795
b915eeed88fbfbe46318454fd21bc9db43d6d639
6,023
py
Python
utils/utils_bbox.py
MasoonZhang/FasterRConvMixer
a7a17d00f716a28a5b301088053e00840c222524
[ "MIT" ]
null
null
null
utils/utils_bbox.py
MasoonZhang/FasterRConvMixer
a7a17d00f716a28a5b301088053e00840c222524
[ "MIT" ]
null
null
null
utils/utils_bbox.py
MasoonZhang/FasterRConvMixer
a7a17d00f716a28a5b301088053e00840c222524
[ "MIT" ]
1
2022-03-14T05:29:42.000Z
2022-03-14T05:29:42.000Z
import numpy as np import torch from torch.nn import functional as F from torchvision.ops import nms def loc2bbox(src_bbox, loc): if src_bbox.size()[0] == 0: return torch.zeros((0, 4), dtype=loc.dtype) src_width = torch.unsqueeze(src_bbox[:, 2] - src_bbox[:, 0], -1) src_height = torch.unsqueeze(src_bbox[:, 3] - src_bbox[:, 1], -1) src_ctr_x = torch.unsqueeze(src_bbox[:, 0], -1) + 0.5 * src_width src_ctr_y = torch.unsqueeze(src_bbox[:, 1], -1) + 0.5 * src_height dx = loc[:, 0::4] dy = loc[:, 1::4] dw = loc[:, 2::4] dh = loc[:, 3::4] ctr_x = dx * src_width + src_ctr_x ctr_y = dy * src_height + src_ctr_y w = torch.exp(dw) * src_width h = torch.exp(dh) * src_height dst_bbox = torch.zeros_like(loc) dst_bbox[:, 0::4] = ctr_x - 0.5 * w dst_bbox[:, 1::4] = ctr_y - 0.5 * h dst_bbox[:, 2::4] = ctr_x + 0.5 * w dst_bbox[:, 3::4] = ctr_y + 0.5 * h return dst_bbox class DecodeBox(): def __init__(self, std, num_classes): self.std = std self.num_classes = num_classes + 1 def frcnn_correct_boxes(self, box_xy, box_wh, input_shape, image_shape): #-----------------------------------------------------------------# # 把y轴放前面是因为方便预测框和图像的宽高进行相乘 #-----------------------------------------------------------------# box_yx = box_xy[..., ::-1] box_hw = box_wh[..., ::-1] input_shape = np.array(input_shape) image_shape = np.array(image_shape) box_mins = box_yx - (box_hw / 2.) box_maxes = box_yx + (box_hw / 2.) boxes = np.concatenate([box_mins[..., 0:1], box_mins[..., 1:2], box_maxes[..., 0:1], box_maxes[..., 1:2]], axis=-1) boxes *= np.concatenate([image_shape, image_shape], axis=-1) return boxes def forward(self, roi_cls_locs, roi_scores, rois, image_shape, input_shape, nms_iou = 0.3, confidence = 0.5): results = [] bs = len(roi_cls_locs) #--------------------------------# # batch_size, num_rois, 4 #--------------------------------# rois = rois.view((bs, -1, 4)) #----------------------------------------------------------------------------------------------------------------# # 对每一张图片进行处理,由于在predict.py的时候,我们只输入一张图片,所以for i in range(len(mbox_loc))只进行一次 #----------------------------------------------------------------------------------------------------------------# for i in range(bs): #----------------------------------------------------------# # 对回归参数进行reshape #----------------------------------------------------------# roi_cls_loc = roi_cls_locs[i] * self.std #----------------------------------------------------------# # 第一维度是建议框的数量,第二维度是每个种类 # 第三维度是对应种类的调整参数 #----------------------------------------------------------# roi_cls_loc = roi_cls_loc.view([-1, self.num_classes, 4]) #-------------------------------------------------------------# # 利用classifier网络的预测结果对建议框进行调整获得预测框 # num_rois, 4 -> num_rois, 1, 4 -> num_rois, num_classes, 4 #-------------------------------------------------------------# roi = rois[i].view((-1, 1, 4)).expand_as(roi_cls_loc) cls_bbox = loc2bbox(roi.contiguous().view((-1, 4)), roi_cls_loc.contiguous().view((-1, 4))) cls_bbox = cls_bbox.view([-1, (self.num_classes), 4]) #-------------------------------------------------------------# # 对预测框进行归一化,调整到0-1之间 #-------------------------------------------------------------# cls_bbox[..., [0, 2]] = (cls_bbox[..., [0, 2]]) / input_shape[1] cls_bbox[..., [1, 3]] = (cls_bbox[..., [1, 3]]) / input_shape[0] roi_score = roi_scores[i] prob = F.softmax(roi_score, dim=-1) results.append([]) for c in range(1, self.num_classes): #--------------------------------# # 取出属于该类的所有框的置信度 # 判断是否大于门限 #--------------------------------# c_confs = prob[:, c] c_confs_m = c_confs > 
confidence if len(c_confs[c_confs_m]) > 0: #-----------------------------------------# # 取出得分高于confidence的框 #-----------------------------------------# boxes_to_process = cls_bbox[c_confs_m, c] confs_to_process = c_confs[c_confs_m] keep = nms( boxes_to_process, confs_to_process, nms_iou ) #-----------------------------------------# # 取出在非极大抑制中效果较好的内容 #-----------------------------------------# good_boxes = boxes_to_process[keep] confs = confs_to_process[keep][:, None] labels = (c - 1) * torch.ones((len(keep), 1)).cuda() if confs.is_cuda else (c - 1) * torch.ones((len(keep), 1)) #-----------------------------------------# # 将label、置信度、框的位置进行堆叠。 #-----------------------------------------# c_pred = torch.cat((good_boxes, confs, labels), dim=1).cpu().numpy() # 添加进result里 results[-1].extend(c_pred) if len(results[-1]) > 0: results[-1] = np.array(results[-1]) box_xy, box_wh = (results[-1][:, 0:2] + results[-1][:, 2:4])/2, results[-1][:, 2:4] - results[-1][:, 0:2] results[-1][:, :4] = self.frcnn_correct_boxes(box_xy, box_wh, input_shape, image_shape) return results
45.628788
136
0.381205
5,419
0.843162
0
0
0
0
0
0
2,073
0.322546
b9160a13d47cfdacbbfdb45a0590f6674809ddbe
96
py
Python
lib/python/test/__init__.py
woozhijun/cat
3d523202c38e37b1a2244b26d4336ebbea5db001
[ "Apache-2.0" ]
17,318
2015-01-03T03:02:07.000Z
2022-03-31T02:43:28.000Z
lib/python/test/__init__.py
MrCoderYu/cat
674bd9ab70267dd6fc74879e4344af77397f4acd
[ "Apache-2.0" ]
1,162
2015-01-04T08:23:49.000Z
2022-03-31T15:38:04.000Z
lib/python/test/__init__.py
MrCoderYu/cat
674bd9ab70267dd6fc74879e4344af77397f4acd
[ "Apache-2.0" ]
5,520
2015-01-03T03:02:07.000Z
2022-03-31T16:16:56.000Z
#!/usr/bin/env python
# encoding: utf-8

import sys

reload(sys)
sys.setdefaultencoding("utf-8")
13.714286
31
0.729167
0
0
0
0
0
0
0
0
45
0.46875
b918984647c67e09bce945847905654d35530277
15,886
py
Python
tests/test_pyclipper.py
odidev/pyclipper
3de54fa4c4d5b8efeede364fbe69336f935f88f2
[ "MIT" ]
null
null
null
tests/test_pyclipper.py
odidev/pyclipper
3de54fa4c4d5b8efeede364fbe69336f935f88f2
[ "MIT" ]
null
null
null
tests/test_pyclipper.py
odidev/pyclipper
3de54fa4c4d5b8efeede364fbe69336f935f88f2
[ "MIT" ]
null
null
null
#!/usr/bin/python """ Tests for Pyclipper wrapper library. """ from __future__ import print_function from unittest2 import TestCase, main import sys if sys.version_info < (3,): integer_types = (int, long) else: integer_types = (int,) import pyclipper # Example polygons from http://www.angusj.com/delphi/clipper.php PATH_SUBJ_1 = [[180, 200], [260, 200], [260, 150], [180, 150]] # square, orientation is False PATH_SUBJ_2 = [[215, 160], [230, 190], [200, 190]] # triangle PATH_CLIP_1 = [[190, 210], [240, 210], [240, 130], [190, 130]] # square PATH_SIGMA = [[300, 400], [100, 400], [200, 300], [100, 200], [300, 200]] # greek letter sigma PATTERN = [[4, -6], [6, -6], [-4, 6], [-6, 6]] INVALID_PATH = [[1, 1], ] # less than 2 vertices class TestPyclipperModule(TestCase): def test_has_classes(self): self.assertTrue(hasattr(pyclipper, 'Pyclipper')) self.assertTrue(hasattr(pyclipper, 'PyclipperOffset')) def test_has_namespace_methods(self): for method in ('Orientation', 'Area', 'PointInPolygon', 'SimplifyPolygon', 'SimplifyPolygons', 'CleanPolygon', 'CleanPolygons', 'MinkowskiSum', 'MinkowskiSum2', 'MinkowskiDiff', 'PolyTreeToPaths', 'ClosedPathsFromPolyTree', 'OpenPathsFromPolyTree', 'ReversePath', 'ReversePaths'): self.assertTrue(hasattr(pyclipper, method)) class TestNamespaceMethods(TestCase): def setUp(self): pyclipper.SCALING_FACTOR = 1 def test_orientation(self): self.assertFalse(pyclipper.Orientation(PATH_SUBJ_1)) self.assertTrue(pyclipper.Orientation(PATH_SUBJ_1[::-1])) def test_area(self): # area less than 0 because orientation is False area_neg = pyclipper.Area(PATH_SUBJ_1) area_pos = pyclipper.Area(PATH_SUBJ_1[::-1]) self.assertLess(area_neg, 0) self.assertGreater(area_pos, 0) self.assertEqual(abs(area_neg), area_pos) def test_point_in_polygon(self): # on polygon self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1) # in polygon self.assertEqual(pyclipper.PointInPolygon((200, 180), PATH_SUBJ_1), 1) # outside of polygon self.assertEqual(pyclipper.PointInPolygon((500, 500), PATH_SUBJ_1), 0) def test_minkowski_sum(self): solution = pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False) self.assertGreater(len(solution), 0) def test_minkowski_sum2(self): solution = pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False) self.assertGreater(len(solution), 0) def test_minkowski_diff(self): solution = pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2) self.assertGreater(len(solution), 0) def test_reverse_path(self): solution = pyclipper.ReversePath(PATH_SUBJ_1) manualy_reversed = PATH_SUBJ_1[::-1] self.check_reversed_path(solution, manualy_reversed) def test_reverse_paths(self): solution = pyclipper.ReversePaths([PATH_SUBJ_1]) manualy_reversed = [PATH_SUBJ_1[::-1]] self.check_reversed_path(solution[0], manualy_reversed[0]) def check_reversed_path(self, path_1, path_2): if len(path_1) is not len(path_2): return False for i in range(len(path_1)): self.assertEqual(path_1[i][0], path_2[i][0]) self.assertEqual(path_1[i][1], path_2[i][1]) def test_simplify_polygon(self): solution = pyclipper.SimplifyPolygon(PATH_SUBJ_1) self.assertEqual(len(solution), 1) def test_simplify_polygons(self): solution = pyclipper.SimplifyPolygons([PATH_SUBJ_1]) solution_single = pyclipper.SimplifyPolygon(PATH_SUBJ_1) self.assertEqual(len(solution), 1) self.assertEqual(len(solution), len(solution_single)) _do_solutions_match(solution, solution_single) def test_clean_polygon(self): solution = pyclipper.CleanPolygon(PATH_CLIP_1) self.assertEqual(len(solution), len(PATH_CLIP_1)) def test_clean_polygons(self): solution = 
pyclipper.CleanPolygons([PATH_CLIP_1]) self.assertEqual(len(solution), 1) self.assertEqual(len(solution[0]), len(PATH_CLIP_1)) class TestFilterPyPolyNode(TestCase): def setUp(self): tree = pyclipper.PyPolyNode() tree.Contour.append(PATH_CLIP_1) tree.IsOpen = True child = pyclipper.PyPolyNode() child.IsOpen = False child.Parent = tree child.Contour = PATH_SUBJ_1 tree.Childs.append(child) child = pyclipper.PyPolyNode() child.IsOpen = True child.Parent = tree child.Contour = PATH_SUBJ_2 tree.Childs.append(child) child2 = pyclipper.PyPolyNode() child2.IsOpen = False child2.Parent = child child2.Contour = PATTERN child.Childs.append(child2) # empty contour should not # be included in filtered results child2 = pyclipper.PyPolyNode() child2.IsOpen = False child2.Parent = child child2.Contour = [] child.Childs.append(child2) self.tree = tree def test_polytree_to_paths(self): paths = pyclipper.PolyTreeToPaths(self.tree) self.check_paths(paths, 4) def test_closed_paths_from_polytree(self): paths = pyclipper.ClosedPathsFromPolyTree(self.tree) self.check_paths(paths, 2) def test_open_paths_from_polytree(self): paths = pyclipper.OpenPathsFromPolyTree(self.tree) self.check_paths(paths, 2) def check_paths(self, paths, expected_nr): self.assertEqual(len(paths), expected_nr) self.assertTrue(all((len(path) > 0 for path in paths))) class TestPyclipperAddPaths(TestCase): def setUp(self): pyclipper.SCALING_FACTOR = 1 self.pc = pyclipper.Pyclipper() def test_add_path(self): # should not raise an exception self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP) def test_add_paths(self): # should not raise an exception self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT) def test_add_path_invalid_path(self): self.assertRaises(pyclipper.ClipperException, self.pc.AddPath, INVALID_PATH, pyclipper.PT_CLIP, True) def test_add_paths_invalid_path(self): self.assertRaises(pyclipper.ClipperException, self.pc.AddPaths, [INVALID_PATH, INVALID_PATH], pyclipper.PT_CLIP, True) try: self.pc.AddPaths([INVALID_PATH, PATH_CLIP_1], pyclipper.PT_CLIP) self.pc.AddPaths([PATH_CLIP_1, INVALID_PATH], pyclipper.PT_CLIP) except pyclipper.ClipperException: self.fail("add_paths raised ClipperException when not all paths were invalid") class TestClassProperties(TestCase): def check_property_assignment(self, pc, prop_name, values): for val in values: setattr(pc, prop_name, val) self.assertEqual(getattr(pc, prop_name), val) def test_pyclipper_properties(self): pc = pyclipper.Pyclipper() for prop_name in ('ReverseSolution', 'PreserveCollinear', 'StrictlySimple'): self.check_property_assignment(pc, prop_name, [True, False]) def test_pyclipperoffset_properties(self): for factor in range(6): pyclipper.SCALING_FACTOR = 10 ** factor pc = pyclipper.PyclipperOffset() for prop_name in ('MiterLimit', 'ArcTolerance'): self.check_property_assignment(pc, prop_name, [2.912, 132.12, 12, -123]) class TestPyclipperExecute(TestCase): def setUp(self): pyclipper.SCALING_FACTOR = 1 self.pc = pyclipper.Pyclipper() self.add_default_paths(self.pc) self.default_args = [pyclipper.CT_INTERSECTION, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD] @staticmethod def add_default_paths(pc): pc.AddPath(PATH_CLIP_1, pyclipper.PT_CLIP) pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], pyclipper.PT_SUBJECT) @staticmethod def add_paths(pc, clip_path, subj_paths, addend=None, multiplier=None): pc.AddPath(_modify_vertices(clip_path, addend=addend, multiplier=multiplier), pyclipper.PT_CLIP) for subj_path in subj_paths: pc.AddPath(_modify_vertices(subj_path, 
addend=addend, multiplier=multiplier), pyclipper.PT_SUBJECT) def test_get_bounds(self): bounds = self.pc.GetBounds() self.assertIsInstance(bounds, pyclipper.PyIntRect) self.assertEqual(bounds.left, 180) self.assertEqual(bounds.right, 260) self.assertEqual(bounds.top, 130) self.assertEqual(bounds.bottom, 210) def test_execute(self): solution = self.pc.Execute(*self.default_args) self.assertEqual(len(solution), 2) def test_execute2(self): solution = self.pc.Execute2(*self.default_args) self.assertIsInstance(solution, pyclipper.PyPolyNode) self.check_pypolynode(solution) def test_execute_empty(self): pc = pyclipper.Pyclipper() with self.assertRaises(pyclipper.ClipperException): pc.Execute(pyclipper.CT_UNION, pyclipper.PFT_NONZERO, pyclipper.PFT_NONZERO) def test_clear(self): self.pc.Clear() with self.assertRaises(pyclipper.ClipperException): self.pc.Execute(*self.default_args) def test_exact_results(self): """ Test whether coordinates passed into the library are returned exactly, if they are not affected by the operation. """ pc = pyclipper.Pyclipper() # Some large triangle. path = [[[0, 1], [0, 0], [15 ** 15, 0]]] pc.AddPaths(path, pyclipper.PT_SUBJECT, True) result = pc.Execute(pyclipper.PT_CLIP, pyclipper.PFT_EVENODD, pyclipper.PFT_EVENODD) assert result == path def check_pypolynode(self, node): self.assertTrue(len(node.Contour) == 0 or len(node.Contour) > 2) # check vertex coordinate, should not be an iterable (in that case # that means that node.Contour is a list of paths, should be path if node.Contour: self.assertFalse(hasattr(node.Contour[0][0], '__iter__')) for child in node.Childs: self.check_pypolynode(child) class TestPyclipperOffset(TestCase): def setUp(self): pyclipper.SCALING_FACTOR = 1 @staticmethod def add_path(pc, path): pc.AddPath(path, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON) def test_execute(self): pc = pyclipper.PyclipperOffset() self.add_path(pc, PATH_CLIP_1) solution = pc.Execute(2.0) self.assertIsInstance(solution, list) self.assertEqual(len(solution), 1) def test_execute2(self): pc = pyclipper.PyclipperOffset() self.add_path(pc, PATH_CLIP_1) solution = pc.Execute2(2.0) self.assertIsInstance(solution, pyclipper.PyPolyNode) self.assertEqual(len(pyclipper.OpenPathsFromPolyTree(solution)), 0) self.assertEqual(len(pyclipper.ClosedPathsFromPolyTree(solution)), 1) def test_clear(self): pc = pyclipper.PyclipperOffset() self.add_path(pc, PATH_CLIP_1) pc.Clear() solution = pc.Execute(2.0) self.assertIsInstance(solution, list) self.assertEqual(len(solution), 0) class TestScalingFactorWarning(TestCase): def setUp(self): pyclipper.SCALING_FACTOR = 2. 
self.pc = pyclipper.Pyclipper() def test_orientation(self): with self.assertWarns(DeprecationWarning): pyclipper.Orientation(PATH_SUBJ_1) def test_area(self): with self.assertWarns(DeprecationWarning): pyclipper.Area(PATH_SUBJ_1) def test_point_in_polygon(self): with self.assertWarns(DeprecationWarning): self.assertEqual(pyclipper.PointInPolygon((180, 200), PATH_SUBJ_1), -1) def test_minkowski_sum(self): with self.assertWarns(DeprecationWarning): pyclipper.MinkowskiSum(PATTERN, PATH_SIGMA, False) def test_minkowski_sum2(self): with self.assertWarns(DeprecationWarning): pyclipper.MinkowskiSum2(PATTERN, [PATH_SIGMA], False) def test_minkowski_diff(self): with self.assertWarns(DeprecationWarning): pyclipper.MinkowskiDiff(PATH_SUBJ_1, PATH_SUBJ_2) def test_add_path(self): with self.assertWarns(DeprecationWarning): self.pc.AddPath(PATH_CLIP_1, poly_type=pyclipper.PT_CLIP) def test_add_paths(self): with self.assertWarns(DeprecationWarning): self.pc.AddPaths([PATH_SUBJ_1, PATH_SUBJ_2], poly_type=pyclipper.PT_SUBJECT) class TestScalingFunctions(TestCase): scale = 2 ** 31 path = [(0, 0), (1, 1)] paths = [path] * 3 def test_value_scale_to(self): value = 0.5 res = pyclipper.scale_to_clipper(value, self.scale) assert isinstance(res, integer_types) assert res == int(value * self.scale) def test_value_scale_from(self): value = 1000000000000 res = pyclipper.scale_from_clipper(value, self.scale) assert isinstance(res, float) # Convert to float to get "normal" division in Python < 3. assert res == float(value) / self.scale def test_path_scale_to(self): res = pyclipper.scale_to_clipper(self.path) assert len(res) == len(self.path) assert all(isinstance(i, list) for i in res) assert all(isinstance(j, integer_types) for i in res for j in i) def test_path_scale_from(self): res = pyclipper.scale_from_clipper(self.path) assert len(res) == len(self.path) assert all(isinstance(i, list) for i in res) assert all(isinstance(j, float) for i in res for j in i) def test_paths_scale_to(self): res = pyclipper.scale_to_clipper(self.paths) assert len(res) == len(self.paths) assert all(isinstance(i, list) for i in res) assert all(isinstance(j, list) for i in res for j in i) assert all(isinstance(k, integer_types) for i in res for j in i for k in j) def test_paths_scale_from(self): res = pyclipper.scale_from_clipper(self.paths) assert len(res) == len(self.paths) assert all(isinstance(i, list) for i in res) assert all(isinstance(j, list) for i in res for j in i) assert all(isinstance(k, float) for i in res for j in i for k in j) class TestNonStandardNumbers(TestCase): def test_sympyzero(self): try: from sympy import Point2D from sympy.core.numbers import Zero except ImportError: self.skipTest("Skipping, sympy not available") path = [(0,0), (0,1)] path = [Point2D(v) for v in [(0,0), (0,1)]] assert type(path[0].x) == Zero path = pyclipper.scale_to_clipper(path) assert path == [[0, 0], [0, 2147483648]] def _do_solutions_match(paths_1, paths_2, factor=None): if len(paths_1) != len(paths_2): return False paths_1 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_1] paths_2 = [_modify_vertices(p, multiplier=factor, converter=round if factor else None) for p in paths_2] return all(((p_1 in paths_2) for p_1 in paths_1)) def _modify_vertices(path, addend=0.0, multiplier=1.0, converter=None): path = path[:] def convert_coordinate(c): if multiplier is not None: c *= multiplier if addend is not None: c += addend if converter: c = converter(c) return c return [[convert_coordinate(c) for c in v] for 
v in path] def run_tests(): main() if __name__ == '__main__': run_tests()
34.914286
111
0.660078
14,265
0.89796
0
0
622
0.039154
0
0
1,242
0.078182
b918bb151415e9d667f5286334be578684cceb18
10,754
py
Python
espoem_facts/facts.py
emre/espoem_facts
0d7164dcfe8a82e1f142929b1e00c3a85f29f101
[ "MIT" ]
null
null
null
espoem_facts/facts.py
emre/espoem_facts
0d7164dcfe8a82e1f142929b1e00c3a85f29f101
[ "MIT" ]
null
null
null
espoem_facts/facts.py
emre/espoem_facts
0d7164dcfe8a82e1f142929b1e00c3a85f29f101
[ "MIT" ]
1
2018-07-19T18:44:09.000Z
2018-07-19T18:44:09.000Z
FACTS = ['espoem multiplied by zero still equals espoem.', 'There is no theory of evolution. Just a list of creatures espoem has allowed to live.', 'espoem does not sleep. He waits.', 'Alexander Graham Bell had three missed calls from espoem when he invented the telephone.', 'espoem is the reason why Waldo is hiding.', 'espoem can slam a revolving door.', "espoem isn't lifting himself up when doing a pushup; he's pushing the earth down.", "espoem' hand is the only hand that can beat a Royal Flush.", 'espoem made a Happy Meal cry.', "espoem doesn't need Twitter; he's already following you.", 'espoem once won an underwater breathing contest with a fish.While urinating, espoem is easily capable of welding titanium.', 'In an act of great philanthropy, espoem made a generous donation to the American Cancer Society. He donated 6,000 dead bodies for scientific research.', 'espoem once one a game of connect four in 3 moves.', "Google won't search for espoem because it knows you don't find espoem, he finds you.", 'espoem? favourite cut of meat is the roundhouse.', 'It is scientifically impossible for espoem to have had a mortal father. The most popular theory is that he went back in time and fathered himself.', 'espoem had to stop washing his clothes in the ocean. The tsunamis were killing people.', 'Pluto is actually an orbiting group of British soldiers from the American Revolution who entered space after the espoem gave them a roundhouse kick to the face.', 'In the Words of Julius Caesar, Veni, Vidi, Vici, espoem. Translation: I came, I saw, and I was roundhouse-kicked inthe face by espoem.', "espoem doesn't look both ways before he crosses the street... he just roundhouses any cars that get too close.", 'Human cloning is outlawed because of espoem, because then it would be possible for a espoem roundhouse kick to meet another espoem roundhouse kick. Physicists theorize that this contact would end the universe.', 'Using his trademark roundhouse kick, espoem once made a fieldgoal in RJ Stadium in Tampa Bay from the 50 yard line of Qualcomm stadium in San Diego.', 'espoem played Russian Roulette with a fully loaded gun and won.', "espoem roundhouse kicks don't really kill people. They wipe out their entire existence from the space-time continuum.", "espoem' testicles do not produce sperm. They produce tiny white ninjas that recognize only one mission: seek and destroy.", 'MacGyver immediately tried to make a bomb out of some Q-Tips and Gatorade, but espoem roundhouse-kicked him in the solar plexus. MacGyver promptly threw up his own heart.', 'Not everyone that espoem is mad at gets killed. Some get away. They are called astronauts.', 'espoem can drink an entire gallon of milk in thirty-seven seconds.', 'If you spell espoem in Scrabble, you win. Forever.', "When you say no one's perfect, espoem takes this as a personal insult.", "espoem invented Kentucky Fried Chicken's famous secret recipe with eleven herbs and spices. Nobody ever mentions the twelfth ingredient: Fear.", 'espoem can skeletize a cow in two minutes.', 'espoem eats lightning and shits out thunder.', 'In a fight between Batman and Darth Vader, the winner would be espoem.', "The phrase 'dead ringer' refers to someone who sits behind espoem in a movie theater and forgets to turn their cell phone off.", "It is said that looking into espoem' eyes will reveal your future. 
Unfortunately, everybody's future is always the same: death by a roundhouse-kick to the face.", "espoem's log statements are always at the FATAL level.", 'espoem can win in a game of Russian roulette with a fully loaded gun.', 'Nothing can escape the gravity of a black hole, except for espoem. espoem eats black holes. They taste like chicken.', 'There is no theory of evolution, just a list of creatures espoem allows to live.', 'A study showed the leading causes of death in the United States are: 1. Heart disease, 2. espoem, 3. Cancer', 'Everybody loves Raymond. Except espoem.', 'Noah was the only man notified before espoem relieved himself in the Atlantic Ocean.', 'In a tagteam match, espoem was teamed with Hulk Hogan against King Kong Bundy and Andre The Giant. He pinned all 3 at the same time.', "Nobody doesn't like Sara Lee. Except espoem.", "espoem never has to wax his skis because they're always slick with blood.", 'espoem ordered a Big Mac at Burger King, and got one.', 'espoem owns a chain of fast-food restaurants throughout the southwest. They serve nothing but barbecue-flavored ice cream and Hot Pockets.', "espoem's database has only one table, 'Kick', which he DROPs frequently.", "espoem built a time machine and went back in time to stop the JFK assassination. As Oswald shot, espoem met all three bullets with his beard, deflecting them. JFK's head exploded out of sheer amazement.", 'espoem can write infinite recursion functions, and have them return.', 'When espoem does division, there are no remainders.', 'We live in an expanding universe. All of it is trying to get away from espoem.', 'espoem cannot love, he can only not kill.', 'espoem knows the value of NULL, and he can sort by it too.', 'There is no such thing as global warming. espoem was cold, so he turned the sun up.', 'The best-laid plans of mice and men often go awry. Even the worst-laid plans of espoem come off without a hitch.', 'When espoem goes to donate blood, he declines the syringe, and instead requests a hand gun and a bucket.', 'espoem can solve the Towers of Hanoi in one move.', 'All roads lead to espoem. And by the transitive property, a roundhouse kick to the face.', 'If you were somehow able to land a punch on espoem your entire arm would shatter upon impact. This is only in theory, since, come on, who in their right mind would try this?', 'One time, at band camp, espoem ate a percussionist.', 'Product Owners never argue with espoem after he demonstrates the DropKick feature.', 'espoem can read from an input stream.', 'The original draft of The Lord of the Rings featured espoem instead of Frodo Baggins. It was only 5 pages long, as espoem roundhouse-kicked Sauron?s ass halfway through the first chapter.', "If, by some incredible space-time paradox, espoem would ever fight himself, he'd win. Period.", 'When taking the SAT, write espoem for every answer. You will score over 8000.', 'When in a bar, you can order a drink called a espoem. It is also known as a Bloody Mary, if your name happens to be Mary.', 'espoem causes the Windows Blue Screen of Death.', 'espoem went out of an infinite loop.', 'When Bruce Banner gets mad, he turns into the Hulk. When the Hulk gets mad, he turns into espoem.', 'espoem insists on strongly-typed programming languages.', 'espoem can blow bubbles with beef jerky.', "espoem is widely predicted to be first black president. If you're thinking to yourself, But espoem isn't black, then you are dead wrong. 
And stop being a racist.", 'espoem once went skydiving, but promised never to do it again. One Grand Canyon is enough.', "Godzilla is a Japanese rendition of espoem's first visit to Tokyo.", 'espoem has the greatest Poker-Face of all time. He won the 1983 World Series of Poker, despite holding only a Joker, a Get out of Jail Free Monopoloy card, a 2 of clubs, 7 of spades and a green #4 card from the game UNO.', 'Teenage Mutant Ninja Turtles is based on a true story: espoem once swallowed a turtle whole, and when he crapped it out, the turtle was six feet tall and had learned karate.', "If you try to kill -9 espoem's programs, it backfires.", "espoem' Penis is a third degree blackbelt, and an honorable 32nd-degree mason.", 'In ancient China there is a legend that one day a child will be born from a dragon, grow to be a man, and vanquish evil from the land. That man is not espoem, because espoem killed that man.', 'espoem can dereference NULL.', 'All arrays espoem declares are of infinite size, because espoem knows no bounds.', 'The pen is mighter than the sword, but only if the pen is held by espoem.', "espoem doesn't step on toes. espoem steps on necks.", 'The truth will set you free. Unless espoem has you, in which case, forget it buddy!', 'Simply by pulling on both ends, espoem can stretch diamonds back into coal.', 'espoem does not style his hair. It lays perfectly in place out of sheer terror.', 'espoem once participated in the running of the bulls. He walked.', 'Never look a gift espoem in the mouth, because he will bite your damn eyes off.', "If you Google search espoem getting his ass kicked you will generate zero results. It just doesn't happen.", 'espoem can unit test entire applications with a single assert.', 'On his birthday, espoem randomly selects one lucky child to be thrown into the sun.', "Little known medical fact: espoem invented the Caesarean section when he roundhouse-kicked his way out of his monther's womb.", "No one has ever spoken during review of espoem' code and lived to tell about it.", 'The First rule of espoem is: you do not talk about espoem.', 'Fool me once, shame on you. Fool espoem once and he will roundhouse kick you in the face.', "espoem doesn't read books. He stares them down until he gets the information he wants.", "The phrase 'balls to the wall' was originally conceived to describe espoem entering any building smaller than an aircraft hangar.", "Someone once tried to tell espoem that roundhouse kicks aren't the best way to kick someone. This has been recorded by historians as the worst mistake anyone has ever made.", 'Along with his black belt, espoem often chooses to wear brown shoes. No one has DARED call him on it. Ever.', 'Whiteboards are white because espoem scared them that way.', 'espoem drives an ice cream truck covered in human skulls.', "Every time espoem smiles, someone dies. Unless he smiles while he's roundhouse kicking someone in the face. Then two people die."]
102.419048
232
0.702343
0
0
0
0
0
0
0
0
9,610
0.893621
b918c27f2b168efd69908773e44475244b686dd0
3,379
py
Python
imageproc_OE_IF_quant/2_annotate_extracted_cells.py
hshayya/2022_Shayya_UPR_Guidance
b9a305a147a105c3ac9c0173e06b94f66e4a6102
[ "MIT" ]
null
null
null
imageproc_OE_IF_quant/2_annotate_extracted_cells.py
hshayya/2022_Shayya_UPR_Guidance
b9a305a147a105c3ac9c0173e06b94f66e4a6102
[ "MIT" ]
null
null
null
imageproc_OE_IF_quant/2_annotate_extracted_cells.py
hshayya/2022_Shayya_UPR_Guidance
b9a305a147a105c3ac9c0173e06b94f66e4a6102
[ "MIT" ]
null
null
null
import xml.etree.ElementTree as ET
import csv
import os
import re
from ij import IJ
from loci.plugins.in import ImporterOptions
from loci.plugins import BF
from ij.plugin import ImagesToStack
from ij import io

#Records metadata (x,y location) for cells that were extracted with 1_find_extract_cells.py
#metadata will be used in subsequent analysis to cluster cells from similar locations on the section -> semi-quantiative, local, analysis

def parse_cellcounter_to_dict(fpath):
    '''Parse Cell-Counter Xml file to Dictionary

    Inputs:
    fpath (str) path to xml file on disk

    Values:
    (dict). Keys 'x_cal', 'y_cal' = (float) calibrations in each axis. Keys '1'-'8' = (lists) of tuples containing cell positions in the form (x,y)
    '''
    tree = ET.parse(fpath)
    cells_dict = {}
    cells_dict['x_cal'] = float(tree.find('./Image_Properties/X_Calibration').text)
    cells_dict['y_cal'] = float(tree.find('./Image_Properties/Y_Calibration').text)

    rt = tree.find('Marker_Data') #re-root the tree
    for type_ in rt.iter('Marker_Type'):
        cells = []
        for marker_ in type_.iter('Marker'):
            cells.append((int(marker_[0].text), int(marker_[1].text))) #
        cells_dict[type_.find('Type').text] = cells
    return cells_dict

#Load Xml Files
xml_locs = ['/path/to/xml/files'] #same as used in find_extract_cells
xml_files = [os.path.join(base_, f) for base_ in xml_locs for f in os.listdir(base_) if f[-3:] == 'xml' and f[0] != '.']

#Work through each xml file
f_out_path = '/path/to/annotation/out.tsv'
with open(f_out_path,'w') as fout:
    fout.write('\t'.join(['cell','x_um','y_um']))
    for e,xml_ in enumerate(xml_files):
        print 'Working on file: ' + os.path.split(xml_)[1] + '...' + str(e+1) + '/' + str(len(xml_files))

        #Find the orig .nd2 file, copied from find_extract_cells.py, see that code for more details.
        orig_f_name = re.search('(?<=CellCounter_).*(?=\\-Downsampled)', os.path.split(xml_)[1]).group() + '.nd2'
        search_dir = '/'.join(os.path.split(xml_)[0].split('/')[:-1])
        files_found = [os.path.join(root, f) for (root, dirs, files) in os.walk(search_dir) for f in files if f == orig_f_name]
        if len(files_found) == 1:
            fullres_image = files_found[0]
        else:
            print "Could not find fullres image."
            raise ValueError('Found 0 or >1 matching file')

        #Generate the original inputs that were passed to extract_cells
        input_item = (re.search('(?<=_).*',orig_f_name[:-4]).group(), {'fullres':fullres_image, 'counter':parse_cellcounter_to_dict(xml_)})
        input_dict = input_item
        types_of_interest={'7':'tdtom','8':'gfp'}

        #Copied from the "Extract Cells", recovering positional info and writing to disk instead of extracting cell -> small image.
        anim, vals = input_dict

        #Loop through Cells and Annotate.
        for cell_type, cell_label in types_of_interest.iteritems():
            print 'Working on cell_type ' + cell_label
            for i in range(len(vals['counter'][cell_type])):
                print 'Iteration ' + str(i+1) + '/' + str(len(vals['counter'][cell_type]))

                #Convert Px Downsampled -> Px Full Res
                x_full_px = vals['counter'][cell_type][i][0] * vals['counter']['x_cal'] #in um
                y_full_px = vals['counter'][cell_type][i][1] * vals['counter']['y_cal'] #in um

                #Write Information
                out_title = '_'.join([anim, cell_label, str(i)])
                fout.write('\n' + '\t'.join([out_title, str(x_full_px), str(y_full_px)]))

#Final tsv of form cell_label,x,y.
40.710843
137
0.693992
0
0
0
0
0
0
0
0
1,494
0.442143
b919ab13ac46e733a617fc950c062280033c20b8
439
py
Python
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
[ "MIT" ]
5
2020-05-25T03:08:09.000Z
2022-02-27T05:57:28.000Z
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
[ "MIT" ]
1
2020-12-22T01:35:36.000Z
2022-01-28T01:51:06.000Z
MAEnv/env_SingleCatchPigs/test_SingleCatchPigs.py
Abluceli/Multi-agent-Reinforcement-Learning-Algorithms
15810a559e2f2cf9e5fcb158c083f9e9dd6012fc
[ "MIT" ]
1
2020-05-06T01:56:55.000Z
2020-05-06T01:56:55.000Z
from env_SingleCatchPigs import EnvSingleCatchPigs
import random

env = EnvSingleCatchPigs(7)
max_iter = 10000
env.set_agent_at([2, 2], 0)
env.set_pig_at([4, 4], 0)

for i in range(max_iter):
    print("iter= ", i)
    env.render()
    action = random.randint(0, 4)
    print('action is', action)
    reward, done = env.step(action)
    print('reward', reward, 'done', done)
    if reward > 0:
        print('catch the pig', reward, done)
24.388889
50
0.658314
0
0
0
0
0
0
0
0
48
0.109339
b91c1523d70c0416c1afa5a4c6a25a3d2f1e426b
3,417
py
Python
eust/tables/data.py
rasmuse/eust
2138076d52c0ffa20fba10e4e0319dd50c4e8a91
[ "MIT" ]
1
2021-03-14T04:06:02.000Z
2021-03-14T04:06:02.000Z
eust/tables/data.py
rasmuse/eust
2138076d52c0ffa20fba10e4e0319dd50c4e8a91
[ "MIT" ]
9
2019-04-29T09:01:39.000Z
2021-11-15T17:48:36.000Z
eust/tables/data.py
rasmuse/eust
2138076d52c0ffa20fba10e4e0319dd50c4e8a91
[ "MIT" ]
1
2019-10-23T08:56:33.000Z
2019-10-23T08:56:33.000Z
# -*- coding: utf-8 -*-

import re
import gzip

import pandas as pd
import numpy as np

from eust.core import _download_file, conf


_DIMENSION_NAME_RE = re.compile(r"^[a-z_0-9]+$")
_YEAR_RE = re.compile(r"^(1|2)[0-9]{3}$")


def _is_valid_dimension_name(s: str) -> bool:
    return bool(_DIMENSION_NAME_RE.match(s))


def _split_values_flags(series: pd.Series) -> pd.DataFrame:
    split = series.str.split(" ")
    df = pd.DataFrame(
        {
            "value": split.apply(lambda l: l[0] if l else None),
            "flag": split.apply(lambda l: l[1] if l and len(l) > 1 else None),
        }
    )
    return df


def _set_multiindex_dtype(index, level, type_):
    index_df = index.to_frame()
    index_df[level] = index_df[level].astype(type_)
    new_index = index_df.set_index(index.names).index
    return new_index


def _read_tsv(path_or_buffer) -> pd.DataFrame:
    d = pd.read_csv(path_or_buffer, sep="\t", header=0, dtype=str)

    top_left_cell = d.columns[0]

    row_dimension_names, header_dimension_name = top_left_cell.split("\\")
    row_dimension_names = row_dimension_names.split(",")
    index_data = d[top_left_cell]
    del d[top_left_cell]
    assert len(set(index_data)) == len(index_data)  # no duplicates
    assert len(row_dimension_names) >= 1
    d.columns.name = header_dimension_name

    index_data = index_data.apply(lambda s: s.split(","))
    d.index = pd.MultiIndex.from_arrays(
        list(zip(*index_data)),
        names=row_dimension_names,
    )

    # cannot handle multidimensional column labels
    d = d.stack()
    assert set(d.apply(type)) == {str}

    assert isinstance(d, pd.Series), d.columns

    assert all(map(_is_valid_dimension_name, d.index.names))

    d.index.set_levels(
        [level.str.strip() for level in d.index.levels], inplace=True
    )

    d = _split_values_flags(d)

    d.loc[d["value"] == ":", "value"] = np.nan
    d["value"] = d["value"].astype(float)

    if "time" in d.index.names:
        time_strings = d.index.unique("time")
        matches_year = (_YEAR_RE.match(s) for s in time_strings)
        if all(matches_year):
            d.index = _set_multiindex_dtype(d.index, "time", int)

    d = d.sort_index()

    return d


_TSV_GZ_FILENAME = "data.tsv.gz"
_HDF_FILENAME = "data.h5"
_HDF_TABLE_PATH = "eurostat_table"


def _read_tsv_gz(path_or_buffer) -> pd.DataFrame:
    with gzip.open(path_or_buffer, "rb") as f:
        return _read_tsv(f)


def _download_tsv_gz(url, dst_dir):
    path = dst_dir / _TSV_GZ_FILENAME
    _download_file(url, path)


def _read(the_dir):
    hdf_path = the_dir / _HDF_FILENAME
    tsv_gz_path = the_dir / _TSV_GZ_FILENAME

    try:
        data = pd.read_hdf(hdf_path, _HDF_TABLE_PATH)
    except FileNotFoundError:
        data = _read_tsv_gz(tsv_gz_path)
        data.to_hdf(
            hdf_path,
            _HDF_TABLE_PATH,
            complevel=conf["hdf_complevel"],
            complib=conf["hdf_complib"],
        )

    # Replace empty flags by None (issue #3)
    #
    # Doing it at this point so that the null flag is saved in the HDF
    # file as a string, for performance reasons.
    # This is a pandas PerformanceWarning:
    # "your performance may suffer as PyTables will pickle object types
    # that it cannot map directly to c-types
    # [inferred_type->mixed,key->block0_values] [items->['flag']]"
    data["flag"] = data["flag"].replace({"": None})

    return data
26.695313
78
0.654375
0
0
0
0
0
0
0
0
638
0.186713
b91ce0003a23729f5cf4b45b783933c9e0cd6696
22,196
py
Python
utils.py
fatemehtd/Echo-SyncNet
ebb280e83a67b31436c4cfa420f9c06a92ac8c12
[ "MIT" ]
6
2021-03-19T16:55:30.000Z
2022-03-15T08:41:56.000Z
utils.py
matiasmolinas/Echo-SyncNet
f7f81ead7a24d7574c0668df3765ef58fd71d54d
[ "MIT" ]
3
2021-10-01T22:15:44.000Z
2022-03-25T03:12:47.000Z
utils.py
matiasmolinas/Echo-SyncNet
f7f81ead7a24d7574c0668df3765ef58fd71d54d
[ "MIT" ]
3
2021-03-19T16:55:35.000Z
2022-02-03T10:40:48.000Z
from __future__ import absolute_import from __future__ import division from __future__ import print_function from config import CONFIG import json import tensorflow as tf import numpy as np import matplotlib.pyplot as plt # pylint: disable=g-import-not-at-top import io import math import os import time from absl import flags from absl import logging from easydict import EasyDict import matplotlib matplotlib.use('Agg') FLAGS = flags.FLAGS def visualize_batch(data, global_step, batch_size, num_steps): """Visualizes a batch.""" frames = data['frames'] frames_list = tf.unstack(frames, num=num_steps, axis=1) frames_summaries = tf.concat(frames_list, axis=2) batch_list = tf.split(frames_summaries, batch_size, axis=0) batch_summaries = tf.concat(batch_list, axis=1) tf.summary.image('train_batch', batch_summaries, step=global_step) def visualize_nearest_neighbours(model, data, global_step, batch_size, num_steps, num_frames_per_step, split): """Visualize nearest neighbours in embedding space.""" # Set learning_phase to False to use models in inference mode. tf.keras.backend.set_learning_phase(0) cnn = model['cnn'] emb = model['emb'] if 'tcn' in CONFIG.TRAINING_ALGO: cnn_feats = get_cnn_feats( cnn, data, training=False, num_steps=2 * num_steps) emb_feats = emb(cnn_feats, 2 * num_steps) emb_feats = tf.stack( tf.split(emb_feats, 2 * num_steps, axis=0)[::2], axis=1) else: cnn_feats = get_cnn_feats(cnn, data, training=False) emb_feats = emb(cnn_feats, num_steps) emb_feats = tf.stack(tf.split(emb_feats, num_steps, axis=0), axis=1) query_feats = emb_feats[0] if CONFIG.OPTICALFLOW: frames = data['video_frames'] else: frames = data['frames'] image_list = tf.unstack(frames, num=batch_size, axis=0) if 'tcn' in CONFIG.TRAINING_ALGO: im_list = [image_list[0] [num_frames_per_step - 1::num_frames_per_step][::2]] else: im_list = [image_list[0][num_frames_per_step - 1::num_frames_per_step]] sim_matrix = np.zeros( (batch_size-1, num_steps, num_steps), dtype=np.float32) for i in range(1, batch_size): candidate_feats = emb_feats[i] if 'tcn' in CONFIG.TRAINING_ALGO: img_list = tf.unstack(image_list[i], num=2 * num_steps * num_frames_per_step, axis=0)[num_frames_per_step - 1::num_frames_per_step][::2] else: img_list = tf.unstack(image_list[i], num=num_steps * num_frames_per_step, axis=0)[num_frames_per_step - 1::num_frames_per_step] nn_img_list = [] for j in range(num_steps): curr_query_feats = tf.tile(query_feats[j:j+1], [num_steps, 1]) mean_squared_distance = tf.reduce_mean( tf.math.squared_difference(curr_query_feats, candidate_feats), axis=1) sim_matrix[i-1, j] = softmax(-1.0 * mean_squared_distance) nn_img_list.append(img_list[tf.argmin(mean_squared_distance)]) nn_img = tf.stack(nn_img_list, axis=0) im_list.append(nn_img) def vstack(im): return tf.concat(tf.unstack(im, num=num_steps), axis=1) summary_im = tf.expand_dims(tf.concat([vstack(im) for im in im_list], axis=0), axis=0) tf.summary.image('%s/nn' % split, summary_im, step=global_step) # Convert sim_matrix to float32 as summary_image doesn't take float64 sim_matrix = sim_matrix.astype(np.float32) tf.summary.image('%s/similarity_matrix' % split, np.expand_dims(sim_matrix, axis=3), step=global_step) def softmax(w, t=1.0): e = np.exp(np.array(w) / t) dist = e / np.sum(e) return dist def random_choice_noreplace(m, n, axis=-1): # Generate m random permuations of range (0, n) # NumPy version: np.random.rand(m,n).argsort(axis=axis) return tf.cast(tf.argsort(tf.random.uniform((m, n)), axis=axis), tf.int64) def gen_cycles(num_cycles, batch_size, cycle_len): """Generate 
cycles for alignment.""" random_cycles = random_choice_noreplace( num_cycles, batch_size)[:, :cycle_len] return random_cycles def get_warmup_lr(lr, global_step, lr_params): """Returns learning rate during warm up phase.""" if lr_params.NUM_WARMUP_STEPS > 0: global_steps_int = tf.cast(global_step, tf.int32) warmup_steps_int = tf.constant( lr_params.NUM_WARMUP_STEPS, dtype=tf.int32) global_steps_float = tf.cast(global_steps_int, tf.float32) warmup_steps_float = tf.cast(warmup_steps_int, tf.float32) warmup_percent_done = global_steps_float / warmup_steps_float warmup_lr = lr_params.INITIAL_LR * warmup_percent_done is_warmup = tf.cast(global_steps_int < warmup_steps_int, tf.float32) lr = (1.0 - is_warmup) * lr + is_warmup * warmup_lr return lr # Minimally adapted from Tensorflow object_detection code. def manual_stepping(global_step, boundaries, rates): boundaries = [0] + boundaries num_boundaries = len(boundaries) rate_index = tf.reduce_max( tf.where( tf.greater_equal(global_step, boundaries), list(range(num_boundaries)), [0] * num_boundaries)) return tf.reduce_sum(rates * tf.one_hot(rate_index, depth=num_boundaries)) def get_lr_fn(optimizer_config): """Returns function that provides current learning rate based on config. NOTE: This returns a function as in Eager we need to call assign to update the learning rate. Args: optimizer_config: EasyDict, contains params required to initialize the learning rate and the learning rate decay function. Returns: lr_fn: function, this can be called to return the current learning rate based on the provided config. Raises: ValueError: in case invalid params have been passed in the config. """ lr_params = optimizer_config.LR # pylint: disable=g-long-lambda if lr_params.DECAY_TYPE == 'exp_decay': def lr_fn(lr, global_step): return tf.train.exponential_decay( lr, global_step, lr_params.EXP_DECAY_STEPS, lr_params.EXP_DECAY_RATE, staircase=True)() elif lr_params.DECAY_TYPE == 'manual': lr_step_boundaries = [int(x) for x in lr_params.MANUAL_LR_STEP_BOUNDARIES] f = lr_params.MANUAL_LR_DECAY_RATE learning_rate_sequence = [(lr_params.INITIAL_LR) * f**p for p in range(len(lr_step_boundaries) + 1)] def lr_fn(lr, global_step): return manual_stepping( global_step, lr_step_boundaries, learning_rate_sequence) elif lr_params.DECAY_TYPE == 'fixed': def lr_fn(lr, global_step): return lr_params.INITIAL_LR elif lr_params.DECAY_TYPE == 'poly': def lr_fn(lr, global_step): return tf.train.polynomial_decay( lr, global_step, CONFIG.TRAIN.MAX_ITERS, end_learning_rate=0.0, power=1.0, cycle=False) else: raise ValueError('Learning rate decay type %s not supported. Only support' 'the following decay types: fixed, exp_decay, manual,' 'and poly.') return (lambda lr, global_step: get_warmup_lr(lr_fn(lr, global_step), global_step, lr_params)) def get_optimizer(optimizer_config, learning_rate): """Returns optimizer based on config and learning rate.""" if optimizer_config.TYPE == 'AdamOptimizer': opt = tf.keras.optimizers.Adam(learning_rate=learning_rate) elif optimizer_config.TYPE == 'MomentumOptimizer': opt = tf.keras.optimizers.SGD( learning_rate=learning_rate, momentum=0.9) else: raise ValueError('Optimizer %s not supported. 
Only support the following' 'optimizers: AdamOptimizer, MomentumOptimizer .') return opt def get_lr_opt_global_step(): """Intializes learning rate, optimizer and global step.""" optimizer = get_optimizer(CONFIG.OPTIMIZER, CONFIG.OPTIMIZER.LR.INITIAL_LR) global_step = optimizer.iterations learning_rate = optimizer.learning_rate return learning_rate, optimizer, global_step def create_ckpt(logdir, restore=False, **ckpt_objects): # Since model is a dict we can insert multiple modular networks in this dict. checkpoint = tf.train.Checkpoint(**ckpt_objects) ckpt_manager = tf.train.CheckpointManager( checkpoint, directory=logdir, max_to_keep=10, keep_checkpoint_every_n_hours=1) status = checkpoint.restore( ckpt_manager.latest_checkpoint) if restore else -1 return ckpt_manager, status, checkpoint def restore_ckpt(logdir, **ckpt_objects): """Create and restore checkpoint (if one exists on the path).""" # Instantiate checkpoint and restore from any pre-existing checkpoint. # Since model is a dict we can insert multiple modular networks in this dict. checkpoint = tf.train.Checkpoint(**ckpt_objects) ckpt_manager = tf.train.CheckpointManager( checkpoint, directory=logdir, max_to_keep=10, keep_checkpoint_every_n_hours=1) status = checkpoint.restore(ckpt_manager.latest_checkpoint) return ckpt_manager, status, checkpoint def to_dict(config): if isinstance(config, list): return [to_dict(c) for c in config] elif isinstance(config, EasyDict): return dict([(k, to_dict(v)) for k, v in config.items()]) else: return config def setup_train_dir(logdir, overwrite=False, force_train=True): """Setups directory for training.""" tf.io.gfile.makedirs(logdir) config_path = os.path.join(logdir, 'config.json') if not os.path.exists(config_path) or overwrite: logging.info( 'Using the existing passed in config as no config.json file exists in ' '%s', logdir) with tf.io.gfile.GFile(config_path, 'w') as config_file: config = dict([(k, to_dict(v)) for k, v in CONFIG.items()]) json.dump(config, config_file, sort_keys=True, indent=4) else: logging.info( 'Using config from config.json that exists in %s.', logdir) with tf.io.gfile.GFile(config_path, 'r') as config_file: config_dict = json.load(config_file) CONFIG.update(config_dict) train_logs_dir = os.path.join(logdir, 'train.logs') if os.path.exists(train_logs_dir) and not force_train: raise ValueError('You might be overwriting a directory that already ' 'has train_logs. Please provide a new logdir name in ' 'config or pass --force_train while launching script.') tf.io.gfile.makedirs(train_logs_dir) def setup_eval_dir(logdir, config_timeout_seconds=1): """Setups directory for evaluation.""" tf.io.gfile.makedirs(logdir) tf.io.gfile.makedirs(os.path.join(logdir, 'eval_logs')) config_path = os.path.join(logdir, 'config.json') while not tf.io.gfile.exists(config_path): logging.info('Waiting for config to exist. 
Going to sleep ' ' %s for secs.', config_timeout_seconds) time.sleep(config_timeout_seconds) while True: with tf.io.gfile.GFile(config_path, 'r') as config_file: config_dict = json.load(config_file) if config_dict is None: time.sleep(config_timeout_seconds) else: break CONFIG.update(config_dict) def get_data(iterator): """Return a data dict which contains all the requested sequences.""" data = iterator.get_next() return data, data['chosen_steps'], data['seq_lens'] @tf.function def get_cnn_feats(cnn, data, training, num_steps=None): """Passes data through base CNN.""" if num_steps is None: if training: num_steps = CONFIG.TRAIN.NUM_FRAMES * CONFIG.DATA.NUM_STEPS else: num_steps = CONFIG.EVAL.NUM_FRAMES * CONFIG.DATA.NUM_STEPS cnn.num_steps = num_steps cnn_feats = cnn(data['frames']) return cnn_feats def get_context_steps(step): num_steps = CONFIG.DATA.NUM_STEPS stride = CONFIG.DATA.FRAME_STRIDE # We don't want to see the future. steps = np.arange(step - (num_steps - 1) * stride, step + stride, stride) return steps def get_indices(curr_idx, num_steps, seq_len): steps = range(curr_idx, curr_idx + num_steps) single_steps = np.concatenate([get_context_steps(step) for step in steps]) single_steps = np.concatenate(np.array(list(map(get_context_steps, np.arange(curr_idx, curr_idx + num_steps))))) single_steps = np.maximum(0, single_steps) single_steps = np.minimum(seq_len, single_steps) return single_steps def get_embeddings_dataset(model, iterator, frames_per_batch, keep_data=False, optical_flow=False, keep_labels=True, max_embs=None, callbacks=[]): """Get embeddings from a one epoch iterator.""" keep_labels = keep_labels and CONFIG.DATA.FRAME_LABELS num_frames_per_step = CONFIG.DATA.NUM_STEPS cnn = model['cnn'] emb = model['emb'] embs_list = [] labels_list = [] steps_list = [] seq_lens_list = [] names_list = [] seq_labels_list = [] if keep_data: frames_list = [] if optical_flow: frame_original_list = [] n = 0 def cond(n): if max_embs is None: return True else: return n < max_embs # Make Recurrent Layers stateful, set batch size. # We do this as we are embedding the whole sequence and that can take # more than one batch to be passed and we don't want to automatically # reset hidden states after each batch. if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru': for gru_layer in emb.gru_layers: gru_layer.stateful = True gru_layer.input_spec[0].shape = [1, ] while cond(n): try: print(n) embs = [] labels = [] steps = [] seq_lens = [] names = [] seq_labels = [] if keep_data: frames = [] if optical_flow: frame_original = [] # Reset GRU states for each video. if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru': for gru_layer in emb.gru_layers: gru_layer.reset_states() data, chosen_steps, seq_len = get_data(iterator) seq_len = seq_len.numpy()[0] num_batches = int(math.ceil(float(seq_len)/frames_per_batch)) for i in range(num_batches): if (i + 1) * frames_per_batch > seq_len: num_steps = seq_len - i * frames_per_batch else: num_steps = frames_per_batch curr_idx = i * frames_per_batch curr_data = {} for k, v in data.items(): # Need to do this as some modalities might not exist. 
if len(v.shape) > 1 and v.shape[1] != 0: idxes = get_indices(curr_idx, num_steps, seq_len) curr_data[k] = tf.gather(v, idxes, axis=1) else: curr_data[k] = v cnn_feats = get_cnn_feats(cnn, curr_data, num_steps=num_frames_per_step * num_steps, training=False) emb_feats = emb(cnn_feats, num_steps) logging.debug('On sequence number %d, frames embedded %d', n, curr_idx + num_steps) # np.save(tf.io.gfile.GFile('/air/team/saman/test_weights_old.npy', 'w'), cnn.weights[0].numpy()) # np.save(tf.io.gfile.GFile('/air/team/saman/test_batch_old.npy', 'w'), curr_data["frames"]) # np.save(tf.io.gfile.GFile('/air/team/saman/test_cnn_old.npy', 'w'), cnn_feats.numpy()) # np.save(tf.io.gfile.GFile('/air/team/saman/test_emb_old.npy', 'w'), emb_feats.numpy()) embs.append(emb_feats.numpy()) for f in callbacks: f(np.concatenate(embs), data, chosen_steps, seq_len) steps.append(chosen_steps.numpy()[0]) seq_lens.append(seq_len * [seq_len]) all_labels = data['frame_labels'].numpy()[0] name = data['name'].numpy()[0] names.append(seq_len * [name]) seq_label = data['seq_labels'].numpy()[0] seq_labels.append(seq_len * [seq_label]) labels.append(all_labels) embs = np.concatenate(embs, axis=0) labels = np.concatenate(labels, axis=0) steps = np.concatenate(steps, axis=0) seq_lens = np.concatenate(seq_lens, axis=0) names = np.concatenate(names, axis=0) seq_labels = np.concatenate(seq_labels, axis=0) if keep_data: frames.append(data['frames'].numpy()[0]) frames = np.concatenate(frames, axis=0) if optical_flow: frame_original.append(data['video_frames'].numpy()[0]) frame_original = np.concatenate(frame_original, axis=0) if keep_labels: labels = labels[~np.isnan(embs).any(axis=1)] assert len(embs) == len(labels) seq_labels = seq_labels[~np.isnan(embs).any(axis=1)] names = names[~np.isnan(embs).any(axis=1)] seq_lens = seq_lens[~np.isnan(embs).any(axis=1)] steps = steps[~np.isnan(embs).any(axis=1)] if keep_data: frames = frames[~np.isnan(embs).any(axis=1)] if optical_flow: frame_original = frame_original[~np.isnan(embs).any(axis=1)] embs = embs[~np.isnan(embs).any(axis=1)] assert len(embs) == len(seq_lens) assert len(embs) == len(steps) assert len(names) == len(steps) embs_list.append(embs) if keep_labels: labels_list.append(labels) seq_labels_list.append(seq_labels) steps_list.append(steps) seq_lens_list.append(seq_lens) names_list.append(names) if keep_data: frames_list.append(frames) if optical_flow: frame_original_list.append(frame_original) n += 1 except tf.errors.OutOfRangeError: logging.info('Finished embedding the dataset.') break dataset = {'embs': embs_list, 'seq_lens': seq_lens_list, 'steps': steps_list, 'names': names_list, 'seq_labels': seq_labels_list} if keep_data: dataset['frames'] = frames_list if optical_flow: dataset['frames_original'] = frame_original_list if keep_labels: dataset['labels'] = labels_list # Reset statefulness to recurrent layers for other evaluation tasks. 
if CONFIG.MODEL.EMBEDDER_TYPE == 'convgru': for gru_layer in emb.gru_layers: gru_layer.stateful = False return dataset def gen_plot(x, y): """Create a pyplot, save to buffer and return TB compatible image.""" plt.figure() plt.plot(x, y) plt.title('Val Accuracy') plt.ylim(0, 1) plt.tight_layout() buf = io.BytesIO() plt.savefig(buf, format='png') buf.seek(0) # Convert PNG buffer to TF image image = tf.image.decode_png(buf.getvalue(), channels=4) # Add the batch dimension image = tf.expand_dims(image, 0) return image class Stopwatch(object): """Simple timer for measuring elapsed time.""" def __init__(self): self.reset() def elapsed(self): return time.time() - self.time def done(self, target_interval): return self.elapsed() >= target_interval def reset(self): self.time = time.time() def set_learning_phase(f): """Sets the correct learning phase before calling function f.""" def wrapper(*args, **kwargs): """Calls the function f after setting proper learning phase.""" if 'training' not in kwargs: raise ValueError('Function called with set_learning_phase decorator which' ' does not have training argument.') training = kwargs['training'] if training: # Set learning_phase to True to use models in training mode. tf.keras.backend.set_learning_phase(1) else: # Set learning_phase to False to use models in inference mode. tf.keras.backend.set_learning_phase(0) return f(*args, **kwargs) return wrapper def load_config(config_path): config = None if os.path.exists(config_path): with open(config_path) as f: config = json.load(f) assert config is not None, "config file is not provided or is corrupted" return config def prepare_gpu(ind=-1): ind = int(ind) GPUS = tf.config.experimental.list_physical_devices('GPU') if GPUS: if ind > -1: tf.config.experimental.set_visible_devices(GPUS[ind], 'GPU') try: # Currently, memory growth needs to be the same across GPUs for gpu in GPUS: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') logging.info([len(GPUS), "Physical GPUs,", len(logical_gpus), "Logical GPUs"]) except RuntimeError as e: # Memory growth must be set before GPUs have been initialized logging.info(e) os.environ["CUDA_VISIBLE_DEVICES"] = str(ind)
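The learning-rate utilities in the file above combine a linear warm-up phase (get_warmup_lr) with piecewise-constant step decay (manual_stepping). The following is a minimal NumPy sketch of that scheduling logic, written independently of TensorFlow; the step counts, boundaries, and decay rate are illustrative placeholders, not values taken from CONFIG.

import numpy as np

def warmup_then_steps(global_step, initial_lr=1e-3, num_warmup_steps=500,
                      boundaries=(2000, 4000), decay_rate=0.1):
    # Piecewise-constant decay: the rate index is the number of boundaries
    # already passed, mirroring manual_stepping above.
    rates = [initial_lr * decay_rate ** p for p in range(len(boundaries) + 1)]
    rate_index = int(np.sum(np.asarray(boundaries) <= global_step))
    lr = rates[rate_index]
    # Linear warm-up: while inside the warm-up window, the warm-up rate
    # replaces the decayed rate, as in get_warmup_lr above.
    if num_warmup_steps > 0 and global_step < num_warmup_steps:
        lr = initial_lr * global_step / num_warmup_steps
    return lr

# The rate ramps up linearly, then drops by 10x at each boundary.
for step in (0, 250, 500, 1999, 2000, 4000):
    print(step, warmup_then_steps(step))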
36.748344
113
0.621193
325
0.014642
0
0
400
0.018021
0
0
4,333
0.195215
b91d0a28a2d3c169f55ef3fbe14306db5438a499
8,468
py
Python
UnityPy/classes/Sprite.py
dblack2056/UnityPy
303291e46ddfbf266131237e59e6b1b5c46a9ca4
[ "MIT" ]
null
null
null
UnityPy/classes/Sprite.py
dblack2056/UnityPy
303291e46ddfbf266131237e59e6b1b5c46a9ca4
[ "MIT" ]
null
null
null
UnityPy/classes/Sprite.py
dblack2056/UnityPy
303291e46ddfbf266131237e59e6b1b5c46a9ca4
[ "MIT" ]
null
null
null
from enum import IntEnum from .Mesh import BoneWeights4, SubMesh, VertexData from .NamedObject import NamedObject from .PPtr import PPtr, save_ptr from ..export import SpriteHelper from ..enums import SpriteMeshType from ..streams import EndianBinaryWriter class Sprite(NamedObject): @property def image(self): return SpriteHelper.get_image_from_sprite(self) def __init__(self, reader): super().__init__(reader=reader) version = self.version self.m_Rect = reader.read_rectangle_f() self.m_Offset = reader.read_vector2() if version >= (4, 5): # 4.5 and up self.m_Border = reader.read_vector4() self.m_PixelsToUnits = reader.read_float() if version >= (5, 4, 2) or ( version >= (5, 4, 1, 3) and self.build_type.IsPatch ): # 5.4.1p3 and up self.m_Pivot = reader.read_vector2() self.m_Extrude = reader.read_u_int() if version >= (5, 3): # 5.3 and up self.m_IsPolygon = reader.read_boolean() reader.align_stream() if version >= (2017,): # 2017 and up first = reader.read_bytes(16) # GUID second = reader.read_long() self.m_RenderDataKey = (first, second) self.m_AtlasTags = reader.read_string_array() self.m_SpriteAtlas = PPtr(reader) # SpriteAtlas self.m_RD = SpriteRenderData(reader) if version >= (2017,): # 2017 and up m_PhysicsShapeSize = reader.read_int() self.m_PhysicsShape = [ reader.read_vector2_array() for _ in range(m_PhysicsShapeSize) ] if version >= (2018,): # 2018 and up m_BonesSize = reader.read_int() self.m_Bones = [ reader.read_vector2_array() for _ in range(m_BonesSize) ] def save(self, writer: EndianBinaryWriter = None): if writer is None: writer = EndianBinaryWriter(endian=self.reader.endian) version = self.version super().save(writer) writer.write_rectangle_f(self.m_Rect) writer.write_vector2(self.m_Offset) if version >= (4, 5): # 4.5 and up writer.write_vector4(self.m_Border) writer.write_float(self.m_PixelsToUnits) if version >= (5, 4, 2) or ( version >= (5, 4, 1, 3) and self.build_type.IsPatch ): # 5.4.1p3 and up writer.write_vector2(self.m_Pivot) writer.write_u_int(self.m_Extrude) if version >= (5, 3): # 5.3 and up writer.write_boolean(self.m_IsPolygon) writer.align_stream() if version >= (2017,): # 2017 and up writer.write_bytes(self.m_RenderDataKey[0]) # GUID writer.write_long(self.m_RenderDataKey[1]) writer.write_string_array(self.m_AtlasTags) self.m_SpriteAtlas.save(writer) # SpriteAtlas self.m_RD.save(writer, version) if version >= (2017,): # 2017 and up writer.write_int(len(self.m_PhysicsShape)) for phys in self.m_PhysicsShape: writer.write_vector2_array(phys) if version >= (2018,): # 2018 and up writer.write_int(len(self.m_Bones)) for bone in self.m_Bones: writer.write_vector2_array(bone) self.set_raw_data(writer.bytes) class SecondarySpriteTexture: def __init__(self, reader): self.texture = PPtr(reader) # Texture2D self.name = reader.read_string_to_null() def save(self, writer): self.texture.save(writer) writer.write_string_to_null(self.name) class SpritePackingRotation(IntEnum): kSPRNone = (0,) kSPRFlipHorizontal = (1,) kSPRFlipVertical = (2,) kSPRRotate180 = (3,) kSPRRotate90 = 4 class SpritePackingMode(IntEnum): kSPMTight = (0,) kSPMRectangle = 1 class SpriteSettings: def __init__(self, reader): self.value = reader.read_u_int() @property def value(self): return self.m_settingsRaw @value.setter def value(self, _value): self.m_settingsRaw = _value self.packed = self.m_settingsRaw & 1 # 1 self.packingMode = SpritePackingMode((self.m_settingsRaw >> 1) & 1) # 1 self.packingRotation = SpritePackingRotation((self.m_settingsRaw >> 2) & 0xF) # 4 self.meshType = 
SpriteMeshType((self.m_settingsRaw >> 6) & 1) # 1 # rest of the bits are reserved def save(self, writer): writer.write_u_int(self.m_settingsRaw) class SpriteVertex: def __init__(self, reader): version = reader.version self.pos = reader.read_vector3() if version[:2] <= (4, 3): # 4.3 and down self.uv = reader.read_vector2() def save(self, writer, version): writer.write_vector3(self.pos) if version[:2] <= (4, 3): # 4.3 and down writer.write__vector2(self.uv) class SpriteRenderData: def __init__(self, reader): version = reader.version self.texture = PPtr(reader) # Texture2D if version >= (5, 2): # 5.2 and up self.alphaTexture = PPtr(reader) # Texture2D if version >= (2019,): # 2019 and up secondaryTexturesSize = reader.read_int() self.secondaryTextures = [ SecondarySpriteTexture(reader) for _ in range(secondaryTexturesSize) ] if version >= (5, 6): # 5.6 and up SubMeshesSize = reader.read_int() self.m_SubMeshes = [SubMesh(reader) for _ in range(SubMeshesSize)] IndexBufferSize = reader.read_int() self.m_IndexBuffer = reader.read_bytes(IndexBufferSize) reader.align_stream() self.m_VertexData = VertexData(reader) else: verticesSize = reader.read_int() self.vertices = [SpriteVertex(reader) for _ in range(verticesSize)] self.indices = reader.read_u_short_array() reader.align_stream() if version >= (2018,): # 2018 and up self.m_Bindpose = reader.read_matrix_array() if version < (2018, 2): # 2018.2 down self.m_SourceSkinSize = reader.read_int() self.m_SourceSkin = [BoneWeights4(reader)] self.textureRect = reader.read_rectangle_f() self.textureRectOffset = reader.read_vector2() if version >= (5, 6): # 5.6 and up self.atlasRectOffset = reader.read_vector2() self.settingsRaw = SpriteSettings(reader) if version >= (4, 5): # 4.5 and up self.uvTransform = reader.read_vector4() if version >= (2017,): # 2017 and up self.downscaleMultiplier = reader.read_float() def save(self, writer, version): self.texture.save(writer) # Texture2D if version >= (5, 2): # 5.2 and up self.alphaTexture.save(writer) # Texture2D if version >= (2019,): # 2019 and up writer.write_int(len(self.secondaryTextures)) for tex in self.secondaryTextures: tex.save(writer) if version >= (5, 6): # 5.6 and up writer.write_int(len(self.m_SubMeshes)) for mesh in self.m_SubMeshes: mesh.save(writer, version) writer.write_int(len(self.m_IndexBuffer)) writer.write_bytes(self.m_IndexBuffer) writer.align_stream() self.m_VertexData.save(writer, version) else: writer.write_int(len(self.vertices)) for vertex in self.vertices: vertex.save(writer, version) writer.write_u_short_array(self.indices) writer.align_stream() if version >= (2018,): # 2018 and up writer.write_matrix_array(self.m_Bindpose) if version < (2018, 2): # 2018.2 down writer.write_int(self.m_SourceSkinSize) self.m_SourceSkin[0].save(writer) writer.write_rectangle_f(self.textureRect) writer.write_vector2(self.textureRectOffset) if version >= (5, 6): # 5.6 and up writer.write_vector2(self.atlasRectOffset) self.settingsRaw.save(writer) if version >= (4, 5): # 4.5 and up writer.write_vector4(self.uvTransform) if version >= (2017,): # 2017 and up writer.write_float(self.downscaleMultiplier)
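SpriteSettings above decodes one packed unsigned integer into several sprite flags by shifting and masking: bit 0 is the packed flag, bit 1 the packing mode, bits 2-5 the packing rotation, and bit 6 the mesh type. The standalone sketch below reproduces that bit layout with plain integers instead of the UnityPy reader; the enum copies are local to the example.

from enum import IntEnum

class PackingMode(IntEnum):
    kSPMTight = 0
    kSPMRectangle = 1

class PackingRotation(IntEnum):
    kSPRNone = 0
    kSPRFlipHorizontal = 1
    kSPRFlipVertical = 2
    kSPRRotate180 = 3
    kSPRRotate90 = 4

def decode_sprite_settings(raw: int) -> dict:
    # Same shifts and masks as SpriteSettings.value above; the mesh type is
    # returned as a raw bit here rather than UnityPy's SpriteMeshType enum.
    return {
        "packed": raw & 1,                                     # bit 0
        "packingMode": PackingMode((raw >> 1) & 1),            # bit 1
        "packingRotation": PackingRotation((raw >> 2) & 0xF),  # bits 2-5
        "meshType": (raw >> 6) & 1,                            # bit 6
    }

# Example: a packed, rectangle-mode sprite rotated by 90 degrees.
raw = 1 | (1 << 1) | (PackingRotation.kSPRRotate90 << 2)
print(decode_sprite_settings(raw))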
34.563265
90
0.599906
8,190
0.967171
0
0
566
0.06684
0
0
522
0.061644
b91e27e8ce2a32cb1f2fa0c55d35f35399d00f99
11,123
py
Python
eazy/filters.py
albertfxwang/eazy-py
bcfd8a1e49f077adc794202871345542ab29800b
[ "MIT" ]
null
null
null
eazy/filters.py
albertfxwang/eazy-py
bcfd8a1e49f077adc794202871345542ab29800b
[ "MIT" ]
null
null
null
eazy/filters.py
albertfxwang/eazy-py
bcfd8a1e49f077adc794202871345542ab29800b
[ "MIT" ]
null
null
null
import numpy as np import os from astropy.table import Table from . import utils __all__ = ["FilterDefinition", "FilterFile", "ParamFilter"] VEGA_FILE = os.path.join(utils.path_to_eazy_data(), 'alpha_lyr_stis_008.fits') VEGA = Table.read(VEGA_FILE) for c in VEGA.colnames: VEGA[c] = VEGA[c].astype(float) class FilterDefinition: def __init__(self, name=None, wave=None, throughput=None, bp=None): """ Bandpass object Parameters ---------- name : str Label name wave : array Wavelength array, in `astropy.units.Angstrom`. throughput : array Throughput, arbitrary normalization bp : optional, `pysynphot.obsbandpass` object `pysynphot` filter bandpass """ self.name = name self.wave = wave self.throughput = throughput self.Aflux = 1. # pysynphot Bandpass if bp is not None: self.wave = np.cast[np.double](bp.wave) self.throughput = np.cast[np.double](bp.throughput) self.name = bp.name self.norm = 1. if self.throughput is not None: self.norm = np.trapz(self.throughput/self.wave, self.wave) def __repr__(self): return self.name.__repr__() def __str__(self): return self.name.__str__() def get_extinction(self, EBV=0, Rv=3.1): """ Extinction factor """ import astropy.units as u f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv) self.Alambda = f99(self.wave) self.Aflux = 10**(-0.4*self.Alambda) def extinction_correction(self, EBV, Rv=3.1, mag=True, source_lam=None, source_flux=None): """ Get the MW extinction correction within the filter. Optionally supply a source spectrum. """ import astropy.units as u try: import grizli.utils_c interp = grizli.utils_c.interp.interp_conserve_c except ImportError: interp = utils.interp_conserve if self.wave is None: print('Filter not defined.') return False if source_flux is None: source_flux = self.throughput*0.+1 else: source_flux = interp(self.wave, source_lam, source_flux, left=0, right=0) if (self.wave.min() < 910) | (self.wave.max() > 6.e4): Alambda = 0. 
else: f99 = utils.GalacticExtinction(EBV=EBV, Rv=Rv) Alambda = f99(self.wave) delta = np.trapz(self.throughput*source_flux*10**(-0.4*Alambda), self.wave) / np.trapz(self.throughput*source_flux, self.wave) if mag: return 2.5*np.log10(delta) else: return 1./delta @property def ABVega(self): """ Compute AB-Vega conversion """ from astropy.constants import c import astropy.units as u try: import grizli.utils_c interp = grizli.utils_c.interp.interp_conserve_c except ImportError: interp = utils.interp_conserve # Union of throughput and Vega spectrum arrays full_x = np.hstack([self.wave, VEGA['WAVELENGTH']]) full_x = full_x[np.argsort(full_x)] # Vega spectrum, units of f-lambda flux density, cgs # Interpolate to wavelength grid, no extrapolation vega_full = interp(full_x, VEGA['WAVELENGTH'], VEGA['FLUX'], left=0, right=0) thru_full = interp(full_x, self.wave, self.throughput, left=0, right=0) # AB = 0, same units absp = 3631*1e-23*c.to(u.m/u.s).value*1.e10/full_x**2 # Integrate over the bandpass, flam dlam num = np.trapz(vega_full*thru_full, full_x) den = np.trapz(absp*thru_full, full_x) return -2.5*np.log10(num/den) @property def pivot(self): """ Pivot wavelength http://pysynphot.readthedocs.io/en/latest/properties.html """ integrator = np.trapz num = integrator(self.wave, self.wave*self.throughput) den = integrator(self.wave, self.throughput/self.wave) pivot = np.sqrt(num/den) return pivot @property def equivwidth(self): """ Filter equivalent width http://pysynphot.readthedocs.io/en/latest/properties.html """ return np.trapz(self.throughput, self.wave) @property def rectwidth(self): """ Filter rectangular width http://pysynphot.readthedocs.io/en/latest/properties.html """ rect = self.equivwidth / self.throughput.max() return rect @property def ctw95(self): """ 95% cumulative throughput width http://www.stsci.edu/hst/acs/analysis/bandwidths/#keywords """ dl = np.diff(self.wave) filt = np.cumsum((self.wave*self.throughput)[1:]*dl) ctw95 = np.interp([0.025, 0.975], filt/filt.max(), self.wave[1:]) return np.diff(ctw95)[0] def for_filter_file(self, row_str='{i:6} {wave:.5e} {thru:.5e}'): """ Return a string that can be put in the EAZY filter file """ header = '{0} {1} lambda_c= {2:.4e} AB-Vega= {3:.3f} w95={4:.1f}' N = len(self.wave) lines = [header.format(N, self.name.split('lambda_c')[0], self.pivot, self.ABVega, self.ctw95)] lines += [row_str.format(i=i+1, wave=w, thru=t) for i, (w, t) in enumerate(zip(self.wave, self.throughput))] return '\n'.join(lines) class FilterFile: def __init__(self, file='FILTER.RES.latest', path='./'): """ Read a EAZY filter file. .. 
plot:: :include-source: import matplotlib.pyplot as plt from eazy.filters import FilterFile res = FilterFile(path=None) print(len(res.filters)) bp = res[205] print(bp) fig, ax = plt.subplots(1,1,figsize=(6,4)) ax.plot(bp.wave, bp.throughput, label=bp.name.split()[0]) ax.set_xlabel('wavelength, Angstroms') ax.set_ylabel('throughput') ax.legend() ax.grid() fig.tight_layout(pad=0.5) """ if path is None: file_path = os.path.join(os.getenv('EAZYCODE'), 'filters', file) else: file_path = os.path.join(path, file) with open(file_path, 'r') as fp: lines = fp.readlines() self.filename = file_path filters = [] wave = [] trans = [] header = '' for line in lines: if 'lambda_c' in line: if len(wave) > 0: # Make filter from lines already read in new_filter = FilterDefinition(name=header, wave=np.cast[float](wave), throughput=np.cast[float](trans)) # new_filter.name = header # new_filter.wave = np.cast[float](wave) # new_filter.throughput = np.cast[float](trans) filters.append(new_filter) # Initialize filter header = ' '.join(line.split()[1:]) wave = [] trans = [] else: lspl = np.cast[float](line.split()) wave.append(lspl[1]) trans.append(lspl[2]) # last one # new_filter = FilterDefinition() # new_filter.name = header # new_filter.wave = np.cast[float](wave) # new_filter.throughput = np.cast[float](trans) new_filter = FilterDefinition(name=header, wave=np.cast[float](wave), throughput=np.cast[float](trans)) filters.append(new_filter) self.filters = filters @property def NFILT(self): """ Number of filters in the list """ return len(self.filters) def __getitem__(self, i1): """ Return unit-indexed filter, e.g., 161 = 2mass-j """ return self.filters[i1-1] def names(self, verbose=True): """ Print the filter names. """ if verbose: for i in range(len(self.filters)): print('{0:5d} {1}'.format(i+1, self.filters[i].name)) else: string_list = ['{0:5d} {1}\n'.format(i+1, self.filters[i].name) for i in range(len(self.filters))] return string_list def write(self, file='xxx.res', verbose=True): """ Dump the filter information to a filter file. """ fp = open(file,'w') for filter in self.filters: fp.write('{0:6d} {1}\n'.format(len(filter.wave), filter.name)) for i in range(len(filter.wave)): fp.write('{0:6d} {1:.5e} {2:.5e}\n'.format(i+1, filter.wave[i], filter.throughput[i])) fp.close() string_list = self.names(verbose=False) fp = open(file+'.info', 'w') fp.writelines(string_list) fp.close() if verbose: print('Wrote <{0}[.info]>'.format(file)) def search(self, search_string, case=False, verbose=True): """ Search filter names for ``search_string``. If ``case`` is True, then match case. """ import re if not case: search_string = search_string.upper() matched = [] for i in range(len(self.filters)): filt_name = self.filters[i].name if not case: filt_name = filt_name.upper() if re.search(search_string, filt_name) is not None: if verbose: print('{0:5d} {1}'.format(i+1, self.filters[i].name)) matched.append(i) return np.array(matched) class ParamFilter(FilterDefinition): def __init__(self, line='# Filter #20, RES#78: COSMOS/SUBARU_filter_B.txt - lambda_c=4458.276253'): self.lambda_c = float(line.split('lambda_c=')[1]) self.name = line.split()[4] self.fnumber = int(line.split('RES#')[1].split(':')[0]) self.cnumber = int(line.split('Filter #')[1].split(',')[0])
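FilterDefinition.pivot above evaluates the pivot wavelength, lambda_pivot = sqrt( integral(lambda * T dlambda) / integral((T / lambda) dlambda) ), with np.trapz, and equivwidth/rectwidth are the corresponding throughput integrals. The sketch below computes the same quantities for a made-up top-hat bandpass (not an entry from FILTER.RES), writing the pivot integrals in their textbook form.

import numpy as np

# Hypothetical top-hat filter: flat throughput between 4000 and 5000 Angstroms.
wave = np.linspace(3000.0, 6000.0, 3001)
throughput = ((wave > 4000.0) & (wave < 5000.0)).astype(float)

# Pivot wavelength: square root of the ratio of the two bandpass integrals.
num = np.trapz(wave * throughput, wave)
den = np.trapz(throughput / wave, wave)
pivot = np.sqrt(num / den)

# Equivalent width and rectangular width, as in the properties above.
equivwidth = np.trapz(throughput, wave)
rectwidth = equivwidth / throughput.max()

print(f"pivot ~ {pivot:.1f} A  equiv. width ~ {equivwidth:.1f} A  rect. width ~ {rectwidth:.1f} A")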
30.225543
134
0.507687
10,748
0.966286
0
0
2,570
0.231053
0
0
3,342
0.300459
b91e9c056c9dab4c7981c513788ac7b746223cf5
672
py
Python
LeetCode/106.py
KevinTMtz/CompetitiveProgramming
0bf8a297c404073df707b6d7b06965b055ccd872
[ "MIT" ]
1
2020-12-08T02:01:18.000Z
2020-12-08T02:01:18.000Z
LeetCode/106.py
KevinTMtz/CompetitiveProgramming
0bf8a297c404073df707b6d7b06965b055ccd872
[ "MIT" ]
null
null
null
LeetCode/106.py
KevinTMtz/CompetitiveProgramming
0bf8a297c404073df707b6d7b06965b055ccd872
[ "MIT" ]
null
null
null
#
# LeetCode
#
# Problem - 106
# URL - https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/
#

from typing import List

# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right

class Solution:
    def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
        if not inorder:
            return None
        # The last element of the postorder traversal is the current root.
        root_val = postorder.pop()
        root = TreeNode(root_val)
        # Elements to the right of root_val in the inorder list form the right subtree.
        index = inorder.index(root_val)
        # Build the right subtree first: postorder is consumed from the back,
        # so the right subtree's nodes come off the list before the left's.
        root.right = self.buildTree(inorder[index + 1:], postorder)
        root.left = self.buildTree(inorder[:index], postorder)
        return root
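As a quick check of the recursion above, the snippet below rebuilds the example tree [3,9,20,null,null,15,7] from inorder=[9,3,15,20,7] and postorder=[9,15,7,20,3] and then walks it in preorder. TreeNode is restated only so the snippet is self-contained; it assumes the Solution class above is defined with this TreeNode in scope, as it is in the LeetCode environment.

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

def preorder(node):
    # Root-left-right walk, used here just to inspect the rebuilt tree.
    if node is None:
        return []
    return [node.val] + preorder(node.left) + preorder(node.right)

root = Solution().buildTree([9, 3, 15, 20, 7], [9, 15, 7, 20, 3])
print(preorder(root))  # expected: [3, 9, 20, 15, 7]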
24
97
0.651786
346
0.514881
0
0
0
0
0
0
311
0.462798
b91f064ec51160dd5a168a0ea9d44e81a3af31b7
44,880
py
Python
evalml/automl/automl_search.py
skvorekn/evalml
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
[ "BSD-3-Clause" ]
null
null
null
evalml/automl/automl_search.py
skvorekn/evalml
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
[ "BSD-3-Clause" ]
null
null
null
evalml/automl/automl_search.py
skvorekn/evalml
2cbfa344ec3fdc0fb0f4a0f1093811135b9b97d8
[ "BSD-3-Clause" ]
null
null
null
import copy import time from collections import defaultdict import cloudpickle import numpy as np import pandas as pd import woodwork as ww from sklearn.model_selection import BaseCrossValidator from .pipeline_search_plots import PipelineSearchPlots from evalml.automl.automl_algorithm import IterativeAlgorithm from evalml.automl.callbacks import log_error_callback from evalml.automl.engine import SequentialEngine from evalml.automl.utils import ( check_all_pipeline_names_unique, get_default_primary_search_objective, make_data_splitter ) from evalml.exceptions import AutoMLSearchException, PipelineNotFoundError from evalml.model_family import ModelFamily from evalml.objectives import ( get_core_objectives, get_non_core_objectives, get_objective ) from evalml.pipelines import ( MeanBaselineRegressionPipeline, ModeBaselineBinaryPipeline, ModeBaselineMulticlassPipeline, TimeSeriesBaselineBinaryPipeline, TimeSeriesBaselineMulticlassPipeline, TimeSeriesBaselineRegressionPipeline ) from evalml.pipelines.components.utils import get_estimators from evalml.pipelines.utils import make_pipeline from evalml.preprocessing import split_data from evalml.problem_types import ProblemTypes, handle_problem_types from evalml.tuners import SKOptTuner from evalml.utils import convert_to_seconds, infer_feature_types from evalml.utils.logger import ( get_logger, log_subtitle, log_title, time_elapsed, update_pipeline ) logger = get_logger(__file__) class AutoMLSearch: """Automated Pipeline search.""" _MAX_NAME_LEN = 40 # Necessary for "Plotting" documentation, since Sphinx does not work well with instance attributes. plot = PipelineSearchPlots def __init__(self, X_train=None, y_train=None, problem_type=None, objective='auto', max_iterations=None, max_time=None, patience=None, tolerance=None, data_splitter=None, allowed_pipelines=None, allowed_model_families=None, start_iteration_callback=None, add_result_callback=None, error_callback=None, additional_objectives=None, random_seed=0, n_jobs=-1, tuner_class=None, optimize_thresholds=True, ensembling=False, max_batches=None, problem_configuration=None, train_best_pipeline=True, pipeline_parameters=None, _ensembling_split_size=0.2, _pipelines_per_batch=5): """Automated pipeline search Arguments: X_train (pd.DataFrame, ww.DataTable): The input training data of shape [n_samples, n_features]. Required. y_train (pd.Series, ww.DataColumn): The target training data of length [n_samples]. Required for supervised learning tasks. problem_type (str or ProblemTypes): type of supervised learning problem. See evalml.problem_types.ProblemType.all_problem_types for a full list. objective (str, ObjectiveBase): The objective to optimize for. Used to propose and rank pipelines, but not for optimizing each pipeline during fit-time. When set to 'auto', chooses: - LogLossBinary for binary classification problems, - LogLossMulticlass for multiclass classification problems, and - R2 for regression problems. max_iterations (int): Maximum number of iterations to search. If max_iterations and max_time is not set, then max_iterations will default to max_iterations of 5. max_time (int, str): Maximum time to search for pipelines. This will not start a new pipeline search after the duration has elapsed. If it is an integer, then the time will be in seconds. For strings, time can be specified as seconds, minutes, or hours. patience (int): Number of iterations without improvement to stop search early. Must be positive. If None, early stopping is disabled. Defaults to None. 
tolerance (float): Minimum percentage difference to qualify as score improvement for early stopping. Only applicable if patience is not None. Defaults to None. allowed_pipelines (list(class)): A list of PipelineBase subclasses indicating the pipelines allowed in the search. The default of None indicates all pipelines for this problem type are allowed. Setting this field will cause allowed_model_families to be ignored. allowed_model_families (list(str, ModelFamily)): The model families to search. The default of None searches over all model families. Run evalml.pipelines.components.utils.allowed_model_families("binary") to see options. Change `binary` to `multiclass` or `regression` depending on the problem type. Note that if allowed_pipelines is provided, this parameter will be ignored. data_splitter (sklearn.model_selection.BaseCrossValidator): Data splitting method to use. Defaults to StratifiedKFold. tuner_class: The tuner class to use. Defaults to SKOptTuner. optimize_thresholds (bool): Whether or not to optimize the binary pipeline threshold. Defaults to True. start_iteration_callback (callable): Function called before each pipeline training iteration. Callback function takes three positional parameters: The pipeline class, the pipeline parameters, and the AutoMLSearch object. add_result_callback (callable): Function called after each pipeline training iteration. Callback function takes three positional parameters: A dictionary containing the training results for the new pipeline, an untrained_pipeline containing the parameters used during training, and the AutoMLSearch object. error_callback (callable): Function called when `search()` errors and raises an Exception. Callback function takes three positional parameters: the Exception raised, the traceback, and the AutoMLSearch object. Must also accepts kwargs, so AutoMLSearch is able to pass along other appropriate parameters by default. Defaults to None, which will call `log_error_callback`. additional_objectives (list): Custom set of objectives to score on. Will override default objectives for problem type if not empty. random_seed (int): Seed for the random number generator. Defaults to 0. n_jobs (int or None): Non-negative integer describing level of parallelism used for pipelines. None and 1 are equivalent. If set to -1, all CPUs are used. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used. ensembling (boolean): If True, runs ensembling in a separate batch after every allowed pipeline class has been iterated over. If the number of unique pipelines to search over per batch is one, ensembling will not run. Defaults to False. max_batches (int): The maximum number of batches of pipelines to search. Parameters max_time, and max_iterations have precedence over stopping the search. problem_configuration (dict, None): Additional parameters needed to configure the search. For example, in time series problems, values should be passed in for the gap and max_delay variables. train_best_pipeline (boolean): Whether or not to train the best pipeline before returning it. Defaults to True. pipeline_parameters (dict): A dict of the parameters used to initalize a pipeline with. _ensembling_split_size (float): The amount of the training data we'll set aside for training ensemble metalearners. Only used when ensembling is True. Must be between 0 and 1, exclusive. Defaults to 0.2 _pipelines_per_batch (int): The number of pipelines to train for every batch after the first one. 
The first batch will train a baseline pipline + one of each pipeline family allowed in the search. """ if X_train is None: raise ValueError('Must specify training data as a 2d array using the X_train argument') if y_train is None: raise ValueError('Must specify training data target values as a 1d vector using the y_train argument') try: self.problem_type = handle_problem_types(problem_type) except ValueError: raise ValueError('choose one of (binary, multiclass, regression) as problem_type') self.tuner_class = tuner_class or SKOptTuner self.start_iteration_callback = start_iteration_callback self.add_result_callback = add_result_callback self.error_callback = error_callback or log_error_callback self.data_splitter = data_splitter self.optimize_thresholds = optimize_thresholds self.ensembling = ensembling if objective == 'auto': objective = get_default_primary_search_objective(self.problem_type.value) objective = get_objective(objective, return_instance=False) self.objective = self._validate_objective(objective) if self.data_splitter is not None and not issubclass(self.data_splitter.__class__, BaseCrossValidator): raise ValueError("Not a valid data splitter") if not objective.is_defined_for_problem_type(self.problem_type): raise ValueError("Given objective {} is not compatible with a {} problem.".format(self.objective.name, self.problem_type.value)) if additional_objectives is None: additional_objectives = get_core_objectives(self.problem_type) # if our main objective is part of default set of objectives for problem_type, remove it existing_main_objective = next((obj for obj in additional_objectives if obj.name == self.objective.name), None) if existing_main_objective is not None: additional_objectives.remove(existing_main_objective) else: additional_objectives = [get_objective(o) for o in additional_objectives] additional_objectives = [self._validate_objective(obj) for obj in additional_objectives] self.additional_objectives = additional_objectives self.objective_name_to_class = {o.name: o for o in [self.objective] + self.additional_objectives} if not isinstance(max_time, (int, float, str, type(None))): raise TypeError(f"Parameter max_time must be a float, int, string or None. Received {type(max_time)} with value {str(max_time)}..") if isinstance(max_time, (int, float)) and max_time < 0: raise ValueError(f"Parameter max_time must be None or non-negative. Received {max_time}.") if max_batches is not None and max_batches < 0: raise ValueError(f"Parameter max_batches must be None or non-negative. Received {max_batches}.") if max_iterations is not None and max_iterations < 0: raise ValueError(f"Parameter max_iterations must be None or non-negative. Received {max_iterations}.") self.max_time = convert_to_seconds(max_time) if isinstance(max_time, str) else max_time self.max_iterations = max_iterations self.max_batches = max_batches self._pipelines_per_batch = _pipelines_per_batch if not self.max_iterations and not self.max_time and not self.max_batches: self.max_batches = 1 logger.info("Using default limit of max_batches=1.\n") if patience and (not isinstance(patience, int) or patience < 0): raise ValueError("patience value must be a positive integer. Received {} instead".format(patience)) if tolerance and (tolerance > 1.0 or tolerance < 0.0): raise ValueError("tolerance value must be a float between 0.0 and 1.0 inclusive. 
Received {} instead".format(tolerance)) self.patience = patience self.tolerance = tolerance or 0.0 self._results = { 'pipeline_results': {}, 'search_order': [], 'errors': [] } self.random_seed = random_seed self.n_jobs = n_jobs self.plot = None try: self.plot = PipelineSearchPlots(self) except ImportError: logger.warning("Unable to import plotly; skipping pipeline search plotting\n") self.allowed_pipelines = allowed_pipelines self.allowed_model_families = allowed_model_families self._automl_algorithm = None self._start = 0.0 self._baseline_cv_scores = {} self.show_batch_output = False self._validate_problem_type() self.problem_configuration = self._validate_problem_configuration(problem_configuration) self._train_best_pipeline = train_best_pipeline self._best_pipeline = None self._searched = False self.X_train = infer_feature_types(X_train) self.y_train = infer_feature_types(y_train) self.ensembling_indices = None default_data_splitter = make_data_splitter(self.X_train, self.y_train, self.problem_type, self.problem_configuration, n_splits=3, shuffle=True, random_seed=self.random_seed) self.data_splitter = self.data_splitter or default_data_splitter self.pipeline_parameters = pipeline_parameters if pipeline_parameters is not None else {} self.search_iteration_plot = None self._interrupted = False if self.allowed_pipelines is None: logger.info("Generating pipelines to search over...") allowed_estimators = get_estimators(self.problem_type, self.allowed_model_families) logger.debug(f"allowed_estimators set to {[estimator.name for estimator in allowed_estimators]}") self.allowed_pipelines = [make_pipeline(self.X_train, self.y_train, estimator, self.problem_type, custom_hyperparameters=self.pipeline_parameters) for estimator in allowed_estimators] if self.allowed_pipelines == []: raise ValueError("No allowed pipelines to search") check_all_pipeline_names_unique(self.allowed_pipelines) run_ensembling = self.ensembling if run_ensembling and len(self.allowed_pipelines) == 1: logger.warning("Ensembling is set to True, but the number of unique pipelines is one, so ensembling will not run.") run_ensembling = False if run_ensembling and self.max_iterations is not None: # Baseline + first batch + each pipeline iteration + 1 first_ensembling_iteration = (1 + len(self.allowed_pipelines) + len(self.allowed_pipelines) * self._pipelines_per_batch + 1) if self.max_iterations < first_ensembling_iteration: run_ensembling = False logger.warning(f"Ensembling is set to True, but max_iterations is too small, so ensembling will not run. Set max_iterations >= {first_ensembling_iteration} to run ensembling.") else: logger.info(f"Ensembling will run at the {first_ensembling_iteration} iteration and every {len(self.allowed_pipelines) * self._pipelines_per_batch} iterations after that.") if self.max_batches and self.max_iterations is None: self.show_batch_output = True if run_ensembling: ensemble_nth_batch = len(self.allowed_pipelines) + 1 num_ensemble_batches = (self.max_batches - 1) // ensemble_nth_batch if num_ensemble_batches == 0: run_ensembling = False logger.warning(f"Ensembling is set to True, but max_batches is too small, so ensembling will not run. 
Set max_batches >= {ensemble_nth_batch + 1} to run ensembling.") else: logger.info(f"Ensembling will run every {ensemble_nth_batch} batches.") self.max_iterations = (1 + len(self.allowed_pipelines) + self._pipelines_per_batch * (self.max_batches - 1 - num_ensemble_batches) + num_ensemble_batches) else: self.max_iterations = 1 + len(self.allowed_pipelines) + (self._pipelines_per_batch * (self.max_batches - 1)) if run_ensembling: if not (0 < _ensembling_split_size < 1): raise ValueError(f"Ensembling split size must be between 0 and 1 exclusive, received {_ensembling_split_size}") X_shape = ww.DataTable(np.arange(self.X_train.shape[0])) _, ensembling_indices, _, _ = split_data(X_shape, self.y_train, problem_type=self.problem_type, test_size=_ensembling_split_size, random_seed=self.random_seed) self.ensembling_indices = ensembling_indices.to_dataframe()[0].tolist() self._engine = SequentialEngine(self.X_train, self.y_train, self.ensembling_indices, self, should_continue_callback=self._should_continue, pre_evaluation_callback=self._pre_evaluation_callback, post_evaluation_callback=self._post_evaluation_callback) self.allowed_model_families = list(set([p.model_family for p in (self.allowed_pipelines)])) logger.debug(f"allowed_pipelines set to {[pipeline.name for pipeline in self.allowed_pipelines]}") logger.debug(f"allowed_model_families set to {self.allowed_model_families}") if len(self.problem_configuration): pipeline_params = {**{'pipeline': self.problem_configuration}, **self.pipeline_parameters} else: pipeline_params = self.pipeline_parameters self._automl_algorithm = IterativeAlgorithm( max_iterations=self.max_iterations, allowed_pipelines=self.allowed_pipelines, tuner_class=self.tuner_class, random_seed=self.random_seed, n_jobs=self.n_jobs, number_features=self.X_train.shape[1], pipelines_per_batch=self._pipelines_per_batch, ensembling=run_ensembling, pipeline_params=pipeline_params ) def _pre_evaluation_callback(self, pipeline): if self.start_iteration_callback: self.start_iteration_callback(pipeline.__class__, pipeline.parameters, self) desc = f"{pipeline.name}" if len(desc) > AutoMLSearch._MAX_NAME_LEN: desc = desc[:AutoMLSearch._MAX_NAME_LEN - 3] + "..." desc = desc.ljust(AutoMLSearch._MAX_NAME_LEN) batch_number = 1 if self._automl_algorithm is not None and self._automl_algorithm.batch_number > 0: batch_number = self._automl_algorithm.batch_number update_pipeline(logger, desc, len(self._results['pipeline_results']) + 1, self.max_iterations, self._start, batch_number, self.show_batch_output) def _validate_objective(self, objective): non_core_objectives = get_non_core_objectives() if isinstance(objective, type): if objective in non_core_objectives: raise ValueError(f"{objective.name.lower()} is not allowed in AutoML! 
" "Use evalml.objectives.utils.get_core_objective_names() " "to get all objective names allowed in automl.") return objective() return objective def __str__(self): def _print_list(obj_list): lines = sorted(['\t{}'.format(o.name) for o in obj_list]) return '\n'.join(lines) def _get_funct_name(function): if callable(function): return function.__name__ else: return None search_desc = ( f"{handle_problem_types(self.problem_type).name} Search\n\n" f"Parameters: \n{'='*20}\n" f"Objective: {get_objective(self.objective).name}\n" f"Max Time: {self.max_time}\n" f"Max Iterations: {self.max_iterations}\n" f"Max Batches: {self.max_batches}\n" f"Allowed Pipelines: \n{_print_list(self.allowed_pipelines or [])}\n" f"Patience: {self.patience}\n" f"Tolerance: {self.tolerance}\n" f"Data Splitting: {self.data_splitter}\n" f"Tuner: {self.tuner_class.__name__}\n" f"Start Iteration Callback: {_get_funct_name(self.start_iteration_callback)}\n" f"Add Result Callback: {_get_funct_name(self.add_result_callback)}\n" f"Additional Objectives: {_print_list(self.additional_objectives or [])}\n" f"Random Seed: {self.random_seed}\n" f"n_jobs: {self.n_jobs}\n" f"Optimize Thresholds: {self.optimize_thresholds}\n" ) rankings_desc = "" if not self.rankings.empty: rankings_str = self.rankings.drop(['parameters'], axis='columns').to_string() rankings_desc = f"\nSearch Results: \n{'='*20}\n{rankings_str}" return search_desc + rankings_desc def _validate_problem_configuration(self, problem_configuration=None): if self.problem_type in [ProblemTypes.TIME_SERIES_REGRESSION]: required_parameters = {'gap', 'max_delay'} if not problem_configuration or not all(p in problem_configuration for p in required_parameters): raise ValueError("user_parameters must be a dict containing values for at least the gap and max_delay " f"parameters. Received {problem_configuration}.") return problem_configuration or {} def _handle_keyboard_interrupt(self): """Presents a prompt to the user asking if they want to stop the search. Returns: bool: If True, search should terminate early """ leading_char = "\n" start_of_loop = time.time() while True: choice = input(leading_char + "Do you really want to exit search (y/n)? ").strip().lower() if choice == "y": logger.info("Exiting AutoMLSearch.") return True elif choice == "n": # So that the time in this loop does not count towards the time budget (if set) time_in_loop = time.time() - start_of_loop self._start += time_in_loop return False else: leading_char = "" def search(self, show_iteration_plot=True): """Find the best pipeline for the data set. Arguments: feature_types (list, optional): list of feature types, either numerical or categorical. Categorical features will automatically be encoded show_iteration_plot (boolean, True): Shows an iteration vs. score plot in Jupyter notebook. Disabled by default in non-Jupyter enviroments. """ if self._searched: logger.info("AutoMLSearch.search() has already been run and will not run again on the same instance. Re-initialize AutoMLSearch to search again.") return # don't show iteration plot outside of a jupyter notebook if show_iteration_plot: try: get_ipython except NameError: show_iteration_plot = False log_title(logger, "Beginning pipeline search") logger.info("Optimizing for %s. 
" % self.objective.name) logger.info("{} score is better.\n".format('Greater' if self.objective.greater_is_better else 'Lower')) logger.info(f"Using {self._engine.__class__.__name__} to train and score pipelines.") if self.max_batches is not None: logger.info(f"Searching up to {self.max_batches} batches for a total of {self.max_iterations} pipelines. ") elif self.max_iterations is not None: logger.info("Searching up to %s pipelines. " % self.max_iterations) if self.max_time is not None: logger.info("Will stop searching for new pipelines after %d seconds.\n" % self.max_time) logger.info("Allowed model families: %s\n" % ", ".join([model.value for model in self.allowed_model_families])) self.search_iteration_plot = None if self.plot: self.search_iteration_plot = self.plot.search_iteration_plot(interactive_plot=show_iteration_plot) self._start = time.time() try: self._add_baseline_pipelines() except KeyboardInterrupt: if self._handle_keyboard_interrupt(): self._interrupted = True current_batch_pipelines = [] current_batch_pipeline_scores = [] new_pipeline_ids = [] loop_interrupted = False while self._should_continue(): try: if not loop_interrupted: current_batch_pipelines = self._automl_algorithm.next_batch() except StopIteration: logger.info('AutoML Algorithm out of recommendations, ending') break try: new_pipeline_ids = self._engine.evaluate_batch(current_batch_pipelines) loop_interrupted = False except KeyboardInterrupt: loop_interrupted = True if self._handle_keyboard_interrupt(): break full_rankings = self.full_rankings current_batch_idx = full_rankings['id'].isin(new_pipeline_ids) current_batch_pipeline_scores = full_rankings[current_batch_idx]['score'] if len(current_batch_pipeline_scores) and current_batch_pipeline_scores.isna().all(): raise AutoMLSearchException(f"All pipelines in the current AutoML batch produced a score of np.nan on the primary objective {self.objective}.") self.search_duration = time.time() - self._start elapsed_time = time_elapsed(self._start) desc = f"\nSearch finished after {elapsed_time}" desc = desc.ljust(self._MAX_NAME_LEN) logger.info(desc) self._find_best_pipeline() if self._best_pipeline is not None: best_pipeline = self.rankings.iloc[0] best_pipeline_name = best_pipeline["pipeline_name"] logger.info(f"Best pipeline: {best_pipeline_name}") logger.info(f"Best pipeline {self.objective.name}: {best_pipeline['score']:3f}") self._searched = True def _find_best_pipeline(self): """Finds the best pipeline in the rankings If self._best_pipeline already exists, check to make sure it is different from the current best pipeline before training and thresholding""" if len(self.rankings) == 0: return best_pipeline = self.rankings.iloc[0] if not (self._best_pipeline and self._best_pipeline == self.get_pipeline(best_pipeline['id'])): best_pipeline = self.get_pipeline(best_pipeline['id']) if self._train_best_pipeline: if best_pipeline.model_family == ModelFamily.ENSEMBLE: X_train, y_train = self.X_train.iloc[self.ensembling_indices], self.y_train.iloc[self.ensembling_indices] else: X_train = self.X_train y_train = self.y_train if hasattr(self.data_splitter, "transform_sample"): train_indices = self.data_splitter.transform_sample(X_train, y_train) X_train = X_train.iloc[train_indices] y_train = y_train.iloc[train_indices] best_pipeline = self._engine.train_pipeline(best_pipeline, X_train, y_train, self.optimize_thresholds, self.objective) self._best_pipeline = best_pipeline def _num_pipelines(self): """Return the number of pipeline evaluations which have been made Returns: 
int: the number of pipeline evaluations made in the search """ return len(self._results['pipeline_results']) def _should_continue(self): """Given the original stopping criterion and current state, should the search continue? Returns: bool: True if yes, False if no. """ if self._interrupted: return False # for add_to_rankings if self._searched: return True # Run at least one pipeline for every search num_pipelines = self._num_pipelines() if num_pipelines == 0: return True # check max_time and max_iterations elapsed = time.time() - self._start if self.max_time and elapsed >= self.max_time: return False elif self.max_iterations and num_pipelines >= self.max_iterations: return False # check for early stopping if self.patience is None or self.tolerance is None: return True first_id = self._results['search_order'][0] best_score = self._results['pipeline_results'][first_id]['score'] num_without_improvement = 0 for id in self._results['search_order'][1:]: curr_score = self._results['pipeline_results'][id]['score'] significant_change = abs((curr_score - best_score) / best_score) > self.tolerance score_improved = curr_score > best_score if self.objective.greater_is_better else curr_score < best_score if score_improved and significant_change: best_score = curr_score num_without_improvement = 0 else: num_without_improvement += 1 if num_without_improvement >= self.patience: logger.info("\n\n{} iterations without improvement. Stopping search early...".format(self.patience)) return False return True def _validate_problem_type(self): for obj in self.additional_objectives: if not obj.is_defined_for_problem_type(self.problem_type): raise ValueError("Additional objective {} is not compatible with a {} problem.".format(obj.name, self.problem_type.value)) for pipeline in self.allowed_pipelines or []: if pipeline.problem_type != self.problem_type: raise ValueError("Given pipeline {} is not compatible with problem_type {}.".format(pipeline.name, self.problem_type.value)) def _add_baseline_pipelines(self): """Fits a baseline pipeline to the data. This is the first pipeline fit during search. 
""" if self.problem_type == ProblemTypes.BINARY: baseline = ModeBaselineBinaryPipeline(parameters={}) elif self.problem_type == ProblemTypes.MULTICLASS: baseline = ModeBaselineMulticlassPipeline(parameters={}) elif self.problem_type == ProblemTypes.REGRESSION: baseline = MeanBaselineRegressionPipeline(parameters={}) else: pipeline_class = {ProblemTypes.TIME_SERIES_REGRESSION: TimeSeriesBaselineRegressionPipeline, ProblemTypes.TIME_SERIES_MULTICLASS: TimeSeriesBaselineMulticlassPipeline, ProblemTypes.TIME_SERIES_BINARY: TimeSeriesBaselineBinaryPipeline}[self.problem_type] gap = self.problem_configuration['gap'] max_delay = self.problem_configuration['max_delay'] baseline = pipeline_class(parameters={"pipeline": {"gap": gap, "max_delay": max_delay}, "Time Series Baseline Estimator": {"gap": gap, "max_delay": max_delay}}) self._engine.evaluate_batch([baseline]) @staticmethod def _get_mean_cv_scores_for_all_objectives(cv_data, objective_name_to_class): scores = defaultdict(int) n_folds = len(cv_data) for fold_data in cv_data: for field, value in fold_data['all_objective_scores'].items(): # The 'all_objective_scores' field contains scores for all objectives # but also fields like "# Training" and "# Testing", so we want to exclude them since # they are not scores if field in objective_name_to_class: scores[field] += value return {objective: float(score) / n_folds for objective, score in scores.items()} def _post_evaluation_callback(self, pipeline, evaluation_results): training_time = evaluation_results['training_time'] cv_data = evaluation_results['cv_data'] cv_scores = evaluation_results['cv_scores'] is_baseline = pipeline.model_family == ModelFamily.BASELINE cv_score = cv_scores.mean() percent_better_than_baseline = {} mean_cv_all_objectives = self._get_mean_cv_scores_for_all_objectives(cv_data, self.objective_name_to_class) if is_baseline: self._baseline_cv_scores = mean_cv_all_objectives for obj_name in mean_cv_all_objectives: objective_class = self.objective_name_to_class[obj_name] # In the event add_to_rankings is called before search _baseline_cv_scores will be empty so we will return # nan for the base score. 
percent_better = objective_class.calculate_percent_difference(mean_cv_all_objectives[obj_name], self._baseline_cv_scores.get(obj_name, np.nan)) percent_better_than_baseline[obj_name] = percent_better high_variance_cv = self._check_for_high_variance(pipeline, cv_scores) pipeline_id = len(self._results['pipeline_results']) self._results['pipeline_results'][pipeline_id] = { "id": pipeline_id, "pipeline_name": pipeline.name, "pipeline_class": type(pipeline), "pipeline_summary": pipeline.summary, "parameters": pipeline.parameters, "score": cv_score, "high_variance_cv": high_variance_cv, "training_time": training_time, "cv_data": cv_data, "percent_better_than_baseline_all_objectives": percent_better_than_baseline, "percent_better_than_baseline": percent_better_than_baseline[self.objective.name], "validation_score": cv_scores[0] } if pipeline.model_family == ModelFamily.ENSEMBLE: input_pipeline_ids = [self._automl_algorithm._best_pipeline_info[model_family]["id"] for model_family in self._automl_algorithm._best_pipeline_info] self._results['pipeline_results'][pipeline_id]["input_pipeline_ids"] = input_pipeline_ids self._results['search_order'].append(pipeline_id) if not is_baseline: score_to_minimize = -cv_score if self.objective.greater_is_better else cv_score try: self._automl_algorithm.add_result(score_to_minimize, pipeline, self._results['pipeline_results'][pipeline_id]) except PipelineNotFoundError: pass if self.search_iteration_plot: self.search_iteration_plot.update() if self.add_result_callback: self.add_result_callback(self._results['pipeline_results'][pipeline_id], pipeline, self) return pipeline_id def _check_for_high_variance(self, pipeline, cv_scores, threshold=0.2): """Checks cross-validation scores and logs a warning if variance is higher than specified threshhold.""" pipeline_name = pipeline.name high_variance_cv = bool(abs(cv_scores.std() / cv_scores.mean()) > threshold) if high_variance_cv: logger.warning(f"High coefficient of variation (cv >= {threshold}) within cross validation scores. {pipeline_name} may not perform as estimated on unseen data.") return high_variance_cv def get_pipeline(self, pipeline_id): """Given the ID of a pipeline training result, returns an untrained instance of the specified pipeline initialized with the parameters used to train that pipeline during automl search. Arguments: pipeline_id (int): pipeline to retrieve Returns: PipelineBase: untrained pipeline instance associated with the provided ID """ pipeline_results = self.results['pipeline_results'].get(pipeline_id) if pipeline_results is None: raise PipelineNotFoundError("Pipeline not found in automl results") pipeline_class = pipeline_results.get('pipeline_class') parameters = pipeline_results.get('parameters') if pipeline_class is None or parameters is None: raise PipelineNotFoundError("Pipeline class or parameters not found in automl results") return pipeline_class(parameters, random_seed=self.random_seed) def describe_pipeline(self, pipeline_id, return_dict=False): """Describe a pipeline Arguments: pipeline_id (int): pipeline to describe return_dict (bool): If True, return dictionary of information about pipeline. Defaults to False. Returns: Description of specified pipeline. Includes information such as type of pipeline components, problem, training time, cross validation, etc. 
""" if pipeline_id not in self._results['pipeline_results']: raise PipelineNotFoundError("Pipeline not found") pipeline = self.get_pipeline(pipeline_id) pipeline_results = self._results['pipeline_results'][pipeline_id] pipeline.describe() if pipeline.model_family == ModelFamily.ENSEMBLE: logger.info("Input for ensembler are pipelines with IDs: " + str(pipeline_results['input_pipeline_ids'])) log_subtitle(logger, "Training") logger.info("Training for {} problems.".format(pipeline.problem_type)) if self.optimize_thresholds and self.objective.is_defined_for_problem_type(ProblemTypes.BINARY) and self.objective.can_optimize_threshold: logger.info("Objective to optimize binary classification pipeline thresholds for: {}".format(self.objective)) logger.info("Total training time (including CV): %.1f seconds" % pipeline_results["training_time"]) log_subtitle(logger, "Cross Validation", underline="-") all_objective_scores = [fold["all_objective_scores"] for fold in pipeline_results["cv_data"]] all_objective_scores = pd.DataFrame(all_objective_scores) for c in all_objective_scores: if c in ["# Training", "# Validation"]: all_objective_scores[c] = all_objective_scores[c].astype("object") continue mean = all_objective_scores[c].mean(axis=0) std = all_objective_scores[c].std(axis=0) all_objective_scores.loc["mean", c] = mean all_objective_scores.loc["std", c] = std all_objective_scores.loc["coef of var", c] = std / mean if abs(mean) > 0 else np.inf all_objective_scores = all_objective_scores.fillna("-") with pd.option_context('display.float_format', '{:.3f}'.format, 'expand_frame_repr', False): logger.info(all_objective_scores) if return_dict: return pipeline_results def add_to_rankings(self, pipeline): """Fits and evaluates a given pipeline then adds the results to the automl rankings with the requirement that automl search has been run. Arguments: pipeline (PipelineBase): pipeline to train and evaluate. """ pipeline_rows = self.full_rankings[self.full_rankings['pipeline_name'] == pipeline.name] for parameter in pipeline_rows['parameters']: if pipeline.parameters == parameter: return self._engine.evaluate_batch([pipeline]) self._find_best_pipeline() @property def results(self): """Class that allows access to a copy of the results from `automl_search`. Returns: dict containing `pipeline_results`: a dict with results from each pipeline, and `search_order`: a list describing the order the pipelines were searched. """ return copy.deepcopy(self._results) @property def rankings(self): """Returns a pandas.DataFrame with scoring results from the highest-scoring set of parameters used with each pipeline.""" return self.full_rankings.drop_duplicates(subset="pipeline_name", keep="first") @property def full_rankings(self): """Returns a pandas.DataFrame with scoring results from all pipelines searched""" ascending = True if self.objective.greater_is_better: ascending = False full_rankings_cols = ["id", "pipeline_name", "score", "validation_score", "percent_better_than_baseline", "high_variance_cv", "parameters"] if not self._results['pipeline_results']: return pd.DataFrame(columns=full_rankings_cols) rankings_df = pd.DataFrame(self._results['pipeline_results'].values()) rankings_df = rankings_df[full_rankings_cols] rankings_df.sort_values("score", ascending=ascending, inplace=True) rankings_df.reset_index(drop=True, inplace=True) return rankings_df @property def best_pipeline(self): """Returns a trained instance of the best pipeline and parameters found during automl search. 
If `train_best_pipeline` is set to False, returns an untrained pipeline instance. Returns: PipelineBase: A trained instance of the best pipeline and parameters found during automl search. If `train_best_pipeline` is set to False, returns an untrained pipeline instance. """ if not self._best_pipeline: raise PipelineNotFoundError("automl search must be run before selecting `best_pipeline`.") return self._best_pipeline def save(self, file_path, pickle_protocol=cloudpickle.DEFAULT_PROTOCOL): """Saves AutoML object at file path Arguments: file_path (str): location to save file pickle_protocol (int): the pickle data stream format. Returns: None """ with open(file_path, 'wb') as f: cloudpickle.dump(self, f, protocol=pickle_protocol) @staticmethod def load(file_path): """Loads AutoML object at file path Arguments: file_path (str): location to find file to load Returns: AutoSearchBase object """ with open(file_path, 'rb') as f: return cloudpickle.load(f) def train_pipelines(self, pipelines): """Train a list of pipelines on the training data. This can be helpful for training pipelines once the search is complete. Arguments: pipelines (list(PipelineBase)): List of pipelines to train. Returns: Dict[str, PipelineBase]: Dictionary keyed by pipeline name that maps to the fitted pipeline. Note that the any pipelines that error out during training will not be included in the dictionary but the exception and stacktrace will be displayed in the log. """ return self._engine.train_batch(pipelines) def score_pipelines(self, pipelines, X_holdout, y_holdout, objectives): """Score a list of pipelines on the given holdout data. Arguments: pipelines (list(PipelineBase)): List of pipelines to train. X_holdout (ww.DataTable, pd.DataFrame): Holdout features. y_holdout (ww.DataTable, pd.DataFrame): Holdout targets for scoring. objectives (list(str), list(ObjectiveBase)): Objectives used for scoring. Returns: Dict[str, Dict[str, float]]: Dictionary keyed by pipeline name that maps to a dictionary of scores. Note that the any pipelines that error out during scoring will not be included in the dictionary but the exception and stacktrace will be displayed in the log. """ return self._engine.score_batch(pipelines, X_holdout, y_holdout, objectives)
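A hedged usage sketch for the search class whose methods appear above. This looks like evalml's AutoMLSearch; the constructor arguments and the `search()` call are assumptions, `X` and `y` are placeholder training data, and everything used after construction is limited to the attributes and methods defined above.

    # Assumed construction; the class above appears to be evalml's AutoMLSearch.
    from evalml.automl import AutoMLSearch

    automl = AutoMLSearch(X_train=X, y_train=y, problem_type="binary")  # X, y: placeholders
    automl.search()

    print(automl.rankings)                     # best result per pipeline name
    print(automl.full_rankings)                # every pipeline evaluated
    best_id = automl.rankings.iloc[0]["id"]
    automl.describe_pipeline(best_id)          # CV table and training summary

    pipeline = automl.best_pipeline            # trained unless train_best_pipeline=False
    automl.save("automl.pkl")                  # cloudpickle persistence, as defined above
    restored = AutoMLSearch.load("automl.pkl")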
50.145251
234
0.660561
43,358
0.966087
0
0
3,034
0.067602
0
0
17,310
0.385695
b9206e8febc3abecc98cfdec65d8f8f8f61e43fc
782
py
Python
graphql_social_auth/mutations.py
deepsourcelabs/django-graphql-social-auth
a0cc7715144dc289ccb4d2430e7c3b94fc1dffba
[ "MIT" ]
1
2021-09-03T11:55:33.000Z
2021-09-03T11:55:33.000Z
graphql_social_auth/mutations.py
deepsourcelabs/django-graphql-social-auth
a0cc7715144dc289ccb4d2430e7c3b94fc1dffba
[ "MIT" ]
null
null
null
graphql_social_auth/mutations.py
deepsourcelabs/django-graphql-social-auth
a0cc7715144dc289ccb4d2430e7c3b94fc1dffba
[ "MIT" ]
null
null
null
import graphene
from graphql_jwt.decorators import setup_jwt_cookie

from . import mixins, types
from .decorators import social_auth


class SocialAuthMutation(mixins.SocialAuthMixin, graphene.Mutation):
    social = graphene.Field(types.SocialType)

    class Meta:
        abstract = True

    class Arguments:
        provider = graphene.String(required=True)
        code = graphene.String(required=True)

    @classmethod
    @setup_jwt_cookie
    @social_auth
    def mutate(cls, root, info, social, **kwargs):
        return cls.resolve(root, info, social, **kwargs)


class SocialAuth(mixins.ResolveMixin, SocialAuthMutation):
    """Social Auth Mutation"""


class SocialAuthJWT(mixins.JSONWebTokenMixin, SocialAuthMutation):
    """Social Auth for JSON Web Token (JWT)"""
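A minimal sketch of how mutation classes like these are typically mounted on a graphene schema; the `Mutation` and `schema` names below are illustrative assumptions, not part of the file above.

    import graphene
    from graphql_social_auth.mutations import SocialAuth, SocialAuthJWT

    class Mutation(graphene.ObjectType):
        # session-based social login
        social_auth = SocialAuth.Field()
        # JWT-based social login
        token_auth = SocialAuthJWT.Field()

    schema = graphene.Schema(mutation=Mutation)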
25.225806
68
0.726343
640
0.818414
0
0
159
0.203325
0
0
68
0.086957
b92225fd1fc48f3b53478df0ef2d1501b1d04475
1,625
py
Python
yellowbrick/regressor/base.py
Juan0001/yellowbrick-docs-zh
36275d9704fc2a946c5bec5f802106bb5281efd1
[ "Apache-2.0" ]
20
2018-03-24T02:29:20.000Z
2022-03-03T05:01:40.000Z
yellowbrick/regressor/base.py
Juan0001/yellowbrick-docs-zh
36275d9704fc2a946c5bec5f802106bb5281efd1
[ "Apache-2.0" ]
4
2018-03-20T12:01:17.000Z
2019-04-07T16:02:19.000Z
yellowbrick/regressor/base.py
Juan0001/yellowbrick-docs-zh
36275d9704fc2a946c5bec5f802106bb5281efd1
[ "Apache-2.0" ]
5
2018-03-17T08:18:57.000Z
2019-11-15T02:20:20.000Z
# yellowbrick.regressor.base
# Base classes for regressor Visualizers.
#
# Author:   Rebecca Bilbro <[email protected]>
# Author:   Benjamin Bengfort <[email protected]>
# Created:  Fri Jun 03 10:30:36 2016 -0700
#
# Copyright (C) 2016 District Data Labs
# For license information, see LICENSE.txt
#
# ID: base.py [7d3f5e6] [email protected] $

"""
Base classes for regressor Visualizers.
"""

##########################################################################
## Imports
##########################################################################

from ..utils import isregressor
from ..base import ScoreVisualizer
from ..exceptions import YellowbrickTypeError


## Packages for export
__all__ = [
    "RegressionScoreVisualizer",
]


##########################################################################
## Regression Visualization Base Object
##########################################################################

class RegressionScoreVisualizer(ScoreVisualizer):
    """
    Base class for all ScoreVisualizers that evaluate a regression estimator.

    The primary functionality of this class is to perform a check to ensure
    the passed in estimator is a regressor, otherwise it raises a
    ``YellowbrickTypeError``.
    """

    def __init__(self, model, ax=None, **kwargs):
        if not isregressor(model):
            raise YellowbrickTypeError(
                "This estimator is not a regressor; try a classifier or "
                "clustering score visualizer instead!"
            )

        super(RegressionScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
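A minimal sketch of the type check this base class enforces; the toy subclass and the estimator choices below are illustrative assumptions.

    from sklearn.cluster import KMeans
    from sklearn.linear_model import Ridge

    from yellowbrick.regressor.base import RegressionScoreVisualizer

    class ToyRegressorViz(RegressionScoreVisualizer):
        """Toy subclass; real visualizers add draw()/score() logic."""

    ToyRegressorViz(Ridge())   # accepted: Ridge is a regressor
    ToyRegressorViz(KMeans())  # raises YellowbrickTypeError: not a regressor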
30.660377
79
0.582154
662
0.407385
0
0
0
0
0
0
1,159
0.713231
b92247a49fd2631992a5eddee925c5305320a529
2,941
py
Python
contrib/stack/stripmapStack/crossmul.py
falkamelung/isce2
edea69d4b6216f4ac729eba78f12547807a2751a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
contrib/stack/stripmapStack/crossmul.py
falkamelung/isce2
edea69d4b6216f4ac729eba78f12547807a2751a
[ "ECL-2.0", "Apache-2.0" ]
null
null
null
contrib/stack/stripmapStack/crossmul.py
falkamelung/isce2
edea69d4b6216f4ac729eba78f12547807a2751a
[ "ECL-2.0", "Apache-2.0" ]
1
2021-06-05T16:39:25.000Z
2021-06-05T16:39:25.000Z
#!/usr/bin/env python3

import os
import argparse
import logging

import isce
import isceobj
from components.stdproc.stdproc import crossmul
from iscesys.ImageUtil.ImageUtil import ImageUtil as IU


def createParser():
    '''
    Command Line Parser.
    '''
    parser = argparse.ArgumentParser(
        description='Generate offset field between two Sentinel swaths')
    parser.add_argument('-m', '--master', type=str, dest='master',
                        required=True, help='Master image')
    parser.add_argument('-s', '--slave', type=str, dest='slave',
                        required=True, help='Slave image')
    parser.add_argument('-o', '--outdir', type=str, dest='prefix',
                        default='crossmul',
                        help='Prefix of output int and amp files')
    parser.add_argument('-a', '--alks', type=int, dest='azlooks',
                        default=1, help='Azimuth looks')
    parser.add_argument('-r', '--rlks', type=int, dest='rglooks',
                        default=1, help='Range looks')

    return parser


def cmdLineParse(iargs = None):
    parser = createParser()
    return parser.parse_args(args=iargs)


def run(imageSlc1, imageSlc2, resampName, azLooks, rgLooks):

    objSlc1 = isceobj.createSlcImage()
    #right now imageSlc1 and 2 are just text files, need to open them as image
    IU.copyAttributes(imageSlc1, objSlc1)
    objSlc1.setAccessMode('read')
    objSlc1.createImage()

    objSlc2 = isceobj.createSlcImage()
    IU.copyAttributes(imageSlc2, objSlc2)
    objSlc2.setAccessMode('read')
    objSlc2.createImage()

    slcWidth = imageSlc1.getWidth()
    intWidth = int(slcWidth / rgLooks)

    lines = min(imageSlc1.getLength(), imageSlc2.getLength())

    resampAmp = resampName + '.amp'
    resampInt = resampName + '.int'

    objInt = isceobj.createIntImage()
    objInt.setFilename(resampInt)
    objInt.setWidth(intWidth)
    imageInt = isceobj.createIntImage()
    IU.copyAttributes(objInt, imageInt)
    objInt.setAccessMode('write')
    objInt.createImage()

    objAmp = isceobj.createAmpImage()
    objAmp.setFilename(resampAmp)
    objAmp.setWidth(intWidth)
    imageAmp = isceobj.createAmpImage()
    IU.copyAttributes(objAmp, imageAmp)
    objAmp.setAccessMode('write')
    objAmp.createImage()

    objCrossmul = crossmul.createcrossmul()
    objCrossmul.width = slcWidth
    objCrossmul.length = lines
    objCrossmul.LooksDown = azLooks
    objCrossmul.LooksAcross = rgLooks

    objCrossmul.crossmul(objSlc1, objSlc2, objInt, objAmp)

    for obj in [objInt, objAmp, objSlc1, objSlc2]:
        obj.finalizeImage()

    return imageInt, imageAmp


def main(iargs=None):
    inps = cmdLineParse(iargs)

    img1 = isceobj.createImage()
    img1.load(inps.master + '.xml')

    img2 = isceobj.createImage()
    img2.load(inps.slave + '.xml')

    os.makedirs(os.path.dirname(inps.prefix), exist_ok=True)

    run(img1, img2, inps.prefix, inps.azlooks, inps.rglooks)


if __name__ == '__main__':

    main()

    '''
    Main driver.
    '''
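A hedged sketch of driving the script above from Python rather than the shell; the SLC file names are placeholders and assume ISCE .xml metadata files sit next to them.

    # Assumed usage: main() forwards the argument list to the parser defined above.
    import crossmul

    crossmul.main([
        '-m', 'master.slc',                  # master SLC (master.slc.xml expected)
        '-s', 'slave.slc',                   # slave SLC (slave.slc.xml expected)
        '-o', 'interferogram/crossmul',      # prefix for the .int and .amp outputs
        '-a', '10',                          # azimuth looks
        '-r', '10',                          # range looks
    ])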
27.485981
102
0.682761
0
0
0
0
0
0
0
0
478
0.16253
b92338655b37aa1b9646d78826676f4639eac7d3
550
py
Python
27. Remove Element/solution2.py
sunshot/LeetCode
8f6503201831055f1d49ed3abb25be44a13ec317
[ "MIT" ]
null
null
null
27. Remove Element/solution2.py
sunshot/LeetCode
8f6503201831055f1d49ed3abb25be44a13ec317
[ "MIT" ]
null
null
null
27. Remove Element/solution2.py
sunshot/LeetCode
8f6503201831055f1d49ed3abb25be44a13ec317
[ "MIT" ]
null
null
null
from typing import List

class Solution:
    def removeElement(self, nums: List[int], val: int) -> int:
        if not nums:
            return 0
        curr = 0
        n = len(nums)
        while curr < n:
            if nums[curr] == val:
                nums[curr] = nums[n-1]
                n -= 1
            else:
                curr += 1
        return n

if __name__ == '__main__':
    solution = Solution()
    nums = [3,2,2,3]
    val = 3
    ans = solution.removeElement(nums, val)
    # print(ans)
    print(nums[:ans])
23.913043
62
0.461818
340
0.618182
0
0
0
0
0
0
22
0.04
b923cd998b5a122c2fa8e86b09305b2b291d6507
3,873
py
Python
platformio/commands/home/run.py
Granjow/platformio-core
71ae579bc07b2e11fec16acda482dea04bc3a359
[ "Apache-2.0" ]
4,744
2016-11-28T14:37:47.000Z
2022-03-31T12:35:56.000Z
platformio/commands/home/run.py
Granjow/platformio-core
71ae579bc07b2e11fec16acda482dea04bc3a359
[ "Apache-2.0" ]
3,424
2016-11-27T22:45:41.000Z
2022-03-31T21:40:03.000Z
platformio/commands/home/run.py
Granjow/platformio-core
71ae579bc07b2e11fec16acda482dea04bc3a359
[ "Apache-2.0" ]
576
2016-12-01T18:48:22.000Z
2022-03-30T02:27:35.000Z
# Copyright (c) 2014-present PlatformIO <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from urllib.parse import urlparse import click import uvicorn from starlette.applications import Starlette from starlette.middleware import Middleware from starlette.responses import PlainTextResponse from starlette.routing import Mount, Route, WebSocketRoute from starlette.staticfiles import StaticFiles from starlette.status import HTTP_403_FORBIDDEN from platformio.commands.home.rpc.handlers.account import AccountRPC from platformio.commands.home.rpc.handlers.app import AppRPC from platformio.commands.home.rpc.handlers.ide import IDERPC from platformio.commands.home.rpc.handlers.misc import MiscRPC from platformio.commands.home.rpc.handlers.os import OSRPC from platformio.commands.home.rpc.handlers.piocore import PIOCoreRPC from platformio.commands.home.rpc.handlers.project import ProjectRPC from platformio.commands.home.rpc.server import WebSocketJSONRPCServerFactory from platformio.compat import aio_get_running_loop from platformio.exception import PlatformioException from platformio.package.manager.core import get_core_package_dir from platformio.proc import force_exit class ShutdownMiddleware: def __init__(self, app): self.app = app async def __call__(self, scope, receive, send): if scope["type"] == "http" and b"__shutdown__" in scope.get("query_string", {}): await shutdown_server() await self.app(scope, receive, send) async def shutdown_server(_=None): aio_get_running_loop().call_later(0.5, force_exit) return PlainTextResponse("Server has been shutdown!") async def protected_page(_): return PlainTextResponse( "Protected PlatformIO Home session", status_code=HTTP_403_FORBIDDEN ) def run_server(host, port, no_open, shutdown_timeout, home_url): contrib_dir = get_core_package_dir("contrib-piohome") if not os.path.isdir(contrib_dir): raise PlatformioException("Invalid path to PIO Home Contrib") ws_rpc_factory = WebSocketJSONRPCServerFactory(shutdown_timeout) ws_rpc_factory.addObjectHandler(AccountRPC(), namespace="account") ws_rpc_factory.addObjectHandler(AppRPC(), namespace="app") ws_rpc_factory.addObjectHandler(IDERPC(), namespace="ide") ws_rpc_factory.addObjectHandler(MiscRPC(), namespace="misc") ws_rpc_factory.addObjectHandler(OSRPC(), namespace="os") ws_rpc_factory.addObjectHandler(PIOCoreRPC(), namespace="core") ws_rpc_factory.addObjectHandler(ProjectRPC(), namespace="project") path = urlparse(home_url).path routes = [ WebSocketRoute(path + "wsrpc", ws_rpc_factory, name="wsrpc"), Route(path + "__shutdown__", shutdown_server, methods=["POST"]), Mount(path, StaticFiles(directory=contrib_dir, html=True), name="static"), ] if path != "/": routes.append(Route("/", protected_page)) uvicorn.run( Starlette( middleware=[Middleware(ShutdownMiddleware)], routes=routes, on_startup=[ lambda: click.echo( "PIO Home has been started. Press Ctrl+C to shutdown." ), lambda: None if no_open else click.launch(home_url), ], ), host=host, port=port, log_level="warning", )
38.73
88
0.737155
300
0.077459
0
0
0
0
504
0.130132
906
0.233927
b924107dfd6ae9e56411cce662afa3db86b021e5
11,450
py
Python
appengine/components/components/machine_provider/rpc_messages.py
stefb965/luci-py
e0a8a5640c4104e5c90781d833168aa8a8d1f24d
[ "Apache-2.0" ]
1
2017-10-30T15:08:10.000Z
2017-10-30T15:08:10.000Z
appengine/components/components/machine_provider/rpc_messages.py
stefb965/luci-py
e0a8a5640c4104e5c90781d833168aa8a8d1f24d
[ "Apache-2.0" ]
null
null
null
appengine/components/components/machine_provider/rpc_messages.py
stefb965/luci-py
e0a8a5640c4104e5c90781d833168aa8a8d1f24d
[ "Apache-2.0" ]
1
2020-07-05T19:54:40.000Z
2020-07-05T19:54:40.000Z
# Copyright 2015 The LUCI Authors. All rights reserved. # Use of this source code is governed under the Apache License, Version 2.0 # that can be found in the LICENSE file. """Messages for the Machine Provider API.""" # pylint: disable=unused-wildcard-import, wildcard-import from protorpc import messages from components.machine_provider.dimensions import * from components.machine_provider.instructions import * from components.machine_provider.policies import * class CatalogMachineRetrievalRequest(messages.Message): """Represents a request to retrieve a machine from the catalog.""" # Hostname of the machine to retrieve. hostname = messages.StringField(1, required=True) # Backend which added the machine. backend = messages.EnumField(Backend, 2) class CatalogMachineRetrievalResponse(messages.Message): """Represents a response to a catalog machine retrieval request.""" # Dimensions instance specifying what sort of machine this is. dimensions = messages.MessageField(Dimensions, 1) # Policies governing this machine. policies = messages.MessageField(Policies, 2) # State of the CatalogMachineEntry. state = messages.StringField(3) # Cloud Pub/Sub subscription the machine must listen to for instructions. pubsub_subscription = messages.StringField(4) # Project the Cloud Pub/Sub subscription exists in. pubsub_subscription_project = messages.StringField(5) # Cloud Pub/Sub topic the machine must be subscribed to. pubsub_topic = messages.StringField(6) # Project the Cloud Pub/Sub topic exists in. pubsub_topic_project = messages.StringField(7) # Timestamp indicating lease expiration seconds from epoch in UTC. lease_expiration_ts = messages.IntegerField(8) class CatalogMachineAdditionRequest(messages.Message): """Represents a request to add a machine to the catalog. dimensions.backend must be specified. dimensions.hostname must be unique per backend. """ # Dimensions instance specifying what sort of machine this is. dimensions = messages.MessageField(Dimensions, 1, required=True) # Policies instance specifying machine-specific configuration. policies = messages.MessageField(Policies, 2, required=True) class CatalogMachineBatchAdditionRequest(messages.Message): """Represents a batched set of CatalogMachineAdditionRequests. dimensions.backend must be specified in each CatalogMachineAdditionRequest. dimensions.hostname must be unique per backend. """ # CatalogMachineAdditionRequest instances to batch together. requests = messages.MessageField( CatalogMachineAdditionRequest, 1, repeated=True) class CatalogMachineDeletionRequest(messages.Message): """Represents a request to delete a machine in the catalog.""" # Dimensions instance specifying what sort of machine this is. dimensions = messages.MessageField(Dimensions, 1, required=True) class CatalogManipulationRequestError(messages.Enum): """Represents an error in a catalog manipulation request.""" # Per backend, hostnames must be unique in the catalog. HOSTNAME_REUSE = 1 # Tried to lookup an entry that didn't exist. ENTRY_NOT_FOUND = 2 # Didn't specify a backend. UNSPECIFIED_BACKEND = 3 # Specified backend didn't match the backend originating the request. MISMATCHED_BACKEND = 4 # Didn't specify a hostname. UNSPECIFIED_HOSTNAME = 5 # Proposed Cloud Pub/Sub topic was invalid. INVALID_TOPIC = 6 # Proposed Cloud Pub/Sub project was invalid. INVALID_PROJECT = 7 # Didn't specify a Cloud Pub/Sub topic. UNSPECIFIED_TOPIC = 8 # Attempted to delete a leased machine. 
LEASED = 9 class CatalogManipulationResponse(messages.Message): """Represents a response to a catalog manipulation request.""" # CatalogManipulationRequestError instance indicating an error with the # request, or None if there is no error. error = messages.EnumField(CatalogManipulationRequestError, 1) # CatalogMachineAdditionRequest this response is in reference to. machine_addition_request = messages.MessageField( CatalogMachineAdditionRequest, 2) # CatalogMachineDeletionRequest this response is in reference to. machine_deletion_request = messages.MessageField( CatalogMachineDeletionRequest, 3) class CatalogBatchManipulationResponse(messages.Message): """Represents a response to a batched catalog manipulation request.""" responses = messages.MessageField( CatalogManipulationResponse, 1, repeated=True) class LeaseRequest(messages.Message): """Represents a request for a lease on a machine.""" # Per-user unique ID used to deduplicate requests. request_id = messages.StringField(1, required=True) # Dimensions instance specifying what sort of machine to lease. dimensions = messages.MessageField(Dimensions, 2, required=True) # Desired length of the lease in seconds. duration = messages.IntegerField(3) # Cloud Pub/Sub topic name to communicate on regarding this request. pubsub_topic = messages.StringField(4) # Cloud Pub/Sub project name to communicate on regarding this request. pubsub_project = messages.StringField(5) # Instructions to give the machine once it's been leased. on_lease = messages.MessageField(Instruction, 6) # UTC seconds from epoch when lease should expire. lease_expiration_ts = messages.IntegerField(7) class BatchedLeaseRequest(messages.Message): """Represents a batched set of LeaseRequests.""" # LeaseRequest instances to batch together. requests = messages.MessageField(LeaseRequest, 1, repeated=True) class LeaseRequestError(messages.Enum): """Represents an error in a LeaseRequest.""" # Request IDs are intended to be unique. # Reusing a request ID in a different request is an error. REQUEST_ID_REUSE = 1 # Proposed Cloud Pub/Sub topic was invalid. INVALID_TOPIC = 2 # Proposed Cloud Pub/Sub project was invalid. INVALID_PROJECT = 3 # Didn't specify a Cloud Pub/Sub topic. UNSPECIFIED_TOPIC = 4 # Request couldn't be processed in time. DEADLINE_EXCEEDED = 5 # Miscellaneous transient error. TRANSIENT_ERROR = 6 # Mutually exclusive duration and lease_expiration_ts both specified. MUTUAL_EXCLUSION_ERROR = 7 # Proposed duration was zero or negative. NONPOSITIVE_DEADLINE = 8 # Proposed expiration time is not in the future. LEASE_EXPIRATION_TS_ERROR = 9 # Neither duration nor lease_expiration_ts were specified. LEASE_LENGTH_UNSPECIFIED = 10 # Requested lease duration is too long. LEASE_TOO_LONG = 11 class LeaseRequestState(messages.Enum): """Represents the state of a LeaseRequest.""" # LeaseRequest has been received, but not processed yet. UNTRIAGED = 0 # LeaseRequest is pending provisioning of additional capacity. PENDING = 1 # LeaseRequest has been fulfilled. FULFILLED = 2 # LeaseRequest has been denied. DENIED = 3 class LeaseResponse(messages.Message): """Represents a response to a LeaseRequest.""" # SHA-1 identifying the LeaseRequest this response refers to. request_hash = messages.StringField(1) # LeaseRequestError instance indicating an error with the request, or None # if there is no error. error = messages.EnumField(LeaseRequestError, 2) # Request ID used by the client to generate the LeaseRequest. 
client_request_id = messages.StringField(3, required=True) # State of the LeaseRequest. state = messages.EnumField(LeaseRequestState, 4) # Hostname of the machine available for this request. hostname = messages.StringField(5) # Timestamp indicating lease expiration seconds from epoch in UTC. lease_expiration_ts = messages.IntegerField(6) class BatchedLeaseResponse(messages.Message): """Represents a response to a batched lease request.""" responses = messages.MessageField(LeaseResponse, 1, repeated=True) class LeaseReleaseRequest(messages.Message): """Represents a request to voluntarily cancel a LeaseRequest.""" # Per-user unique ID used to identify the LeaseRequest. request_id = messages.StringField(1, required=True) class BatchedLeaseReleaseRequest(messages.Message): """Represents a batched set of lease release requests.""" requests = messages.MessageField(LeaseReleaseRequest, 1, repeated=True) class LeaseReleaseRequestError(messages.Enum): """Represents an error in a LeaseReleaseRequest.""" # Request ID referred to non-existent request for this user. NOT_FOUND = 1 # Request ID referred to an unfulfilled request. NOT_FULFILLED = 2 # Request ID referred to a fulfilled request whose machine was # already reclaimed. ALREADY_RECLAIMED = 3 # Request couldn't be processed in time. DEADLINE_EXCEEDED = 4 # Miscellaneous transient error. TRANSIENT_ERROR = 5 class LeaseReleaseResponse(messages.Message): """Represents a response to a LeaseReleaseRequest.""" # SHA-1 identifying the LeaseRequest this response refers to. request_hash = messages.StringField(1) # LeaseReleaseRequestError indicating an error with the request, or None # if there is no error. error = messages.EnumField(LeaseReleaseRequestError, 2) # Request ID used by the client to generate the LeaseRequest # referred to by the LeaseReleaseRequest. client_request_id = messages.StringField(3, required=True) class BatchedLeaseReleaseResponse(messages.Message): """Represents responses to a batched set of lease release requests.""" responses = messages.MessageField(LeaseReleaseResponse, 1, repeated=True) class MachineInstructionRequest(messages.Message): """Represents a request to send an instruction to a leased machine.""" # Request ID for the fulfilled LeaseRequest whose machine should be # instructed. request_id = messages.StringField(1, required=True) # Instruction to send the leased machine. instruction = messages.MessageField(Instruction, 2) class MachineInstructionError(messages.Enum): """Represents an error in a MachineInstructionRequest.""" # Request ID referred to an unfulfilled request. NOT_FULFILLED = 1 # Request ID referred to a fulfilled request whose machine was # already reclaimed. ALREADY_RECLAIMED = 2 # Invalid instruction for the machine. INVALID_INSTRUCTION = 3 class MachineInstructionResponse(messages.Message): """Represents a response to a MachineInstructionRequest.""" # Request ID used by the client to generate the LeaseRequest for the # machine being instructed. client_request_id = messages.StringField(1, required=True) # MachineInstructionError indicating an error with the request, or None # if there is no error. error = messages.EnumField(MachineInstructionError, 2) class PollRequest(messages.Message): """Represents a request to poll for instructions given to a machine.""" # Hostname of the machine whose instructions to retrieve. hostname = messages.StringField(1, required=True) # Backend the machine belongs to. Generally required. 
backend = messages.EnumField(Backend, 2) class PollResponse(messages.Message): """Represents a response to a request for instructions given to a machine.""" # Instruction given to the machine. instruction = messages.MessageField(Instruction, 1) # State of the instruction. state = messages.StringField(2) class AckRequest(messages.Message): """Represents a request to ack an instruction received by a machine.""" # Hostname of the machine whose instruction to ack. hostname = messages.StringField(1, required=True) # Backend the machine belongs to. backend = messages.EnumField(Backend, 2)
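A small, hedged sketch of constructing one of the protorpc messages defined above; only fields declared in the classes above are set, and the request id is a placeholder.

    from components.machine_provider import rpc_messages

    # Release a previously requested lease, then batch it for the API.
    release = rpc_messages.LeaseReleaseRequest(request_id='my-request-id')
    batch = rpc_messages.BatchedLeaseReleaseRequest(requests=[release])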
38.945578
79
0.773537
10,906
0.952489
0
0
0
0
0
0
6,219
0.543144
b925f7b3126896a3611797c97e1fa8d0eee2234c
564
py
Python
webscraping.py
carvalho-fdec/DesafioDSA
fec9742bd77ddc3923ed616b6511cce87de48968
[ "MIT" ]
null
null
null
webscraping.py
carvalho-fdec/DesafioDSA
fec9742bd77ddc3923ed616b6511cce87de48968
[ "MIT" ]
null
null
null
webscraping.py
carvalho-fdec/DesafioDSA
fec9742bd77ddc3923ed616b6511cce87de48968
[ "MIT" ]
null
null
null
# webscraping test

import urllib.request
from bs4 import BeautifulSoup

with urllib.request.urlopen('http://www.netvasco.com.br') as url:
    page = url.read()
    #print(page)
    print(url.geturl())
    print(url.info())
    print(url.getcode())

# Parse the HTML in the 'page' variable and store it in Beautiful Soup format
soup = BeautifulSoup(page, 'html.parser')
#print(soup.prettify())

print(soup.title)
print(soup.title.string)
print(soup.title.name)

soup_a = soup.find_all('a')[:10]
for a in soup_a:
    print(a.get('href'))
    print(a.get_text())
18.193548
74
0.687943
0
0
0
0
0
0
0
0
178
0.315044
b9270600c4aae588202efc6c296f0228f4d2527a
21,441
py
Python
tensorboard/backend/event_processing/data_provider_test.py
hongxu-jia/tensorboard
98d4dadc61fd5a0580bed808653c59fb37748893
[ "Apache-2.0" ]
1
2021-01-07T14:58:47.000Z
2021-01-07T14:58:47.000Z
tensorboard/backend/event_processing/data_provider_test.py
hongxu-jia/tensorboard
98d4dadc61fd5a0580bed808653c59fb37748893
[ "Apache-2.0" ]
null
null
null
tensorboard/backend/event_processing/data_provider_test.py
hongxu-jia/tensorboard
98d4dadc61fd5a0580bed808653c59fb37748893
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Unit tests for `tensorboard.backend.event_processing.data_provider`.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import six from six.moves import xrange # pylint: disable=redefined-builtin import numpy as np from tensorboard import context from tensorboard.backend.event_processing import data_provider from tensorboard.backend.event_processing import ( plugin_event_multiplexer as event_multiplexer, ) from tensorboard.compat.proto import summary_pb2 from tensorboard.data import provider as base_provider from tensorboard.plugins.graph import metadata as graph_metadata from tensorboard.plugins.histogram import metadata as histogram_metadata from tensorboard.plugins.histogram import summary_v2 as histogram_summary from tensorboard.plugins.scalar import metadata as scalar_metadata from tensorboard.plugins.scalar import summary_v2 as scalar_summary from tensorboard.plugins.image import metadata as image_metadata from tensorboard.plugins.image import summary_v2 as image_summary from tensorboard.util import tensor_util import tensorflow.compat.v1 as tf1 import tensorflow.compat.v2 as tf tf1.enable_eager_execution() class MultiplexerDataProviderTest(tf.test.TestCase): def setUp(self): super(MultiplexerDataProviderTest, self).setUp() self.logdir = self.get_temp_dir() self.ctx = context.RequestContext() logdir = os.path.join(self.logdir, "polynomials") with tf.summary.create_file_writer(logdir).as_default(): for i in xrange(10): scalar_summary.scalar( "square", i ** 2, step=2 * i, description="boxen" ) scalar_summary.scalar("cube", i ** 3, step=3 * i) logdir = os.path.join(self.logdir, "waves") with tf.summary.create_file_writer(logdir).as_default(): for i in xrange(10): scalar_summary.scalar("sine", tf.sin(float(i)), step=i) scalar_summary.scalar( "square", tf.sign(tf.sin(float(i))), step=i ) # Summary with rank-0 data but not owned by the scalars plugin. metadata = summary_pb2.SummaryMetadata() metadata.plugin_data.plugin_name = "marigraphs" metadata.data_class = summary_pb2.DATA_CLASS_SCALAR tf.summary.write( "high_tide", tensor=i, step=i, metadata=metadata ) # Summary with rank-1 data of scalar data class (bad!). 
metadata = summary_pb2.SummaryMetadata() metadata.plugin_data.plugin_name = "greetings" metadata.data_class = summary_pb2.DATA_CLASS_SCALAR tf.summary.write( "bad", tensor=[i, i], step=i, metadata=metadata ) logdir = os.path.join(self.logdir, "lebesgue") with tf.summary.create_file_writer(logdir).as_default(): data = [ ("very smooth", (0.0, 0.25, 0.5, 0.75, 1.0), "uniform"), ("very smoothn't", (0.0, 0.01, 0.99, 1.0), "bimodal"), ] for (description, distribution, name) in data: tensor = tf.constant([distribution], dtype=tf.float64) for i in xrange(1, 11): histogram_summary.histogram( name, tensor * i, step=i, description=description ) logdir = os.path.join(self.logdir, "mondrian") with tf.summary.create_file_writer(logdir).as_default(): data = [ ("red", (221, 28, 38), "top-right"), ("blue", (1, 91, 158), "bottom-left"), ("yellow", (239, 220, 111), "bottom-right"), ] for (name, color, description) in data: image_1x1 = tf.constant([[[color]]], dtype=tf.uint8) for i in xrange(1, 11): # Use a non-monotonic sequence of sample sizes to # test `max_length` calculation. k = 6 - abs(6 - i) # 1, .., 6, .., 2 # a `k`-sample image summary of `i`-by-`i` images image = tf.tile(image_1x1, [k, i, i, 1]) image_summary.image( name, image, step=i, description=description, max_outputs=99, ) def create_multiplexer(self): multiplexer = event_multiplexer.EventMultiplexer() multiplexer.AddRunsFromDirectory(self.logdir) multiplexer.Reload() return multiplexer def create_provider(self): multiplexer = self.create_multiplexer() return data_provider.MultiplexerDataProvider(multiplexer, self.logdir) def test_data_location(self): provider = self.create_provider() result = provider.data_location(self.ctx, experiment_id="unused") self.assertEqual(result, self.logdir) def test_list_plugins_with_no_graph(self): provider = self.create_provider() result = provider.list_plugins(self.ctx, experiment_id="unused") self.assertItemsEqual( result, [ "greetings", "marigraphs", histogram_metadata.PLUGIN_NAME, image_metadata.PLUGIN_NAME, scalar_metadata.PLUGIN_NAME, ], ) def test_list_plugins_with_graph(self): with tf.compat.v1.Graph().as_default() as graph: writer = tf.compat.v1.summary.FileWriter(self.logdir) writer.add_graph(graph) writer.flush() provider = self.create_provider() result = provider.list_plugins(self.ctx, experiment_id="unused") self.assertItemsEqual( result, [ "greetings", "marigraphs", graph_metadata.PLUGIN_NAME, histogram_metadata.PLUGIN_NAME, image_metadata.PLUGIN_NAME, scalar_metadata.PLUGIN_NAME, ], ) def test_list_runs(self): # We can't control the timestamps of events written to disk (without # manually reading the tfrecords, modifying the data, and writing # them back out), so we provide a fake multiplexer instead. 
start_times = { "second_2": 2.0, "first": 1.5, "no_time": None, "second_1": 2.0, } class FakeMultiplexer(object): def Runs(multiplexer): result = ["second_2", "first", "no_time", "second_1"] self.assertItemsEqual(result, start_times) return result def FirstEventTimestamp(multiplexer, run): self.assertIn(run, start_times) result = start_times[run] if result is None: raise ValueError("No event timestep could be found") else: return result multiplexer = FakeMultiplexer() provider = data_provider.MultiplexerDataProvider( multiplexer, "fake_logdir" ) result = provider.list_runs(self.ctx, experiment_id="unused") self.assertItemsEqual( result, [ base_provider.Run( run_id=run, run_name=run, start_time=start_time ) for (run, start_time) in six.iteritems(start_times) ], ) def test_list_scalars_all(self): provider = self.create_provider() result = provider.list_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, run_tag_filter=None, ) self.assertItemsEqual(result.keys(), ["polynomials", "waves"]) self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"]) self.assertItemsEqual(result["waves"].keys(), ["square", "sine"]) sample = result["polynomials"]["square"] self.assertIsInstance(sample, base_provider.ScalarTimeSeries) self.assertEqual(sample.max_step, 18) # nothing to test for wall time, as it can't be mocked out self.assertEqual(sample.plugin_content, b"") self.assertEqual( sample.display_name, "" ) # not written by V2 summary ops self.assertEqual(sample.description, "boxen") def test_list_scalars_filters(self): provider = self.create_provider() result = provider.list_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter(["waves"], ["square"]), ) self.assertItemsEqual(result.keys(), ["waves"]) self.assertItemsEqual(result["waves"].keys(), ["square"]) result = provider.list_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter( tags=["square", "quartic"] ), ) self.assertItemsEqual(result.keys(), ["polynomials", "waves"]) self.assertItemsEqual(result["polynomials"].keys(), ["square"]) self.assertItemsEqual(result["waves"].keys(), ["square"]) result = provider.list_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter(runs=["waves", "hugs"]), ) self.assertItemsEqual(result.keys(), ["waves"]) self.assertItemsEqual(result["waves"].keys(), ["sine", "square"]) result = provider.list_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter(["un"], ["likely"]), ) self.assertEqual(result, {}) def test_read_scalars(self): multiplexer = self.create_multiplexer() provider = data_provider.MultiplexerDataProvider( multiplexer, self.logdir ) run_tag_filter = base_provider.RunTagFilter( runs=["waves", "polynomials", "unicorns"], tags=["sine", "square", "cube", "iridescence"], ) result = provider.read_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, run_tag_filter=run_tag_filter, downsample=100, ) self.assertItemsEqual(result.keys(), ["polynomials", "waves"]) self.assertItemsEqual(result["polynomials"].keys(), ["square", "cube"]) self.assertItemsEqual(result["waves"].keys(), ["square", "sine"]) for run in result: for tag in result[run]: tensor_events = multiplexer.Tensors(run, tag) self.assertLen(result[run][tag], len(tensor_events)) for (datum, event) in 
zip(result[run][tag], tensor_events): self.assertEqual(datum.step, event.step) self.assertEqual(datum.wall_time, event.wall_time) self.assertEqual( datum.value, tensor_util.make_ndarray(event.tensor_proto).item(), ) def test_read_scalars_downsamples(self): # TODO(@wchargin): Verify that this always includes the most # recent datum, as specified by the interface. multiplexer = self.create_multiplexer() provider = data_provider.MultiplexerDataProvider( multiplexer, self.logdir ) result = provider.read_scalars( self.ctx, experiment_id="unused", plugin_name=scalar_metadata.PLUGIN_NAME, downsample=3, ) self.assertLen(result["waves"]["sine"], 3) def test_read_scalars_but_not_rank_0(self): provider = self.create_provider() run_tag_filter = base_provider.RunTagFilter(["waves"], ["bad"]) # No explicit checks yet. with six.assertRaisesRegex( self, ValueError, "can only convert an array of size 1 to a Python scalar", ): provider.read_scalars( self.ctx, experiment_id="unused", plugin_name="greetings", run_tag_filter=run_tag_filter, downsample=100, ) def test_list_tensors_all(self): provider = self.create_provider() result = provider.list_tensors( self.ctx, experiment_id="unused", plugin_name=histogram_metadata.PLUGIN_NAME, run_tag_filter=None, ) self.assertItemsEqual(result.keys(), ["lebesgue"]) self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"]) sample = result["lebesgue"]["uniform"] self.assertIsInstance(sample, base_provider.TensorTimeSeries) self.assertEqual(sample.max_step, 10) # nothing to test for wall time, as it can't be mocked out self.assertEqual(sample.plugin_content, b"") self.assertEqual( sample.display_name, "" ) # not written by V2 summary ops self.assertEqual(sample.description, "very smooth") def test_list_tensors_filters(self): provider = self.create_provider() # Quick check only, as scalars and tensors use the same underlying # filtering implementation. 
result = provider.list_tensors( self.ctx, experiment_id="unused", plugin_name=histogram_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter( ["lebesgue"], ["uniform"] ), ) self.assertItemsEqual(result.keys(), ["lebesgue"]) self.assertItemsEqual(result["lebesgue"].keys(), ["uniform"]) def test_read_tensors(self): multiplexer = self.create_multiplexer() provider = data_provider.MultiplexerDataProvider( multiplexer, self.logdir ) run_tag_filter = base_provider.RunTagFilter( runs=["lebesgue"], tags=["uniform", "bimodal"], ) result = provider.read_tensors( self.ctx, experiment_id="unused", plugin_name=histogram_metadata.PLUGIN_NAME, run_tag_filter=run_tag_filter, downsample=100, ) self.assertItemsEqual(result.keys(), ["lebesgue"]) self.assertItemsEqual(result["lebesgue"].keys(), ["uniform", "bimodal"]) for run in result: for tag in result[run]: tensor_events = multiplexer.Tensors(run, tag) self.assertLen(result[run][tag], len(tensor_events)) for (datum, event) in zip(result[run][tag], tensor_events): self.assertEqual(datum.step, event.step) self.assertEqual(datum.wall_time, event.wall_time) np.testing.assert_equal( datum.numpy, tensor_util.make_ndarray(event.tensor_proto), ) def test_read_tensors_downsamples(self): multiplexer = self.create_multiplexer() provider = data_provider.MultiplexerDataProvider( multiplexer, self.logdir ) result = provider.read_tensors( self.ctx, experiment_id="unused", plugin_name=histogram_metadata.PLUGIN_NAME, downsample=3, ) self.assertLen(result["lebesgue"]["uniform"], 3) def test_list_blob_sequences(self): provider = self.create_provider() with self.subTest("finds all time series for a plugin"): result = provider.list_blob_sequences( self.ctx, experiment_id="unused", plugin_name=image_metadata.PLUGIN_NAME, ) self.assertItemsEqual(result.keys(), ["mondrian"]) self.assertItemsEqual( result["mondrian"].keys(), ["red", "blue", "yellow"] ) sample = result["mondrian"]["blue"] self.assertIsInstance(sample, base_provider.BlobSequenceTimeSeries) self.assertEqual(sample.max_step, 10) # nothing to test for wall time, as it can't be mocked out self.assertEqual(sample.plugin_content, b"") self.assertEqual(sample.max_length, 6 + 2) self.assertEqual(sample.description, "bottom-left") self.assertEqual(sample.display_name, "") with self.subTest("filters by run/tag"): result = provider.list_blob_sequences( self.ctx, experiment_id="unused", plugin_name=image_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter( runs=["mondrian", "picasso"], tags=["yellow", "green't"] ), ) self.assertItemsEqual(result.keys(), ["mondrian"]) self.assertItemsEqual(result["mondrian"].keys(), ["yellow"]) self.assertIsInstance( result["mondrian"]["yellow"], base_provider.BlobSequenceTimeSeries, ) def test_read_blob_sequences_and_read_blob(self): provider = self.create_provider() with self.subTest("reads all time series for a plugin"): result = provider.read_blob_sequences( self.ctx, experiment_id="unused", plugin_name=image_metadata.PLUGIN_NAME, downsample=4, ) self.assertItemsEqual(result.keys(), ["mondrian"]) self.assertItemsEqual( result["mondrian"].keys(), ["red", "blue", "yellow"] ) sample = result["mondrian"]["blue"] self.assertLen(sample, 4) # downsampled from 10 last = sample[-1] self.assertIsInstance(last, base_provider.BlobSequenceDatum) self.assertEqual(last.step, 10) self.assertLen(last.values, 2 + 2) blobs = [ provider.read_blob(self.ctx, blob_key=v.blob_key) for v in last.values ] self.assertEqual(blobs[0], b"10") self.assertEqual(blobs[1], b"10") 
self.assertStartsWith(blobs[2], b"\x89PNG") self.assertStartsWith(blobs[3], b"\x89PNG") blue1 = blobs[2] blue2 = blobs[3] red1 = provider.read_blob( self.ctx, blob_key=result["mondrian"]["red"][-1].values[2].blob_key, ) self.assertEqual(blue1, blue2) self.assertNotEqual(blue1, red1) with self.subTest("filters by run/tag"): result = provider.read_blob_sequences( self.ctx, experiment_id="unused", plugin_name=image_metadata.PLUGIN_NAME, run_tag_filter=base_provider.RunTagFilter( runs=["mondrian", "picasso"], tags=["yellow", "green't"] ), downsample=1, ) self.assertItemsEqual(result.keys(), ["mondrian"]) self.assertItemsEqual(result["mondrian"].keys(), ["yellow"]) self.assertIsInstance( result["mondrian"]["yellow"][0], base_provider.BlobSequenceDatum, ) class DownsampleTest(tf.test.TestCase): """Tests for the `_downsample` private helper function.""" def test_deterministic(self): xs = "abcdefg" expected = data_provider._downsample(xs, k=4) for _ in range(100): actual = data_provider._downsample(xs, k=4) self.assertEqual(actual, expected) def test_underlong_ok(self): xs = list("abcdefg") actual = data_provider._downsample(xs, k=10) expected = list("abcdefg") self.assertIsNot(actual, xs) self.assertEqual(actual, expected) def test_inorder(self): xs = list(range(10000)) actual = data_provider._downsample(xs, k=100) self.assertEqual(actual, sorted(actual)) def test_zero(self): xs = "abcdefg" actual = data_provider._downsample(xs, k=0) self.assertEqual(actual, []) if __name__ == "__main__": tf.test.main()
39.559041
80
0.583741
19,484
0.908726
0
0
0
0
0
0
3,511
0.163752
b927180a3b55091e89983dcae5d96dd47f1373ae
4,172
py
Python
extras/amld/cloud/quickdraw_rnn/task.py
luyang1210/tensorflow
948324f4cafdc97ae51c0e44fc1c28677a6e2e8a
[ "Apache-2.0" ]
1
2019-04-28T15:46:45.000Z
2019-04-28T15:46:45.000Z
extras/amld/cloud/quickdraw_rnn/task.py
luyang1210/tensorflow
948324f4cafdc97ae51c0e44fc1c28677a6e2e8a
[ "Apache-2.0" ]
null
null
null
extras/amld/cloud/quickdraw_rnn/task.py
luyang1210/tensorflow
948324f4cafdc97ae51c0e44fc1c28677a6e2e8a
[ "Apache-2.0" ]
1
2020-11-18T04:43:33.000Z
2020-11-18T04:43:33.000Z
"""Experiment wrapper for training on Cloud ML.""" import argparse, glob, os import tensorflow as tf # From this package. import model def generate_experiment_fn(data_dir, train_batch_size, eval_batch_size, train_steps, eval_steps, cell_size, hidden, **experiment_args): """Returns experiment_fn for a RNN classifier. Args: data_dir: Where {train,eval}-* tf.train.Example datasets can be found. train_batch_size: Batch size during training. train_batch_size: Batch size during evaluation. train_steps: Number of training steps. eval_steps: Number of evaluation steps. cell_size: LSTM cell size. hidden: Number of units in hidden layers (note that None means "use default" wich is equivalent to [] -- see code in model). experiment_args: Additional arguments when `tf.contrib.learn.Experiment` is instantiated. """ classes = tf.gfile.Open('%s/labels.txt' % data_dir).read().splitlines() n_classes = len(classes) params = tf.contrib.training.HParams( cell_size=cell_size, hidden=hidden or None, # Default is empty list. ) config = tf.contrib.learn.RunConfig() def _experiment_fn(output_dir): return tf.contrib.learn.Experiment( model.build_estimator(output_dir, n_classes, params, config), train_input_fn=model.make_input_fn_stroke( files_pattern=os.path.join(data_dir, 'train-*'), batch_size=train_batch_size), eval_input_fn=model.make_input_fn_stroke( files_pattern=os.path.join(data_dir, 'eval-*'), batch_size=eval_batch_size), export_strategies=[ tf.contrib.learn.utils.saved_model_export_utils.make_export_strategy( model.serving_input_fn, exports_to_keep=1) ], train_steps=train_steps, eval_steps=eval_steps, **experiment_args ) return _experiment_fn if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) parser = argparse.ArgumentParser() parser.add_argument( '--data_dir', help='GCS or local path to training data', required=True ) parser.add_argument( '--train_batch_size', help='Batch size for training steps', type=int, default=100 ) parser.add_argument( '--eval_batch_size', help='Batch size for evaluation steps', type=int, default=100 ) parser.add_argument( '--train_steps', help='Steps to run the training job for.', type=int, default=10000 ) parser.add_argument( '--eval_steps', help='Number of steps to run evalution for at each checkpoint', default=100, type=int ) parser.add_argument( '--output_dir', help='GCS location to write checkpoints and export models', required=True ) parser.add_argument( '--job-dir', help='this model ignores this field, but it is required by gcloud', default='junk' ) parser.add_argument( '--eval_delay_secs', help='How long to wait before running first evaluation', default=10, type=int ) parser.add_argument( '--min_eval_frequency', help='Minimum number of training steps between evaluations', default=1, type=int ) # Hyper parameters. parser.add_argument( '--cell_size', help='LSTM cell size.', default=256, type=int ) parser.add_argument( '--hidden', help='Units in hidden layers.', default=(), nargs='+', type=int ) args = parser.parse_args() arguments = args.__dict__ # unused args provided by service arguments.pop('job_dir', None) arguments.pop('job-dir', None) output_dir = arguments.pop('output_dir') # Run the training job tf.contrib.learn.learn_runner.run( generate_experiment_fn(**arguments), output_dir)
28.972222
81
0.613375
0
0
0
0
0
0
0
0
1,466
0.35139
b92725db4e0f08b5ebf9656b39a1e567c20d5ffb
150
py
Python
A/116A.py
johnggo/Codeforces-Solutions
4127ae6f72294b5781fb94c42b69cfef570aae42
[ "MIT" ]
1
2020-08-25T19:59:11.000Z
2020-08-25T19:59:11.000Z
A/116A.py
johnggo/Codeforces-Solutions
4127ae6f72294b5781fb94c42b69cfef570aae42
[ "MIT" ]
null
null
null
A/116A.py
johnggo/Codeforces-Solutions
4127ae6f72294b5781fb94c42b69cfef570aae42
[ "MIT" ]
null
null
null
# Time: 310 ms
# Memory: 1664 KB

n = int(input())
e = 0
s = 0
for i in range(n):
    s = s - eval(input().replace(' ', '-'))
    e = max(e, s)
print(e)
15
41
0.506667
0
0
0
0
0
0
0
0
37
0.246667
b9299565a87f9a052852f5ae8225680eeeb2de61
1,923
py
Python
tests/test_serialize.py
aferrall/redner
be52e4105140f575f153d640ba889eb6e6015616
[ "MIT" ]
1,146
2018-11-11T01:47:18.000Z
2022-03-31T14:11:03.000Z
tests/test_serialize.py
Awcrr/redner
b4f57037af26b720d916bbaf26103a3499101a9f
[ "MIT" ]
177
2018-11-13T22:48:25.000Z
2022-03-30T07:19:29.000Z
tests/test_serialize.py
Awcrr/redner
b4f57037af26b720d916bbaf26103a3499101a9f
[ "MIT" ]
127
2018-11-11T02:32:17.000Z
2022-03-31T07:24:03.000Z
import pyredner
import numpy as np
import torch

cam = pyredner.Camera(position = torch.tensor([0.0, 0.0, -5.0]),
                      look_at = torch.tensor([0.0, 0.0, 0.0]),
                      up = torch.tensor([0.0, 1.0, 0.0]),
                      fov = torch.tensor([45.0]), # in degree
                      clip_near = 1e-2, # needs to > 0
                      resolution = (256, 256),
                      fisheye = False)

mat_grey = pyredner.Material(\
    diffuse_reflectance = \
        torch.tensor([0.5, 0.5, 0.5], device = pyredner.get_device()))
materials = [mat_grey]

shape_triangle = pyredner.Shape(\
    vertices = torch.tensor([[-1.7, 1.0, 0.0], [1.0, 1.0, 0.0], [-0.5, -1.0, 0.0]],
        device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2]], dtype = torch.int32,
        device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)

shape_light = pyredner.Shape(\
    vertices = torch.tensor([[-1.0, -1.0, -7.0],
                             [ 1.0, -1.0, -7.0],
                             [-1.0,  1.0, -7.0],
                             [ 1.0,  1.0, -7.0]], device = pyredner.get_device()),
    indices = torch.tensor([[0, 1, 2],[1, 3, 2]],
        dtype = torch.int32, device = pyredner.get_device()),
    uvs = None,
    normals = None,
    material_id = 0)

shapes = [shape_triangle, shape_light]

light = pyredner.AreaLight(shape_id = 1,
                           intensity = torch.tensor([20.0,20.0,20.0]))
area_lights = [light]

scene = pyredner.Scene(cam, shapes, materials, area_lights)

scene_state_dict = scene.state_dict()
scene = pyredner.Scene.load_state_dict(scene_state_dict)

scene_args = pyredner.RenderFunction.serialize_scene(\
    scene = scene,
    num_samples = 16,
    max_bounces = 1)

render = pyredner.RenderFunction.apply
img = render(0, *scene_args)
pyredner.imwrite(img.cpu(), 'results/test_serialize/img.exr')
34.339286
83
0.560582
0
0
0
0
0
0
0
0
57
0.029641
b92a551001bac345f595f68ea0440f1231ad8e57
2,302
py
Python
src/zope/publisher/tests/test_requestdataproperty.py
Shoobx/zope.publisher
790e82045d7ae06146bd8c5e27139555b9ec1641
[ "ZPL-2.1" ]
3
2016-11-18T08:58:09.000Z
2021-02-01T06:13:45.000Z
src/zope/publisher/tests/test_requestdataproperty.py
Shoobx/zope.publisher
790e82045d7ae06146bd8c5e27139555b9ec1641
[ "ZPL-2.1" ]
42
2015-06-02T19:26:10.000Z
2022-03-15T07:24:03.000Z
src/zope/publisher/tests/test_requestdataproperty.py
Shoobx/zope.publisher
790e82045d7ae06146bd8c5e27139555b9ec1641
[ "ZPL-2.1" ]
7
2015-04-03T09:29:31.000Z
2021-06-07T14:47:45.000Z
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL).  A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Request Data-Property Tests
"""
from unittest import TestCase, makeSuite

from zope.interface.common.tests.basemapping \
     import testIEnumerableMapping, testIReadMapping

from zope.publisher.base \
     import RequestDataProperty, RequestDataGetter, RequestDataMapper


class TestDataGettr(RequestDataGetter):
    _gettrname = 'getSomething'


class TestDataMapper(RequestDataMapper):
    _mapname = '_data'


_marker = object()


class Data(object):

    def getSomething(self, name, default=_marker):
        if name.startswith('Z'):
            return "something %s" % name

        if default is not _marker:
            return default

        raise KeyError(name)

    something = RequestDataProperty(TestDataGettr)
    somedata = RequestDataProperty(TestDataMapper)


class Test(TestCase):

    def testRequestDataGettr(self):
        testIReadMapping(self, Data().something,
                         {"Zope": "something Zope"}, ["spam"])

    def testRequestDataMapper(self):
        data = Data()
        sample = {'foo': 'Foo', 'bar': 'Bar'}
        data._data = sample
        inst = data.somedata
        testIReadMapping(self, inst, sample, ["spam"])
        testIEnumerableMapping(self, inst, sample)

    def testNoAssign(self):
        data = Data()

        try:
            data.something = {}
        except AttributeError:
            pass
        else:
            raise AssertionError("Shouldn't be able to assign")

        try:
            data.somedata = {}
        except AttributeError:
            pass
        else:
            raise AssertionError("Shouldn't be able to assign")


def test_suite():
    return makeSuite(Test)
27.404762
78
0.614683
1,307
0.567767
0
0
0
0
0
0
813
0.353171
b92a9fd2163ca676afa6df078248d3bd1b2d8259
146
py
Python
tools/scoring/dimensions/__init__.py
ahemphill/digitalbuildings
56a03b0055f9f771c3ed0a962f6bfb2b1d968947
[ "Apache-2.0" ]
null
null
null
tools/scoring/dimensions/__init__.py
ahemphill/digitalbuildings
56a03b0055f9f771c3ed0a962f6bfb2b1d968947
[ "Apache-2.0" ]
null
null
null
tools/scoring/dimensions/__init__.py
ahemphill/digitalbuildings
56a03b0055f9f771c3ed0a962f6bfb2b1d968947
[ "Apache-2.0" ]
null
null
null
""" Enable import """ from os import path import sys sys.path.append( path.abspath(path.join('tools', 'validators', 'instance_validator')))
18.25
73
0.69863
0
0
0
0
0
0
0
0
60
0.410959
b92b002b9d57e933962f9291a749b365792c1b9a
1,444
py
Python
src/thornfield/caches/cache_compression_decorator.py
drorvinkler/thornfield
3c5bb8afaa96097bc71cccb119394a0f351d828f
[ "MIT" ]
2
2020-11-24T13:27:14.000Z
2020-11-24T13:29:40.000Z
src/thornfield/caches/cache_compression_decorator.py
drorvinkler/thornfield
3c5bb8afaa96097bc71cccb119394a0f351d828f
[ "MIT" ]
1
2020-11-24T13:33:45.000Z
2020-11-24T15:10:41.000Z
src/thornfield/caches/cache_compression_decorator.py
drorvinkler/thornfield
3c5bb8afaa96097bc71cccb119394a0f351d828f
[ "MIT" ]
null
null
null
from typing import Callable, AnyStr, Optional
from zlib import compress as default_compress, decompress as default_decompress

from .cache import Cache
from ..constants import NOT_FOUND


class CacheCompressionDecorator(Cache):
    def __init__(
        self,
        cache: Cache,
        compress: Optional[Callable[[str], AnyStr]] = ...,
        decompress: Optional[Callable[[AnyStr], str]] = ...,
    ) -> None:
        super().__init__()
        self._cache = cache
        if compress is None:
            self._compress = self._noop
        elif compress is ...:
            self._compress = self._default_compress
        else:
            self._compress = compress
        if decompress is None:
            self._decompress = self._noop
        elif decompress is ...:
            self._decompress = self._default_decompress
        else:
            self._decompress = decompress

    def get(self, key):
        value = self._cache.get(key)
        return value if value is NOT_FOUND else self._decompress(value)

    def set(self, key, value, expiration: int) -> None:
        self._cache.set(key, self._compress(value), expiration)

    @staticmethod
    def _noop(x):
        return x

    @staticmethod
    def _default_compress(obj: str) -> bytes:
        return default_compress(obj.encode("UTF-8"))

    @staticmethod
    def _default_decompress(data: bytes) -> str:
        return default_decompress(data).decode("UTF-8")
29.469388
79
0.628809
1,255
0.869114
0
0
278
0.192521
0
0
14
0.009695
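CacheCompressionDecorator wraps another cache, zlib-compressing string values on `set` and decompressing them on `get`, while passing the NOT_FOUND sentinel through untouched. A self-contained sketch of the same pattern, assuming the thornfield package is not importable here; `DictCache`, the `NOT_FOUND` stand-in and `CompressingCache` below are illustrative substitutes rather than the real classes:

from zlib import compress, decompress

NOT_FOUND = object()  # stand-in for thornfield's sentinel

class DictCache:
    # Toy in-memory cache with the same get/set(key, value, expiration) shape.
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key, NOT_FOUND)

    def set(self, key, value, expiration):
        self._store[key] = value  # expiration ignored in this toy cache

class CompressingCache:
    # Simplified analogue of CacheCompressionDecorator around another cache.
    def __init__(self, inner):
        self._inner = inner

    def get(self, key):
        value = self._inner.get(key)
        return value if value is NOT_FOUND else decompress(value).decode("UTF-8")

    def set(self, key, value, expiration):
        self._inner.set(key, compress(value.encode("UTF-8")), expiration)

inner = DictCache()
cache = CompressingCache(inner)
cache.set("greeting", "hello " * 1000, expiration=60)
print(len(inner._store["greeting"]))  # compressed payload, much smaller than the 6000-char original
print(cache.get("greeting")[:11])     # 'hello hello'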
b92be50c97841e71ffe31a7d7baa405cc9ba5537
38,846
py
Python
manim/mobject/vector_field.py
kdkasad/manim
249b1dcab0f18a43e953b5fda517734084c0a941
[ "MIT" ]
2
2021-12-07T14:25:07.000Z
2021-12-09T14:16:10.000Z
manim/mobject/vector_field.py
kdkasad/manim
249b1dcab0f18a43e953b5fda517734084c0a941
[ "MIT" ]
3
2021-09-15T08:11:29.000Z
2021-10-06T02:00:03.000Z
manim/mobject/vector_field.py
kdkasad/manim
249b1dcab0f18a43e953b5fda517734084c0a941
[ "MIT" ]
3
2020-04-10T20:38:06.000Z
2020-09-30T03:03:45.000Z
"""Mobjects representing vector fields.""" __all__ = [ "VectorField", "ArrowVectorField", "StreamLines", ] import itertools as it import random from math import ceil, floor from typing import Callable, Iterable, Optional, Sequence, Tuple, Type import numpy as np from colour import Color from PIL import Image from .. import config from ..animation.composition import AnimationGroup, Succession from ..animation.creation import Create from ..animation.indication import ShowPassingFlash from ..animation.update import UpdateFromAlphaFunc from ..constants import OUT, RIGHT, UP from ..mobject.geometry import Vector from ..mobject.mobject import Mobject from ..mobject.types.vectorized_mobject import VGroup, VMobject from ..utils.bezier import interpolate, inverse_interpolate from ..utils.color import BLUE_E, GREEN, RED, YELLOW, color_to_rgb, rgb_to_color from ..utils.deprecation import deprecated_params from ..utils.rate_functions import ease_out_sine, linear from ..utils.simple_functions import sigmoid from .types.opengl_vectorized_mobject import OpenGLVMobject DEFAULT_SCALAR_FIELD_COLORS: list = [BLUE_E, GREEN, YELLOW, RED] class VectorField(VGroup): """A vector field. Vector fields are based on a function defining a vector at every position. This class does by default not include any visible elements but provides methods to move other :class:`~.Mobject` s along the vector field. Parameters ---------- func The function defining the rate of change at every position of the `VectorField`. color The color of the vector field. If set, position-specific coloring is disabled. color_scheme A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`. min_color_scheme_value The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient. max_color_scheme_value The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient. colors The colors defining the color gradient of the vector field. kwargs : Any Additional arguments to be passed to the :class:`~.VGroup` constructor """ def __init__( self, func: Callable[[np.ndarray], np.ndarray], color: Optional[Color] = None, color_scheme: Optional[Callable[[np.ndarray], float]] = None, min_color_scheme_value: float = 0, max_color_scheme_value: float = 2, colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS, **kwargs ): super().__init__(**kwargs) self.func = func if color is None: self.single_color = False if color_scheme is None: def color_scheme(p): return np.linalg.norm(p) self.color_scheme = color_scheme # TODO maybe other default for direction? 
self.rgbs = np.array(list(map(color_to_rgb, colors))) def pos_to_rgb(pos: np.ndarray) -> Tuple[float, float, float, float]: vec = self.func(pos) color_value = np.clip( self.color_scheme(vec), min_color_scheme_value, max_color_scheme_value, ) alpha = inverse_interpolate( min_color_scheme_value, max_color_scheme_value, color_value, ) alpha *= len(self.rgbs) - 1 c1 = self.rgbs[int(alpha)] c2 = self.rgbs[min(int(alpha + 1), len(self.rgbs) - 1)] alpha %= 1 return interpolate(c1, c2, alpha) self.pos_to_rgb = pos_to_rgb self.pos_to_color = lambda pos: rgb_to_color(self.pos_to_rgb(pos)) else: self.single_color = True self.color = color self.submob_movement_updater = None @staticmethod def shift_func( func: Callable[[np.ndarray], np.ndarray], shift_vector: np.ndarray, ) -> Callable[[np.ndarray], np.ndarray]: """Shift a vector field function. Parameters ---------- func The function defining a vector field. shift_vector The shift to be applied to the vector field. Returns ------- `Callable[[np.ndarray], np.ndarray]` The shifted vector field function. """ return lambda p: func(p - shift_vector) @staticmethod def scale_func( func: Callable[[np.ndarray], np.ndarray], scalar: float, ) -> Callable[[np.ndarray], np.ndarray]: """Scale a vector field function. Parameters ---------- func The function defining a vector field. shift_vector The scalar to be applied to the vector field. Examples -------- .. manim:: ScaleVectorFieldFunction class ScaleVectorFieldFunction(Scene): def construct(self): func = lambda pos: np.sin(pos[1]) * RIGHT + np.cos(pos[0]) * UP vector_field = ArrowVectorField(func) self.add(vector_field) self.wait() func = VectorField.scale_func(func, 0.5) self.play(vector_field.animate.become(ArrowVectorField(func))) self.wait() Returns ------- `Callable[[np.ndarray], np.ndarray]` The scaled vector field function. """ return lambda p: func(p * scalar) def nudge( self, mob: Mobject, dt: float = 1, substeps: int = 1, pointwise: bool = False, ) -> "VectorField": """Nudge a :class:`~.Mobject` along the vector field. Parameters ---------- mob The mobject to move along the vector field dt A scalar to the amount the mobject is moved along the vector field. The actual distance is based on the magnitude of the vector field. substeps The amount of steps the whole nudge is divided into. Higher values give more accurate approximations. pointwise Whether to move the mobject along the vector field. If `False` the vector field takes effect on the center of the given :class:`~.Mobject`. If `True` the vector field takes effect on the points of the individual points of the :class:`~.Mobject`, potentially distorting it. Returns ------- VectorField This vector field. Examples -------- .. manim:: Nudging class Nudging(Scene): def construct(self): func = lambda pos: np.sin(pos[1] / 2) * RIGHT + np.cos(pos[0] / 2) * UP vector_field = ArrowVectorField( func, x_range=[-7, 7, 1], y_range=[-4, 4, 1], length_func=lambda x: x / 2 ) self.add(vector_field) circle = Circle(radius=2).shift(LEFT) self.add(circle.copy().set_color(GRAY)) dot = Dot().move_to(circle) vector_field.nudge(circle, -2, 60, True) vector_field.nudge(dot, -2, 60) circle.add_updater(vector_field.get_nudge_updater(pointwise=True)) dot.add_updater(vector_field.get_nudge_updater()) self.add(circle, dot) self.wait(6) """ def runge_kutta(self, p: Sequence[float], step_size: float) -> float: """Returns the change in position of a point along a vector field. Parameters ---------- p The position of each point being moved along the vector field. 
step_size A scalar that is used to determine how much a point is shifted in a single step. Returns ------- float How much the point is shifted. """ k_1 = self.func(p) k_2 = self.func(p + step_size * (k_1 * 0.5)) k_3 = self.func(p + step_size * (k_2 * 0.5)) k_4 = self.func(p + step_size * k_3) return step_size / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4) step_size = dt / substeps for _ in range(substeps): if pointwise: mob.apply_function(lambda p: p + runge_kutta(self, p, step_size)) else: mob.shift(runge_kutta(self, mob.get_center(), step_size)) return self def nudge_submobjects( self, dt: float = 1, substeps: int = 1, pointwise: bool = False, ) -> "VectorField": """Apply a nudge along the vector field to all submobjects. Parameters ---------- dt A scalar to the amount the mobject is moved along the vector field. The actual distance is based on the magnitude of the vector field. substeps The amount of steps the whole nudge is divided into. Higher values give more accurate approximations. pointwise Whether to move the mobject along the vector field. See :meth:`nudge` for details. Returns ------- VectorField This vector field. """ for mob in self.submobjects: self.nudge(mob, dt, substeps, pointwise) return self def get_nudge_updater( self, speed: float = 1, pointwise: bool = False, ) -> Callable[[Mobject, float], Mobject]: """Get an update function to move a :class:`~.Mobject` along the vector field. When used with :meth:`~.Mobject.add_updater`, the mobject will move along the vector field, where its speed is determined by the magnitude of the vector field. Parameters ---------- speed At `speed=1` the distance a mobject moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of such a mobject. pointwise Whether to move the mobject along the vector field. See :meth:`nudge` for details. Returns ------- Callable[[Mobject, float], Mobject] The update function. """ return lambda mob, dt: self.nudge(mob, dt * speed, pointwise=pointwise) def start_submobject_movement( self, speed: float = 1, pointwise: bool = False, ) -> "VectorField": """Start continuously moving all submobjects along the vector field. Calling this method multiple times will result in removing the previous updater created by this method. Parameters ---------- speed The speed at which to move the submobjects. See :meth:`get_nudge_updater` for details. pointwise Whether to move the mobject along the vector field. See :meth:`nudge` for details. Returns ------- VectorField This vector field. """ self.stop_submobject_movement() self.submob_movement_updater = lambda mob, dt: mob.nudge_submobjects( dt * speed, pointwise=pointwise, ) self.add_updater(self.submob_movement_updater) return self def stop_submobject_movement(self) -> "VectorField": """Stops the continuous movement started using :meth:`start_submobject_movement`. Returns ------- VectorField This vector field. """ self.remove_updater(self.submob_movement_updater) self.submob_movement_updater = None return self def get_colored_background_image(self, sampling_rate: int = 5) -> Image.Image: """Generate an image that displays the vector field. The color at each position is calculated by passing the positing through a series of steps: Calculate the vector field function at that position, map that vector to a single value using `self.color_scheme` and finally generate a color from that value using the color gradient. Parameters ---------- sampling_rate The stepsize at which pixels get included in the image. 
Lower values give more accurate results, but may take a long time to compute. Returns ------- Image.Imgae The vector field image. """ if self.single_color: raise ValueError( "There is no point in generating an image if the vector field uses a single color.", ) ph = int(config["pixel_height"] / sampling_rate) pw = int(config["pixel_width"] / sampling_rate) fw = config["frame_width"] fh = config["frame_height"] points_array = np.zeros((ph, pw, 3)) x_array = np.linspace(-fw / 2, fw / 2, pw) y_array = np.linspace(fh / 2, -fh / 2, ph) x_array = x_array.reshape((1, len(x_array))) y_array = y_array.reshape((len(y_array), 1)) x_array = x_array.repeat(ph, axis=0) y_array.repeat(pw, axis=1) # TODO why not y_array = y_array.repeat(...)? points_array[:, :, 0] = x_array points_array[:, :, 1] = y_array rgbs = np.apply_along_axis(self.pos_to_rgb, 2, points_array) return Image.fromarray((rgbs * 255).astype("uint8")) def get_vectorized_rgba_gradient_function( self, start: float, end: float, colors: Iterable, ): """ Generates a gradient of rgbas as a numpy array Parameters ---------- start start value used for inverse interpolation at :func:`~.inverse_interpolate` end end value used for inverse interpolation at :func:`~.inverse_interpolate` colors list of colors to generate the gradient Returns ------- function to generate the gradients as numpy arrays representing rgba values """ rgbs = np.array([color_to_rgb(c) for c in colors]) def func(values, opacity=1): alphas = inverse_interpolate(start, end, np.array(values)) alphas = np.clip(alphas, 0, 1) scaled_alphas = alphas * (len(rgbs) - 1) indices = scaled_alphas.astype(int) next_indices = np.clip(indices + 1, 0, len(rgbs) - 1) inter_alphas = scaled_alphas % 1 inter_alphas = inter_alphas.repeat(3).reshape((len(indices), 3)) result = interpolate(rgbs[indices], rgbs[next_indices], inter_alphas) result = np.concatenate( (result, np.full([len(result), 1], opacity)), axis=1, ) return result return func class ArrowVectorField(VectorField): """A :class:`VectorField` represented by a set of change vectors. Vector fields are always based on a function defining the :class:`~.Vector` at every position. The values of this functions is displayed as a grid of vectors. By default the color of each vector is determined by it's magnitude. Other color schemes can be used however. Parameters ---------- func The function defining the rate of change at every position of the vector field. color The color of the vector field. If set, position-specific coloring is disabled. color_scheme A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`. min_color_scheme_value The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient. max_color_scheme_value The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient. colors The colors defining the color gradient of the vector field. x_range A sequence of x_min, x_max, delta_x y_range A sequence of y_min, y_max, delta_y z_range A sequence of z_min, z_max, delta_z three_dimensions Enables three_dimensions. Default set to False, automatically turns True if z_range is not None. length_func The function determining the displayed size of the vectors. The actual size of the vector is passed, the returned value will be used as display size for the vector. 
By default this is used to cap the displayed size of vectors to reduce the clutter. opacity The opacity of the arrows. vector_config Additional arguments to be passed to the :class:`~.Vector` constructor kwargs : Any Additional arguments to be passed to the :class:`~.VGroup` constructor Examples -------- .. manim:: BasicUsage :save_last_frame: class BasicUsage(Scene): def construct(self): func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3 self.add(ArrowVectorField(func)) .. manim:: SizingAndSpacing class SizingAndSpacing(Scene): def construct(self): func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT vf = ArrowVectorField(func, x_range=[-7, 7, 1]) self.add(vf) self.wait() length_func = lambda x: x / 3 vf2 = ArrowVectorField(func, x_range=[-7, 7, 1], length_func=length_func) self.play(vf.animate.become(vf2)) self.wait() .. manim:: Coloring :save_last_frame: class Coloring(Scene): def construct(self): func = lambda pos: pos - LEFT * 5 colors = [RED, YELLOW, BLUE, DARK_GRAY] min_radius = Circle(radius=2, color=colors[0]).shift(LEFT * 5) max_radius = Circle(radius=10, color=colors[-1]).shift(LEFT * 5) vf = ArrowVectorField( func, min_color_scheme_value=2, max_color_scheme_value=10, colors=colors ) self.add(vf, min_radius, max_radius) """ def __init__( self, func: Callable[[np.ndarray], np.ndarray], color: Optional[Color] = None, color_scheme: Optional[Callable[[np.ndarray], float]] = None, min_color_scheme_value: float = 0, max_color_scheme_value: float = 2, colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS, # Determining Vector positions: x_range: Sequence[float] = None, y_range: Sequence[float] = None, z_range: Sequence[float] = None, three_dimensions: bool = False, # Automatically True if z_range is set # Takes in actual norm, spits out displayed norm length_func: Callable[[float], float] = lambda norm: 0.45 * sigmoid(norm), opacity: float = 1.0, vector_config: Optional[dict] = None, **kwargs ): self.x_range = x_range or [ floor(-config["frame_width"] / 2), ceil(config["frame_width"] / 2), ] self.y_range = y_range or [ floor(-config["frame_height"] / 2), ceil(config["frame_height"] / 2), ] self.ranges = [self.x_range, self.y_range] if three_dimensions or z_range: self.z_range = z_range or self.y_range.copy() self.ranges += [self.z_range] else: self.ranges += [[0, 0]] for i in range(len(self.ranges)): if len(self.ranges[i]) == 2: self.ranges[i] += [0.5] self.ranges[i][1] += self.ranges[i][2] self.x_range, self.y_range, self.z_range = self.ranges super().__init__( func, color, color_scheme, min_color_scheme_value, max_color_scheme_value, colors, **kwargs, ) self.length_func = length_func self.opacity = opacity if vector_config is None: vector_config = {} self.vector_config = vector_config self.func = func x_range = np.arange(*self.x_range) y_range = np.arange(*self.y_range) z_range = np.arange(*self.z_range) for x, y, z in it.product(x_range, y_range, z_range): self.add(self.get_vector(x * RIGHT + y * UP + z * OUT)) self.set_opacity(self.opacity) def get_vector(self, point: np.ndarray): """Creates a vector in the vector field. The created vector is based on the function of the vector field and is rooted in the given point. Color and length fit the specifications of this vector field. Parameters ---------- point The root point of the vector. 
kwargs : Any Additional arguments to be passed to the :class:`~.Vector` constructor """ output = np.array(self.func(point)) norm = np.linalg.norm(output) if norm != 0: output *= self.length_func(norm) / norm vect = Vector(output, **self.vector_config) vect.shift(point) if self.single_color: vect.set_color(self.color) else: vect.set_color(self.pos_to_color(point)) return vect class StreamLines(VectorField): """StreamLines represent the flow of a :class:`VectorField` using the trace of moving agents. Vector fields are always based on a function defining the vector at every position. The values of this functions is displayed by moving many agents along the vector field and showing their trace. Parameters ---------- func The function defining the rate of change at every position of the vector field. color The color of the vector field. If set, position-specific coloring is disabled. color_scheme A function mapping a vector to a single value. This value gives the position in the color gradient defined using `min_color_scheme_value`, `max_color_scheme_value` and `colors`. min_color_scheme_value The value of the color_scheme function to be mapped to the first color in `colors`. Lower values also result in the first color of the gradient. max_color_scheme_value The value of the color_scheme function to be mapped to the last color in `colors`. Higher values also result in the last color of the gradient. colors The colors defining the color gradient of the vector field. x_range A sequence of x_min, x_max, delta_x y_range A sequence of y_min, y_max, delta_y z_range A sequence of z_min, z_max, delta_z three_dimensions Enables three_dimensions. Default set to False, automatically turns True if z_range is not None. noise_factor The amount by which the starting position of each agent is altered along each axis. Defaults to :code:`delta_y / 2` if not defined. n_repeats The number of agents generated at each starting point. dt The factor by which the distance an agent moves per step is stretched. Lower values result in a better approximation of the trajectories in the vector field. virtual_time The time the agents get to move in the vector field. Higher values therefore result in longer stream lines. However, this whole time gets simulated upon creation. max_anchors_per_line The maximum number of anchors per line. Lines with more anchors get reduced in complexity, not in length. padding The distance agents can move out of the generation area before being terminated. stroke_width The stroke with of the stream lines. opacity The opacity of the stream lines. Examples -------- .. manim:: BasicUsage :save_last_frame: class BasicUsage(Scene): def construct(self): func = lambda pos: ((pos[0] * UR + pos[1] * LEFT) - pos) / 3 self.add(StreamLines(func)) .. 
manim:: SpawningAndFlowingArea :save_last_frame: class SpawningAndFlowingArea(Scene): def construct(self): func = lambda pos: np.sin(pos[0]) * UR + np.cos(pos[1]) * LEFT + pos / 5 stream_lines = StreamLines( func, x_range=[-3, 3, 0.2], y_range=[-2, 2, 0.2], padding=1 ) spawning_area = Rectangle(width=6, height=4) flowing_area = Rectangle(width=8, height=6) labels = [Tex("Spawning Area"), Tex("Flowing Area").shift(DOWN * 2.5)] for lbl in labels: lbl.add_background_rectangle(opacity=0.6, buff=0.05) self.add(stream_lines, spawning_area, flowing_area, *labels) """ def __init__( self, func: Callable[[np.ndarray], np.ndarray], color: Optional[Color] = None, color_scheme: Optional[Callable[[np.ndarray], float]] = None, min_color_scheme_value: float = 0, max_color_scheme_value: float = 2, colors: Sequence[Color] = DEFAULT_SCALAR_FIELD_COLORS, # Determining stream line starting positions: x_range: Sequence[float] = None, y_range: Sequence[float] = None, z_range: Sequence[float] = None, three_dimensions: bool = False, noise_factor: Optional[float] = None, n_repeats=1, # Determining how lines are drawn dt=0.05, virtual_time=3, max_anchors_per_line=100, padding=3, # Determining stream line appearance: stroke_width=1, opacity=1, **kwargs ): self.x_range = x_range or [ floor(-config["frame_width"] / 2), ceil(config["frame_width"] / 2), ] self.y_range = y_range or [ floor(-config["frame_height"] / 2), ceil(config["frame_height"] / 2), ] self.ranges = [self.x_range, self.y_range] if three_dimensions or z_range: self.z_range = z_range or self.y_range.copy() self.ranges += [self.z_range] else: self.ranges += [[0, 0]] for i in range(len(self.ranges)): if len(self.ranges[i]) == 2: self.ranges[i] += [0.5] self.ranges[i][1] += self.ranges[i][2] self.x_range, self.y_range, self.z_range = self.ranges super().__init__( func, color, color_scheme, min_color_scheme_value, max_color_scheme_value, colors, **kwargs, ) self.noise_factor = ( noise_factor if noise_factor is not None else self.y_range[2] / 2 ) self.n_repeats = n_repeats self.virtual_time = virtual_time self.max_anchors_per_line = max_anchors_per_line self.padding = padding self.stroke_width = stroke_width half_noise = self.noise_factor / 2 np.random.seed(0) start_points = np.array( [ (x - half_noise) * RIGHT + (y - half_noise) * UP + (z - half_noise) * OUT + self.noise_factor * np.random.random(3) for n in range(self.n_repeats) for x in np.arange(*self.x_range) for y in np.arange(*self.y_range) for z in np.arange(*self.z_range) ], ) def outside_box(p): return ( p[0] < self.x_range[0] - self.padding or p[0] > self.x_range[1] + self.padding - self.x_range[2] or p[1] < self.y_range[0] - self.padding or p[1] > self.y_range[1] + self.padding - self.y_range[2] or p[2] < self.z_range[0] - self.padding or p[2] > self.z_range[1] + self.padding - self.z_range[2] ) max_steps = ceil(virtual_time / dt) + 1 if not self.single_color: self.background_img = self.get_colored_background_image() if config["renderer"] == "opengl": self.values_to_rgbas = self.get_vectorized_rgba_gradient_function( min_color_scheme_value, max_color_scheme_value, colors, ) for point in start_points: points = [point] for _ in range(max_steps): last_point = points[-1] new_point = last_point + dt * func(last_point) if outside_box(new_point): break points.append(new_point) step = max_steps if not step: continue if config["renderer"] == "opengl": line = OpenGLVMobject() else: line = VMobject() line.duration = step * dt step = max(1, int(len(points) / self.max_anchors_per_line)) 
line.set_points_smoothly(points[::step]) if self.single_color: line.set_stroke(self.color) else: if config["renderer"] == "opengl": # scaled for compatibility with cairo line.set_stroke(width=self.stroke_width / 4.0) norms = np.array( [np.linalg.norm(self.func(point)) for point in line.points], ) line.set_rgba_array_direct( self.values_to_rgbas(norms, opacity), name="stroke_rgba", ) else: if np.any(self.z_range != np.array([0, 0.5, 0.5])): line.set_stroke( [self.pos_to_color(p) for p in line.get_anchors()], ) else: line.color_using_background_image(self.background_img) line.set_stroke(width=self.stroke_width, opacity=opacity) self.add(line) self.stream_lines = [*self.submobjects] def create( self, lag_ratio: Optional[float] = None, run_time: Optional[Callable[[float], float]] = None, **kwargs ) -> AnimationGroup: """The creation animation of the stream lines. The stream lines appear in random order. Parameters ---------- lag_ratio The lag ratio of the animation. If undefined, it will be selected so that the total animation length is 1.5 times the run time of each stream line creation. run_time The run time of every single stream line creation. The runtime of the whole animation might be longer due to the `lag_ratio`. If undefined, the virtual time of the stream lines is used as run time. Returns ------- :class:`~.AnimationGroup` The creation animation of the stream lines. Examples -------- .. manim:: StreamLineCreation class StreamLineCreation(Scene): def construct(self): func = lambda pos: (pos[0] * UR + pos[1] * LEFT) - pos stream_lines = StreamLines( func, color=YELLOW, x_range=[-7, 7, 1], y_range=[-4, 4, 1], stroke_width=3, virtual_time=1, # use shorter lines max_anchors_per_line=5, # better performance with fewer anchors ) self.play(stream_lines.create()) # uses virtual_time as run_time self.wait() """ if run_time is None: run_time = self.virtual_time if lag_ratio is None: lag_ratio = run_time / 2 / len(self.submobjects) animations = [ Create(line, run_time=run_time, **kwargs) for line in self.stream_lines ] random.shuffle(animations) return AnimationGroup(*animations, lag_ratio=lag_ratio) def start_animation( self, warm_up=True, flow_speed: float = 1, time_width: float = 0.3, rate_func: Callable[[float], float] = linear, line_animation_class: Type[ShowPassingFlash] = ShowPassingFlash, **kwargs ) -> None: """Animates the stream lines using an updater. The stream lines will continuously flow Parameters ---------- warm_up : bool, optional If `True` the animation is initialized line by line. Otherwise it starts with all lines shown. flow_speed At `flow_speed=1` the distance the flow moves per second is equal to the magnitude of the vector field along its path. The speed value scales the speed of this flow. time_width The proportion of the stream line shown while being animated rate_func The rate function of each stream line flashing line_animation_class The animation class being used Examples -------- .. 
manim:: ContinuousMotion class ContinuousMotion(Scene): def construct(self): func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT stream_lines = StreamLines(func, stroke_width=3, max_anchors_per_line=30) self.add(stream_lines) stream_lines.start_animation(warm_up=False, flow_speed=1.5) self.wait(stream_lines.virtual_time / stream_lines.flow_speed) """ for line in self.stream_lines: run_time = line.duration / flow_speed line.anim = line_animation_class( line, run_time=run_time, rate_func=rate_func, time_width=time_width, **kwargs, ) line.anim.begin() line.time = random.random() * self.virtual_time if warm_up: line.time *= -1 self.add(line.anim.mobject) def updater(mob, dt): for line in mob.stream_lines: line.time += dt * flow_speed if line.time >= self.virtual_time: line.time -= self.virtual_time line.anim.interpolate(np.clip(line.time / line.anim.run_time, 0, 1)) self.add_updater(updater) self.flow_animation = updater self.flow_speed = flow_speed self.time_width = time_width def end_animation(self) -> AnimationGroup: """End the stream line animation smoothly. Returns an animation resulting in fully displayed stream lines without a noticeable cut. Returns ------- :class:`~.AnimationGroup` The animation fading out the running stream animation. Raises ------ ValueError if no stream line animation is running Examples -------- .. manim:: EndAnimation class EndAnimation(Scene): def construct(self): func = lambda pos: np.sin(pos[0] / 2) * UR + np.cos(pos[1] / 2) * LEFT stream_lines = StreamLines( func, stroke_width=3, max_anchors_per_line=5, virtual_time=1, color=BLUE ) self.add(stream_lines) stream_lines.start_animation(warm_up=False, flow_speed=1.5, time_width=0.5) self.wait(1) self.play(stream_lines.end_animation()) """ if self.flow_animation is None: raise ValueError("You have to start the animation before fading it out.") def hide_and_wait(mob, alpha): if alpha == 0: mob.set_stroke(opacity=0) elif alpha == 1: mob.set_stroke(opacity=1) def finish_updater_cycle(line, alpha): line.time += dt * self.flow_speed line.anim.interpolate(min(line.time / line.anim.run_time, 1)) if alpha == 1: self.remove(line.anim.mobject) line.anim.finish() max_run_time = self.virtual_time / self.flow_speed creation_rate_func = ease_out_sine creation_staring_speed = creation_rate_func(0.001) * 1000 creation_run_time = ( max_run_time / (1 + self.time_width) * creation_staring_speed ) # creation_run_time is calculated so that the creation animation starts at the same speed # as the regular line flash animation but eases out. dt = 1 / config["frame_rate"] animations = [] self.remove_updater(self.flow_animation) self.flow_animation = None for line in self.stream_lines: create = Create( line, run_time=creation_run_time, rate_func=creation_rate_func, ) if line.time <= 0: animations.append( Succession( UpdateFromAlphaFunc( line, hide_and_wait, run_time=-line.time / self.flow_speed, ), create, ), ) self.remove(line.anim.mobject) line.anim.finish() else: remaining_time = max_run_time - line.time / self.flow_speed animations.append( Succession( UpdateFromAlphaFunc( line, finish_updater_cycle, run_time=remaining_time, ), create, ), ) return AnimationGroup(*animations) # TODO: Variant of StreamLines that is able to respond to changes in the vector field function
36.96099
185
0.57298
37,588
0.967616
0
0
1,685
0.043376
0
0
20,272
0.521856
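The `nudge` method above advances points with a classical fourth-order Runge-Kutta step (the inner `runge_kutta` helper). A standalone numpy sketch of that scheme, using a made-up rotational field rather than a manim VectorField:

import numpy as np

def func(p):
    # Hypothetical field: rotation about the origin in the xy-plane.
    return np.array([-p[1], p[0], 0.0])

def rk4_step(p, h):
    k_1 = func(p)
    k_2 = func(p + h * (k_1 * 0.5))
    k_3 = func(p + h * (k_2 * 0.5))
    k_4 = func(p + h * k_3)
    return h / 6.0 * (k_1 + 2.0 * k_2 + 2.0 * k_3 + k_4)

point = np.array([1.0, 0.0, 0.0])
dt, substeps = 1.0, 60   # mirrors nudge(mob, dt=1, substeps=60)
for _ in range(substeps):
    point = point + rk4_step(point, dt / substeps)
print(point)  # approximately [cos(1), sin(1), 0]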
b92c7cbb70fbc4dd2dec20c24e021d0f6405bd12
19,900
py
Python
marshmallow_dataclass/__init__.py
dan-starkware/marshmallow_dataclass
25c3e041d8c6a87d740984e57a5bd29b768afbf8
[ "MIT" ]
null
null
null
marshmallow_dataclass/__init__.py
dan-starkware/marshmallow_dataclass
25c3e041d8c6a87d740984e57a5bd29b768afbf8
[ "MIT" ]
null
null
null
marshmallow_dataclass/__init__.py
dan-starkware/marshmallow_dataclass
25c3e041d8c6a87d740984e57a5bd29b768afbf8
[ "MIT" ]
null
null
null
""" This library allows the conversion of python 3.7's :mod:`dataclasses` to :mod:`marshmallow` schemas. It takes a python class, and generates a marshmallow schema for it. Simple example:: from marshmallow import Schema from marshmallow_dataclass import dataclass @dataclass class Point: x:float y:float point = Point(x=0, y=0) point_json = Point.Schema().dumps(point) Full example:: from marshmallow import Schema from dataclasses import field from marshmallow_dataclass import dataclass import datetime @dataclass class User: birth: datetime.date = field(metadata= { "required": True # A parameter to pass to marshmallow's field }) website:str = field(metadata = { "marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field }) Schema: ClassVar[Type[Schema]] = Schema # For the type checker """ import inspect from enum import EnumMeta from functools import lru_cache from typing import ( Any, Callable, Dict, List, Mapping, Optional, Set, Tuple, Type, TypeVar, Union, cast, overload, ) import dataclasses import marshmallow import typing_inspect __all__ = ["dataclass", "add_schema", "class_schema", "field_for_schema", "NewType"] NoneType = type(None) _U = TypeVar("_U") # Whitelist of dataclass members that will be copied to generated schema. MEMBERS_WHITELIST: Set[str] = {"Meta"} # Max number of generated schemas that class_schema keeps of generated schemas. Removes duplicates. MAX_CLASS_SCHEMA_CACHE_SIZE = 1024 # _cls should never be specified by keyword, so start it with an # underscore. The presence of _cls is used to detect if this # decorator is being called with parameters or not. def dataclass( _cls: Type[_U] = None, *, repr: bool = True, eq: bool = True, order: bool = False, unsafe_hash: bool = False, frozen: bool = False, base_schema: Optional[Type[marshmallow.Schema]] = None, ): """ This decorator does the same as dataclasses.dataclass, but also applies :func:`add_schema`. It adds a `.Schema` attribute to the class object :param base_schema: marshmallow schema used as a base class when deriving dataclass schema >>> @dataclass ... class Artist: ... name: str >>> Artist.Schema <class 'marshmallow.schema.Artist'> >>> from typing import ClassVar >>> from marshmallow import Schema >>> @dataclass(order=True) # preserve field order ... class Point: ... x:float ... y:float ... Schema: ClassVar[Type[Schema]] = Schema # For the type checker ... >>> Point.Schema().load({'x':0, 'y':0}) # This line can be statically type checked Point(x=0.0, y=0.0) """ # dataclass's typing doesn't expect it to be called as a function, so ignore type check dc = dataclasses.dataclass( # type: ignore _cls, repr=repr, eq=eq, order=order, unsafe_hash=unsafe_hash, frozen=frozen ) if _cls is None: return lambda cls: add_schema(dc(cls), base_schema) return add_schema(dc, base_schema) @overload def add_schema(_cls: Type[_U]) -> Type[_U]: ... @overload def add_schema( base_schema: Type[marshmallow.Schema] = None, ) -> Callable[[Type[_U]], Type[_U]]: ... @overload def add_schema( _cls: Type[_U], base_schema: Type[marshmallow.Schema] = None ) -> Type[_U]: ... def add_schema(_cls=None, base_schema=None): """ This decorator adds a marshmallow schema as the 'Schema' attribute in a dataclass. It uses :func:`class_schema` internally. :param type cls: The dataclass to which a Schema should be added :param base_schema: marshmallow schema used as a base class when deriving dataclass schema >>> class BaseSchema(marshmallow.Schema): ... def on_bind_field(self, field_name, field_obj): ... 
field_obj.data_key = (field_obj.data_key or field_name).upper() >>> @add_schema(base_schema=BaseSchema) ... @dataclasses.dataclass ... class Artist: ... names: Tuple[str, str] >>> artist = Artist.Schema().loads('{"NAMES": ["Martin", "Ramirez"]}') >>> artist Artist(names=('Martin', 'Ramirez')) """ def decorator(clazz: Type[_U]) -> Type[_U]: clazz.Schema = class_schema(clazz, base_schema) # type: ignore return clazz return decorator(_cls) if _cls else decorator def class_schema( clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None ) -> Type[marshmallow.Schema]: """ Convert a class to a marshmallow schema :param clazz: A python class (may be a dataclass) :param base_schema: marshmallow schema used as a base class when deriving dataclass schema :return: A marshmallow Schema corresponding to the dataclass .. note:: All the arguments supported by marshmallow field classes can be passed in the `metadata` dictionary of a field. If you want to use a custom marshmallow field (one that has no equivalent python type), you can pass it as the ``marshmallow_field`` key in the metadata dictionary. >>> import typing >>> Meters = typing.NewType('Meters', float) >>> @dataclasses.dataclass() ... class Building: ... height: Optional[Meters] ... name: str = dataclasses.field(default="anonymous") ... class Meta: ... ordered = True ... >>> class_schema(Building) # Returns a marshmallow schema class (not an instance) <class 'marshmallow.schema.Building'> >>> @dataclasses.dataclass() ... class City: ... name: str = dataclasses.field(metadata={'required':True}) ... best_building: Building # Reference to another dataclasses. A schema will be created for it too. ... other_buildings: List[Building] = dataclasses.field(default_factory=lambda: []) ... >>> citySchema = class_schema(City)() >>> city = citySchema.load({"name":"Paris", "best_building": {"name": "Eiffel Tower"}}) >>> city City(name='Paris', best_building=Building(height=None, name='Eiffel Tower'), other_buildings=[]) >>> citySchema.load({"name":"Paris"}) Traceback (most recent call last): ... marshmallow.exceptions.ValidationError: {'best_building': ['Missing data for required field.']} >>> city_json = citySchema.dump(city) >>> city_json['best_building'] # We get an OrderedDict because we specified order = True in the Meta class OrderedDict([('height', None), ('name', 'Eiffel Tower')]) >>> @dataclasses.dataclass() ... class Person: ... name: str = dataclasses.field(default="Anonymous") ... friends: List['Person'] = dataclasses.field(default_factory=lambda:[]) # Recursive field ... >>> person = class_schema(Person)().load({ ... "friends": [{"name": "Roger Boucher"}] ... }) >>> person Person(name='Anonymous', friends=[Person(name='Roger Boucher', friends=[])]) >>> @dataclasses.dataclass() ... class C: ... important: int = dataclasses.field(init=True, default=0) ... # Only fields that are in the __init__ method will be added: ... unimportant: int = dataclasses.field(init=False, default=0) ... >>> c = class_schema(C)().load({ ... "important": 9, # This field will be imported ... "unimportant": 9 # This field will NOT be imported ... }, unknown=marshmallow.EXCLUDE) >>> c C(important=9, unimportant=0) >>> @dataclasses.dataclass ... class Website: ... url:str = dataclasses.field(metadata = { ... "marshmallow_field": marshmallow.fields.Url() # Custom marshmallow field ... }) ... >>> class_schema(Website)().load({"url": "I am not a good URL !"}) Traceback (most recent call last): ... 
marshmallow.exceptions.ValidationError: {'url': ['Not a valid URL.']} >>> @dataclasses.dataclass ... class NeverValid: ... @marshmallow.validates_schema ... def validate(self, data, **_): ... raise marshmallow.ValidationError('never valid') ... >>> class_schema(NeverValid)().load({}) Traceback (most recent call last): ... marshmallow.exceptions.ValidationError: {'_schema': ['never valid']} >>> # noinspection PyTypeChecker >>> class_schema(None) # unsupported type Traceback (most recent call last): ... TypeError: None is not a dataclass and cannot be turned into one. >>> @dataclasses.dataclass ... class Anything: ... name: str ... @marshmallow.validates('name') ... def validates(self, value): ... if len(value) > 5: raise marshmallow.ValidationError("Name too long") >>> class_schema(Anything)().load({"name": "aaaaaargh"}) Traceback (most recent call last): ... marshmallow.exceptions.ValidationError: {'name': ['Name too long']} """ return _proxied_class_schema(clazz, base_schema) @lru_cache(maxsize=MAX_CLASS_SCHEMA_CACHE_SIZE) def _proxied_class_schema( clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None ) -> Type[marshmallow.Schema]: try: # noinspection PyDataclass fields: Tuple[dataclasses.Field, ...] = dataclasses.fields(clazz) except TypeError: # Not a dataclass try: return class_schema(dataclasses.dataclass(clazz), base_schema) except Exception: raise TypeError( f"{getattr(clazz, '__name__', repr(clazz))} is not a dataclass and cannot be turned into one." ) # Copy all marshmallow hooks and whitelisted members of the dataclass to the schema. attributes = { k: v for k, v in inspect.getmembers(clazz) if hasattr(v, "__marshmallow_hook__") or k in MEMBERS_WHITELIST } # Update the schema members to contain marshmallow fields instead of dataclass fields attributes.update( ( field.name, field_for_schema( field.type, _get_field_default(field), field.metadata, base_schema ), ) for field in fields if field.init ) schema_class = type(clazz.__name__, (_base_schema(clazz, base_schema),), attributes) return cast(Type[marshmallow.Schema], schema_class) def _field_by_type( typ: Union[type, Any], base_schema: Optional[Type[marshmallow.Schema]] ) -> Optional[Type[marshmallow.fields.Field]]: return ( base_schema and base_schema.TYPE_MAPPING.get(typ) ) or marshmallow.Schema.TYPE_MAPPING.get(typ) def _field_by_supertype( typ: Type, default: marshmallow.missing, newtype_supertype: Type, metadata: dict, base_schema: Optional[Type[marshmallow.Schema]], ) -> marshmallow.fields.Field: """ Return a new field for fields based on a super field. (Usually spawned from NewType) """ # Add the information coming our custom NewType implementation typ_args = getattr(typ, "_marshmallow_args", {}) # Handle multiple validators from both `typ` and `metadata`. 
# See https://github.com/lovasoa/marshmallow_dataclass/issues/91 new_validators: List[Callable] = [] for meta_dict in (typ_args, metadata): if "validate" in meta_dict: if marshmallow.utils.is_iterable_but_not_string(meta_dict["validate"]): new_validators.extend(meta_dict["validate"]) elif callable(meta_dict["validate"]): new_validators.append(meta_dict["validate"]) metadata["validate"] = new_validators if new_validators else None metadata = {"description": typ.__name__, **typ_args, **metadata} field = getattr(typ, "_marshmallow_field", None) if field: return field(**metadata) else: return field_for_schema( newtype_supertype, metadata=metadata, default=default, base_schema=base_schema, ) def field_for_schema( typ: type, default=marshmallow.missing, metadata: Mapping[str, Any] = None, base_schema: Optional[Type[marshmallow.Schema]] = None, ) -> marshmallow.fields.Field: """ Get a marshmallow Field corresponding to the given python type. The metadata of the dataclass field is used as arguments to the marshmallow Field. :param typ: The type for which a field should be generated :param default: value to use for (de)serialization when the field is missing :param metadata: Additional parameters to pass to the marshmallow field constructor :param base_schema: marshmallow schema used as a base class when deriving dataclass schema >>> int_field = field_for_schema(int, default=9, metadata=dict(required=True)) >>> int_field.__class__ <class 'marshmallow.fields.Integer'> >>> int_field.default 9 >>> field_for_schema(str, metadata={"marshmallow_field": marshmallow.fields.Url()}).__class__ <class 'marshmallow.fields.Url'> """ metadata = {} if metadata is None else dict(metadata) if default is not marshmallow.missing: metadata.setdefault("default", default) # 'missing' must not be set for required fields. 
if not metadata.get("required"): metadata.setdefault("missing", default) else: metadata.setdefault("required", True) # If the field was already defined by the user predefined_field = metadata.get("marshmallow_field") if predefined_field: return predefined_field # Generic types specified without type arguments if typ is list: typ = List[Any] elif typ is dict: typ = Dict[Any, Any] # Base types field = _field_by_type(typ, base_schema) if field: return field(**metadata) if typ is Any: metadata.setdefault("allow_none", True) return marshmallow.fields.Raw(**metadata) # Generic types origin = typing_inspect.get_origin(typ) if origin: arguments = typing_inspect.get_args(typ, True) # Override base_schema.TYPE_MAPPING to change the class used for generic types below type_mapping = base_schema.TYPE_MAPPING if base_schema else {} if origin in (list, List): child_type = field_for_schema(arguments[0], base_schema=base_schema) list_type = type_mapping.get(List, marshmallow.fields.List) return list_type(child_type, **metadata) if origin in (tuple, Tuple): children = tuple( field_for_schema(arg, base_schema=base_schema) for arg in arguments ) tuple_type = type_mapping.get(Tuple, marshmallow.fields.Tuple) return tuple_type(children, **metadata) elif origin in (dict, Dict): dict_type = type_mapping.get(Dict, marshmallow.fields.Dict) return dict_type( keys=field_for_schema(arguments[0], base_schema=base_schema), values=field_for_schema(arguments[1], base_schema=base_schema), **metadata, ) elif typing_inspect.is_optional_type(typ): subtyp = next(t for t in arguments if t is not NoneType) # type: ignore # Treat optional types as types with a None default metadata["default"] = metadata.get("default", None) metadata["missing"] = metadata.get("missing", None) metadata["required"] = False return field_for_schema(subtyp, metadata=metadata, base_schema=base_schema) elif typing_inspect.is_union_type(typ): from . 
import union_field return union_field.Union( [ ( subtyp, field_for_schema( subtyp, metadata=metadata, base_schema=base_schema ), ) for subtyp in arguments ], **metadata, ) # typing.NewType returns a function with a __supertype__ attribute newtype_supertype = getattr(typ, "__supertype__", None) if newtype_supertype and inspect.isfunction(typ): return _field_by_supertype( typ=typ, default=default, newtype_supertype=newtype_supertype, metadata=metadata, base_schema=base_schema, ) # enumerations if isinstance(typ, EnumMeta): import marshmallow_enum return marshmallow_enum.EnumField(typ, **metadata) # Nested marshmallow dataclass nested_schema = getattr(typ, "Schema", None) # Nested dataclasses forward_reference = getattr(typ, "__forward_arg__", None) nested = ( nested_schema or forward_reference or class_schema(typ, base_schema=base_schema) ) return marshmallow.fields.Nested(nested, **metadata) def _base_schema( clazz: type, base_schema: Optional[Type[marshmallow.Schema]] = None ) -> Type[marshmallow.Schema]: """ Base schema factory that creates a schema for `clazz` derived either from `base_schema` or `BaseSchema` """ # Remove `type: ignore` when mypy handles dynamic base classes # https://github.com/python/mypy/issues/2813 class BaseSchema(base_schema or marshmallow.Schema): # type: ignore def load(self, data: Mapping, *, many: bool = None, **kwargs): all_loaded = super().load(data, many=many, **kwargs) many = self.many if many is None else bool(many) if many: return [clazz(**loaded) for loaded in all_loaded] else: return clazz(**all_loaded) return BaseSchema def _get_field_default(field: dataclasses.Field): """ Return a marshmallow default value given a dataclass default value >>> _get_field_default(dataclasses.field()) <marshmallow.missing> """ # Remove `type: ignore` when https://github.com/python/mypy/issues/6910 is fixed default_factory = field.default_factory # type: ignore if default_factory is not dataclasses.MISSING: return default_factory elif field.default is dataclasses.MISSING: return marshmallow.missing return field.default def NewType( name: str, typ: Type[_U], field: Optional[Type[marshmallow.fields.Field]] = None, **kwargs, ) -> Callable[[_U], _U]: """NewType creates simple unique types to which you can attach custom marshmallow attributes. All the keyword arguments passed to this function will be transmitted to the marshmallow field constructor. >>> import marshmallow.validate >>> IPv4 = NewType('IPv4', str, validate=marshmallow.validate.Regexp(r'^([0-9]{1,3}\\.){3}[0-9]{1,3}$')) >>> @dataclass ... class MyIps: ... ips: List[IPv4] >>> MyIps.Schema().load({"ips": ["0.0.0.0", "grumble grumble"]}) Traceback (most recent call last): ... marshmallow.exceptions.ValidationError: {'ips': {1: ['String does not match expected pattern.']}} >>> MyIps.Schema().load({"ips": ["127.0.0.1"]}) MyIps(ips=['127.0.0.1']) >>> Email = NewType('Email', str, field=marshmallow.fields.Email) >>> @dataclass ... class ContactInfo: ... mail: Email = dataclasses.field(default="[email protected]") >>> ContactInfo.Schema().load({}) ContactInfo(mail='[email protected]') >>> ContactInfo.Schema().load({"mail": "grumble grumble"}) Traceback (most recent call last): ... 
marshmallow.exceptions.ValidationError: {'mail': ['Not a valid email address.']} """ def new_type(x: _U): return x new_type.__name__ = name new_type.__supertype__ = typ # type: ignore new_type._marshmallow_field = field # type: ignore new_type._marshmallow_args = kwargs # type: ignore return new_type if __name__ == "__main__": import doctest doctest.testmod(verbose=True)
34.133791
110
0.647538
413
0.020754
0
0
1,632
0.08201
0
0
11,211
0.563367
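The docstrings above already show the intended workflow: decorating a dataclass attaches a generated marshmallow `Schema`, including nested schemas for dataclass-typed fields. A short round-trip sketch along the same lines (assumes the marshmallow_dataclass package is installed; the Building/City names are illustrative, echoing the doctests):

from typing import List
from marshmallow_dataclass import dataclass

@dataclass
class Building:
    height: float
    name: str = "anonymous"

@dataclass
class City:
    name: str
    buildings: List[Building]

schema = City.Schema()
city = schema.load({"name": "Paris",
                    "buildings": [{"name": "Eiffel Tower", "height": 324.0}]})
print(city.buildings[0].height)   # 324.0
print(schema.dump(city)["name"])  # 'Paris'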
b92e1fb5ed102dbd1d7dc2d4b0ef720e265a976f
1,045
py
Python
electrum_trc/scripts/txradar.py
TheSin-/electrum-trc
d2f5b15fd4399a9248cce0d63e20128f3f54e69c
[ "MIT" ]
1
2019-08-20T18:05:32.000Z
2019-08-20T18:05:32.000Z
electrum_trc/scripts/txradar.py
TheSin-/electrum-trc
d2f5b15fd4399a9248cce0d63e20128f3f54e69c
[ "MIT" ]
1
2022-03-14T19:45:31.000Z
2022-03-14T19:45:31.000Z
electrum_trc/scripts/txradar.py
TheSin-/electrum-trc
d2f5b15fd4399a9248cce0d63e20128f3f54e69c
[ "MIT" ]
null
null
null
#!/usr/bin/env python3
import sys
import asyncio

from electrum_trc.network import filter_protocol, Network
from electrum_trc.util import create_and_start_event_loop, log_exceptions

try:
    txid = sys.argv[1]
except:
    print("usage: txradar txid")
    sys.exit(1)

loop, stopping_fut, loop_thread = create_and_start_event_loop()
network = Network()
network.start()

@log_exceptions
async def f():
    try:
        peers = await network.get_peers()
        peers = filter_protocol(peers, 's')
        results = await network.send_multiple_requests(peers, 'blockchain.transaction.get', [txid])
        r1, r2 = [], []
        for k, v in results.items():
            (r1 if not isinstance(v, Exception) else r2).append(k)
        print(f"Received {len(results)} answers")
        try:
            propagation = len(r1) * 100. / (len(r1) + len(r2))
        except ZeroDivisionError:
            propagation = 0
        print(f"Propagation rate: {propagation:.1f} percent")
    finally:
        stopping_fut.set_result(1)

asyncio.run_coroutine_threadsafe(f(), loop)
28.243243
99
0.675598
0
0
0
0
627
0.6
611
0.584689
154
0.147368
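The script reports the share of servers that returned the transaction rather than an exception. A tiny sketch of just that partitioning step, using a made-up results dict instead of real network answers:

results = {
    "server1": "raw tx hex...",
    "server2": Exception("missing transaction"),
    "server3": "raw tx hex...",
}

ok = [k for k, v in results.items() if not isinstance(v, Exception)]
failed = [k for k, v in results.items() if isinstance(v, Exception)]

propagation = len(ok) * 100. / (len(ok) + len(failed)) if results else 0
print(f"Propagation rate: {propagation:.1f} percent")  # 66.7 percent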
b92ef9143bb84fe6d37501129ff559d015cf231e
1,091
py
Python
jp.atcoder/dp/dp_g/24586988.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
1
2022-02-09T03:06:25.000Z
2022-02-09T03:06:25.000Z
jp.atcoder/dp/dp_g/24586988.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
1
2022-02-05T22:53:18.000Z
2022-02-09T01:29:30.000Z
jp.atcoder/dp/dp_g/24586988.py
kagemeka/atcoder-submissions
91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e
[ "MIT" ]
null
null
null
import sys
import typing

import numpy as np


def solve(
    n: int,
    g: np.array,
) -> typing.NoReturn:
    indeg = np.zeros(n, dtype=np.int64)
    for v in g[:, 1]:
        indeg[v] += 1
    g = g[g[:, 0].argsort()]
    i = np.searchsorted(g[:, 0], np.arange(n + 1))
    q = [v for v in range(n) if not indeg[v]]
    dist = np.zeros(n, dtype=np.int64)
    for u in q:
        for j in range(i[u], i[u + 1]):
            v = g[j, 1]
            indeg[v] -= 1
            dist[v] = max(dist[v], dist[u] + 1)
            if indeg[v]:
                continue
            q.append(v)
    print(dist.max())


def main() -> typing.NoReturn:
    n, m = map(int, input().split())
    g = np.array(
        sys.stdin.read().split(),
        dtype=np.int64,
    ).reshape(m, 2) - 1
    solve(n, g)


OJ = 'ONLINE_JUDGE'
if sys.argv[-1] == OJ:
    from numba import i8, njit
    from numba.pycc import CC

    cc = CC('my_module')
    fn = solve
    signature = (i8, i8[:, :])
    cc.export(fn.__name__, signature)(fn)
    cc.compile()
    exit(0)


from my_module import solve

main()
13.810127
30
0.505041
0
0
0
0
0
0
0
0
25
0.022915
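The submission computes the longest path in a DAG by relaxing edges in a Kahn-style topological order (the numpy/numba machinery is only there for speed on the judge). A dependency-free sketch of the same algorithm with a made-up edge list:

from collections import deque

def longest_path(n, edges):
    adj = [[] for _ in range(n)]
    indeg = [0] * n
    for u, v in edges:
        adj[u].append(v)
        indeg[v] += 1
    dist = [0] * n
    queue = deque(v for v in range(n) if indeg[v] == 0)
    while queue:
        u = queue.popleft()
        for v in adj[u]:
            dist[v] = max(dist[v], dist[u] + 1)
            indeg[v] -= 1
            if indeg[v] == 0:
                queue.append(v)
    return max(dist)

print(longest_path(4, [(0, 1), (1, 2), (0, 3), (3, 2)]))  # 2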
b930187de467bdc99d38231d4b217f6589a62613
2,039
py
Python
starteMessung.py
jkerpe/TroubleBubble
813ad797398b9f338f136bcb96c6c92186d92ebf
[ "MIT" ]
null
null
null
starteMessung.py
jkerpe/TroubleBubble
813ad797398b9f338f136bcb96c6c92186d92ebf
[ "MIT" ]
null
null
null
starteMessung.py
jkerpe/TroubleBubble
813ad797398b9f338f136bcb96c6c92186d92ebf
[ "MIT" ]
1
2021-08-09T14:57:57.000Z
2021-08-09T14:57:57.000Z
from datetime import datetime
from pypylon import pylon
import nimmAuf
import smbus2
import os
import argparse
import bestimmeVolumen
from threading import Thread
import time

programmstart = time.time()

# Parse arguments (e.g. run 'starteMessung.py -n 100' in the terminal)
ap = argparse.ArgumentParser(description="""Skript zum Aufnehmen von Bildern der Teststrecke und der Volumenbestimmung von Luftblasen""")
ap.add_argument("-n", "--number", default=400, type=int,
                help="Anzahl an Frames die aufgenommen werden sollen. Default: 400 Bilder")
ap.add_argument("-fr", "--framerate", default=100, type=int,
                help="Framerate in fps. Richtwerte: <Flow 3 ml/s:50 fps, 3-6ml/s:100 fps, >6ml/s:200 fps; Default: 100 fps")
args = vars(ap.parse_args())

# Extract the parser arguments
numberOfImagesToGrab = args['number']
framerate = args['framerate']

if __name__ == '__main__':
    startzeit = time.time()

    # Check whether a camera is connected
    devices = pylon.TlFactory.GetInstance().EnumerateDevices()
    if len(devices) == 0:
        print("Keine Kamera angeschlossen oder Kamera woanders geöffnet.")
        exit()

    # Check whether the pressure sensor is connected
    try:
        bus = smbus2.SMBus(0)
        bus.read_i2c_block_data(0x40, 0, 2)  # receive 2 bytes
    except OSError:
        print("Kein Drucksensor angeschlossen")
        exit()

    # Build an individual directory name from the current time and the parameters
    dirname = f'{datetime.now().strftime("%Y-%m-%d-%H-%M-%S")}'
    os.mkdir(dirname)  # create the directory
    print(f"Ordnername: {dirname}")
    beginn = time.time() - programmstart

    # Start the capture and processing threads
    t_aufnahme = Thread(target=nimmAuf.starte, args=(dirname, numberOfImagesToGrab, framerate, startzeit))
    t_tracke = Thread(target=bestimmeVolumen.tracke, args=(dirname, numberOfImagesToGrab))
    t_aufnahme.start()
    t_tracke.start()
    t_aufnahme.join()
    t_tracke.join()
34.559322
169
0.703776
0
0
0
0
0
0
0
0
897
0.439706
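A hedged sketch of the argument handling used by starteMessung.py above, parsed standalone with made-up flag values (the option names mirror the script; nothing else is taken from the dataset row):

# Standalone illustration of the two flags the measurement script accepts.
import argparse

ap = argparse.ArgumentParser()
ap.add_argument("-n", "--number", default=400, type=int)
ap.add_argument("-fr", "--framerate", default=100, type=int)

# Equivalent to invoking: starteMessung.py -n 100 -fr 200
args = vars(ap.parse_args(["-n", "100", "-fr", "200"]))
print(args["number"], args["framerate"])  # -> 100 200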
b93050ad4c3c78860eb79accbddb8566a673cb7e
3,211
py
Python
application/services/decart.py
Sapfir0/web-premier-eye
f060b01e98a923374ea60360ba133caaa654b6c7
[ "MIT" ]
null
null
null
application/services/decart.py
Sapfir0/web-premier-eye
f060b01e98a923374ea60360ba133caaa654b6c7
[ "MIT" ]
null
null
null
application/services/decart.py
Sapfir0/web-premier-eye
f060b01e98a923374ea60360ba133caaa654b6c7
[ "MIT" ]
1
2020-01-06T18:27:45.000Z
2020-01-06T18:27:45.000Z
import os
import tempfile


def hasOnePointInside(bigRect, minRect):  # at least one point lies inside
    minY, minX, maxY, maxX = bigRect
    y1, x1, y2, x2 = minRect
    a = (minY <= y1 <= maxY)
    b = (minX <= x1 <= maxX)
    c = (minY <= y2 <= maxY)
    d = (minX <= x2 <= maxX)
    return a or b or c or d


def isCompletelyInside(bigRect, minRect):  # the object is completely inside the rectangle
    y1, x1, y2, x2 = bigRect
    minX = x1
    minY = y1  # seems correct
    maxX = x2
    maxY = y2
    y1, x1, y2, x2 = minRect
    a = (minY <= y1 <= maxY)
    b = (minX <= x1 <= maxX)
    c = (minY <= y2 <= maxY)
    d = (minX <= x2 <= maxX)
    return a and b and c and d  # if True, the object is completely inside the big rectangle


def isPartiallyInside(bigRect, minRect, innerPercent=0.5):  # the object is partially inside the rectangle
    bigLUy, bigLUx, bigRDy, bigRDx = bigRect
    minLUy, minLUx, minRDy, minRDx = minRect

    fullSquare = (minLUy - minRDy) * (minRDx - minLUx)  # not sure this is correct

    # Not sure about the ifs
    if bigLUy < minLUy:
        minLUy = bigLUy
    if bigRDy < minRDy:
        minRDy = bigRDy
    if bigLUx > minLUx:
        minLUx = bigLUx
    if bigRDx > minRDx:
        minRDx = bigRDx

    inObjSquare = (minLUy - minRDy) * (minRDx - minLUx)
    return inObjSquare / fullSquare >= innerPercent


def createGraphic(imagePath: str, searchRect: list, objectsListRect: list):
    import matplotlib.pyplot as plt
    from PIL import Image
    import numpy as np
    import matplotlib.patches as patches

    im = np.array(Image.open(imagePath), dtype=np.uint8)
    fig, ax = plt.subplots(1)
    ax.imshow(im)

    bigRect = Rectangle(searchRect)
    minRects = [Rectangle(i) for i in objectsListRect]

    rect = patches.Rectangle(*bigRect.getMTparam(), linewidth=1, edgecolor='g', facecolor='None')
    ax.add_patch(rect)
    for i in minRects:
        rect = patches.Rectangle(*i.getMTparam(), linewidth=1, edgecolor='r', facecolor='none')
        ax.add_patch(rect)

    temp = tempfile.NamedTemporaryFile()
    path = os.path.join(os.getcwd(), temp.name)
    plt.savefig(path)
    return os.path.split(temp.name + ".png")


class Rectangle:
    LDx = 0
    LDy = 0
    RUx = 0
    RUy = 0

    def __init__(self, coordinates: list):
        if len(coordinates) != 4:
            raise ValueError("You must pass the (x, y) coordinates of two opposite vertices")
        if coordinates[0] >= coordinates[2] or coordinates[1] >= coordinates[3]:
            raise ValueError(
                "The vertices are given incorrectly: first the 2 coordinates of the bottom-left corner, then the top-right one")

        self.LDx, self.LDy, self.RUx, self.RUy = coordinates

    def getWidth(self):
        return self.RUx - self.LDx

    def getHeight(self):
        return self.RUy - self.LDy

    def getLUx(self):
        return self.LDx

    def getLUy(self):
        return self.RUy

    def getMTparam(self):
        return ((self.getLUy(), self.getLUx()),  # why the minus? I don't know
                -self.getHeight(),
                self.getWidth())  # everything is in a completely different order than it should be? what the heck

    def getCenterOfDown(self):
        return [(self.LDx + self.RUx) / 2, self.LDy]
28.927928
117
0.62753
1,229
0.341959
0
0
0
0
0
0
879
0.244574
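A hypothetical call into the rectangle helpers above, assuming the module can be imported as application.services.decart (the import path is a guess based on the repo path; the coordinates are invented):

# Assumed import path; the helpers themselves are shown in the row above.
from application.services import decart

big = (0, 0, 10, 10)    # y1, x1, y2, x2, as the module expects
small = (2, 2, 5, 5)

print(decart.isCompletelyInside(big, small))          # True: every corner of `small` inside `big`
print(decart.hasOnePointInside(big, (8, 8, 12, 12)))  # True: one corner still lies inside `big`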
b9312660991c249b5bd6faf4ead63f4150e99b7e
4,915
py
Python
pysnmp/EXTREME-RTSTATS-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
11
2021-02-02T16:27:16.000Z
2021-08-31T06:22:49.000Z
pysnmp/EXTREME-RTSTATS-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
75
2021-02-24T17:30:31.000Z
2021-12-08T00:01:18.000Z
pysnmp/EXTREME-RTSTATS-MIB.py
agustinhenze/mibs.snmplabs.com
1fc5c07860542b89212f4c8ab807057d9a9206c7
[ "Apache-2.0" ]
10
2019-04-30T05:51:36.000Z
2022-02-16T03:33:41.000Z
# # PySNMP MIB module EXTREME-RTSTATS-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/EXTREME-BASE-MIB # Produced by pysmi-0.3.4 at Mon Apr 29 18:53:03 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") SingleValueConstraint, ValueSizeConstraint, ConstraintsUnion, ValueRangeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ValueRangeConstraint", "ConstraintsIntersection") extremeAgent, = mibBuilder.importSymbols("EXTREME-BASE-MIB", "extremeAgent") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") Unsigned32, iso, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Bits, MibIdentifier, ModuleIdentity, Counter64, Counter32, NotificationType, Integer32, IpAddress, TimeTicks = mibBuilder.importSymbols("SNMPv2-SMI", "Unsigned32", "iso", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Bits", "MibIdentifier", "ModuleIdentity", "Counter64", "Counter32", "NotificationType", "Integer32", "IpAddress", "TimeTicks") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") extremeRtStats = ModuleIdentity((1, 3, 6, 1, 4, 1, 1916, 1, 11)) if mibBuilder.loadTexts: extremeRtStats.setLastUpdated('9906240000Z') if mibBuilder.loadTexts: extremeRtStats.setOrganization('Extreme Networks, Inc.') extremeRtStatsTable = MibTable((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1), ) if mibBuilder.loadTexts: extremeRtStatsTable.setStatus('current') extremeRtStatsEntry = MibTableRow((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1), ).setIndexNames((0, "EXTREME-RTSTATS-MIB", "extremeRtStatsIndex")) if mibBuilder.loadTexts: extremeRtStatsEntry.setStatus('current') extremeRtStatsIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsIndex.setStatus('current') extremeRtStatsIntervalStart = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 2), TimeTicks()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsIntervalStart.setStatus('current') extremeRtStatsCRCAlignErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 3), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsCRCAlignErrors.setStatus('current') extremeRtStatsUndersizePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 4), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsUndersizePkts.setStatus('current') extremeRtStatsOversizePkts = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 5), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsOversizePkts.setStatus('current') extremeRtStatsFragments = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 6), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsFragments.setStatus('current') extremeRtStatsJabbers = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 7), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: 
extremeRtStatsJabbers.setStatus('current') extremeRtStatsCollisions = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 8), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsCollisions.setStatus('current') extremeRtStatsTotalErrors = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 9), Counter32()).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsTotalErrors.setStatus('current') extremeRtStatsUtilization = MibTableColumn((1, 3, 6, 1, 4, 1, 1916, 1, 11, 1, 1, 10), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 10000))).setMaxAccess("readonly") if mibBuilder.loadTexts: extremeRtStatsUtilization.setStatus('current') mibBuilder.exportSymbols("EXTREME-RTSTATS-MIB", extremeRtStatsEntry=extremeRtStatsEntry, extremeRtStatsOversizePkts=extremeRtStatsOversizePkts, extremeRtStatsUndersizePkts=extremeRtStatsUndersizePkts, extremeRtStatsTable=extremeRtStatsTable, extremeRtStatsTotalErrors=extremeRtStatsTotalErrors, extremeRtStats=extremeRtStats, PYSNMP_MODULE_ID=extremeRtStats, extremeRtStatsCollisions=extremeRtStatsCollisions, extremeRtStatsCRCAlignErrors=extremeRtStatsCRCAlignErrors, extremeRtStatsJabbers=extremeRtStatsJabbers, extremeRtStatsIndex=extremeRtStatsIndex, extremeRtStatsUtilization=extremeRtStatsUtilization, extremeRtStatsIntervalStart=extremeRtStatsIntervalStart, extremeRtStatsFragments=extremeRtStatsFragments)
114.302326
713
0.790031
0
0
0
0
0
0
0
0
1,190
0.242116
b931a37de7e1f1ed0fc213effed503351b163f01
9,946
py
Python
goopylib/objects/_BBox.py
BhavyeMathur/goopylib
f9eb1458e9218a8dd4add6693ce70b804624bf91
[ "MIT" ]
25
2020-07-09T10:57:16.000Z
2022-02-06T10:31:34.000Z
goopylib/objects/_BBox.py
BhavyeMathur/goopy
f9eb1458e9218a8dd4add6693ce70b804624bf91
[ "MIT" ]
48
2020-07-02T20:08:40.000Z
2020-07-06T16:09:25.000Z
goopylib/objects/_BBox.py
BhavyeMathur/goopy
f9eb1458e9218a8dd4add6693ce70b804624bf91
[ "MIT" ]
1
2020-12-01T13:45:53.000Z
2020-12-01T13:45:53.000Z
from goopylib.objects.GraphicsObject import GraphicsObject from goopylib.styles import * class BBox(GraphicsObject): # Internal base class for objects represented by bounding box # (opposite corners) Line segment is a degenerate case. resizing_objects = [] def __init__(self, p1, p2, bounds=None, fill=None, outline=None, outline_width=None, cursor="arrow", layer=0, tag=None): self.p1 = p1 self.p2 = p2 # These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1 if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values self.p1[0], self.p2[0] = self.p2[0], self.p1[0] if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. If so, then swap the values self.p1[1], self.p2[1] = self.p2[1], self.p1[1] self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2] GraphicsObject.__init__(self, options=(), cursor=cursor, layer=layer, bounds=bounds, tag=tag) # abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value self.width = self.p2[0] - self.p1[0] self.height = self.p2[1] - self.p1[1] self.min_width = None self.min_height = None self.max_width = None self.max_height = None self.resizing_bounds = {} self.is_resizing = {} self.bounds_thickness = 0 if fill is None: self.fill = STYLES["default"]["fill"] elif isinstance(fill, Colour): # Checking if the option is a colour self.fill = fill else: # If not, raise an error raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}") if outline is None: self.outline = STYLES["default"]["outline"] elif isinstance(outline, Colour): # Checking if the option is a colour self.outline = outline else: # If not, raise an error raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}") if outline_width is None: self.outline_width = STYLES["default"]["width"] elif isinstance(outline_width, int): # Checking if the option is an integer self.outline_width = outline_width else: # If not, raise an error raise GraphicsError(f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}") def __repr__(self): return "_BBox" def _set_resizable(self, resizables, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None, thickness=10): """Override in subclasses""" pass def _move(self, dx, dy): self.p1[0] += dx self.p1[1] += dy self.p2[0] += dx self.p2[1] += dy self.anchor[0] += dx self.anchor[1] += dy def is_clicked(self, mouse_pos): if self.bounds is None: if mouse_pos is None: return False else: if (self.p1[0] < mouse_pos[0] < self.p2[0] or self.p1[0] > mouse_pos[0] > self.p2[0]) and \ (self.p1[1] < mouse_pos[1] < self.p2[1] or self.p1[1] > mouse_pos[1] > self.p2[1]): return True else: return False else: return self.bounds.is_clicked(mouse_pos) def get_p1(self): return self.p1.copy() def get_p2(self): return self.p2.copy() def get_top_right(self): return self.p1.copy() def get_top_left(self): return [self.p2[0], self.p1[1]] def get_bottom_left(self): return [self.p1[0], self.p2[1]] def get_bottom_right(self): return self.p2.copy() def get_top(self): return [(self.p2[0] + self.p1[0]) / 2, self.p1[1]] def get_bottom(self): return [(self.p2[0] + self.p1[0]) / 2, self.p2[1]] def get_left(self): return [self.p1[0], (self.p1[1] + self.p2[1]) / 2] def get_right(self): return [self.p2[0], (self.p1[1] + self.p2[1]) / 2] def get_width(self): return self.width def get_height(self): return 
self.height def get_fill(self): return self.fill def get_outline(self): return self.outline def get_outline_width(self): return self.outline_width def get_anchor(self): return self.anchor def set_dimensions(self, width, height, horizontal_align="center", vertical_align="center"): self.set_width(width, horizontal_align) self.set_height(height, vertical_align) return self def set_resizable(self, top=False, left=False, bottom=False, right=False, min_width=40, min_height=40, bounds_width=10, top_bounds=None, bottom_bounds=None, left_bounds=None, right_bounds=None): if min_width < 1 or min_height < 1: raise GraphicsError(f"\n\nGraphicsError: Minimum height and width of resizable object must be greater than " f"or equal to 1. Right now, min_width={min_width} & min_height={min_height}") self.min_width = min_width self.min_height = min_height self.is_resizing = {"top": top, "left": left, "bottom": bottom, "right": right} self._set_resizable([top, bottom, left, right], top_bounds=top_bounds, bottom_bounds=bottom_bounds, left_bounds=left_bounds, right_bounds=right_bounds, thickness=bounds_width) if top is False and bottom is False and left is False and right is False: if self in GraphicsObject.resizing_objects: GraphicsObject.resizing_objects.remove(self) elif self not in GraphicsObject.resizing_objects: GraphicsObject.resizing_objects.add(self) self.bounds_thickness = bounds_width return self def set_coords(self, p1, p2): self.p1 = p1.copy() self.p2 = p2.copy() # These make sure that the p2 is 'after' p1, ie the x & y value of p2 is greater than that of p1 if self.p1[0] > self.p2[0]: # Checking if p1's x value is greater than p2's. If so, then swap the values self.p1[0], self.p2[0] = self.p2[0], self.p1[0] if self.p1[1] > self.p2[1]: # Checking if p1's y value is greater than p2's. 
If so, then swap the values self.p1[1], self.p2[1] = self.p2[1], self.p1[1] # abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value self.width = self.p2[0] - self.p1[0] self.height = self.p2[1] - self.p1[1] width_scale = (p2[0] - p1[0]) / self.width height_scale = (p2[1] - p1[1]) / self.height # abs(p2[0] - p1[0]) is not required because the p2 value is always greater than or equal to the p1 value self.width = p2[0] - p1[0] self.height = p2[1] - p1[1] self.anchor = [(self.p1[0] + self.p2[0]) // 2, (self.p1[1] + self.p2[1]) // 2] self._update_layer() return self def set_width(self, width, center="center"): if center not in {"center", "right", "left"}: raise GraphicsError( "\n\nThe center argument for resizing the object (set_outline_width) needs to be one of " f'{["center", "right", "left"]}') if center == "left": self.set_coords(self.p1, self.p2.add_x(width - self.width)) elif center == "right": self.set_coords(self.p1.add_x(-(width - self.width)), self.p2) else: self.set_coords(self.p1.add_x(-(width / 2 - self.width)), self.p2.add_x(width / 2 - self.width)) return self def set_height(self, height, center="center"): if center not in {"center", "top", "bottom"}: raise GraphicsError( "\n\nThe center argument for resizing the object (set_height) needs to be one of " f'{["center", "top", "bottom"]}') if center == "top": self.set_coords(self.p1, self.p2.add_y(height - self.height)) elif center == "bottom": self.set_coords(self.p1.add_y(-(height - self.height)), self.p2) else: self.set_coords(self.p1.add_y(-(height / 2 - self.height)), self.p2.add_y(height / 2 - self.height)) return self def set_fill(self, fill): if fill is None: self.fill = STYLES["default"]["fill"] elif isinstance(fill, Colour): # Checking if the option is a colour self.fill = fill else: # If not, raise an error raise GraphicsError(f"\n\nGraphicsError: The Rectangle fill must be a Colour object , not {fill}") self._update_layer() return self def set_outline(self, outline): if outline is None: self.outline = STYLES["default"]["outline"] elif isinstance(outline, Colour): # Checking if the option is a colour self.outline = outline else: # If not, raise an error raise GraphicsError(f"\n\nGraphicsError: The rectangle outline must be a Colour object , not {outline}") self._update_layer() return self def set_outline_width(self, outline_width): if outline_width is None: self.outline_width = STYLES["default"]["width"] elif isinstance(outline_width, int): # Checking if the option is an integer self.outline_width = outline_width else: # If not, raise an error raise GraphicsError( f"\n\nGraphicsError: The rectangle outline width must be an integer, not {outline_width}") self._update_layer() return self
37.81749
122
0.594008
9,853
0.99065
0
0
0
0
0
0
2,454
0.246732
b931c0b51c15ef9d8f1fe028562964e4cc16bd70
670
py
Python
Graph/DFS&BFS.py
Mayner0220/Programmers
42e4783a526506fb7d8208841a76201909ed5c5c
[ "Apache-2.0" ]
1
2021-04-01T06:19:02.000Z
2021-04-01T06:19:02.000Z
Graph/DFS&BFS.py
Mayner0220/Programmers
42e4783a526506fb7d8208841a76201909ed5c5c
[ "Apache-2.0" ]
null
null
null
Graph/DFS&BFS.py
Mayner0220/Programmers
42e4783a526506fb7d8208841a76201909ed5c5c
[ "Apache-2.0" ]
null
null
null
# https://www.acmicpc.net/problem/1260
n, m, v = map(int, input().split())

graph = [[0] * (n+1) for _ in range(n+1)]
visit = [False] * (n+1)

for _ in range(m):
    R, C = map(int, input().split())
    graph[R][C] = 1
    graph[C][R] = 1

def dfs(v):
    visit[v] = True
    print(v, end=" ")
    for i in range(1, n+1):
        if not visit[i] and graph[v][i] == 1:
            dfs(i)

def bfs(v):
    queue = [v]
    visit[v] = False
    while queue:
        v = queue.pop(0)
        print(v, end=" ")
        for i in range(1, n+1):
            if visit[i] and graph[v][i] == 1:
                queue.append(i)
                visit[i] = False

dfs(v)
print()
bfs(v)
19.142857
43
0.470149
0
0
0
0
0
0
0
0
44
0.065672
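A hedged way to drive the DFS/BFS script above end to end, feeding a small made-up graph on stdin (the relative path comes from the repo listing and may need adjusting):

import subprocess

# 4 vertices, 5 edges, start at vertex 1 (made-up sample graph).
sample = "4 5 1\n1 2\n1 3\n1 4\n2 4\n3 4\n"
result = subprocess.run(["python3", "Graph/DFS&BFS.py"], input=sample,
                        capture_output=True, text=True)
print(result.stdout)  # expected: "1 2 4 3" (DFS order), then "1 2 3 4" (BFS order)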
b932e9aa7c1cc0da8573d5baaf3b16b4549529cd
347
py
Python
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py
Jahidul007/Python-Bootcamp
3c870587465ff66c2c1871c8d3c4eea72463abda
[ "MIT" ]
2
2020-12-07T16:07:07.000Z
2020-12-07T16:08:53.000Z
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py
purusharthmalik/Python-Bootcamp
2ed1cf886d1081de200b0fdd4cb4e28008c7e3d1
[ "MIT" ]
null
null
null
coding_intereview/1576. Replace All ?'s to Avoid Consecutive Repeating Characters.py
purusharthmalik/Python-Bootcamp
2ed1cf886d1081de200b0fdd4cb4e28008c7e3d1
[ "MIT" ]
1
2020-10-03T16:38:02.000Z
2020-10-03T16:38:02.000Z
class Solution:
    def modifyString(self, s: str) -> str:
        s = list(s)
        for i in range(len(s)):
            if s[i] == "?":
                for c in "abc":
                    if (i == 0 or s[i-1] != c) and (i+1 == len(s) or s[i+1] != c):
                        s[i] = c
                        break
        return "".join(s)
31.545455
83
0.337176
346
0.997118
0
0
0
0
0
0
10
0.028818
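A quick check of the modifyString solution above with invented inputs, assuming the Solution class has been defined in the same session:

# Invented inputs; every '?' must end up different from both neighbours.
print(Solution().modifyString("a?b?"))  # -> "acba"
print(Solution().modifyString("???"))   # -> "aba"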
b9341a63382a080379eb1fbad26490deed5a76c6
2,404
py
Python
pysteps/tests/helpers.py
Fangyh09/pysteps
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
[ "BSD-3-Clause" ]
6
2019-01-06T07:42:55.000Z
2021-02-03T13:59:50.000Z
pysteps/tests/helpers.py
Fangyh09/pysteps
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
[ "BSD-3-Clause" ]
5
2018-12-23T15:10:27.000Z
2021-01-06T15:03:03.000Z
pysteps/tests/helpers.py
Fangyh09/pysteps
9eb7f4ead0a946d98b7504d1bd66b18dc405ed51
[ "BSD-3-Clause" ]
2
2019-08-06T14:16:43.000Z
2019-08-13T00:36:31.000Z
""" Testing helper functions ======================= Collection of helper functions for the testing suite. """ from datetime import datetime import numpy as np import pytest import pysteps as stp from pysteps import io, rcparams def get_precipitation_fields(num_prev_files=0): """Get a precipitation field from the archive to be used as reference.""" # Selected case date = datetime.strptime("201505151630", "%Y%m%d%H%M") data_source = rcparams.data_sources["mch"] root_path = data_source["root_path"] path_fmt = data_source["path_fmt"] fn_pattern = data_source["fn_pattern"] fn_ext = data_source["fn_ext"] importer_name = data_source["importer"] importer_kwargs = data_source["importer_kwargs"] # Find the input files from the archive fns = io.archive.find_by_date(date, root_path, path_fmt, fn_pattern, fn_ext, timestep=5, num_prev_files=num_prev_files) # Read the radar composites importer = io.get_method(importer_name, "importer") reference_field, quality, metadata = io.read_timeseries(fns, importer, **importer_kwargs) del quality # Not used if num_prev_files == 0: reference_field = np.squeeze(reference_field) # Remove time dimension # Convert to mm/h reference_field, metadata = stp.utils.to_rainrate(reference_field, metadata) # Mask invalid values reference_field = np.ma.masked_invalid(reference_field) # Log-transform the data [dBR] reference_field, metadata = stp.utils.dB_transform(reference_field, metadata, threshold=0.1, zerovalue=-15.0) return reference_field def smart_assert(actual_value, expected, tolerance=None): """ Assert by equality for non-numeric values, or by approximation otherwise. If the precision keyword is None, assert by equality. When the precision is not None, assert that two numeric values (or two sets of numbers) are equal to each other within the tolerance. """ if tolerance is None: assert actual_value == expected else: # Compare numbers up to a certain precision assert actual_value == pytest.approx(expected, 1e-6)
33.388889
80
0.640599
0
0
0
0
0
0
0
0
808
0.336106
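A minimal illustration of smart_assert from the helpers above, assuming the module is importable as pysteps.tests.helpers (that dotted path is an assumption; the compared values are arbitrary):

from pysteps.tests.helpers import smart_assert  # assumed dotted path

smart_assert("mch", "mch")                    # non-numeric: compared by plain equality
smart_assert(0.1 + 0.2, 0.3, tolerance=1e-6)  # numeric: compared via pytest.approx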
b934cd0c4d4115b02def19c6bd570d1877b158cd
3,598
py
Python
modules/courses/courses.py
ehiller/mobilecsp-v18
a59801c44c616d30f5e916d6771e479c8a9e88f7
[ "Apache-2.0" ]
null
null
null
modules/courses/courses.py
ehiller/mobilecsp-v18
a59801c44c616d30f5e916d6771e479c8a9e88f7
[ "Apache-2.0" ]
null
null
null
modules/courses/courses.py
ehiller/mobilecsp-v18
a59801c44c616d30f5e916d6771e479c8a9e88f7
[ "Apache-2.0" ]
null
null
null
# Copyright 2012 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Courses module.""" __author__ = 'Pavel Simakov ([email protected])' from common import resource from controllers import assessments from controllers import lessons from controllers import utils from models import content from models import resources_display from models import custom_modules from models import roles from tools import verify All_LOCALES_PERMISSION = 'can_pick_all_locales' All_LOCALES_DESCRIPTION = 'Can pick all locales, including unavailable ones.' SEE_DRAFTS_PERMISSION = 'can_see_draft_content' SEE_DRAFTS_DESCRIPTION = 'Can see lessons and assessments with draft status.' custom_module = None def can_pick_all_locales(app_context): return roles.Roles.is_user_allowed( app_context, custom_module, All_LOCALES_PERMISSION) def can_see_drafts(app_context): return roles.Roles.is_user_allowed( app_context, custom_module, SEE_DRAFTS_PERMISSION) def register_module(): """Registers this module in the registry.""" def on_module_enabled(): roles.Roles.register_permissions(custom_module, permissions_callback) resource.Registry.register(resources_display.ResourceCourseSettings) resource.Registry.register(resources_display.ResourceUnit) resource.Registry.register(resources_display.ResourceAssessment) resource.Registry.register(resources_display.ResourceLink) resource.Registry.register(resources_display.ResourceLesson) resource.Registry.register(utils.ResourceHtmlHook) def permissions_callback(unused_app_context): return [ roles.Permission(All_LOCALES_PERMISSION, All_LOCALES_DESCRIPTION), roles.Permission(SEE_DRAFTS_PERMISSION, SEE_DRAFTS_DESCRIPTION) ] # provide parser to verify verify.parse_content = content.parse_string_in_scope # setup routes courses_routes = [ ('/', lessons.CourseHandler), ('/activity', lessons.UnitHandler), ('/answer', assessments.AnswerHandler), ('/assessment', lessons.AssessmentHandler), ('/course', lessons.CourseHandler), ('/forum', utils.ForumHandler), ('/preview', utils.PreviewHandler), ('/register', utils.RegisterHandler), ('/resources', utils.ResourcesHandler), ('/rest/locale', utils.StudentLocaleRESTHandler), ('/review', lessons.ReviewHandler), ('/reviewdashboard', lessons.ReviewDashboardHandler), ('/student/editstudent', utils.StudentEditStudentHandler), ('/student/settracks', utils.StudentSetTracksHandler), ('/student/home', utils.StudentProfileHandler), ('/student/unenroll', utils.StudentUnenrollHandler), ('/unit', lessons.UnitHandler)] global custom_module # pylint: disable=global-statement custom_module = custom_modules.Module( 'Course', 'A set of pages for delivering an online course.', [], courses_routes, notify_module_enabled=on_module_enabled) return custom_module
36.714286
78
0.735686
0
0
0
0
0
0
0
0
1,175
0.32657
b934ce47dae53d305023f829683b8ba6f625367b
1,362
py
Python
packages/merlin/protocols/PrefixLayout.py
pyre/pyre
0f903836f52450bf81216c5dfdfdfebb16090177
[ "BSD-3-Clause" ]
25
2018-04-23T01:45:39.000Z
2021-12-10T06:01:23.000Z
packages/merlin/protocols/PrefixLayout.py
pyre/pyre
0f903836f52450bf81216c5dfdfdfebb16090177
[ "BSD-3-Clause" ]
53
2018-05-31T04:55:00.000Z
2021-10-07T21:41:32.000Z
packages/merlin/protocols/PrefixLayout.py
pyre/pyre
0f903836f52450bf81216c5dfdfdfebb16090177
[ "BSD-3-Clause" ]
12
2018-04-23T22:50:40.000Z
2022-02-20T17:27:23.000Z
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <[email protected]>
# (c) 1998-2021 all rights reserved


# support
import merlin


# the manager of intermediate and final build products
class PrefixLayout(merlin.protocol, family="merlin.layouts.prefix"):
    """
    The manager of the all build products, both final and intermediate disposables
    """

    # required state
    bin = merlin.properties.path()
    bin.doc = "the location of executables"

    config = merlin.properties.path()
    config.doc = "global package configuration files"

    doc = merlin.properties.path()
    doc.doc = "package documentation"

    etc = merlin.properties.path()
    etc.doc = "host specific files"

    include = merlin.properties.path()
    include.doc = "library header files"

    lib = merlin.properties.path()
    lib.doc = "libraries"

    libexec = merlin.properties.path()
    libexec.doc = "binaries that are meant to be used by other packages"

    share = merlin.properties.path()
    share.doc = "architecture independent package files"

    var = merlin.properties.path()
    var.doc = "runtime files"

    # framework hooks
    @classmethod
    def pyre_default(cls, **kwds):
        """
        Specify the default implementation
        """
        # choose the default implementer
        return merlin.components.fhs


# end of file
23.482759
82
0.668135
1,146
0.840176
0
0
192
0.140762
0
0
681
0.499267
b9355080468a287acd9198671ea28f44a47c9a46
2,389
py
Python
test/IECoreMaya/ImageConverterTest.py
bradleyhenke/cortex
f8245cc6c9464b1de9e6c6e57068248198e63de0
[ "BSD-3-Clause" ]
386
2015-01-02T11:10:43.000Z
2022-03-10T15:12:20.000Z
test/IECoreMaya/ImageConverterTest.py
bradleyhenke/cortex
f8245cc6c9464b1de9e6c6e57068248198e63de0
[ "BSD-3-Clause" ]
484
2015-01-09T18:28:06.000Z
2022-03-31T16:02:04.000Z
test/IECoreMaya/ImageConverterTest.py
bradleyhenke/cortex
f8245cc6c9464b1de9e6c6e57068248198e63de0
[ "BSD-3-Clause" ]
99
2015-01-28T23:18:04.000Z
2022-03-27T00:59:39.000Z
########################################################################## # # Copyright (c) 2011, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # * Neither the name of Image Engine Design nor the names of any # other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import maya.cmds import IECore import IECoreImage import IECoreMaya class ImageConverterTest( IECoreMaya.TestCase ) : def test( self ) : imageA = IECore.Reader.create( "test/IECoreImage/data/exr/colorBarsWithAlpha.exr" ).read() toMaya = IECoreMaya.ToMayaImageConverter( imageA ) mImage = maya.OpenMaya.MImage() toMaya.convert( mImage ) fromMaya = IECoreMaya.FromMayaImageConverter( mImage ) imageB = fromMaya.convert() self.assertFalse( IECoreImage.ImageDiffOp()( imageA=imageA, imageB=imageB, maxError=1.0/256 ).value ) if __name__ == "__main__": IECoreMaya.TestProgram()
37.920635
92
0.706153
480
0.200921
0
0
0
0
0
0
1,810
0.757639
b936e2da1dfb0c50e0a4123e54c302664e300cf0
4,454
py
Python
tests/core_ptl/check_for_ranks.py
PatrykNeubauer/NeMo
3ada744b884dba5f233f22c6991fc6092c6ca8d0
[ "Apache-2.0" ]
2
2021-09-21T07:36:20.000Z
2022-02-05T15:29:04.000Z
tests/core_ptl/check_for_ranks.py
PatrykNeubauer/NeMo
3ada744b884dba5f233f22c6991fc6092c6ca8d0
[ "Apache-2.0" ]
null
null
null
tests/core_ptl/check_for_ranks.py
PatrykNeubauer/NeMo
3ada744b884dba5f233f22c6991fc6092c6ca8d0
[ "Apache-2.0" ]
12
2021-06-20T08:56:10.000Z
2022-03-16T19:07:10.000Z
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import shutil import torch from omegaconf import OmegaConf from pytorch_lightning import Trainer from pytorch_lightning.utilities.distributed import rank_zero_only from nemo.core import ModelPT from nemo.utils import logging from nemo.utils.exp_manager import ExpManagerConfig, exp_manager class OnesDataset(torch.utils.data.Dataset): def __init__(self, dataset_len): super().__init__() self.__dataset_len = dataset_len def __getitem__(self, *args): return torch.ones(2) def __len__(self): return self.__dataset_len class ExampleModel(ModelPT): def __init__(self, *args, **kwargs): cfg = OmegaConf.structured({}) super().__init__(cfg, trainer=kwargs.get('trainer', None)) # dummy parameter in order to allow DDP to execute self.l1 = torch.nn.modules.Linear(in_features=2, out_features=1) def train_dataloader(self): return None def val_dataloader(self): return None def predict_dataloader(self): dataset = OnesDataset(2) return torch.utils.data.DataLoader(dataset, batch_size=2) def forward(self, batch): return batch.mean() def validation_step(self, batch, batch_idx): return self(batch) def training_step(self, batch, batch_idx): return self(batch) def list_available_models(self): pass def setup_training_data(self): pass def setup_validation_data(self): pass def validation_epoch_end(self, loss): self.log("val_loss", torch.stack(loss).mean()) def instantiate_multinode_ddp_if_possible(): num_gpus = torch.cuda.device_count() trainer = Trainer(gpus=num_gpus, accelerator='ddp', logger=None, checkpoint_callback=None) exp_manager_cfg = ExpManagerConfig(exp_dir='./ddp_check/', use_datetime_version=False, version="") exp_manager(trainer, cfg=OmegaConf.structured(exp_manager_cfg)) return trainer def setup_model(trainer: Trainer): model = ExampleModel(trainer=trainer) logging.info(f"M.Global Rank:{model.global_rank}") logging.info(f"M.Local Rank:{model.local_rank}") logging.info(f"M.World Size:{model.trainer.world_size}") trainer.predict(model) return model def get_rank_info(texts: list, rank_key: str) -> int: for line in texts: if rank_key in line: rank_value = line.split(":")[-1] rank_value = int(rank_value) return rank_value print("Could not find the correct rank key !") exit(1) @rank_zero_only def check_model_ranks(model: ExampleModel): basedir = os.path.join('./ddp_check/', 'default', 'version_0') file_template = "nemo_log_globalrank-{rank}_localrank-{rank}.txt" world_size = torch.cuda.device_count() for rank in range(world_size): filename = file_template.format(rank=rank) filepath = os.path.join(basedir, filename) with open(filepath, 'r') as f: texts = f.readlines() texts = [t.replace("\n", "") for t in texts] log_global_rank = get_rank_info(texts, rank_key='M.Global Rank') log_world_size = get_rank_info(texts, rank_key='M.World Size') if log_global_rank != rank: print("Logged global rank is not equal to trainer.global_rank !") exit(1) if log_world_size != world_size: print("Logged world size if not equal 
to trainer.world_size !") exit(1) @rank_zero_only def cleanup(): if os.path.exists('./ddp_check'): shutil.rmtree('./ddp_check', ignore_errors=True) def run_checks(): cleanup() trainer = instantiate_multinode_ddp_if_possible() model = setup_model(trainer) check_model_ranks(model) print("DDP checks passed !") cleanup() if __name__ == '__main__': run_checks()
28.551282
102
0.687023
1,276
0.286484
0
0
1,051
0.235968
0
0
1,133
0.254378
b93839299c30aa23ab066b85969c7c27e043c202
1,143
py
Python
helpers/json_manager.py
Lofi-Lemonade/Python-Discord-Bot-Template
4cb79197c751c88100ad396adb38e88bf2a4d1ed
[ "Apache-2.0" ]
null
null
null
helpers/json_manager.py
Lofi-Lemonade/Python-Discord-Bot-Template
4cb79197c751c88100ad396adb38e88bf2a4d1ed
[ "Apache-2.0" ]
null
null
null
helpers/json_manager.py
Lofi-Lemonade/Python-Discord-Bot-Template
4cb79197c751c88100ad396adb38e88bf2a4d1ed
[ "Apache-2.0" ]
null
null
null
"""" Copyright © Krypton 2022 - https://github.com/kkrypt0nn (https://krypton.ninja) Description: This is a template to create your own discord bot in python. Version: 4.1 """ import json def add_user_to_blacklist(user_id: int) -> None: """ This function will add a user based on its ID in the blacklist.json file. :param user_id: The ID of the user that should be added into the blacklist.json file. """ with open("blacklist.json", "r+") as file: file_data = json.load(file) file_data["ids"].append(user_id) with open("blacklist.json", "w") as file: file.seek(0) json.dump(file_data, file, indent=4) def remove_user_from_blacklist(user_id: int) -> None: """ This function will remove a user based on its ID from the blacklist.json file. :param user_id: The ID of the user that should be removed from the blacklist.json file. """ with open("blacklist.json", "r") as file: file_data = json.load(file) file_data["ids"].remove(user_id) with open("blacklist.json", "w") as file: file.seek(0) json.dump(file_data, file, indent=4)
31.75
91
0.659668
0
0
0
0
0
0
0
0
629
0.549825
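A hypothetical round trip through the blacklist helpers above, assuming the module is importable as helpers.json_manager and that a blacklist.json file with the shape {"ids": []} sits in the working directory:

import json
from helpers.json_manager import add_user_to_blacklist, remove_user_from_blacklist  # assumed import path

# Seed a minimal blacklist.json so the helpers have something to read.
with open("blacklist.json", "w") as f:
    json.dump({"ids": []}, f)

add_user_to_blacklist(123456789)      # blacklist.json now holds {"ids": [123456789]}
remove_user_from_blacklist(123456789)

with open("blacklist.json") as f:
    print(json.load(f))               # -> {'ids': []}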
b93889b31eb8ffef50e08b669fe2f20c16f4d959
1,628
py
Python
tests/test_common.py
ColinKennedy/ways
1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3
[ "MIT" ]
2
2019-11-10T18:35:38.000Z
2020-05-12T10:37:42.000Z
tests/test_common.py
ColinKennedy/ways
1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3
[ "MIT" ]
5
2017-11-27T18:05:25.000Z
2021-06-01T21:57:48.000Z
tests/test_common.py
ColinKennedy/ways
1eb44e4aa5e35fb839212cd8cb1c59c714ba10d3
[ "MIT" ]
1
2017-11-27T17:54:53.000Z
2017-11-27T17:54:53.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

'''Make sure that generic functions work exactly as we expect.'''

# IMPORT STANDARD LIBRARIES
import unittest

# IMPORT WAYS LIBRARIES
from ways import common


class ParseTestCase(unittest.TestCase):

    '''Test generic parsing-related functions.'''

    def test_working_0001(self):
        '''Test that correct input for expand_string works as expected.'''
        pattern = '/jobs/{JOB}/some_kind/{THING}/real_folders'
        text = '/jobs/some_job_here/some_kind/of/real_folders'

        expected_output = {'JOB': 'some_job_here', 'THING': 'of'}
        self.assertEqual(expected_output, common.expand_string(pattern, text))

    def test_working_0002(self):
        '''Test that correct input for expand_string works as expected.'''
        shot = 'NAME_010'
        format_string = '{SHOT}_{ID}'

        expected_output = {'SHOT': 'NAME', 'ID': '010'}
        self.assertEqual(expected_output, common.expand_string(format_string, shot))

    def test_expand_string_failure_0001(self):
        '''Force expand_string fails to prevent a bad match from occurring.'''
        text = '/jobs/some_job/some_kind/of/real_folders'
        pattern = '/jobs/{JOB}/some_kind/of/real_folders/inner'

        self.assertFalse(common.expand_string(pattern, text))

    def test_expand_string_failure_0002(self):
        '''Force expand_string fails to prevent a bad match from occurring.'''
        text = '/jobs/some_job/some_kind/of/real_folders'
        pattern = '/jobs/{JOB}/some_kind/{SHOTNAME}/real_folders/inner'

        self.assertFalse(common.expand_string(pattern, text))
35.391304
84
0.686732
1,418
0.871007
0
0
0
0
0
0
824
0.506143
b938dd2d4297c0de33a03a4e075f88143c4fb4d8
942
py
Python
setup.py
glibin/natasha
4f5c153f754759c189779f9879decd8d218356af
[ "MIT" ]
1
2020-01-16T14:02:01.000Z
2020-01-16T14:02:01.000Z
setup.py
glibin/natasha
4f5c153f754759c189779f9879decd8d218356af
[ "MIT" ]
null
null
null
setup.py
glibin/natasha
4f5c153f754759c189779f9879decd8d218356af
[ "MIT" ]
null
null
null
from setuptools import setup, find_packages

setup(
    name='natasha',
    version='0.2.0',
    description='Named-entity recognition for russian language',
    url='https://github.com/bureaucratic-labs/natasha',
    author='Dmitry Veselov',
    author_email='[email protected]',
    license='MIT',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],
    keywords='natural language processing, russian morphology, named entity recognition, tomita',
    packages=find_packages(),
    install_requires=[
        'yargy==0.3.0'
    ],
    extras_require={
        'web': [
            'ujson',
            'aiohttp',
        ],
    },
)
29.4375
97
0.59448
0
0
0
0
0
0
0
0
531
0.563694
b93a3daf85b033d7039d8c3747eadb457802db6b
2,814
py
Python
GeneratePassword/generate_password_v2.py
OneScreenfulOfPython/screenfuls
ea4e378c8d9e530edadd4a3315fe9e8acc98460b
[ "Apache-2.0" ]
2
2015-01-19T14:50:55.000Z
2015-01-28T12:45:59.000Z
GeneratePassword/generate_password_v2.py
OneScreenfulOfPython/screenfuls
ea4e378c8d9e530edadd4a3315fe9e8acc98460b
[ "Apache-2.0" ]
null
null
null
GeneratePassword/generate_password_v2.py
OneScreenfulOfPython/screenfuls
ea4e378c8d9e530edadd4a3315fe9e8acc98460b
[ "Apache-2.0" ]
null
null
null
import os, sys
import random
import string

try:
    # Make Python2 work like Python3
    input = raw_input
except NameError:
    # On Python3; already using input
    pass

letters = string.ascii_letters
numbers = string.digits
punctuation = string.punctuation

def generate(password_length, at_least_one_letter,
             at_least_one_number, at_least_one_punctuation):
    """Generate a password by including enough random
    characters to meet the password length restriction.
    In addition, the user can specify that at least one
    of each of the classes of character be used.
    """
    #
    # Any combination of characters is valid
    #
    valid_characters = ""
    if at_least_one_letter:
        valid_characters += letters
    if at_least_one_number:
        valid_characters += numbers
    if at_least_one_punctuation:
        valid_characters += punctuation

    #
    # Start with a blank password and then go round enough
    # times to make a password of the required length.
    #
    password = ""
    for i in range(password_length):
        #
        # Each time around, ensure that one of each of the selected
        # groups is chosen, and then just choose randomly from all
        # groups.
        #
        if at_least_one_letter:
            character = random.choice(letters)
            at_least_one_letter = False
        elif at_least_one_number:
            character = random.choice(numbers)
            at_least_one_number = False
        elif at_least_one_punctuation:
            character = random.choice(punctuation)
            at_least_one_punctuation = False
        else:
            character = random.choice(valid_characters)
        password += character

    #
    # Finally, shuffle the password so we don't always get a
    # letter at the beginning, with a number after and some
    # punctuation.
    #
    characters = list(password)
    #
    # random.shuffle shuffles a list *in place*
    #
    random.shuffle(characters)
    #
    # X.join(...) means: return all the strings in (...) joined by X
    # ", ".join(['Eggs', 'Bacon', 'Beans']) => "Eggs, Bacon, Beans"
    # But if you want to generate *real* .csv files, use the csv module
    # because there are lots of corner-cases.
    #
    password = "".join(characters)

    return password

if __name__ == '__main__':
    password_length = int(input("How many letters? "))
    at_least_one_letter = "Y" == (input("At least one letter [Y/n]? ").upper() or "Y")
    at_least_one_number = "Y" == (input("At least one number [Y/n]? ").upper() or "Y")
    at_least_one_punctuation = "Y" == (input("At least one punctuation [Y/n]? ").upper() or "Y")
    password = generate(password_length, at_least_one_letter,
                        at_least_one_number, at_least_one_punctuation)
    print("Your password is: {}".format(password))
33.5
108
0.658138
0
0
0
0
0
0
0
0
1,139
0.404762
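A hedged, non-interactive use of generate() from the script above, assuming the file is importable as generate_password_v2; the length and flags are arbitrary:

from generate_password_v2 import generate  # assumed module name from the file above

password = generate(12, at_least_one_letter=True,
                    at_least_one_number=True,
                    at_least_one_punctuation=True)
print(len(password))  # -> 12, containing at least one character of each class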
b93a4101b4ff85c90fbde08405fbe7515b2816bd
17,093
py
Python
bot/jobs/thorchain_node_jobs.py
block42-blockchain-company/thornode-telegram-bot
6478b1eb41e36c5fdd327b963b55343de1ce5337
[ "MIT" ]
15
2020-04-21T07:51:26.000Z
2021-11-02T05:45:48.000Z
bot/jobs/thorchain_node_jobs.py
block42-blockchain-company/thornode-telegram-bot
6478b1eb41e36c5fdd327b963b55343de1ce5337
[ "MIT" ]
78
2020-04-13T23:01:16.000Z
2021-05-09T11:46:25.000Z
bot/jobs/thorchain_node_jobs.py
block42-blockchain-company/thornode-telegram-bot
6478b1eb41e36c5fdd327b963b55343de1ce5337
[ "MIT" ]
5
2020-09-03T21:19:16.000Z
2021-11-20T00:17:56.000Z
from constants.messages import get_node_health_warning_message, get_node_healthy_again_message from handlers.chat_helpers import try_message_with_home_menu, try_message_to_all_users from packaging import version from service.utils import * def check_thornodes(context): chat_id = context.job.context['chat_id'] chat_data = context.job.context['chat_data'] inactive_nodes = [] for node_address, local_node in chat_data.get('nodes', {}).items(): try: remote_node = get_thornode_object_or_none(address=node_address) except HTTPError as e: logger.exception(e) continue if remote_node is None: text = 'THORNode ' + local_node['alias'] + ' is not active anymore! 💀' + '\n' + \ 'Address: ' + node_address + '\n\n' + \ 'Please enter another THORNode address.' inactive_nodes.append(node_address) try_message_with_home_menu(context=context, chat_id=chat_id, text=text) continue is_not_blocked = float(local_node['last_notification_timestamp']) < \ datetime.timestamp( datetime.now() - timedelta(seconds=local_node['notification_timeout_in_seconds'])) if is_not_blocked: message = build_notification_message_for_active_node(local_node, remote_node, context) if message: # Update data local_node['status'] = remote_node['status'] local_node['bond'] = remote_node['bond'] local_node['slash_points'] = remote_node['slash_points'] local_node['ip_address'] = remote_node['ip_address'] local_node['last_notification_timestamp'] = datetime.timestamp(datetime.now()) local_node['notification_timeout_in_seconds'] *= NOTIFICATION_TIMEOUT_MULTIPLIER try_message_with_home_menu(context=context, chat_id=chat_id, text=message) else: local_node['notification_timeout_in_seconds'] = INITIAL_NOTIFICATION_TIMEOUT if local_node['status'].upper() in MONITORED_STATUSES and is_thornode_healthy(context, node_address): check_thorchain_block_height(context, node_address=node_address) check_thorchain_catch_up_status(context, node_address=node_address) check_thorchain_midgard_api(context, node_address=node_address) for node_address in inactive_nodes: del chat_data['nodes'][node_address] def build_notification_message_for_active_node(local_node, remote_node, context) -> [str, None]: changed_fields = [ field for field in ['status', 'bond', 'slash_points'] if local_node[field] != remote_node[field] ] threshold = get_slash_points_threshold(context) slash_point_change = abs(int(local_node['slash_points']) - int(remote_node['slash_points'])) if (len(changed_fields) <= 1) and ('slash_points' in changed_fields) and (slash_point_change <= threshold): return None if len(changed_fields) > 0: text = f"THORNode: {local_node['alias']}\n" \ f"Address: {local_node['node_address']}\n" \ f"Status: {local_node['status'].capitalize()}" if 'status' in changed_fields: text += f' ➡️ {remote_node["status"].capitalize()}' text += f"\nBond: {tor_to_rune(int(local_node['bond']))}" if 'bond' in changed_fields: text += f" ➡️ {tor_to_rune(int(remote_node['bond']))}" text += '\nSlash Points: ' + '{:,}'.format(int(local_node['slash_points'])) if 'slash_points' in changed_fields: text += ' ➡️ ' + '{:,}'.format(int(remote_node['slash_points'])) return text else: return None def check_versions_status(context): chat_data = context.job.context['chat_data'] try: node_accounts = get_node_accounts() except Exception as e: logger.exception(e) logger.error("I couldn't get the node accounts while checking version status.") return highest_version = max(map(lambda n: n['version'], node_accounts), key=lambda v: version.parse(v)) last_newest_version = chat_data.get('newest_software_version', None) 
if last_newest_version is None or version.parse( highest_version) > version.parse(last_newest_version): chat_data['newest_software_version'] = highest_version for node in chat_data.get('nodes', {}).values(): if version.parse(node['version']) < version.parse(highest_version): message = f"Consider updating the software on your node: *{node['alias']}* ‼️\n" \ f"Your software version is *{node['version']}* " \ f"but one of the nodes already runs on *{highest_version}*" try_message_with_home_menu( context, chat_id=context.job.context['chat_id'], text=message) def check_churning(context): try: validators = get_node_accounts() except Exception as e: logger.exception(e) logger.error("I couldn't get the node accounts while checking if churning occurred.") return if 'node_statuses' not in context.bot_data: context.bot_data['node_statuses'] = {} for validator in validators: context.bot_data['node_statuses'][ validator['node_address']] = validator['status'] return local_node_statuses = context.bot_data['node_statuses'] churned_in = [] churned_out = [] highest_churn_status_since = 0 for validator in validators: if did_churn_happen(validator, local_node_statuses, highest_churn_status_since): highest_churn_status_since = int(validator['status_since']) for validator in validators: remote_status = validator['status'] local_status = local_node_statuses[ validator['node_address']] if validator[ 'node_address'] in local_node_statuses else "unknown" if remote_status != local_status: if 'active' == remote_status: churned_in.append({ "address": validator['node_address'], "bond": validator['bond'] }) elif 'active' == local_status: churned_out.append({ "address": validator['node_address'], "bond": validator['bond'] }) if len(churned_in) or len(churned_out): text = "🔄 CHURN SUMMARY\n" \ "THORChain has successfully churned:\n\n" text += "Nodes Added:\n" if len(churned_in) else "" for node in churned_in: text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n" text += "\nNodes Removed:\n" if len(churned_out) else "" for node in churned_out: text += f"*{node['address']}*\nBond: *{tor_to_rune(node['bond'])}*\n" text += "\nSystem:\n" try: network = get_network_data() text += f"📡 Network Security: *{network_security_ratio_to_string(get_network_security_ratio(network))}*\n\n" \ f"💚 Total Active Bond: *{tor_to_rune(network['bondMetrics']['totalActiveBond'])}* (total)\n\n" \ "⚖️ Bonded/Staked Ratio: *" + '{:.2f}'.format( int(get_network_security_ratio(network) * 100)) + " %*\n\n" \ "↩️ Bonding ROI: *" + '{:.2f}'.format( float(network['bondingAPY']) * 100) + " %* APY\n\n" \ "↩️ Liquidity ROI: *" + '{:.2f}'.format( float(network['liquidityAPY']) * 100) + " %* APY" context.bot_data.setdefault("vault_addresses", {}) current_chains = get_pool_addresses_from_any_node() for chain in current_chains: if chain['chain'] in context.bot_data['vault_addresses']: if chain['address'] != context.bot_data['vault_addresses'][chain['chain']]: text += f"\n\n🔐 Vault Addresses:" if "Vault Addresses" not in text else "" text += f"\n*{chain['chain']}*: \n" \ f"Old Vault address: {context.bot_data['vault_addresses'][chain['chain']]}\n"\ f"⬇️\n" \ f"New Vault address: {chain['address']}\n" else: text += "\n\n⚠️ 🚨 CHURNING BUT THE VAULT ADDRESSES DID NOT CHANGE 🚨\n" context.bot_data['vault_addresses'][chain['chain']] = chain['address'] except Exception as e: logger.exception(e) try_message_to_all_users(context, text=text) for validator in validators: context.bot_data['node_statuses'][ validator['node_address']] = validator['status'] def 
did_churn_happen(validator, local_node_statuses, highest_churn_status_since) -> bool: remote_status = validator['status'] local_status = local_node_statuses[validator['node_address']] if validator[ 'node_address'] in local_node_statuses else "unknown" if int(validator['status_since']) > highest_churn_status_since and \ ((local_status == 'ready' and remote_status == 'active') or ( local_status == 'active' and remote_status == 'standby')): return True return False def is_thornode_healthy(context, node_address) -> bool: chat_id = context.job.context['chat_id'] node_data = context.job.context['chat_data']['nodes'][node_address] # If not initialized assuming node was healhty. if "healthy" not in context.job.context['chat_data']['nodes'][node_address]: context.job.context['chat_data']['nodes'][node_address]["healthy"] = True was_healthy = node_data["healthy"] try: # Check whether node answers. If it doesn't we get an Exception. get_latest_block_height(node_data['ip_address']) if not was_healthy: try_message_with_home_menu(context=context, chat_id=chat_id, text=get_node_healthy_again_message(node_data)) context.job.context['chat_data']['nodes'][node_address]["healthy"] = True return True except (Timeout, ConnectionError, BadStatusException, Exception): if was_healthy: try_message_with_home_menu(context=context, chat_id=chat_id, text=get_node_health_warning_message(node_data)) context.job.context['chat_data']['nodes'][node_address]["healthy"] = False return False def check_thorchain_block_height(context, node_address): chat_id = context.job.context['chat_id'] node_data = context.job.context['chat_data']['nodes'][node_address] try: block_height = get_latest_block_height(node_data['ip_address']) except (Timeout, ConnectionError): logger.warning(f"Timeout or Connection error with {node_data['ip_address']}") return is_stuck = block_height <= node_data.setdefault('block_height', 0) block_height_stuck_count = node_data.setdefault("block_height_stuck_count", 0) if is_stuck: block_height_stuck_count += 1 if block_height_stuck_count == 1: text = 'Block height is not increasing anymore! 💀' + '\n' + \ 'IP: ' + node_data['ip_address'] + '\n' + \ 'THORNode: ' + node_data['alias'] + '\n' + \ 'Node address: ' + node_address + '\n' + \ 'Block height stuck at: ' + block_height + '\n\n' + \ 'Please check your Thornode immediately!' try_message_with_home_menu(context=context, chat_id=chat_id, text=text) else: if block_height_stuck_count >= 1: text = f"Block height is increasing again! 
👌\n" + \ f"IP: {node_data['ip_address']}\n" + \ f"THORNode: {node_data['alias']}\n" + \ f"Node address: {node_address}\n" + \ f"Block height now at: {block_height}\n" try_message_with_home_menu(context=context, chat_id=chat_id, text=text) block_height_stuck_count = 0 node_data['block_height'] = block_height node_data["block_height_stuck_count"] = block_height_stuck_count def check_solvency_job(context): message = check_solvency(context) if message: try_message_to_all_users(context, text=message) def check_solvency(context) -> [str, None]: try: asgard_solvency = asgard_solvency_check() yggdrasil_solvency = yggdrasil_solvency_check() except (Timeout, ConnectionError): logger.warning(f"Timeout or Connection error while querying Asgard and Yggdrasil.") return None except Exception as e: logger.exception(e) return None is_solvent = asgard_solvency['is_solvent'] and yggdrasil_solvency['is_solvent'] insolvency_count = context.bot_data.setdefault("insolvency_count", 0) message = None if not is_solvent: insolvency_count += 1 if insolvency_count == MISSING_FUNDS_THRESHOLD: message = 'THORChain is *missing funds*! 💀\n\n' message += get_insolvent_balances_message(asgard_solvency, yggdrasil_solvency) else: if insolvency_count >= MISSING_FUNDS_THRESHOLD: message = 'THORChain is *100% solvent* again! 👌\n' insolvency_count = 0 context.bot_data["insolvency_count"] = insolvency_count return message def check_thorchain_catch_up_status(context, node_address): """ Check if node is some blocks behind with catch up status """ chat_id = context.job.context['chat_id'] node_data = context.job.context['chat_data']['nodes'][node_address] if 'is_catching_up' not in node_data: node_data['is_catching_up'] = False try: is_currently_catching_up = is_thorchain_catching_up( node_data['ip_address']) except (Timeout, ConnectionError): logger.warning(f"Timeout or Connection error with {node_data['ip_address']}") return if node_data['is_catching_up'] != is_currently_catching_up: try: block_height = get_latest_block_height(node_data['ip_address']) except (Timeout, ConnectionError): logger.warning(f"Timeout or Connection error with {node_data['ip_address']}") block_height = "currently unavailable" if is_currently_catching_up: node_data['is_catching_up'] = True text = 'The Node is behind the latest block height and catching up! 💀 ' + '\n' + \ 'IP: ' + node_data['ip_address'] + '\n' + \ 'THORNode: ' + node_data['alias'] + '\n' + \ 'Node address: ' + node_address + '\n' + \ 'Current block height: ' + block_height + '\n\n' + \ 'Please check your Thornode immediately!' else: node_data['is_catching_up'] = False text = 'The node caught up to the latest block height again! 👌' + '\n' + \ 'IP: ' + node_data['ip_address'] + '\n' + \ 'THORNode: ' + node_data['alias'] + '\n' + \ 'Node address: ' + node_address + '\n' + \ 'Current block height: ' + block_height try_message_with_home_menu(context=context, chat_id=chat_id, text=text) def check_thorchain_midgard_api(context, node_address): """ Check that Midgard API is ok """ chat_id = context.job.context['chat_id'] node_data = context.job.context['chat_data']['nodes'][node_address] was_healthy = node_data.setdefault('is_midgard_healthy', True) is_midgard_healthy = is_midgard_api_healthy(node_data['ip_address']) if was_healthy != is_midgard_healthy: if is_midgard_healthy: text = 'Midgard API is healthy again! 
👌' + '\n' + \ 'IP: ' + node_data['ip_address'] + '\n' + \ 'THORNode: ' + node_data['alias'] + '\n' + \ 'Node address: ' + node_address try_message_with_home_menu(context, chat_id=chat_id, text=text) else: text = 'Midgard API is not healthy anymore! 💀' + '\n' + \ 'IP: ' + node_data['ip_address'] + '\n' + \ 'THORNode: ' + node_data['alias'] + '\n' + \ 'Node address: ' + node_address + '\n\n' + \ 'Please check your Thornode immediately!' try_message_with_home_menu(context, chat_id=chat_id, text=text) node_data['is_midgard_healthy'] = is_midgard_healthy
43.164141
126
0.600889
0
0
0
0
0
0
0
0
4,701
0.273728
b93aaafe8012e07a3a1b7cd6bfac2b4027e51ebd
3,760
py
Python
hard-gists/7578539/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
21
2019-07-08T08:26:45.000Z
2022-01-24T23:53:25.000Z
hard-gists/7578539/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
5
2019-06-15T14:47:47.000Z
2022-02-26T05:02:56.000Z
hard-gists/7578539/snippet.py
jjhenkel/dockerizeme
eaa4fe5366f6b9adf74399eab01c712cacaeb279
[ "Apache-2.0" ]
17
2019-05-16T03:50:34.000Z
2021-01-14T14:35:12.000Z
from pylab import * from numpy import * from numpy.linalg import solve from scipy.integrate import odeint from scipy.stats import norm, uniform, beta from scipy.special import jacobi a = 0.0 b = 3.0 theta=1.0 sigma=sqrt(theta/(2*(a+b+2))) tscale = 0.05 invariant_distribution = poly1d( [-1 for x in range(int(a))], True)*poly1d( [1 for x in range(int(b))], True) def eigenvalue(n): return theta*n*(n+a+b+1)/(a+b+2) gaussian_var = norm() def dW(dt): return norm.rvs() / sqrt(dt) def random_walk(y0, tmax, dt, times = None): dt = dt * tscale def rhs(y,t): return -theta*(y-(a-b)/(a+b+2)) + sqrt(2*theta*(1-y*y)/(a+b+2))*dW(dt/tscale) if (times is None): times = arange(0,tmax,dt) y = zeros(shape=times.shape, dtype=float) y[0] = y0 for i in range(1,y.shape[0]): y[i] = y[i-1] + rhs(y[i-1], times[i])*dt if abs(y[i]) > 1: y[i] = y[i] / abs(y[i]) return (times, y) def beta_prior(s, f): return poly1d(ones(shape=(s,)), True)*poly1d(-1*ones(shape=(f,)), True) def poly_to_jacobi(x): """x is a poly1d object""" xc = x.coeffs N = x.order+1 matrix = zeros(shape=(N,N), dtype=float) for i in range(N): matrix[N-i-1:N, i] = jacobi(i,a,b).coeffs return solve(matrix, xc) def jacobi_to_poly(x): result = poly1d([0]) for i in range(x.shape[0]): result = result + (jacobi(i,a,b)*invariant_distribution)*x[i] return result def jacobi_to_poly_no_invariant(x): result = poly1d([0]) for i in range(x.shape[0]): result = result + jacobi(i,a,b)*x[i] return result def propagate_jacobi(pc, t): """Takes jacobi coefficients and propagates them""" n = arange(pc.shape[0], dtype=float) l = theta*n*(n+a+b+1.0)/(a+b+2.0)*tscale return exp(-l*t)*pc def truncate_unnecessary_jacobi(p): p_normalized = p / (abs(p).sum()) cs = cumsum(abs(p_normalized[::-1]))[::-1] return p_normalized[where(abs(cs) > 1e-4)] def pde_solve(prior, t): result = zeros(shape=(t.shape[0], prior.shape[0]), dtype=float) result[0,:] = prior for i in range(1,t.shape[0]): result[i,:] = propagate_jacobi(result[i-1,:], t[i]-t[i-1]) return result def transform_to_x(pdf, x): result = zeros(shape=(pdf.shape[0], x.shape[0]), dtype=float) for i in range(0, pdf.shape[0]): p = jacobi_to_poly(pdf[i,:]) result[i,:] = p(x) result[i,:] /= result[i,:].sum() return result tmax = 4 prior = beta_prior(40, 20) prior_in_jacobi = poly_to_jacobi(prior) dt = 0.1 times = arange(0,tmax,dt) x = arange(-1,1,0.01) rw_dt = 0.01 t, y = random_walk(0.35*2-1, tmax, rw_dt) solution_as_x = zeros(shape=(times.size, x.size), dtype=float) solution_as_jacobi = None empirical_ctr = zeros(shape=(4,), dtype=float) for i in range(0,4): nt = int(1.0/dt) prior = prior_in_jacobi rnd = uniform(0,1) if (i > 0): nsamples = 40 r = rnd.rvs(nsamples) ctr = (y[i/rw_dt]+1)/2.0 print "CTR: " + str(ctr) success = (r < ctr).sum() print "Empirical: " + str(success / float(nsamples)) evidence = beta_prior( nsamples - success, success) prior = None j = truncate_unnecessary_jacobi(solution_as_jacobi[int(1/dt)-1]) prior = poly_to_jacobi(evidence * jacobi_to_poly_no_invariant(j)) empirical_ctr[i] = success / float(nsamples) solution_as_jacobi = pde_solve(prior, times[i*nt:(i+1)*nt]) solution_as_x[i*nt:(i+1)*nt] = transform_to_x(solution_as_jacobi, x) plot(arange(0,4), empirical_ctr, 'go') plot(t, (y+1)/2.0, 'k') imshow(solution_as_x.transpose(), origin='lower', extent=[0,tmax,0,1]) xlabel("time") ylabel("CTR") title("Bayesian Estimate of CTR") colorbar() show()
27.246377
109
0.611702
0
0
0
0
0
0
0
0
148
0.039362
b93b21d31a5eecb527d2b3ad7f00cf5d4683d661
1,535
py
Python
forms.py
lennykioko/Flask-social-network
15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65
[ "MIT" ]
1
2018-04-15T19:35:54.000Z
2018-04-15T19:35:54.000Z
forms.py
lennykioko/Flask-social-network
15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65
[ "MIT" ]
null
null
null
forms.py
lennykioko/Flask-social-network
15bfe1f7dca90074c0cbef62c5da9d5a25b5ce65
[ "MIT" ]
null
null
null
# forms are not just about display, instead they are more of validation
# wtf forms protect our site against csrf attacks
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, TextAreaField
from wtforms.validators import (DataRequired, Regexp, ValidationError,
                                Email, Length, EqualTo)

from models import User


def name_exists(form, field):
    if User.select().where(User.username == field.data).exists():
        raise ValidationError('User with this name already exists.')


def email_exists(form, field):
    if User.select().where(User.email == field.data).exists():
        raise ValidationError('User with this email already exists.')


class RegisterForm(FlaskForm):
    username = StringField(
        'Username',  # is the label
        validators=[
            DataRequired(),
            Regexp(
                r'^[a-zA-Z0-9_]+$',
                message = ("Username should be one word, letters, numbers and underscores only.")
            ),
            name_exists
        ])
    email = StringField(
        'Email',
        validators=[
            DataRequired(),
            Email(),
            email_exists
        ])
    password = PasswordField(
        'Password',
        validators=[
            DataRequired(),
            Length(min=8),
            EqualTo('password2', message = 'Passwords must match')
        ])
    password2 = PasswordField(
        'Confirm Password',
        validators=[DataRequired()
        ])


class LoginForm(FlaskForm):
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])


class PostForm(FlaskForm):
    content = TextAreaField("What's Up?", validators = [DataRequired()])
25.583333
85
0.712704
867
0.564821
0
0
0
0
0
0
403
0.262541
b93b8add4495a7de42fb7a036f7ba8c5ddea0d87
1,508
py
Python
pantam_cli/utils/messages.py
flmnt/pantam
da47d977e69ec410d0642b5ade1f2323c1b6b350
[ "MIT" ]
2
2020-10-04T10:29:43.000Z
2021-03-30T13:45:09.000Z
pantam_cli/utils/messages.py
flmnt/pantam
da47d977e69ec410d0642b5ade1f2323c1b6b350
[ "MIT" ]
null
null
null
pantam_cli/utils/messages.py
flmnt/pantam
da47d977e69ec410d0642b5ade1f2323c1b6b350
[ "MIT" ]
null
null
null
from sys import stderr, stdout from enum import Enum from colored import fg, attr PANTAM: str = fg("yellow") + attr("bold") + "PANTAM" + attr("reset") colour_msg = lambda msg, colour: fg(colour) + attr("bold") + msg + attr("reset") info_msg = lambda msg: colour_msg(msg, "blue") success_msg = lambda msg: colour_msg(msg, "green") error_msg = lambda msg: colour_msg(msg, "red") class NewLine(Enum): before = 1 after = 2 both = 3 def write_msg(msg: str, spacing: NewLine = None) -> None: """Write message to stdout""" prefix: str = "\n" if spacing in (NewLine.before, NewLine.both) else "" suffix: str = "\n" if spacing in (NewLine.after, NewLine.both) else "" stdout.write("%s%s%s" % (prefix, msg, suffix)) def write_error(msg: str) -> None: """Write message to stderr""" stderr.write("\n%s\n" % msg) welcome_msg = ( lambda: PANTAM + """ The microframework for microservices. Let's build your app... """ ) name_index_file_msg = lambda: "What is the name of your main script?" name_actions_folder_msg = lambda: "What is the name of your actions folder?" def create_actions_file_msg(second_run: bool): """Actions File Message""" article = "another" if second_run else "an" return "Do you want to create %s action file?" % article name_actions_file_msg = lambda: "What is the name of your actions file?" confirm_structure_msg = ( lambda structure: """Your application will look like this: %s Happy to proceed?""" % structure )
24.322581
80
0.671088
62
0.041114
0
0
0
0
0
0
482
0.319629
b93da1b1bbce8a3e5fafae55f093b2f5323fb641
2,510
py
Python
tests/manage/test_remove_mon_from_cluster.py
zmc/ocs-ci
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
[ "MIT" ]
null
null
null
tests/manage/test_remove_mon_from_cluster.py
zmc/ocs-ci
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
[ "MIT" ]
null
null
null
tests/manage/test_remove_mon_from_cluster.py
zmc/ocs-ci
fcf51f3637f657689ba5a8ac869f2b14ac04b0cf
[ "MIT" ]
null
null
null
""" A Testcase to remove mon from when I/O's are happening. Polarion-ID- OCS-355 """ import logging import pytest from ocs_ci.ocs import ocp, constants from ocs_ci.framework.testlib import tier4, ManageTest from ocs_ci.framework import config from ocs_ci.ocs.resources import pod from tests.helpers import run_io_with_rados_bench, delete_cephblockpool from ocs_ci.ocs.cluster import CephCluster from ocs_ci.utility.retry import retry from ocs_ci.ocs.exceptions import CephHealthException log = logging.getLogger(__name__) @retry(CephHealthException, 8, 3, 1) def verify_mon_pod_up(ceph_cluster, pods): """ Verify mon pods are in Running state. Returns: bool: True for wait for the resource, False otherwise """ log.info(f"Verifying all mons pods are up and Running") ceph_cluster.cluster_health_check(timeout=3) ret = pods.wait_for_resource( condition=constants.STATUS_RUNNING, selector='app=rook-ceph-mon', resource_count=3, timeout=700) log.info(f"waited for all mon pod to come up and running {ret}") return ret def run_io_on_pool(): """ Runs the I/O on the pool and delete the pool Returns: A thread of I/O """ tools_pod = pod.get_ceph_tools_pod() tools_pod.add_role(role='client') return run_io_with_rados_bench( ceph_pods=[tools_pod], config={'time': 45, 'cleanup': False, 'pool': 'test-pool' } ) @tier4 @pytest.mark.polarion_id("OCS-355") class TestRemoveMonFromCluster(ManageTest): def test_remove_mon_pod_from_cluster(self): """ To remove mon pod from the cluster after the I/O is performed on the pool and waiting for the operator to create a new mon pod on its own """ ceph_cluster = CephCluster() pods = ocp.OCP( kind=constants.POD, namespace=config.ENV_DATA['cluster_namespace'] ) list_mons = ceph_cluster.get_mons_from_cluster() assert len(list_mons) > 1, pytest.skip( "INVALID: Mon count should be more than one to delete." ) assert run_io_on_pool(), 'Failed to run I/O on the pool' assert delete_cephblockpool('test-pool'), 'Failed to delete pool' ceph_cluster.cluster_health_check(timeout=0) ceph_cluster.remove_mon_from_cluster() assert verify_mon_pod_up(ceph_cluster, pods), f"Mon pods are not up and running state" ceph_cluster.cluster_health_check(timeout=60)
29.529412
94
0.688446
1,008
0.401594
0
0
1,605
0.639442
0
0
838
0.333865
b93f9ebd7406695d9627c10b5f85877c35692320
2,690
py
Python
smartystreets_python_sdk/us_autocomplete_pro/client.py
Caaz/smartystreets-python-sdk
f56cd00d29861bde297143c128f79a4b1d89541c
[ "Apache-2.0" ]
null
null
null
smartystreets_python_sdk/us_autocomplete_pro/client.py
Caaz/smartystreets-python-sdk
f56cd00d29861bde297143c128f79a4b1d89541c
[ "Apache-2.0" ]
null
null
null
smartystreets_python_sdk/us_autocomplete_pro/client.py
Caaz/smartystreets-python-sdk
f56cd00d29861bde297143c128f79a4b1d89541c
[ "Apache-2.0" ]
null
null
null
from smartystreets_python_sdk import Request
from smartystreets_python_sdk.exceptions import SmartyException
from smartystreets_python_sdk.us_autocomplete_pro import Suggestion, geolocation_type


class Client:
    def __init__(self, sender, serializer):
        """
        It is recommended to instantiate this class using ClientBuilder.build_us_autocomplete_pro_api_client()
        """
        self.sender = sender
        self.serializer = serializer

    def send(self, lookup):
        """
        Sends a Lookup object to the US Autocomplete Pro API and stores the result in the Lookup's result field.
        """
        if not lookup or not lookup.search:
            raise SmartyException('Send() must be passed a Lookup with the search field set.')

        request = self.build_request(lookup)

        response = self.sender.send(request)
        if response.error:
            raise response.error

        result = self.serializer.deserialize(response.payload)
        suggestions = self.convert_suggestions(result.get('suggestions') or [])
        lookup.result = suggestions
        return suggestions

    def build_request(self, lookup):
        request = Request()

        self.add_parameter(request, 'search', lookup.search)
        self.add_parameter(request, 'max_results', lookup.max_results)
        self.add_parameter(request, 'include_only_cities', self.build_filter_string(lookup.city_filter))
        self.add_parameter(request, 'include_only_states', self.build_filter_string(lookup.state_filter))
        self.add_parameter(request, 'include_only_zip_codes', self.build_filter_string(lookup.zip_filter))
        self.add_parameter(request, 'exclude_states', self.build_filter_string(lookup.exclude))
        self.add_parameter(request, 'prefer_cities', self.build_filter_string(lookup.prefer_cities))
        self.add_parameter(request, 'prefer_states', self.build_filter_string(lookup.prefer_states))
        self.add_parameter(request, 'prefer_zip_codes', self.build_filter_string(lookup.prefer_zips))
        self.add_parameter(request, 'prefer_ratio', lookup.prefer_ratio)
        self.add_parameter(request, 'prefer_geolocation', lookup.prefer_geo)
        self.add_parameter(request, 'selected', lookup.selected)

        return request

    @staticmethod
    def build_filter_string(filter_list):
        return ','.join(filter_list or []) or None

    @staticmethod
    def convert_suggestions(suggestion_dictionaries):
        return [Suggestion(suggestion) for suggestion in suggestion_dictionaries]

    @staticmethod
    def add_parameter(request, key, value):
        if value and value != 'none':
            request.parameters[key] = value
42.03125
112
0.717472
2,492
0.926394
0
0
394
0.146468
0
0
530
0.197026
b94044f865f05e0aee9b401bba3907e01e40ff6c
11,578
py
Python
mssqlvc.py
Saritasa/mssqlvc
836caeea59cc0ed23234687b94062e007707c603
[ "BSD-2-Clause" ]
2
2016-09-22T04:36:46.000Z
2018-07-31T21:36:42.000Z
mssqlvc.py
Saritasa/mssqlvc
836caeea59cc0ed23234687b94062e007707c603
[ "BSD-2-Clause" ]
1
2016-02-02T07:58:29.000Z
2016-02-02T14:19:18.000Z
mssqlvc.py
krasninja/mssqlvc
836caeea59cc0ed23234687b94062e007707c603
[ "BSD-2-Clause" ]
2
2016-09-21T09:48:44.000Z
2020-03-24T15:59:54.000Z
# -*- coding: utf-8 -*- """ mssqlvc ~~~~~~~ Database version control utility for Microsoft SQL Server. See README.md for more information. Licensed under the BSD license. See LICENSE file in the project root for full license information. """ import argparse import datetime import io import logging import os import re import sys import urlparse try: import clr except ImportError: print('Cannot import crl module, make sure you run this script using IronPython') exit(2) import System clr.AddReference('Microsoft.SqlServer.Smo') clr.AddReference('Microsoft.SqlServer.SqlEnum') clr.AddReference('Microsoft.SqlServer.ConnectionInfo') import Microsoft.SqlServer.Management.Smo as Smo import Microsoft.SqlServer.Management.Common as Common __author__ = 'Ivan Kozhin' __copyright__ = 'Copyright (c) 2015-2016, Saritasa' __license__ = 'BSD' __version__ = '1.4.5' __all__ = ['MsSqlVersion'] class ScriptExecutionError(Exception): pass class MsSqlVersion(object): """ SQL Server patch migration class. """ class bcolors: OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BOLD = '\033[1m' def __init__(self, connection_string, patch_dir='.', exclude_pattern=None, logger=None, stop_on_error=False, noexecute=False, case_insensitive=False, record_files_only=False): """ Initialize instance with connection and database objects. :param connection_string: Connection string in rfc1738 url format :param patch_dir: Patch directory with .sql files :param exclude_pattern: String with regular expression the patch files should match :param logger: Logger that is used for logging :param stop_on_error: Stop execution on error, default behavior is to continue :param case_insensitive: Use case insensitive to compare patch files :param record_files_only: Only file names will be stored to patch table without folder paths """ url = urlparse.urlparse(connection_string) is_local_login = not url.username self.connection = Common.ServerConnection(LoginSecure=is_local_login, ServerInstance=url.hostname, DatabaseName=url.path.replace('/', '')) if not is_local_login: self.connection.Login = url.username self.connection.Password = url.password self.server = Smo.Server(self.connection) self.database = self.server.Databases[self.connection.DatabaseName] self.server.ConnectionContext.ConnectTimeout = 90 self.exclude_pattern = exclude_pattern self.patch_dir = patch_dir self.stop_on_error = stop_on_error self.case_insensitive = case_insensitive self.record_files_only = record_files_only self.executed_count = 0 self.logger = logging.NullHandler() if not logger else logger if not os.path.exists(patch_dir): raise Exception('Patch folder does not exist') if 'mssql' not in connection_string: raise Exception('Wrong connection string, it should contain mssql word') exists = self._create_patch_table_if_not_exists(self.database) if not exists: self.logger.info('[%s] created _patch_history table' % (self.database.Name,)) def __del__(self): if self.server: self.server.ConnectionContext.Disconnect() def update(self): """Executes database update process""" patches = self.get_pending_patches() self.logger.debug('Files to execute %s' % (patches,)) for patch in patches: success = self.execute_file(patch) if success: self.executed_count += 1 self.put_patch(patch) if not success and self.stop_on_error: self.logger.critical(MsSqlVersion.bcolors.WARNING + 'Execution stopped. Please fix errors and try again.' 
+ MsSqlVersion.bcolors.ENDC) raise ScriptExecutionError() self.logger.info('[%s] Executed %d patch(-es)' % (self.database.Name, self.executed_count)) def fill(self): """Skip scripts execution but add them to patches table""" patches = self.get_pending_patches() for patch in patches: self.logger.info('Add file %s' % (patch,)) self.put_patch(patch) def get_pending_patches(self): applied_patches = self.get_applied_patches() if self.record_files_only: applied_patches = [os.path.basename(f) for f in applied_patches] patches = self._get_sql_files_from_dir(applied_patches) patches.sort() return patches def execute_file(self, file): """Executes file against database in transaction, returns True if success""" ret = True try: full_name = os.path.join(os.path.normpath(self.patch_dir), file) with io.open(full_name, 'r', encoding='utf8') as sql_file: sql = sql_file.read() self.logger.info('[%s] Executing %s...' % (self.database.Name, file)) self.connection.BeginTransaction() self.database.ExecuteNonQuery(sql) self.connection.CommitTransaction() except Exception as e: self.connection.RollBackTransaction() self.logger.error('Exception on %s' % (file,)) message = e.message or e if e.clsException.InnerException is not None and e.clsException.InnerException.InnerException is not None: message += ' ' + e.clsException.InnerException.InnerException.Message self.logger.error('[%s] %s (%s)' % (self.database.Name, full_name, message)) ret = False return ret def put_patch(self, file): """Write record that file has been executed""" now = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') if self.record_files_only: file = os.path.basename(file) sql = 'insert [_patch_history] (name, applied_at) values(\'%s\', \'%s\');' % (file, now) self.database.ExecuteNonQuery(sql) def get_applied_patches(self): rows = self.database.ExecuteWithResults('select name from [_patch_history];').Tables[0].Rows return set([row['name'] for row in rows]) def _get_sql_files_from_dir(self, exclude_list=[]): """Get all script files from directory""" _exclude_list = set(exclude_list) if not self.case_insensitive else [f.lower() for f in exclude_list] prevdir = os.getcwd() os.chdir(self.patch_dir) sql_files = [] for root, dirs, files in os.walk('.'): for file in files: file = os.path.normpath(os.path.join(root, file)) _file = file if self.case_insensitive: _file = _file.lower() if self.record_files_only: _file = os.path.basename(_file) if (_file in _exclude_list or not _file.lower().endswith('.sql') or (self.exclude_pattern and re.search(self.exclude_pattern, file))): continue sql_files.append(file) os.chdir(prevdir) return sql_files @staticmethod def _create_patch_table_if_not_exists(database): """Create patch table in database if not exists""" sql = 'select * from sys.objects where object_id = object_id(\'_patch_history\') AND type in (\'U\');' exists = database.ExecuteWithResults(sql).Tables[0].Rows.Count > 0 if not exists: sql = """ create table [_patch_history] (id int not null identity(1, 1), name varchar(100) not null, applied_at datetime not null); alter table [_patch_history] add constraint _patch_history_PK primary key clustered (id); """ database.ExecuteNonQuery(sql) return exists def get_cmd_line_parser(): """Get initialized argparse.ArgumentParser object""" parser = argparse.ArgumentParser( description='MSSQL database patch history tool', formatter_class=argparse.RawDescriptionHelpFormatter, epilog='''Example: %(prog)s -c "mssql://sa:123@host\instance/database" -d "D:/1/project/patch"''') parser.add_argument('--connection', '-c', 
required=True, dest='connection', action='store', help='connection string in rfc1738 url format, required') parser.add_argument('--directory', '-d', dest='directory', action='store', default='.', help='directory with patch files') parser.add_argument('--log', '-l', dest='log', action='store', help='log file') parser.add_argument('--noexecute', '-n', action='store_true', dest='noexecute', default=False, help='displays pending script files with no execution') parser.add_argument('--noexecute-fill', '-nf', action='store_true', dest='noexecute_fill', default=False, help='displays pending script files with no execution and fills patch table') parser.add_argument('--stop-on-error', '-soe', action='store_true', dest='stop_on_error', default=False, help='stops execution if any script fails') parser.add_argument('--exclude-pattern', '-ep', dest='exclude_pattern', help='skips files match to regular expression') parser.add_argument('--record-files-only', '-rfo', action='store_true', dest='record_files_only', default=False, help='only file names will be stored to patch table without folder paths') parser.add_argument('--case-insensitive', '-ci', action='store_true', dest='case_insensitive', default=False, help='use case insensitive to compare patch files so "PatchName.sql" and "patchname.sql" is the same') parser.add_argument('--debug', action='store_true', dest='debug', default=False, help='enables debug output') parser.add_argument('--version', '-v', action='version', version='%(prog)s ' + __version__) return parser if __name__ == '__main__': # parser parser = get_cmd_line_parser() parser_args = parser.parse_args() if parser_args.connection is None or parser_args.directory is None: parser.print_help() exit(1) # logging logger = logging.getLogger('mssql') if parser_args.log: fh = logging.FileHandler(parser_args.log) fh.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')) logger.addHandler(fh) ch = logging.StreamHandler() ch.setFormatter(logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')) logger.setLevel(logging.DEBUG if parser_args.debug else logging.INFO) logger.addHandler(ch) # database handle sqlvc = MsSqlVersion(parser_args.connection, parser_args.directory, exclude_pattern=parser_args.exclude_pattern, stop_on_error=parser_args.stop_on_error, case_insensitive=parser_args.case_insensitive, record_files_only=parser_args.record_files_only, logger=logger) if parser_args.noexecute: for patch in sqlvc.get_pending_patches(): logger.info(' ' + patch) elif parser_args.noexecute_fill: sqlvc.fill() else: sqlvc.update()
39.515358
121
0.640266
7,163
0.618673
0
0
704
0.060805
0
0
3,617
0.312403
b9408aacd4d750c790ebb27107e026e183ea1d35
4,296
py
Python
lib/python3.6/site-packages/statsmodels/iolib/tests/test_table_econpy.py
KshitizSharmaV/Quant_Platform_Python
d784aa0604d8de5ba5ca0c3a171e3556c0cd6b39
[ "BSD-3-Clause" ]
1
2020-05-09T08:42:52.000Z
2020-05-09T08:42:52.000Z
statsmodels/iolib/tests/test_table_econpy.py
yanzhenxiong/statsmodels
e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0
[ "BSD-3-Clause" ]
null
null
null
statsmodels/iolib/tests/test_table_econpy.py
yanzhenxiong/statsmodels
e56c4046ff8807c3c16d6a9293b5cb5dfe6f0cd0
[ "BSD-3-Clause" ]
1
2020-05-09T08:42:58.000Z
2020-05-09T08:42:58.000Z
''' Unit tests table.py. :see: http://docs.python.org/lib/minimal-example.html for an intro to unittest :see: http://agiletesting.blogspot.com/2005/01/python-unit-testing-part-1-unittest.html :see: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/305292 ''' from __future__ import absolute_import from statsmodels.compat.python import zip import numpy as np from numpy.testing import assert_equal __docformat__ = "restructuredtext en" from statsmodels.iolib.table import Cell, SimpleTable from statsmodels.iolib.table import default_latex_fmt from statsmodels.iolib.table import default_html_fmt ltx_fmt1 = default_latex_fmt.copy() html_fmt1 = default_html_fmt.copy() txt_fmt1 = dict( data_fmts = ['%0.2f', '%d'], empty_cell = ' ', colwidths = 1, colsep=' * ', row_pre = '* ', row_post = ' *', table_dec_above='*', table_dec_below='*', header_dec_below='*', header_fmt = '%s', stub_fmt = '%s', title_align='r', header_align = 'r', data_aligns = "r", stubs_align = "l", fmt = 'txt' ) cell0data = 0.0000 cell1data = 1 row0data = [cell0data, cell1data] row1data = [2, 3.333] table1data = [ row0data, row1data ] test1stubs = ('stub1', 'stub2') test1header = ('header1', 'header2') #test1header = ('header1\nheader1a', 'header2\nheader2a') tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1, ltx_fmt=ltx_fmt1, html_fmt=html_fmt1) def custom_labeller(cell): if cell.data is np.nan: return 'missing' class TestCell(object): def test_celldata(self): celldata = cell0data, cell1data, row1data[0], row1data[1] cells = [Cell(datum, datatype=i % 2) for i, datum in enumerate(celldata)] for cell, datum in zip(cells, celldata): assert_equal(cell.data, datum) class TestSimpleTable(object): def test_txt_fmt1(self): # Limited test of custom txt_fmt desired = """ ***************************** * * header1 * header2 * ***************************** * stub1 * 0.00 * 1 * * stub2 * 2.00 * 3 * ***************************** """ actual = '\n%s\n' % tbl.as_text() #print('actual') #print(actual) #print('desired') #print(desired) assert_equal(actual, desired) def test_ltx_fmt1(self): # Limited test of custom ltx_fmt desired = r""" \begin{center} \begin{tabular}{lcc} \toprule & \textbf{header1} & \textbf{header2} \\ \midrule \textbf{stub1} & 0.0 & 1 \\ \textbf{stub2} & 2 & 3.333 \\ \bottomrule \end{tabular} \end{center} """ actual = '\n%s\n' % tbl.as_latex_tabular() #print(actual) #print(desired) assert_equal(actual, desired) def test_html_fmt1(self): # Limited test of custom html_fmt desired = """ <table class="simpletable"> <tr> <td></td> <th>header1</th> <th>header2</th> </tr> <tr> <th>stub1</th> <td>0.0</td> <td>1</td> </tr> <tr> <th>stub2</th> <td>2</td> <td>3.333</td> </tr> </table> """ #the previous has significant trailing whitespace that got removed #desired = '''\n<table class="simpletable">\n<tr>\n <td></td> <th>header1</th> <th>header2</th>\n</tr>\n<tr>\n <th>stub1</th> <td>0.0</td> <td>1</td> \n</tr>\n<tr>\n <th>stub2</th> <td>2</td> <td>3.333</td> \n</tr>\n</table>\n''' actual = '\n%s\n' % tbl.as_html() actual = '\n'.join((line.rstrip() for line in actual.split('\n'))) #print(actual) #print(desired) #print len(actual), len(desired) assert_equal(actual, desired) def test_customlabel(self): # Limited test of custom custom labeling tbl = SimpleTable(table1data, test1header, test1stubs, txt_fmt=txt_fmt1) tbl[1][1].data = np.nan tbl.label_cells(custom_labeller) #print([[c.datatype for c in row] for row in tbl]) desired = """ ***************************** * * header1 * header2 * ***************************** * 
stub1 * -- * 1 * * stub2 * 2.00 * 3 * ***************************** """ actual = '\n%s\n' % tbl.as_text(missing='--') assert_equal(actual, desired)
30.041958
261
0.573091
2,781
0.647346
0
0
0
0
0
0
2,019
0.469972
b9409e44daa0d7a262748b347f053c849e397b73
291
py
Python
homeassistant/components/todoist/types.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
30,023
2016-04-13T10:17:53.000Z
2020-03-02T12:56:31.000Z
homeassistant/components/todoist/types.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
24,710
2016-04-13T08:27:26.000Z
2020-03-02T12:59:13.000Z
homeassistant/components/todoist/types.py
MrDelik/core
93a66cc357b226389967668441000498a10453bb
[ "Apache-2.0" ]
11,956
2016-04-13T18:42:31.000Z
2020-03-02T09:32:12.000Z
"""Types for the Todoist component.""" from __future__ import annotations from typing import TypedDict class DueDate(TypedDict): """Dict representing a due date in a todoist api response.""" date: str is_recurring: bool lang: str string: str timezone: str | None
19.4
65
0.697595
184
0.632302
0
0
0
0
0
0
99
0.340206
b9417eb816defb8a05e4de472fa5d06b0845774d
4,237
py
Python
src/c/c_pyzstd.py
corneliusroemer/pyzstd
06f14ad29735d9ae85c188703dcb64c24686c4f2
[ "BSD-3-Clause" ]
29
2020-10-13T03:35:37.000Z
2022-03-14T11:09:47.000Z
src/c/c_pyzstd.py
corneliusroemer/pyzstd
06f14ad29735d9ae85c188703dcb64c24686c4f2
[ "BSD-3-Clause" ]
12
2020-12-22T02:27:47.000Z
2022-03-18T14:54:33.000Z
src/c/c_pyzstd.py
corneliusroemer/pyzstd
06f14ad29735d9ae85c188703dcb64c24686c4f2
[ "BSD-3-Clause" ]
3
2020-11-21T20:57:10.000Z
2021-09-26T01:14:44.000Z
from collections import namedtuple from enum import IntEnum from ._zstd import * from . import _zstd __all__ = (# From this file 'compressionLevel_values', 'get_frame_info', 'CParameter', 'DParameter', 'Strategy', # From _zstd 'ZstdCompressor', 'RichMemZstdCompressor', 'ZstdDecompressor', 'EndlessZstdDecompressor', 'ZstdDict', 'ZstdError', 'decompress', 'get_frame_size', 'compress_stream', 'decompress_stream', 'zstd_version', 'zstd_version_info', 'zstd_support_multithread') # Used in __init__.py _ZSTD_DStreamInSize = _zstd._ZSTD_DStreamInSize _train_dict = _zstd._train_dict _finalize_dict = _zstd._finalize_dict # compressionLevel_values _nt_values = namedtuple('values', ['default', 'min', 'max']) compressionLevel_values = _nt_values(_zstd._ZSTD_defaultCLevel, _zstd._ZSTD_minCLevel, _zstd._ZSTD_maxCLevel) _nt_frame_info = namedtuple('frame_info', ['decompressed_size', 'dictionary_id']) def get_frame_info(frame_buffer): """Get zstd frame infomation from a frame header. Argument frame_buffer: A bytes-like object. It should starts from the beginning of a frame, and needs to include at least the frame header (6 to 18 bytes). Return a two-items namedtuple: (decompressed_size, dictionary_id) If decompressed_size is None, decompressed size is unknown. dictionary_id is a 32-bit unsigned integer value. 0 means dictionary ID was not recorded in the frame header, the frame may or may not need a dictionary to be decoded, and the ID of such a dictionary is not specified. It's possible to append more items to the namedtuple in the future.""" ret_tuple = _zstd._get_frame_info(frame_buffer) return _nt_frame_info(*ret_tuple) class CParameter(IntEnum): """Compression parameters""" compressionLevel = _zstd._ZSTD_c_compressionLevel windowLog = _zstd._ZSTD_c_windowLog hashLog = _zstd._ZSTD_c_hashLog chainLog = _zstd._ZSTD_c_chainLog searchLog = _zstd._ZSTD_c_searchLog minMatch = _zstd._ZSTD_c_minMatch targetLength = _zstd._ZSTD_c_targetLength strategy = _zstd._ZSTD_c_strategy enableLongDistanceMatching = _zstd._ZSTD_c_enableLongDistanceMatching ldmHashLog = _zstd._ZSTD_c_ldmHashLog ldmMinMatch = _zstd._ZSTD_c_ldmMinMatch ldmBucketSizeLog = _zstd._ZSTD_c_ldmBucketSizeLog ldmHashRateLog = _zstd._ZSTD_c_ldmHashRateLog contentSizeFlag = _zstd._ZSTD_c_contentSizeFlag checksumFlag = _zstd._ZSTD_c_checksumFlag dictIDFlag = _zstd._ZSTD_c_dictIDFlag nbWorkers = _zstd._ZSTD_c_nbWorkers jobSize = _zstd._ZSTD_c_jobSize overlapLog = _zstd._ZSTD_c_overlapLog def bounds(self): """Return lower and upper bounds of a parameter, both inclusive.""" # 1 means compression parameter return _zstd._get_param_bounds(1, self.value) class DParameter(IntEnum): """Decompression parameters""" windowLogMax = _zstd._ZSTD_d_windowLogMax def bounds(self): """Return lower and upper bounds of a parameter, both inclusive.""" # 0 means decompression parameter return _zstd._get_param_bounds(0, self.value) class Strategy(IntEnum): """Compression strategies, listed from fastest to strongest. Note : new strategies _might_ be added in the future, only the order (from fast to strong) is guaranteed. """ fast = _zstd._ZSTD_fast dfast = _zstd._ZSTD_dfast greedy = _zstd._ZSTD_greedy lazy = _zstd._ZSTD_lazy lazy2 = _zstd._ZSTD_lazy2 btlazy2 = _zstd._ZSTD_btlazy2 btopt = _zstd._ZSTD_btopt btultra = _zstd._ZSTD_btultra btultra2 = _zstd._ZSTD_btultra2 # Set CParameter/DParameter types for validity check _zstd._set_parameter_types(CParameter, DParameter)
36.213675
80
0.663441
2,203
0.519943
0
0
0
0
0
0
1,632
0.385178
b941e493bd72a0cc29b7f5487a4bd483b40a8fe3
4,414
py
Python
test/unit/data/model/mapping/common.py
quacksawbones/galaxy-1
65f7259b29d3886e526d9be670c60d9da9fbe038
[ "CC-BY-3.0" ]
1,085
2015-02-18T16:14:38.000Z
2022-03-30T23:52:07.000Z
test/unit/data/model/mapping/common.py
quacksawbones/galaxy-1
65f7259b29d3886e526d9be670c60d9da9fbe038
[ "CC-BY-3.0" ]
11,253
2015-02-18T17:47:32.000Z
2022-03-31T21:47:03.000Z
test/unit/data/model/mapping/common.py
quacksawbones/galaxy-1
65f7259b29d3886e526d9be670c60d9da9fbe038
[ "CC-BY-3.0" ]
1,000
2015-02-18T16:18:10.000Z
2022-03-29T08:22:56.000Z
from abc import ABC, abstractmethod from contextlib import contextmanager from uuid import uuid4 import pytest from sqlalchemy import ( delete, select, UniqueConstraint, ) class AbstractBaseTest(ABC): @pytest.fixture def cls_(self): """ Return class under test. Assumptions: if the class under test is Foo, then the class grouping the tests should be a subclass of BaseTest, named TestFoo. """ prefix = len("Test") class_name = self.__class__.__name__[prefix:] return getattr(self.get_model(), class_name) @abstractmethod def get_model(self): pass def dbcleanup_wrapper(session, obj, where_clause=None): with dbcleanup(session, obj, where_clause): yield obj @contextmanager def dbcleanup(session, obj, where_clause=None): """ Use the session to store obj in database; delete from database on exit, bypassing the session. If obj does not have an id field, a SQLAlchemy WHERE clause should be provided to construct a custom select statement. """ return_id = where_clause is None try: obj_id = persist(session, obj, return_id) yield obj_id finally: table = obj.__table__ if where_clause is None: where_clause = _get_default_where_clause(type(obj), obj_id) stmt = delete(table).where(where_clause) session.execute(stmt) def persist(session, obj, return_id=True): """ Use the session to store obj in database, then remove obj from session, so that on a subsequent load from the database we get a clean instance. """ session.add(obj) session.flush() obj_id = obj.id if return_id else None # save this before obj is expunged session.expunge(obj) return obj_id def delete_from_database(session, objects): """ Delete each object in objects from database. May be called at the end of a test if use of a context manager is impractical. (Assume all objects have the id field as their primary key.) """ # Ensure we have a list of objects (check for list explicitly: a model can be iterable) if not isinstance(objects, list): objects = [objects] for obj in objects: table = obj.__table__ stmt = delete(table).where(table.c.id == obj.id) session.execute(stmt) def get_stored_obj(session, cls, obj_id=None, where_clause=None, unique=False): # Either obj_id or where_clause must be provided, but not both assert bool(obj_id) ^ (where_clause is not None) if where_clause is None: where_clause = _get_default_where_clause(cls, obj_id) stmt = select(cls).where(where_clause) result = session.execute(stmt) # unique() is required if result contains joint eager loads against collections # https://gerrit.sqlalchemy.org/c/sqlalchemy/sqlalchemy/+/2253 if unique: result = result.unique() return result.scalar_one() def has_unique_constraint(table, fields): for constraint in table.constraints: if isinstance(constraint, UniqueConstraint): col_names = {c.name for c in constraint.columns} if set(fields) == col_names: return True def has_index(table, fields): for index in table.indexes: col_names = {c.name for c in index.columns} if set(fields) == col_names: return True def collection_consists_of_objects(collection, *objects): """ Returns True iff list(collection) == list(objects), where object equality is determined by primary key equality: object1.id == object2.id. """ if len(collection) != len(objects): # False if lengths are different return False if not collection: # True if both are empty return True # Sort, then compare each member by its 'id' attribute, which must be its primary key. 
collection.sort(key=lambda item: item.id) objects_l = list(objects) objects_l.sort(key=lambda item: item.id) for item1, item2 in zip(collection, objects_l): if item1.id is None or item2.id is None or item1.id != item2.id: return False return True def get_unique_value(): """Generate unique values to accommodate unique constraints.""" return uuid4().hex def _get_default_where_clause(cls, obj_id): where_clause = cls.__table__.c.id == obj_id return where_clause
31.084507
98
0.677843
464
0.10512
756
0.171273
1,076
0.24377
0
0
1,494
0.338469
b9421dbb7e263a5a3de9a9e29e270b09ceba630c
1,004
py
Python
django_events/users/management/commands/create_default_su.py
chrisBrookes93/django-events-management
93886448a7bb85c8758324977ff67bcacc80bbec
[ "MIT" ]
null
null
null
django_events/users/management/commands/create_default_su.py
chrisBrookes93/django-events-management
93886448a7bb85c8758324977ff67bcacc80bbec
[ "MIT" ]
null
null
null
django_events/users/management/commands/create_default_su.py
chrisBrookes93/django-events-management
93886448a7bb85c8758324977ff67bcacc80bbec
[ "MIT" ]
null
null
null
from django.core.management.base import BaseCommand
from django.contrib.auth import get_user_model


class Command(BaseCommand):
    help = "Creates a default super user if one doesn't already exist. " \
           "This is designed to be used in the docker-compose.yml to create an initial super user on deployment."

    def handle(self, *args, **kwargs):
        """
        Checks whether any super users exist and creates a default one if not

        :param args: Unused
        :param kwargs: Unused
        """
        super_users = get_user_model().objects.filter(is_superuser=True)
        if super_users.exists():
            self.stdout.write('A superuser already exists, not creating one')
        else:
            get_user_model().objects.create_superuser(email="[email protected]", password="EventsEvents")
            self.stdout.write('Created default superuser "[email protected]"')
            self.stdout.write('Make sure you change the password immediately!')
41.833333
114
0.661355
897
0.893426
0
0
0
0
0
0
492
0.49004
b942ff3dafb5c886434a478e8bfb0592e83afd1c
6,215
bzl
Python
antlir/bzl/image_layer.bzl
zeroxoneb/antlir
811d88965610d16a5c85d831d317f087797ca732
[ "MIT" ]
28
2020-08-11T16:22:46.000Z
2022-03-04T15:41:52.000Z
antlir/bzl/image_layer.bzl
zeroxoneb/antlir
811d88965610d16a5c85d831d317f087797ca732
[ "MIT" ]
137
2020-08-11T16:07:49.000Z
2022-02-27T10:59:05.000Z
antlir/bzl/image_layer.bzl
zeroxoneb/antlir
811d88965610d16a5c85d831d317f087797ca732
[ "MIT" ]
10
2020-09-10T00:01:28.000Z
2022-03-08T18:00:28.000Z
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ An `image.layer` is a set of `feature` with some additional parameters. Its purpose to materialize those `feature`s as a btrfs subvolume in the per-repo `buck-image/out/volume/targets`. We call the subvolume a "layer" because it can be built on top of a snapshot of its `parent_layer`, and thus can be represented as a btrfs send-stream for more efficient storage & distribution. The Buck output of an `image.layer` target is a JSON file with information on how to find the resulting layer in the per-repo `buck-image/out/volume/targets`. See `SubvolumeOnDisk.to_json_file`. ## Implementation notes The implementation of this converter deliberately minimizes the amount of business logic in its command. The converter must include **only** our interactions with the buck target graph. Everything else should be delegated to subcommands. ### Command In composing the `bash` command, our core maxim is: make it a hermetic function of the converter's inputs -- do not read data from disk, do not insert disk paths into the command, do not do anything that might cause the bytes of the command to vary between machines or between runs. To achieve this, we use Buck macros to resolve all paths, including those to helper scripts. We rely on environment variables or pipes to pass data between the helper scripts. Another reason to keep this converter minimal is that `buck test` cannot make assertions about targets that fail to build. Since we only have the ability to test the "good" targets, it behooves us to put most logic in external scripts, so that we can unit-test its successes **and** failures thoroughly. ### Output We mark `image.layer` uncacheable, because there's no easy way to teach Buck to serialize a btrfs subvolume (for that, we have `package.new`). That said, we should still follow best practices to avoid problems if e.g. the user renames their repo, or similar. These practices include: - The output JSON must store no absolute paths. - Store Buck target paths instead of paths into the output directory. ### Dependency resolution An `image.layer` consumes a set of `feature` outputs to decide what to put into the btrfs subvolume. These outputs are actually just JSON files that reference other targets, and do not contain the data to be written into the image. Therefore, `image.layer` has to explicitly tell buck that it needs all direct dependencies of its `feature`s to be present on disk -- see our `attrfilter` queries below. Without this, Buck would merrily fetch the just the `feature` JSONs from its cache, and not provide us with any of the buid artifacts that comprise the image. We do NOT need the direct dependencies of the parent layer's features, because we treat the parent layer as a black box -- whatever it has laid down in the image, that's what it provides (and we don't care about how). The consequences of this information hiding are: - Better Buck cache efficiency -- we don't have to download the dependencies of the ancestor layers' features. Doing that would be wasteful, since those bits are redundant with what's in the parent. - Ability to use genrule image layers / apply non-pure post-processing to a layer. 
In terms of engineering, both of these non-pure approaches are a terrible idea and a maintainability headache, but they do provide a useful bridge for transitioning to Buck image builds from legacy imperative systems. - The image compiler needs a litte extra code to walk the parent layer and determine what it provides. - We cannot have "unobservable" dependencies between features. Since feature dependencies are expected to routinely cross layer boundaries, feature implementations are forced only to depend on data that can be inferred from the filesystem -- since this is all that the parent layer implementation can do. NB: This is easy to relax in the future by writing a manifest with additional metadata into each layer, and using that metadata during compilation. """ load(":compile_image_features.bzl", "compile_image_features") load(":image_layer_utils.bzl", "image_layer_utils") load(":image_utils.bzl", "image_utils") def image_layer( name, parent_layer = None, features = None, flavor = None, flavor_config_override = None, antlir_rule = "user-internal", **image_layer_kwargs): """ Arguments - `parent_layer`: The name of another `image_layer` target, on top of which the current layer will install its features. - `features`: List of `feature` target paths and/or nameless structs from `feature.new`. - `flavor`: Picks default build options for the layer, including `build_appliance`, RPM installer, and others. See `flavor_helpers.bzl` for details. - `flavor_config_override`: A struct that can override the default values fetched from `REPO_CFG[flavor].flavor_to_config`. - `mount_config`: Specifies how this layer is mounted in the `mounts` field of a `feature` of a parent layer. See the field in `_image_layer_impl` in `image_layer_utils.bzl` - `runtime`: A list of desired helper buck targets to be emitted. `container` is always included in the list by default. See the field in `_image_layer_impl` in `image_layer_utils.bzl` and the [docs](/docs/tutorials/helper-buck-targets#imagelayer) for the list of possible helpers, their respective behaviours, and how to invoke them. """ image_layer_utils.image_layer_impl( _rule_type = "image_layer", _layer_name = name, # Build a new layer. It may be empty. _make_subvol_cmd = compile_image_features( name = name, current_target = image_utils.current_target(name), parent_layer = parent_layer, features = features, flavor = flavor, flavor_config_override = flavor_config_override, ), antlir_rule = antlir_rule, **image_layer_kwargs )
44.078014
79
0.740628
0
0
0
0
0
0
0
0
5,488
0.883025
b943636ba1006005819134b02620af2faa23d559
84
py
Python
python/testData/debug/test_ignore_lib.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2019-04-28T07:48:50.000Z
2020-12-11T14:18:08.000Z
python/testData/debug/test_ignore_lib.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
173
2018-07-05T13:59:39.000Z
2018-08-09T01:12:03.000Z
python/testData/debug/test_ignore_lib.py
jnthn/intellij-community
8fa7c8a3ace62400c838e0d5926a7be106aa8557
[ "Apache-2.0" ]
2
2020-03-15T08:57:37.000Z
2020-04-07T04:48:14.000Z
from calendar import setfirstweekday

stopped_in_user_file = True
setfirstweekday(15)
28
36
0.880952
0
0
0
0
0
0
0
0
0
0
b9443b673da6e4fd8c252e11eba4606e69192845
1,036
py
Python
promt_tr/__main__.py
ffreemt/promt-tr-free
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
[ "MIT" ]
null
null
null
promt_tr/__main__.py
ffreemt/promt-tr-free
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
[ "MIT" ]
null
null
null
promt_tr/__main__.py
ffreemt/promt-tr-free
ff20b0f176f9611fa5a834af5aeaa9ef6ca3a3ee
[ "MIT" ]
null
null
null
''' __main__, to run:
python -m promt_tr
'''
import sys
from random import randint

from promt_tr import promt_tr, LANG_CODES  # pragma: no cover


def main():
    '''main'''
    from_lang = 'auto'
    to_lang = 'zh'
    text = 'test ' + str(randint(0, 10000))

    if not sys.argv[1:]:
        print('Provide some English text, with an optional to_lang')
        print('E.g., python -m promt_tr test this and that de')
        print('Testing with some random text\n')
    else:
        argv = sys.argv[1:]
        len_ = len(argv)
        if len_ == 1:
            if argv[0] in LANG_CODES:
                to_lang = argv[0]
            else:
                text = argv[0]
        elif argv[-1] in LANG_CODES:
            to_lang = argv[-1]
            text = ' '.join(argv[:-1])
        else:
            text = ' '.join(argv)

    for to_lang in ['zh', 'de', 'fr', 'it', 'es']:
        resu = promt_tr(text, from_lang, to_lang)
        print(f'[{text}] translated to [{to_lang}]: [{resu}]')


if __name__ == '__main__':
    main()
23.545455
68
0.527027
0
0
0
0
0
0
0
0
306
0.295367
b9458ab72f55b4db845f6d76e44dba3b00e000ed
6,265
py
Python
src/features/v3/proc_v3_n1_calc_distance.py
askoki/nfl_dpi_prediction
dc3256f24ddc0b6725eace2081d1fb1a7e5ce805
[ "MIT" ]
null
null
null
src/features/v3/proc_v3_n1_calc_distance.py
askoki/nfl_dpi_prediction
dc3256f24ddc0b6725eace2081d1fb1a7e5ce805
[ "MIT" ]
null
null
null
src/features/v3/proc_v3_n1_calc_distance.py
askoki/nfl_dpi_prediction
dc3256f24ddc0b6725eace2081d1fb1a7e5ce805
[ "MIT" ]
null
null
null
import os import sys import pandas as pd from datetime import datetime from settings import RAW_DATA_DIR, DataV3, DATA_V3_SUBVERSION from src.features.helpers.processing import add_missing_timestamp_values from src.features.helpers.processing_v3 import get_closest_players, get_players_and_ball_indices, calculate_distance, \ normalize_according_to_play_direction, check_group_event from src.features.helpers.processing_v4 import home_has_possession, calculate_team_sitation week_num = int(sys.argv[1]) data_v3 = DataV3(DATA_V3_SUBVERSION) save_file_path = data_v3.get_step1_checkpoint_path(week_num) try: clean_df = pd.read_csv(save_file_path) save_file_exists = True except FileNotFoundError: save_file_exists = False if not save_file_exists: print("Started loading data") play_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'plays.csv')) games_df = pd.read_csv(os.path.join(RAW_DATA_DIR, 'games.csv')) week_and_games = games_df[games_df.week == week_num] tracking_df = pd.read_csv(os.path.join(RAW_DATA_DIR, f'week{week_num}.csv')) print("Data loaded. Start processing timestamps") tracking_df = add_missing_timestamp_values(tracking_df) games_n_plays_df = play_df.merge(week_and_games, how='inner', on='gameId') m_grouped = games_n_plays_df.groupby(['gameId', 'playId']) df_t = tracking_df.merge(games_n_plays_df, how='left', on=['gameId', 'playId']) # Remove all events without 'pass_forward' df_t_grouped = df_t.groupby(['gameId', 'playId']) df_t_v3 = df_t.copy().sort_index() for name, group in df_t_grouped: game_id, play_id = name # if group does not contain pass forward, drop it if all(group.event != 'pass_forward'): df_t_v3 = df_t_v3[(df_t_v3.gameId != game_id) | (df_t_v3.playId != play_id)] df_t_v3_s = df_t_v3.sort_values(by=['gameId', 'playId', 'time', 'event']) df_t_v3_s = df_t_v3_s.reset_index(drop=True) df_t_grouped = df_t_v3_s.groupby(['gameId', 'playId']) # remove all values before 'pass_forward' print("Removing all values before pass forward event...") for name, group in df_t_grouped: game_id, play_id = name pass_forward_frame_id = group[group.event == 'pass_forward'].index.min() - 1 remove_start = group.index.min() df_t_v3_s = df_t_v3_s.drop(df_t_v3_s.loc[remove_start:pass_forward_frame_id].index) pd.options.mode.chained_assignment = None gb = df_t_v3_s.groupby(['gameId', 'playId']) print('Getting closest players...') keep_indices = [] for name, group in gb: game_id, play_id = name try: event_3rd = group.event.unique()[2] except IndexError: print('Number of events is < 3, skipping...') continue situation_df = group[group.event == event_3rd] # convert dataframe into series ball_row = situation_df[situation_df.team == 'football'].head(1) # remove ball player_situation_df = situation_df[situation_df.team != 'football'] try: p1, p2 = get_closest_players(player_situation_df, ball_row.x.item(), ball_row.y.item()) except ValueError: print('Value Error raised. 
This group will be skipped.') continue p_n_b_indices = get_players_and_ball_indices(group, p1, p2) if p_n_b_indices: keep_indices.extend(p_n_b_indices) clean_df = df_t_v3_s[df_t_v3_s.index.isin(keep_indices)] clean_df.to_csv( save_file_path, index=False ) print('Normalize...') clean_df = normalize_according_to_play_direction(clean_df) clean_df['homeHasPossession'] = clean_df.apply( lambda row: home_has_possession(row), axis=1 ) clean_df['teamSituation'] = clean_df.apply( lambda row: calculate_team_sitation(row), axis=1 ) print('Creating features...') min_df = clean_df[[ 'time', 'x', 'y', 's', 'o', 'dir', 'event', 'team', 'gameId', 'playId', 'frameId', 'isDefensivePI' ]] gb_2 = clean_df.groupby(['gameId', 'playId', 'frameId']) # ball direction and orientation are NaN calc_df = pd.DataFrame( columns=[ 'time', 'att_def_d', 'att_ball_d', 'def_ball_d', 'att_s', 'def_s', 'ball_s', 'att_o', 'def_o', 'att_dir', 'def_dir', 'event', 'gameId', 'playId', 'frameId', 'isDefensivePI' ] ) GROUP_SIZE_MINIMUM = 3 for name, group in gb_2: game_id, play_id, frameId = name if len(group) < GROUP_SIZE_MINIMUM: continue ball = group[group.teamSituation == 'football'].head(1).squeeze() p_att = group[group.teamSituation == 'attacking'].head(1).squeeze() p_def = group[group.teamSituation == 'defending'].head(1).squeeze() group_row = group.head(1).squeeze() group_events = group.event.unique().tolist() dict_to_append = { 'time': group_row.time, 'att_def_d': calculate_distance(p_att.x, p_att.y, p_def.x, p_def.y), 'att_ball_d': calculate_distance(p_att.x, p_att.y, ball.x, ball.y), 'def_ball_d': calculate_distance(p_def.x, p_def.y, ball.x, ball.y), 'att_s': p_att.s, 'def_s': p_def.s, 'ball_s': ball.s, 'att_a': p_att.a, 'def_a': p_def.a, 'ball_a': ball.a, 'att_o': p_att.o, 'def_o': p_def.o, 'att_dir': p_att.dir, 'def_dir': p_def.dir, 'event': group_row.event, 'pass_arrived': check_group_event(group_events, 'pass_arrived'), 'pass_outcome_caught': check_group_event(group_events, 'pass_outcome_caught'), 'tackle': check_group_event(group_events, 'tackle'), 'first_contact': check_group_event(group_events, 'first_contact'), 'pass_outcome_incomplete': check_group_event(group_events, 'pass_outcome_incomplete'), 'out_of_bounds': check_group_event(group_events, 'out_of_bounds'), 'week': week_num, 'gameId': group_row.gameId, 'playId': group_row.playId, 'frameId': group_row.frameId, 'isDefensivePI': group_row.isDefensivePI } calc_df = calc_df.append( dict_to_append, ignore_index=True ) print("Saving data...") calc_df.to_csv( data_v3.get_step1_end_path(week_num), index=False ) print(f'End time: {datetime.now().strftime("%H:%M:%S")}')
35.596591
119
0.675499
0
0
0
0
0
0
0
0
1,443
0.230327
b945e094a775936b9b256c03b9ad1404cebcb291
1,312
py
Python
annotate-preprocessed.py
Rajpratik71/devel-scripts
068285719a13b02889b1314361cc5bdb764d9a3a
[ "Apache-2.0" ]
null
null
null
annotate-preprocessed.py
Rajpratik71/devel-scripts
068285719a13b02889b1314361cc5bdb764d9a3a
[ "Apache-2.0" ]
null
null
null
annotate-preprocessed.py
Rajpratik71/devel-scripts
068285719a13b02889b1314361cc5bdb764d9a3a
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/python
"""Annotates -E preprocessed source input with line numbers.

Read std input, then annotate each line with line number based on previous
expanded line directives from -E output. Useful in the context of compiler
debugging.

"""

import getopt
import os
import re
import sys

import script_utils as u

flag_reverse = True


def usage(msgarg):
  """Print usage and exit."""
  if msgarg:
    sys.stderr.write("error: %s\n" % msgarg)
  print """\
    usage:  %s [options] < input > output

    options:
    -d    increase debug msg verbosity level

    """ % os.path.basename(sys.argv[0])
  sys.exit(1)


def parse_args():
  """Command line argument parsing."""
  global flag_reverse

  try:
    optlist, _ = getopt.getopt(sys.argv[1:], "dr")
  except getopt.GetoptError as err:
    # unrecognized option
    usage(str(err))

  for opt, _ in optlist:
    if opt == "-d":
      u.increment_verbosity()
    elif opt == "-r":
      flag_reverse = False


# Setup
u.setdeflanglocale()
parse_args()

# Read
lines = sys.stdin.readlines()
lnum = -1
matcher = re.compile(r"^\#\s+(\d+)\s+\"(\S+)\".*$")
for line in lines:
  m = matcher.match(line)
  if m:
    lnum = int(m.group(1))
    afile = m.group(2)
    print "<%s:%d>" % (afile, lnum)
    continue
  print "%d:%s" % (lnum, line.strip())
  lnum += 1
19.014493
74
0.636433
0
0
0
0
0
0
0
0
525
0.400152
b94613d2fb24bf9487b3045eae02b837543d3647
2,547
py
Python
pages/lstm.py
tekeburak/dam-occupancy-model
f39d436bf27088068177245f0180cafaa56ad123
[ "MIT" ]
8
2021-01-24T14:56:23.000Z
2021-03-26T18:10:33.000Z
pages/lstm.py
tekeburak/dam-occupancy-model
f39d436bf27088068177245f0180cafaa56ad123
[ "MIT" ]
null
null
null
pages/lstm.py
tekeburak/dam-occupancy-model
f39d436bf27088068177245f0180cafaa56ad123
[ "MIT" ]
6
2021-01-24T14:44:49.000Z
2021-03-21T17:50:30.000Z
import streamlit as st
import tensorflow as tf
import numpy
from utils.get_owm_data import get_open_weather_map_data
from utils.get_date import get_date_list_for_gmt

import plotly.graph_objects as go
from plotly import tools
import plotly.offline as py
import plotly.express as px


def app():
    st.title("LSTM Model")

    st.subheader('What does LSTM model do?')
    st.markdown("""<p style='text-align: justify;'>LSTM networks are an extension of recurrent neural networks (RNNs) mainly introduced to handle situations where RNNs fail. It has been so designed that the vanishing gradient problem is almost completely removed, while the training model is left unaltered. Long-time lags in certain problems are bridged using LSTMs where they also handle noise, distributed representations, and continuous values.</p>""", unsafe_allow_html=True)

    st.subheader('Why we chose LSTM?')
    st.markdown("""<p style='text-align: justify;'>LSTM is well-suited to classify, process and predict time series given time lags of unknown duration. Relative insensitivity to gap length gives an advantage to LSTM over alternative RNNs, hidden Markov models and other sequence learning methods. In addition, LSTM works great because LSTM cells have a memory that can store previous timestep information and this is how it learns.</p>""", unsafe_allow_html=True)

    st.subheader('LSTM model input and output')
    st.markdown("Model input is 7 days daily weather data from [OpenWeatherAPI](https://openweathermap.org/api). Model input features are *Rain*, *MaxTemp*, *MinTemp*, *AvgWind*, *AvgHumidity* and *AvgPressure*. Model predicts 7 days dam occupancy rate of İstanbul using these features.", unsafe_allow_html=True)

    LSTM_model_name = 'models/LSTM_model.h5'
    model_lstm = tf.keras.models.load_model(LSTM_model_name)

    features = get_open_weather_map_data()

    prediction_lstm = model_lstm.predict(features) * 100
    prediction_lstm = prediction_lstm.ravel()

    date_list = get_date_list_for_gmt()

    data = []
    layout = go.Layout(
        title= "<b>LSTM Dam Occupancy Forecasting Plot</b>",paper_bgcolor = 'rgb(248, 248, 255)',plot_bgcolor = 'rgb(248, 248, 255)',barmode = "stack",
        xaxis = dict(title="Time", linecolor="#BCCCDC",showspikes=True,spikethickness=2,spikedash="dot",spikecolor= "#ffffff",spikemode="across",),
        yaxis= dict(title="Dam Occupancy Rate (%)",linecolor="#021C1E"))
    line_chart= go.Scatter(x=date_list, y=prediction_lstm, marker_color='rgb(0, 200, 200)' )
    data.append(line_chart)
    fig= go.Figure(data=data, layout=layout)
    st.plotly_chart(fig)
50.94
476
0.773852
0
0
0
0
0
0
0
0
1,420
0.5573
b9475ee1123a7f8c87eb161ddf2246d4b5a64a79
1,847
py
Python
fst_web/demo_settings.py
kamidev/autobuild_fst
6baffa955075ffe3c5f197789e9fd065fa74058e
[ "BSD-3-Clause" ]
null
null
null
fst_web/demo_settings.py
kamidev/autobuild_fst
6baffa955075ffe3c5f197789e9fd065fa74058e
[ "BSD-3-Clause" ]
null
null
null
fst_web/demo_settings.py
kamidev/autobuild_fst
6baffa955075ffe3c5f197789e9fd065fa74058e
[ "BSD-3-Clause" ]
null
null
null
# -*- coding: utf-8 -*-
import os

ROOT = os.path.abspath(os.path.dirname(__file__))
path = lambda *args: os.path.join(ROOT, *args)

"""
Template for local settings of the FST webservice (fst_web)

Please edit this file and replace all generic values with values suitable
to your particular installation.
"""

# NOTE! Always set this to False before deploying
DEBUG = True

# NOTE! Before deploying on a public server, uncomment ALLOWED_HOSTS
# and add IP address and/or domain of your site
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'fst.magokoro.nu']

# Look for instance-specific settings
try:
    from .instance_settings import *
except ImportError:
    from .default_instance_settings import *

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': path('database/fst_demo.db')
    }
}

LOG_LEVEL = "DEBUG"

# Enable this to override global DB Debug setting
# DB_DEBUG_LEVEL = "DEBUG"

# Setup mail server for sending email notifications.
# You can use any mail server you want.
# But a very simple way to get started is to use a gmail account.
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
# EMAIL_HOST_USER = 'your email'
# EMAIL_HOST_PASSWORD = 'your password'

# Admins specified here receive email notifications on critical errors.
ADMINS = ()
MANAGERS = ADMINS

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = os.path.join("/dokument/")

# Site and port for hosting FST service (do not add ending '/').
FST_SITE_URL = "http://127.0.0.1:8000"

# TODO - Check if FST_INSTANCE_PREFIX can be removed
# Site and port of specific FST instance (do not add ending '/').
FST_INSTANCE_URL = os.path.join(
    "http://127.0.0.1:8000",
    FST_INSTANCE_PREFIX)
28.415385
76
0.721711
0
0
0
0
0
0
0
0
1,302
0.704927
b947d963b017c12ec37d222b3722de432bf97da6
8,891
py
Python
BookingScraper-joao_v2/BookingScraper/airbnb.py
joaocamargo/estudos-python
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
[ "MIT" ]
1
2019-10-09T12:56:13.000Z
2019-10-09T12:56:13.000Z
BookingScraper-joao_v2/BookingScraper/airbnb.py
joaocamargo/estudos-python
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
[ "MIT" ]
null
null
null
BookingScraper-joao_v2/BookingScraper/airbnb.py
joaocamargo/estudos-python
c5fbf59a1f06131d9789dca7dbdfdcf2200d0227
[ "MIT" ]
null
null
null
#! /usr/bin/env python3.6 import argparse import argcomplete from argcomplete.completers import ChoicesCompleter from argcomplete.completers import EnvironCompleter import requests from bthread import BookingThread from bs4 import BeautifulSoup from file_writer import FileWriter hotels = [] def get_countries(): with open("europa2020.txt", "r") as f: countries = f.read().splitlines() return countries def get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim): print('get_booking_page(session, offset, rooms, country, dest_id, DayIni, DayFim):') print(session, offset, rooms, country, dest_id, DayIni, DayFim) diaInicial = str(int(DayIni[0:2])) mesInicial = str(int(DayIni[3:5])) anoInicial = str(int(DayIni[6:10])) diaFinal = str(int(DayFim[0:2])) mesFinal = str(int(DayFim[3:5])) anoFinal = str(int(DayFim[6:10])) ''' Make request to airbnb page and parse html :param offset: :return: html page ''' url = 'https://www.airbnb.com.br/s/Londres/'\ 'homes?refinement_paths%5B%5D=%2Fhomes&current_tab_id=home_tab&selected_tab_id=home_tab&source=mc_search_bar&search_type=unknown'\ '&click_referer=t%3ASEE_ALL%7Csid%3A874f16ee-6196-4289-9717-17dec73e1e5c%7Cst%3AMAGAZINE_HOMES&screen_size=large&hide_dates_and_guests_filters=false'\ '&ne_lat=51.80546533345978&ne_lng=0.4969575708007312&sw_lat=51.17528882051496&sw_lng=-0.8200285131836154&zoom=10&search_by_map=false&checkin={anoInicial}-{mesInicial}-{diaInicial}'\ '&checkout={anoFinal}-{mesFinal}-{diaFinal}&adults={rooms}&property_type_id%5B%5D=1&property_type_id%5B%5D=43&property_type_id%5B%5D=47'\ '&place_id=ChIJdd4hrwug2EcRmSrV3Vo6llI&room_types%5B%5D=Entire%20home%2Fapt'\ '&section_offset=6&items_offset=18'.format(rooms=rooms, country=country.replace(' ', '+'),anoFinal=anoFinal,mesFinal=mesFinal,diaInicial=diaInicial,mesInicial=mesInicial,anoInicial=anoInicial,diaFinal=diaFinal,dest_id=dest_id) + str(offset) r = requests.get(url, headers= {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:47.0)' ' Gecko/20100101 Firefox/48.0'}) html = r.content print(url) parsed_html = BeautifulSoup(html, 'lxml') return parsed_html def process_hotels(session, offset, rooms, country, dest_id, DayIni, DayFim): parsed_html = get_booking_page(session, offset, rooms, country, dest_id,DayIni, DayFim) hotel = parsed_html.find_all('div', {'class': 'sr_item'}) for ho in hotel: #print("ho.find('a', {'class': 'jq_tooltip'})") #print(ho.find('a', {'class': 'jq_tooltip'})) #name = ho.find('a', {'class': 'jq_tooltip'})['data-title'] print("ho.find('span', {'class': 'sr-hotel__name'})") #print(ho.find('span', {'class': 'sr-hotel__name'})) if ho.find('span', {'class': 'sr-hotel__name'}) is not None: name = str(ho.find('span', {'class': 'sr-hotel__name'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','') else: name = '-1' if ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}) is not None: price = ho.find('div', {'class': 'bui-price-display__value prco-inline-block-maker-helper'}).text.replace('\n','').replace("b","").replace("'","") else: price = '-1' if ho.find('span', {'class': '_ky9opu0'}) is not None: nota = str(ho.find('span', {'class': '_ky9opu0'}).text.replace('\n','').replace("b","").replace("'","")) else : nota = '-1' if ho.find('span', {'title': 'This is the straight-line distance on the map. Actual travel distance may vary.'}) is not None: distance = str(ho.find('span', {'title': 'This is the straight-line distance on the map. 
Actual travel distance may vary.'}).text.encode('utf-8')).replace('\\n','').replace("b","").replace("'","").replace('\\','') else : distance = '-1' # if ho.find('a', {'class': 'bui-link'}) is not None : # result = [str(item) for item in ho.find_all('span', attrs={'data-bui-component' : 'Tooltip'})] # print('TAMANHO TOOLTIP', str(len(result))) # for i in result: # print(i) # for i in result: # if i in 'km': # distance = str(i) # else: # distance = '----' # else: # distance = '----' # if len(result) ==1: # if result[0] in 'km': # distance = result # else: # distance = 'aaaaa' + str(len(result)) # else: # distance = '---' hotels.append(DayIni+';'+DayFim+';'+name + ';' + price + ';' + nota + ';' + distance) #hotels.append(str(len(hotels) + 1) + ' : ' + name + ' : ' + price) def prep_data(rooms=1, country='Macedonia', dest_id='-1', DayIni='01/01/2019', DayFim='02/01/2019', out_format=None): ''' Prepare data for saving :return: hotels: set() ''' offset = 1 session = requests.Session() parsed_html = get_booking_page(session, offset, rooms, country, dest_id, DayIni,DayFim) all_offset = parsed_html.find_all('li', {'class': 'sr_pagination_item'})[-1].get_text().splitlines()[-1] threads = [] for i in range(int(all_offset)): offset += 1 t = BookingThread(session, offset, rooms, country,dest_id,DayIni, DayFim, process_hotels) threads.append(t) for t in threads: t.start() for t in threads: t.join() hotels2 = hotels return hotels2 def get_data(rooms=1, country='Macedonia', dest_id='-1',DayIni='01/01/2019',DayFim='02/01/2019', out_format=None): ''' Get all accomodations in Macedonia and save them in file :return: hotels-in-macedonia.{txt/csv/xlsx} file ''' print('Procurando por',country) hotels_list = prep_data(rooms, country,dest_id, DayIni, DayFim, out_format) save_data(hotels_list , out_format=out_format, country=country) def save_data(data, out_format, country): ''' Saves hotels list in file :param data: hotels list :param out_format: json, csv or excel :return: ''' writer = FileWriter(data, out_format, country) file = writer.output_file() print('All accommodations are saved.') print('You can find them in', file, 'file') if __name__ == "__main__": parser = argparse.ArgumentParser() countries = get_countries() parser.add_argument("--rooms", help='Add the number of rooms to the booking request.', default=1, type=int, nargs='?') parser.add_argument("--country", help='Add the country to the booking request.', default='Macedonia', nargs='?').completer = ChoicesCompleter(countries) parser.add_argument("--dest_id", help='Add the country to the booking request.', default='0', nargs='?') parser.add_argument("--DayIni", help='Data inicial', default='01/01/2019', nargs='?') parser.add_argument("--DayFim", help='Data inicial', default='02/01/2019', nargs='?') parser.add_argument("--out_format", help='Add the format for the output file. 
Add excel, json or csv.', default='json', choices=['json', 'excel', 'csv'], nargs='?').completer = EnvironCompleter argcomplete.autocomplete(parser) args = parser.parse_args() localidades = [{ 'Pais': 'London', 'dest_id': '-2601889' }, { 'Pais': 'Utrecht', 'dest_id': '-2154382' }, { 'Pais': 'Buzios', 'dest_id': '-626254' }, { 'Pais': '', 'dest_id': '' }] countryAux = [d['Pais'] for d in localidades if args.dest_id in d['dest_id']] if len(countryAux)>0: country = countryAux[0] print('Parametros') print(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format) get_data(args.rooms, country,args.dest_id,args.DayIni,args.DayFim, args.out_format) else: country = 'Nao Identificado' locais = [d['Pais'] + ':' + d['dest_id'] for d in localidades if d['Pais'] != ''] print('----------') print('Utilize uma das seguintes localizações') for i in locais: print(i) print('----------')
37.995726
250
0.576313
0
0
0
0
0
0
0
0
3,665
0.412122
b94890b4860019fd993040c0790c0701fc24a0c5
2,919
py
Python
main.py
valurhrafn/chromium-sync
df5e3299d179fc47ff34d1a95409383f46aac4d4
[ "MIT" ]
4
2017-03-27T02:25:07.000Z
2021-03-07T21:40:58.000Z
main.py
valurhrafn/chromium-sync
df5e3299d179fc47ff34d1a95409383f46aac4d4
[ "MIT" ]
null
null
null
main.py
valurhrafn/chromium-sync
df5e3299d179fc47ff34d1a95409383f46aac4d4
[ "MIT" ]
null
null
null
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from google.appengine.api import users import webapp2 # For datastore import cgi import urllib from google.appengine.ext import ndb class UserId(ndb.Model): content = ndb.StringProperty() date = ndb.DateTimeProperty(auto_now_add=True) @classmethod def query_user(cls, ancestor_key): return cls.query(ancestor=ancestor_key).order(-cls.date) # ************** MainHandler ************* # class MainHandler(webapp2.RequestHandler): def get(self): self.response.write('Hello world!') # ************** GetUser ************* # class GetUser(webapp2.RequestHandler): def get(self): self.response.out.write('<html><body>') client_id = self.request.get('client_id') ancestor_key = ndb.Key("ID", client_id or "*no_id*") userids = UserId.query_user(ancestor_key).fetch(20) self.response.out.write('her er eitthvad') for userid in userids: self.response.out.write('<blockquote>%s</blockquote>' % cgi.escape(userid.content)) # Checks for active Google account session # user = users.get_current_user() # if user: # self.response.headers['Content-Type'] = 'text/plain' # self.response.write('Hello, ' + user.nickname()) # else: # self.redirect(users.create_login_url(self.request.uri)) self.response.out.write('</body></html>') def post(self): pass # ************** HasData ************* # class HasData(webapp2.RequestHandler): def get(self): pass #TODO does user have data class PostData(webapp2.RequestHandler): def post(self): client_id = self.request.get('client_id') chrome_user = UserId(parent=ndb.Key("ID", client_id or "*no_id*"), content = self.request.get('client_id')) chrome_user.put() #TODO recieve data from client class GetSyncData(object): """docstring for GetSyncData""" def __init__(self, arg): super(GetSyncData, self).__init__() self.arg = arg #implement get data for user # property user.email() or user.user_id() app = webapp2.WSGIApplication([ ('/', MainHandler), ('/GetUser/', GetUser), ('/HasData/', HasData), ('/chrome-sync/command/', PostData), ('/GetSyncData/', GetSyncData) ], debug=True)
30.40625
74
0.647825
1,838
0.629668
0
0
110
0.037684
0
0
1,356
0.464543
b94a534d42db78fa886439d7fdfdf20e0f8b2504
1,434
py
Python
comet/service/subscriber.py
dneise/Comet
abaa0da65d69f90a5262d81416477b4e71deb2ad
[ "BSD-2-Clause" ]
15
2015-11-29T18:53:58.000Z
2022-03-09T15:47:30.000Z
comet/service/subscriber.py
dneise/Comet
abaa0da65d69f90a5262d81416477b4e71deb2ad
[ "BSD-2-Clause" ]
29
2016-01-21T18:10:45.000Z
2021-10-01T16:41:12.000Z
comet/service/subscriber.py
dneise/Comet
abaa0da65d69f90a5262d81416477b4e71deb2ad
[ "BSD-2-Clause" ]
11
2016-01-22T14:05:51.000Z
2022-03-09T17:49:56.000Z
# Comet VOEvent Broker.

from twisted.application.internet import ClientService

from comet.protocol.subscriber import VOEventSubscriberFactory

__all__ = ["makeSubscriberService"]


def makeSubscriberService(endpoint, local_ivo, validators, handlers, filters):
    """Create a reconnecting VOEvent subscriber service.

    Parameters
    ----------
    endpoint : implements `twisted.internet.interfaces.IStreamClientEndpoint`
        The endpoint to which the service will connect.
    local_ivo : `str` or `None`
        IVOA identifier for the subscriber.
    validators : `list` of implementers of `~comet.icomet.IValidator`.
        Validators which will be applied to incoming events. Events which
        fail validation will be rejected.
    handlers : `list` of implementers of `~comet.icomet.IHandler`.
        Handlers to which events which pass validation will be passed.
    filters : `list` of `str`
        XPath filters. Will be passed to upstream as a request to filter
        the alerts being sent.

    Notes
    -----
    Upstream brokers may not provide support for XPath filtering; in this
    case, the filters supplied will be ignored.

    Reconnection is handled according to the default policies of
    `twisted.application.internet.ClientService`.
    """
    factory = VOEventSubscriberFactory(local_ivo, validators, handlers, filters)
    service = ClientService(endpoint, factory)
    return service
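A minimal wiring sketch for the factory function above, assuming makeSubscriberService is in scope; the endpoint string, broker host, IVOA identifier and the empty validator/handler lists are illustrative placeholders, not values from the repository:

from twisted.application.service import Application
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString

# Hypothetical broker address; validators/handlers/filters left empty for brevity.
endpoint = clientFromString(reactor, "tcp:host=voevent.example.org:port=8099")
subscriber = makeSubscriberService(
    endpoint,
    local_ivo="ivo://example/subscriber",
    validators=[],
    handlers=[],
    filters=[],
)

application = Application("comet-subscriber")
subscriber.setServiceParent(application)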
35.85
80
0.727336
0
0
0
0
0
0
0
0
1,065
0.742678
b94c3a86b197fdae8da6f36cf6af0eeecde07155
13,008
py
Python
scripts/master/cros_try_job_git.py
bopopescu/build
4e95fd33456e552bfaf7d94f7d04b19273d1c534
[ "BSD-3-Clause" ]
null
null
null
scripts/master/cros_try_job_git.py
bopopescu/build
4e95fd33456e552bfaf7d94f7d04b19273d1c534
[ "BSD-3-Clause" ]
null
null
null
scripts/master/cros_try_job_git.py
bopopescu/build
4e95fd33456e552bfaf7d94f7d04b19273d1c534
[ "BSD-3-Clause" ]
1
2020-07-23T11:05:06.000Z
2020-07-23T11:05:06.000Z
# Copyright (c) 2012 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import base64 import json import os import re import shutil import zlib from StringIO import StringIO try: # Create a block to work around evil sys.modules manipulation in # email/__init__.py that triggers pylint false positives. # pylint: disable=E0611,F0401 from email.Message import Message from email.Utils import formatdate except ImportError: raise from buildbot.process.properties import Properties from buildbot.schedulers.trysched import TryBase from twisted.internet import defer, reactor, utils from twisted.mail.smtp import SMTPSenderFactory from twisted.python import log from common.twisted_util.response import StringResponse from master import gitiles_poller from master.try_job_base import BadJobfile class CbuildbotConfigs(object): # Valid 'etc' builder targets. Specifically, this ensures: # - The build name doesn't begin with a flag ('--') # - The build name doesn't contain spaces (to spill into extra args). _ETC_TARGET_RE = re.compile(r'^[a-zA-Z][\w-]+\w$') def __init__(self, configs, etc_builder=None): """Holds base state of the master's try job related configuration. configs (dict): A dictionary of all known CrOS configs. This will be as up-to-date as the Chromite pin. etc_builder (str): If not None, the name of the etc builder. """ self.configs = configs self.etc_builder = etc_builder def AddBuildBucketHooks(self, c): """Build mutation hook called via BuildBucket when scheduling builds. The cbuildbot config is specified in the `cbb_config` property. The callback transforms that property to an actual waterfall builder name by mapping it based on its config. If an 'etc' builder is configured and the config name is unknown, it will be mapped to the 'etc' builder if possible. A tryserver BuildBucket build takes the form: - Empty `builder_name` parameter. If one is supplied, it will be ignored. - BuildBot changes can be added by including one or more BuildBucket `changes` parameters: [{'author': {'email': '[email protected]'}}]. - `cbb_config` property must be set to the build's cbuildbot config target. - `extra_args` property (optional) may be a JSON list of additional parameters to pass to the tryjob. - `slaves_request` property (optional) may be a JSON list of slaves on which this build may run. - Additional BuildBot properties may be added. NOTE: Internally, all of these parameters are converted to BuildBot properties and referenced as such in other areas of code. The Git poller also constructs the same property set, so code paths converge. """ def params_hook(params, _build): # Map `cbb_config` to a builder name. properties = params.get('properties', {}) config_name = properties.get('cbb_config') if not config_name: raise ValueError('Missing required `cbb_config` property.') params['builder_name'] = self.GetBuilderForConfig(config_name) # Validate other fields. if not isinstance(properties.get('extra_args', []), list): raise ValueError('`extra_args` property is not a list.') if not isinstance(properties.get('slaves_request', []), list): raise ValueError('`slaves_request` is not a list.') # Add mandatory properties to build. 
params['properties'] = properties c['buildbucket_params_hook'] = params_hook def GetBuilderForConfig(self, config_name): config = self.configs.get(config_name) if config: return config['_template'] or config_name self.ValidateEtcBuild(config_name) return self.etc_builder def ValidateEtcBuild(self, config_name): """Tests whether a specified build config_name is candidate for etc build. Raises a ValueError if an etc build cannot be dispatched. """ if not self.etc_builder: raise ValueError('etc builder is not configured.') if not config_name: raise ValueError('Empty config name') if not self._ETC_TARGET_RE.match(config_name): raise ValueError('invalid etc config name (%s).' % (config_name,)) def translate_v1_to_v2(parsed_job): """Translate tryjob desc from V1 to V2.""" parsed_job.setdefault('extra_args', []).append('--remote-trybot') parsed_job['version'] = 2 def translate_v2_to_v3(parsed_job): """Translate tryjob desc from V2 to V3.""" # V3 --remote-patches format is not backwards compatible. if any(a.startswith('--remote-patches') for a in parsed_job.get('extra_args', ())): raise BadJobfile('Cannot translate --remote-patches from tryjob v.2 to ' 'v.3. Please run repo sync.') parsed_job['version'] = 3 class CrOSTryJobGit(TryBase): """Poll a Git server to grab patches to try.""" # Name of property source for generated properties. _PROPERTY_SOURCE = 'Try Job' # The version of tryjob that the master is expecting. _TRYJOB_FORMAT_VERSION = 3 # Functions that translate from one tryjob version to another. _TRANSLATION_FUNCS = { 1 : translate_v1_to_v2, 2 : translate_v2_to_v3, } # Template path URL component to retrieve the Base64 contents of a file from # Gitiles. _GITILES_PATH_TMPL = '%(repo)s/+/%(revision)s/%(path)s?format=text' @classmethod def updateJobDesc(cls, parsed_job): """Ensure job description is in the format we expect.""" while parsed_job['version'] < cls._TRYJOB_FORMAT_VERSION: prev_ver = parsed_job['version'] translation_func = cls._TRANSLATION_FUNCS[parsed_job['version']] translation_func(parsed_job) if parsed_job['version'] <= prev_ver: raise AssertionError('translation function %s not incrementing version!' % str(translation_func)) def __init__(self, name, pollers, smtp_host, from_addr, reply_to, email_footer, cbuildbot_configs, properties=None): """Initialize the class. Arguments: name: See TryBase.__init__(). pollers: A list of job repo git pit pollers. smtp_host: The smtp host for sending out error emails. from_addr: The email address to display as being sent from. reply_to: The email address to put in the 'Reply-To' email header field. email_footer: The footer to append to any emails sent out. cbuildbot_configs: (CbuildbotConfigs) A configuration set instance. Any 'bot' request outside of this list will go to an 'etc' builder, if available. properties: See TryBase.__init__() """ TryBase.__init__(self, name, [], properties or {}) self.pollers = pollers self.smtp_host = smtp_host self.from_addr = from_addr self.reply_to = reply_to self.email_footer = email_footer self.cbb = cbuildbot_configs def startService(self): TryBase.startService(self) self.startConsumingChanges() @staticmethod def load_job(data): try: return json.loads(data) except ValueError as e: raise BadJobfile("Failed to parse job JSON: %s" % (e.message,)) def validate_job(self, parsed_job): # A list of field description tuples of the format: # (name, type, required). 
fields = [('name', basestring, True), ('user', basestring, True), ('email', list, True), ('bot', list, True), ('extra_args', list, False), ('version', int, True), ('slaves_request', list, False), ] error_msgs = [] for name, f_type, required in fields: val = parsed_job.get(name) if val is None: if required: error_msgs.append('Option %s missing!' % name) elif not isinstance(val, f_type): error_msgs.append('Option %s of wrong type!' % name) # If we're an 'etc' job, we must have bots defined to execute. for bot in parsed_job['bot']: if bot in self.cbb.configs: continue # Assert that this is a valid 'etc' build. try: self.cbb.ValidateEtcBuild(bot) except ValueError as e: error_msgs.append("Invalid 'etc' build (%s): %s" % (bot, e.message)) if error_msgs: raise BadJobfile('\n'.join(error_msgs)) def get_props(self, config, options): """Overriding base class method.""" props = Properties() props.setProperty('slaves_request', options.get('slaves_request', []), self._PROPERTY_SOURCE) props.setProperty('cbb_config', config, self._PROPERTY_SOURCE) extra_args = options.get('extra_args') if extra_args: # This field can be quite large, and exceed BuildBot property limits. # Compress it, Base64 encode it, and prefix it with "z:" so the consumer # knows its size. extra_args = 'z:' + base64.b64encode(zlib.compress(json.dumps( extra_args))) props.setProperty('cbb_extra_args', extra_args, self._PROPERTY_SOURCE) return props def create_buildset(self, ssid, parsed_job): """Overriding base class method.""" dlist = [] buildset_name = '%s:%s' % (parsed_job['user'], parsed_job['name']) for bot in parsed_job['bot']: builder_name = self.cbb.GetBuilderForConfig(bot) log.msg("Creating '%s' try job(s) %s for %s" % (builder_name, ssid, bot)) dlist.append(self.addBuildsetForSourceStamp(ssid=ssid, reason=buildset_name, external_idstring=buildset_name, builderNames=[builder_name], properties=self.get_props(bot, parsed_job))) return defer.DeferredList(dlist) def send_validation_fail_email(self, name, emails, error): """Notify the user via email about the tryjob error.""" html_content = [] html_content.append('<html><body>') body = """ Your tryjob with name '%(name)s' failed the validation step. This is most likely because <br>you are running an older version of cbuildbot. Please run <br><code>repo sync chromiumos/chromite</code> and try again. If you still see<br>this message please contact [email protected].<br> """ html_content.append(body % {'name': name}) html_content.append("Extra error information:") html_content.append(error.replace('\n', '<br>\n')) html_content.append(self.email_footer) m = Message() m.set_payload('<br><br>'.join(html_content), 'utf8') m.set_type("text/html") m['Date'] = formatdate(localtime=True) m['Subject'] = 'Tryjob failed validation' m['From'] = self.from_addr m['Reply-To'] = self.reply_to result = defer.Deferred() sender_factory = SMTPSenderFactory(self.from_addr, emails, StringIO(m.as_string()), result) reactor.connectTCP(self.smtp_host, 25, sender_factory) @defer.inlineCallbacks def gotChange(self, change, important): try: yield self._gotChangeImpl(change, important) except Exception as e: log.msg('Exception in try job scheduler: %s' % (e,)) import traceback traceback.print_exc() @defer.inlineCallbacks def _gotChangeImpl(self, change, _important): """Process the received data and send the queue buildset.""" # Find poller that this change came from. 
for poller in self.pollers: if not isinstance(poller, gitiles_poller.GitilesPoller): continue if poller.repo_url == change.repository: break else: raise BadJobfile( 'Received tryjob from unsupported repository %s' % change.repository) # pylint: disable=W0631 file_contents = yield self.loadGitilesChangeFile(poller, change) parsed = {} try: parsed = self.load_job(file_contents) self.validate_job(parsed) self.updateJobDesc(parsed) except BadJobfile as e: self.send_validation_fail_email(parsed.setdefault('name', ''), parsed['email'], str(e)) raise # The sourcestamp/buildsets created will be merge-able. ssid = yield self.master.db.sourcestamps.addSourceStamp( branch=change.branch, revision=change.revision, project=change.project, repository=change.repository, changeids=[change.number]) yield self.create_buildset(ssid, parsed) @defer.inlineCallbacks def loadGitilesChangeFile(self, poller, change): if len(change.files) != 1: # We only accept changes with 1 diff file. raise BadJobfile( 'Try job with too many files %s' % (','.join(change.files))) # Load the contents of the modified file. path = self._GITILES_PATH_TMPL % { 'repo': poller.repo_path, 'revision': change.revision, 'path': change.files[0], } contents_b64 = yield poller.agent.request('GET', path, retry=5, protocol=StringResponse.Get) defer.returnValue(base64.b64decode(contents_b64))
37.165714
80
0.676661
11,525
0.885993
2,036
0.156519
2,780
0.213715
0
0
5,615
0.431657
b94d43136b5079271270c2099bbeca811ff9b1ce
1,412
py
Python
Medium/515.py
Hellofafar/Leetcode
7a459e9742958e63be8886874904e5ab2489411a
[ "CNRI-Python" ]
6
2017-09-25T18:05:50.000Z
2019-03-27T00:23:15.000Z
Medium/515.py
Hellofafar/Leetcode
7a459e9742958e63be8886874904e5ab2489411a
[ "CNRI-Python" ]
1
2017-10-29T12:04:41.000Z
2018-08-16T18:00:37.000Z
Medium/515.py
Hellofafar/Leetcode
7a459e9742958e63be8886874904e5ab2489411a
[ "CNRI-Python" ]
null
null
null
# ------------------------------
# 515. Find Largest Value in Each Tree Row
#
# Description:
# You need to find the largest value in each row of a binary tree.
# Example:
# Input:
#
#           1
#          / \
#         3   2
#        / \   \
#       5   3   9
#
# Output: [1, 3, 9]
#
# Version: 1.0
# 12/22/18 by Jianfa
# ------------------------------

import sys  # needed for sys.maxsize used below


# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class Solution:
    def largestValues(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []

        children = [root]
        res = []
        while children:
            temp = []               # Nodes of the next row
            largest = -sys.maxsize  # Largest number of this row
            for i in range(len(children)):
                node = children[i]
                largest = max(node.val, largest)
                if node.left:
                    temp.append(node.left)
                if node.right:
                    temp.append(node.right)
            res.append(largest)
            children = temp

        return res


# Used for testing
if __name__ == "__main__":
    test = Solution()

# ------------------------------
# Summary:
# BFS solution.
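A small usage sketch for the solution above; the TreeNode class simply follows the commented-out definition in the file and the tree matches the example in the header comment.

class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None

# Build the example tree: 1 / (3, 2), 3 / (5, 3), 2 \ 9
root = TreeNode(1)
root.left, root.right = TreeNode(3), TreeNode(2)
root.left.left, root.left.right = TreeNode(5), TreeNode(3)
root.right.right = TreeNode(9)

print(Solution().largestValues(root))  # expected: [1, 3, 9]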
23.147541
66
0.434136
761
0.538952
0
0
0
0
0
0
697
0.493626
b94d5a11e77235531376a017f673e8c5a0fdf637
9,578
py
Python
opsmop/meta/docs/exparser.py
lachmanfrantisek/opsmop
562ae2d753ff84b3d794a6815d0436753e82d2a0
[ "Apache-2.0" ]
null
null
null
opsmop/meta/docs/exparser.py
lachmanfrantisek/opsmop
562ae2d753ff84b3d794a6815d0436753e82d2a0
[ "Apache-2.0" ]
null
null
null
opsmop/meta/docs/exparser.py
lachmanfrantisek/opsmop
562ae2d753ff84b3d794a6815d0436753e82d2a0
[ "Apache-2.0" ]
null
null
null
# Copyright 2018 Michael DeHaan LLC, <[email protected]> # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os class Example(object): def __init__(self): # things we'll figure out as we scan an example self.name = "" self.see_files = [] self.description = [] self.code = [] class Record(object): def __init__(self): # things which we'll figure out as we scan the example self.name = "" self.purpose = "" self.provider_names = [] self.related_modules = [] self.category = "" self.description = [] self.examples = [] self.current_example = Example() self.phase = 'module' self.count = 0 def set_phase(self, phase): self.phase = phase print("---------------------------------------------------------") print("%s phase | %s" % (self.count, self.phase)) print("---------------------------------------------------------") @classmethod def from_file(cls, filename): r = cls() r.name = os.path.basename(filename).replace(".py","") print("=========================================================") print("%s M | %s" % ('0', r.name)) data = open(filename).read().splitlines() for line in data: if not r.handle_line(line): break return r def load_command(self, line): if "DESCRIPTION" in line or '----' in line or '====' in line: pass elif not ":" in line: # commands must contain a colon unless they are blocks or DESCRIPTION starters return (False, None, None) if not line.startswith("#"): # commands must be in comments return (False, None, None) if ":" in line: tokens = line.split(":") if tokens[0].upper() != tokens[0]: # commands must be in all caps. This is done # so we don't get confused by colons in URLs and so on. 
print("REJECT: %s" % tokens[0]) return (False, None, None) # at this point we are sure it is a command if '#------------' in line.replace(" ",""): return (True, 'start_block', None) if '#============' in line.replace(" ",""): return (True, 'end_block', None) # throw away the leading comment line = line.replace("#","",1).strip() if line.startswith("DESCRIPTION"): return (True, 'description', None) tokens = line.split(':', 1) command = tokens[0].replace("#","").strip().lower() rest = tokens[1].strip() return (True, command, rest) def handle_line(self, line): self.count = self.count + 1 (is_command, command, rest) = self.load_command(line) print("%s line | %s" % (self.count, line)) #if command == 'policy': # return False if is_command: #if command not in [ 'start_block', 'end_block' ]: # print("keyword: %s => %s" % (command, rest)) self.handle_command(command, rest) return True #print("PHASE=%s" % self.phase) #print("LINE=%s" % line) if self.phase == 'module': if not line.startswith("#") or line.replace("#","").strip(): raise Exception("the module phase should be all commands") elif self.phase == 'description': # module description lines must be comments self.handle_module_description(line) elif self.phase == 'example': if not line.startswith("#") or line.replace("#","").strip(): raise Exception("the example phase should be all commands") elif self.phase == 'example_description': self.handle_example_description(self.current_example, line) elif self.phase == 'example_code': self.handle_example_code(self.current_example, line) elif self.phase == 'limbo': #print("ignoring line while in limbo: %s" % line) pass elif self.phase == 'done': #print("ignoring line while done: %s" % line) pass else: raise Exception("unknown phase: %s" % self.phase) return True # continue def handle_command(self, command, rest): #print("<PHASE: %s, COMMAND: %s, REST: %s>" % (self.phase, command, rest)) if self.phase == 'done': return False if self.phase == 'module': # from module mode the only state transition is into module_description mode # when we find the description command if command not in ['start_block', 'end_block']: print("%s set | %-20s | %s" % (self.count, command, rest)) if command == 'module': pass elif command == 'start_block': pass elif command == 'category': self.category = rest elif command == 'purpose': self.purpose = rest elif command == 'related': self.related_modules = [ x.strip() for x in rest.split(",") ] elif command == 'providers': self.providers = [ x.strip() for x in rest.split(",") ] elif command == 'fyi': pass elif command == 'description': print("---------------------------------------------------------") self.set_phase('description') elif command == 'end_block': raise Exception("unexpected end block without description") else: raise Exception("unknown command: %s" % command) elif self.phase == 'description': # in description phase end block moves us into limbo until we find # another example start block if command == 'end_block': self.set_phase('limbo') else: raise Exception("invalid command: %s" % command) elif self.phase == 'limbo': # in limbo, seeing a start block moves us into example phase if command == 'start_block': self.set_phase('example') else: raise Exception("invalid command: %s" % command) elif self.phase == 'example': # in example phase we can only move into example description phase # by hitting the description command if command == 'example': print("---------------------------------------------------------") print("%s exmp | %s" % (self.count, rest)) 
print("---------------------------------------------------------") self.current_example.name = rest elif command == 'setup': self.set_phase('done') elif command == 'description': print("MOV!") self.set_phase('example_description') elif command == 'see_files' or command == 'see_file': self.current_example.see_files = [ x.strip() for x in rest.split(",")] else: raise Exception("unknown command: %s" % command) elif self.phase == 'example_description': # in example description phase we can only move into example code phase # by hitting an end block if command == 'end_block': print("-------") self.set_phase('example_code') else: raise Exception("unknown command: %s" % command) elif self.phase == 'example_code': # in example code phase we can only move back into example phase by # hitting a start block if command == 'start_block': self.examples.append(self.current_example) self.current_example = Example() self.set_phase('example') else: raise Exception("unknown command: %s" % command) elif self.phase == 'done': return False else: raise Exception("unknown phase: %s" % self.phase) def handle_example_description(self, example, line): # could be a comment or the code example, we want to keep both if line.startswith("#"): line = line.replace("#","") line = line.strip() print("%s desc | %s" % (self.count, line)) example.description.append(line) def handle_example_code(self, example, line): line = line.rstrip() example.code.append(line) print("%s code | %s" % (self.count, line)) def handle_module_description(self, line): if line.startswith("#"): line = line.replace("#","") line = line.strip() if line: print("%s mdesc | %s" % (self.count, line)) self.description.append(line)
37.708661
90
0.516914
8,951
0.934537
0
0
403
0.042076
0
0
3,475
0.362811
b94dd4c5db15c696e937d22b21b3d1a6fd038ef8
737
py
Python
pylox/TokenType.py
sheunl/Compiler_Tests
18c5e0568bc39a60094f3e44943ac252c279ffb9
[ "CC0-1.0" ]
null
null
null
pylox/TokenType.py
sheunl/Compiler_Tests
18c5e0568bc39a60094f3e44943ac252c279ffb9
[ "CC0-1.0" ]
null
null
null
pylox/TokenType.py
sheunl/Compiler_Tests
18c5e0568bc39a60094f3e44943ac252c279ffb9
[ "CC0-1.0" ]
null
null
null
from enum import Enum


class T(Enum):
    # single character Tokens
    LEFT_PAREN = 1
    RIGHT_PAREN = 2
    LEFT_BRACE = 3
    RIGHT_BRACE = 4
    COMMA = 5
    DOT = 6
    MINUS = 7
    PLUS = 8
    SEMICOLON = 9
    SLASH = 10
    STAR = 11

    # one or two character tokens
    BANG = 12
    BANG_EQUAL = 13
    EQUAL = 14
    EQUAL_EQUAL = 15
    GREATER = 16
    GREATER_EQUAL = 17
    LESS = 18
    LESS_EQUAL = 19

    # Literals
    IDENTIFIER = 20
    STRING = 21
    NUMBER = 22

    # keywords
    AND = 23
    CLASS = 24
    ELSE = 25
    FALSE = 26
    FUN = 27
    FOR = 28
    IF = 29
    NIL = 30
    OR = 31
    PRINT = 32
    RETURN = 33
    SUPER = 34
    THIS = 35
    TRUE = 36
    VAR = 37
    WHILE = 38

    EOF = 39
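A brief, hypothetical usage sketch of the token-type enum; the repository's actual scanner is not part of this record, and the mapping below is only illustrative. It assumes the T enum above is in scope.

# Map one-character lexemes to their token types, as a Lox-style scanner might.
SINGLE_CHAR_TOKENS = {
    '(': T.LEFT_PAREN, ')': T.RIGHT_PAREN,
    '{': T.LEFT_BRACE, '}': T.RIGHT_BRACE,
    ',': T.COMMA, '.': T.DOT, '-': T.MINUS, '+': T.PLUS,
    ';': T.SEMICOLON, '*': T.STAR,
}

def scan_single(ch):
    """Return the token type for a one-character lexeme, or None if unknown."""
    return SINGLE_CHAR_TOKENS.get(ch)

print(scan_single('+'))  # T.PLUS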
14.45098
32
0.522388
713
0.967436
0
0
0
0
0
0
70
0.09498
b94e05939494c3c75adce95bb694899b36d0a091
919
py
Python
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py
dios-game/dios-cocos
b7fbcbafe02f516ef18fdb64b4519dbf806303fc
[ "MIT" ]
1
2021-07-22T15:53:26.000Z
2021-07-22T15:53:26.000Z
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py
dios-game/dios-cocos
b7fbcbafe02f516ef18fdb64b4519dbf806303fc
[ "MIT" ]
null
null
null
src/oslibs/cocos/cocos-src/tools/cocos2d-console/plugins/framework/framework_add.py
dios-game/dios-cocos
b7fbcbafe02f516ef18fdb64b4519dbf806303fc
[ "MIT" ]
null
null
null
import cocos
from MultiLanguage import MultiLanguage

from package.helper import ProjectHelper


class FrameworkAdd(cocos.CCPlugin):

    @staticmethod
    def plugin_name():
        return "add-framework"

    @staticmethod
    def brief_description():
        return MultiLanguage.get_string('FRAMEWORK_ADD_BRIEF')

    # parse arguments
    def parse_args(self, argv):
        from argparse import ArgumentParser

        parser = ArgumentParser(prog="cocos %s" % self.__class__.plugin_name(),
                                description=self.__class__.brief_description())
        parser.add_argument("name", metavar="NAME",
                            help=MultiLanguage.get_string('FRAMEWORK_ADD_ARG_NAME'))
        return parser.parse_args(argv)

    def run(self, argv):
        args = self.parse_args(argv)
        name = args.name

        project = ProjectHelper.get_current_project()
        ProjectHelper.add_framework(project, name)
28.71875
108
0.686616
820
0.892274
0
0
172
0.18716
0
0
99
0.107726
b9514946d8170f94e426e1cbf736a481d8427c11
761
py
Python
src/utils.py
f-grimaldi/explain_ML
00892675be32bebd023b274270ccb05b798fb388
[ "MIT" ]
1
2020-08-03T08:23:31.000Z
2020-08-03T08:23:31.000Z
src/utils.py
f-grimaldi/explain_ML
00892675be32bebd023b274270ccb05b798fb388
[ "MIT" ]
null
null
null
src/utils.py
f-grimaldi/explain_ML
00892675be32bebd023b274270ccb05b798fb388
[ "MIT" ]
null
null
null
from matplotlib import colors
import numpy as np


class SaveOutput:
    def __init__(self):
        self.outputs = []

    def __call__(self, module, module_in, module_out):
        self.outputs.append(module_out)

    def clear(self):
        self.outputs = []


class MidpointNormalize(colors.Normalize):

    def __init__(self, vmin=None, vmax=None, vcenter=None, clip=False):
        self.vcenter = vcenter
        colors.Normalize.__init__(self, vmin, vmax, clip)

    def __call__(self, value, clip=None):
        # I'm ignoring masked values and all kinds of edge cases to make a
        # simple example...
        x, y = [self.vmin, self.vcenter, self.vmax], [self.vmin, self.vcenter, self.vmax]
        return np.ma.masked_array(np.interp(value, x, y))
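A usage sketch for SaveOutput, assuming it is meant as a PyTorch forward hook (the (module, module_in, module_out) signature matches torch's hook API); the tiny model below is purely illustrative and not from the repository.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(4, 8), nn.ReLU(), nn.Linear(8, 2))

save_output = SaveOutput()
handles = [layer.register_forward_hook(save_output)
           for layer in model if isinstance(layer, nn.Linear)]

model(torch.randn(1, 4))
print(len(save_output.outputs))  # 2: one captured output tensor per Linear layer

for h in handles:                # detach the hooks and reset the buffer
    h.remove()
save_output.clear()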
31.708333
89
0.659658
708
0.930355
0
0
0
0
0
0
85
0.111695
b9516c7b124e87fce1712aca1aa49ef2cd923f11
3,056
py
Python
lib/two/mongomgr.py
erkyrath/tworld
9f5237771196b03753d027277ffc296e25fd7425
[ "MIT" ]
38
2015-01-03T16:59:20.000Z
2021-10-13T09:15:53.000Z
lib/two/mongomgr.py
Oreolek/tworld
9f5237771196b03753d027277ffc296e25fd7425
[ "MIT" ]
32
2015-01-04T01:59:34.000Z
2016-05-20T16:29:26.000Z
lib/two/mongomgr.py
Oreolek/tworld
9f5237771196b03753d027277ffc296e25fd7425
[ "MIT" ]
7
2015-10-08T21:01:20.000Z
2020-05-21T17:42:54.000Z
""" Manage the connection to the MongoDB server. """ import tornado.gen import tornado.ioloop import motor class MongoMgr(object): def __init__(self, app): # Keep a link to the owning application. self.app = app self.log = self.app.log # This will be the Motor (MongoDB) connection. We'll open it in the # first monitor_mongo_status call. self.mongo = None self.mongoavailable = False # true if self.mongo exists and is open self.mongotimerbusy = False # true while monitor_mongo_status runs # We also manage self.app.mongodb, a MotorDatabase. This must be # non-None exactly when mongoavailable is true. def init_timers(self): ioloop = tornado.ioloop.IOLoop.instance() # The mongo status monitor. We set up one call immediately, and then # try again every three seconds. ioloop.add_callback(self.monitor_mongo_status) res = tornado.ioloop.PeriodicCallback(self.monitor_mongo_status, 3000) res.start() def close(self): """Close the connection to mongodb. (The monitor will start it right back up again, or try to.) """ if self.mongo: try: self.mongo.disconnect() except Exception as ex: self.log.error('Problem disconnecting mongo: %s', ex) self.mongo = None self.app.mongodb = None @tornado.gen.coroutine def monitor_mongo_status(self): if (self.mongotimerbusy): self.log.warning('monitor_mongo_status: already in flight; did a previous call jam?') return if (self.app.shuttingdown): self.log.warning('monitor_mongo_status: server is shutting down, never mind') return self.mongotimerbusy = True if (self.mongoavailable): try: res = yield motor.Op(self.mongo.admin.command, 'ping') if (not res): self.log.error('Mongo client not alive') self.mongoavailable = False except Exception as ex: self.log.error('Mongo client not alive: %s', ex) self.mongoavailable = False if (not self.mongoavailable): self.close() if (not self.mongoavailable): try: self.mongo = motor.MotorClient(tz_aware=True) res = yield motor.Op(self.mongo.open) ### maybe authenticate to a database? self.mongoavailable = True self.app.mongodb = self.mongo[self.app.opts.mongo_database] self.log.info('Mongo client open') self.app.queue_command({'cmd':'dbconnected'}) except Exception as ex: self.mongoavailable = False self.app.mongodb = None self.log.error('Mongo client not open: %s', ex) self.mongotimerbusy = False
35.534884
97
0.576571
2,943
0.963024
1,556
0.509162
1,583
0.517997
0
0
914
0.299084
b9530c0fbf29c36506820a41f0b32bd37796d3e0
1,298
py
Python
code/examples/example_binomial_and_log_normal_abtest.py
hugopibernat/BayesianABTestAnalysis
026960524f5313f4a734f30fd447a5731be802e0
[ "Apache-2.0" ]
null
null
null
code/examples/example_binomial_and_log_normal_abtest.py
hugopibernat/BayesianABTestAnalysis
026960524f5313f4a734f30fd447a5731be802e0
[ "Apache-2.0" ]
null
null
null
code/examples/example_binomial_and_log_normal_abtest.py
hugopibernat/BayesianABTestAnalysis
026960524f5313f4a734f30fd447a5731be802e0
[ "Apache-2.0" ]
null
null
null
#################################################
#######       Author: Hugo Pibernat       #######
####### Contact: [email protected] #######
#######          Date: April 2014         #######
#################################################

from bayesianABTest import sampleSuccessRateForBinomial, sampleMeanForLogNormal, probabilityOfABetterThanB
from numpy.random import lognormal
from numpy import mean, concatenate, zeros

# Generate Log-Normal data
A_actuals = lognormal(mean=4.10, sigma=1.0, size=100)
B_actuals = lognormal(mean=4.00, sigma=1.0, size=100)

# Plus some zeros
A_data = concatenate([A_actuals, zeros(10000)])
B_data = concatenate([B_actuals, zeros(10000)])

# Modeling conversions with a binomial variable
A_purchases = sum(A_data > 0)
A_sessions = len(A_data)

B_purchases = sum(B_data > 0)
B_sessions = len(B_data)

A_CR = sampleSuccessRateForBinomial(A_sessions, A_purchases)
B_CR = sampleSuccessRateForBinomial(B_sessions, B_purchases)

# Modeling the spend with a log-normal
A_non_zero_data = A_data[A_data > 0]
B_non_zero_data = B_data[B_data > 0]

A_spend = sampleMeanForLogNormal(A_non_zero_data)
B_spend = sampleMeanForLogNormal(B_non_zero_data)

# Combining the two
A_rps = A_CR*A_spend
B_rps = B_CR*B_spend

# Result:
print probabilityOfABetterThanB(A_rps, B_rps)
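The bayesianABTest helpers are not included in this record. A common way to implement the final comparison, assuming both inputs are arrays of posterior samples, is the Monte Carlo fraction below; this is an assumption for illustration, not the library's actual code.

import numpy as np

def probability_a_better_than_b(a_samples, b_samples):
    """Estimate P(A > B) as the fraction of paired posterior draws where A wins."""
    a = np.asarray(a_samples)
    b = np.asarray(b_samples)
    return float(np.mean(a > b))

# e.g. probability_a_better_than_b(A_rps, B_rps) with the arrays computed above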
32.45
106
0.692604
0
0
0
0
0
0
0
0
401
0.308937
b95332c99e63e536863282307e578d423edf7664
644
py
Python
tests/models/test_documents.py
airslate-oss/python-airslate
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
[ "Apache-2.0" ]
3
2021-02-07T20:04:26.000Z
2021-09-22T08:32:26.000Z
tests/models/test_documents.py
airslate-oss/python-airslate
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
[ "Apache-2.0" ]
15
2021-01-21T15:38:37.000Z
2021-02-16T07:52:20.000Z
tests/models/test_documents.py
airslate-oss/python-airslate
0f7fe6321b1c2e6875a02dfecb5ffa07a361bb1d
[ "Apache-2.0" ]
null
null
null
# This file is part of the airslate.
#
# Copyright (c) 2021 airSlate, Inc.
#
# For the full copyright and license information, please view
# the LICENSE file that was distributed with this source code.

from airslate.models.documents import UpdateFields
from airslate.entities.fields import Field


def test_empty_update_fields__to_dict():
    model = UpdateFields()
    assert model.to_dict() == {'data': []}


def test_update_fields__to_dict():
    model = UpdateFields(data=[Field('123'), Field('abc')])
    assert model.to_dict() == {'data': [
        {'id': '123', 'type': 'dictionary'},
        {'id': 'abc', 'type': 'dictionary'}
    ]}
28
62
0.677019
0
0
0
0
0
0
0
0
272
0.42236
b953812809c13031070133a7849795278b13dea4
111
py
Python
sim/dynamicobject.py
rseed42/labyrinth
1cd4dc74c67b1b76972e1e048a7fce0c13955e7d
[ "Apache-2.0" ]
null
null
null
sim/dynamicobject.py
rseed42/labyrinth
1cd4dc74c67b1b76972e1e048a7fce0c13955e7d
[ "Apache-2.0" ]
null
null
null
sim/dynamicobject.py
rseed42/labyrinth
1cd4dc74c67b1b76972e1e048a7fce0c13955e7d
[ "Apache-2.0" ]
null
null
null
class DynamicObject(object):
    def __init__(self, name, id_):
        self.name = name
        self.id = id_
22.2
34
0.603604
110
0.990991
0
0
0
0
0
0
0
0
b95403252db42b0394653a122fd73b2b596e194d
400
py
Python
app/main.py
meysam81/sheypoor
aa67e20646ebc4143b83968f60c0b28c2ad340a1
[ "MIT" ]
null
null
null
app/main.py
meysam81/sheypoor
aa67e20646ebc4143b83968f60c0b28c2ad340a1
[ "MIT" ]
null
null
null
app/main.py
meysam81/sheypoor
aa67e20646ebc4143b83968f60c0b28c2ad340a1
[ "MIT" ]
null
null
null
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware

from app import api
from app.core.config import config

app = FastAPI(title="Sheypoor")

# Set all CORS enabled origins
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

app.include_router(api.router, prefix=config.API_URI)
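One common way to serve an application module like this during development is via uvicorn; the "app.main:app" import string, host and port below are assumptions about the project layout, not values from the repository.

import uvicorn

if __name__ == "__main__":
    uvicorn.run("app.main:app", host="127.0.0.1", port=8000, reload=True)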
21.052632
53
0.7425
0
0
0
0
0
0
0
0
49
0.1225
b9556579b31dd7d2370d8083a431ada02beb471d
2,205
py
Python
cdnu/ccds.py
Indy2222/mbg-codon-usage
d415076a8150cd712010c0389c71ef22ba9ad850
[ "MIT" ]
null
null
null
cdnu/ccds.py
Indy2222/mbg-codon-usage
d415076a8150cd712010c0389c71ef22ba9ad850
[ "MIT" ]
null
null
null
cdnu/ccds.py
Indy2222/mbg-codon-usage
d415076a8150cd712010c0389c71ef22ba9ad850
[ "MIT" ]
null
null
null
from typing import List, NamedTuple

CCDS_FILE = 'CCDS.current.txt'
CHROMOSOMES = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12',
               '13', '14', '15', '16', '17', '18', '19', '20', '21', '22',
               'X', 'Y')


class CdsPos(NamedTuple):
    ccds_id: str

    indexes: list
    """2-tuples with start (inclusive) and stop indexes (exclusive) in
    reference genome. Whole CDS can be constructed as concatenation of the
    sub-sequences."""

    molecule: str
    """Molecule name, see :const:`CHROMOSOMES`"""


def load_ccds() -> List[CdsPos]:
    """Load file with CDS locations within GRCh38 genome as a list of
    :class:`CdsPos`."""

    cds = []

    with open(CCDS_FILE, encoding='utf-8', newline='\n') as fp:
        for line in fp:
            if not line:
                # Skip empty lines
                continue
            if line.startswith('#'):
                # Skip comments
                continue

            parts = line.split('\t')

            ccds_id = parts[4]
            status = parts[5]

            if 'Public' not in status:
                # CDS is not yet public
                continue

            if parts[6] == '-':
                # CDS strand negative order = reverse-complement
                continue

            locations_str = parts[9]
            if locations_str == '-':
                # CDS location unknown
                continue

            chromosome = parts[0]
            assert chromosome in CHROMOSOMES, chromosome

            locations = []
            assert locations_str.startswith('[')
            assert locations_str.endswith(']')
            for location_str in locations_str[1:-1].split(','):
                start_str, stop_str = location_str.split('-')
                start, stop = int(start_str), int(stop_str) + 1
                locations.append((start, stop))

            if sum(b - a for a, b in locations) % 3 != 0:
                # Skip CDS which are not multiple of three in length.
                continue

            cds.append(CdsPos(
                ccds_id=ccds_id,
                molecule='chr' + chromosome,
                indexes=locations
            ))

    return cds
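A short usage sketch for the parser above: summing the coding-sequence length per chromosome from the parsed records. It assumes a CCDS.current.txt file is present in the working directory, as the CCDS_FILE constant expects.

from collections import Counter

cds_list = load_ccds()

# Total number of coding bases per molecule, using the half-open (start, stop) indexes.
bases_per_molecule = Counter()
for cds in cds_list:
    bases_per_molecule[cds.molecule] += sum(stop - start for start, stop in cds.indexes)

for molecule, n_bases in sorted(bases_per_molecule.items()):
    print(molecule, n_bases)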
30.205479
77
0.502494
297
0.134694
0
0
0
0
0
0
628
0.284807
b9576be4fad430a84f92a2e3dc9d1b34f113118c
2,732
py
Python
test/test_resolve_errors.py
ITMO-NSS-team/GEFEST
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
[ "BSD-3-Clause" ]
12
2022-01-19T11:06:32.000Z
2022-02-21T14:59:23.000Z
test/test_resolve_errors.py
ITMO-NSS-team/GEFEST
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
[ "BSD-3-Clause" ]
9
2022-01-19T11:09:11.000Z
2022-03-29T13:36:41.000Z
test/test_resolve_errors.py
ITMO-NSS-team/GEFEST
72bb61cf3fbb9f87fe3dcd48b71f3e84dd23b669
[ "BSD-3-Clause" ]
2
2022-01-19T11:37:24.000Z
2022-03-24T19:35:33.000Z
import pytest
from copy import deepcopy

from gefest.core.structure.point import Point
from gefest.core.structure.polygon import Polygon
from gefest.core.structure.structure import Structure
from gefest.core.algs.postproc.resolve_errors import *
from gefest.core.algs.geom.validation import *

# marking length and width for testing polygon
poly_width = 10
poly_length = 20

# creating a testing polygons via corner points
rectangle_points = [(-1, 40), (-1, poly_length + 40), (-poly_width - 10, poly_length + 40), (-poly_width - 10, 40)]
out_bounds_rectangle_poly = Polygon('rectangle', points=[Point(*coords) for coords in rectangle_points])

triangle_points = [(1, 1), (poly_width, poly_length), (1, poly_length)]
unclosed_triangle_poly = Polygon('triangle', points=[Point(*coords) for coords in triangle_points])

incorrect_points = [(5, 5), (5, poly_length), (8, poly_length), (5, 5), (5, 30)]
incorrect_poly = Polygon('incorrect_poly', points=[Point(*coords) for coords in incorrect_points])

domain = Domain()


def test_unclosed_poly():
    input_structure = Structure([unclosed_triangle_poly])
    observed_structure = postprocess(input_structure, domain)

    assert unclosed_poly(input_structure, domain)
    assert not unclosed_poly(observed_structure, domain)


def test_self_intersection():
    input_structure = Structure([incorrect_poly])
    observed_structure = postprocess(input_structure, domain)

    assert self_intersection(input_structure)
    assert not self_intersection(observed_structure)


def test_out_of_bound():
    input_structure = Structure([out_bounds_rectangle_poly])
    observed_structure = postprocess(input_structure, domain)

    assert out_of_bound(input_structure, domain)
    assert not out_of_bound(observed_structure, domain)


def test_fixed_polys():
    domain = Domain(fixed_points=[[[15, 30], [40, 30], [15, 40]]])
    poly_like_fixed = Polygon('like_fixed', points=[Point(15, 30), Point(40, 30), Point(15, 40)])
    input_structure = Structure([poly_like_fixed, unclosed_triangle_poly])
    observed_structure = postprocess(input_structure, domain)

    assert all([np.isclose(len(observed_structure.polygons), 2),
                'like_fixed' not in [poly.id for poly in observed_structure.polygons],
                'fixed' in [poly.id for poly in observed_structure.polygons]])


def test_too_close():
    same_poly = deepcopy(unclosed_triangle_poly)
    same_poly.id = 'same_triangle'
    input_structure = Structure([unclosed_triangle_poly, same_poly])
    observed_structure = postprocess(input_structure, domain)

    print(observed_structure.polygons)

    assert np.isclose(len(observed_structure.polygons), 1)
37.424658
107
0.739019
0
0
0
0
0
0
0
0
176
0.064422
b959064c37513b8eabaf61132941fe714e3a8dbc
1,833
py
Python
tests/mocks.py
davla/i3-live-tree
8dc3917afdd09f53f7cf39653c2bf12cb0200983
[ "MIT" ]
1
2021-07-22T09:04:46.000Z
2021-07-22T09:04:46.000Z
tests/mocks.py
davla/i3-live-tree
8dc3917afdd09f53f7cf39653c2bf12cb0200983
[ "MIT" ]
null
null
null
tests/mocks.py
davla/i3-live-tree
8dc3917afdd09f53f7cf39653c2bf12cb0200983
[ "MIT" ]
null
null
null
from unittest.mock import MagicMock, Mock

from i3ipc.aio import Con

import i3_live_tree.tree_serializer  # noqa: F401


class MockConSerializer(Mock, Con):
    """Mock a generic i3ipc.aio.Con for serialization purposes

    This Mock is meant to ease testing of i3ipc.aio.Con serialization
    methods, which are monkey patched in i3_live_tree.tree_serializer.

    In order to achieve this, the mock inherits all the method
    implementations of i3ipc.aio.Con, most importantly the serialization
    ones. However, whatever is needed for serialization, both properties and
    methods, is mocked and can be injected in the constructor, in order to
    ease the creation of mock instances.
    """

    def __init__(self, *args, name=None, layout=None, focused=False,
                 nodes=iter(()), **kwargs):
        Mock.__init__(self, *args, **kwargs)
        self.focused = focused
        self.layout = layout
        self.name = name
        self.nodes = nodes


class MockConNavigation(MagicMock):
    """Mock an i3ipc.aio.Con for navigation purposes

    This Mock is meant to be used when testing i3ipc event handlers. It
    mocks all the necessary methods and properties, by returning `self` when
    an i3ipc.aio.Con instance is needed, for the sake of simplicity.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def find_focused(self):
        """Return the focused window"""
        return self

    def workspace(self):
        """Return the containing workspace"""
        return self


class MockI3(Mock):
    """Mock an i3ipc.aio.Connection"""

    def __init__(self, *args, tree, **kwargs):
        super().__init__(*args, **kwargs)
        self.tree = tree

    async def get_tree(self):
        """Return the i3 tree asynchronously"""
        return self.tree
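A sketch of how such mocks might be assembled in a test. The window names and layout value are arbitrary, and no serializer method is called here because the monkey-patched method names live in i3_live_tree.tree_serializer and are not part of this record.

# Build a tiny fake i3 tree: a split container with two leaf windows.
leaf_a = MockConSerializer(name='editor', focused=True)
leaf_b = MockConSerializer(name='terminal')
root = MockConSerializer(layout='splith', nodes=iter((leaf_a, leaf_b)))

assert root.layout == 'splith'
assert [n.name for n in root.nodes] == ['editor', 'terminal']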
30.55
78
0.67485
1,704
0.929624
0
0
0
0
98
0.053464
963
0.525368
b95a54ae27c88b1a727a1742ed1880093d3693e0
971
py
Python
hvac/api/secrets_engines/gcp.py
nested-tech/hvac
2a58ac9850b882e43c1617ae6b0ea93104c99794
[ "Apache-2.0" ]
null
null
null
hvac/api/secrets_engines/gcp.py
nested-tech/hvac
2a58ac9850b882e43c1617ae6b0ea93104c99794
[ "Apache-2.0" ]
null
null
null
hvac/api/secrets_engines/gcp.py
nested-tech/hvac
2a58ac9850b882e43c1617ae6b0ea93104c99794
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Gcp methods module."""

from hvac import exceptions
from hvac.api.vault_api_base import VaultApiBase
from hvac.constants.gcp import DEFAULT_MOUNT_POINT, ALLOWED_CREDS_ENDPOINTS


class Gcp(VaultApiBase):

    def generate_credentials(self, roleset, endpoint='key', mount_point=DEFAULT_MOUNT_POINT):
        if endpoint not in ALLOWED_CREDS_ENDPOINTS:
            error_msg = 'invalid endpoint argument provided "{arg}", supported types: "{allowed_endpoints}"'
            raise exceptions.ParamValidationError(error_msg.format(
                arg=endpoint,
                allowed_endpoints=', '.join(ALLOWED_CREDS_ENDPOINTS),
            ))

        api_path = '/v1/{mount_point}/{endpoint}/{roleset}'.format(
            mount_point=mount_point,
            endpoint=endpoint,
            roleset=roleset,
        )
        response = self._adapter.get(
            url=api_path
        )
        return response.json()
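A hypothetical call sketch, assuming this fork wires the class into the standard hvac client as client.secrets.gcp; the Vault address, token and roleset name are placeholders, and the accessor path itself is an assumption rather than something shown in this record.

import hvac

client = hvac.Client(url='https://vault.example.com:8200', token='s.xxxxx')

creds = client.secrets.gcp.generate_credentials(
    roleset='my-roleset',   # assumed roleset name
    endpoint='key',         # whichever values ALLOWED_CREDS_ENDPOINTS permits
)
print(creds)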
34.678571
108
0.652935
743
0.765191
0
0
0
0
0
0
202
0.208033
b95b84a26deaf7cd8b371b13b34ee9e7005ee7c0
9,155
py
Python
ypricemagic/uniswap.py
poolpitako/ypricemagic
882aa2071a918937e77e0b85e5f52191a4714d28
[ "MIT" ]
null
null
null
ypricemagic/uniswap.py
poolpitako/ypricemagic
882aa2071a918937e77e0b85e5f52191a4714d28
[ "MIT" ]
null
null
null
ypricemagic/uniswap.py
poolpitako/ypricemagic
882aa2071a918937e77e0b85e5f52191a4714d28
[ "MIT" ]
null
null
null
import token from tokenize import tokenize from brownie import Contract, chain from brownie.exceptions import ContractNotFound from cachetools.func import ttl_cache from .utils.cache import memory from .utils.multicall2 import fetch_multicall from .interfaces.ERC20 import ERC20ABI import ypricemagic.magic import ypricemagic.utils.utils from .constants import STABLECOINS, dai, usdc, usdt, wbtc, weth, sushi # NOTE: If this is failing to pull a price for a token you need, it's likely because that token requires a special swap path. # Please add a viable swap path below to fetch price data successfully. #project.load() if chain.id == 1: FACTORIES = { "uniswap": "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f", "sushiswap": "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac", } ROUTERS = { "uniswap": Contract("0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"), "sushiswap": Contract("0xD9E1CE17F2641F24AE83637AB66A2CCA9C378B9F"), } SPECIAL_PATHS = { "sushiswap": { "0xEF69B5697f2Fb0345cC680210fD39b593a2f9684": ["0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc] ,"0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e": ["0xbf2179859fc6D5BEE9Bf9158632Dc51678a4100e","0xC28E27870558cF22ADD83540d2126da2e4b464c2",weth,usdc] ,"0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2": ["0x3166C570935a7D8554c8f4eA792ff965D2EFe1f2","0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc] ,"0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6": ["0xE6279E1c65DD41b30bA3760DCaC3CD8bbb4420D6","0x87F5F9eBE40786D49D35E1B5997b07cCAA8ADbFF",weth,usdc] ,"0x4954Db6391F4feB5468b6B943D4935353596aEC9": ["0x4954Db6391F4feB5468b6B943D4935353596aEC9",usdc] ,"0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0": ["0x1E18821E69B9FAA8e6e75DFFe54E7E25754beDa0","0xEF69B5697f2Fb0345cC680210fD39b593a2f9684","0x6B3595068778DD592e39A122f4f5a5cF09C90fE2",weth,usdc] ,"0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d": ["0xfC1E690f61EFd961294b3e1Ce3313fBD8aa4f85d","0xba100000625a3754423978a60c9317c58a424e3D",weth,usdc] ,"0xBA50933C268F567BDC86E1aC131BE072C6B0b71a": ["0xBA50933C268F567BDC86E1aC131BE072C6B0b71a",weth,usdc] ,"0x6102407f07029892eB5Ff02164ADFaFb85f4d222": ["0x6102407f07029892eB5Ff02164ADFaFb85f4d222",usdt] ,"0x85034b3b2e292493D029443455Cc62ab669573B3": ["0x85034b3b2e292493D029443455Cc62ab669573B3","0x1f9840a85d5aF5bf1D1762F925BDADdC4201F984",weth,usdc] ,"0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8": ["0xb220D53F7D0f52897Bcf25E47c4c3DC0bac344F8", usdc] ,"0x383518188C0C6d7730D91b2c03a03C837814a899": ["0x383518188C0C6d7730D91b2c03a03C837814a899",dai] ,"0xafcE9B78D409bF74980CACF610AFB851BF02F257": ["0xafcE9B78D409bF74980CACF610AFB851BF02F257",wbtc,weth,usdc] }, "uniswap": { } } elif chain.id == 56: ROUTERS = { "pancakeswapv2": Contract("0x10ED43C718714eb63d5aA57B78B54704E256024E"), "pancakeswapv1": Contract("0x05fF2B0DB69458A0750badebc4f9e13aDd608C7F") } FACTORIES = { "pancakeswapv2": "0xcA143Ce32Fe78f1f7019d7d551a6402fC5350c73", "pancakeswapv1": "0xBCfCcbde45cE874adCB698cC183deBcF17952812" } SPECIAL_PATHS = { "pancakeswapv2": { }, "pancakeswapv1": { } } elif chain.id == 137: ROUTERS = { "quickswap": Contract("0xa5E0829CaCEd8fFDD4De3c43696c57F7D7A678ff") } FACTORIES = { "quickswap": "0x5757371414417b8C6CAad45bAeF941aBc7d3Ab32", } SPECIAL_PATHS = { "quickswap": { } } FACTORY_TO_ROUTER = {FACTORIES[name]: ROUTERS[name] for name in FACTORIES} FACTORY_TO_PROTOCOL = {FACTORIES[name]: name for name in FACTORIES} @ttl_cache(ttl=36000) def get_price(token_in, token_out=usdc, router="uniswap", block=None, paired_against=weth): """ Calculate a price based on 
Uniswap Router quote for selling one `token_in`. Always uses intermediate WETH pair if `[token_in,weth,token_out]` swap path available. """ if chain.id == 56 and token_out == usdc: busd = Contract("0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56") token_out = busd tokens = [str(token) for token in [token_in, token_out]] amount_in = 10 ** ypricemagic.utils.utils.get_decimals_with_override(tokens[0]) if str(token_in) in STABLECOINS: return 1 elif str(paired_against) in STABLECOINS and str(token_out) in STABLECOINS: path = [token_in, paired_against] elif weth in (token_in, token_out): path = [token_in, token_out] elif paired_against == sushi and token_out != sushi: path = [token_in,sushi,weth,token_out] elif str(token_in) in SPECIAL_PATHS[router].keys() and str(token_out) in STABLECOINS: path = SPECIAL_PATHS[router][str(token_in)] elif chain.id == 56: #bsc from .constants import cake, wbnb if wbnb in (token_in, token_out): path = [token_in, token_out] elif cake in (token_in, token_out): path = [token_in, token_out] else: path = [token_in,wbnb,token_out] elif chain.id == 137: #bsc from .constants import wmatic if wmatic in (token_in, token_out): path = [token_in, token_out] else: path = [token_in,wmatic,token_out] else: path = [token_in, weth, token_out] fees = 0.997 ** (len(path) - 1) if router in ROUTERS: router = ROUTERS[router] try: quote = router.getAmountsOut(amount_in, path, block_identifier=block) amount_out = quote[-1] / 10 ** ypricemagic.utils.utils.get_decimals_with_override(str(path[-1])) return amount_out / fees except ValueError as e: return @ttl_cache(ttl=600) def get_price_v1(asset, block=None): factory = Contract("0xc0a47dFe034B400B47bDaD5FecDa2621de6c4d95") try: exchange = Contract(factory.getExchange(asset)) eth_bought = exchange.getTokenToEthInputPrice(10 ** ypricemagic.utils.utils.get_decimals_with_override(asset), block_identifier=block) exchange = Contract(factory.getExchange(usdc)) usdc_bought = exchange.getEthToTokenInputPrice(eth_bought, block_identifier=block) / 1e6 fees = 0.997 ** 2 return usdc_bought / fees except (ContractNotFound, ValueError) as e: pass @memory.cache() def is_uniswap_pool(address): try: return Contract(address).factory() in FACTORY_TO_ROUTER except (ValueError, OverflowError, AttributeError): pass return False @ttl_cache(ttl=600) def lp_price(address, block=None): """ Get Uniswap/Sushiswap LP token price. 
""" def extrapolate_balance_if_needed(): nonlocal balances if balances[0] and not balances[1]: balances[1] = balances[0] if balances[1] and not balances[0]: balances[0] = balances[1] return balances pair = Contract(address) if chain.id not in [56, 137]: # No multicall2 on bsc or poly factory, token0, token1, supply, reserves = fetch_multicall( [pair, "factory"], [pair, "token0"], [pair, "token1"], [pair, "totalSupply"], [pair, "getReserves"], block=block ) else: factory = pair.factory(block_identifier = block) token0 = pair.token0(block_identifier = block) token1 = pair.token1(block_identifier = block) supply = pair.totalSupply(block_identifier = block) reserves = pair.getReserves(block_identifier = block) router = FACTORY_TO_PROTOCOL[factory] tokens = [ypricemagic.utils.utils.Contract_with_erc20_fallback(token) for token in [token0, token1]] price0 = get_price(tokens[0], paired_against=tokens[1], router=router, block=block) price1 = get_price(tokens[1], paired_against=tokens[0], router=router, block=block) prices = [price0,price1] scales = [10 ** ypricemagic.utils.utils.get_decimals_with_override(str(token)) for token in tokens] supply = supply / 1e18 try: balances = [res / scale * price for res, scale, price in zip(reserves, scales, prices)] except TypeError as e: # If can't get price via router, try to get from elsewhere if not price0: try: price0 = ypricemagic.magic.get_price(tokens[0], block) except ypricemagic.magic.PriceError: price0 is None if not price1: try: price1 = ypricemagic.magic.get_price(tokens[1], block) except ypricemagic.magic.PriceError: price1 is None prices = [price0,price1] balances = [None,None] # [res / scale * price for res, scale, price in zip(reserves, scales, prices)] if price0: balances[0] = reserves[0] / scales[0] * price0 if price1: balances[1] = reserves[1] / scales[1] * price1 balances = extrapolate_balance_if_needed() try: return sum(balances) / supply except TypeError: return
43.388626
205
0.68935
0
0
0
0
5,327
0.581868
0
0
2,885
0.315128
b95bf173c71497f893fb19ff1c8e2576967d5c36
611
py
Python
configs/configuration_textrnn.py
haodingkui/semeval2020-task5-subtask1
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
[ "MIT" ]
2
2020-08-19T12:32:21.000Z
2021-11-08T15:50:08.000Z
configs/configuration_textrnn.py
haodingkui/semeval2020-task5-subtask1
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
[ "MIT" ]
null
null
null
configs/configuration_textrnn.py
haodingkui/semeval2020-task5-subtask1
bfd0c808c6b1de910d6f58ea040a13442b4bcdca
[ "MIT" ]
1
2020-08-19T12:32:48.000Z
2020-08-19T12:32:48.000Z
""" TextRNN model configuration """ class TextRNNConfig(object): def __init__( self, vocab_size=30000, pretrained_embedding=None, embedding_matrix=None, embedding_dim=300, embedding_dropout=0.3, lstm_hidden_size=128, output_dim=1, **kwargs ): self.pretrained_embedding = pretrained_embedding self.embedding_matrix = embedding_matrix self.embedding_dim = embedding_dim self.embedding_dropout = embedding_dropout self.lstm_hidden_size = lstm_hidden_size self.output_dim = output_dim
27.772727
56
0.657938
573
0.937807
0
0
0
0
0
0
35
0.057283
b95cfef2234f9a61adbaa0afe2564f0d012dea38
38
py
Python
settings/debug_members.py
akorzunin/telegram_auction_bot
d4d5042614ea11f8085815d8f9fb8b6fbebcfab0
[ "Apache-2.0" ]
null
null
null
settings/debug_members.py
akorzunin/telegram_auction_bot
d4d5042614ea11f8085815d8f9fb8b6fbebcfab0
[ "Apache-2.0" ]
null
null
null
settings/debug_members.py
akorzunin/telegram_auction_bot
d4d5042614ea11f8085815d8f9fb8b6fbebcfab0
[ "Apache-2.0" ]
null
null
null
DEBUG_MEMBER_LIST = [ 503131177, ]
12.666667
21
0.684211
0
0
0
0
0
0
0
0
0
0
b95d4a692bcb2991f35a3f589cde3570c4033e09
29,218
py
Python
metrics/pointops/pointops_util.py
JiazeWang/SP-GAN
455003f78b1160ebe0a2056005b069808c0df35b
[ "MIT" ]
73
2021-05-11T12:00:29.000Z
2022-03-31T09:40:12.000Z
metrics/pointops/pointops_util.py
JiazeWang/SP-GAN
455003f78b1160ebe0a2056005b069808c0df35b
[ "MIT" ]
6
2021-08-18T13:03:43.000Z
2022-03-30T04:48:29.000Z
metrics/pointops/pointops_util.py
JiazeWang/SP-GAN
455003f78b1160ebe0a2056005b069808c0df35b
[ "MIT" ]
13
2021-08-28T20:09:13.000Z
2022-03-20T12:42:51.000Z
from typing import Tuple import torch from torch.autograd import Function import torch.nn as nn from metrics.pointops import pointops_cuda import numpy as np class FurthestSampling(Function): @staticmethod def forward(ctx, xyz, m): """ input: xyz: (b, n, 3) and n > m, m: int32 output: idx: (b, m) """ assert xyz.is_contiguous() b, n, _ = xyz.size() idx = torch.cuda.IntTensor(b, m) temp = torch.cuda.FloatTensor(b, n).fill_(1e10) pointops_cuda.furthestsampling_cuda(b, n, m, xyz, temp, idx) return idx @staticmethod def backward(xyz, a=None): return None, None furthestsampling = FurthestSampling.apply class Gathering(Function): @staticmethod def forward(ctx, features, idx): """ input: features: (b, c, n), idx : (b, m) tensor output: (b, c, m) """ assert features.is_contiguous() assert idx.is_contiguous() b, c, n = features.size() m = idx.size(1) output = torch.cuda.FloatTensor(b, c, m) pointops_cuda.gathering_forward_cuda(b, c, n, m, features, idx, output) ctx.for_backwards = (idx, c, n) return output @staticmethod def backward(ctx, grad_out): idx, c, n = ctx.for_backwards b, m = idx.size() grad_features = torch.cuda.FloatTensor(b, c, n).zero_() grad_out_data = grad_out.data.contiguous() pointops_cuda.gathering_backward_cuda(b, c, n, m, grad_out_data, idx, grad_features.data) return grad_features, None gathering = Gathering.apply class NearestNeighbor(Function): @staticmethod def forward(ctx, unknown: torch.Tensor, known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Find the three nearest neighbors of unknown in known input: unknown: (b, n, 3), known: (b, m, 3) output: dist2: (b, n, 3) l2 distance to the three nearest neighbors idx: (b, n, 3) index of 3 nearest neighbors """ assert unknown.is_contiguous() assert known.is_contiguous() b, n, _ = unknown.size() m = known.size(1) dist2 = torch.cuda.FloatTensor(b, n, 3) idx = torch.cuda.IntTensor(b, n, 3) pointops_cuda.nearestneighbor_cuda(b, n, m, unknown, known, dist2, idx) return torch.sqrt(dist2), idx @staticmethod def backward(ctx, a=None, b=None): return None, None nearestneighbor = NearestNeighbor.apply class Interpolation(Function): @staticmethod def forward(ctx, features: torch.Tensor, idx: torch.Tensor, weight: torch.Tensor) -> torch.Tensor: """ Performs weight linear interpolation on 3 features input: features: (b, c, m) features descriptors to be interpolated from idx: (b, n, 3) three nearest neighbors of the target features in features weight: (b, n, 3) weights output: (b, c, n) tensor of the interpolated features """ assert features.is_contiguous() assert idx.is_contiguous() assert weight.is_contiguous() b, c, m = features.size() n = idx.size(1) ctx.interpolation_for_backward = (idx, weight, m) output = torch.cuda.FloatTensor(b, c, n) pointops_cuda.interpolation_forward_cuda(b, c, m, n, features, idx, weight, output) return output @staticmethod def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ input: grad_out: (b, c, n) output: grad_features: (b, c, m), None, None """ idx, weight, m = ctx.interpolation_for_backward b, c, n = grad_out.size() grad_features = torch.cuda.FloatTensor(b, c, m).zero_() grad_out_data = grad_out.data.contiguous() pointops_cuda.interpolation_backward_cuda(b, c, n, m, grad_out_data, idx, weight, grad_features.data) return grad_features, None, None interpolation = Interpolation.apply class Grouping(Function): @staticmethod def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: """ input: features: (b, c, n), idx : (b, m, nsample) 
containing the indicies of features to group with output: (b, c, m, nsample) """ assert features.is_contiguous() assert idx.is_contiguous() b, c, n = features.size() _, m, nsample = idx.size() output = torch.cuda.FloatTensor(b, c, m, nsample) pointops_cuda.grouping_forward_cuda(b, c, n, m, nsample, features, idx, output) ctx.for_backwards = (idx, n) return output @staticmethod def backward(ctx, grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ input: grad_out: (b, c, m, nsample) output: (b, c, n), None """ idx, n = ctx.for_backwards b, c, m, nsample = grad_out.size() grad_features = torch.cuda.FloatTensor(b, c, n).zero_() grad_out_data = grad_out.data.contiguous() pointops_cuda.grouping_backward_cuda(b, c, n, m, nsample, grad_out_data, idx, grad_features.data) return grad_features, None grouping = Grouping.apply class GroupingInt(Function): @staticmethod def forward(ctx, features: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: """ input: features: (b, c, n), idx : (b, m, nsample) containing the indicies of features to group with output: (b, c, m, nsample) """ assert features.is_contiguous() assert idx.is_contiguous() b, c, n = features.size() _, m, nsample = idx.size() output = torch.cuda.LongTensor(b, c, m, nsample) pointops_cuda.grouping_int_forward_cuda(b, c, n, m, nsample, features, idx, output) return output @staticmethod def backward(ctx, a=None): return None, None grouping_int = GroupingInt.apply class BallQuery(Function): @staticmethod def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor) -> torch.Tensor: """ input: radius: float, radius of the balls nsample: int, maximum number of features in the balls xyz: torch.Tensor, (b, n, 3) xyz coordinates of the features new_xyz: torch.Tensor, (b, m, 3) centers of the ball query output: (b, m, nsample) tensor with the indicies of the features that form the query balls """ assert xyz.is_contiguous() assert new_xyz.is_contiguous() b, n, _ = xyz.size() m = new_xyz.size(1) idx = torch.cuda.IntTensor(b, m, nsample).zero_() pointops_cuda.ballquery_cuda(b, n, m, radius, nsample, new_xyz, xyz, idx) return idx @staticmethod def backward(ctx, a=None): return None, None, None, None ballquery = BallQuery.apply class FeatureDistribute(Function): @staticmethod def forward(ctx, max_xyz: torch.Tensor, xyz: torch.Tensor) -> torch.Tensor: """ :param ctx: :param max_xyz: (b, n, 3) :param xyz: (b, m, 3) :return: distribute_idx: (b, m) """ assert max_xyz.is_contiguous() assert xyz.is_contiguous() b, n, _ = max_xyz.size() m = xyz.size(1) distribute_idx = torch.cuda.IntTensor(b, m).zero_() pointops_cuda.featuredistribute_cuda(b, n, m, max_xyz, xyz, distribute_idx) return distribute_idx @staticmethod def backward(ctx, a=None): return None, None featuredistribute = FeatureDistribute.apply class FeatureGather(Function): @staticmethod def forward(ctx, max_feature: torch.Tensor, distribute_idx: torch.Tensor) -> torch.Tensor: ''' :param ctx: :param max_feature: (b, c, n) :param distribute_idx: (b, m) :return: distribute_feature: (b, c, m) ''' assert max_feature.is_contiguous() assert distribute_idx.is_contiguous() b, c, n = max_feature.size() m = distribute_idx.size(1) distribute_feature = torch.cuda.FloatTensor(b, c, m).zero_() pointops_cuda.featuregather_forward_cuda(b, n, m, c, max_feature, distribute_idx, distribute_feature) ctx.for_backwards = (distribute_idx, n) return distribute_feature @staticmethod def backward(ctx, grad_distribute_feature: torch.Tensor): ''' :param ctx: :param grad_distribute_feature: (b, 
c, m) :return: grad_max_feature: (b, c, n), None ''' distribute_idx, n = ctx.for_backwards b, c, m = grad_distribute_feature.size() grad_max_feature = torch.cuda.FloatTensor(b, c, n).zero_() grad_distribute_feature_data = grad_distribute_feature.data.contiguous() pointops_cuda.featuregather_backward_cuda(b, n, m, c, grad_distribute_feature_data, distribute_idx, grad_max_feature.data) return grad_max_feature, None featuregather = FeatureGather.apply class LabelStatBallRange(Function): @staticmethod def forward(ctx, radius: float, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor) -> torch.Tensor: ''' :param ctx: :param radius: :param xyz: (b, n, 3) :param new_xyz: (b, m, 3) :param label_stat: (b, n, nclass) :return: new_label_stat: (b, m, nclass) ''' assert xyz.is_contiguous() assert new_xyz.is_contiguous() assert label_stat.is_contiguous() b, n, nclass = label_stat.size() m = new_xyz.size(1) new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_() pointops_cuda.labelstat_ballrange_cuda(b, n, m, radius, nclass, new_xyz, xyz, label_stat, new_label_stat) return new_label_stat @staticmethod def backward(ctx, a=None): return None, None, None, None labelstat_ballrange = LabelStatBallRange.apply class LabelStatIdx(Function): @staticmethod def forward(ctx, nsample: int, label_stat: torch.Tensor, idx: torch.Tensor) -> torch.Tensor: ''' :param ctx: :param nsample: :param label_stat: (b, n, nclass) :param idx: (b, m, nsample) :return: new_label_stat: (b, m, nclass) ''' assert label_stat.is_contiguous() assert idx.is_contiguous() b, n, nclass = label_stat.size() m = idx.size(1) new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_() pointops_cuda.labelstat_idx_cuda(b, n, m, nsample, nclass, label_stat, idx, new_label_stat) return new_label_stat @staticmethod def backward(ctx, a=None): return None, None, None labelstat_idx = LabelStatIdx.apply class LabelStatAndBallQuery(Function): @staticmethod def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor, label_stat: torch.Tensor): ''' :param ctx: :param radius: :param nsample: :param xyz: (b, n, 3) :param new_xyz: (b, m, 3) :param label_stat: (b, n, nclass) :return: new_label_stat: (b, m, nclass) idx: (b, m, nsample) ''' assert xyz.is_contiguous() assert new_xyz.is_contiguous() assert label_stat.is_contiguous() b, n, nclass = label_stat.size() m = new_xyz.size(1) new_label_stat = torch.cuda.IntTensor(b, m, nclass).zero_() idx = torch.cuda.IntTensor(b, m, nsample).zero_() pointops_cuda.labelstat_and_ballquery_cuda(b, n, m, radius, nsample, nclass, new_xyz, xyz, label_stat, idx, new_label_stat) return new_label_stat, idx @staticmethod def backward(ctx, a=None, b=None): return None, None, None, None, None labelstat_and_ballquery = LabelStatAndBallQuery.apply def pairwise_distances(x, y=None): ''' Input: x is a Nxd matrix y is an optional Mxd matirx Output: dist is a NxM matrix where dist[i,j] is the square norm between x[i,:] and y[j,:] if y is not given then use 'y=x'. i.e. 
dist[i,j] = ||x[i,:]-y[j,:]||^2 ''' x_norm = (x ** 2).sum(1).view(-1, 1) if y is not None: y_t = torch.transpose(y, 0, 1) y_norm = (y ** 2).sum(1).view(1, -1) else: y_t = torch.transpose(x, 0, 1) y_norm = x_norm.view(1, -1) dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t) import numpy as np return torch.clamp(dist, 0.0, np.inf) class KNNQueryNaive(Function): @staticmethod def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]: """ KNN Indexing input: nsample: int32, Number of neighbor xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods output: idx: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz b, m, _ = new_xyz.size() n = xyz.size(1) ''' idx = torch.zeros(b, m, nsample).int().cuda() for i in range(b): dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :]) [_, idxs] = torch.sort(dist, dim=1) idx[i, :, :] = idxs[:, 0:nsample] ''' # ''' # new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3) # xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3) # dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n) dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n) [_, idxs] = torch.sort(dist, dim=2) idx = idxs[:, :, 0:nsample].int() # ''' return idx @staticmethod def backward(ctx): return None, None, None knnquery_naive = KNNQueryNaive.apply class KNNQuery(Function): @staticmethod def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]: """ KNN Indexing input: nsample: int32, Number of neighbor xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods output: idx: (b, m, nsample) ( dist2: (b, m, nsample) ) """ if new_xyz is None: new_xyz = xyz assert xyz.is_contiguous() assert new_xyz.is_contiguous() b, m, _ = new_xyz.size() n = xyz.size(1) idx = torch.cuda.IntTensor(b, m, nsample).zero_() dist2 = torch.cuda.FloatTensor(b, m, nsample).zero_() pointops_cuda.knnquery_cuda(b, n, m, nsample, xyz, new_xyz, idx, dist2) return idx @staticmethod def backward(ctx, a=None): return None, None, None knnquery = KNNQuery.apply class KNNQueryExclude(Function): @staticmethod def forward(ctx, nsample: int, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> Tuple[torch.Tensor]: """ KNN Indexing input: nsample: int32, Number of neighbor xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods output: new_features: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz b, m, _ = new_xyz.size() n = xyz.size(1) ''' idx = torch.zeros(b, m, nsample).int().cuda() for i in range(b): dist = pairwise_distances(new_xyz[i, :, :], xyz[i, :, :]) [_, idxs] = torch.sort(dist, dim=1) idx[i, :, :] = idxs[:, 0:nsample] ''' # ''' # new_xyz_repeat = new_xyz.repeat(1, 1, n).view(b, m * n, 3) # xyz_repeat = xyz.repeat(1, m, 1).view(b, m * n, 3) # dist = (new_xyz_repeat - xyz_repeat).pow(2).sum(dim=2).view(b, m, n) dist = (new_xyz.repeat(1, 1, n).view(b, m * n, 3) - xyz.repeat(1, m, 1).view(b, m * n, 3)).pow(2).sum(dim=2).view(b, m, n) [_, idxs] = torch.sort(dist, dim=2) idx = idxs[:, :, 1:nsample+1].int() # ''' return idx @staticmethod def backward(ctx): return None, None, None knnquery_exclude = KNNQueryExclude.apply class Le_QueryAndGroup_SameSize(nn.Module): """ Groups with a ball query of radius parameters: radius: float32, Radius of ball nsample: int32, Maximum number of features to gather in the ball """ def __init__(self, radius=None, nsample=32, use_xyz=True): super(Le_QueryAndGroup_SameSize, 
self).__init__() self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: (b, n, 3) centriods features: (b, c, n) idx: idx of neighbors # idxs: (b, n) output: new_features: (b, c+3, m, nsample) # grouped_idxs: (b, m, nsample) """ assert xyz.size() == new_xyz.size() if new_xyz is None: new_xyz = xyz if idx is None: if self.radius is not None: idx = ballquery(self.radius, self.nsample, xyz, new_xyz) else: # idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample) idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample) xyz_trans = xyz.transpose(1, 2).contiguous() grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample) # grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample) grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) if features is not None: grouped_features = grouping(features, idx) # (b, c, m, nsample) if self.use_xyz: #new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le new_features = grouped_features # (b, c, m, nsample) else: new_features = grouped_features else: assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" new_features = grouped_xyz return grouped_xyz, new_features class QueryAndGroup(nn.Module): """ Groups with a ball query of radius parameters: radius: float32, Radius of ball nsample: int32, Maximum number of features to gather in the ball """ def __init__(self, radius=None, nsample=32, use_xyz=True): super(QueryAndGroup, self).__init__() self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods features: (b, c, n) idx: idx of neighbors # idxs: (b, n) output: new_features: (b, c+3, m, nsample) # grouped_idxs: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz if idx is None: if self.radius is not None: idx = ballquery(self.radius, self.nsample, xyz, new_xyz) else: # idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample) idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample) xyz_trans = xyz.transpose(1, 2).contiguous() grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample) # grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample) grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) if features is not None: grouped_features = grouping(features, idx) if self.use_xyz: new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) else: new_features = grouped_features else: assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" 
new_features = grouped_xyz return new_features class QueryAndGroup_Dilate(nn.Module): """ Groups with a ball query of radius parameters: radius: float32, Radius of ball nsample: int32, Maximum number of features to gather in the ball """ def __init__(self, radius=None, nsample=32, use_xyz=True): super(QueryAndGroup_Dilate, self).__init__() self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods features: (b, c, n) idx: idx of neighbors # idxs: (b, n) output: new_features: (b, c+3, m, nsample) # grouped_idxs: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz if idx is None: if self.radius is not None: idx = ballquery(self.radius, 2*self.nsample, xyz, new_xyz) else: # idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample) idx = knnquery(2*self.nsample, xyz, new_xyz) # (b, m, nsample) idx2 = np.array([i for i in range(2*self.nsample)]) np.random.shuffle(idx2) idx2 = idx2[:self.nsample] idx = idx[:, :, idx2] xyz_trans = xyz.transpose(1, 2).contiguous() grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample) # grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample) grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) if features is not None: grouped_features = grouping(features, idx) if self.use_xyz: new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) else: new_features = grouped_features else: assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" new_features = grouped_xyz return new_features class Le_QueryAndGroup(nn.Module): """ Groups with a ball query of radius parameters: radius: float32, Radius of ball nsample: int32, Maximum number of features to gather in the ball """ def __init__(self, radius=None, nsample=32, use_xyz=True): super(Le_QueryAndGroup, self).__init__() self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods features: (b, c, n) idx: idx of neighbors # idxs: (b, n) output: new_features: (b, c+3, m, nsample) # grouped_idxs: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz if idx is None: if self.radius is not None: idx = ballquery(self.radius, self.nsample, xyz, new_xyz) else: # idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample) idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample) xyz_trans = xyz.transpose(1, 2).contiguous() grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample) # grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample) grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) if features is not None: grouped_features = grouping(features, idx) # (b, c, m, nsample) if self.use_xyz: #new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le new_features = grouped_features # (b, c, m, nsample) else: new_features = grouped_features else: assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" 
new_features = grouped_xyz return grouped_xyz, new_features class Gen_QueryAndGroupXYZ(nn.Module): """ Groups with a ball query of radius parameters: radius: float32, Radius of ball nsample: int32, Maximum number of features to gather in the ball """ def __init__(self, radius=None, nsample=32, use_xyz=True): super(Gen_QueryAndGroupXYZ, self).__init__() self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz #def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor: def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None) -> torch.Tensor: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods features: (b, c, n) idx: idx of neighbors # idxs: (b, n) output: new_features: (b, c+3, m, nsample) # grouped_idxs: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz #if idx is None: if self.radius is not None: idx = ballquery(self.radius, self.nsample, xyz, new_xyz) else: idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample) xyz_trans = xyz.transpose(1, 2).contiguous() # BxNx3 -> Bx3xN grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample) return grouped_xyz class Le_QueryAndGroup_OnlyFeature(nn.Module): """ Groups with a ball query of radius parameters: radius: float32, Radius of ball nsample: int32, Maximum number of features to gather in the ball """ def __init__(self, radius=None, nsample=32, use_xyz=True): super(Le_QueryAndGroup_OnlyFeature, self).__init__() self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor = None, features: torch.Tensor = None, idx: torch.Tensor = None) -> torch.Tensor: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: (b, m, 3) centriods features: (b, c, n) idx: idx of neighbors # idxs: (b, n) output: new_features: (b, c+3, m, nsample) # grouped_idxs: (b, m, nsample) """ if new_xyz is None: new_xyz = xyz if idx is None: if self.radius is not None: idx = ballquery(self.radius, self.nsample, xyz, new_xyz) else: # idx = knnquery_naive(self.nsample, xyz, new_xyz) # (b, m, nsample) idx = knnquery(self.nsample, xyz, new_xyz) # (b, m, nsample) #xyz_trans = xyz.transpose(1, 2).contiguous() #grouped_xyz = grouping(xyz_trans, idx) # (b, 3, m, nsample) # grouped_idxs = grouping(idxs.unsqueeze(1).float(), idx).squeeze(1).int() # (b, m, nsample) #grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) if features is not None: grouped_features = grouping(features, idx) # (b, c, m, nsample) if self.use_xyz: #new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, m, nsample) # le new_features = grouped_features # (b, c, m, nsample) else: new_features = grouped_features else: assert self.use_xyz, "Cannot have not features and not use xyz as a feature!" 
new_features = grouped_xyz return new_features class GroupAll(nn.Module): """ Groups all features """ def __init__(self, use_xyz: bool = True): super(GroupAll, self).__init__() self.use_xyz = use_xyz def forward(self, xyz: torch.Tensor, new_xyz: torch.Tensor, features: torch.Tensor = None) -> Tuple[torch.Tensor]: """ input: xyz: (b, n, 3) coordinates of the features new_xyz: ignored torch features: (b, c, n) descriptors of the features output: new_features: (b, c+3, 1, N) tensor """ grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) if features is not None: grouped_features = features.unsqueeze(2) if self.use_xyz: new_features = torch.cat([grouped_xyz, grouped_features], dim=1) # (b, c+3, 1, n) else: new_features = grouped_features else: new_features = grouped_xyz return new_features
37.458974
145
0.585906
27,776
0.950647
0
0
14,171
0.485009
0
0
10,024
0.343076
b95d5c160689db0e0a64a0a455645d72081698d5
2,992
py
Python
core/src/zeit/cms/content/caching.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
5
2019-05-16T09:51:29.000Z
2021-05-31T09:30:03.000Z
core/src/zeit/cms/content/caching.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
107
2019-05-24T12:19:02.000Z
2022-03-23T15:05:56.000Z
core/src/zeit/cms/content/caching.py
rickdg/vivi
16134ac954bf8425646d4ad47bdd1f372e089355
[ "BSD-3-Clause" ]
3
2020-08-14T11:01:17.000Z
2022-01-08T17:32:19.000Z
from collections import defaultdict from logging import getLogger from operator import itemgetter from os import environ from time import time from zope.cachedescriptors.property import Lazy as cachedproperty from zeit.cms.content.sources import FEATURE_TOGGLES from zope.component import getUtility from zeit.connector.interfaces import IConnector from zeit.connector.filesystem import Connector log = getLogger(__name__) class ContentCache(object): @cachedproperty def cache(self): size = environ.get('CONTENT_CACHE_SIZE') check = environ.get('CONTENT_CACHE_CHECK') connector = getUtility(IConnector) if size is not None and type(connector) is Connector: self.size = int(size) self.check = int(check) if check is not None else self.size / 5 self.connector = connector self.cache = defaultdict(lambda: dict(used=0, mtimes={}, data={})) self.hits = self.misses = 0 log.info('initialized content cache (size %s)', size) return self.cache else: return None def get(self, unique_id, key, factory, suffix=''): cache = self.cache if cache is None or not FEATURE_TOGGLES.find('content_caching'): return factory() try: mtime = int(self.connector.mtime(unique_id, suffix)) except (ValueError, TypeError): mtime = None if mtime is None: return factory() obj = cache[unique_id] obj['used'] += 1 obj['last'] = time() if mtime != obj['mtimes'].get(suffix): obj['data'].clear() obj['mtimes'][suffix] = mtime cache = obj['data'] if key not in cache: cache[key] = factory() self.misses += 1 log.debug('added %s (%s)', key, mtime) if self.misses % self.check == 0: self.cleanup() else: self.hits += 1 return cache[key] def cleanup(self): cache = self.cache over = len(cache) - self.size log.info('size: %d/%d, hits: %d, misses: %d', over + self.size, self.size, self.hits, self.misses) if over > 0: log.debug('removing %d items', over) last = sorted((cache[uid]['last'], uid) for uid in cache) for _, (_, uid) in zip(range(over), last): del cache[uid] @property def usage(self): cache = self.cache stats = (dict(uid=uid, used=cache[uid]['used']) for uid in cache) return sorted(stats, key=itemgetter('used')) def info(self): cache = self.cache usage = {info['uid']: info['used'] for info in reversed(self.usage)} return dict( size=self.size, count=len(cache), hits=self.hits, misses=self.misses, usage=usage) __cache = ContentCache() get = __cache.get info = __cache.info
32.521739
78
0.57988
2,499
0.835227
0
0
827
0.276404
0
0
235
0.078543
b95e87663683cd1ca4cf5da88872ac29da6e83c7
1,177
py
Python
genesis/project.py
genialis/genesis-genapi
dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a
[ "Apache-2.0" ]
3
2020-01-24T17:03:23.000Z
2021-03-16T03:20:31.000Z
genesis/project.py
genialis/genesis-genapi
dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a
[ "Apache-2.0" ]
1
2018-02-15T19:33:00.000Z
2018-02-15T19:33:00.000Z
genesis/project.py
genialis/genesis-genapi
dfe9bcc8b332a8b9873db4ab9994b0cc10eb209a
[ "Apache-2.0" ]
6
2015-05-27T10:14:46.000Z
2021-01-01T06:35:40.000Z
"""Project""" from __future__ import absolute_import, division, print_function, unicode_literals class GenProject(object): """Genesais project annotation.""" def __init__(self, data, gencloud): for field in data: setattr(self, field, data[field]) self.gencloud = gencloud self.id = getattr(self, 'id', None) # pylint: disable=invalid-name self.name = getattr(self, 'name', None) def data_types(self): """Return a list of data types.""" data = self.gencloud.project_data(self.id) return sorted(set(d.type for d in data)) def data(self, **query): """Query for Data object annotation.""" data = self.gencloud.project_data(self.id) query['case_ids__contains'] = self.id ids = set(d['id'] for d in self.gencloud.api.dataid.get(**query)['objects']) return [d for d in data if d.id in ids] def find(self, filter_str): """Filter Data object annotation.""" raise NotImplementedError() def __str__(self): return self.name or 'n/a' def __repr__(self): return u"GenProject: {} - {}".format(self.id, self.name)
30.973684
84
0.619371
1,077
0.915038
0
0
0
0
0
0
256
0.217502