Dataset schema:

    column            type    details
    _id               string  lengths 2 to 7
    title             string  lengths 1 to 88
    partition         string  3 classes
    text              string  lengths 75 to 19.8k
    language          string  1 class
    meta_information  dict
q3900
with_inverse
train
def with_inverse(points, noise):
    """ Smooths a set of points

    It smooths them twice, once in the given order, and once in the
    reverse order. The first half of the result is taken from the
    reverse pass and the second half from the forward pass.

    Args:
        points (:obj:`list` of :obj:`Point`)
        noise (float): Expected noise, the higher it is the more the
            path will be smoothed.
    Returns:
        :obj:`list` of :obj:`Point`
    """
    n_points = len(points) // 2  # integer division keeps this a valid index on Python 3
    break_point = n_points

    points_part = copy.deepcopy(points)
    points_part = list(reversed(points_part))

    part = kalman_filter(points_part, noise)
    total = kalman_filter(points, noise)
    result = list(reversed(part))[:break_point] + total[break_point:]
    result[break_point] = point_mean(part[break_point], total[break_point])
    return result
python
{ "resource": "" }
q3901
temporal_segmentation
train
def temporal_segmentation(segments, min_time):
    """ Segments based on time distant points

    Args:
        segments (:obj:`list` of :obj:`list` of :obj:`Point`): segment points
        min_time (int): minimum required time for segmentation
    """
    final_segments = []
    for segment in segments:
        final_segments.append([])
        for point in segment:
            if point.dt > min_time:
                final_segments.append([])
            final_segments[-1].append(point)

    return final_segments
python
{ "resource": "" }
q3902
correct_segmentation
train
def correct_segmentation(segments, clusters, min_time):
    """ Corrects the predicted segmentation

    This process prevents over-segmentation

    Args:
        segments (:obj:`list` of :obj:`list` of :obj:`Point`): segments to correct
        clusters (:obj:`list` of :obj:`list` of :obj:`Point`): clusters between segments
        min_time (int): minimum required time for segmentation
    """
    result_segments = []
    prev_segment = None
    for i, segment in enumerate(segments):
        if len(segment) <= 1:
            # skip degenerate segments; the original guard (`>= 1: continue`)
            # skipped every non-empty segment, discarding all the data
            continue
        cluster = clusters[i]
        if prev_segment is None:
            prev_segment = segment
        else:
            cluster_dt = 0
            if len(cluster) > 0:
                cluster_dt = abs(cluster[0].time_difference(cluster[-1]))

            if cluster_dt <= min_time:
                prev_segment.extend(segment)
            else:
                prev_segment.append(segment[0])
                result_segments.append(prev_segment)
                prev_segment = segment
    if prev_segment is not None:
        result_segments.append(prev_segment)

    return result_segments
python
{ "resource": "" }
q3903
spatiotemporal_segmentation
train
def spatiotemporal_segmentation(points, eps, min_time):
    """ Splits a set of points into multiple sets of points based on
        spatio-temporal stays

    DBSCAN is used to predict possible segmentations; furthermore, we
    check if each cluster is long enough in time (>= min_time). If
    that's the case, the segmentation is considered valid.

    When segmenting, the last point of the ith segment will be the same
    as that of the (i-1)th segment.

    Segments are identified through clusters. The last point of a
    cluster that comes after a sub-segment A will be present in the
    sub-segment A.

    Args:
        points (:obj:`list` of :obj:`Point`): segment's points
        eps (float): Epsilon to feed to the DBSCAN algorithm.
            Maximum distance between two samples, to be considered in
            the same cluster.
        min_time (float): Minimum time of a stay
    Returns:
        :obj:`list` of :obj:`list` of :obj:`Point`: Initial set of
            points in different segments
    """
    # min time / sample rate
    dt_average = np.median([point.dt for point in points])
    min_samples = int(min_time / dt_average)  # DBSCAN expects an integer sample count

    data = [point.gen3arr() for point in points]
    data = StandardScaler().fit_transform(data)
    print('min_samples: %d' % min_samples)
    db_cluster = DBSCAN(eps=eps, min_samples=min_samples).fit(data)
    labels = db_cluster.labels_
    n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)

    segments = [[] for _ in range(n_clusters_ + 1)]
    clusters = [[] for _ in range(n_clusters_ + 1)]
    current_segment = 0

    print('clusters: %d' % n_clusters_)

    if n_clusters_ == 1:
        segments = temporal_segmentation([points], min_time)
        return [segment for segment in segments if len(segment) > 1]

    # split segments identified with dbscan
    for i, label in enumerate(labels):
        if label != -1 and label + 1 != current_segment:
            current_segment = label + 1
        point = points[i]
        if label == -1:
            segments[current_segment].append(point)
        else:
            clusters[label + 1].append(point)

    if len(segments) == 0 or sum([len(s) for s in segments]) == 0:
        # the original condition was missing `== 0`, which replaced any
        # valid segmentation with the whole track
        segments = [points]

    segments = temporal_segmentation(segments, min_time)
    return [segment for segment in segments if len(segment) > 1]
python
{ "resource": "" }
q3904
kalman_filter
train
def kalman_filter(points, noise):
    """ Smooths points with a Kalman filter

    See https://github.com/open-city/ikalman

    Args:
        points (:obj:`list` of :obj:`Point`): points to smooth
        noise (float): expected noise
    """
    kalman = ikalman.filter(noise)
    for point in points:
        kalman.update_velocity2d(point.lat, point.lon, point.dt)
        (lat, lon) = kalman.get_lat_long()
        point.lat = lat
        point.lon = lon
    return points
python
{ "resource": "" }
q3905
learn_transportation_mode
train
def learn_transportation_mode(track, clf):
    """ Inserts transportation modes of a track into a classifier

    Args:
        track (:obj:`Track`)
        clf (:obj:`Classifier`)
    """
    for segment in track.segments:
        tmodes = segment.transportation_modes
        points = segment.points
        features = []
        labels = []
        for tmode in tmodes:
            points_part = points[tmode['from']:tmode['to']]
            if len(points_part) > 0:
                features.append(extract_features_2(points_part))
                labels.append(tmode['label'])
        clf.learn(features, labels)
python
{ "resource": "" }
q3906
speed_difference
train
def speed_difference(points):
    """ Computes the speed difference between each pair of adjacent points

    Args:
        points (:obj:`list` of :obj:`Point`)
    Returns:
        :obj:`list` of float: Speed difference at each point; the first
            element is 0
    """
    data = [0]
    for before, after in pairwise(points):
        data.append(before.vel - after.vel)
    return data
python
{ "resource": "" }
q3907
acc_difference
train
def acc_difference(points):
    """ Computes the acceleration difference between each pair of adjacent points

    Args:
        points (:obj:`list` of :obj:`Point`)
    Returns:
        :obj:`list` of float: Acceleration difference at each point; the
            first element is 0
    """
    data = [0]
    for before, after in pairwise(points):
        data.append(before.acc - after.acc)
    return data
python
{ "resource": "" }
q3908
detect_changepoints
train
def detect_changepoints(points, min_time, data_processor=acc_difference):
    """ Detects changepoints on points that have at least a specific duration

    Args:
        points (:obj:`list` of :obj:`Point`)
        min_time (float): Min time that a sub-segment, bounded by two
            changepoints, must have
        data_processor (function): Function to extract data to feed to
            the changepoint algorithm. Defaults to `acc_difference`
    Returns:
        :obj:`list` of int: Indexes of changepoints
    """
    data = data_processor(points)
    changepoints = pelt(normal_mean(data, np.std(data)), len(data))
    changepoints.append(len(points) - 1)

    result = []
    for start, end in pairwise(changepoints):
        time_diff = points[end].time_difference(points[start])
        if time_diff > min_time:
            result.append(start)

    # adds the first point
    result.append(0)
    # adds the last changepoint detected
    result.append(len(points) - 1)
    return sorted(list(set(result)))
python
{ "resource": "" }
q3909
group_modes
train
def group_modes(modes):
    """ Groups consecutive transportation modes with the same label into one

    Args:
        modes (:obj:`list` of :obj:`dict`)
    Returns:
        :obj:`list` of :obj:`dict`
    """
    if len(modes) > 0:
        previous = modes[0]
        grouped = []
        for changep in modes[1:]:
            if changep['label'] != previous['label']:
                previous['to'] = changep['from']
                grouped.append(previous)
                previous = changep
        previous['to'] = modes[-1]['to']
        grouped.append(previous)
        return grouped
    else:
        return modes
python
{ "resource": "" }
q3910
speed_clustering
train
def speed_clustering(clf, points, min_time):
    """ Transportation mode inference, based on changepoint segmentation

    Args:
        clf (:obj:`Classifier`): Classifier to use
        points (:obj:`list` of :obj:`Point`)
        min_time (float): Min time, in seconds, before doing another segmentation
    Returns:
        :obj:`list` of :obj:`dict`
    """
    # get changepoint indexes
    changepoints = detect_changepoints(points, min_time)

    # info for each changepoint
    cp_info = []
    for i in range(0, len(changepoints) - 1):
        from_index = changepoints[i]
        to_index = changepoints[i + 1]
        info = classify(clf, points[from_index:to_index], min_time, from_index, to_index)
        if info:
            cp_info.append(info)

    return group_modes(cp_info)
python
{ "resource": "" }
q3911
distance
train
def distance(p_a, p_b):
    """ Euclidean distance between two points

    Args:
        p_a (:obj:`Point`)
        p_b (:obj:`Point`)
    Returns:
        float: distance, in degrees
    """
    return sqrt((p_a.lat - p_b.lat) ** 2 + (p_a.lon - p_b.lon) ** 2)
python
{ "resource": "" }
q3912
point_line_distance
train
def point_line_distance(point, start, end):
    """ Distance from a point to a line formed by two points

    Args:
        point (:obj:`Point`)
        start (:obj:`Point`): line point
        end (:obj:`Point`): line point
    Returns:
        float: distance to line, in degrees
    """
    if start == end:
        return distance(point, start)
    else:
        un_dist = abs(
            (end.lat - start.lat) * (start.lon - point.lon) -
            (start.lat - point.lat) * (end.lon - start.lon)
        )
        n_dist = sqrt(
            (end.lat - start.lat) ** 2 + (end.lon - start.lon) ** 2
        )
        if n_dist == 0:
            return 0
        else:
            return un_dist / n_dist
python
{ "resource": "" }
q3913
drp
train
def drp(points, epsilon):
    """ Ramer-Douglas-Peucker simplification

    Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm

    Args:
        points (:obj:`list` of :obj:`Point`)
        epsilon (float): drp threshold
    Returns:
        :obj:`list` of :obj:`Point`
    """
    dmax = 0.0
    index = 0
    for i in range(1, len(points) - 1):
        dist = point_line_distance(points[i], points[0], points[-1])
        if dist > dmax:
            index = i
            dmax = dist
    if dmax > epsilon:
        return drp(points[:index + 1], epsilon)[:-1] + drp(points[index:], epsilon)
    else:
        return [points[0], points[-1]]
python
{ "resource": "" }
q3914
td_sp
train
def td_sp(points, speed_threshold):
    """ Top-Down Speed-Based Trajectory Compression Algorithm

    Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf

    Args:
        points (:obj:`list` of :obj:`Point`): trajectory or part of it
        speed_threshold (float): max speed error, in km/h
    Returns:
        :obj:`list` of :obj:`Point`, compressed trajectory
    """
    if len(points) <= 2:
        return points
    else:
        max_speed_threshold = 0
        found_index = 0
        for i in range(1, len(points) - 1):
            dt1 = time_dist(points[i], points[i - 1])
            if dt1 == 0:
                dt1 = 0.000000001
            vim = loc_dist(points[i], points[i - 1]) / dt1
            dt2 = time_dist(points[i + 1], points[i])
            if dt2 == 0:
                dt2 = 0.000000001
            vi_ = loc_dist(points[i + 1], points[i]) / dt2
            if abs(vi_ - vim) > max_speed_threshold:
                max_speed_threshold = abs(vi_ - vim)
                found_index = i
        if max_speed_threshold > speed_threshold:
            one = td_sp(points[:found_index], speed_threshold)
            two = td_sp(points[found_index:], speed_threshold)
            one.extend(two)
            return one
        else:
            return [points[0], points[-1]]
python
{ "resource": "" }
q3915
td_tr
train
def td_tr(points, dist_threshold):
    """ Top-Down Time-Ratio Trajectory Compression Algorithm

    Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf

    Args:
        points (:obj:`list` of :obj:`Point`): trajectory or part of it
        dist_threshold (float): max distance error, in meters
    Returns:
        :obj:`list` of :obj:`Point`, compressed trajectory
    """
    if len(points) <= 2:
        return points
    else:
        max_dist_threshold = 0
        found_index = 0
        delta_e = time_dist(points[-1], points[0]) * I_3600
        d_lat = points[-1].lat - points[0].lat
        d_lon = points[-1].lon - points[0].lon

        for i in range(1, len(points) - 1):
            delta_i = time_dist(points[i], points[0]) * I_3600
            di_de = delta_i / delta_e
            point = Point(
                points[0].lat + d_lat * di_de,
                points[0].lon + d_lon * di_de,
                None
            )
            dist = loc_dist(points[i], point)
            if dist > max_dist_threshold:
                max_dist_threshold = dist
                found_index = i

        if max_dist_threshold > dist_threshold:
            one = td_tr(points[:found_index], dist_threshold)
            two = td_tr(points[found_index:], dist_threshold)
            one.extend(two)
            return one
        else:
            return [points[0], points[-1]]
python
{ "resource": "" }
q3916
spt
train
def spt(points, max_dist_error, max_speed_error):
    """ A combination of both `td_sp` and `td_tr`

    Detailed in "Spatiotemporal Compression Techniques for Moving Point
    Objects", Nirvana Meratnia and Rolf A. de By, 2004, in Advances in
    Database Technology - EDBT 2004: 9th International Conference on
    Extending Database Technology, Heraklion, Crete, Greece, March 14-18, 2004

    Args:
        points (:obj:`list` of :obj:`Point`)
        max_dist_error (float): max distance error, in meters
        max_speed_error (float): max speed error, in km/h
    Returns:
        :obj:`list` of :obj:`Point`
    """
    if len(points) <= 2:
        return points
    else:
        is_error = False
        e = 1
        while e < len(points) and not is_error:
            i = 1
            while i < e and not is_error:
                delta_e = time_dist(points[e], points[0]) * I_3600
                delta_i = time_dist(points[i], points[0]) * I_3600

                di_de = 0
                if delta_e != 0:
                    di_de = delta_i / delta_e

                d_lat = points[e].lat - points[0].lat
                d_lon = points[e].lon - points[0].lon
                point = Point(
                    points[0].lat + d_lat * di_de,
                    points[0].lon + d_lon * di_de,
                    None
                )

                dt1 = time_dist(points[i], points[i - 1])
                if dt1 == 0:
                    dt1 = 0.000000001
                dt2 = time_dist(points[i + 1], points[i])
                if dt2 == 0:
                    dt2 = 0.000000001

                v_i_1 = loc_dist(points[i], points[i - 1]) / dt1
                v_i = loc_dist(points[i + 1], points[i]) / dt2

                if loc_dist(points[i], point) > max_dist_error \
                        or abs(v_i - v_i_1) > max_speed_error:
                    is_error = True
                else:
                    i = i + 1
            if is_error:
                return [points[0]] + spt(points[i:len(points)], max_dist_error, max_speed_error)
            e = e + 1
        if not is_error:
            return [points[0], points[len(points) - 1]]
python
{ "resource": "" }
q3917
Track.generate_name
train
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
    """ Generates a name for the track

    The name is generated based on the date of the first point of the
    track, or, in case it doesn't exist, "EmptyTrack"

    Args:
        name_format (str, optional): Name format to give to the track,
            based on its start time. Defaults to DEFAULT_FILE_NAME_FORMAT
    Returns:
        str
    """
    if len(self.segments) > 0:
        return self.segments[0].points[0].time.strftime(name_format) + ".gpx"
    else:
        return "EmptyTrack"
python
{ "resource": "" }
q3918
Track.smooth
train
def smooth(self, strategy, noise):
    """ In-place smoothing of segments

    Returns:
        :obj:`Track`: self
    """
    print(noise)
    for segment in self.segments:
        segment.smooth(noise, strategy)
    return self
python
{ "resource": "" }
q3919
Track.segment
train
def segment(self, eps, min_time):
    """In-place segmentation of segments

    Spatio-temporal segmentation of each segment.
    The number of segments may increase after this step.

    Returns:
        This track
    """
    new_segments = []
    for segment in self.segments:
        segmented = segment.segment(eps, min_time)
        for seg in segmented:
            new_segments.append(Segment(seg))
    self.segments = new_segments
    return self
python
{ "resource": "" }
q3920
Track.simplify
train
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
    """ In-place simplification of segments

    Args:
        max_dist_error (float): Max distance error, in meters
        max_speed_error (float): Max speed error, in km/h
        topology_only (bool, optional): True to keep the topology,
            neglecting velocity and time accuracy (uses plain
            Ramer-Douglas-Peucker). False (default) to simplify segments
            keeping the velocity between points.
    Returns:
        This track
    """
    for segment in self.segments:
        segment.simplify(eps, max_dist_error, max_speed_error, topology_only)
    return self
python
{ "resource": "" }
q3921
Track.infer_transportation_mode
train
def infer_transportation_mode(self, clf, min_time):
    """In-place transportation mode inferring of segments

    Returns:
        This track
    """
    for segment in self.segments:
        segment.infer_transportation_mode(clf, min_time)
    return self
python
{ "resource": "" }
q3922
Track.to_trip
train
def to_trip(
        self,
        smooth,
        smooth_strategy,
        smooth_noise,
        seg,
        seg_eps,
        seg_min_time,
        simplify,
        simplify_max_dist_error,
        simplify_max_speed_error
):
    """In-place transformation of a track into a trip

    A trip is a more accurate depiction of reality than a track.
    For a track to become a trip it needs to go through the following steps:
        + noise removal
        + smoothing
        + spatio-temporal segmentation
        + simplification

    At the end of these steps we have a less noisy track, with fewer
    points, that holds the same information.
    It's required that each segment has its metrics calculated or has
    been preprocessed.

    Args:
        smooth (bool): True to smooth the track
        smooth_strategy: smoothing strategy to use
        smooth_noise (float): expected noise
        seg (bool): True to segment the track
        seg_eps (float): epsilon for the segmentation
        seg_min_time (float): minimum stay time for the segmentation
        simplify (bool): True to simplify the track
        simplify_max_dist_error (float): max distance error, in meters
        simplify_max_speed_error (float): max speed error, in km/h
    Returns:
        This Track instance
    """
    self.compute_metrics()
    self.remove_noise()

    print(smooth, seg, simplify)
    if smooth:
        self.compute_metrics()
        self.smooth(smooth_strategy, smooth_noise)

    if seg:
        self.compute_metrics()
        self.segment(seg_eps, seg_min_time)

    if simplify:
        self.compute_metrics()
        self.simplify(0, simplify_max_dist_error, simplify_max_speed_error)

    self.compute_metrics()
    return self
python
{ "resource": "" }
q3923
Track.infer_transportation_modes
train
def infer_transportation_modes(self, dt_threshold=10):
    """In-place transportation inferring of segments

    Returns:
        This track
    """
    self.segments = [
        segment.infer_transportation_mode(dt_threshold=dt_threshold)
        for segment in self.segments
    ]
    return self
python
{ "resource": "" }
q3924
Track.infer_location
train
def infer_location(
        self,
        location_query,
        max_distance,
        google_key,
        foursquare_client_id,
        foursquare_client_secret,
        limit
):
    """In-place location inferring of segments

    Returns:
        This track
    """
    self.segments = [
        segment.infer_location(
            location_query,
            max_distance,
            google_key,
            foursquare_client_id,
            foursquare_client_secret,
            limit
        )
        for segment in self.segments
    ]
    return self
python
{ "resource": "" }
q3925
Track.to_json
train
def to_json(self):
    """Converts track to a JSON serializable format

    Returns:
        Map with the name, segments and meta of the track.
    """
    return {
        'name': self.name,
        'segments': [segment.to_json() for segment in self.segments],
        'meta': self.meta
    }
python
{ "resource": "" }
q3926
Track.merge_and_fit
train
def merge_and_fit(self, track, pairings):
    """ Merges another track with this one, ordering the points based on a
        distance heuristic

    Args:
        track (:obj:`Track`): Track to merge with
        pairings: Pairs of (self segment index, track segment index, _)
    Returns:
        :obj:`Track`: self
    """
    for (self_seg_index, track_seg_index, _) in pairings:
        self_s = self.segments[self_seg_index]
        ss_start = self_s.points[0]
        track_s = track.segments[track_seg_index]
        tt_start = track_s.points[0]
        tt_end = track_s.points[-1]

        d_start = ss_start.distance(tt_start)
        d_end = ss_start.distance(tt_end)

        if d_start > d_end:
            track_s = track_s.copy()
            track_s.points = list(reversed(track_s.points))
        self_s.merge_and_fit(track_s)
    return self
python
{ "resource": "" }
q3927
Track.get_point_index
train
def get_point_index(self, point):
    """ Gets the index of the first segment that contains the given point

    Args:
        point (:obj:`Point`)
    Returns:
        (int, int): Segment id and point index in that segment;
            (-1, -1) if the point was not found
    """
    for i, segment in enumerate(self.segments):
        idx = segment.getPointIndex(point)
        if idx != -1:
            return i, idx
    return -1, -1
python
{ "resource": "" }
q3928
Track.bounds
train
def bounds(self, thr=0):
    """ Gets the bounds of this track

    Returns:
        (float, float, float, float): Bounds, with min latitude, min
            longitude, max latitude and max longitude
    """
    min_lat = float("inf")
    min_lon = float("inf")
    max_lat = -float("inf")
    max_lon = -float("inf")
    for segment in self.segments:
        milat, milon, malat, malon = segment.bounds(thr=thr)
        min_lat = min(milat, min_lat)
        min_lon = min(milon, min_lon)
        max_lat = max(malat, max_lat)
        max_lon = max(malon, max_lon)
    return min_lat, min_lon, max_lat, max_lon
python
{ "resource": "" }
q3929
Track.similarity
train
def similarity(self, track):
    """ Compares two tracks based on their topology

    This method compares the given track against this instance. It only
    verifies if the given track is close to this one, not the other way
    around

    Args:
        track (:obj:`Track`)
    Returns:
        Two-tuple with the global similarity between the tracks and an
        array with the similarity between segments
    """
    idx = index.Index()
    for i, segment in enumerate(self.segments):
        idx.insert(i, segment.bounds(), obj=segment)

    final_siml = []
    final_diff = []
    for i, segment in enumerate(track.segments):
        query = idx.intersection(segment.bounds(), objects=True)

        res_siml = []
        res_diff = []
        for result in query:
            siml, diff = segment_similarity(segment, result.object)
            res_siml.append(siml)
            res_diff.append((result.id, i, diff))

        if len(res_siml) > 0:
            final_siml.append(max(res_siml))
            final_diff.append(res_diff[np.argmax(res_siml)])
        else:
            final_siml.append(0)
            final_diff.append([])

    return np.mean(final_siml), final_diff
python
{ "resource": "" }
q3930
Track.to_gpx
train
def to_gpx(self):
    """Converts track to a GPX format

    Builds the GPX/XML representation directly, without an intermediate
    library

    Returns:
        A string with the GPX/XML track
    """
    gpx_segments = []
    for segment in self.segments:
        gpx_points = []
        for point in segment.points:
            time = ''
            if point.time:
                iso_time = point.time.isoformat().split('.')[0]
                time = '<time>%s</time>' % iso_time
            gpx_points.append(
                u'<trkpt lat="%f" lon="%f">%s</trkpt>' % (point.lat, point.lon, time)
            )
        points = u'\n\t\t\t'.join(gpx_points)
        gpx_segments.append(u'\t\t<trkseg>\n\t\t\t%s\n\t\t</trkseg>' % points)
    segments = u'\t\n'.join(gpx_segments)
    content = [
        u'<?xml version="1.0" encoding="UTF-8"?>',
        u'<gpx xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.topografix.com/GPX/1/0" xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd" version="1.0" creator="GatherMySteps">',
        u'\t<trk>',
        segments,
        u'\t</trk>',
        u'</gpx>'
    ]
    return u'\n'.join(content)
python
{ "resource": "" }
q3931
Track.timezone
train
def timezone(self, timezone=0):
    """ Sets the timezone of the entire track

    Args:
        timezone (int): Timezone hour delta
    Returns:
        :obj:`Track`: self
    """
    tz_dt = timedelta(hours=timezone)
    for segment in self.segments:
        for point in segment.points:
            point.time = point.time + tz_dt
    return self
python
{ "resource": "" }
q3932
Track.to_life
train
def to_life(self):
    """Converts track to LIFE format
    """
    buff = "--%s\n" % self.segments[0].points[0].time.strftime("%Y_%m_%d")

    def military_time(time):
        """ Converts time to military time

        Args:
            time (:obj:`datetime.datetime`)
        Returns:
            str: Time in the format 1245 (12 hours and 45 minutes)
        """
        return time.strftime("%H%M")

    def stay(buff, start, end, place):
        """ Creates a stay representation

        Args:
            start (:obj:`datetime.datetime` or str)
            end (:obj:`datetime.datetime` or str)
            place (:obj:`Location`)
        Returns:
            str
        """
        if not isinstance(start, str):
            start = military_time(start)
        if not isinstance(end, str):
            end = military_time(end)

        return "%s\n%s-%s: %s" % (buff, start, end, place.label)

    def trip(buff, segment):
        """ Creates a trip representation

        Args:
            buff (str): buffer
            segment (:obj:`Segment`)
        Returns:
            str: buffer and trip representation
        """
        trip = "%s-%s: %s -> %s" % (
            military_time(segment.points[0].time),
            military_time(segment.points[-1].time),
            segment.location_from.label,
            segment.location_to.label
        )

        t_modes = segment.transportation_modes
        if len(t_modes) == 1:
            trip = "%s [%s]" % (trip, t_modes[0]['label'])
        elif len(t_modes) > 1:
            modes = []
            for mode in t_modes:
                trip_from = military_time(segment.points[mode['from']].time)
                trip_to = military_time(segment.points[mode['to']].time)
                modes.append(" %s-%s: [%s]" % (trip_from, trip_to, mode['label']))
            trip = "%s\n%s" % (trip, "\n".join(modes))

        return "%s\n%s" % (buff, trip)

    last = len(self.segments) - 1
    for i, segment in enumerate(self.segments):
        if i == 0:
            buff = stay(
                buff,
                "0000",
                military_time(segment.points[0].time),
                segment.location_from
            )
        buff = trip(buff, segment)
        if i == last:  # `is last` relied on small-int caching
            buff = stay(
                buff,
                military_time(segment.points[-1].time),
                "2359",
                segment.location_to
            )
        else:
            next_seg = self.segments[i + 1]
            buff = stay(
                buff,
                military_time(segment.points[-1].time),
                military_time(next_seg.points[0].time),
                segment.location_to
            )
    return buff
python
{ "resource": "" }
q3933
Track.from_gpx
train
def from_gpx(file_path):
    """ Creates a Track from a GPX file.

    No preprocessing is done.

    Arguments:
        file_path (str): file path and name to the GPX file
    Return:
        :obj:`list` of :obj:`Track`
    """
    gpx = gpxpy.parse(open(file_path, 'r'))
    file_name = basename(file_path)

    tracks = []
    for i, track in enumerate(gpx.tracks):
        segments = []
        for segment in track.segments:
            segments.append(Segment.from_gpx(segment))

        if len(gpx.tracks) > 1:
            name = file_name + "_" + str(i)
        else:
            name = file_name
        tracks.append(Track(name, segments))
    return tracks
python
{ "resource": "" }
q3934
Track.from_json
train
def from_json(json):
    """Creates a Track from a JSON file.

    No preprocessing is done.

    Arguments:
        json: map with the keys: name (optional) and segments.
    Return:
        A track instance
    """
    segments = [Segment.from_json(s) for s in json['segments']]
    return Track(json['name'], segments).compute_metrics()
python
{ "resource": "" }
q3935
line
train
def line(p1, p2):
    """Creates a line from two points

    From http://stackoverflow.com/a/20679579

    Args:
        p1 ([float, float]): x and y coordinates
        p2 ([float, float]): x and y coordinates
    Returns:
        (float, float, float): the A, B and C coefficients of the line
            A*x + B*y = C
    """
    A = (p1[1] - p2[1])
    B = (p2[0] - p1[0])
    C = (p1[0] * p2[1] - p2[0] * p1[1])
    return A, B, -C
python
{ "resource": "" }
q3936
intersection
train
def intersection(L1, L2):
    """Intersects two lines

    Args:
        L1 ((float, float, float)): line coefficients, as returned by `line`
        L2 ((float, float, float)): line coefficients, as returned by `line`
    Returns:
        (float, float): x and y coordinates of the intersection, or
            False if the lines are parallel
    """
    D = L1[0] * L2[1] - L1[1] * L2[0]
    Dx = L1[2] * L2[1] - L1[1] * L2[2]
    Dy = L1[0] * L2[2] - L1[2] * L2[0]
    if D != 0:
        x = Dx / D
        y = Dy / D
        return x, y
    else:
        return False
python
{ "resource": "" }
q3937
closest_point
train
def closest_point(a, b, p):
    """Finds the closest point in a line segment

    Args:
        a ([float, float]): x and y coordinates. Line start
        b ([float, float]): x and y coordinates. Line end
        p ([float, float]): x and y coordinates. Point to find in the segment
    Returns:
        (float, float): x and y coordinates of the closest point
    """
    ap = [p[0] - a[0], p[1] - a[1]]
    ab = [b[0] - a[0], b[1] - a[1]]
    mag = float(ab[0] ** 2 + ab[1] ** 2)
    proj = dot(ap, ab)
    if mag == 0:
        dist = 0
    else:
        dist = proj / mag
    if dist < 0:
        return [a[0], a[1]]
    elif dist > 1:
        return [b[0], b[1]]
    else:
        return [a[0] + ab[0] * dist, a[1] + ab[1] * dist]
python
{ "resource": "" }
q3938
distance_to_line
train
def distance_to_line(a, b, p):
    """Closest distance between a line segment and a point

    Args:
        a ([float, float]): x and y coordinates. Line start
        b ([float, float]): x and y coordinates. Line end
        p ([float, float]): x and y coordinates. Point to compute the distance
    Returns:
        float
    """
    return distance(closest_point(a, b, p), p)
python
{ "resource": "" }
q3939
distance_similarity
train
def distance_similarity(a, b, p, T=CLOSE_DISTANCE_THRESHOLD):
    """Computes the distance similarity between a line segment and a point

    Args:
        a ([float, float]): x and y coordinates. Line start
        b ([float, float]): x and y coordinates. Line end
        p ([float, float]): x and y coordinates. Point to compute the distance
    Returns:
        float: between 0 and 1, where 1 is very similar and 0 is
            completely different
    """
    d = distance_to_line(a, b, p)
    r = (-1 / float(T)) * abs(d) + 1
    return r if r > 0 else 0
python
{ "resource": "" }
q3940
line_distance_similarity
train
def line_distance_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
    """Line distance similarity between two line segments

    Args:
        p1a ([float, float]): x and y coordinates. Line A start
        p1b ([float, float]): x and y coordinates. Line A end
        p2a ([float, float]): x and y coordinates. Line B start
        p2b ([float, float]): x and y coordinates. Line B end
    Returns:
        float: between 0 and 1, where 1 is very similar and 0 is
            completely different
    """
    d1 = distance_similarity(p1a, p1b, p2a, T=T)
    d2 = distance_similarity(p1a, p1b, p2b, T=T)
    return abs(d1 + d2) * 0.5
python
{ "resource": "" }
q3941
line_similarity
train
def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
    """Similarity between two lines

    Args:
        p1a ([float, float]): x and y coordinates. Line A start
        p1b ([float, float]): x and y coordinates. Line A end
        p2a ([float, float]): x and y coordinates. Line B start
        p2b ([float, float]): x and y coordinates. Line B end
    Returns:
        float: between 0 and 1, where 1 is very similar and 0 is
            completely different
    """
    d = line_distance_similarity(p1a, p1b, p2a, p2b, T=T)
    a = abs(angle_similarity(normalize(line(p1a, p1b)), normalize(line(p2a, p2b))))
    return d * a
python
{ "resource": "" }
q3942
bounding_box_from
train
def bounding_box_from(points, i, i1, thr):
    """Creates a bounding box for a line segment

    Args:
        points (:obj:`list` of :obj:`Point`)
        i (int): Line segment start, index in points array
        i1 (int): Line segment end, index in points array
        thr (float): Padding added to each side of the bounding box
    Returns:
        (float, float, float, float): bounding box min x, min y, max x
            and max y
    """
    pi = points[i]
    pi1 = points[i1]

    min_lat = min(pi.lat, pi1.lat)
    min_lon = min(pi.lon, pi1.lon)
    max_lat = max(pi.lat, pi1.lat)
    max_lon = max(pi.lon, pi1.lon)

    return min_lat - thr, min_lon - thr, max_lat + thr, max_lon + thr
python
{ "resource": "" }
q3943
segment_similarity
train
def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD):
    """Computes the similarity between two segments

    Args:
        A (:obj:`Segment`)
        B (:obj:`Segment`)
    Returns:
        (float, :obj:`list` of float): overall similarity, between 0 and
            1, and the per-line similarities; 1 is very similar and 0 is
            completely different
    """
    l_a = len(A.points)
    l_b = len(B.points)

    idx = index.Index()
    dex = 0
    for i in range(l_a - 1):
        idx.insert(dex, bounding_box_from(A.points, i, i + 1, T),
                   obj=[A.points[i], A.points[i + 1]])
        dex = dex + 1

    prox_acc = []
    for i in range(l_b - 1):
        ti = B.points[i].gen2arr()
        ti1 = B.points[i + 1].gen2arr()
        bb = bounding_box_from(B.points, i, i + 1, T)
        intersects = idx.intersection(bb, objects=True)
        n_prox = []
        i_prox = 0
        a = 0
        for x in intersects:
            a = a + 1
            pi = x.object[0].gen2arr()
            pi1 = x.object[1].gen2arr()
            prox = line_similarity(ti, ti1, pi, pi1, T)
            i_prox = i_prox + prox
            n_prox.append(prox)

        if a != 0:
            prox_acc.append(i_prox / a)
            # prox_acc.append(max(n_prox))
        else:
            prox_acc.append(0)

    return np.mean(prox_acc), prox_acc
python
{ "resource": "" }
q3944
sort_segment_points
train
def sort_segment_points(Aps, Bps):
    """Takes two line segments and sorts all their points, so that they
        form a continuous path

    Args:
        Aps: Array of tracktotrip.Point
        Bps: Array of tracktotrip.Point
    Returns:
        Array with points ordered
    """
    mid = []
    j = 0
    mid.append(Aps[0])
    for i in range(len(Aps) - 1):
        dist = distance_tt_point(Aps[i], Aps[i + 1])
        for m in range(j, len(Bps)):
            distm = distance_tt_point(Aps[i], Bps[m])
            if dist > distm:
                direction = dot(
                    normalize(line(Aps[i].gen2arr(), Aps[i + 1].gen2arr())),
                    normalize(Bps[m].gen2arr())
                )
                if direction > 0:
                    j = m + 1
                    mid.append(Bps[m])
                    break
        mid.append(Aps[i + 1])
    for m in range(j, len(Bps)):
        mid.append(Bps[m])
    return mid
python
{ "resource": "" }
q3945
distance
train
def distance(latitude_1, longitude_1, elevation_1,
             latitude_2, longitude_2, elevation_2,
             haversine=None):
    """ Distance between two points """

    # If points are too distant -- compute haversine distance:
    if haversine or (abs(latitude_1 - latitude_2) > .2 or abs(longitude_1 - longitude_2) > .2):
        return haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2)

    coef = math.cos(latitude_1 / 180. * math.pi)
    #pylint: disable=invalid-name
    x = latitude_1 - latitude_2
    y = (longitude_1 - longitude_2) * coef

    distance_2d = math.sqrt(x * x + y * y) * ONE_DEGREE

    if elevation_1 is None or elevation_2 is None or elevation_1 == elevation_2:
        return distance_2d

    return math.sqrt(distance_2d ** 2 + (elevation_1 - elevation_2) ** 2)
python
{ "resource": "" }
q3946
Point.distance
train
def distance(self, other):
    """ Distance between points

    Args:
        other (:obj:`Point`)
    Returns:
        float: Distance, in meters (the underlying `distance` function
            works in meters, via ONE_DEGREE)
    """
    return distance(self.lat, self.lon, None, other.lat, other.lon, None)
python
{ "resource": "" }
q3947
Point.compute_metrics
train
def compute_metrics(self, previous):
    """ Computes the metrics of this point

    Computes and updates the dt, vel and acc attributes.

    Args:
        previous (:obj:`Point`): Point before
    Returns:
        :obj:`Point`: Self
    """
    delta_t = self.time_difference(previous)
    delta_x = self.distance(previous)
    vel = 0
    delta_v = 0
    acc = 0
    if delta_t != 0:
        vel = delta_x / delta_t
        delta_v = vel - previous.vel
        acc = delta_v / delta_t

    self.dt = delta_t
    self.dx = delta_x
    self.acc = acc
    self.vel = vel
    return self
python
{ "resource": "" }
q3948
Point.from_gpx
train
def from_gpx(gpx_track_point):
    """ Creates a point from GPX representation

    Arguments:
        gpx_track_point (:obj:`gpxpy.GPXTrackPoint`)
    Returns:
        :obj:`Point`
    """
    return Point(
        lat=gpx_track_point.latitude,
        lon=gpx_track_point.longitude,
        time=gpx_track_point.time
    )
python
{ "resource": "" }
q3949
Point.to_json
train
def to_json(self):
    """ Creates a JSON serializable representation of this instance

    Returns:
        :obj:`dict`: For example,
            {
                "lat": 9.3470298,
                "lon": 3.79274,
                "time": "2016-07-15T15:27:53.574110"
            }
    """
    return {
        'lat': self.lat,
        'lon': self.lon,
        'time': self.time.isoformat() if self.time is not None else None
    }
python
{ "resource": "" }
q3950
Point.from_json
train
def from_json(json):
    """ Creates a Point instance from a JSON representation

    Args:
        json (:obj:`dict`): Must have at least the following keys:
            lat (float), lon (float), time (string in ISO format).
            Example:
                {
                    "lat": 9.3470298,
                    "lon": 3.79274,
                    "time": "2016-07-15T15:27:53.574110"
                }
    Returns:
        :obj:`Point`
    """
    return Point(
        lat=json['lat'],
        lon=json['lon'],
        time=isostr_to_datetime(json['time'])
    )
python
{ "resource": "" }
q3951
compute_centroid
train
def compute_centroid(points):
    """ Computes the centroid of a set of points

    Args:
        points (:obj:`list` of [float, float]): points as (lon, lat)
            pairs, as produced by `gen2arr()`
    Returns:
        :obj:`Point`
    """
    lats = [p[1] for p in points]
    lons = [p[0] for p in points]
    return Point(np.mean(lats), np.mean(lons), None)
python
{ "resource": "" }
q3952
update_location_centroid
train
def update_location_centroid(point, cluster, max_distance, min_samples):
    """ Updates the centroid of a location cluster with another point

    Args:
        point (:obj:`Point`): Point to add to the cluster
        cluster (:obj:`list` of :obj:`Point`): Location cluster
        max_distance (float): Max neighbour distance
        min_samples (int): Minimum number of samples
    Returns:
        (:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the
            location centroid and new point cluster (given cluster +
            given point)
    """
    cluster.append(point)
    points = [p.gen2arr() for p in cluster]

    # Estimates the epsilon
    eps = estimate_meters_to_deg(max_distance, precision=6)

    p_cluster = DBSCAN(eps=eps, min_samples=min_samples)
    p_cluster.fit(points)

    clusters = {}
    for i, label in enumerate(p_cluster.labels_):
        if label in clusters.keys():
            clusters[label].append(points[i])
        else:
            clusters[label] = [points[i]]

    centroids = []
    biggest_centroid_l = -float("inf")
    biggest_centroid = None

    for label, n_cluster in clusters.items():
        centroid = compute_centroid(n_cluster)
        centroids.append(centroid)

        if label >= 0 and len(n_cluster) >= biggest_centroid_l:
            biggest_centroid_l = len(n_cluster)
            biggest_centroid = centroid

    if biggest_centroid is None:
        biggest_centroid = compute_centroid(points)

    return biggest_centroid, cluster
python
{ "resource": "" }
q3953
query_foursquare
train
def query_foursquare(point, max_distance, client_id, client_secret):
    """ Queries the Foursquare API for a location

    Args:
        point (:obj:`Point`): Point location to query
        max_distance (float): Search radius, in meters
        client_id (str): Valid Foursquare client id
        client_secret (str): Valid Foursquare client secret
    Returns:
        :obj:`list` of :obj:`dict`: List of locations with the following format:
            {
                'label': 'Coffee house',
                'distance': 19,
                'types': 'Commerce',
                'suggestion_type': 'FOURSQUARE'
            }
    """
    if not client_id:
        return []
    if not client_secret:
        return []

    if from_cache(FS_CACHE, point, max_distance):
        return from_cache(FS_CACHE, point, max_distance)

    url = FOURSQUARE_URL % (client_id, client_secret, point.lat, point.lon, max_distance)
    req = requests.get(url)

    if req.status_code != 200:
        return []

    response = req.json()
    result = []
    venues = response['response']['venues']
    for venue in venues:
        name = venue['name']
        distance = venue['location']['distance']
        categories = [c['shortName'] for c in venue['categories']]
        result.append({
            'label': name,
            'distance': distance,
            'types': categories,
            'suggestion_type': 'FOURSQUARE'
        })

    foursquare_insert_cache(point, result)
    return result
python
{ "resource": "" }
q3954
query_google
train
def query_google(point, max_distance, key):
    """ Queries the Google Maps API for a location

    Args:
        point (:obj:`Point`): Point location to query
        max_distance (float): Search radius, in meters
        key (str): Valid Google Maps API key
    Returns:
        :obj:`list` of :obj:`dict`: List of locations with the following format:
            {
                'label': 'Coffee house',
                'types': 'Commerce',
                'suggestion_type': 'GOOGLE'
            }
    """
    if not key:
        return []

    if from_cache(GG_CACHE, point, max_distance):
        return from_cache(GG_CACHE, point, max_distance)

    req = requests.get(GOOGLE_PLACES_URL % (
        point.lat,
        point.lon,
        max_distance,
        key
    ))

    if req.status_code != 200:
        return []

    response = req.json()
    results = response['results']
    final_results = []
    for local in results:
        final_results.append({
            'label': local['name'],
            'distance': Point(local['geometry']['location']['lat'],
                              local['geometry']['location']['lng'], None).distance(point),
            'types': local['types'],
            'suggestion_type': 'GOOGLE'
        })

    google_insert_cache(point, final_results)
    return final_results
python
{ "resource": "" }
q3955
estimate_meters_to_deg
train
def estimate_meters_to_deg(meters, precision=PRECISION_PERSON):
    """ Meters to degrees estimation

    See https://en.wikipedia.org/wiki/Decimal_degrees

    Args:
        meters (float)
        precision (float)
    Returns:
        float: meters in degrees approximation
    """
    line = PRECISION_TABLE[precision]
    dec = 1 / float(10 ** precision)
    return meters / line[3] * dec
python
{ "resource": "" }
q3956
isostr_to_datetime
train
def isostr_to_datetime(dt_str):
    """ Converts an ISO formatted text string into a datetime object

    Args:
        dt_str (str): ISO formatted text string
    Returns:
        :obj:`datetime.datetime`
    """
    if len(dt_str) <= 20:
        return datetime.datetime.strptime(dt_str, "%Y-%m-%dT%H:%M:%SZ")
    else:
        dt_str = dt_str.split(".")
        return isostr_to_datetime("%sZ" % dt_str[0])
python
{ "resource": "" }
q3957
Classifier.__learn_labels
train
def __learn_labels(self, labels):
    """ Learns new labels, this method is intended for internal use

    Args:
        labels (:obj:`list` of :obj:`str`): Labels to learn
    """
    if self.feature_length > 0:
        result = list(self.labels.classes_)
    else:
        result = []

    for label in labels:
        result.append(label)

    self.labels.fit(result)
python
{ "resource": "" }
q3958
Classifier.learn
train
def learn(self, features, labels):
    """ Fits the classifier

    If its state is empty, the classifier is fitted; if not, the
    classifier is partially fitted.
    See sklearn's SGDClassifier fit and partial_fit methods.

    Args:
        features (:obj:`list` of :obj:`list` of :obj:`float`)
        labels (:obj:`list` of :obj:`str`): Labels for each set of features.
            New features are learnt.
    """
    labels = np.ravel(labels)
    self.__learn_labels(labels)
    if len(labels) == 0:
        return

    labels = self.labels.transform(labels)
    if self.feature_length > 0 and hasattr(self.clf, 'partial_fit'):
        # FIXME? check docs, may need to pass classes=[...]
        self.clf = self.clf.partial_fit(features, labels)
    else:
        self.clf = self.clf.fit(features, labels)
        self.feature_length = len(features[0])
python
{ "resource": "" }
q3959
Classifier.predict
train
def predict(self, features, verbose=False):
    """ Probability estimates of each feature

    See sklearn's SGDClassifier predict and predict_proba methods.

    Args:
        features (:obj:`list` of :obj:`list` of :obj:`float`)
        verbose (bool, optional): If True, returns an array where each
            element is a dictionary whose keys are labels and whose
            values are the respective probabilities. Defaults to False.
    Returns:
        Array of arrays of numbers, or array of dictionaries if verbose
        is True
    """
    probs = self.clf.predict_proba(features)
    if verbose:
        labels = self.labels.classes_
        res = []
        for prob in probs:
            vals = {}
            for i, val in enumerate(prob):
                label = labels[i]
                vals[label] = val
            res.append(vals)
        return res
    else:
        return probs
python
{ "resource": "" }
q3960
FromCSVTablesGenerator.source_loader
train
def source_loader(self, source_paths, create_missing_tables=True):
    """Load source from 3 csv files.

    First file should contain global settings:

    * ``native_lagnauge,languages`` header on first row
    * appropriate values on following rows

    Example::

        native_lagnauge,languages
        ru,ru
        ,en

    Second file should contain templates:

    * ``template_name,probability,genders,template`` header on first row
    * appropriate values on following rows (separate values with
      semicolon ";" in template column)

    Example::

        template_name,probability,genders,template
        male_1,5,m,prefixes;male_suffixes
        baby_1,1,m;f,prefixes;descriptive

    Third file should contain tables with values for template slugs in
    all languages:

    * first row should contain slugs with language code after colon for each
    * appropriate values on following rows. Multiple forms may be
      specified using semicolon as separator

    Example::

        prefixes:ru,prefixes:en,male_suffixes:ru,male_suffixes:en,descriptive:ru,descriptive:en
        Бж,Bzh,пра,pra,быстряк;быстряку,fasty
        дон;дону,don,Иван;Ивану,Ivan,Иванов;Иванову,Ivanov

    Note: you may use slugs without ":lang_code" suffix in the csv header
    of the tables file. Such headers will be treated as headers for the
    native language.

    If tables are missing for some slug then a table is automatically
    created with values equal to the slug itself. So you may use some
    slugs without specifying table data for them.
    Example for apostrophe and space::

        male_1,5,m,prefixes;';male_suffixes
        male_full,5,m,first_name; ;last_name
    """
    if not isinstance(source_paths, Iterable) or len(source_paths) < 3:
        raise TypeError('FromCSVTablesGenerator.source_loader accepts list of 3 paths '
                        'as argument. Got `%s` instead' % source_paths)

    self.native_language = ''
    self.languages = []
    self.templates = []
    self.tables = {}

    self.load_settings(source_paths[0])
    template_slugs = self.load_templates(source_paths[1])
    self.load_tables(source_paths[2])

    if create_missing_tables:
        self.create_missing_tables(template_slugs)

    self.full_forms_for_languages = set()
python
{ "resource": "" }
q3961
loader
train
def loader():
    """Load image from URL, and preprocess for Resnet."""
    url = request.args.get('url')  # read image URL as a request URL param
    response = requests.get(url)   # make request to static image file
    return response.content
python
{ "resource": "" }
q3962
postprocessor
train
def postprocessor(prediction):
    """Map prediction tensor to labels."""
    prediction = prediction.data.numpy()[0]
    top_predictions = prediction.argsort()[-3:][::-1]
    return [labels[prediction] for prediction in top_predictions]
python
{ "resource": "" }
q3963
get_logger
train
def get_logger(name):
    """Get a logger with the specified name."""
    logger = logging.getLogger(name)
    logger.setLevel(getenv('LOGLEVEL', 'INFO'))
    return logger
python
{ "resource": "" }
q3964
make_serializable
train
def make_serializable(data):
    """Ensure data is serializable."""
    if is_serializable(data):
        return data

    # if numpy array convert to list
    try:
        return data.tolist()
    except AttributeError:
        pass
    except Exception as e:
        logger.debug('{} exception ({}): {}'.format(type(e).__name__, e, data))

    # try serializing each child element
    if isinstance(data, dict):
        return {key: make_serializable(value) for key, value in data.items()}
    try:
        return [make_serializable(element) for element in data]
    except TypeError:  # not iterable
        pass
    except Exception:
        logger.debug('Could not serialize {}; converting to string'.format(data))

    # last resort: convert to string
    return str(data)
python
{ "resource": "" }
q3965
json_numpy_loader
train
def json_numpy_loader():
    """Load data from JSON request and convert to numpy array."""
    data = request.get_json()
    logger.debug('Received JSON data of length {:,}'.format(len(data)))
    return data
python
{ "resource": "" }
q3966
get_bytes_to_image_callback
train
def get_bytes_to_image_callback(image_dims=(224, 224)):
    """Return a callback to process image bytes for ImageNet."""
    from keras.preprocessing import image
    import numpy as np
    from PIL import Image
    from io import BytesIO

    def preprocess_image_bytes(data_bytes):
        """Process image bytes for ImageNet."""
        try:
            img = Image.open(BytesIO(data_bytes))  # open image
        except OSError as e:
            raise ValueError('Please provide a raw image')
        img = img.resize(image_dims, Image.ANTIALIAS)  # model requires 224x224 pixels
        x = image.img_to_array(img)  # convert image to numpy array
        x = np.expand_dims(x, axis=0)  # model expects dim 0 to be iterable across images
        return x
    return preprocess_image_bytes
python
{ "resource": "" }
q3967
exception_log_and_respond
train
def exception_log_and_respond(exception, logger, message, status_code):
    """Log an error and send jsonified response."""
    logger.error(message, exc_info=True)
    return make_response(
        message,
        status_code,
        dict(exception_type=type(exception).__name__, exception_message=str(exception)),
    )
python
{ "resource": "" }
q3968
make_response
train
def make_response(message, status_code, details=None):
    """Make a jsonified response with specified message and status code."""
    response_body = dict(message=message)
    if details:
        response_body['details'] = details
    response = jsonify(response_body)
    response.status_code = status_code
    return response
python
{ "resource": "" }
q3969
ModelServer._create_prediction_endpoint
train
def _create_prediction_endpoint(
        self,
        to_numpy=True,
        data_loader=json_numpy_loader,
        preprocessor=lambda x: x,
        input_validation=lambda data: (True, None),
        postprocessor=lambda x: x,
        make_serializable_post=True):
    """Create an endpoint to serve predictions.

    Arguments:
        - input_validation (fn): takes a numpy array as input;
            returns True if validation passes and False otherwise
        - data_loader (fn): reads flask request and returns data
            preprocessed to be used in the `predict` method
        - postprocessor (fn): transforms the predictions from the
            `predict` method
    """
    # copy instance variables to local scope for resource class
    predict = self.predict
    logger = self.app.logger

    # create restful resource
    class Predictions(Resource):
        @staticmethod
        def post():
            # read data from API request
            try:
                data = data_loader()
            except Exception as e:
                return exception_log_and_respond(e, logger, 'Unable to fetch data', 400)

            try:
                if hasattr(preprocessor, '__iter__'):
                    for preprocessor_step in preprocessor:
                        data = preprocessor_step(data)
                else:
                    data = preprocessor(data)  # preprocess data
                data = np.array(data) if to_numpy else data  # convert to numpy
            except Exception as e:
                return exception_log_and_respond(e, logger, 'Could not preprocess data', 400)

            # sanity check using user defined callback (default is no check)
            validation_pass, validation_reason = input_validation(data)
            if not validation_pass:
                # if validation fails, log the reason code, log the data,
                # and send a 400 response
                validation_message = 'Input validation failed with reason: {}'.format(validation_reason)
                logger.error(validation_message)
                logger.debug('Data: {}'.format(data))
                return make_response(validation_message, 400)

            try:
                prediction = predict(data)
            except Exception as e:
                # log exception and return the message in a 500 response
                logger.debug('Data: {}'.format(data))
                return exception_log_and_respond(e, logger, 'Unable to make prediction', 500)

            logger.debug(prediction)
            try:
                # postprocess prediction
                if hasattr(postprocessor, '__iter__'):
                    for postprocessor_step in postprocessor:
                        prediction = postprocessor_step(prediction)
                else:
                    prediction = postprocessor(prediction)
                # cast to serializable types
                if make_serializable_post:
                    return make_serializable(prediction)
                else:
                    return prediction
            except Exception as e:
                return exception_log_and_respond(e, logger, 'Postprocessing failed', 500)

    # map resource to endpoint
    self.api.add_resource(Predictions, '/predictions')
python
{ "resource": "" }
q3970
ModelServer.serve
train
def serve(self, host='127.0.0.1', port=5000):
    """Serve predictions as an API endpoint."""
    from meinheld import server, middleware
    # self.app.run(host=host, port=port)
    server.listen((host, port))
    server.run(middleware.WebSocketMiddleware(self.app))
python
{ "resource": "" }
q3971
get_model
train
def get_model(input_dim):
    """Create and compile simple model."""
    model = Sequential()
    model.add(Dense(100, input_dim=input_dim, activation='sigmoid'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='SGD')
    return model
python
{ "resource": "" }
q3972
validator
train
def validator(input_data):
    """Simple model input validator.

    Validator ensures the input data array
        - is two dimensional
        - has the correct number of features.
    """
    global data

    # check num dims
    if input_data.ndim != 2:
        return False, 'Data should have two dimensions.'
    # check number of columns
    if input_data.shape[1] != data.data.shape[1]:
        reason = '{} features required, {} features provided'.format(
            data.data.shape[1], input_data.shape[1])
        return False, reason
    # validation passed
    return True, None
python
{ "resource": "" }
q3973
read_chd_header
train
def read_chd_header(chd_file):
    """ Read the .chd header file created when Vision Research software
        saves the images in a file format other than .cine """
    with open(chd_file, "rb") as f:
        header = {
            "cinefileheader": cine.CINEFILEHEADER(),
            "bitmapinfoheader": cine.BITMAPINFOHEADER(),
            "setup": cine.SETUP(),
        }
        f.readinto(header["cinefileheader"])
        f.readinto(header["bitmapinfoheader"])
        f.readinto(header["setup"])

    return header
python
{ "resource": "" }
q3974
_load_module
train
def _load_module(module_name, path):
    '''A helper function invoked on the server to tell it to import a module.'''
    # TODO: handle the case that the module is already loaded
    try:
        # First try to find a non-builtin, non-frozen, non-special
        # module using the client's search path
        fd, filename, info = imp.find_module(module_name, path)
    except ImportError:
        # The above will fail for builtin, frozen, or special
        # modules. We search for those now...
        fd, filename, info = imp.find_module(module_name)

    # Now import the module given the info found above
    try:
        return imp.load_module(module_name, fd, filename, info)
    finally:
        if fd is not None:
            fd.close()
python
{ "resource": "" }
q3975
byvalue
train
def byvalue(proxy):
    '''Return a copy of the underlying object for which the argument is a proxy.'''
    assert isinstance(proxy, Proxy)
    return proxy.client.execute(ByValueDelegate(proxy))
python
{ "resource": "" }
q3976
Client.state
train
def state(self, state):
    '''Change the state of the client. This is one of the values defined
    in ClientState.'''
    logger.debug('client changing to state=%s', ClientState.Names[state])
    self._state = state
python
{ "resource": "" }
q3977
Client._read_result
train
def _read_result(self, num_retries):
    '''Read an object from a channel, possibly retrying if the attempt
    is interrupted by a signal from the operating system.'''
    for i in range(num_retries):
        self._assert_alive()
        try:
            return self._result_channel.get()
        except IOError as ex:
            if ex.errno == 4:
                # errno=4 corresponds to "System call interrupted",
                # which means a signal was received before any data
                # was sent. For now I think it's safe to ignore this
                # and continue.
                logger.exception('attempt to read from channel was interrupted by something')
                sys.exc_clear()
            else:
                # Something else went wrong - raise the exception as usual
                raise ex
    raise ChannelError('failed to read from channel after %d retries' % num_retries)
python
{ "resource": "" }
q3978
Client.terminate
train
def terminate(self):
    '''Stop the server process and change our state to TERMINATING.
    Only valid if state=READY.'''
    logger.debug('client.terminate() called (state=%s)', self.strstate)
    if self.state == ClientState.WAITING_FOR_RESULT:
        raise ClientStateError('terminate() called while state=' + self.strstate)
    if self.state == ClientState.TERMINATING:
        raise ClientStateError('terminate() called while state=' + self.strstate)
    elif self.state in ClientState.TerminatedSet:
        assert not self._server_process.is_alive()
        return
    elif self.state == ClientState.READY:
        # Check that the process itself is still alive
        self._assert_alive()

        # Make sure the SIGCHLD signal handler doesn't throw any exceptions
        self.state = ClientState.TERMINATING

        # Do not call execute() because that function will check
        # whether the process is alive and throw an exception if not
        # TODO: can the queue itself throw exceptions?
        self._delegate_channel.put(FunctionCallDelegate(_raise_terminate))

        # Wait for acknowledgement
        try:
            self._read_result(num_retries=5)
        except ProcessTerminationError as ex:
            pass
        except ChannelError as ex:
            # Was interrupted five times in a row! Ignore for now
            logger.debug('client failed to read sentinel from channel after 5 retries - will terminate anyway')

        self.state = ClientState.TERMINATED_CLEANLY
python
{ "resource": "" }
q3979
Client.cleanup
train
def cleanup(self):
    '''Terminate this client if it has not already terminated.'''
    if self.state == ClientState.WAITING_FOR_RESULT:
        # There is an ongoing call to execute()
        # Not sure what to do here
        logger.warn('cleanup() called while state is WAITING_FOR_RESULT: ignoring')
    elif self.state == ClientState.TERMINATING:
        # terminate() has been called but we have not received SIGCHLD yet
        # Not sure what to do here
        logger.warn('cleanup() called while state is TERMINATING: ignoring')
    elif self.state in ClientState.TerminatedSet:
        # We have already terminated
        # TODO: should we deal with TERMINATED_ASYNC in some special way?
        logger.debug('cleanup() called while state is %s: nothing needs to be done', self.strstate)
    else:
        logger.debug('cleanup() called while state is %s: attempting to terminate', self.strstate)
        try:
            self.terminate()
        except ProcessTerminationError as ex:
            # terminate() can throw a ProcessTerminationError if the
            # process terminated at some point between the last
            # execute() and the call to terminate().
            # For now we just ignore this.
            pass
python
{ "resource": "" }
q3980
IsolationContext.start
train
def start(self):
    '''Create a process in which the isolated code will be run.'''
    assert self._client is None
    logger.debug('IsolationContext[%d] starting', id(self))

    # Create the queues
    request_queue = multiprocessing.Queue()
    response_queue = multiprocessing.Queue()

    # Launch the server process
    server = Server(request_queue, response_queue)  # Do not keep a reference to this object!
    server_process = multiprocessing.Process(target=server.loop)
    server_process.start()

    # Create a client to talk to the server
    self._client = Client(server_process, request_queue, response_queue)
python
{ "resource": "" }
q3981
IsolationContext.load_module
train
def load_module(self, module_name, path=None):
    '''Import a module into this isolation context and return a proxy for it.'''
    self.ensure_started()
    if path is None:
        path = sys.path
    mod = self.client.call(_load_module, module_name, path)
    mod.__isolation_context__ = self
    return mod
python
{ "resource": "" }
q3982
_has_annotation
train
def _has_annotation(annotation, value):
    """ Returns a function that can be used as a predicate in getmembers,
    matching callables that carry the given annotation with the given value """
    def matches_property_name(fun):
        """ Returns True if fun is a callable that has the correct
        annotation with the correct value """
        return callable(fun) and hasattr(fun, annotation) \
               and getattr(fun, annotation) is value
    return matches_property_name
python
{ "resource": "" }
q3983
_get_getter_fun
train
def _get_getter_fun(object_type,            # type: Type
                    parameter,              # type: Parameter
                    private_property_name   # type: str
                    ):
    """
    Utility method to find the overridden getter function for a given
    property, or generate a new one

    :param object_type:
    :param parameter:
    :param private_property_name:
    :return:
    """
    property_name = parameter.name

    # -- check overridden getter for this property name
    overridden_getters = getmembers(
        object_type,
        predicate=_has_annotation(__GETTER_OVERRIDE_ANNOTATION, property_name)
    )

    if len(overridden_getters) > 0:
        if len(overridden_getters) > 1:
            raise DuplicateOverrideError('Getter is overridden more than once for attribute name : '
                                         + property_name)

        # -- use the overridden getter
        getter_fun = overridden_getters[0][1]

        # -- check its signature
        s = signature(getter_fun)
        if not ('self' in s.parameters.keys() and len(s.parameters.keys()) == 1):
            raise IllegalGetterSignatureException(
                'overridden getter must only have a self parameter, found '
                + str(len(s.parameters.items()) - 1) + ' for function '
                + str(getter_fun.__qualname__))

        # -- use the overridden getter?
        property_obj = property(getter_fun)
    else:
        # -- generate the getter:
        def autoprops_generated_getter(self):
            return getattr(self, private_property_name)

        # -- use the generated getter
        getter_fun = autoprops_generated_getter

    try:
        annotations = getter_fun.__annotations__
    except AttributeError:  # python 2
        pass
    else:
        annotations['return'] = parameter.annotation  # add type hint to output declaration

    return getter_fun
python
{ "resource": "" }
q3984
_get_setter_fun
train
def _get_setter_fun(object_type,           # type: Type
                    parameter,             # type: Parameter
                    private_property_name  # type: str
                    ):
    """
    Utility method to find the overridden setter function for a given property,
    or generate a new one.

    :param object_type:
    :param parameter:
    :param private_property_name:
    :return:
    """
    # the property will have the same name as the constructor argument
    property_name = parameter.name

    overridden_setters = getmembers(object_type, _has_annotation(__SETTER_OVERRIDE_ANNOTATION, property_name))

    if len(overridden_setters) > 0:
        # --check that we only have one
        if len(overridden_setters) > 1:
            raise DuplicateOverrideError('Setter is overridden more than once for attribute name : %s' % property_name)

        # --use the overridden setter
        setter_fun = overridden_setters[0][1]
        try:
            # python 2
            setter_fun = setter_fun.im_func
        except AttributeError:
            pass

        # --find the parameter name and check the signature
        s = signature(setter_fun)
        p = [attribute_name for attribute_name, param in s.parameters.items() if attribute_name != 'self']
        if len(p) != 1:
            try:
                qname = setter_fun.__qualname__
            except AttributeError:
                qname = setter_fun.__name__
            raise IllegalSetterSignatureException('overridden setter must have only 1 non-self argument, found '
                                                  '%s for function %s'
                                                  '' % (len(s.parameters.items()) - 1, qname))
        var_name = p[0]
    else:
        # --create the setter, equivalent of:
        # ** Dynamically compile a wrapper with correct argument name **
        sig = Signature(parameters=[Parameter('self', kind=Parameter.POSITIONAL_OR_KEYWORD), parameter])

        @with_signature(sig)
        def autoprops_generated_setter(self, **kwargs):
            setattr(self, private_property_name, kwargs.popitem()[1])

        setter_fun = autoprops_generated_setter
        var_name = property_name

    return setter_fun, var_name
python
{ "resource": "" }
q3985
getter_override
train
def getter_override(attribute=None, # type: str f=DECORATED ): """ A decorator to indicate an overridden getter for a given attribute. If the attribute name is None, the function name will be used as the attribute name. :param attribute: the attribute name for which the decorated function is an overridden getter :return: """ return autoprops_override_decorate(f, attribute=attribute, is_getter=True)
python
{ "resource": "" }
q3986
is_attr_selected
train
def is_attr_selected(attr_name,     # type: str
                     include=None,  # type: Union[str, Tuple[str]]
                     exclude=None   # type: Union[str, Tuple[str]]
                     ):
    """decide whether an action has to be performed on the attribute or not, based on its name"""

    if include is not None and exclude is not None:
        raise ValueError('Only one of \'include\' or \'exclude\' argument should be provided.')

    # win time by not doing this
    # check_var(include, var_name='include', var_types=[str, tuple], enforce_not_none=False)
    # check_var(exclude, var_name='exclude', var_types=[str, tuple], enforce_not_none=False)

    if attr_name == 'self':
        return False
    if exclude and attr_name in exclude:
        return False
    if not include or attr_name in include:
        return True
    else:
        return False
python
{ "resource": "" }
q3987
method_already_there
train
def method_already_there(object_type, method_name, this_class_only=False): """ Returns True if method `method_name` is already implemented by object_type, that is, its implementation differs from the one in `object`. :param object_type: :param method_name: :param this_class_only: :return: """ if this_class_only: return method_name in vars(object_type) # or object_type.__dict__ else: try: method = getattr(object_type, method_name) except AttributeError: return False else: return method is not None and method is not getattr(object, method_name, None)
python
{ "resource": "" }
q3988
OrderedMultiDict.extend
train
def extend(self, *args, **kwargs): """Add key value pairs for an iterable.""" if len(args) > 1: raise TypeError('expected at most 1 arguments, got %d' % len(args)) iterable = args[0] if args else None if iterable: if isinstance(iterable, Mapping) or hasattr(iterable, 'items'): for key, value in iterable.items(): self.append(key, value) elif hasattr(iterable, 'keys'): for key in iterable.keys(): self.append(key, iterable[key]) else: for key, value in iterable: self.append(key, value) for key, value in kwargs.items(): self.append(key, value)
python
{ "resource": "" }
q3989
OrderedMultiDict.__insert_wrapper
train
def __insert_wrapper(func): """Make sure the arguments given to the insert methods are correct""" def check_func(self, key, new_item, instance=0): if key not in self.keys(): raise KeyError("%s not a key in label" % (key)) if not isinstance(new_item, (list, OrderedMultiDict)): raise TypeError("The new item must be a list or PVLModule") if isinstance(new_item, OrderedMultiDict): new_item = list(new_item) return func(self, key, new_item, instance) return check_func
python
{ "resource": "" }
q3990
OrderedMultiDict._get_index_for_insert
train
def _get_index_for_insert(self, key, instance):
        """Get the index of the key to insert before or after"""
        if instance == 0:
            # index() returns the first occurrence of the key
            index = self.keys().index(key)
        else:
            occurrence = -1
            for index, k in enumerate(self.keys()):
                if k == key:
                    occurrence += 1
                    if occurrence == instance:
                        # Found the key and the correct occurrence of the key
                        break

            if occurrence != instance:
                # Gone through the entire list of keys and the instance number
                # given is too high for the number of occurrences of the key
                raise ValueError(
                    (
                        "Cannot insert before/after the %d "
                        "instance of the key '%s' since there are "
                        "only %d occurrences of the key" % (
                            instance, key, occurrence)
                    ))
        return index
python
{ "resource": "" }
q3991
OrderedMultiDict._insert_item
train
def _insert_item(self, key, new_item, instance, is_after): """Insert a new item before or after another item""" index = self._get_index_for_insert(key, instance) index = index + 1 if is_after else index self.__items = self.__items[:index] + new_item + self.__items[index:] # Make sure indexing works with new items for new_key, new_value in new_item: if new_key in self: value_list = [val for k, val in self.__items if k == new_key] dict_setitem(self, new_key, value_list) else: dict_setitem(self, new_key, [new_value])
python
{ "resource": "" }
q3992
OrderedMultiDict.insert_after
train
def insert_after(self, key, new_item, instance=0): """Insert an item after a key""" self._insert_item(key, new_item, instance, True)
python
{ "resource": "" }
q3993
OrderedMultiDict.insert_before
train
def insert_before(self, key, new_item, instance=0): """Insert an item before a key""" self._insert_item(key, new_item, instance, False)
python
{ "resource": "" }
q3994
ByteStream.peek
train
def peek(self, n):
        """Return up to ``n`` buffered bytes without advancing the position.

        No raw read is performed: the result is taken directly from the
        underlying buffer, so fewer than ``n`` bytes may be returned if the
        buffer ends first.
        """
        pos = self._read_pos
        end = pos + n
        return self.raw[pos:end]
python
{ "resource": "" }
q3995
PVLDecoder.parse_group
train
def parse_group(self, stream): """Block Name must match Block Name in paired End Group Statement if Block Name is present in End Group Statement. BeginGroupStmt ::= BeginGroupKeywd WSC AssignmentSymbol WSC BlockName StatementDelim """ self.expect_in(stream, self.begin_group_tokens) self.ensure_assignment(stream) name = self.next_token(stream) self.skip_statement_delimiter(stream) statements = self.parse_block(stream, self.has_end_group) self.expect_in(stream, self.end_group_tokens) self.parse_end_assignment(stream, name) self.skip_statement_delimiter(stream) return name.decode('utf-8'), PVLGroup(statements)
python
{ "resource": "" }
q3996
PVLDecoder.parse_object
train
def parse_object(self, stream):
        """Block Name must match Block Name in paired End Object Statement
        if Block Name is present in End Object Statement.

        BeginObjectStmt ::= BeginObjectKeywd WSC AssignmentSymbol
                            WSC BlockName StatementDelim
        """
        self.expect_in(stream, self.begin_object_tokens)
        self.ensure_assignment(stream)

        name = self.next_token(stream)
        self.skip_statement_delimiter(stream)
        statements = self.parse_block(stream, self.has_end_object)

        self.expect_in(stream, self.end_object_tokens)
        self.parse_end_assignment(stream, name)
        self.skip_statement_delimiter(stream)

        return name.decode('utf-8'), PVLObject(statements)
python
{ "resource": "" }
q3997
DjangoFaker.varchar
train
def varchar(self, field=None):
        """
        Returns a chunk of text, of maximum length 'max_length'
        """
        assert field is not None, "The field parameter must be passed to the 'varchar' method."
        max_length = field.max_length

        def source():
            length = random.randint(1, max_length)
            return "".join(random.choice(general_chars) for i in xrange(length))

        return self.get_allowed_value(source, field)
python
{ "resource": "" }
q3998
DjangoFaker.datetime
train
def datetime(self, field=None, val=None): """ Returns a random datetime. If 'val' is passed, a datetime within two years of that date will be returned. """ if val is None: def source(): tzinfo = get_default_timezone() if settings.USE_TZ else None return datetime.fromtimestamp(randrange(1, 2100000000), tzinfo) else: def source(): tzinfo = get_default_timezone() if settings.USE_TZ else None return datetime.fromtimestamp(int(val.strftime("%s")) + randrange(-365*24*3600*2, 365*24*3600*2), tzinfo) return self.get_allowed_value(source, field)
python
{ "resource": "" }
q3999
DjangoFaker.date
train
def date(self, field=None, val=None): """ Like datetime, but truncated to be a date only """ return self.datetime(field=field, val=val).date()
python
{ "resource": "" }