_id (string, 2-7 chars) | title (string, 1-88 chars) | partition (string, 3 classes) | text (string, 31-13.1k chars) | language (string, 1 class) | meta_information (dict)
---|---|---|---|---|---|
q3900
|
with_inverse
|
train
|
def with_inverse(points, noise):
""" Smooths a set of points
    It smooths them twice: once in the given order and once in reverse order.
    The first half of the result is taken from the reverse pass and
    the second half from the forward pass.
Args:
points (:obj:`list` of :obj:`Point`)
noise (float): Expected noise, the higher it is the more the path will
be smoothed.
Returns:
:obj:`list` of :obj:`Point`
"""
# noise_sample = 20
    n_points = len(points) // 2  # integer midpoint; true division would yield a float index
break_point = n_points
points_part =
|
python
|
{
"resource": ""
}
|
q3901
|
temporal_segmentation
|
train
|
def temporal_segmentation(segments, min_time):
""" Segments based on time distant points
Args:
segments (:obj:`list` of :obj:`list` of :obj:`Point`): segment points
min_time (int): minimum required time for segmentation
"""
final_segments = []
for segment in segments:
final_segments.append([])
|
python
|
{
"resource": ""
}
|
q3902
|
correct_segmentation
|
train
|
def correct_segmentation(segments, clusters, min_time):
""" Corrects the predicted segmentation
This process prevents over segmentation
Args:
segments (:obj:`list` of :obj:`list` of :obj:`Point`):
segments to correct
min_time (int): minimum required time for segmentation
"""
# segments = [points for points in segments if len(points) > 1]
result_segments = []
prev_segment = None
for i, segment in enumerate(segments):
|
python
|
{
"resource": ""
}
|
q3903
|
spatiotemporal_segmentation
|
train
|
def spatiotemporal_segmentation(points, eps, min_time):
""" Splits a set of points into multiple sets of points based on
spatio-temporal stays
DBSCAN is used to predict possible segmentations,
    furthermore we check to see if each cluster is big enough in
    time (>=min_time). If that's the case then the segmentation is
considered valid.
    When segmenting, the last point of the ith segment will be the same
    as that of the (i-1)th segment.
    Segments are identified through clusters.
    The last point of a cluster that comes after a sub-segment A will
    also be present in sub-segment A.
Args:
points (:obj:`list` of :obj:`Point`): segment's points
eps (float): Epsilon to feed to the DBSCAN algorithm.
Maximum distance between two samples, to be considered in
the same cluster.
min_time (float): Minimum time of a stay
Returns:
:obj:`list` of :obj:`list` of :obj:`Point`: Initial set of
points in different segments
"""
# min time / sample rate
dt_average = np.median([point.dt for point in points])
min_samples = min_time / dt_average
data = [point.gen3arr() for point in points]
data = StandardScaler().fit_transform(data)
    print('min_samples: %f' % min_samples)
db_cluster = DBSCAN(eps=eps, min_samples=min_samples).fit(data)
labels = db_cluster.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
segments = [[] for _ in range(n_clusters_+1)]
clusters = [[] for _ in range(n_clusters_+1)]
current_segment = 0
|
python
|
{
"resource": ""
}
|
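Note: the `spatiotemporal_segmentation` cell above is truncated, but its key idea is visible: derive DBSCAN's `min_samples` from the required stay duration and the median sampling interval, then treat noise points as movement. A minimal, self-contained sketch of that idea, using hypothetical `(lat, lon, time)` rows instead of the library's `Point` class:

```python
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

# Hypothetical input: rows of (lat, lon, unix_time), not tracktotrip Points.
data = np.array([
    [41.15, -8.61, 0], [41.15, -8.61, 10], [41.15, -8.61, 20],  # a stay
    [41.16, -8.60, 30], [41.17, -8.59, 40],                     # movement
])
min_time = 20      # seconds a stay must last
dt_average = 10    # median sampling interval, in seconds
min_samples = max(1, int(min_time / dt_average))

scaled = StandardScaler().fit_transform(data)
labels = DBSCAN(eps=0.8, min_samples=min_samples).fit(scaled).labels_
print(labels)  # e.g. [ 0  0  0 -1 -1]: one stay cluster, two moving points
```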
q3904
|
kalman_filter
|
train
|
def kalman_filter(points, noise):
""" Smooths points with kalman filter
See https://github.com/open-city/ikalman
Args:
points (:obj:`list` of :obj:`Point`): points to smooth
noise (float): expected noise
"""
kalman = ikalman.filter(noise)
for point in points:
|
python
|
{
"resource": ""
}
|
q3905
|
learn_transportation_mode
|
train
|
def learn_transportation_mode(track, clf):
""" Inserts transportation modes of a track into a classifier
Args:
track (:obj:`Track`)
clf (:obj:`Classifier`)
"""
for segment in track.segments:
tmodes = segment.transportation_modes
points = segment.points
features = []
labels = []
|
python
|
{
"resource": ""
}
|
q3906
|
speed_difference
|
train
|
def speed_difference(points):
""" Computes the speed difference between each adjacent point
Args:
points (:obj:`Point`)
|
python
|
{
"resource": ""
}
|
q3907
|
acc_difference
|
train
|
def acc_difference(points):
""" Computes the accelaration difference between each adjacent point
Args:
points (:obj:`Point`)
|
python
|
{
"resource": ""
}
|
q3908
|
detect_changepoints
|
train
|
def detect_changepoints(points, min_time, data_processor=acc_difference):
""" Detects changepoints on points that have at least a specific duration
Args:
points (:obj:`Point`)
        min_time (float): Min time that a sub-segment, bounded by two changepoints, must have
data_processor (function): Function to extract data to feed to the changepoint algorithm.
            Defaults to `acc_difference`
|
python
|
{
"resource": ""
}
|
q3909
|
group_modes
|
train
|
def group_modes(modes):
""" Groups consecutive transportation modes with same label, into one
Args:
modes (:obj:`list` of :obj:`dict`)
Returns:
:obj:`list` of :obj:`dict`
"""
if len(modes) > 0:
previous = modes[0]
grouped = []
for changep in modes[1:]:
if changep['label'] != previous['label']:
previous['to'] = changep['from']
|
python
|
{
"resource": ""
}
|
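Note: the `group_modes` cell cuts off before the running group is flushed. A plausible completion plus usage (the final `append` and the `dict` copies are assumptions, not the library's verbatim code):

```python
def group_modes(modes):
    """Collapse consecutive modes that share a label into one entry."""
    if not modes:
        return []
    previous = dict(modes[0])          # copy to avoid mutating the input
    grouped = []
    for changep in modes[1:]:
        if changep['label'] != previous['label']:
            previous['to'] = changep['from']   # close the running group
            grouped.append(previous)
            previous = dict(changep)
    grouped.append(previous)           # flush the last group
    return grouped

modes = [
    {'label': 'walk', 'from': 0, 'to': 4},
    {'label': 'walk', 'from': 4, 'to': 9},
    {'label': 'bus', 'from': 9, 'to': 20},
]
print(group_modes(modes))
# [{'label': 'walk', 'from': 0, 'to': 9}, {'label': 'bus', 'from': 9, 'to': 20}]
```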
q3910
|
speed_clustering
|
train
|
def speed_clustering(clf, points, min_time):
""" Transportation mode infering, based on changepoint segmentation
Args:
clf (:obj:`Classifier`): Classifier to use
points (:obj:`list` of :obj:`Point`)
        min_time (float): Min time, in seconds, before doing another segmentation
Returns:
|
python
|
{
"resource": ""
}
|
q3911
|
distance
|
train
|
def distance(p_a, p_b):
""" Euclidean distance, between two points
Args:
p_a (:obj:`Point`)
p_b (:obj:`Point`)
Returns:
|
python
|
{
"resource": ""
}
|
q3912
|
point_line_distance
|
train
|
def point_line_distance(point, start, end):
""" Distance from a point to a line, formed by two points
Args:
point (:obj:`Point`)
start (:obj:`Point`): line point
end (:obj:`Point`): line point
Returns:
float: distance to line, in degrees
"""
if start == end:
return distance(point, start)
else:
|
python
|
{
"resource": ""
}
|
q3913
|
drp
|
train
|
def drp(points, epsilon):
""" Douglas ramer peucker
Based on https://en.wikipedia.org/wiki/Ramer%E2%80%93Douglas%E2%80%93Peucker_algorithm
Args:
points (:obj:`list` of :obj:`Point`)
epsilon (float): drp threshold
Returns:
:obj:`list` of :obj:`Point`
"""
dmax = 0.0
index = 0
for i in range(1, len(points)-1):
|
python
|
{
"resource": ""
}
|
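Note: `drp` depends on `point_line_distance` (row q3912). A self-contained Ramer-Douglas-Peucker sketch over plain `(x, y)` tuples, following the Wikipedia pseudocode cited in the docstring rather than tracktotrip's `Point` objects:

```python
import math

def point_line_distance(point, start, end):
    """Perpendicular distance from point to the line through start and end."""
    if start == end:
        return math.dist(point, start)
    num = abs((end[0] - start[0]) * (start[1] - point[1])
              - (start[0] - point[0]) * (end[1] - start[1]))
    return num / math.dist(start, end)

def drp(points, epsilon):
    """Ramer-Douglas-Peucker simplification."""
    dmax, index = 0.0, 0
    for i in range(1, len(points) - 1):
        dist = point_line_distance(points[i], points[0], points[-1])
        if dist > dmax:
            dmax, index = dist, i
    if dmax > epsilon:
        # Keep the farthest point and recurse on both halves.
        return drp(points[:index + 1], epsilon)[:-1] + drp(points[index:], epsilon)
    return [points[0], points[-1]]

print(drp([(0, 0), (1, 0.1), (2, -0.1), (3, 5), (4, 6), (5, 7)], 1.0))
# [(0, 0), (2, -0.1), (3, 5), (5, 7)]
```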
q3914
|
td_sp
|
train
|
def td_sp(points, speed_threshold):
""" Top-Down Speed-Based Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
speed_threshold (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
"""
if len(points) <= 2:
return points
else:
max_speed_threshold = 0
found_index = 0
for i in range(1, len(points)-1):
dt1 = time_dist(points[i], points[i-1])
if dt1 == 0:
dt1 = 0.000000001
vim = loc_dist(points[i], points[i-1]) / dt1
dt2 = time_dist(points[i+1], points[i])
if dt2 == 0:
dt2 = 0.000000001
vi_ = loc_dist(points[i+1], points[i]) / dt2
|
python
|
{
"resource": ""
}
|
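Note: the `td_sp` cell stops mid-loop. The sketch below finishes the top-down recursion in the spirit of the cited Meratnia & de By paper; the split-and-recurse tail is an assumption about the truncated code, and `(x, y, t)` tuples stand in for `Point`:

```python
import math

def time_dist(p_b, p_a):
    """Time delta, in seconds, between (x, y, t) tuples (hypothetical)."""
    return p_b[2] - p_a[2]

def loc_dist(p_b, p_a):
    """Planar distance between (x, y, t) tuples (hypothetical)."""
    return math.dist(p_b[:2], p_a[:2])

def td_sp(points, speed_threshold):
    """Top-down speed-based trajectory compression."""
    if len(points) <= 2:
        return points
    max_speed_error, found_index = 0.0, 0
    for i in range(1, len(points) - 1):
        dt1 = time_dist(points[i], points[i - 1]) or 1e-9
        v_in = loc_dist(points[i], points[i - 1]) / dt1
        dt2 = time_dist(points[i + 1], points[i]) or 1e-9
        v_out = loc_dist(points[i + 1], points[i]) / dt2
        if abs(v_out - v_in) > max_speed_error:
            max_speed_error, found_index = abs(v_out - v_in), i
    if max_speed_error > speed_threshold:
        # Split at the worst speed discontinuity and compress each half.
        left = td_sp(points[:found_index + 1], speed_threshold)
        right = td_sp(points[found_index:], speed_threshold)
        return left[:-1] + right
    return [points[0], points[-1]]
```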
q3915
|
td_tr
|
train
|
def td_tr(points, dist_threshold):
""" Top-Down Time-Ratio Trajectory Compression Algorithm
Detailed in https://www.itc.nl/library/Papers_2003/peer_ref_conf/meratnia_new.pdf
Args:
points (:obj:`list` of :obj:`Point`): trajectory or part of it
dist_threshold (float): max distance error, in meters
Returns:
:obj:`list` of :obj:`Point`, compressed trajectory
"""
if len(points) <= 2:
return points
else:
max_dist_threshold = 0
found_index = 0
delta_e = time_dist(points[-1], points[0]) * I_3600
d_lat = points[-1].lat - points[0].lat
d_lon = points[-1].lon - points[0].lon
for i in range(1, len(points)-1):
delta_i = time_dist(points[i], points[0]) * I_3600
di_de = delta_i / delta_e
point = Point(
points[0].lat + d_lat * di_de,
points[0].lon + d_lon * di_de,
|
python
|
{
"resource": ""
}
|
q3916
|
spt
|
train
|
def spt(points, max_dist_error, max_speed_error):
""" A combination of both `td_sp` and `td_tr`
Detailed in,
Spatiotemporal Compression Techniques for Moving Point Objects,
Nirvana Meratnia and Rolf A. de By, 2004,
in Advances in Database Technology - EDBT 2004: 9th
International Conference on Extending Database Technology,
Heraklion, Crete, Greece, March 14-18, 2004
Args:
points (:obj:`list` of :obj:`Point`)
max_dist_error (float): max distance error, in meters
max_speed_error (float): max speed error, in km/h
Returns:
:obj:`list` of :obj:`Point`
"""
if len(points) <= 2:
return points
else:
is_error = False
e = 1
while e < len(points) and not is_error:
i = 1
while i < e and not is_error:
delta_e = time_dist(points[e], points[0]) * I_3600
delta_i = time_dist(points[i], points[0]) * I_3600
di_de = 0
if delta_e != 0:
di_de = delta_i / delta_e
d_lat = points[e].lat - points[0].lat
d_lon = points[e].lon - points[0].lon
point = Point(
points[0].lat + d_lat * di_de,
points[0].lon + d_lon * di_de,
None
)
dt1 = time_dist(points[i], points[i-1])
if dt1 == 0:
|
python
|
{
"resource": ""
}
|
q3917
|
Track.generate_name
|
train
|
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
""" Generates a name for the track
The name is generated based on the date of the first point of the
track, or in case it doesn't exist, "EmptyTrack"
Args:
|
python
|
{
"resource": ""
}
|
q3918
|
Track.smooth
|
train
|
def smooth(self, strategy, noise):
""" In-place smoothing of segments
Returns:
|
python
|
{
"resource": ""
}
|
q3919
|
Track.segment
|
train
|
def segment(self, eps, min_time):
"""In-place segmentation of segments
Spatio-temporal segmentation of each segment
    The number of segments may increase after this step
Returns:
This track
"""
new_segments = []
|
python
|
{
"resource": ""
}
|
q3920
|
Track.simplify
|
train
|
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place simplification of segments
Args:
        max_dist_error (float): Max distance error, in meters
        max_speed_error (float): Max speed error, in km/h
topology_only: Boolean, optional. True to keep
the topology, neglecting velocity and time
            accuracy (use the common Ramer-Douglas-Peucker).
False (default) to simplify segments keeping
|
python
|
{
"resource": ""
}
|
q3921
|
Track.infer_transportation_mode
|
train
|
def infer_transportation_mode(self, clf, min_time):
"""In-place transportation mode inferring of segments
Returns:
This track
"""
|
python
|
{
"resource": ""
}
|
q3922
|
Track.to_trip
|
train
|
def to_trip(
self,
smooth,
smooth_strategy,
smooth_noise,
seg,
seg_eps,
seg_min_time,
simplify,
simplify_max_dist_error,
simplify_max_speed_error
):
"""In-place, transformation of a track into a trip
A trip is a more accurate depiction of reality than a
track.
    For a track to become a trip it needs to go through the
following steps:
+ noise removal
+ smoothing
+ spatio-temporal segmentation
+ simplification
    At the end of these steps we have a less noisy track
    that has fewer points but holds the same information.
    It's required that each segment has its metrics calculated
    or has been preprocessed.
Args:
name: An optional string with the name of the trip. If
            none is given, one will be generated by generate_name
Returns:
This
|
python
|
{
"resource": ""
}
|
q3923
|
Track.infer_transportation_modes
|
train
|
def infer_transportation_modes(self, dt_threshold=10):
"""In-place transportation inferring of segments
Returns:
This track
"""
self.segments = [
|
python
|
{
"resource": ""
}
|
q3924
|
Track.infer_location
|
train
|
def infer_location(
self,
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
):
"""In-place location inferring of segments
Returns:
This track
"""
self.segments = [
segment.infer_location(
|
python
|
{
"resource": ""
}
|
q3925
|
Track.to_json
|
train
|
def to_json(self):
"""Converts track to a JSON serializable format
Returns:
Map with the name, and segments of the track.
"""
return {
'name': self.name,
|
python
|
{
"resource": ""
}
|
q3926
|
Track.merge_and_fit
|
train
|
def merge_and_fit(self, track, pairings):
""" Merges another track with this one, ordering the points based on a
distance heuristic
Args:
track (:obj:`Track`): Track to merge with
pairings
Returns:
:obj:`Segment`: self
"""
for (self_seg_index, track_seg_index, _) in pairings:
self_s = self.segments[self_seg_index]
ss_start = self_s.points[0]
track_s = track.segments[track_seg_index]
tt_start = track_s.points[0]
|
python
|
{
"resource": ""
}
|
q3927
|
Track.get_point_index
|
train
|
def get_point_index(self, point):
""" Gets of the closest first point
Args:
point (:obj:`Point`)
Returns:
(int, int): Segment id and point index in that segment
"""
for i, segment in enumerate(self.segments):
|
python
|
{
"resource": ""
}
|
q3928
|
Track.bounds
|
train
|
def bounds(self, thr=0):
""" Gets the bounds of this segment
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude
"""
min_lat = float("inf")
min_lon = float("inf")
|
python
|
{
"resource": ""
}
|
q3929
|
Track.similarity
|
train
|
def similarity(self, track):
""" Compares two tracks based on their topology
This method compares the given track against this
    instance. It only verifies if the given track is close
    to this one, not the other way around
Args:
track (:obj:`Track`)
Returns:
        Two-tuple with the global similarity between tracks
        and an array with the similarity between segments
"""
idx = index.Index()
i = 0
for i, segment in enumerate(self.segments):
|
python
|
{
"resource": ""
}
|
q3930
|
Track.to_gpx
|
train
|
def to_gpx(self):
"""Converts track to a GPX format
Uses GPXPY library as an intermediate format
Returns:
A string with the GPX/XML track
"""
gpx_segments = []
for segment in self.segments:
gpx_points = []
for point in segment.points:
time = ''
if point.time:
iso_time = point.time.isoformat().split('.')[0]
time = '<time>%s</time>' % iso_time
gpx_points.append(
u'<trkpt lat="%f" lon="%f">%s</trkpt>' % (point.lat, point.lon, time)
)
points = u'\n\t\t\t'.join(gpx_points)
|
python
|
{
"resource": ""
}
|
q3931
|
Track.timezone
|
train
|
def timezone(self, timezone=0):
""" Sets the timezone of the entire track
Args:
timezone (int): Timezone hour delta
"""
tz_dt = timedelta(hours=timezone)
for segment in self.segments:
|
python
|
{
"resource": ""
}
|
q3932
|
Track.to_life
|
train
|
def to_life(self):
"""Converts track to LIFE format
"""
buff = "--%s\n" % self.segments[0].points[0].time.strftime("%Y_%m_%d")
# buff += "--" + day
# buff += "UTC+s" # if needed
def military_time(time):
""" Converts time to military time
Args:
time (:obj:`datetime.datetime`)
Returns:
str: Time in the format 1245 (12 hours and 45 minutes)
"""
return time.strftime("%H%M")
def stay(buff, start, end, place):
""" Creates a stay representation
Args:
start (:obj:`datetime.datetime` or str)
end (:obj:`datetime.datetime` or str)
place (:obj:`Location`)
Returns:
str
"""
if not isinstance(start, str):
start = military_time(start)
if not isinstance(end, str):
end = military_time(end)
return "%s\n%s-%s: %s" % (buff, start, end, place.label)
def trip(buff, segment):
""" Creates a trip representation
Args:
buff (str): buffer
segment (:obj:`Segment`)
Returns:
str: buffer and trip representation
"""
trip = "%s-%s: %s -> %s" % (
military_time(segment.points[0].time),
military_time(segment.points[-1].time),
segment.location_from.label,
segment.location_to.label
)
t_modes = segment.transportation_modes
if len(t_modes) == 1:
trip = "%s [%s]" % (trip, t_modes[0]['label'])
elif len(t_modes) > 1:
modes = []
for mode in t_modes:
trip_from = military_time(segment.points[mode['from']].time)
|
python
|
{
"resource": ""
}
|
q3933
|
Track.from_gpx
|
train
|
def from_gpx(file_path):
""" Creates a Track from a GPX file.
No preprocessing is done.
Arguments:
file_path (str): file path and name to the GPX file
Return:
:obj:`list` of :obj:`Track`
"""
    with open(file_path, 'r') as gpx_file:
        gpx = gpxpy.parse(gpx_file)
file_name = basename(file_path)
tracks = []
for i, track in enumerate(gpx.tracks):
segments = []
for segment in track.segments:
|
python
|
{
"resource": ""
}
|
q3934
|
Track.from_json
|
train
|
def from_json(json):
"""Creates a Track from a JSON file.
No preprocessing is done.
Arguments:
json: map with the keys: name (optional) and segments.
Return:
A track instance
|
python
|
{
"resource": ""
}
|
q3935
|
line
|
train
|
def line(p1, p2):
"""Creates a line from two points
From http://stackoverflow.com/a/20679579
Args:
p1 ([float, float]): x and y coordinates
|
python
|
{
"resource": ""
}
|
q3936
|
intersection
|
train
|
def intersection(L1, L2):
"""Intersects two line segments
Args:
L1 ([float, float]): x and y coordinates
L2 ([float, float]): x and y coordinates
Returns:
bool: if they intersect
(float, float): x and y of intersection, if they do
"""
D = L1[0] * L2[1] - L1[1] *
|
python
|
{
"resource": ""
}
|
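Note: `line` (row q3935) and `intersection` implement the Stack Overflow method cited in the docstring: a line is stored as coefficients `(A, B, C)` of `A*x + B*y = C`, and intersections are solved with Cramer's rule. A self-contained sketch:

```python
def line(p1, p2):
    """Line through two points, as (A, B, C) with A*x + B*y = C."""
    a = p1[1] - p2[1]
    b = p2[0] - p1[0]
    c = p1[0] * p2[1] - p2[0] * p1[1]
    return a, b, -c

def intersection(l1, l2):
    """Intersection of two (A, B, C) lines, or False if parallel."""
    d = l1[0] * l2[1] - l1[1] * l2[0]    # determinant (Cramer's rule)
    if d == 0:
        return False
    d_x = l1[2] * l2[1] - l1[1] * l2[2]
    d_y = l1[0] * l2[2] - l1[2] * l2[0]
    return d_x / d, d_y / d

print(intersection(line((0, 0), (1, 1)), line((0, 1), (1, 0))))  # (0.5, 0.5)
```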
q3937
|
closest_point
|
train
|
def closest_point(a, b, p):
"""Finds closest point in a line segment
Args:
a ([float, float]): x and y coordinates. Line start
b ([float, float]): x and y coordinates. Line end
p ([float, float]): x and y coordinates. Point to find in the segment
Returns:
(float, float): x and y coordinates of the closest point
"""
ap = [p[0]-a[0], p[1]-a[1]]
ab
|
python
|
{
"resource": ""
}
|
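Note: the `closest_point` cell is cut after the `ap`/`ab` vectors. The standard completion projects `p` onto `ab` and clamps the parameter to the segment; the clamping is an assumption about the truncated code:

```python
def closest_point(a, b, p):
    """Point on segment a-b closest to p (all [x, y] pairs)."""
    ap = [p[0] - a[0], p[1] - a[1]]
    ab = [b[0] - a[0], b[1] - a[1]]
    ab2 = ab[0] ** 2 + ab[1] ** 2
    if ab2 == 0:
        return a[0], a[1]                       # degenerate segment
    t = (ap[0] * ab[0] + ap[1] * ab[1]) / ab2   # projection parameter
    t = max(0.0, min(1.0, t))                   # clamp onto the segment
    return a[0] + ab[0] * t, a[1] + ab[1] * t

print(closest_point([0, 0], [10, 0], [3, 4]))   # (3.0, 0.0)
```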
q3938
|
distance_to_line
|
train
|
def distance_to_line(a, b, p):
"""Closest distance between a line segment and a point
Args:
a ([float, float]): x and y coordinates. Line start
|
python
|
{
"resource": ""
}
|
q3939
|
distance_similarity
|
train
|
def distance_similarity(a, b, p, T=CLOSE_DISTANCE_THRESHOLD):
"""Computes the distance similarity between a line segment
and a point
Args:
a ([float, float]): x and y coordinates. Line start
b ([float, float]): x and y coordinates. Line end
p ([float, float]): x and y coordinates. Point to compute the distance
Returns:
|
python
|
{
"resource": ""
}
|
q3940
|
line_distance_similarity
|
train
|
def line_distance_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
"""Line distance similarity between two line segments
Args:
p1a ([float, float]): x and y coordinates. Line A start
p1b ([float, float]): x and y coordinates. Line A end
p2a ([float, float]): x and y coordinates. Line B start
p2b ([float, float]): x and y coordinates.
|
python
|
{
"resource": ""
}
|
q3941
|
line_similarity
|
train
|
def line_similarity(p1a, p1b, p2a, p2b, T=CLOSE_DISTANCE_THRESHOLD):
"""Similarity between two lines
Args:
p1a ([float, float]): x and y coordinates. Line A start
p1b ([float, float]): x and y coordinates. Line A end
p2a ([float, float]): x and y coordinates. Line B start
p2b ([float, float]): x and y coordinates. Line B end
Returns:
|
python
|
{
"resource": ""
}
|
q3942
|
bounding_box_from
|
train
|
def bounding_box_from(points, i, i1, thr):
"""Creates bounding box for a line segment
Args:
points (:obj:`list` of :obj:`Point`)
i (int): Line segment start, index in points array
i1 (int): Line segment end, index in points array
Returns:
(float, float, float, float): with bounding box min x, min y, max x and max y
|
python
|
{
"resource": ""
}
|
q3943
|
segment_similarity
|
train
|
def segment_similarity(A, B, T=CLOSE_DISTANCE_THRESHOLD):
"""Computes the similarity between two segments
Args:
A (:obj:`Segment`)
B (:obj:`Segment`)
Returns:
float: between 0 and 1. Where 1 is very similar and 0 is completely different
"""
l_a = len(A.points)
l_b = len(B.points)
idx = index.Index()
dex = 0
for i in range(l_a-1):
idx.insert(dex, bounding_box_from(A.points, i, i+1, T), obj=[A.points[i], A.points[i+1]])
dex = dex + 1
prox_acc = []
for i in range(l_b-1):
ti = B.points[i].gen2arr()
ti1 = B.points[i+1].gen2arr()
bb = bounding_box_from(B.points, i, i+1, T)
intersects = idx.intersection(bb, objects=True)
n_prox = []
i_prox = 0
a = 0
for x in intersects:
a = a + 1
|
python
|
{
"resource": ""
}
|
q3944
|
sort_segment_points
|
train
|
def sort_segment_points(Aps, Bps):
"""Takes two line segments and sorts all their points,
so that they form a continuous path
Args:
Aps: Array of tracktotrip.Point
Bps: Array of tracktotrip.Point
Returns:
Array with points ordered
"""
mid = []
j = 0
mid.append(Aps[0])
for i in range(len(Aps)-1):
dist = distance_tt_point(Aps[i], Aps[i+1])
for m in range(j, len(Bps)):
distm = distance_tt_point(Aps[i], Bps[m])
if dist > distm:
|
python
|
{
"resource": ""
}
|
q3945
|
distance
|
train
|
def distance(latitude_1, longitude_1, elevation_1, latitude_2, longitude_2, elevation_2,
haversine=None):
""" Distance between two points """
# If points too distant -- compute haversine distance:
if haversine or (abs(latitude_1 - latitude_2) > .2 or abs(longitude_1 - longitude_2) > .2):
return haversine_distance(latitude_1, longitude_1, latitude_2, longitude_2)
coef = math.cos(latitude_1 / 180. * math.pi)
#pylint: disable=invalid-name
x = latitude_1 - latitude_2
|
python
|
{
"resource": ""
}
|
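Note: this `distance` helper follows the gpxpy-style scheme: haversine when points are far apart, otherwise a planar approximation in which the longitude delta is shrunk by `cos(latitude)`. A hedged completion of the planar branch; the `ONE_DEGREE` constant (meters per degree of latitude) is an assumption:

```python
import math

ONE_DEGREE = 1000.0 * 10000.8 / 90.0   # ~111,120 m per degree (assumed)

def flat_distance(lat1, lon1, lat2, lon2):
    """Planar approximation, adequate for nearby points."""
    coef = math.cos(math.radians(lat1))  # longitude degrees shrink with latitude
    x = lat1 - lat2
    y = (lon1 - lon2) * coef
    return math.sqrt(x * x + y * y) * ONE_DEGREE

print(round(flat_distance(41.15, -8.61, 41.16, -8.60), 1))  # ~1391.0 meters
```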
q3946
|
Point.distance
|
train
|
def distance(self, other):
""" Distance between points
Args:
other (:obj:`Point`)
Returns:
float: Distance in km
"""
|
python
|
{
"resource": ""
}
|
q3947
|
Point.compute_metrics
|
train
|
def compute_metrics(self, previous):
""" Computes the metrics of this point
Computes and updates the dt, vel and acc attributes.
Args:
previous (:obj:`Point`): Point before
Returns:
:obj:`Point`: Self
"""
delta_t = self.time_difference(previous)
delta_x = self.distance(previous)
vel = 0
delta_v = 0
acc = 0
if delta_t != 0:
|
python
|
{
"resource": ""
}
|
q3948
|
Point.from_gpx
|
train
|
def from_gpx(gpx_track_point):
""" Creates a point from GPX representation
Arguments:
gpx_track_point (:obj:`gpxpy.GPXTrackPoint`)
Returns:
:obj:`Point`
"""
|
python
|
{
"resource": ""
}
|
q3949
|
Point.to_json
|
train
|
def to_json(self):
""" Creates a JSON serializable representation of this instance
Returns:
:obj:`dict`: For example,
{
"lat": 9.3470298,
"lon": 3.79274,
"time": "2016-07-15T15:27:53.574110"
}
"""
|
python
|
{
"resource": ""
}
|
q3950
|
Point.from_json
|
train
|
def from_json(json):
""" Creates Point instance from JSON representation
Args:
json (:obj:`dict`): Must have at least the following keys: lat (float), lon (float),
time (string in iso format). Example,
{
"lat": 9.3470298,
"lon": 3.79274,
"time": "2016-07-15T15:27:53.574110"
|
python
|
{
"resource": ""
}
|
q3951
|
compute_centroid
|
train
|
def compute_centroid(points):
""" Computes the centroid of set of points
Args:
points (:obj:`list` of :obj:`Point`)
Returns:
:obj:`Point`
|
python
|
{
"resource": ""
}
|
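Note: `compute_centroid` is just the coordinate mean. A trivial stand-alone sketch over `(lat, lon)` tuples instead of `Point` objects:

```python
def compute_centroid(points):
    """Centroid of a set of (lat, lon) tuples."""
    lats = [p[0] for p in points]
    lons = [p[1] for p in points]
    return sum(lats) / len(points), sum(lons) / len(points)

print(compute_centroid([(41.0, -8.0), (41.2, -8.2), (41.1, -8.1)]))
# approximately (41.1, -8.1)
```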
q3952
|
update_location_centroid
|
train
|
def update_location_centroid(point, cluster, max_distance, min_samples):
""" Updates the centroid of a location cluster with another point
Args:
point (:obj:`Point`): Point to add to the cluster
cluster (:obj:`list` of :obj:`Point`): Location cluster
max_distance (float): Max neighbour distance
min_samples (int): Minimum number of samples
Returns:
(:obj:`Point`, :obj:`list` of :obj:`Point`): Tuple with the location centroid
and new point cluster (given cluster + given point)
"""
cluster.append(point)
points = [p.gen2arr() for p in cluster]
# Estimates the epsilon
eps = estimate_meters_to_deg(max_distance, precision=6)
p_cluster = DBSCAN(eps=eps, min_samples=min_samples)
p_cluster.fit(points)
clusters = {}
for i, label in enumerate(p_cluster.labels_):
if label in clusters.keys():
|
python
|
{
"resource": ""
}
|
q3953
|
query_foursquare
|
train
|
def query_foursquare(point, max_distance, client_id, client_secret):
""" Queries Squarespace API for a location
Args:
point (:obj:`Point`): Point location to query
max_distance (float): Search radius, in meters
client_id (str): Valid Foursquare client id
client_secret (str): Valid Foursquare client secret
Returns:
:obj:`list` of :obj:`dict`: List of locations with the following format:
{
'label': 'Coffee house',
'distance': 19,
'types': 'Commerce',
'suggestion_type': 'FOURSQUARE'
}
"""
|
python
|
{
"resource": ""
}
|
q3954
|
query_google
|
train
|
def query_google(point, max_distance, key):
""" Queries google maps API for a location
Args:
point (:obj:`Point`): Point location to query
max_distance (float): Search radius, in meters
key (str): Valid google maps api key
Returns:
:obj:`list` of :obj:`dict`: List of locations with the following format:
{
'label': 'Coffee house',
'types': 'Commerce',
'suggestion_type': 'GOOGLE'
}
"""
if not key:
return []
if from_cache(GG_CACHE, point,
|
python
|
{
"resource": ""
}
|
q3955
|
estimate_meters_to_deg
|
train
|
def estimate_meters_to_deg(meters, precision=PRECISION_PERSON):
""" Meters to degrees estimation
See https://en.wikipedia.org/wiki/Decimal_degrees
Args:
meters (float)
precision (float)
Returns:
|
python
|
{
"resource": ""
}
|
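Note: per the Decimal degrees article cited above, one degree of latitude spans roughly 111,320 m. A sketch of the estimation; the exact constant and rounding the library uses are assumptions:

```python
def estimate_meters_to_deg(meters, precision=5):
    """Rough meters-to-decimal-degrees conversion (1 deg ~ 111,320 m)."""
    return round(meters / 111320.0, precision)

print(estimate_meters_to_deg(50))  # 0.00045
```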
q3956
|
isostr_to_datetime
|
train
|
def isostr_to_datetime(dt_str):
""" Converts iso formated text string into a datetime object
Args:
dt_str (str): ISO formated text string
Returns:
|
python
|
{
"resource": ""
}
|
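Note: on Python 3.7+ `datetime.fromisoformat` parses strings like those in this dataset; the original library likely used `strptime`, so treat this as a modern equivalent rather than the source's code:

```python
from datetime import datetime

def isostr_to_datetime(dt_str):
    """Parse an ISO 8601 string, tolerating a trailing 'Z' suffix."""
    return datetime.fromisoformat(dt_str.replace('Z', '+00:00'))

print(isostr_to_datetime('2016-07-15T15:27:53.574110'))
```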
q3957
|
Classifier.__learn_labels
|
train
|
def __learn_labels(self, labels):
""" Learns new labels, this method is intended for internal use
Args:
labels (:obj:`list` of :obj:`str`): Labels to learn
"""
if self.feature_length > 0:
|
python
|
{
"resource": ""
}
|
q3958
|
Classifier.learn
|
train
|
def learn(self, features, labels):
""" Fits the classifier
    If its state is empty, the classifier is fitted; if not,
    the classifier is partially fitted.
See sklearn's SGDClassifier fit and partial_fit methods.
Args:
features (:obj:`list` of :obj:`list` of :obj:`float`)
labels (:obj:`list` of :obj:`str`): Labels for each set of features.
            New labels are learnt.
"""
labels = np.ravel(labels)
self.__learn_labels(labels)
if len(labels) == 0:
|
python
|
{
"resource": ""
}
|
q3959
|
Classifier.predict
|
train
|
def predict(self, features, verbose=False):
""" Probability estimates of each feature
See sklearn's SGDClassifier predict and predict_proba methods.
Args:
features (:obj:`list` of :obj:`list` of :obj:`float`)
verbose: Boolean, optional. If true returns an array where each
element is a dictionary, where keys are labels and values are
the respective probabilities. Defaults to False.
Returns:
        Array of arrays of numbers, or array of dictionaries if verbose is
        True
|
python
|
{
"resource": ""
}
|
q3960
|
FromCSVTablesGenerator.source_loader
|
train
|
def source_loader(self, source_paths, create_missing_tables=True):
"""Load source from 3 csv files.
First file should contain global settings:
* ``native_lagnauge,languages`` header on first row
* appropriate values on following rows
Example::
native_lagnauge,languages
ru,ru
,en
Second file should contain templates:
* ``template_name,probability,genders,template`` header on first row
* appropriate values on following rows (separate values with semicolon ";" in template column)
Example::
template_name,probability,genders,template
male_1,5,m,prefixes;male_suffixes
baby_1,1,m;f,prefixes;descriptive
Third file should contain tables with values for template slugs in all languages:
* first row should contain slugs with language code after colon for each
* appropriate values on following rows. Multiple forms may be specified using semicolon as separator
Example::
prefixes:ru,prefixes:en,male_suffixes:ru,male_suffixes:en,descriptive:ru,descriptive:en
Бж,Bzh,пра,pra,быстряк;быстряку,fasty
дон;дону,don,Иван;Ивану,Ivan,Иванов;Иванову,Ivanov
    Note: you may use slugs without the ":lang_code" suffix in the csv header of the tables file. Such headers are treated as headers for the native language.
    If tables are missing for some slug then one is automatically created with values equal to the slug itself.
So you may use some slugs without specifying tables data for them. Example for apostrophe
|
python
|
{
"resource": ""
}
|
q3961
|
loader
|
train
|
def loader():
"""Load image from URL, and preprocess for Resnet."""
url = request.args.get('url')
|
python
|
{
"resource": ""
}
|
q3962
|
postprocessor
|
train
|
def postprocessor(prediction):
"""Map prediction tensor to labels."""
prediction = prediction.data.numpy()[0]
|
python
|
{
"resource": ""
}
|
q3963
|
get_logger
|
train
|
def get_logger(name):
"""Get a logger with the specified name."""
logger = logging.getLogger(name)
|
python
|
{
"resource": ""
}
|
q3964
|
make_serializable
|
train
|
def make_serializable(data):
"""Ensure data is serializable."""
if is_serializable(data):
return data
# if numpy array convert to list
try:
return data.tolist()
except AttributeError:
pass
except Exception as e:
logger.debug('{} exception ({}): {}'.format(type(e).__name__, e, data))
# try serializing each child element
if isinstance(data, dict):
return {key: make_serializable(value) for key, value in data.items()}
|
python
|
{
"resource": ""
}
|
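Note: the `make_serializable` cell ends at the dict branch. A plausible completion that also recurses into lists, tuples and sets, and falls back to `str`; `is_serializable` and the fallback branches are assumptions mirroring the snippet's intent:

```python
import json
import numpy as np

def is_serializable(data):
    """True if data survives json.dumps as-is."""
    try:
        json.dumps(data)
        return True
    except (TypeError, ValueError):
        return False

def make_serializable(data):
    """Coerce numpy arrays and containers into JSON-friendly values."""
    if is_serializable(data):
        return data
    try:
        return data.tolist()            # numpy arrays expose tolist()
    except AttributeError:
        pass
    if isinstance(data, dict):
        return {key: make_serializable(value) for key, value in data.items()}
    if isinstance(data, (list, tuple, set)):
        return [make_serializable(item) for item in data]
    return str(data)                    # last resort: stringify

print(make_serializable({'a': np.arange(3), 'b': {1, 2}}))
# {'a': [0, 1, 2], 'b': [1, 2]}
```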
q3965
|
json_numpy_loader
|
train
|
def json_numpy_loader():
"""Load data from JSON request and convert to numpy array."""
data = request.get_json()
|
python
|
{
"resource": ""
}
|
q3966
|
get_bytes_to_image_callback
|
train
|
def get_bytes_to_image_callback(image_dims=(224, 224)):
"""Return a callback to process image bytes for ImageNet."""
from keras.preprocessing import image
import numpy as np
from PIL import Image
from io import BytesIO
def preprocess_image_bytes(data_bytes):
"""Process image bytes for ImageNet."""
try:
img = Image.open(BytesIO(data_bytes)) # open image
except OSError as e:
raise ValueError('Please provide a raw image')
img = img.resize(image_dims, Image.ANTIALIAS) #
|
python
|
{
"resource": ""
}
|
q3967
|
exception_log_and_respond
|
train
|
def exception_log_and_respond(exception, logger, message, status_code):
"""Log an error and send jsonified respond."""
logger.error(message, exc_info=True)
return make_response(
message,
|
python
|
{
"resource": ""
}
|
q3968
|
make_response
|
train
|
def make_response(message, status_code, details=None):
"""Make a jsonified response with specified message and status code."""
response_body = dict(message=message)
if details:
|
python
|
{
"resource": ""
}
|
q3969
|
ModelServer._create_prediction_endpoint
|
train
|
def _create_prediction_endpoint(
self,
to_numpy=True,
data_loader=json_numpy_loader,
preprocessor=lambda x: x,
input_validation=lambda data: (True, None),
postprocessor=lambda x: x,
make_serializable_post=True):
"""Create an endpoint to serve predictions.
Arguments:
- input_validation (fn): takes a numpy array as input;
returns True if validation passes and False otherwise
- data_loader (fn): reads flask request and returns data preprocessed to be
used in the `predict` method
- postprocessor (fn): transforms the predictions from the `predict` method
"""
# copy instance variables to local scope for resource class
predict = self.predict
logger = self.app.logger
# create restful resource
class Predictions(Resource):
@staticmethod
def post():
# read data from API request
try:
data = data_loader()
except Exception as e:
return exception_log_and_respond(e, logger, 'Unable to fetch data', 400)
try:
if hasattr(preprocessor, '__iter__'):
for preprocessor_step in preprocessor:
data = preprocessor_step(data)
else:
data = preprocessor(data) # preprocess data
data = np.array(data) if to_numpy else data # convert to numpy
except Exception as e:
return exception_log_and_respond(e, logger, 'Could not preprocess data', 400)
# sanity check using user defined callback (default is no check)
validation_pass, validation_reason = input_validation(data)
if not validation_pass:
# if validation fails, log the reason code, log the data, and send a 400 response
validation_message = 'Input validation failed with reason: {}'.format(validation_reason)
logger.error(validation_message)
|
python
|
{
"resource": ""
}
|
q3970
|
ModelServer.serve
|
train
|
def serve(self, host='127.0.0.1', port=5000):
"""Serve predictions as an API endpoint."""
from meinheld import server, middleware
# self.app.run(host=host, port=port)
|
python
|
{
"resource": ""
}
|
q3971
|
get_model
|
train
|
def get_model(input_dim):
"""Create and compile simple model."""
model = Sequential()
model.add(Dense(100, input_dim=input_dim, activation='sigmoid'))
|
python
|
{
"resource": ""
}
|
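Note: the `get_model` cell stops after the hidden layer. A runnable completion with an assumed binary-classification head and compile step; the output layer, loss and optimizer are editorial choices, not the source's:

```python
from keras.models import Sequential
from keras.layers import Dense

def get_model(input_dim):
    """Create and compile a simple binary classifier (assumed head)."""
    model = Sequential()
    model.add(Dense(100, input_dim=input_dim, activation='sigmoid'))
    model.add(Dense(1, activation='sigmoid'))   # assumed output layer
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',   # assumed objective
                  metrics=['accuracy'])
    return model

model = get_model(input_dim=30)
model.summary()
```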
q3972
|
validator
|
train
|
def validator(input_data):
"""Simple model input validator.
    Validator ensures the input data array
    - is two dimensional
    - has the correct number of features.
"""
global data
# check num dims
if input_data.ndim != 2:
return False, 'Data should have two dimensions.'
# check number of columns
if input_data.shape[1] != data.data.shape[1]:
|
python
|
{
"resource": ""
}
|
q3973
|
read_chd_header
|
train
|
def read_chd_header(chd_file):
"""
read the .chd header file created when Vision Research software saves the images in a file format other than .cine
"""
with open(chd_file, "rb") as f:
header = {
"cinefileheader": cine.CINEFILEHEADER(),
"bitmapinfoheader": cine.BITMAPINFOHEADER(),
|
python
|
{
"resource": ""
}
|
q3974
|
_load_module
|
train
|
def _load_module(module_name, path):
'''A helper function invoked on the server to tell it to import a module.'''
# TODO: handle the case that the module is already loaded
try:
# First try to find a non-builtin, non-frozen, non-special
# module using the client's search path
|
python
|
{
"resource": ""
}
|
q3975
|
byvalue
|
train
|
def byvalue(proxy):
'''Return a copy of the underlying object for which the argument
is a proxy.'''
|
python
|
{
"resource": ""
}
|
q3976
|
Client.state
|
train
|
def state(self, state):
'''Change the state of the client. This is one of the values
defined in ClientStates.'''
|
python
|
{
"resource": ""
}
|
q3977
|
Client._read_result
|
train
|
def _read_result(self, num_retries):
'''Read an object from a channel, possibly retrying if the attempt
is interrupted by a signal from the operating system.'''
for i in range(num_retries):
self._assert_alive()
try:
return self._result_channel.get()
except IOError as ex:
if ex.errno == 4:
# errno=4 corresponds to "System call interrupted",
                # which means a signal was received before any data
# was sent. For now I think it's safe to
|
python
|
{
"resource": ""
}
|
q3978
|
Client.terminate
|
train
|
def terminate(self):
'''Stop the server process and change our state to TERMINATING. Only valid if state=READY.'''
logger.debug('client.terminate() called (state=%s)', self.strstate)
if self.state == ClientState.WAITING_FOR_RESULT:
        raise ClientStateError('terminate() called while state='+self.strstate)
if self.state == ClientState.TERMINATING:
        raise ClientStateError('terminate() called while state='+self.strstate)
elif self.state in ClientState.TerminatedSet:
assert not self._server_process.is_alive()
return
elif self.state == ClientState.READY:
# Check that the process itself is still alive
self._assert_alive()
# Make sure the SIGCHLD signal handler doesn't throw any exceptions
self.state = ClientState.TERMINATING
# Do not call execute() because that function will check
# whether the process is alive and throw an exception if not
# TODO: can the queue itself throw exceptions?
|
python
|
{
"resource": ""
}
|
q3979
|
Client.cleanup
|
train
|
def cleanup(self):
'''Terminate this client if it has not already terminated.'''
if self.state == ClientState.WAITING_FOR_RESULT:
# There is an ongoing call to execute()
# Not sure what to do here
logger.warn('cleanup() called while state is WAITING_FOR_RESULT: ignoring')
elif self.state == ClientState.TERMINATING:
        # terminate() has been called but we have not received SIGCHLD yet
# Not sure what to do here
logger.warn('cleanup() called while state is TERMINATING: ignoring')
elif self.state in ClientState.TerminatedSet:
# We have already terminated
# TODO: should we deal with TERMINATED_ASYNC in some special way?
logger.debug('cleanup() called while state is
|
python
|
{
"resource": ""
}
|
q3980
|
IsolationContext.start
|
train
|
def start(self):
'''Create a process in which the isolated code will be run.'''
assert self._client is None
logger.debug('IsolationContext[%d] starting', id(self))
# Create the queues
request_queue = multiprocessing.Queue()
response_queue = multiprocessing.Queue()
# Launch the server process
server = Server(request_queue, response_queue) # Do not
|
python
|
{
"resource": ""
}
|
q3981
|
IsolationContext.load_module
|
train
|
def load_module(self, module_name, path=None):
'''Import a module into this isolation context and return a proxy for it.'''
self.ensure_started()
if path is None:
path = sys.path
|
python
|
{
"resource": ""
}
|
q3982
|
_has_annotation
|
train
|
def _has_annotation(annotation, value):
""" Returns a function that can be used as a predicate in get_members, that """
def matches_property_name(fun):
|
python
|
{
"resource": ""
}
|
q3983
|
_get_getter_fun
|
train
|
def _get_getter_fun(object_type, # type: Type
parameter, # type: Parameter
private_property_name # type: str
):
"""
Utility method to find the overridden getter function for a given property, or generate a new one
:param object_type:
:param property_name:
:param private_property_name:
:return:
"""
property_name = parameter.name
# -- check overridden getter for this property name
overridden_getters = getmembers(object_type, predicate=_has_annotation(__GETTER_OVERRIDE_ANNOTATION, property_name))
if len(overridden_getters) > 0:
if len(overridden_getters) > 1:
raise DuplicateOverrideError('Getter is overridden more than once for attribute name : ' + property_name)
# --use the overridden getter
getter_fun = overridden_getters[0][1]
# --check its signature
s = signature(getter_fun)
if not ('self' in s.parameters.keys() and len(s.parameters.keys()) == 1):
raise IllegalGetterSignatureException('overridden getter must only have a self parameter, found ' +
|
python
|
{
"resource": ""
}
|
q3984
|
_get_setter_fun
|
train
|
def _get_setter_fun(object_type, # type: Type
parameter, # type: Parameter
private_property_name # type: str
):
"""
Utility method to find the overridden setter function for a given property, or generate a new one
:param object_type:
:param property_name:
:param property_type:
:param private_property_name:
:return:
"""
# the property will have the same name than the constructor argument
property_name = parameter.name
overridden_setters = getmembers(object_type, _has_annotation(__SETTER_OVERRIDE_ANNOTATION, property_name))
if len(overridden_setters) > 0:
# --check that we only have one
if len(overridden_setters) > 1:
raise DuplicateOverrideError('Setter is overridden more than once for attribute name : %s' % property_name)
# --use the overridden setter
setter_fun = overridden_setters[0][1]
try:
# python 2
setter_fun = setter_fun.im_func
except AttributeError:
pass
# --find the parameter name and check the signature
s = signature(setter_fun)
    p = [attribute_name for attribute_name, param in s.parameters.items() if attribute_name != 'self']  # 'is not' on a str literal is unreliable
if len(p) != 1:
|
python
|
{
"resource": ""
}
|
q3985
|
getter_override
|
train
|
def getter_override(attribute=None, # type: str
f=DECORATED
):
"""
A decorator to indicate an overridden getter for a given attribute. If the attribute name
|
python
|
{
"resource": ""
}
|
q3986
|
is_attr_selected
|
train
|
def is_attr_selected(attr_name, # type: str
include=None, # type: Union[str, Tuple[str]]
exclude=None # type: Union[str, Tuple[str]]
):
"""decide whether an action has to be performed on the attribute or not, based on its name"""
if include is not None and exclude is not None:
raise ValueError('Only one of \'include\' or \'exclude\' argument should be provided.')
# win time by not doing this
#
|
python
|
{
"resource": ""
}
|
q3987
|
method_already_there
|
train
|
def method_already_there(object_type, method_name, this_class_only=False):
"""
Returns True if method `method_name` is already implemented by object_type, that is, its implementation differs from
the one in `object`.
:param object_type:
:param method_name:
|
python
|
{
"resource": ""
}
|
q3988
|
OrderedMultiDict.extend
|
train
|
def extend(self, *args, **kwargs):
"""Add key value pairs for an iterable."""
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
iterable = args[0] if args else None
if iterable:
if isinstance(iterable, Mapping) or hasattr(iterable, 'items'):
for key, value in iterable.items():
self.append(key, value)
elif hasattr(iterable, 'keys'):
|
python
|
{
"resource": ""
}
|
q3989
|
OrderedMultiDict.__insert_wrapper
|
train
|
def __insert_wrapper(func):
"""Make sure the arguments given to the insert methods are correct"""
def check_func(self, key, new_item, instance=0):
if key not in self.keys():
raise KeyError("%s not a key in label" % (key))
if not isinstance(new_item, (list, OrderedMultiDict)):
|
python
|
{
"resource": ""
}
|
q3990
|
OrderedMultiDict._get_index_for_insert
|
train
|
def _get_index_for_insert(self, key, instance):
"""Get the index of the key to insert before or after"""
if instance == 0:
        # Index method will return the first occurrence of the key
index = self.keys().index(key)
else:
occurrence = -1
for index, k in enumerate(self.keys()):
if k == key:
occurrence += 1
if occurrence == instance:
                # Found the key and the correct occurrence of the key
break
if occurrence != instance:
# Gone through the entire list of keys and the instance number
            # given is too high for the number of occurrences of the key
|
python
|
{
"resource": ""
}
|
q3991
|
OrderedMultiDict._insert_item
|
train
|
def _insert_item(self, key, new_item, instance, is_after):
"""Insert a new item before or after another item"""
index = self._get_index_for_insert(key, instance)
index = index + 1 if is_after else index
self.__items = self.__items[:index] + new_item + self.__items[index:]
|
python
|
{
"resource": ""
}
|
q3992
|
OrderedMultiDict.insert_after
|
train
|
def insert_after(self, key, new_item, instance=0):
"""Insert an item after a key"""
|
python
|
{
"resource": ""
}
|
q3993
|
OrderedMultiDict.insert_before
|
train
|
def insert_before(self, key, new_item, instance=0):
"""Insert an item before a key"""
|
python
|
{
"resource": ""
}
|
q3994
|
ByteStream.peek
|
train
|
def peek(self, n):
"""Returns buffered bytes without advancing the position.
The argument indicates a desired minimal number of bytes;
|
python
|
{
"resource": ""
}
|
q3995
|
PVLDecoder.parse_group
|
train
|
def parse_group(self, stream):
"""Block Name must match Block Name in paired End Group Statement if
Block Name is present in End Group Statement.
BeginGroupStmt ::=
BeginGroupKeywd WSC AssignmentSymbol WSC BlockName StatementDelim
"""
self.expect_in(stream, self.begin_group_tokens)
self.ensure_assignment(stream)
name = self.next_token(stream)
self.skip_statement_delimiter(stream)
|
python
|
{
"resource": ""
}
|
q3996
|
PVLDecoder.parse_object
|
train
|
def parse_object(self, stream):
"""Block Name must match Block Name in paired End Object Statement
if Block Name is present in End Object Statement StatementDelim.
BeginObjectStmt ::=
BeginObjectKeywd WSC AssignmentSymbol WSC BlockName StatementDelim
"""
self.expect_in(stream, self.begin_object_tokens)
self.ensure_assignment(stream)
name = self.next_token(stream)
|
python
|
{
"resource": ""
}
|
q3997
|
DjangoFaker.varchar
|
train
|
def varchar(self, field=None):
"""
Returns a chunk of text, of maximum length 'max_length'
"""
assert field is not None, "The field parameter must be passed to the 'varchar' method."
max_length = field.max_length
def source():
|
python
|
{
"resource": ""
}
|
q3998
|
DjangoFaker.datetime
|
train
|
def datetime(self, field=None, val=None):
"""
Returns a random datetime. If 'val' is passed, a datetime within two
years of that date will be returned.
"""
if val is None:
def source():
tzinfo = get_default_timezone() if settings.USE_TZ else None
return datetime.fromtimestamp(randrange(1, 2100000000),
tzinfo)
else:
def
|
python
|
{
"resource": ""
}
|
q3999
|
DjangoFaker.date
|
train
|
def date(self, field=None, val=None):
"""
Like datetime, but truncated to be a date only
"""
|
python
|
{
"resource": ""
}
|