Dataset columns: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M).
Merges two stops. For the stops to be merged, they must have: - the same stop_id - the same stop_name (case insensitive) - the same zone_id - locations less than largest_stop_distance apart The other attributes can have arbitrary changes. The merged attributes are taken from the new stop. Args: a: The first stop. b: The second stop. Returns: The merged stop. Raises: MergeError: The stops could not be merged.
def _MergeEntities(self, a, b): distance = transitfeed.ApproximateDistanceBetweenStops(a, b) if distance > self.largest_stop_distance: raise MergeError("Stops are too far apart: %.1fm " "(largest_stop_distance is %.1fm)." % (distance, self.largest_stop_distance)) scheme = {'stop_id': self._MergeIdentical, 'stop_name': self._MergeIdenticalCaseInsensitive, 'zone_id': self._MergeIdentical, 'location_type': self._MergeIdentical} return self._SchemedMerge(scheme, a, b)
197,608
Forces the old and new calendars to be disjoint about a cutoff date. This truncates the service periods of the old schedule so that service stops one day before the given cutoff date and truncates the new schedule so that service only begins on the cutoff date. Args: cutoff: The cutoff date as a string in YYYYMMDD format. The timezone is the same as used in the calendar.txt file.
def DisjoinCalendars(self, cutoff): def TruncatePeriod(service_period, start, end): service_period.start_date = max(service_period.start_date, start) service_period.end_date = min(service_period.end_date, end) dates_to_delete = [] for k in service_period.date_exceptions: if (k < start) or (k > end): dates_to_delete.append(k) for k in dates_to_delete: del service_period.date_exceptions[k] # find the date one day before cutoff year = int(cutoff[:4]) month = int(cutoff[4:6]) day = int(cutoff[6:8]) cutoff_date = datetime.date(year, month, day) one_day_delta = datetime.timedelta(days=1) before = (cutoff_date - one_day_delta).strftime('%Y%m%d') for a in self.feed_merger.a_schedule.GetServicePeriodList(): TruncatePeriod(a, 0, before) for b in self.feed_merger.b_schedule.GetServicePeriodList(): TruncatePeriod(b, cutoff, '9'*8)
197,620
Merges the shapes by taking the new shape. Args: a: The first transitfeed.Shape instance. b: The second transitfeed.Shape instance. Returns: The merged shape. Raises: MergeError: If the ids are different or if the endpoints are further than largest_shape_distance apart.
def _MergeEntities(self, a, b): if a.shape_id != b.shape_id: raise MergeError('shape_id must be the same') distance = max(ApproximateDistanceBetweenPoints(a.points[0][:2], b.points[0][:2]), ApproximateDistanceBetweenPoints(a.points[-1][:2], b.points[-1][:2])) if distance > self.largest_shape_distance: raise MergeError('The shape endpoints are too far away: %.1fm ' '(largest_shape_distance is %.1fm)' % (distance, self.largest_shape_distance)) return self._Migrate(b, self.feed_merger.b_schedule, False)
197,629
Initialise the merger. Once this initialiser has been called, a_schedule and b_schedule should not be modified. Args: a_schedule: The old schedule, an instance of transitfeed.Schedule. b_schedule: The new schedule, an instance of transitfeed.Schedule. merged_schedule: The schedule into which merged entities are written, an instance of transitfeed.Schedule. problem_reporter: The problem reporter, an instance of transitfeed.ProblemReporter.
def __init__(self, a_schedule, b_schedule, merged_schedule, problem_reporter): self.a_schedule = a_schedule self.b_schedule = b_schedule self.merged_schedule = merged_schedule self.a_merge_map = {} self.b_merge_map = {} self.a_zone_map = {} self.b_zone_map = {} self._mergers = [] self._idnum = max(self._FindLargestIdPostfixNumber(self.a_schedule), self._FindLargestIdPostfixNumber(self.b_schedule)) self.problem_reporter = problem_reporter
197,636
Finds the largest integer used as the ending of an id in the schedule. Args: schedule: The schedule to check. Returns: The maximum integer used as an ending for an id.
def _FindLargestIdPostfixNumber(self, schedule): postfix_number_re = re.compile(r'(\d+)$') def ExtractPostfixNumber(entity_id): if entity_id is None: return 0 match = postfix_number_re.search(entity_id) if match is not None: return int(match.group(1)) else: return 0 id_data_sets = {'agency_id': schedule.GetAgencyList(), 'stop_id': schedule.GetStopList(), 'route_id': schedule.GetRouteList(), 'trip_id': schedule.GetTripList(), 'service_id': schedule.GetServicePeriodList(), 'fare_id': schedule.GetFareAttributeList(), 'shape_id': schedule.GetShapeList()} max_postfix_number = 0 for id_name, entity_list in id_data_sets.items(): for entity in entity_list: entity_id = getattr(entity, id_name) postfix_number = ExtractPostfixNumber(entity_id) max_postfix_number = max(max_postfix_number, postfix_number) return max_postfix_number
197,637
Generate a unique id based on the given id. This is done by appending a counter which is then incremented. The counter is initialised at the maximum number used as an ending for any id in the old and new schedules. Args: entity_id: The base id string. This is allowed to be None. Returns: The generated id.
def GenerateId(self, entity_id=None): self._idnum += 1 if entity_id: return '%s_merged_%d' % (entity_id, self._idnum) else: return 'merged_%d' % self._idnum
197,638
Looks for an added DataSetMerger derived from the given class. Args: cls: A class derived from DataSetMerger. Returns: The matching DataSetMerger instance. Raises: LookupError: No matching DataSetMerger has been added.
def GetMerger(self, cls): for merger in self._mergers: if isinstance(merger, cls): return merger raise LookupError('No matching DataSetMerger found')
197,641
Initialize a new ShapePoint object. Args: field_dict: A dictionary mapping attribute name to unicode string
def __init__(self, shape_id=None, lat=None, lon=None, seq=None, dist=None, field_dict=None): self._schedule = None if field_dict: if isinstance(field_dict, self.__class__): for k, v in field_dict.iteritems(): self.__dict__[k] = v else: self.__dict__.update(field_dict) else: self.shape_id = shape_id self.shape_pt_lat = lat self.shape_pt_lon = lon self.shape_pt_sequence = seq self.shape_dist_traveled = dist
197,688
Save the current context to be output with any errors. Args: file_name: string row_num: int row: list of strings headers: list of column headers, its order corresponding to row's
def SetFileContext(self, file_name, row_num, row, headers): self._context = (file_name, row_num, row, headers)
197,691
Return a text string describing the problem. Args: d: map returned by GetDictToFormat with formatting added
def FormatProblem(self, d=None): if not d: d = self.GetDictToFormat() output_error_text = self.__class__.ERROR_TEXT % d if ('reason' in d) and d['reason']: return '%s\n%s' % (output_error_text, d['reason']) else: return output_error_text
197,732
Initialise. Args: raise_warnings: If this is True then warnings are also raised as exceptions. If it is false, warnings are printed to the console using SimpleProblemAccumulator.
def __init__(self, raise_warnings=False): self.raise_warnings = raise_warnings self.accumulator = SimpleProblemAccumulator()
197,738
Returns a point on the shape polyline with the input shape_dist_traveled. Args: shape_dist_traveled: The input shape_dist_traveled. Returns: The shape point as a tuple (lat, lng, shape_dist_traveled), where lat and lng are the location of the shape point, and shape_dist_traveled is an increasing metric representing the distance traveled along the shape. Returns None if there is a data error in the shape.
def GetPointWithDistanceTraveled(self, shape_dist_traveled): if not self.distance: return None if shape_dist_traveled <= self.distance[0]: return self.points[0] if shape_dist_traveled >= self.distance[-1]: return self.points[-1] index = bisect.bisect(self.distance, shape_dist_traveled) (lat0, lng0, dist0) = self.points[index - 1] (lat1, lng1, dist1) = self.points[index] # Interpolate if shape_dist_traveled does not equal any of the points # in the shape segment. # (lat0, lng0) (lat, lng) (lat1, lng1) # -----|--------------------|---------------------|------ # dist0 shape_dist_traveled dist1 # \------- ca --------/ \-------- bc -------/ # \----------------- ba ------------------/ ca = shape_dist_traveled - dist0 bc = dist1 - shape_dist_traveled ba = bc + ca if ba == 0: # This only happens when there's a data error in shapes and should have been # caught before. Check to avoid crash. return None # This won't work crossing longitude 180 and is only an approximation which # works well for short distances. lat = (lat1 * ca + lat0 * bc) / ba lng = (lng1 * ca + lng0 * bc) / ba return (lat, lng, shape_dist_traveled)
197,783
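A minimal standalone sketch of the bisect-plus-linear-interpolation step used by GetPointWithDistanceTraveled above, using a made-up list of (lat, lng, shape_dist_traveled) points in place of a transitfeed Shape:

import bisect

# Hypothetical shape data: one (lat, lng, shape_dist_traveled) tuple per point.
points = [(47.00, -122.00, 0.0), (47.10, -122.00, 1000.0), (47.20, -122.10, 2500.0)]
distance = [p[2] for p in points]

def point_at(shape_dist_traveled):
    # Clamp to the endpoints, mirroring the method above.
    if shape_dist_traveled <= distance[0]:
        return points[0]
    if shape_dist_traveled >= distance[-1]:
        return points[-1]
    index = bisect.bisect(distance, shape_dist_traveled)
    lat0, lng0, dist0 = points[index - 1]
    lat1, lng1, dist1 = points[index]
    ca = shape_dist_traveled - dist0   # distance past the previous point
    bc = dist1 - shape_dist_traveled   # distance short of the next point
    ba = bc + ca                       # length of the whole segment
    lat = (lat1 * ca + lat0 * bc) / ba
    lng = (lng1 * ca + lng0 * bc) / ba
    return (lat, lng, shape_dist_traveled)

print(point_at(500.0))   # (47.05, -122.0, 500.0), halfway along the first segment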
Add a stop to this trip. Stops must be added in the order visited. Args: stop: A Stop object kwargs: remaining keyword args passed to StopTime.__init__ Returns: None
def AddStopTime(self, stop, problems=None, schedule=None, **kwargs): if problems is None: # TODO: delete this branch when StopTime.__init__ doesn't need a # ProblemReporter problems = problems_module.default_problem_reporter stoptime = self.GetGtfsFactory().StopTime( problems=problems, stop=stop, **kwargs) self.AddStopTimeObject(stoptime, schedule)
197,788
Add a StopTime object to the end of this trip. Args: stoptime: A StopTime object. Should not be reused in multiple trips. schedule: Schedule object containing this trip which must be passed to Trip.__init__ or here problems: ProblemReporter object for validating the StopTime in its new home Returns: None
def AddStopTimeObject(self, stoptime, schedule=None, problems=None): if schedule is None: schedule = self._schedule if schedule is None: warnings.warn("No longer supported. _schedule attribute is used to get " "stop_times table", DeprecationWarning) if problems is None: problems = schedule.problem_reporter new_secs = stoptime.GetTimeSecs() cursor = schedule._connection.cursor() cursor.execute("SELECT max(stop_sequence), max(arrival_secs), " "max(departure_secs) FROM stop_times WHERE trip_id=?", (self.trip_id,)) row = cursor.fetchone() if row[0] is None: # This is the first stop_time of the trip stoptime.stop_sequence = 1 if new_secs == None: problems.OtherProblem( 'No time for first StopTime of trip_id "%s"' % (self.trip_id,)) else: stoptime.stop_sequence = row[0] + 1 prev_secs = max(row[1], row[2]) if new_secs != None and new_secs < prev_secs: problems.OtherProblem( 'out of order stop time for stop_id=%s trip_id=%s %s < %s' % (util.EncodeUnicode(stoptime.stop_id), util.EncodeUnicode(self.trip_id), util.FormatSecondsSinceMidnight(new_secs), util.FormatSecondsSinceMidnight(prev_secs))) self._AddStopTimeObjectUnordered(stoptime, schedule)
197,791
Validate attributes of this object. Check that this object has all required values set to a valid value without reference to the rest of the schedule. If the _schedule attribute is set then check that references such as route_id and service_id are correct. Args: problems: A ProblemReporter object validate_children: if True and the _schedule attribute is set, then call ValidateChildren
def Validate(self, problems, validate_children=True): self.ValidateRouteId(problems) self.ValidateServicePeriod(problems) self.ValidateDirectionId(problems) self.ValidateTripId(problems) self.ValidateShapeIdsExistInShapeList(problems) self.ValidateRouteIdExistsInRouteList(problems) self.ValidateServiceIdExistsInServiceList(problems) self.ValidateBikesAllowed(problems) self.ValidateWheelchairAccessible(problems) if self._schedule and validate_children: self.ValidateChildren(problems)
197,816
Return a tuple that outputs a row of _FIELD_NAMES to be written to a GTFS file. Arguments: trip_id: The trip_id of the trip to which this StopTime corresponds. It must be provided, as it is not stored in StopTime.
def GetFieldValuesTuple(self, trip_id): result = [] for fn in self._FIELD_NAMES: if fn == 'trip_id': result.append(trip_id) else: # Since we'll be writing to an output file, we want empty values to be # written as an empty string result.append(getattr(self, fn) or '') return tuple(result)
197,826
Return a tuple that outputs a row of _FIELD_NAMES to be written to a SQLite database. Arguments: trip_id: The trip_id of the trip to which this StopTime corresponds. It must be provided, as it is not stored in StopTime.
def GetSqlValuesTuple(self, trip_id): result = [] for fn in self._SQL_FIELD_NAMES: if fn == 'trip_id': result.append(trip_id) else: # Since we'll be writing to SQLite, we want empty values to be # written as NULL (contrary to what happens in # GetFieldValuesTuple) result.append(getattr(self, fn)) return tuple(result)
197,827
Random multivariate hypergeometric variates. Parameters: - `n` : Number of draws. - `m` : Number of items in each category.
def rmultivariate_hypergeometric(n, m, size=None): N = len(m) urn = np.repeat(np.arange(N), m) if size: draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]] for j in range(size)]) r = [[np.sum(draw[j] == i) for i in range(len(m))] for j in range(size)] else: draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]]) r = [np.sum(draw == i) for i in range(len(m))] return np.asarray(r)
198,556
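A quick usage sketch for rmultivariate_hypergeometric above, assuming the function and numpy are importable; the urn sizes and seed are arbitrary example values:

import numpy as np

np.random.seed(0)
# Draw 5 items without replacement from 3 categories with 6, 3 and 1 items.
single = rmultivariate_hypergeometric(5, [6, 3, 1])
print(single, single.sum())          # counts per category, always summing to 5
batch = rmultivariate_hypergeometric(5, [6, 3, 1], size=4)
print(batch.shape)                   # (4, 3): one row of counts per replicate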
Expected value of multivariate hypergeometric distribution. Parameters: - `n` : Number of draws. - `m` : Number of items in each category.
def multivariate_hypergeometric_expval(n, m): m = np.asarray(m, float) return n * (m / m.sum())
198,557
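The expected value has the closed form n * m / sum(m); a small sanity check against the sampler above (assuming both functions are in scope):

import numpy as np

print(multivariate_hypergeometric_expval(5, [6, 3, 1]))   # [3.  1.5 0.5]
samples = rmultivariate_hypergeometric(5, [6, 3, 1], size=2000)
print(samples.mean(axis=0))   # empirical means, close to [3.0, 1.5, 0.5]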
Make a grid of images, via numpy. Args: tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) or a list of images all of the same size. nrow (int, optional): Number of images displayed in each row of the grid. The final grid size is (B / nrow, nrow). Default is 8. padding (int, optional): amount of padding. Default is 2. pad_value (float, optional): Value for the padded pixels.
def make_grid(tensor, nrow=8, padding=2, pad_value=0): if not (isinstance(tensor, np.ndarray) or (isinstance(tensor, list) and all(isinstance(t, np.ndarray) for t in tensor))): raise TypeError('tensor or list of tensors expected, got {}'.format(type(tensor))) # if list of tensors, convert to a 4D mini-batch Tensor if isinstance(tensor, list): tensor = np.stack(tensor, 0) if tensor.ndim == 2: # single image H x W tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1])) if tensor.ndim == 3: if tensor.shape[0] == 1: # if single-channel, single image, convert to 3-channel tensor = np.concatenate((tensor, tensor, tensor), 0) tensor = tensor.reshape((1, tensor.shape[0], tensor.shape[1], tensor.shape[2])) if tensor.ndim == 4 and tensor.shape[1] == 1: # single-channel images tensor = np.concatenate((tensor, tensor, tensor), 1) if tensor.shape[0] == 1: return np.squeeze(tensor) # make the mini-batch of images into a grid nmaps = tensor.shape[0] xmaps = min(nrow, nmaps) ymaps = int(math.ceil(float(nmaps) / xmaps)) height, width = int(tensor.shape[2] + padding), int(tensor.shape[3] + padding) grid = np.ones((3, height * ymaps + padding, width * xmaps + padding)) * pad_value k = 0 for y in range(ymaps): for x in range(xmaps): if k >= nmaps: break grid[:, y * height + padding:(y+1) * height,\ x * width + padding:(x+1) * width] = tensor[k] k = k + 1 return grid
198,779
Save a given Tensor into an image file. Args: tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, saves the tensor as a grid of images by calling ``make_grid``. **kwargs: Other arguments are documented in ``make_grid``.
def save_image(tensor, filename, nrow=8, padding=2, pad_value=0): from PIL import Image grid = make_grid(tensor, nrow=nrow, padding=padding, pad_value=pad_value) im = Image.fromarray(pre_pillow_float_img_process(grid)) im.save(filename)
198,781
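A usage sketch for make_grid above with a hypothetical batch of random images; save_image additionally depends on a pre_pillow_float_img_process helper that is not shown here, so only the grid step is exercised:

import numpy as np

# Eight random 3-channel 32x32 images as a B x C x H x W batch with values in [0, 1].
batch = np.random.rand(8, 3, 32, 32)
grid = make_grid(batch, nrow=4, padding=2, pad_value=1.0)
print(grid.shape)   # (3, 70, 138): 2 rows and 4 columns of 34-pixel cells plus outer padding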
The render primitive (mode) must be the same as the input primitive of the GeometryShader. Args: mode (int): By default :py:data:`TRIANGLES` will be used. vertices (int): The number of vertices to transform. Keyword Args: first (int): The index of the first vertex to start with. instances (int): The number of instances.
def render(self, mode=None, vertices=-1, *, first=0, instances=1) -> None: if mode is None: mode = TRIANGLES self.mglo.render(mode, vertices, first, instances)
199,366
Copy buffer content. Args: dst (Buffer): The destination buffer. src (Buffer): The source buffer. size (int): The number of bytes to copy. Keyword Args: read_offset (int): The read offset. write_offset (int): The write offset.
def copy_buffer(self, dst, src, size=-1, *, read_offset=0, write_offset=0) -> None: self.mglo.copy_buffer(dst.mglo, src.mglo, size, read_offset, write_offset)
199,390
Copy framebuffer content. Use this method to: - blit framebuffers. - copy framebuffer content into a texture. - downsample framebuffers. (this allows reading the framebuffer's content) - downsample a framebuffer directly to a texture. Args: dst (Framebuffer or Texture): Destination framebuffer or texture. src (Framebuffer): Source framebuffer.
def copy_framebuffer(self, dst, src) -> None: self.mglo.copy_framebuffer(dst.mglo, src.mglo)
199,391
Detect framebuffer. Args: glo (int): Framebuffer object. Returns: :py:class:`Framebuffer` object
def detect_framebuffer(self, glo=None) -> 'Framebuffer': res = Framebuffer.__new__(Framebuffer) res.mglo, res._size, res._samples, res._glo = self.mglo.detect_framebuffer(glo) res._color_attachments = None res._depth_attachment = None res.ctx = self res.extra = None return res
199,392
Create a :py:class:`Buffer` object. Args: data (bytes): Content of the new buffer. Keyword Args: reserve (int): The number of bytes to reserve. dynamic (bool): Treat buffer as dynamic. Returns: :py:class:`Buffer` object
def buffer(self, data=None, *, reserve=0, dynamic=False) -> Buffer: if type(reserve) is str: reserve = mgl.strsize(reserve) res = Buffer.__new__(Buffer) res.mglo, res._size, res._glo = self.mglo.buffer(data, reserve, dynamic) res._dynamic = dynamic res.ctx = self res.extra = None return res
199,393
Create a :py:class:`Texture3D` object. Args: size (tuple): The width, height and depth of the texture. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args: alignment (int): The byte alignment 1, 2, 4 or 8. dtype (str): Data type. Returns: :py:class:`Texture3D` object
def texture3d(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'Texture3D': res = Texture3D.__new__(Texture3D) res.mglo, res._glo = self.mglo.texture3d(size, components, data, alignment, dtype) res.ctx = self res.extra = None return res
199,396
Create a :py:class:`TextureCube` object. Args: size (tuple): The width, height of the texture. Each side of the cube will have this size. components (int): The number of components 1, 2, 3 or 4. data (bytes): Content of the texture. Keyword Args: alignment (int): The byte alignment 1, 2, 4 or 8. dtype (str): Data type. Returns: :py:class:`TextureCube` object
def texture_cube(self, size, components, data=None, *, alignment=1, dtype='f1') -> 'TextureCube': res = TextureCube.__new__(TextureCube) res.mglo, res._glo = self.mglo.texture_cube(size, components, data, alignment, dtype) res._size = size res._components = components res._dtype = dtype res.ctx = self res.extra = None return res
199,397
Create a :py:class:`Texture` object. Args: size (tuple): The width and height of the texture. data (bytes): Content of the texture. Keyword Args: samples (int): The number of samples. Value 0 means no multisample format. alignment (int): The byte alignment 1, 2, 4 or 8. Returns: :py:class:`Texture` object
def depth_texture(self, size, data=None, *, samples=0, alignment=4) -> 'Texture': res = Texture.__new__(Texture) res.mglo, res._glo = self.mglo.depth_texture(size, data, samples, alignment) res._size = size res._components = 1 res._samples = samples res._dtype = 'f4' res._depth = True res.ctx = self res.extra = None return res
199,398
Create a :py:class:`VertexArray` object. Args: program (Program): The program used when rendering. buffer (Buffer): The buffer. attributes (list): A list of attribute names. Keyword Args: index_element_size (int): byte size of each index element, 1, 2 or 4. index_buffer (Buffer): An index buffer. Returns: :py:class:`VertexArray` object
def simple_vertex_array(self, program, buffer, *attributes, index_buffer=None, index_element_size=4) -> 'VertexArray': if type(buffer) is list: raise SyntaxError('Change simple_vertex_array to vertex_array') content = [(buffer, detect_format(program, attributes)) + attributes] return self.vertex_array(program, content, index_buffer, index_element_size)
199,400
Create a :py:class:`Program` object. Only linked programs will be returned. A single shader in the `shaders` parameter is also accepted. The varyings are only used when a transform program is created. Args: shaders (list): A list of :py:class:`Shader` objects. varyings (list): A list of varying names. Returns: :py:class:`Program` object
def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None, tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program': if type(varyings) is str: varyings = (varyings,) varyings = tuple(varyings) res = Program.__new__(Program) res.mglo, ls1, ls2, ls3, ls4, ls5, res._subroutines, res._geom, res._glo = self.mglo.program( vertex_shader, fragment_shader, geometry_shader, tess_control_shader, tess_evaluation_shader, varyings ) members = {} for item in ls1: obj = Attribute.__new__(Attribute) obj.mglo, obj._location, obj._array_length, obj._dimension, obj._shape, obj._name = item members[obj.name] = obj for item in ls2: obj = Varying.__new__(Varying) obj._number, obj._array_length, obj._dimension, obj._name = item members[obj.name] = obj for item in ls3: obj = Uniform.__new__(Uniform) obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item members[obj.name] = obj for item in ls4: obj = UniformBlock.__new__(UniformBlock) obj.mglo, obj._index, obj._size, obj._name = item members[obj.name] = obj for item in ls5: obj = Subroutine.__new__(Subroutine) obj._index, obj._name = item members[obj.name] = obj res._members = members res.ctx = self res.extra = None return res
199,401
Create a :py:class:`Scope` object. Args: framebuffer (Framebuffer): The framebuffer to use when entering. enable_only (int): The enable_only flags to set when entering. Keyword Args: textures (list): List of (texture, binding) tuples. uniform_buffers (list): List of (buffer, binding) tuples. storage_buffers (list): List of (buffer, binding) tuples.
def scope(self, framebuffer, enable_only=None, *, textures=(), uniform_buffers=(), storage_buffers=()) -> 'Scope': textures = tuple((tex.mglo, idx) for tex, idx in textures) uniform_buffers = tuple((buf.mglo, idx) for buf, idx in uniform_buffers) storage_buffers = tuple((buf.mglo, idx) for buf, idx in storage_buffers) res = Scope.__new__(Scope) res.mglo = self.mglo.scope(framebuffer.mglo, enable_only, textures, uniform_buffers, storage_buffers) res.ctx = self res.extra = None return res
199,403
A :py:class:`Framebuffer` is a collection of buffers that can be used as the destination for rendering. The buffers for Framebuffer objects reference images from either Textures or Renderbuffers. Args: color_attachments (list): A list of :py:class:`Texture` or :py:class:`Renderbuffer` objects. depth_attachment (Renderbuffer or Texture): The depth attachment. Returns: :py:class:`Framebuffer` object
def framebuffer(self, color_attachments=(), depth_attachment=None) -> 'Framebuffer': if type(color_attachments) is Texture or type(color_attachments) is Renderbuffer: color_attachments = (color_attachments,) ca_mglo = tuple(x.mglo for x in color_attachments) da_mglo = None if depth_attachment is None else depth_attachment.mglo res = Framebuffer.__new__(Framebuffer) res.mglo, res._size, res._samples, res._glo = self.mglo.framebuffer(ca_mglo, da_mglo) res._color_attachments = tuple(color_attachments) res._depth_attachment = depth_attachment res.ctx = self res.extra = None return res
199,405
:py:class:`Renderbuffer` objects are OpenGL objects that contain images. They are created and used specifically with :py:class:`Framebuffer` objects. Args: size (tuple): The width and height of the renderbuffer. Keyword Args: samples (int): The number of samples. Value 0 means no multisample format. Returns: :py:class:`Renderbuffer` object
def depth_renderbuffer(self, size, *, samples=0) -> 'Renderbuffer': res = Renderbuffer.__new__(Renderbuffer) res.mglo, res._glo = self.mglo.depth_renderbuffer(size, samples) res._size = size res._components = 1 res._samples = samples res._dtype = 'f4' res._depth = True res.ctx = self res.extra = None return res
199,407
A :py:class:`ComputeShader` is a Shader Stage that is used entirely for computing arbitrary information. While it can do rendering, it is generally used for tasks not directly related to drawing. Args: source (str): The source of the compute shader. Returns: :py:class:`ComputeShader` object
def compute_shader(self, source) -> 'ComputeShader': res = ComputeShader.__new__(ComputeShader) res.mglo, ls1, ls2, ls3, ls4, res._glo = self.mglo.compute_shader(source) members = {} for item in ls1: obj = Uniform.__new__(Uniform) obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item members[obj.name] = obj for item in ls2: obj = UniformBlock.__new__(UniformBlock) obj.mglo, obj._index, obj._size, obj._name = item members[obj.name] = obj res._members = members res.ctx = self res.extra = None return res
199,408
Key event callback for glfw. Translates and forwards keyboard event to :py:func:`keyboard_event` Args: window: Window event origin key: The key that was pressed or released. scancode: The system-specific scancode of the key. action: GLFW_PRESS, GLFW_RELEASE or GLFW_REPEAT mods: Bit field describing which modifier keys were held down.
def key_event_callback(self, window, key, scancode, action, mods): if key == self.keys.ESCAPE: self.close() self.example.key_event(key, action)
199,421
Mouse event callback from glfw. Translates the events forwarding them to :py:func:`cursor_event`. Args: window: The window xpos: viewport x pos ypos: viewport y pos
def mouse_event_callback(self, window, xpos, ypos): # screen coordinates relative to the top-left corner self.example.mouse_position_event(xpos, ypos)
199,422
Split data into count equal parts. Write the chunks using offsets calculated from start, step and count. Args: data (bytes): The data. start (int): First offset. step (int): Offset increment. count (int): The number of offsets.
def write_chunks(self, data, start, step, count) -> None: self.mglo.write_chunks(data, start, step, count)
199,427
Read the content. Args: size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The offset. Returns: bytes
def read(self, size=-1, *, offset=0) -> bytes: return self.mglo.read(size, offset)
199,428
Read the content into a buffer. Args: buffer (bytearray): The buffer that will receive the content. size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The read offset. write_offset (int): The write offset.
def read_into(self, buffer, size=-1, *, offset=0, write_offset=0) -> None: return self.mglo.read_into(buffer, size, offset, write_offset)
199,429
Read the content. Read and concatenate the chunks of size chunk_size using offsets calculated from start, step and count. Args: chunk_size (int): The chunk size. start (int): First offset. step (int): Offset increment. count (int): The number of offsets. Returns: bytes
def read_chunks(self, chunk_size, start, step, count) -> bytes: return self.mglo.read_chunks(chunk_size, start, step, count)
199,430
Clear the content. Args: size (int): The size. Value ``-1`` means all. Keyword Args: offset (int): The offset. chunk (bytes): The chunk to use repeatedly.
def clear(self, size=-1, *, offset=0, chunk=None) -> None: self.mglo.clear(size, offset, chunk)
199,432
Bind the buffer to a uniform block. Args: binding (int): The uniform block binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
def bind_to_uniform_block(self, binding=0, *, offset=0, size=-1) -> None: self.mglo.bind_to_uniform_block(binding, offset, size)
199,433
Bind the buffer to a shader storage buffer. Args: binding (int): The shader storage binding. Keyword Args: offset (int): The offset. size (int): The size. Value ``-1`` means all.
def bind_to_storage_buffer(self, binding=0, *, offset=0, size=-1) -> None: self.mglo.bind_to_storage_buffer(binding, offset, size)
199,434
Read the content of the framebuffer. Args: viewport (tuple): The viewport. components (int): The number of components to read. Keyword Args: attachment (int): The color attachment. alignment (int): The byte alignment of the pixels. dtype (str): Data type. Returns: bytes
def read(self, viewport=None, components=3, *, attachment=0, alignment=1, dtype='f1') -> bytes: return self.mglo.read(viewport, components, attachment, alignment, dtype)
199,452
Renders the assigned example Args: time (float): Current time in seconds frame_time (float): Delta time from last frame in seconds
def render(self, time: float, frame_time: float): self.example.render(time, frame_time)
199,461
Run an example, entering a blocking main loop. Args: example_cls: The example class to render args: Override sys.argv
def run_example(example_cls: Example, args=None): values = parse_args(args) window_cls = get_window_cls(values.window) window = window_cls( title=example_cls.title, size=example_cls.window_size, fullscreen=values.fullscreen, resizable=example_cls.resizable, gl_version=example_cls.gl_version, aspect_ratio=example_cls.aspect_ratio, vsync=values.vsync, samples=values.samples, cursor=values.cursor, ) window.example = example_cls(ctx=window.ctx, wnd=window) start_time = time.time() current_time = start_time prev_time = start_time frame_time = 0 while not window.is_closing: current_time, prev_time = time.time(), current_time frame_time = max(current_time - prev_time, 1 / 1000) window.render(current_time - start_time, frame_time) window.swap_buffers() duration = time.time() - start_time window.destroy() print("Duration: {0:.2f}s @ {1:.2f} FPS".format(duration, window.frames / duration))
199,527
Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. Args: dotted_path: The path to attempt importing Returns: Imported class/attribute
def import_string(dotted_path): try: module_path, class_name = dotted_path.rsplit('.', 1) except ValueError as err: raise ImportError("%s doesn't look like a module path" % dotted_path) from err module = import_module(module_path) try: return getattr(module, class_name) except AttributeError as err: raise ImportError('Module "%s" does not define a "%s" attribute/class' % ( module_path, class_name)) from err
199,530
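A short usage example for import_string above; 'os.path.join' is just a convenient dotted path that exists in the standard library:

from os import path

join = import_string('os.path.join')
print(join is path.join)   # True

try:
    import_string('os.path.no_such_attr')
except ImportError as err:
    print(err)   # Module "os.path" does not define a "no_such_attr" attribute/class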
Run the compute shader. Args: group_x (int): The number of work groups to be launched in the X dimension. group_y (int): The number of work groups to be launched in the Y dimension. group_z (int): The number of work groups to be launched in the Z dimension.
def run(self, group_x=1, group_y=1, group_z=1) -> None: return self.mglo.run(group_x, group_y, group_z)
199,553
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying. Args: default: This is the value to be returned in case key does not exist. Returns: :py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`, :py:class:`Attribute` or :py:class:`Varying`
def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]: return self._members.get(key, default)
199,554
Read a face from the cubemap texture. Args: face (int): The face to read. Keyword Args: alignment (int): The byte alignment of the pixels.
def read(self, face, *, alignment=1) -> bytes: return self.mglo.read(face, alignment)
199,625
Read a face from the cubemap texture into a buffer. Args: buffer (bytearray): The buffer that will receive the pixels. face (int): The face to read. Keyword Args: alignment (int): The byte alignment of the pixels. write_offset (int): The write offset.
def read_into(self, buffer, face, *, alignment=1, write_offset=0) -> None: if type(buffer) is Buffer: buffer = buffer.mglo return self.mglo.read_into(buffer, face, alignment, write_offset)
199,626
Update the content of the texture. Args: face (int): The face to update. data (bytes): The pixel data. viewport (tuple): The viewport. Keyword Args: alignment (int): The byte alignment of the pixels.
def write(self, face, data, viewport=None, *, alignment=1) -> None: if type(data) is Buffer: data = data.mglo self.mglo.write(face, data, viewport, alignment)
199,627
Detect format for vertex attributes. The format returned does not contain padding. Args: program (Program): The program. attributes (list): A list of attribute names. Returns: str
def detect_format(program, attributes) -> str: def fmt(attr): return attr.array_length * attr.dimension, attr.shape return ' '.join('%d%s' % fmt(program[a]) for a in attributes)
199,635
Convenience tool to detect if something is iterable. In Python 3, strings count as iterables, so we have the option to exclude them. Parameters: ----------- obj : object to analyse reject_string : bool, whether to ignore strings Returns: -------- bool, whether the object is iterable.
def isiterable(obj, reject_string=True): iterable = hasattr(obj, '__len__') if reject_string: iterable = iterable and not isinstance(obj, str) return iterable
199,699
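Note that isiterable above actually tests for __len__, so sized containers pass while generators do not; a few example calls (assuming the function is in scope):

print(isiterable([1, 2, 3]))                    # True
print(isiterable('abc'))                        # False: strings rejected by default
print(isiterable('abc', reject_string=False))   # True
print(isiterable(42))                           # False: ints have no __len__
print(isiterable(x for x in range(3)))          # False: generators have no __len__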
Call visitor on root and all dependencies reachable from it in breadth first order. Args: root (component): component function or class visitor (function): signature is `func(component, parent)`. The call on root is `visitor(root, None)`.
def walk_dependencies(root, visitor): def visit(parent, visitor): for d in get_dependencies(parent): visitor(d, parent) visit(d, visitor) visitor(root, None) visit(root, visitor)
201,552
Checks if the specified user (and optionally group) owns the file. Args: owner (str): the user (or group) name for which we ask about ownership also_check_group (bool): if set to True, both the user owner and the group owner are checked; if set to False, only the user owner is checked Returns: bool: True if the owner of the file is the specified owner
def owned_by(self, owner, also_check_group=False): if also_check_group: return self.owner == owner and self.group == owner else: return self.owner == owner
201,681
Add a filter or list of filters to a datasource. A filter is a simple string, and it matches if it is contained anywhere within a line. Args: ds (@datasource component): The datasource to filter patterns (str, [str]): A string, list of strings, or set of strings to add to the datasource's filters.
def add_filter(ds, patterns): if not plugins.is_datasource(ds): raise Exception("Filters are applicable only to datasources.") delegate = dr.get_delegate(ds) if delegate.raw: raise Exception("Filters aren't applicable to raw datasources.") if not delegate.filterable: raise Exception("Filters aren't applicable to %s." % dr.get_name(ds)) if ds in _CACHE: del _CACHE[ds] if isinstance(patterns, six.string_types): FILTERS[ds].add(patterns) elif isinstance(patterns, list): FILTERS[ds] |= set(patterns) elif isinstance(patterns, set): FILTERS[ds] |= patterns else: raise TypeError("patterns must be string, list, or set.")
201,693
Returns a function that hydrates components as they are evaluated. The function should be registered as an observer on a Broker just before execution. Args: to_persist (set): Set of components to persist. Skip everything else.
def make_persister(self, to_persist): if not self.meta_data: raise Exception("Root not set. Can't create persister.") def persister(c, broker): if c in to_persist: self.dehydrate(c, broker) return persister
201,719
Helper method for parsing a package string. Args: package_string (str): dash separated package string such as 'bash-4.2.39-3.el7' Returns: dict: dictionary containing 'name', 'version', 'release', 'arch' and 'epoch' keys
def _parse_package(cls, package_string): pkg, arch = rsplit(package_string, cls._arch_sep(package_string)) if arch not in KNOWN_ARCHITECTURES: pkg, arch = (package_string, None) pkg, release = rsplit(pkg, '-') name, version = rsplit(pkg, '-') epoch, version = version.split(':', 1) if ":" in version else ['0', version] # oracleasm packages have a dash in their version string, fix that if name.startswith('oracleasm') and name.endswith('.el5'): name, version2 = name.split('-', 1) version = version2 + '-' + version return { 'name': name, 'version': version, 'release': release, 'arch': arch, 'epoch': epoch }
201,766
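A standalone sketch of the same right-to-left name-version-release-arch split performed by _parse_package above, without the insights-core helpers (rsplit, KNOWN_ARCHITECTURES); the architecture list here is a small illustrative subset:

def parse_nvra(package_string, known_arches=('x86_64', 'noarch', 'i686')):
    # Peel the architecture off the right, if it looks like one.
    pkg, _, arch = package_string.rpartition('.')
    if arch not in known_arches:
        pkg, arch = package_string, None
    # Then peel release and version off the right, dash by dash.
    pkg, _, release = pkg.rpartition('-')
    name, _, version = pkg.rpartition('-')
    epoch, _, version = version.partition(':') if ':' in version else ('0', '', version)
    return {'name': name, 'version': version, 'release': release,
            'arch': arch, 'epoch': epoch}

print(parse_nvra('bash-4.2.39-3.el7.x86_64'))
# {'name': 'bash', 'version': '4.2.39', 'release': '3.el7', 'arch': 'x86_64', 'epoch': '0'}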
Helper method for parsing package line with or without SOS report information. Args: line (str): package line with or without SOS report information Returns: dict: dictionary containing 'name', 'version', 'release' and 'arch' keys plus additionally 'installtime', 'buildtime', 'vendor', 'buildserver', 'pgpsig', 'pgpsig_short' if these are present.
def _parse_line(cls, line): try: pkg, rest = line.split(None, 1) except ValueError: rpm = cls._parse_package(line.strip()) return rpm rpm = cls._parse_package(pkg) rest = rest.split('\t') for i, value in enumerate(rest): rpm[cls.SOSREPORT_KEYS[i]] = value return rpm
201,767
Adds an array of systems to specified group Args: group_name: Display name of group systems: Array of {'machine_id': machine_id}
def group_systems(self, group_name, systems): api_group_id = None headers = {'Content-Type': 'application/json'} group_path = self.api_url + '/v1/groups' group_get_path = group_path + ('?display_name=%s' % quote(group_name)) logger.debug("GET group: %s", group_get_path) net_logger.info("GET %s", group_get_path) get_group = self.session.get(group_get_path) logger.debug("GET group status: %s", get_group.status_code) if get_group.status_code == 200: api_group_id = get_group.json()['id'] if get_group.status_code == 404: # Group does not exist, POST to create logger.debug("POST group") data = json.dumps({'display_name': group_name}) net_logger.info("POST %s", group_path) post_group = self.session.post(group_path, headers=headers, data=data) logger.debug("POST group status: %s", post_group.status_code) logger.debug("POST Group: %s", post_group.json()) self.handle_fail_rcs(post_group) api_group_id = post_group.json()['id'] logger.debug("PUT group") data = json.dumps(systems) net_logger.info("PUT %s", group_path + ('/%s/systems' % api_group_id)) put_group = self.session.put(group_path + ('/%s/systems' % api_group_id), headers=headers, data=data) logger.debug("PUT group status: %d", put_group.status_code) logger.debug("PUT Group: %s", put_group.json())
201,815
Utility function to recursively merge the source dictionary `src` into the target dictionary `tgt`. Note: The type of the values in the dictionary can only be `dict` or `list` Parameters: tgt (dict): The target dictionary src (dict): The source dictionary
def dict_deep_merge(tgt, src): for k, v in src.items(): if k in tgt: if isinstance(tgt[k], dict) and isinstance(v, dict): dict_deep_merge(tgt[k], v) else: tgt[k].extend(deepcopy(v)) else: tgt[k] = deepcopy(v)
201,836
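Example usage for dict_deep_merge above (assuming the function, and the deepcopy it relies on, are importable); lists are extended and nested dicts are merged recursively:

tgt = {'a': {'x': [1]}, 'b': [1, 2]}
src = {'a': {'x': [2], 'y': [3]}, 'c': [9]}
dict_deep_merge(tgt, src)
print(tgt)   # {'a': {'x': [1, 2], 'y': [3]}, 'b': [1, 2], 'c': [9]}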
Rule reports a response if there is more than 1 host entry defined in the /etc/hosts file. Arguments: hp (HostParser): Parser object for the custom parser in this module. rhr (RedhatRelease): Parser object for the /etc/redhat-release file.
def report(hp, rhr): if len(hp.hosts) > 1: return make_fail("TOO_MANY_HOSTS", num=len(hp.hosts)) return make_pass("TOO_MANY_HOSTS", num=len(hp.hosts))
201,878
Parse the part of an ls output line that contains SELinux information. Args: parts (list): A four element list of strings representing the initial parts of an ls line after the permission bits. The parts are owner, group, selinux info, and the path. Returns: A dict containing owner, group, se_user, se_role, se_type, se_mls, and name. If the raw name was a symbolic link, link is always included.
def parse_selinux(parts): owner, group = parts[:2] selinux = parts[2].split(":") lsel = len(selinux) path, link = parse_path(parts[-1]) result = { "owner": owner, "group": group, "se_user": selinux[0], "se_role": selinux[1] if lsel > 1 else None, "se_type": selinux[2] if lsel > 2 else None, "se_mls": selinux[3] if lsel > 3 else None, "name": path } if link: result["link"] = link return result
201,940
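Example input/output for parse_selinux above; it also relies on a parse_path helper (not shown) that is expected to return (path, None) for a plain, non-symlink entry:

parts = ['root', 'root', 'system_u:object_r:etc_t:s0', '/etc/hosts']
print(parse_selinux(parts))
# {'owner': 'root', 'group': 'root', 'se_user': 'system_u', 'se_role': 'object_r',
#  'se_type': 'etc_t', 'se_mls': 's0', 'name': '/etc/hosts'}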
Parses a list of lines from ls into dictionaries representing their components. Args: lines (list): A list of lines generated by ls. root (str): The directory name to be used for ls output stanzas that don't have a name. Returns: A dictionary representing the ls output. It's keyed by the path containing each ls stanza.
def parse(lines, root=None): doc = {} entries = [] name = None total = None for line in lines: line = line.strip() if not line: continue if line and line[0] == "/" and line[-1] == ":": if name is None: name = line[:-1] if entries: d = Directory(name, total or len(entries), entries) doc[root] = d total = None entries = [] else: d = Directory(name, total or len(entries), entries) doc[name or root] = d total = None entries = [] name = line[:-1] continue if line.startswith("total"): total = int(line.split(None, 1)[1]) continue entries.append(line) name = name or root doc[name] = Directory(name, total or len(entries), entries) return doc
201,941
Main parsing class method which stores all interesting data from the content. Args: content (context.content): Parser context content
def parse_content(self, content): # note, the Parser class sets: # * self.file_path = context.path and # * self.file_name = os.path.basename(context.path) self.active_lines_unparsed = get_active_lines(content) if content is not None else [] # (man page shows all options with "=") self.active_settings = split_kv_pairs(content, use_partition=False) if content is not None else []
202,177
Collects facts for each host. Collects the cpu and node configuration facts to be used by the rule. Arguments: cpu (CpuInfo): Parser object for the cpu info. cfg (NodeConfig): Parser object for the node configuration. Returns: dict: Dictionary of fact information including the keys ``cpu_count``, ``pods_per_core``, ``pods_per_core_customized``, ``max_pods``, and ``max_pods_customized``.
def cluster_info(cpu, cfg): cpus = cpu.cpu_count pods_per_core = cfg.doc.find("pods-per-core") pods_per_core_int = int(pods_per_core.value) if pods_per_core else PODS_PER_CORE cfg_max_pods = cfg.doc.find("max-pods") cfg_max_pods_int = int(cfg_max_pods.value) if cfg_max_pods else MAX_PODS calc_max_pods = cpus * pods_per_core_int return { "cpu_count": cpus, "pods_per_core": pods_per_core_int, "pods_per_core_customized": bool(pods_per_core), "max_pods": min(cfg_max_pods_int, calc_max_pods), "max_pods_customized": bool(cfg_max_pods) }
202,178
Get the list of rules for a particular chain. Chain order is kept intact. Args: name (str): chain name, e.g. `` table (str): table name, defaults to ``filter`` Returns: list: rules
def get_chain(self, name, table="filter"): return [r for r in self.rules if r["table"] == table and r["chain"] == name]
202,228
Get a dict where the keys are all the chains for the given table and each value is the set of rules defined for the given chain. Args: table (str): table name, defaults to ``filter`` Returns: dict: chains with set of defined rules
def table_chains(self, table="filter"): return dict((c["name"], self.get_chain(c["name"], table)) for c in self.get_table(table))
202,229
Method for quick testing of a parser against a test string. Good for simple inline microtests of sub expressions while building up a larger parser, as in: expr = Word(nums) assert expr.matches("100") Parameters: - testString - string
def matches(self, s, parseAll=True): try: self.parseString(_ustr(s), parseAll=parseAll) return True except ParseBaseException: return False
202,258
Turn the prefix length netmask into an int for comparison. Args: prefixlen: An integer, the prefix length. Returns: An integer.
def _ip_int_from_prefix(self, prefixlen=None): if prefixlen is None: prefixlen = self._prefixlen return self._ALL_ONES ^ (self._ALL_ONES >> prefixlen)
202,333
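The same all-ones XOR shift, written out for the IPv4 case so the bit pattern is visible (a standalone sketch, not the class method itself):

ALL_ONES = (1 << 32) - 1   # 0xffffffff for IPv4

def netmask_int(prefixlen):
    return ALL_ONES ^ (ALL_ONES >> prefixlen)

print(hex(netmask_int(24)))   # 0xffffff00, i.e. 255.255.255.0
print(hex(netmask_int(8)))    # 0xff000000, i.e. 255.0.0.0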
Turn a prefix length into a dotted decimal string. Args: prefixlen: An integer, the netmask prefix length. Returns: A string, the dotted decimal netmask string.
def _ip_string_from_prefix(self, prefixlen=None): if not prefixlen: prefixlen = self._prefixlen return self._string_from_ip_int(self._ip_int_from_prefix(prefixlen))
202,334
Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address.
def _ip_int_from_string(self, ip_str): if not ip_str: raise AddressValueError('Address cannot be empty') octets = ip_str.split('.') if len(octets) != 4: raise AddressValueError("Expected 4 octets in %r" % ip_str) try: bvs = map(self._parse_octet, octets) return _compat_int_from_byte_vals(bvs, 'big') except ValueError as exc: raise AddressValueError("%s in %r" % (exc, ip_str))
202,336
Verify that the netmask/prefixlen is valid. Args: prefixlen: A string, the netmask in prefix length format. Returns: A boolean, True if the prefix represents a valid IPv6 netmask.
def _is_valid_netmask(self, prefixlen): try: prefixlen = int(prefixlen) except ValueError: return False return 0 <= prefixlen <= self._max_prefixlen
202,340
Returns the updated caching headers. Args: response (HttpResponse): The response from the remote service Returns: response.headers (HttpResponse.Headers): HTTP caching headers
def update_headers(self, response): if 'expires' in response.headers and 'cache-control' in response.headers: self.msg = self.server_cache_headers return response.headers else: self.msg = self.default_cache_vars date = parsedate(response.headers['date']) expires = datetime(*date[:6]) + timedelta(0, self.expire_after) response.headers.update({'expires': formatdate(calendar.timegm(expires.timetuple())), 'cache-control': 'public'}) return response.headers
202,374
If `results` contains a single line and that line is included in the `bad_lines` list, this function returns `False`. If no bad line is found, the function returns `True`. Parameters: results(str): The results string of the output from the command defined by the command spec. bad_lines(list): Lower-case substrings that indicate a failed command. Returns: (Boolean): True for no bad lines or False for bad line found.
def validate_lines(results, bad_lines): if results and len(results) == 1: first = results[0] if any(l in first.lower() for l in bad_lines): return False return True
202,403
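A few example calls for validate_lines above; the bad-line substrings are hypothetical and are matched case-insensitively against a single-line result:

bad = ['no such file', 'command not found']
print(validate_lines(['total 0'], bad))                    # True: nothing bad found
print(validate_lines(['ls: Command not found'], bad))      # False: single bad line
print(validate_lines(['command not found', 'junk'], bad))  # True: only single-line results are checked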
Returns all lines that contain `s` anywhere and wrap them in a list of dictionaries. `s` can be either a single string or a string list. For list, all keywords in the list must be found in each line. Parameters: s(str or list): one or more strings to search for. Returns: (list): list of dictionaries corresponding to the parsed lines that contain `s`.
def get(self, s): ret = [] search_by_expression = self._valid_search(s) for l in self.lines: if search_by_expression(l): ret.append(self._parse_line(l)) return ret
202,417
Constructor. Args: channel: A grpc.Channel.
def __init__(self, channel): self.SayHello = channel.unary_unary( '/helloworld.Greeter/SayHello', request_serializer=hello__world__pb2.HelloRequest. SerializeToString, response_deserializer=hello__world__pb2.HelloReply.FromString, )
202,882
Returns a short, term-friendly string representation of the object. Args: obj: An object for which to return a string representation. max_len: Maximum length of the returned string. Longer reprs will be turned into a brief descriptive string giving the type of obj and the length of its repr.
def short_repr(obj, max_len=40): obj_repr = repr(obj) if len(obj_repr) <= max_len: return obj_repr return '<{} of length {}>'.format(type(obj).__name__, len(obj_repr))
205,077
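Example behaviour of short_repr above; note the reported length is that of the repr string, not of the object itself:

print(short_repr([1, 2, 3]))    # [1, 2, 3]
print(short_repr('x' * 100))    # <str of length 102> (100 chars plus two quotes)
print(short_repr(list(range(50)), max_len=20))   # <list of length 190>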
Install apk to device. Doesn't support verifier file, instead allows destination directory to be overridden. Arguments: apk_path: Local path to apk to install. destination_dir: Optional destination directory. Use /system/app/ for persistent applications. timeout_ms: Expected timeout for pushing and installing. Returns: The pm install output.
def install(self, apk_path, destination_dir=None, timeout_ms=None): if not destination_dir: destination_dir = '/data/local/tmp/' basename = os.path.basename(apk_path) destination_path = destination_dir + basename self.push(apk_path, destination_path, timeout_ms=timeout_ms) return self.Shell('pm install -r "%s"' % destination_path, timeout_ms=timeout_ms)
205,112
Push source_file to file on device. Arguments: source_file: Either a filename or file-like object to push to the device. If a filename, will set the remote mtime to match the local mtime, otherwise will use the current time. device_filename: The filename on the device to write to. timeout_ms: Expected timeout for any part of the push.
def push(self, source_file, device_filename, timeout_ms=None): mtime = 0 if isinstance(source_file, six.string_types): mtime = os.path.getmtime(source_file) source_file = open(source_file) self.filesync_service.send( source_file, device_filename, mtime=mtime, timeout=timeouts.PolledTimeout.from_millis(timeout_ms))
205,113
Pull file from device. Arguments: device_filename: The filename on the device to pull. dest_file: If set, a filename or writable file-like object. timeout_ms: Expected timeout for the pull. Returns: The file data if dest_file is not set, None otherwise.
def pull(self, device_filename, dest_file=None, timeout_ms=None): should_return_data = dest_file is None if isinstance(dest_file, six.string_types): dest_file = open(dest_file, 'w') elif dest_file is None: dest_file = six.StringIO() self.filesync_service.recv(device_filename, dest_file, timeouts.PolledTimeout.from_millis(timeout_ms)) if should_return_data: return dest_file.getvalue()
205,114
Connect to the device. Args: usb_handle: UsbHandle instance to use. **kwargs: See AdbConnection.connect for kwargs. Includes rsa_keys, and auth_timeout_ms. Returns: An instance of this class if the device connected successfully.
def connect(cls, usb_handle, **kwargs): adb_connection = adb_protocol.AdbConnection.connect(usb_handle, **kwargs) return cls(adb_connection)
205,118
Sends a query to the given multicast socket and yields responses. Args: query: The string query to send. address: Multicast IP address component of the socket to send to. port: Multicast UDP port component of the socket to send to. ttl: TTL for multicast messages. 1 to keep traffic in-network. local_only: If True, send only on the localhost interface so no packets leave this host. timeout_s: Seconds to wait for responses. Yields: All responses that arrive before the timeout expires. Responses are tuples of (sender_address, message).
def send(query, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, ttl=DEFAULT_TTL, local_only=False, timeout_s=2): # Set up the socket as a UDP Multicast socket with the given timeout. sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl) if local_only: # Set outgoing interface to localhost to ensure no packets leave this host. sock.setsockopt( socket.IPPROTO_IP, socket.IP_MULTICAST_IF, struct.pack('!L', LOCALHOST_ADDRESS)) sock.settimeout(timeout_s) sock.sendto(query.encode('utf-8'), (address, port)) # Set up our thread-safe Queue for handling responses. recv_queue = queue.Queue() def _handle_responses(): while True: try: data, address = sock.recvfrom(MAX_MESSAGE_BYTES) data = data.decode('utf-8') except socket.timeout: recv_queue.put(None) break else: _LOG.debug('Multicast response to query "%s": %s:%s', query, address[0], data) recv_queue.put((address[0], str(data))) # Yield responses as they come in, giving up once timeout expires. response_thread = threading.Thread(target=_handle_responses) response_thread.start() while response_thread.is_alive(): recv_tuple = recv_queue.get() if not recv_tuple: break yield recv_tuple response_thread.join()
205,119
Sends a command to the device. Args: command: The command to send. arg: Optional argument to the command.
def send_command(self, command, arg=None): if arg is not None: command = '%s:%s' % (command, arg) self._write(six.StringIO(command), len(command))
205,124
Accepts normal responses from the device. Args: timeout_ms: Timeout in milliseconds to wait for each response. info_cb: Optional callback for text sent from the bootloader. Returns: OKAY packet's message.
def handle_simple_responses( self, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._accept_responses('OKAY', info_cb, timeout_ms=timeout_ms)
205,125
Constructs a FastbootCommands instance. Arguments: usb: UsbHandle instance.
def __init__(self, usb): self._usb = usb self._protocol = self.protocol_handler(usb)
205,130
Flashes a partition from the file on disk. Args: partition: Partition name to flash to. source_file: Filename to download to the device. source_len: Optional length of source_file, uses os.stat if not provided. info_cb: See Download. progress_callback: See Download. timeout_ms: The amount of time to wait on okay after flashing. Returns: Download and flash responses, normally nothing.
def flash_from_file(self, partition, source_file, source_len=0, info_cb=DEFAULT_MESSAGE_CALLBACK, progress_callback=None, timeout_ms=None): if source_len == 0: # Fall back to stat. source_len = os.stat(source_file).st_size download_response = self.download( source_file, source_len=source_len, info_cb=info_cb, progress_callback=progress_callback) flash_response = self.flash(partition, info_cb=info_cb, timeout_ms=timeout_ms) return download_response + flash_response
205,132
Flashes the last downloaded file to the given partition. Args: partition: Partition to flash. timeout_ms: Optional timeout in milliseconds to wait for it to finish. info_cb: See Download. Usually no messages. Returns: Response to a download request, normally nothing.
def flash(self, partition, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._simple_command('flash', arg=partition, info_cb=info_cb, timeout_ms=timeout_ms)
205,134
Returns the given variable's definition. Args: var: A variable the bootloader tracks, such as version. info_cb: See Download. Usually no messages. Returns: Value of var according to the current bootloader.
def get_var(self, var, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._simple_command('getvar', arg=var, info_cb=info_cb)
205,136
Executes an OEM command on the device. Args: command: The command to execute, such as 'poweroff' or 'bootconfig read'. timeout_ms: Optional timeout in milliseconds to wait for a response. info_cb: See Download. Messages vary based on command. Returns: The final response from the device.
def oem(self, command, timeout_ms=None, info_cb=DEFAULT_MESSAGE_CALLBACK): return self._simple_command( 'oem %s' % command, timeout_ms=timeout_ms, info_cb=info_cb)
205,137
Reboots the device. Args: target_mode: Normal reboot when unspecified (or None). Can specify other target modes, such as 'recovery' or 'bootloader'. timeout_ms: Optional timeout in milliseconds to wait for a response. Returns: Usually the empty string. Depends on the bootloader and the target_mode.
def reboot(self, target_mode=None, timeout_ms=None): return self._simple_command('reboot', arg=target_mode, timeout_ms=timeout_ms)
205,138
A generator that parses a worksheet containing UNECE code definitions. Args: sheet: An xldr.sheet object representing a UNECE code worksheet. column_names: A list/tuple with the expected column names corresponding to the unit name, code and suffix in that order. Yields: Lines of Python source code that define OpenHTF Unit objects.
def unit_defs_from_sheet(sheet, column_names): seen = set() try: col_indices = {} rows = sheet.get_rows() # Find the indices for the columns we care about. for idx, cell in enumerate(six.next(rows)): if cell.value in column_names: col_indices[cell.value] = idx # loop over all remaining rows and pull out units. for row in rows: name = row[col_indices[column_names[0]]].value.replace("'", r'\'') code = row[col_indices[column_names[1]]].value suffix = row[col_indices[column_names[2]]].value.replace("'", r'\'') key = unit_key_from_name(name) if key in seen: continue seen.add(key) # Split on ' or ' to support the units like '% or pct' for suffix in suffix.split(' or '): yield "%s = UnitDescriptor('%s', '%s', '%s')\n" % ( key, name, code, suffix) yield "ALL_UNITS.append(%s)\n" % key except xlrd.XLRDError: sys.stdout.write('Unable to process the .xls file.')
205,151
Send the given message over this transport. Args: message: The AdbMessage to send. timeout: Use this timeout for the entire write operation, it should be an instance of timeouts.PolledTimeout.
def write_message(self, message, timeout): with self._writer_lock: self._transport.write(message.header, timeout.remaining_ms) # Use any remaining time to send the data. Note that if we get this far, # we always at least try to send the data (with a minimum of 10ms timeout) # because we don't want the remote end to get out of sync because we sent # a header but no data. if timeout.has_expired(): _LOG.warning('Timed out between AdbMessage header and data, sending ' 'data anyway with 10ms timeout') timeout = timeouts.PolledTimeout.from_millis(10) self._transport.write(message.data, timeout.remaining_ms)
205,157
List directory contents on the device. Args: path: List the contents of this directory. timeout: Timeout to use for this operation. Returns: Generator yielding DeviceFileStat tuples representing the contents of the requested path.
def list(self, path, timeout=None): transport = DentFilesyncTransport(self.stream) transport.write_data('LIST', path, timeout) return (DeviceFileStat(dent_msg.name, dent_msg.mode, dent_msg.size, dent_msg.time) for dent_msg in transport.read_until_done('DENT', timeout))
205,169
Push a file-like object to the device. Args: src_file: File-like object for reading from filename: Filename to push to on the device st_mode: stat mode for filename on the device mtime: modification time to set for the file on the device timeout: Timeout to use for the send operation. Raises: AdbProtocolError: If we get an unexpected response. AdbRemoteError: If there's a remote error (but valid protocol).
def send(self, src_file, filename, st_mode=DEFAULT_PUSH_MODE, mtime=None, timeout=None): transport = DataFilesyncTransport(self.stream) transport.write_data('SEND', '%s,%s' % (filename, st_mode), timeout) try: while True: data = src_file.read(MAX_PUSH_DATA_BYTES) if not data: break transport.write_data('DATA', data, timeout) mtime = mtime or int(time.time()) transport.write_message( FilesyncMessageTypes.DoneMessage('DONE', mtime), timeout) except usb_exceptions.AdbStreamClosedError: # Try to do one last read to see if we can get any more information, # ignoring any errors for this Read attempt. Note that this always # raises, either a new AdbRemoteError, or the AdbStreamClosedError. self._check_for_fail_message(transport, sys.exc_info(), timeout) data_msg = transport.read_message(timeout) data_msg.assert_command_is('OKAY')
205,172
Write an arbitrary message (of one of the types above). For the host side implementation, this will only ever be a DataMessage, but it's implemented generically enough here that you could use FilesyncTransport to implement the device side if you wanted. Args: msg: The message to send, must be one of the types above. timeout: timeouts.PolledTimeout to use for the operation.
def write_message(self, msg, timeout=None): replace_dict = {'command': self.CMD_TO_WIRE[msg.command]} if msg.has_data: # Swap out data for the data length for the wire. data = msg[-1] replace_dict[msg._fields[-1]] = len(data) self.stream.write(struct.pack(msg.struct_format, *msg._replace(**replace_dict)), timeout) if msg.has_data: self.stream.write(data, timeout)
205,176
Makes the names of phase measurement and attachments unique. This function will make the names of measurements and attachments unique. It modifies the input all_phases. Args: all_phases: the phases to make unique Returns: the phases now modified.
def phase_uniquizer(all_phases): measurement_name_maker = UniqueNameMaker( itertools.chain.from_iterable( phase.measurements.keys() for phase in all_phases if phase.measurements)) attachment_names = list(itertools.chain.from_iterable( phase.attachments.keys() for phase in all_phases)) attachment_names.extend(itertools.chain.from_iterable([ 'multidim_' + name for name, meas in phase.measurements.items() if meas.dimensions is not None ] for phase in all_phases if phase.measurements)) attachment_name_maker = UniqueNameMaker(attachment_names) for phase in all_phases: # Make measurements unique. for name, _ in sorted(phase.measurements.items()): old_name = name name = measurement_name_maker.make_unique(name) phase.measurements[old_name].name = name phase.measurements[name] = phase.measurements.pop(old_name) # Make attachments unique. for name, _ in sorted(phase.attachments.items()): old_name = name name = attachment_name_maker.make_unique(name) phase.attachments[old_name].name = name phase.attachments[name] = phase.attachments.pop(old_name) return all_phases
205,186
Convert an OpenHTF test record attachment to a multi-dim measurement. This is a best effort attempt to reverse, as some data is lost in converting from a multidim to an attachment. Args: attachment: an `openhtf.test_record.Attachment` from a multi-dim. name: an optional name for the measurement. If not provided, will use the name included in the attachment. Returns: A multi-dim `openhtf.Measurement`.
def attachment_to_multidim_measurement(attachment, name=None): data = json.loads(attachment.data) name = name or data.get('name') # attachment_dims is a list of dicts with keys 'uom_suffix' and 'uom_code' attachment_dims = data.get('dimensions', []) # attachment_value is a list of lists [[t1, x1, y1, f1], [t2, x2, y2, f2]] attachment_values = data.get('value') attachment_outcome_str = data.get('outcome') if attachment_outcome_str not in TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME: # For backward compatibility with saved data we'll convert integers to str try: attachment_outcome_str = test_runs_pb2.Status.Name( int(attachment_outcome_str)) except ValueError: attachment_outcome_str = None # Convert test status outcome str to measurement outcome outcome = TEST_RUN_STATUS_NAME_TO_MEASUREMENT_OUTCOME.get( attachment_outcome_str) # convert dimensions into htf.Dimensions _lazy_load_units_by_code() dims = [] for d in attachment_dims: # Try to convert into htf.Dimension including backwards compatibility. unit = UNITS_BY_CODE.get(d.get('uom_code'), units.NONE) description = d.get('name', '') dims.append(measurements.Dimension(description=description, unit=unit)) # Attempt to determine if units are included. if attachment_values and len(dims) == len(attachment_values[0]): # units provided units_ = dims[-1].unit dimensions = dims[:-1] else: units_ = None dimensions = dims # create the DimensionedMeasuredValue and populate it with values. measured_value = measurements.DimensionedMeasuredValue( name=name, num_dimensions=len(dimensions) ) for row in attachment_values: coordinates = tuple(row[:-1]) val = row[-1] measured_value[coordinates] = val measurement = measurements.Measurement( name=name, units=units_, dimensions=tuple(dimensions), measured_value=measured_value, outcome=outcome ) return measurement
205,190