Convert decimal angle to degrees, minutes and possibly seconds. Args: angle (float): Angle to convert style (str): Return fractional or whole minutes values Returns: tuple: Angle converted to degrees, minutes and possibly seconds (seconds, and fractional minutes, are floats) Raises: ValueError: Unknown value for ``style``
def to_dms(angle, style='dms'):
    sign = 1 if angle >= 0 else -1
    angle = abs(angle) * 3600
    minutes, seconds = divmod(angle, 60)
    degrees, minutes = divmod(minutes, 60)
    if style == 'dms':
        return tuple(sign * abs(i)
                     for i in (int(degrees), int(minutes), seconds))
    elif style == 'dm':
        return tuple(sign * abs(i)
                     for i in (int(degrees), minutes + seconds / 60))
    else:
        raise ValueError('Unknown style type %r' % style)
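A quick hand-checked usage sketch (assuming `to_dms` from above is in scope):

print(to_dms(52.015))        # (52, 0, 54.0), up to float rounding
print(to_dms(52.015, 'dm'))  # (52, 0.9)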
1,058,923
Convert degrees, minutes and optionally seconds to decimal angle. Args: degrees (float): Number of degrees minutes (float): Number of minutes seconds (float): Number of seconds Returns: float: Angle converted to decimal degrees
def to_dd(degrees, minutes, seconds=0):
    sign = -1 if any(i < 0 for i in (degrees, minutes, seconds)) else 1
    return sign * (abs(degrees) + abs(minutes) / 60 + abs(seconds) / 3600)
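Round-tripping with `to_dms` (a sketch; note any negative component flips the overall sign):

print(to_dd(52, 0, 54))  # 52.015
print(to_dd(0, -30))     # -0.5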
1,058,924
Generate a ``tuple`` of compass direction names. Args: segment (list): Compass segment to generate names for abbr (bool): Names should use single letter abbreviations Returns: tuple of str: Direction names for compass segment
def __chunk(segment, abbr=False):
    names = ('north', 'east', 'south', 'west', 'north')
    if not abbr:
        sjoin = '-'
    else:
        names = [s[0].upper() for s in names]
        sjoin = ''
    if segment % 2 == 0:
        return (names[segment].capitalize(),
                sjoin.join((names[segment].capitalize(), names[segment],
                            names[segment + 1])),
                sjoin.join((names[segment].capitalize(),
                            names[segment + 1])),
                sjoin.join((names[segment + 1].capitalize(), names[segment],
                            names[segment + 1])))
    else:
        return (names[segment].capitalize(),
                sjoin.join((names[segment].capitalize(),
                            names[segment + 1], names[segment])),
                sjoin.join((names[segment + 1].capitalize(),
                            names[segment])),
                sjoin.join((names[segment + 1].capitalize(),
                            names[segment + 1], names[segment])))
1,058,925
Convert angle into direction name. Args: angle (float): Angle in degrees to convert to direction name segments (int): Number of segments to split compass into abbr (bool): Whether to return abbreviated direction string Returns: str: Direction name for ``angle``
def angle_to_name(angle, segments=8, abbr=False):
    if segments == 4:
        string = COMPASS_NAMES[int((angle + 45) / 90) % 4 * 2]
    elif segments == 8:
        string = COMPASS_NAMES[int((angle + 22.5) / 45) % 8 * 2]
    elif segments == 16:
        string = COMPASS_NAMES[int((angle + 11.25) / 22.5) % 16]
    else:
        raise ValueError('Segments parameter must be 4, 8 or 16 not %r'
                         % segments)
    if abbr:
        return ''.join(i[0].capitalize() for i in string.split('-'))
    else:
        return string
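Example readings, assuming `COMPASS_NAMES` is the usual sixteen-entry ring starting at 'North' (the constant itself is not shown here):

print(angle_to_name(0))               # 'North'
print(angle_to_name(225, abbr=True))  # 'SW', from 'South-west'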
1,058,926
Convert angle into distance along a great circle. Args: angle (float): Angle in degrees to convert to distance units (str): Unit type to be used for distances Returns: float: Distance in ``units`` Raises: ValueError: Unknown value for ``units``
def angle_to_distance(angle, units='metric'):
    distance = math.radians(angle) * BODY_RADIUS
    if units in ('km', 'metric'):
        return distance
    elif units in ('sm', 'imperial', 'US customary'):
        return distance / STATUTE_MILE
    elif units in ('nm', 'nautical'):
        return distance / NAUTICAL_MILE
    else:
        raise ValueError('Unknown units type %r' % units)
1,058,929
Convert a distance into an angle along a great circle. Args: distance (float): Distance to convert to degrees units (str): Unit type to be used for distances Returns: float: Angle in degrees Raises: ValueError: Unknown value for ``units``
def distance_to_angle(distance, units='metric'):
    if units in ('km', 'metric'):
        pass
    elif units in ('sm', 'imperial', 'US customary'):
        distance *= STATUTE_MILE
    elif units in ('nm', 'nautical'):
        distance *= NAUTICAL_MILE
    else:
        raise ValueError('Unknown units type %r' % units)
    return math.degrees(distance / BODY_RADIUS)
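The two helpers above are inverses of each other; assuming `BODY_RADIUS` is the mean Earth radius (~6371 km), one degree of arc is roughly 111 km:

print(angle_to_distance(1))                     # ~111.19 (km)
print(distance_to_angle(angle_to_distance(1)))  # 1.0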
1,058,930
Calculate geodesic latitude/longitude from Maidenhead locator. Args: locator (str): Maidenhead locator string Returns: tuple of float: Geodesic latitude and longitude values Raises: ValueError: Incorrect grid locator length ValueError: Invalid values in locator string
def from_grid_locator(locator):
    if not len(locator) in (4, 6, 8):
        raise ValueError('Locator must be 4, 6 or 8 characters long %r'
                         % locator)

    # Convert the locator string to a list, because we need it to be mutable
    # to munge the values
    locator = list(locator)

    # Convert characters to numeric value, fields are always uppercase
    locator[0] = ord(locator[0]) - 65
    locator[1] = ord(locator[1]) - 65

    # Values for square are always integers
    locator[2] = int(locator[2])
    locator[3] = int(locator[3])

    if len(locator) >= 6:
        # Some people use uppercase for the subsquare data, in spite of
        # lowercase being the accepted style, so handle that too.
        locator[4] = ord(locator[4].lower()) - 97
        locator[5] = ord(locator[5].lower()) - 97

    if len(locator) == 8:
        # Extended square values are always integers
        locator[6] = int(locator[6])
        locator[7] = int(locator[7])

    # Check field values within 'A'(0) to 'R'(17), and square values are
    # within 0 to 9
    if not 0 <= locator[0] <= 17 \
            or not 0 <= locator[1] <= 17 \
            or not 0 <= locator[2] <= 9 \
            or not 0 <= locator[3] <= 9:
        raise ValueError('Invalid values in locator %r' % locator)

    # Check subsquare values are within 'a'(0) to 'x'(23)
    if len(locator) >= 6:
        if not 0 <= locator[4] <= 23 \
                or not 0 <= locator[5] <= 23:
            raise ValueError('Invalid values in locator %r' % locator)

    # Extended square values must be within 0 to 9
    if len(locator) == 8:
        if not 0 <= locator[6] <= 9 \
                or not 0 <= locator[7] <= 9:
            raise ValueError('Invalid values in locator %r' % locator)

    longitude = LONGITUDE_FIELD * locator[0] \
        + LONGITUDE_SQUARE * locator[2]
    latitude = LATITUDE_FIELD * locator[1] \
        + LATITUDE_SQUARE * locator[3]

    if len(locator) >= 6:
        longitude += LONGITUDE_SUBSQUARE * locator[4]
        latitude += LATITUDE_SUBSQUARE * locator[5]

    if len(locator) == 8:
        longitude += LONGITUDE_EXTSQUARE * locator[6] + LONGITUDE_EXTSQUARE / 2
        latitude += LATITUDE_EXTSQUARE * locator[7] + LATITUDE_EXTSQUARE / 2
    else:
        longitude += LONGITUDE_EXTSQUARE * 5
        latitude += LATITUDE_EXTSQUARE * 5

    # Rebase longitude and latitude to normal geodesic
    longitude -= 180
    latitude -= 90

    return latitude, longitude
1,058,931
Calculate Maidenhead locator from latitude and longitude. Args: latitude (float): Position's latitude longitude (float): Position's longitude precision (str): Precision with which to generate locator string Returns: str: Maidenhead locator for latitude and longitude Raises: ValueError: Invalid precision identifier ValueError: Invalid latitude or longitude value
def to_grid_locator(latitude, longitude, precision='square'):
    if precision not in ('square', 'subsquare', 'extsquare'):
        raise ValueError('Unsupported precision value %r' % precision)
    if not -90 <= latitude <= 90:
        raise ValueError('Invalid latitude value %r' % latitude)
    if not -180 <= longitude <= 180:
        raise ValueError('Invalid longitude value %r' % longitude)

    latitude += 90.0
    longitude += 180.0

    locator = []

    field = int(longitude / LONGITUDE_FIELD)
    locator.append(chr(field + 65))
    longitude -= field * LONGITUDE_FIELD

    field = int(latitude / LATITUDE_FIELD)
    locator.append(chr(field + 65))
    latitude -= field * LATITUDE_FIELD

    square = int(longitude / LONGITUDE_SQUARE)
    locator.append(str(square))
    longitude -= square * LONGITUDE_SQUARE

    square = int(latitude / LATITUDE_SQUARE)
    locator.append(str(square))
    latitude -= square * LATITUDE_SQUARE

    if precision in ('subsquare', 'extsquare'):
        subsquare = int(longitude / LONGITUDE_SUBSQUARE)
        locator.append(chr(subsquare + 97))
        longitude -= subsquare * LONGITUDE_SUBSQUARE

        subsquare = int(latitude / LATITUDE_SUBSQUARE)
        locator.append(chr(subsquare + 97))
        latitude -= subsquare * LATITUDE_SUBSQUARE

    if precision == 'extsquare':
        extsquare = int(longitude / LONGITUDE_EXTSQUARE)
        locator.append(str(extsquare))

        extsquare = int(latitude / LATITUDE_EXTSQUARE)
        locator.append(str(extsquare))

    return ''.join(locator)
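A hand-worked round trip, assuming the `LONGITUDE_*`/`LATITUDE_*` constants encode the standard Maidenhead cell sizes (20°x10° fields, 2°x1° squares, 1/12°x1/24° subsquares):

loc = to_grid_locator(52.015, -0.221, 'subsquare')
print(loc)                     # 'IO92va'
print(from_grid_locator(loc))  # (52.0208..., -0.2083...), the cell centre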
1,058,932
Parse latitude and longitude from string location. Args: location (str): String to parse Returns: tuple of float: Latitude and longitude of location
def parse_location(location):

    def split_dms(text, hemisphere):
        out = []
        sect = []
        for i in text:
            if i.isdigit():
                sect.append(i)
            else:
                out.append(sect)
                sect = []
        d, m, s = [float(''.join(i)) for i in out]
        if hemisphere in 'SW':
            d, m, s = [-1 * x for x in (d, m, s)]
        return to_dd(d, m, s)

    for sep in ';, ':
        chunks = location.split(sep)
        if len(chunks) == 2:
            if chunks[0].endswith('N'):
                latitude = float(chunks[0][:-1])
            elif chunks[0].endswith('S'):
                latitude = -1 * float(chunks[0][:-1])
            else:
                latitude = float(chunks[0])
            if chunks[1].endswith('E'):
                longitude = float(chunks[1][:-1])
            elif chunks[1].endswith('W'):
                longitude = -1 * float(chunks[1][:-1])
            else:
                longitude = float(chunks[1])
            return latitude, longitude
        elif len(chunks) == 4:
            if chunks[0].endswith(('s', '"')):
                latitude = split_dms(chunks[0], chunks[1])
            else:
                latitude = float(chunks[0])
                if chunks[1] == 'S':
                    latitude = -1 * latitude
            if chunks[2].endswith(('s', '"')):
                longitude = split_dms(chunks[2], chunks[3])
            else:
                longitude = float(chunks[2])
                if chunks[3] == 'W':
                    longitude = -1 * longitude
            return latitude, longitude
1,058,933
Initialise a new ``FileFormatError`` object. Args: site (str): Remote site name to display in error message
def __init__(self, site=None):
    super(FileFormatError, self).__init__()
    self.site = site
1,058,938
Parse an ISO 8601 formatted time stamp. Args: timestamp (str): Timestamp to parse Returns: Timestamp: Parsed timestamp
def parse_isoformat(timestamp):
    if len(timestamp) == 20:
        zone = TzOffset('+00:00')
        timestamp = timestamp[:-1]
    elif len(timestamp) == 24:
        zone = TzOffset('%s:%s' % (timestamp[-5:-2], timestamp[-2:]))
        timestamp = timestamp[:-5]
    elif len(timestamp) == 25:
        zone = TzOffset(timestamp[-6:])
        timestamp = timestamp[:-6]
    timestamp = Timestamp.strptime(timestamp, '%Y-%m-%dT%H:%M:%S')
    timestamp = timestamp.replace(tzinfo=zone)
    return timestamp
1,058,940
Update the locator, and trigger a latitude and longitude update. Args: value (str): New Maidenhead locator string
def locator(self, value):
    self._locator = value
    self._latitude, self._longitude = utils.from_grid_locator(value)
1,059,006
Pretty printed location string. Returns: str: Human readable string representation of ``Baken`` object
def __str__(self):
    text = super(Baken, self).__format__('dms')
    if self._locator:
        text = '%s (%s)' % (self._locator, text)
    return text
1,059,007
Search for funds matching a search term. Args: term (str): Fund id to search on field (str): The field to search on. Options are title, amount, org_name and type. kwargs (dict): additional keywords passed into requests.session.get params keyword.
def funds(self, term, field=None, **kwargs):
    params = kwargs
    params['q'] = term
    if field:
        params['f'] = field
    else:
        params['f'] = 'fu.org.n'
    baseuri = self._BASE_URI + 'funds'
    res = self.session.get(baseuri, params=params)
    self.handle_http_error(res)
    return res
1,059,025
Returns a GDAL virtual filesystem prefixed path. Arguments: path -- file path as str
def vsiprefix(path):
    vpath = path.lower()
    scheme = VSI_SCHEMES.get(urlparse(vpath).scheme, '')
    for ext in VSI_TYPES:
        if ext in vpath:
            filesys = VSI_TYPES[ext]
            break
    else:
        filesys = ''
    if filesys and scheme:
        filesys = filesys[:-1]
    return ''.join((filesys, scheme, path))
1,059,028
Extended pretty printing for location strings. Args: format_spec (str): Coordinate formatting system to use Returns: str: Human readable string representation of ``Point`` object Raises: ValueError: Unknown value for ``format_spec``
def __format__(self, format_spec='dd'):
    text = super(Location.__base__, self).__format__(format_spec)
    if self.alt_names:
        return '%s (%s - %s)' % (self.name, ', '.join(self.alt_names),
                                 text)
    else:
        return '%s (%s)' % (self.name, text)
1,059,090
Return the corresponding latitude. Args: line (int): Line number Returns: Corresponding latitude in degrees
def lat_id(self, line):
    if self.grid == 'WAC':
        lat = ((1 + self.LINE_PROJECTION_OFFSET - line)
               * self.MAP_SCALE * 1e-3 / self.A_AXIS_RADIUS)
        return lat * 180 / np.pi
    else:
        lat = float(self.CENTER_LATITUDE) - \
            (line - float(self.LINE_PROJECTION_OFFSET) - 1) \
            / float(self.MAP_RESOLUTION)
        return lat
1,059,148
Return the corresponding longitude. Args: sample (int): Sample number on a line Returns: Corresponding longitude in degrees
def long_id(self, sample):
    if self.grid == 'WAC':
        lon = self.CENTER_LONGITUDE \
            + (sample - self.SAMPLE_PROJECTION_OFFSET - 1) \
            * self.MAP_SCALE * 1e-3 \
            / (self.A_AXIS_RADIUS
               * np.cos(self.CENTER_LATITUDE * np.pi / 180.0))
        return lon * 180 / np.pi
    else:
        lon = float(self.CENTER_LONGITUDE) + \
            (sample - float(self.SAMPLE_PROJECTION_OFFSET) - 1) \
            / float(self.MAP_RESOLUTION)
        return lon
1,059,149
Return the corresponding sample. Args: lon (int): Longitude in degrees Returns: Corresponding sample
def sample_id(self, lon):
    if self.grid == 'WAC':
        sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET) + 1.0
                         + (lon * np.pi / 180.0
                            - float(self.CENTER_LONGITUDE))
                         * self.A_AXIS_RADIUS
                         * np.cos(self.CENTER_LATITUDE * np.pi / 180.0)
                         / (self.MAP_SCALE * 1e-3))
    else:
        sample = np.rint(float(self.SAMPLE_PROJECTION_OFFSET)
                         + float(self.MAP_RESOLUTION)
                         * (lon - float(self.CENTER_LONGITUDE))) + 1
    return self._control_sample(sample)
1,059,151
Return the corresponding line. Args: lat (int): Latitude in degrees Returns: Corresponding line
def line_id(self, lat):
    if self.grid == 'WAC':
        line = np.rint(1.0 + self.LINE_PROJECTION_OFFSET
                       - self.A_AXIS_RADIUS * np.pi * lat
                       / (self.MAP_SCALE * 1e-3 * 180))
    else:
        line = np.rint(float(self.LINE_PROJECTION_OFFSET)
                       - float(self.MAP_RESOLUTION)
                       * (lat - float(self.CENTER_LATITUDE))) + 1
    return self._control_line(line)
1,059,153
Read part of the binary file. Args: size_chunk (int): Size of the chunk to read start (int): Starting byte bytesize (int): Size of each value in bytes Returns: (np.array): Array of the corresponding values
def array(self, size_chunk, start, bytesize):
    with open(self.img, 'rb') as f1:
        f1.seek(self.start_byte + start * self.bytesize)
        data = f1.read(size_chunk * self.bytesize)
        # np.fromstring is deprecated for binary data; np.frombuffer is the
        # drop-in replacement here.
        Z = np.frombuffer(data, dtype=self.dtype, count=size_chunk)
        if self.grid == 'LOLA':
            return Z * float(self.SCALING_FACTOR)
        else:
            return Z
1,059,154
Change the region of interest Args: size_window (float): Radius of the region of interest (km) Notes: Change the attributes ``size_window`` and ``window`` to correspond to the new region of interest.
def change_window(self, size_window):
    self.size_window = size_window
    self.window = self.lambert_window(
        self.size_window, self.lat0, self.lon0)
1,059,234
Return arrays for the region of interest. Args: type_img (str): Either lola or wac. Returns: A tuple of three arrays ``(X, Y, Z)`` where ``X`` contains the longitudes, ``Y`` the latitudes and ``Z`` the values extracted for the region of interest. Note: The argument has to be either lola or wac (not case sensitive). All returned arrays have the same size. All coordinates are in degrees.
def get_arrays(self, type_img):
    if type_img.lower() == 'lola':
        return LolaMap(self.ppdlola, *self.window,
                       path_pdsfile=self.path_pdsfiles).image()
    elif type_img.lower() == 'wac':
        return WacMap(self.ppdwac, *self.window,
                      path_pdsfile=self.path_pdsfiles).image()
    else:
        raise ValueError('The img type has to be either "Lola" or "Wac"')
1,059,237
desc: Contains the URIs for this service args: - name: routes type: list subtypes: [Route] desc: a mapping of URIs to Route instances ctor: pymarshal.api_docs.routes.Route.__init__
def __init__(
    self,
    routes,
):
    self.routes = type_assert_iter(
        routes,
        Route,
    )
    check_dups(routes)
1,059,399
Attempt to load config from paths, in order. Args: paths (List[string]): list of paths to python files Returns: Config: loaded config
def load_config(paths=DEFAULT_CONFIG_PATHS):
    config = Config()
    for path in paths:
        if os.path.isfile(path):
            config.load_pyfile(path)
    return config
1,059,489
Exception to be raised if the parsed file is invalid. Args: file_path (string): path to bad config cause (string): reason of failure, i.e. what exactly was the problem while parsing
def __init__(self, file_path, cause):
    message = six.text_type("Malformed config at {}: {}").format(
        file_path, cause
    )
    super(MalformedConfig, self).__init__(message)
1,059,490
Load python file as config. Args: path (string): path to the python file
def load_pyfile(self, path):
    with open(path) as config_file:
        contents = config_file.read()
        try:
            exec(compile(contents, path, 'exec'), self)
        except Exception as e:
            raise MalformedConfig(path, six.text_type(e))
1,059,491
Search for a project by id. Args: term (str): Term to search for. kwargs (dict): additional keywords passed into requests.session.get params keyword.
def project(self, term, **kwargs):
    params = kwargs
    baseuri = self._BASE_URI + 'projects/' + term
    res = self.session.get(baseuri, params=params)
    self.handle_http_error(res)
    return res
1,059,562
Search for projects. Defaults to project_title. Other fields are: project_reference, project_abstract. Args: term (str): Term to search for. kwargs (dict): additional keywords passed into requests.session.get params keyword.
def projects(self, term, field=None, **kwargs):
    params = kwargs
    params['q'] = term
    if field:
        params['f'] = self._FIELD_MAP[field]
    else:
        params['f'] = 'pro.t'
    baseuri = self._BASE_URI + 'projects'
    res = self.session.get(baseuri, params=params)
    self.handle_http_error(res)
    return res
1,059,563
Recursively marshal a Python object to a JSON-compatible dict that can be passed to json.{dump,dumps}, a web client, or a web server, etc... Args: obj: object, It's members can be nested Python objects which will be converted to dictionaries types: tuple-of-types, The JSON primitive types, typically you would not change this fields: None-list-of-str, Explicitly marshal only these fields Returns: dict
def marshal_json(
    obj,
    types=JSON_TYPES,
    fields=None,
):
    return marshal_dict(
        obj,
        types,
        fields=fields,
    )
1,059,782
Parse OSM XML element for generic data. Args: element (etree.Element): Element to parse Returns: tuple: Generic OSM data for object instantiation
def _parse_flags(element):
    visible = True if element.get('visible') else False
    user = element.get('user')
    timestamp = element.get('timestamp')
    if timestamp:
        timestamp = utils.Timestamp.parse_isoformat(timestamp)
    tags = {}
    try:
        for tag in element['tag']:
            key = tag.get('k')
            value = tag.get('v')
            tags[key] = value
    except AttributeError:
        pass
    return visible, user, timestamp, tags
1,059,980
Create element independent flags output. Args: osm_obj (Node): Object with OSM-style metadata Returns: list: Human readable flags output
def _get_flags(osm_obj):
    flags = []
    if osm_obj.visible:
        flags.append('visible')
    if osm_obj.user:
        flags.append('user: %s' % osm_obj.user)
    if osm_obj.timestamp:
        flags.append('timestamp: %s' % osm_obj.timestamp.isoformat())
    if osm_obj.tags:
        flags.append(', '.join('%s: %s' % (k, v)
                               for k, v in sorted(osm_obj.tags.items())))
    return flags
1,059,981
Initialise a new ``Node`` object. Args: ident (int): Unique identifier for the node latitude (float): Node's latitude longitude (float): Node's longitude visible (bool): Whether the node is visible user (str): User who logged the node timestamp (str): The date and time a node was logged tags (dict): Tags associated with the node
def __init__(self, ident, latitude, longitude, visible=False, user=None,
             timestamp=None, tags=None):
    super(Node, self).__init__(latitude, longitude)
    self.ident = ident
    self.visible = visible
    self.user = user
    self.timestamp = timestamp
    self.tags = tags
1,059,983
Parse an OSM node XML element. Args: element (etree.Element): XML Element to parse Returns: Node: Object representing parsed element
def parse_elem(element):
    ident = int(element.get('id'))
    latitude = element.get('lat')
    longitude = element.get('lon')
    flags = _parse_flags(element)
    return Node(ident, latitude, longitude, *flags)
1,059,986
Return the Luhn check digit for the given string. Args: base(str): string for which to calculate the check digit num_only(bool): allow only digits in `base` (default: False) allow_lower_case(bool): allow lower case letters in `base` (default: False) Returns: int: Luhn check digit Raises: ValueError: given `base` contains an unallowed character
def luhn(base, num_only=False, allow_lower_case=False):
    if num_only:
        alphabet = _ALPHABET[:10]
    else:
        alphabet = _ALPHABET
    if allow_lower_case:
        base = base.upper()
    try:
        pre_calc = (_PRE_CALC[alphabet.index(c)] for c in reversed(base))
        cum = 0
        parity = 1
        for elem in pre_calc:
            val, parity = elem[parity]
            cum += val
    except ValueError:
        pass  # fall through to the error handling below
    else:
        return 10 - cum % 10
    # unallowed character detected
    if num_only:
        msg = 'The string given must only contain digits.'
    elif allow_lower_case:
        msg = 'The string given must only contain digits and ascii letters.'
    else:
        msg = 'The string given must only contain digits and upper case ' \
              'ascii letters.'
    raise ValueError(msg)
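A sanity check using the classic Luhn test number 7992739871, whose check digit is 3 (this assumes `_PRE_CALC` implements the standard digit-doubling table):

print(luhn('7992739871', num_only=True))  # 3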
1,060,293
Note that type_assert can't be used because it would create a circular dependency. Args: cls, type, The type that was attempted to unmarshal into diff: dict, The extra arguments that were passed to @cls
def __init__(
    self,
    cls,
    diff,
):
    msg = "\n".join([
        "",  # Newline to make the output cleaner
        "ctor: {}".format(cls),
        "extras: {}".format(diff),
    ])
    Exception.__init__(self, msg)
    self.type = str(
        type(self),
    )
    self.cls = str(cls)
    self.diff = str(diff)
    self.type = self.__class__.__name__
1,060,351
Note that type_assert can't be used because it would create a circular dependency. Args: cls, type-or-static-method, The type or constructor that was attempted to unmarshal into cls_args: list, The arguments of @cls kwargs: dict, The arguments that were passed to @cls ex: Exception, The exception that was raised
def __init__(
    self,
    cls,
    cls_args,
    kwargs,
    ex,
):
    msg = "\n".join([
        "",  # Newline to make the output cleaner
        "module: {}".format(cls.__module__),
        "ctor: {}".format(cls),
        "ctor_args: {}".format(cls_args),
        "args (after removing args not in ctor_args): {}".format(kwargs),
        # The original string was missing its format placeholder.
        "only in ctor_args: {}".format(
            [x for x in cls_args if x not in kwargs]
        ),
        "exception: {}".format(ex),
    ])
    Exception.__init__(self, msg)
    self.type = str(
        type(self),
    )
    self.cls = str(cls)
    self.cls_args = str(cls_args)
    self.kwargs = str(kwargs)
    self.ex = str(ex)
    self.type = self.__class__.__name__
1,060,352
Initialise a new ``Placemark`` object. Args: latitude (float): Placemark's latitude longitude (float): Placemark's longitude altitude (float): Placemark's altitude name (str): Name for placemark description (str): Placemark's description
def __init__(self, latitude, longitude, altitude=None, name=None,
             description=None):
    super(Placemark, self).__init__(latitude, longitude, altitude, name)
    if altitude:
        self.altitude = float(altitude)
    self.description = description
1,060,500
Makes sure the request has a valid authorization jwt before calling the wrapped function. It does this by checking the timestamp of the last jwt and, if more than 10 minutes have elapsed, refreshing its existing jwt from the server. Args: f: Function to wrap Returns: Function, f
def _auth(f):
    @wraps(f)
    def method(self, *args, **kwargs):
        if not self._auth_token or \
                datetime.utcnow() >= self._last_auth + timedelta(minutes=10):
            # Need to get new jwt
            self.auth_refresh()
        return f(self, *args, **kwargs)
    return method
1,060,536
Checks if the expected response code matches the actual response code. If they're not equal, raises the appropriate exception Args: response: (int) Actual status code expected: (int) Expected status code
def _check_response(response, expected):
    response_code = response.status_code
    if expected == response_code:
        return
    if response_code < 400:
        raise ex.UnexpectedResponseCodeException(response.text)
    elif response_code == 401:
        raise ex.UnauthorizedException(response.text)
    elif response_code == 400:
        raise ex.BadRequestException(response.text)
    elif response_code == 403:
        raise ex.ForbiddenException(response.text)
    elif response_code == 404:
        raise ex.NotFoundException(response.text)
    elif response_code == 429:
        raise ex.RateLimitedException(response.text)
    else:
        raise ex.InternalServerErrorException(response.text)
1,060,542
Take a string + filename, return a (tarinfo, stringbuf) tuple for insertion. Args: bytes (bstring): Bytestring representation of the filedata. filename (string): Filepath relative to tarfile root. Returns: tuple: (tarfile.TarInfo,io.BytesIO). This can be passed directly to TarFile.addfile().
def bytestring_to_tar_tuple(filename, bytes):
    info = tarfile.TarInfo(filename)
    info.size = len(bytes)
    return info, BytesIO(bytes)
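A minimal sketch of feeding the tuple straight to `TarFile.addfile` (the filename and payload here are hypothetical):

import tarfile

with tarfile.open('demo.tar.bz2', mode='w:bz2') as out:
    out.addfile(*bytestring_to_tar_tuple('packets/example.xml',
                                         b'<voe:VOEvent/>'))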
1,060,723
Iterate over voevent models / dbrows and write to bz'd tarball. Args: voevents (iterable): An iterable (e.g. list) of e.g. Voevent db-rows, with access to the 'ivorn' and 'xml' attributes. filepath (string): Path to the new tarball to create. Typically of form '/path/to/foo.tar.bz2' Returns: packet_count (int): Number of packets written to tarball
def write_tarball(voevents, filepath):
    tuple_gen = ((v.ivorn, v.xml) for v in voevents)
    return write_tarball_from_ivorn_xml_tuples(tuple_gen, filepath)
1,060,724
Iterate over a series of ivorn / xml bstring tuples and write to bz'd tarball. Args: ivorn_xml_tuples (iterable): [(ivorn, xml)] An iterable (e.g. list) of tuples containing two entries - an ivorn string and an xml bytestring. filepath (string): Path to the new tarball to create. Typically of form '/path/to/foo.tar.bz2' Returns: packet_count (int): Number of packets written to tarball
def write_tarball_from_ivorn_xml_tuples(ivorn_xml_tuples, filepath):
    out = tarfile.open(filepath, mode='w:bz2')
    logger.info("Writing packets to tarball at " + filepath)
    packet_count = 0
    try:
        for (ivorn, xml) in ivorn_xml_tuples:
            out.addfile(*bytestring_to_tar_tuple(
                filename_from_ivorn(ivorn),
                xml
            ))
            packet_count += 1
    finally:
        out.close()
    return packet_count
1,060,725
Initialise a new ``Xearth`` object. Args: latitude (float): Location's latitude longitude (float): Location's longitude comment (str): Comment for location
def __init__(self, latitude, longitude, comment=None):
    super(Xearth, self).__init__(latitude, longitude)
    self.comment = comment
1,060,735
Swap the keys in a dictionary Args: d: dict, The dict to swap keys in cls: class, If the class has a staticly defined _marshal_key_swap and/or _unmarshal_key_swap dict, the keys will be swapped. Otherwise @d is returned marshal: bool, True if marshalling class to JSON, False if unmarshalling JSON to class Returns: dict
def key_swap(
    d,
    cls,
    marshal,
):
    dname = '_{}marshal_key_swap'.format("" if marshal else "un")
    if hasattr(cls, dname):
        key_swap = getattr(cls, dname)
        return {
            key_swap[k] if k in key_swap else k: v
            for k, v in d.items()
        }
    else:
        return d
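A sketch of the key-swap contract, with a hypothetical class that renames `id` to `_id` when marshalling:

class Doc:
    _marshal_key_swap = {'id': '_id'}
    _unmarshal_key_swap = {'_id': 'id'}

print(key_swap({'id': 5, 'body': 'x'}, Doc, marshal=True))
# {'_id': 5, 'body': 'x'}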
1,060,991
Return the matching score of 2 given lists of authors. Args: x_authors (list(dict)): first schema-compliant list of authors. y_authors (list(dict)): second schema-compliant list of authors. Returns: float: matching score of authors.
def compute_author_match_score(x_authors, y_authors):
    if not x_authors or not y_authors:
        return 0.0
    matches = get_number_of_author_matches(x_authors, y_authors)
    max_length = max(len(x_authors), len(y_authors))
    return matches / float(max_length)
1,061,103
Return the Jaccard similarity coefficient of 2 given sets. Args: x_set (set): first set. y_set (set): second set. Returns: float: Jaccard similarity coefficient.
def compute_jaccard_index(x_set, y_set):
    if not x_set or not y_set:
        return 0.0
    intersection_cardinal = len(x_set & y_set)
    union_cardinal = len(x_set | y_set)
    return intersection_cardinal / float(union_cardinal)
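For example, two sets sharing two of four distinct elements score 0.5:

print(compute_jaccard_index({1, 2, 3}, {2, 3, 4}))  # 0.5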
1,061,104
Calculate a NMEA 0183 checksum for the given sentence. NMEA checksums are a simple XOR of all the characters in the sentence between the leading "$" symbol and the "*" checksum separator. Args: sentence (str): NMEA 0183 formatted sentence
def calc_checksum(sentence):
    if sentence.startswith('$'):
        sentence = sentence[1:]
    sentence = sentence.split('*')[0]
    return reduce(xor, map(ord, sentence))
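The XOR fold is easy to verify by hand; note the function relies on `reduce` and `xor` being imported:

from functools import reduce
from operator import xor

print('%02X' % calc_checksum('$AB*33'))  # '03', i.e. 0x41 ^ 0x42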
1,061,167
Parse a NMEA-formatted latitude pair. Args: latitude (str): Latitude in DDMM.MMMM hemisphere (str): North or South Returns: float: Decimal representation of latitude
def parse_latitude(latitude, hemisphere):
    latitude = int(latitude[:2]) + float(latitude[2:]) / 60
    if hemisphere == 'S':
        latitude = -latitude
    elif not hemisphere == 'N':
        raise ValueError('Incorrect North/South value %r' % hemisphere)
    return latitude
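For example, 52° 30.5' North:

print(parse_latitude('5230.50', 'N'))  # 52.50833...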
1,061,168
Parse a NMEA-formatted longitude pair. Args: longitude (str): Longitude in DDDMM.MMMM hemisphere (str): East or West Returns: float: Decimal representation of longitude
def parse_longitude(longitude, hemisphere):
    longitude = int(longitude[:3]) + float(longitude[3:]) / 60
    if hemisphere == 'W':
        longitude = -longitude
    elif not hemisphere == 'E':
        # The original message said North/South, which was wrong for a
        # longitude parser.
        raise ValueError('Incorrect East/West value %r' % hemisphere)
    return longitude
1,061,169
Initialise a new ``LoranPosition`` object. Args: latitude (float): Fix's latitude longitude (float): Fix's longitude time (datetime.time): Time the fix was taken status (bool): Whether the data is active mode (str): Type of reading
def __init__(self, latitude, longitude, time, status, mode=None):
    super(LoranPosition, self).__init__(latitude, longitude)
    self.time = time
    self.status = status
    self.mode = mode
1,061,170
Pretty printed position string. Args: talker (str): Talker ID Returns: str: Human readable string representation of ``Position`` object
def __str__(self, talker='GP'):
    if not len(talker) == 2:
        raise ValueError('Talker ID must be two characters %r' % talker)
    data = ['%sGLL' % talker]
    data.extend(nmea_latitude(self.latitude))
    data.extend(nmea_longitude(self.longitude))
    # Emit hundredths of a second; dividing by 1000000 (as the original
    # did) always produced 0.
    data.append('%s.%02i' % (self.time.strftime('%H%M%S'),
                             self.time.microsecond // 10000))
    data.append('A' if self.status else 'V')
    if self.mode:
        data.append(self.mode)
    data = ','.join(data)
    return '$%s*%02X\r' % (data, calc_checksum(data))
1,061,171
Parse position data elements. Args: elements (list): Data values for fix Returns: Fix: Fix object representing data
def parse_elements(elements):
    if not len(elements) in (6, 7):
        raise ValueError('Invalid GLL position data')
    # Latitude and longitude are checked for validity during Fix
    # instantiation
    latitude = parse_latitude(elements[0], elements[1])
    longitude = parse_longitude(elements[2], elements[3])
    hour, minute, second = [int(elements[4][i:i + 2])
                            for i in range(0, 6, 2)]
    # Skip the decimal point at index 6; the original slice [6:8] would
    # have tried to parse '.S'.
    usecond = int(elements[4][7:9]) * 10000
    time = datetime.time(hour, minute, second, usecond)
    active = True if elements[5] == 'A' else False
    mode = elements[6] if len(elements) == 7 else None
    return LoranPosition(latitude, longitude, time, active, mode)
1,061,172
Initialise a new ``Position`` object. Args: time (datetime.time): Time the fix was taken status (bool): Whether the data is active latitude (float): Fix's latitude longitude (float): Fix's longitude speed (float): Ground speed track (float): Track angle date (datetime.date): Date when position was taken variation (float): Magnetic variation mode (str): Type of reading
def __init__(self, time, status, latitude, longitude, speed, track, date,
             variation, mode=None):
    super(Position, self).__init__(latitude, longitude)
    self.time = time
    self.status = status
    self.speed = speed
    self.track = track
    self.date = date
    self.variation = variation
    self.mode = mode
1,061,173
Parse position data elements. Args: elements (list): Data values for position Returns: Position: Position object representing data
def parse_elements(elements):
    if not len(elements) in (11, 12):
        raise ValueError('Invalid RMC position data')
    time = datetime.time(*[int(elements[0][i:i + 2])
                           for i in range(0, 6, 2)])
    active = True if elements[1] == 'A' else False
    # Latitude and longitude are checked for validity during Fix
    # instantiation
    latitude = parse_latitude(elements[2], elements[3])
    longitude = parse_longitude(elements[4], elements[5])
    speed = float(elements[6])
    track = float(elements[7])
    date = datetime.date(2000 + int(elements[8][4:6]),
                         int(elements[8][2:4]), int(elements[8][:2]))
    variation = float(elements[9]) if not elements[9] == '' else None
    if elements[10] == 'W':
        variation = -variation
    elif variation and not elements[10] == 'E':
        raise ValueError('Incorrect variation value %r' % elements[10])
    mode = elements[11] if len(elements) == 12 else None
    return Position(time, active, latitude, longitude, speed, track, date,
                    variation, mode)
1,061,175
Parse essential fix's data elements. Args: elements (list): Data values for fix Returns: Fix: Fix object representing data
def parse_elements(elements):
    if not len(elements) in (14, 15):
        raise ValueError('Invalid GGA fix data')
    time = datetime.time(*[int(elements[0][i:i + 2])
                           for i in range(0, 6, 2)])
    # Latitude and longitude are checked for validity during Fix
    # instantiation
    latitude = parse_latitude(elements[1], elements[2])
    longitude = parse_longitude(elements[3], elements[4])
    quality = int(elements[5])
    if not 0 <= quality <= 9:
        raise ValueError('Invalid quality value %r' % quality)
    satellites = int(elements[6])
    if not 0 <= satellites <= 12:
        raise ValueError('Invalid number of satellites %r' % satellites)
    dilution = float(elements[7])
    altitude = float(elements[8])
    if elements[9] == 'F':
        altitude = altitude * 3.2808399
    elif not elements[9] == 'M':
        raise ValueError('Unknown altitude unit %r' % elements[9])
    if elements[10] in ('-', ''):
        geoid_delta = False
        logging.warning('Altitude data could be incorrect, as the geoid '
                        'difference has not been provided')
    else:
        geoid_delta = float(elements[10])
    if elements[11] == 'F':
        geoid_delta = geoid_delta * 3.2808399
    elif geoid_delta and not elements[11] == 'M':
        raise ValueError('Unknown geoid delta unit %r' % elements[11])
    dgps_delta = float(elements[12]) if elements[12] else None
    dgps_station = int(elements[13]) if elements[13] else None
    mode = elements[14] if len(elements) == 15 else None
    return Fix(time, latitude, longitude, quality, satellites, dilution,
               altitude, geoid_delta, dgps_delta, dgps_station, mode)
1,061,178
Initialise a new ``Waypoint`` object. Args: latitude (float): Waypoint's latitude longitude (float): Waypoint's longitude name (str): Comment for waypoint
def __init__(self, latitude, longitude, name):
    super(Waypoint, self).__init__(latitude, longitude)
    self.name = name.upper()
1,061,179
Parse waypoint data elements. Args: elements (list): Data values for fix Returns: nmea.Waypoint: Object representing data
def parse_elements(elements):
    if not len(elements) == 5:
        raise ValueError('Invalid WPL waypoint data')
    # Latitude and longitude are checked for validity during Fix
    # instantiation
    latitude = parse_latitude(elements[0], elements[1])
    longitude = parse_longitude(elements[2], elements[3])
    name = elements[4]
    return Waypoint(latitude, longitude, name)
1,061,181
Initialise a new ``Trigpoint`` object. Args: latitude (float): Location's latitude longitude (float): Location's longitude altitude (float): Location's altitude name (str): Name for location identity (int): Database identifier, if known
def __init__(self, latitude, longitude, altitude, name=None,
             identity=None):
    super(Trigpoint, self).__init__(latitude, longitude)
    self.altitude = altitude
    self.name = name
    self.identity = identity
1,061,325
Extended pretty printing for location strings. Args: format_spec (str): Coordinate formatting system to use Returns: str: Human readable string representation of ``Trigpoint`` object Raises: ValueError: Unknown value for ``format_spec``
def __format__(self, format_spec='dms'):
    location = [super(Trigpoint, self).__format__(format_spec)]
    if self.altitude:
        location.append('alt %im' % self.altitude)
    if self.name:
        return '%s (%s)' % (self.name, ' '.join(location))
    else:
        return ' '.join(location)
1,061,326
Returns the ImageDriver for a path, or None, based on the file extension. Arguments: path -- file path as str with a GDAL supported file extension
def driver_for_path(path, drivers=None):
    ext = (os.path.splitext(path)[1][1:] or path).lower()
    drivers = drivers or ImageDriver.registry if ext else {}
    for name, meta in drivers.items():
        if ext == meta.get('DMD_EXTENSION', '').lower():
            return ImageDriver(name)
    return None
1,061,421
Converts an OGR polygon to a 2D NumPy array. Arguments: geom -- OGR Geometry size -- array size in pixels as a tuple of (width, height) affine -- AffineTransform
def geom_to_array(geom, size, affine):
    driver = ImageDriver('MEM')
    rast = driver.raster(driver.ShortName, size)
    rast.affine = affine
    rast.sref = geom.GetSpatialReference()
    with MemoryLayer.from_records([(1, geom)]) as ml:
        status = gdal.RasterizeLayer(rast.ds, (1,), ml.layer,
                                     burn_values=(1,))
        arr = rast.array()
    rast.close()
    return arr
1,061,422
Returns a Raster from layer features. Arguments: layer -- Layer to rasterize rast -- Raster with target affine, size, and sref
def rasterize(layer, rast):
    driver = ImageDriver('MEM')
    r2 = driver.raster(driver.ShortName, rast.size)
    r2.affine = rast.affine
    sref = rast.sref
    if not sref.srid:
        sref = SpatialReference(4326)
    r2.sref = sref
    ml = MemoryLayer(sref, layer.GetGeomType())
    ml.load(layer)
    status = gdal.RasterizeLayer(
        r2.ds, (1,), ml.layer, options=['ATTRIBUTE=%s' % ml.id])
    ml.close()
    return r2
1,061,423
Returns a Raster instance. Arguments: path -- local or remote path as str or file-like object Keyword args: mode -- gdal constant representing access mode
def open(path, mode=gdalconst.GA_ReadOnly):
    path = getattr(path, 'name', path)
    try:
        return Raster(vsiprefix(path), mode)
    except AttributeError:
        try:
            imgdata = path.read()
        except AttributeError:
            raise TypeError('Not a file-like object providing read()')
        else:
            imgio = MemFileIO(delete=False)
            gdal.FileFromMemBuffer(imgio.name, imgdata)
            return Raster(imgio, mode)
    raise ValueError('Failed to open raster from "%r"' % path)
1,061,424
Returns an in-memory raster initialized from a pixel buffer. Arguments: data -- byte buffer of raw pixel data size -- two or three-tuple of (xsize, ysize, bandcount) bandtype -- band data type
def frombytes(data, size, bandtype=gdal.GDT_Byte):
    r = ImageDriver('MEM').raster('', size, bandtype)
    r.frombytes(data)
    return r
1,061,425
Generally this will be initialized from a six-element tuple in the format returned by gdal.Dataset.GetGeoTransform(). Arguments: xorigin -- top left corner x coordinate xscale -- x scaling rx -- x rotation yorigin -- top left corner y coordinate ry -- y rotation yscale -- y scaling
def __init__(self, xorigin, xscale, rx, yorigin, ry, yscale):
    # Origin coordinate in projected space.
    self.origin = (xorigin, yorigin)
    self.scale = (xscale, yscale)
    # Rotation in X and Y directions. (0, 0) is north up.
    self.rotation = (rx, ry)
    # Avoid repeated calls to tuple() by iterators and slices.
    self._len = len(self.tuple)
1,061,426
Convert image pixel/line coordinates to georeferenced x/y, return a generator of two-tuples. Arguments: coords -- input coordinates as iterable containing two-tuples/lists such as ((0, 0), (10, 10))
def project(self, coords):
    geotransform = self.tuple
    for x, y in coords:
        geo_x = geotransform[0] + geotransform[1] * x + geotransform[2] * y
        geo_y = geotransform[3] + geotransform[4] * x + geotransform[5] * y
        # Move the coordinate to the center of the pixel.
        geo_x += geotransform[1] / 2.0
        geo_y += geotransform[5] / 2.0
        yield geo_x, geo_y
1,061,427
Transform from projection coordinates (Xp,Yp) space to pixel/line (P,L) raster space, based on the provided geotransformation. Arguments: coords -- input coordinates as iterable containing two-tuples/lists such as ((-120, 38), (-121, 39))
def transform(self, coords):
    # Use local vars for better performance here.
    origin_x, origin_y = self.origin
    sx, sy = self.scale
    return [(int(math.floor((x - origin_x) / sx)),
             int(math.floor((y - origin_y) / sy)))
            for x, y in coords]
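A round trip through `transform` and `project` with a toy north-up geotransform (origin (100, 200), one-unit pixels; this assumes `tuple` returns the six-element geotransform, as `__init__` implies):

at = AffineTransform(100, 1, 0, 200, 0, -1)
print(at.transform([(100.5, 199.5)]))  # [(0, 0)]
print(list(at.project([(0, 0)])))      # [(100.5, 199.5)], the pixel centre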
1,061,428
Returns a copied Raster instance. Arguments: source -- the source Raster instance or filepath as str dest -- destination filepath as str
def copy(self, source, dest):
    if not self.copyable:
        raise IOError('Driver does not support raster copying')
    if not isinstance(source, Raster):
        source = Raster(source)
        should_close = True
    else:
        should_close = False
    if source.name == dest:
        raise ValueError(
            'Input and output are the same location: %s' % source.name)
    settings = driverdict_tolist(self.settings)
    ds = self.CreateCopy(dest, source.ds, self.strictmode,
                         options=settings)
    if should_close:
        source.close()
    return Raster(ds)
1,061,431
Returns a new Raster instance. gdal.Driver.Create() does not support all formats. Arguments: path -- file object or path as str size -- two or three-tuple of (xsize, ysize, bandcount) bandtype -- GDAL pixel data type
def raster(self, path, size, bandtype=gdal.GDT_Byte):
    path = getattr(path, 'name', path)
    try:
        is_multiband = len(size) > 2
        nx, ny, nbands = size if is_multiband else size + (1,)
    except (TypeError, ValueError) as exc:
        exc.args = ('Size must be 2 or 3-item sequence',)
        raise
    if nx < 1 or ny < 1:
        raise ValueError('Invalid raster size %s' % (size,))
    # Do not write to a non-empty file.
    if not self._is_empty(path):
        raise IOError('%s already exists, open with Raster()' % path)
    ds = self.Create(path, nx, ny, nbands, bandtype)
    if not ds:
        raise ValueError(
            'Could not create %s using %s' % (path, str(self)))
    return Raster(ds)
1,061,435
Initialize a Raster data set from a path or file Arguments: path -- path as str, file object, or gdal.Dataset Keyword args: mode -- gdal constant representing access mode
def __init__(self, path, mode=gdalconst.GA_ReadOnly):
    if path and not isinstance(path, gdal.Dataset):
        # Get the name if we have a file-like object.
        dataset = gdal.Open(getattr(path, 'name', path), mode)
    else:
        dataset = path
    if not dataset:
        raise IOError('Failed to open: "%s"' % path)
    self.ds = dataset
    self.name = self.ds.GetDescription()
    # Bands are not zero based, available bands are a 1-based list of ints.
    self.bandlist = range(1, len(self) + 1)
    # Initialize attrs without calling their setters.
    self._affine = AffineTransform(*dataset.GetGeoTransform())
    self._sref = SpatialReference(dataset.GetProjection())
    #self.dtype = gdal_array.codes[self[0].DataType]
    self._nodata = None
    self._envelope = None
    self._driver = None
    self.closed = False
    # Closes the gdal.Dataset
    dataset = None
1,061,436
Sets the affine transformation. Intercepts the gdal.Dataset call to ensure use as a property setter. Arguments: affine -- AffineTransform or six-tuple of geotransformation values
def SetGeoTransform(self, affine):
    if isinstance(affine, collections.Sequence):
        affine = AffineTransform(*affine)
    self._affine = affine
    self.ds.SetGeoTransform(affine)
1,061,440
Returns a 4-tuple pixel window (x_offset, y_offset, x_size, y_size). Arguments: envelope -- coordinate extent tuple or Envelope
def get_offset(self, envelope):
    if isinstance(envelope, collections.Sequence):
        envelope = Envelope(envelope)
    if not (self.envelope.contains(envelope)
            or self.envelope.intersects(envelope)):
        raise ValueError('Envelope does not intersect with this extent')
    coords = self.affine.transform((envelope.ul, envelope.lr))
    nxy = [(min(dest, size) - origin) or 1
           for size, origin, dest in zip(self.size, *coords)]
    return coords[0] + tuple(nxy)
1,061,444
Returns a new instance resampled to provided size. Arguments: size -- tuple of x,y image dimensions
def resample(self, size, interpolation=gdalconst.GRA_NearestNeighbour):
    # Find the scaling factor for pixel size.
    factors = (size[0] / float(self.RasterXSize),
               size[1] / float(self.RasterYSize))
    affine = AffineTransform(*tuple(self.affine))
    affine.scale = (affine.scale[0] / factors[0],
                    affine.scale[1] / factors[1])
    dest = self.new(size, affine)
    # Uses self and dest projection when set to None
    gdal.ReprojectImage(self.ds, dest.ds, None, None, interpolation)
    return dest
1,061,452
Save this instance to the path and format provided. Arguments: to -- output path as str, file, or MemFileIO instance Keyword args: driver -- GDAL driver name as string or ImageDriver
def save(self, to, driver=None):
    path = getattr(to, 'name', to)
    if not driver and hasattr(path, 'encode'):
        driver = driver_for_path(path, self.driver.filter_copyable())
    elif hasattr(driver, 'encode'):
        driver = ImageDriver(driver)
    if driver is None or not driver.copyable:
        raise ValueError('Copy supporting driver not found for %s' % path)
    driver.copy(self, path).close()
1,061,453
Sets the spatial reference. Intercepts the gdal.Dataset call to ensure use as a property setter. Arguments: sref -- SpatialReference or any format supported by the constructor
def SetProjection(self, sref):
    if not hasattr(sref, 'ExportToWkt'):
        sref = SpatialReference(sref)
    self._sref = sref
    self.ds.SetProjection(sref.ExportToWkt())
1,061,454
Returns a new reprojected instance. Arguments: to_sref -- spatial reference as a proj4 or wkt string, or a SpatialReference Keyword args: dest -- filepath as str interpolation -- GDAL interpolation type
def warp(self, to_sref, dest=None,
         interpolation=gdalconst.GRA_NearestNeighbour):
    if not hasattr(to_sref, 'ExportToWkt'):
        to_sref = SpatialReference(to_sref)
    dest_wkt = to_sref.ExportToWkt()
    dtype = self[0].DataType
    err_thresh = 0.125
    # Determine new values for destination raster dimensions and
    # geotransform.
    vrt = gdal.AutoCreateWarpedVRT(self.ds, None, dest_wkt, interpolation,
                                   err_thresh)
    if vrt is None:
        raise ValueError('Could not warp %s to %s' % (self, dest_wkt))
    warpsize = (vrt.RasterXSize, vrt.RasterYSize, len(self))
    warptrans = vrt.GetGeoTransform()
    vrt = None
    if dest is None:
        imgio = MemFileIO()
        rwarp = self.driver.raster(imgio, warpsize, dtype)
        imgio.close()
    else:
        rwarp = self.driver.raster(dest, warpsize, dtype)
    rwarp.SetGeoTransform(warptrans)
    rwarp.SetProjection(to_sref)
    if self.nodata is not None:
        for band in rwarp:
            band.SetNoDataValue(self.nodata)
        band = None
    # Uses self and rwarp projection when set to None
    gdal.ReprojectImage(self.ds, rwarp.ds, None, None, interpolation)
    return rwarp
1,061,456
Generate name variations for a given name. Args: name (six.text_type): The name whose variations are to be generated. Returns: list: All the name variations for the given name. Notes: Uses `unidecode` for doing unicode characters transliteration to ASCII ones. This was chosen so that we can map both full names of authors in HEP records and user's input to the same space and thus make exact queries work.
def generate_name_variations(name):

    def _update_name_variations_with_product(set_a, set_b):
        name_variations.update([
            unidecode((names_variation[0]
                       + separator
                       + names_variation[1]).strip(
                ''.join(_LASTNAME_NON_LASTNAME_SEPARATORS))).lower()
            for names_variation in product(set_a, set_b)
            for separator in _LASTNAME_NON_LASTNAME_SEPARATORS
        ])

    parsed_name = ParsedName.loads(name)

    # Handle rare-case of single-name
    if len(parsed_name) == 1:
        return [parsed_name.dumps().lower()]

    name_variations = set()

    # We need to filter out empty entries, since HumanName for this name
    # `Perelstein,, Maxim` returns a first_list with an empty string element.
    non_lastnames = [
        non_lastname
        for non_lastname in parsed_name.first_list + parsed_name.suffix_list
        if non_lastname
    ]

    # This is needed because erroneous data (e.g. having many authors in a
    # single authors field) ends up requiring a lot of memory (due to
    # combinatorial expansion of all non lastnames). The policy is to use
    # the input as a name variation, since this data will have to be curated.
    if len(non_lastnames) > _NAMES_MAX_NUMBER_THRESHOLD \
            or len(parsed_name.last_list) > _NAMES_MAX_NUMBER_THRESHOLD:
        LOGGER.error('Skipping name variations generation - too many names '
                     'in: "%s"', name)
        return [name]

    non_lastnames_variations = \
        _generate_non_lastnames_variations(non_lastnames)
    lastnames_variations = \
        _generate_lastnames_variations(parsed_name.last_list)

    # Create variations where lastnames come first and are separated from
    # non lastnames either by space or comma.
    _update_name_variations_with_product(lastnames_variations,
                                         non_lastnames_variations)

    # Second part of transformations - having the lastnames in the end.
    _update_name_variations_with_product(non_lastnames_variations,
                                         lastnames_variations)

    return list(name_variations)
1,061,685
Create a ParsedName instance. Args: name (Union[str, HumanName]): The name to be parsed (must be non-empty and not None). constants (:class:`nameparser.config.Constants`): Configuration for `HumanName` instantiation. (Can be None; if provided it overwrites the default one generated in :method:`prepare_nameparser_constants`.)
def __init__(self, name, constants=None):
    if not constants:
        constants = ParsedName.constants
    if isinstance(name, HumanName):
        self._parsed_name = name
    else:
        self._parsed_name = HumanName(name, constants=constants)
    self._parsed_name.capitalize()
1,061,686
Filter down obj based on marks, presuming keys should be kept/deleted. Args: obj: The object to be filtered. Filtering is done in-place. marks: An object mapping id(obj) --> {DELETE,KEEP} These values apply to the entire subtree, unless inverted. presumption: The default action to take on all keys.
def filter_object(obj, marks, presumption=DELETE):
    if isinstance(obj, list):
        keys = reversed(range(0, len(obj)))
    else:
        # Materialise the keys so entries can be deleted while iterating.
        keys = list(obj.keys())
    for k in keys:
        v = obj[k]
        m = marks.get(id(v), UNSPECIFIED)
        if m == DELETE:
            del obj[k]  # an explicit deletion is irreversible.
        elif m == KEEP or presumption == KEEP:
            # keep descending, in case there are nodes we should delete.
            if isinstance(v, list) or isinstance(v, dict):
                filter_object(v, marks, presumption=KEEP)
        elif m == UNSPECIFIED:  # ... and presumption == DELETE
            if isinstance(v, list) or isinstance(v, dict):
                filter_object(v, marks, presumption=DELETE)
                if len(v) == 0:
                    del obj[k]
            else:
                del obj[k]
1,061,823
Return the earliest among the schema-compliant dates. This is a convenience wrapper around :ref:`PartialDate`, which should be used instead if more features are needed. Args: dates(list): List of dates from which oldest/earliest one will be returned full_date(bool): Adds month and/or day as "01" if they are missing Returns: str: Earliest date from provided list
def earliest_date(dates, full_date=False):
    min_date = min(PartialDate.loads(date) for date in dates)
    if not min_date.month and full_date:
        min_date.month = 1
    if not min_date.day and full_date:
        min_date.day = 1
    return min_date.dumps()
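For instance (hedged on `PartialDate` accepting partial ISO dates, as the docstring suggests):

print(earliest_date(['2015-09-10', '2014']))                  # '2014'
print(earliest_date(['2015-09-10', '2014'], full_date=True))  # '2014-01-01'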
1,062,172
Adds a scheme to a url if not present. Args: url (string): a url, assumed to start with netloc default_scheme (string): a scheme to be added Returns: string: URL with a scheme
def ensure_scheme(url, default_scheme='http'):
    parsed = urlsplit(url, scheme=default_scheme)
    if not parsed.netloc:
        parsed = SplitResult(
            scheme=parsed.scheme,
            netloc=parsed.path,
            path='',
            query=parsed.query,
            fragment=parsed.fragment
        )
    return urlunsplit(parsed)
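Because `urlsplit` parses 'example.com/path' as all path and no netloc, the `SplitResult` shuffle above is what makes the bare-domain case work:

print(ensure_scheme('example.com/path'))     # 'http://example.com/path'
print(ensure_scheme('https://example.com'))  # unchanged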
1,062,186
Return the __init__ args (minus 'self') for @cls Args: cls: class, instance or callable Returns: list of str, the arguments minus 'self'
def init_args(cls):
    # This looks insanely goofy, but seems to literally be the
    # only thing that actually works.  Your obvious ways to
    # accomplish this task do not apply here.
    try:
        # Assume it's a factory function, static method, or other callable
        argspec = getargspec(cls)
    except TypeError:
        # assume it's a class
        argspec = getargspec(cls.__init__)
    args = argspec.args
    # Note: There is a special place in hell for people who don't
    # call the first method argument 'self'.
    if args[0] == 'self':
        args.remove('self')
    return args
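For example, with a plain class (on the Python versions where `inspect.getargspec` still exists):

class Point(object):
    def __init__(self, x, y=0):
        self.x, self.y = x, y

print(init_args(Point))  # ['x', 'y']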
1,062,210
Convert metadata from WA-KAT to Dublin core dictionary like structure, which may be easily converted to xml using :mod:`xmltodict` module. Args: data (dict): Nested WA-KAT data. See tests for example. Returns: dict: Dict in dublin core format.
def _convert_metadata(data):
    def compose(val, arguments=None):
        if val is None:
            return None
        if not arguments:
            return val
        arguments["#text"] = val
        return arguments

    conspect = data.get("conspect", {})
    author_name = data.get("author", {}).get("name")
    author_code = data.get("author", {}).get("code")

    metadata = odict[
        "dc:title": data.get("title"),
        "dcterms:alternative": data.get("subtitle"),
        "dc:creator": compose(author_name, {"@id": author_code}),
        "dc:publisher": data.get("publisher"),
        "dc:description": data.get("annotation"),
        "dc:coverage": compose(data.get("place"), {"@xml:lang": "cze"}),
        "dc:language": compose(data.get("language"),
                               {"@schema": "ISO 639-2"}),
        "dcterms:created": data.get("from_year"),
        "dcterms:accrualperiodicity": compose(
            data.get("periodicity"), {"@xml:lang": "cze"}
        ),
        "dc:identifier": [
            {"@rdf:resource": data["url"]},
            compose(data.get("issn"), {"@xsi:type": "ISSN"}),
            compose(conspect.get("mdt"), {"@xsi:type": "MDT"}),
            compose(conspect.get("ddc"), {"@xsi:type": "DDC"}),
        ],
        "dc:subject": [
            compose(conspect.get("mdt"), {"@xsi:type": "dcterms:UDC"}),
            compose(conspect.get("ddc"), {"@xsi:type": "dcterms:DDC"}),
        ],
    ]

    def pick_keywords(data, source):
        return [
            x["zahlavi"]
            for x in data.get(source, [])
            if x.get("zahlavi")
        ]

    # parse and add keywords (keywords are in dicts with other data, I want
    # just the free-text descriptions)
    cz_keywords = pick_keywords(data, "cz_keywords")
    en_keywords = pick_keywords(data, "en_keywords")

    if cz_keywords:
        metadata["dc:subject"].append({
            "@xml:lang": "cz",
            "#text": ", ".join(cz_keywords)
        })
    if en_keywords:
        metadata["dc:subject"].append({
            "@xml:lang": "en",
            "#text": ", ".join(en_keywords)
        })

    # filter unset identifiers - TODO: rewrite to recursive alg.
    metadata["dc:identifier"] = [x for x in metadata["dc:identifier"] if x]
    metadata["dc:subject"] = [x for x in metadata["dc:subject"] if x]

    return metadata
1,062,417
Convert WA-KAT `data` to Dublin core XML. Args: data (dict): Nested WA-KAT data. See tests for example. Returns: unicode: XML with dublin core.
def to_dc(data):
    root = odict[
        "metadata": odict[
            "@xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "@xmlns:dc": "http://purl.org/dc/elements/1.1/",
            "@xmlns:dcterms": "http://purl.org/dc/terms/",
            "@xmlns:rdf": "http://www.w3.org/1999/02/22-rdf-syntax-ns#",
        ]
    ]

    # map metadata to the root element, skip None values
    for key, val in _convert_metadata(_remove_none(data)).iteritems():
        if val is None:
            continue
        if isinstance(val, basestring) and not val.strip():
            continue
        if isinstance(val, str):
            val = val.decode("utf-8")
        root["metadata"][key] = val

    return unparse(root, pretty=True, indent="  ")
1,062,419
Initialization of instances: Args: section (str): invalid section name. Attributes: section (str): invalid section name.
def __init__(self, section):
    self.section = section
    super().__init__('invalid section name: {}'.format(section))
1,062,513
Initialization of instances: Args: option (str): invalid option name. Attributes: option (str): invalid option name.
def __init__(self, option):
    self.option = option
    super().__init__('invalid option name: {}'.format(option))
1,062,514
Gets escape-codes for flag combinations. Arguments: combination (int): Either a single integer-convertible flag or an OR'd flag-combination. Returns: A semi-colon-delimited string of appropriate escape sequences. Raises: errors.FlagError if the combination is out-of-range.
def codify(combination):
    if (isinstance(combination, int)
            and (combination < 0 or combination >= LIMIT)):
        raise errors.FlagError("Out-of-range flag-combination!")
    codes = []
    for enum in (Style, Color, Fill):
        for flag in enum:
            if combination & flag:
                codes.append(str(flag))
    return ";".join(codes)
1,062,703
Look into the database and return :class:`RequestInfo` if the `url` was already analyzed, or create and return new instance, if not. If the `new` is set to True, always create new instance. Args: url (str): URL of the analyzed resource. new (bool, default False): Force new instance? Returns: obj: :class:`RequestInfo` instance.
def get_cached_or_new(url, new=False):
    garbage_collection()
    old_req = DATABASE.get(url)
    if old_req and not new:
        return old_req
    if not (url.startswith("http://") or url.startswith("https://")):
        raise ValueError("Invalid URL `%s`!" % url)
    req = RequestInfo(url=url)
    DATABASE[url] = req
    return req
1,062,990
Collect and remove all :class:`.RequestInfo` objects older than `time_limit` (in seconds). Args: time_limit (float, default YEAR / 12): Collect objects older than this limit.
def garbage_collection(time_limit=YEAR / 12.0):
    # Materialise the list first; deleting from DATABASE while a generator
    # is still iterating over its values would fail on modern dicts.
    expired_request_infos = [
        ri for ri in DATABASE.values()
        if ri.creation_ts + time_limit <= time.time()
    ]
    for ri in expired_request_infos:
        del DATABASE[ri.url]
1,062,991
Go over all attributes in `model` and add :class:`SourceString` to them. Args: model (obj): :class:`Model` instance. Returns: obj: :class:`Model` instance with :class:`SourceString` descriptors.
def _add_source(model):
    ignored_keys = {"author_tags", "original_xml", "additional_info"}

    # convert all values to source strings
    source = "Aleph"
    for key, val in model.get_mapping().iteritems():
        if key in ignored_keys:
            continue
        if type(val) in [list, tuple]:
            ss_val = [
                SourceString(item, source).to_dict()
                for item in val
            ]
        else:
            ss_val = [SourceString(val, source).to_dict()]
        setattr(model, key, ss_val)
    return model
1,063,054
Query Aleph for records with the given `issn`. The lookup is directed to the NTK's Aleph. Args: issn (str): ISSN of the periodical. Yields: obj: :class:`Model` instance for each record.
def by_issn(issn):
    # monkeypatched to allow search in NTK's Aleph
    old_url = aleph.ALEPH_URL
    aleph.ALEPH_URL = NTK_ALEPH_URL
    records = aleph.getISSNsXML(issn, base="STK02")
    aleph.ALEPH_URL = old_url

    # process all records
    for record in records:
        marc = MARCXMLRecord(record)

        # following values were requested by @bjackova in
        # https://github.com/WebArchivCZ/WA-KAT/issues/66
        additional_info = {
            "222": marc.get("222", None),
            "PER": marc.get("PER", None),
            "776": marc.get("776", None),
            "008": marc.get("008", None),
            "alt_end_date": ""  # just reminder that it is filled later
        }
        additional_info = {
            key: val
            for key, val in additional_info.iteritems()
            if val
        }

        # check whether there is alternative date in 008; .get() is needed,
        # because the filter above drops empty values, so the key may be gone
        alt_end_date = None
        alt_creation_date = None
        if additional_info.get("008"):
            # 131114c20139999xr-q||p|s||||||---a0eng-c -> 2013
            alt_creation_date = additional_info["008"][7:11]

            # 131114c20139999xr-q||p|s||||||---a0eng-c -> 9999
            alt_end_date = additional_info["008"][11:15]

            if alt_end_date in ["9999", "****"]:
                alt_creation_date += "-"  # library convention is xxxx-
                alt_end_date = None

        additional_info["alt_end_date"] = alt_end_date

        # parse author
        author = Author.parse_author(marc)

        model = Model(
            url=_first_or_none(marc.get("856u")),
            conspect=_first_or_none(marc.get("072a")),
            annotation_tags=_first_or_none(marc.get("520a")),
            periodicity=_first_or_none(marc.get("310a")),
            title_tags=_first_or_none(marc.get("222a")),
            subtitle_tags=_first_or_none(marc.get("245b")),
            place_tags=remove_hairs(
                _first_or_none(marc.get("260a")) or ""
            ),
            author_tags=author._asdict() if author else None,
            publisher_tags=remove_hairs(
                _first_or_none(marc.get("260b")) or
                _first_or_none(marc.get("264b")) or
                "",
                ", "
            ),
            creation_dates=_first_or_none(
                marc.get("260c", [alt_creation_date])
            ),
            lang_tags=_first_or_none(marc.get("040b")),
            keyword_tags=marc.get("650a07"),
            source_info=_first_or_none(marc.get("500a")),
            original_xml=record,
            additional_info=additional_info,
        )

        yield _add_source(model)
1,063,055
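Usage sketch; this performs a live query against NTK's Aleph, and the ISSN is a placeholder:

for model in by_issn("1234-5678"):   # hypothetical ISSN
    title = model.title_tags         # wrapped by _add_source()
    # e.g. a list of SourceString dicts tagged with source "Aleph"
    # (the exact dict shape depends on SourceString.to_dict())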
Parse author from `marc` data. Args: marc (obj): :class:`.MARCXMLRecord` instance. See module :mod:`.marcxml_parser` for details. Returns: obj: :class:`Author`.
def parse_author(cls, marc):
    name = None
    code = None
    linked_forms = None
    is_corporation = None
    record = None

    # parse information from the record
    if marc["100a"]:  # persons
        name = _first_or_none(marc["100a"])
        code = _first_or_none(marc["1007"])
        is_corporation = False
        record = marc.datafields["100"][0]  # transport all fields
    elif marc["110a"]:  # corporations
        name = _first_or_none(marc["110a"])
        code = _first_or_none(marc["1107"])
        is_corporation = True
        record = marc.datafields["110"][0]  # transport all fields
    else:
        return None

    # parse linked forms (alternative names)
    linked_forms = marc["410a2 "]

    # put together alt_name; the bool is_corporation picks the descriptor
    type_descriptor = ["osoba", "organizace"]
    alt_name = "%s [%s]" % (name, type_descriptor[is_corporation])

    if linked_forms:
        alt_name += " (" + ", ".join(linked_forms) + ")"

    return cls(
        name=name,
        code=code,
        linked_forms=linked_forms,
        is_corporation=is_corporation,
        record=record,
        alt_name=alt_name,
    )
1,063,056
Look for author in NK Aleph authority base by `name`. Args: name (str): Author's name. Yields: obj: :class:`Author` instances.
def search_by_name(cls, name): records = aleph.downloadRecords( aleph.searchInAleph("aut", name, False, "wau") ) for record in records: marc = MARCXMLRecord(record) author = cls.parse_author(marc) if author: yield author
1,063,057
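Usage sketch; this queries the NK Aleph authority base over the network, and the name is a placeholder:

for author in search_by_name(u"Neruda, Jan"):  # hypothetical author name
    alt = author.alt_name                      # e.g. u"Neruda, Jan [osoba]"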
Use this function to automatically filter all the entries defined for a given rule. Args: conflicts_list (List[Conflict]): the list of conflicts to filter. fields (List[str]): fields to filter out, using an accessor syntax of the form ``field.subfield.subsubfield``. Returns: List[Conflict]: the given list filtered by `fields`.
def filter_conflicts(conflicts_list, fields): for field in fields: conflicts_list = filter_conflicts_by_path(conflicts_list, field) return conflicts_list
1,063,109
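A hedged sketch; `Conflict` objects and `filter_conflicts_by_path` come from the surrounding module, and the field paths here are hypothetical. Given a list `conflicts` of Conflict objects:

kept = filter_conflicts(conflicts, ["authors.full_name", "titles.source"])
# conflicts whose path falls under either field are filtered out, one field
# at a time; the remaining conflicts are returned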
Convert MRC data format to MARC XML. Args: mrc (str): MRC as string. Returns: str: XML with MARC.
def mrc_to_marc(mrc): # ignore blank lines lines = [ line for line in mrc.splitlines() if line.strip() ] def split_to_parts(lines): for line in lines: first_part, second_part = line.split(" L ", 1) yield line, first_part, second_part.lstrip() control_lines = [] data_lines = [] for line, first_part, second_part in split_to_parts(lines): if second_part.startswith("$"): data_lines.append(line) else: control_lines.append(line) # convert controlfield lines record = MARCXMLRecord() record.oai_marc = True for line, descr, content in split_to_parts(control_lines): record.controlfields[descr.strip()[:3]] = content def get_subfield_dict(line): fields = ( (field[0], field[1:]) for field in line.split("$$")[1:] ) fields_dict = defaultdict(list) for key, val in fields: fields_dict[key].append(val) return fields_dict # convert datafield lines for line, descr, content_line in split_to_parts(data_lines): name = descr[:3] i1 = descr[3] i2 = descr[4] record.add_data_field( name, i1, i2, get_subfield_dict(content_line) ) return record.to_XML()
1,063,190
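Example input in the MRC line format the parser expects: control fields carry a plain value after the " L " separator, data fields carry indicators in positions 4-5 and `$$`-prefixed subfields:

mrc = "\n".join([
    "001 L cpk19990652180",                  # control field - no subfields
    "24510 L $$aExample title$$bsubtitle",   # data field "245", i1="1", i2="0"
])
xml = mrc_to_marc(mrc)  # MARC XML with one controlfield and one datafield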
Convert `dicts` under `code` to MRC. This is used to compose some of the data from the user's input into the MRC template, which is then converted to MARC XML / Dublin Core. Args: code (str): Code of the Aleph field which should be used. dicts (dict): Dict with Aleph fields (i1/i2, a..z, 0..9) and the information in those fields. Returns: list: List of lines with MRC data.
def dicts_to_mrc(code, dicts): def _dict_to_mrc(code, d): i1 = d.get("i1", d.get("ind1")) i2 = d.get("i2", d.get("ind2")) one_chars = [k for k in d.keys() if len(k) == 1] out = "%s%s%s L " % (code, i1, i2) for key in resorted(one_chars): for item in d[key]: out += "$$%s%s" % (key, item) return out return [ _dict_to_mrc(code, d) for d in dicts ]
1,063,191
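For example (`resorted` is the module's subfield-ordering helper; with a single subfield the order is irrelevant):

lines = dicts_to_mrc("245", [{"i1": "1", "i2": "0", "a": ["Example title"]}])
# lines == ["24510 L $$aExample title"]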
Convert `val` to MRC, whether it is a string, a dict, or a list of dicts. Args: code (str): Code of the field. val (str, dict, or list): Value of the field. Returns: list: MRC lines for the output template.
def item_to_mrc(code, val): if isinstance(val, basestring): return [val_to_mrc(code, val)] if isinstance(val, dict): val = [val] return dicts_to_mrc(code, val)
1,063,193
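Usage sketch; `val_to_mrc` is the module's helper for plain string values:

item_to_mrc("245", {"i1": "1", "i2": "0", "a": ["Example title"]})
# -> ["24510 L $$aExample title"]  (dict is wrapped in a list, then dicts_to_mrc)
item_to_mrc("001", "cpk19990652180")
# -> [val_to_mrc("001", "cpk19990652180")] - a single formatted line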
Create a material stress table. Args: temperatures: A sequence of temperatures. materials: A mapping of material names to sequences of stress values which correspond to the temperatures.
def __init__(self, temperatures, materials): self._table = Table( column_keys=temperatures, rows_mapping=materials )
1,063,605
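A construction sketch; `StressTable` is a hypothetical name for the class this initializer belongs to, `Table` is the two-dimensional lookup used internally, and the numbers are made up:

table = StressTable(
    temperatures=(100, 200, 300),      # column keys
    materials={
        "steel": (240, 210, 180),      # one stress value per temperature
        "aluminium": (90, 70, 50),
    },
)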
Filter each tuple according to visibility. Args: key_tuples: A sequence of tuples of equal length (i.e. rectangular). visibilities: A sequence of booleans equal in length to the tuples contained in key_tuples. Returns: A sequence equal in length to key_tuples where the items are tuples with a length corresponding to the number of items in visibilities which are True.
def strip_hidden(key_tuples, visibilities): result = [] for key_tuple in key_tuples: if len(key_tuple) != len(visibilities): raise ValueError( "length of key tuple {} is not equal to length of visibilities {}".format( key_tuple, visibilities ) ) filtered_tuple = tuple(item for item, visible in zip(key_tuple, visibilities) if visible) result.append(filtered_tuple) return result
1,063,812
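For example:

strip_hidden([("a", "b", "c"), ("d", "e", "f")], (True, False, True))
# -> [("a", "c"), ("d", "f")] - the middle item of each tuple is hidden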