Dataset schema (docstring/function pairs):

    docstring            string, length 52 – 499
    function             string, length 67 – 35.2k
    __index_level_0__    int64, 52.6k – 1.16M
Calculate the optimal rotation from ``P`` onto ``Q``.

Using the Kabsch algorithm the optimal rotation matrix for the rotation of
``other`` onto ``self`` is calculated.
The algorithm is described very well in
`wikipedia <http://en.wikipedia.org/wiki/Kabsch_algorithm>`_.

Args:
    other (Cartesian):

Returns:
    :class:`~numpy.array`: Rotation matrix

def get_kabsch_rotation(Q, P):
    # Naming of variables follows the wikipedia article:
    # http://en.wikipedia.org/wiki/Kabsch_algorithm
    A = np.dot(np.transpose(P), Q)
    # One can't initialize an array over its transposed
    V, S, W = np.linalg.svd(A)  # pylint:disable=unused-variable
    W = W.T
    d = np.linalg.det(np.dot(W, V.T))
    return np.linalg.multi_dot((W, np.diag([1., 1., d]), V.T))
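A minimal usage sketch (not part of the source): align two congruent point sets with the function above, assuming ``numpy`` is imported as ``np``. Since ``A = P.T @ Q``, the returned matrix ``U`` satisfies ``P @ U.T ≈ Q``.

import numpy as np

# Q rotated 90 degrees about the z-axis gives P (made-up data).
Q = np.eye(3)
Rz = np.array([[0., -1., 0.],
               [1.,  0., 0.],
               [0.,  0., 1.]])
P = Q @ Rz.T

U = get_kabsch_rotation(Q, P)
assert np.allclose(P @ U.T, Q)  # P rotated by U reproduces Q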
952,611
Returns E(B-V), in mags, at the specified location(s) on the sky.

Args:
    coords (:obj:`astropy.coordinates.SkyCoord`): The coordinates to query.

Returns:
    A float array of the reddening, in magnitudes of E(B-V), at the
    selected coordinates.

def query(self, coords, **kwargs):
    return super(Lenz2017Query, self).query(coords, **kwargs)
952,804
Create a configuration file.

Writes the current state of settings into a configuration file.

.. note:: Since a file is permanently written, this function is
    strictly speaking not side-effect free.

Args:
    filepath (str): Where to write the file.
        The default is under both UNIX and Windows ``~/.chemcoordrc``.
    overwrite (bool):

Returns:
    None:

def write_configuration_file(filepath=_give_default_file_path(),
                             overwrite=False):
    config = configparser.ConfigParser()
    config.read_dict(settings)

    if os.path.isfile(filepath) and not overwrite:
        try:
            raise FileExistsError
        except NameError:  # because of python2
            warn('File exists already and overwrite is False (default).')
    else:
        with open(filepath, 'w') as configfile:
            config.write(configfile)
952,806
Read the configuration file.

.. note:: This function changes ``cc.settings`` inplace and is therefore
    not side-effect free.

Args:
    filepath (str): Where to read the file.
        The default is under both UNIX and Windows ``~/.chemcoordrc``.

Returns:
    None:

def read_configuration_file(filepath=_give_default_file_path()):
    config = configparser.ConfigParser()
    config.read(filepath)

    def get_correct_type(section, key, config):
        def getstring(section, key, config):
            return config[section][key]

        def getinteger(section, key, config):  # pylint:disable=unused-variable
            return config[section].getint(key)

        def getboolean(section, key, config):
            return config[section].getboolean(key)

        def getfloat(section, key, config):  # pylint:disable=unused-variable
            return config[section].getfloat(key)

        special_actions = {}  # Something different than a string is expected
        special_actions['defaults'] = {}
        special_actions['defaults']['use_lookup'] = getboolean
        try:
            return special_actions[section][key](section, key, config)
        except KeyError:
            return getstring(section, key, config)

    for section in config.sections():
        for key in config[section]:
            settings[section][key] = get_correct_type(section, key, config)
    return settings
952,807
Cut a sphere specified by origin and radius.

Args:
    radius (float):
    origin (list): Please note that you can also pass an integer.
        In this case it is interpreted as the index of the atom
        which is taken as origin.
    outside_sliced (bool): If True, atoms outside the sphere are cut out;
        if False, atoms inside the sphere are cut out.
    preserve_bonds (bool): Do not cut covalent bonds.

Returns:
    Cartesian:

def cut_sphere(self, radius=15., origin=None,
               outside_sliced=True, preserve_bonds=False):
    if origin is None:
        origin = np.zeros(3)
    elif pd.api.types.is_list_like(origin):
        origin = np.array(origin, dtype='f8')
    else:
        origin = self.loc[origin, ['x', 'y', 'z']]

    molecule = self.get_distance_to(origin)
    if outside_sliced:
        molecule = molecule[molecule['distance'] < radius]
    else:
        molecule = molecule[molecule['distance'] > radius]

    if preserve_bonds:
        molecule = self._preserve_bonds(molecule)

    return molecule
952,866
Return the mass-weighted average location.

Args:
    None

Returns:
    :class:`numpy.ndarray`:

def get_barycenter(self):
    try:
        mass = self['mass'].values
    except KeyError:
        mass = self.add_data('mass')['mass'].values
    pos = self.loc[:, ['x', 'y', 'z']].values
    return (pos * mass[:, None]).sum(axis=0) / self.get_total_mass()
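A standalone illustration of the same mass-weighted average with plain numpy and made-up masses (not from the source):

import numpy as np

pos = np.array([[0., 0., 0.],    # two atom positions (hypothetical)
                [1., 0., 0.]])
mass = np.array([1., 3.])        # the second atom is three times heavier

barycenter = (pos * mass[:, None]).sum(axis=0) / mass.sum()
# -> array([0.75, 0., 0.]): pulled towards the heavier atom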
952,868
Restrict a bond dictionary to self.

Args:
    bond_dict (dict): Look into :meth:`~chemcoord.Cartesian.get_bonds`,
        to see examples for a bond_dict.

Returns:
    bond dictionary

def restrict_bond_dict(self, bond_dict):
    return {j: bond_dict[j] & set(self.index) for j in self.index}
952,873
Return self without the specified fragments.

Args:
    fragments: Either a list of :class:`~chemcoord.Cartesian` or a
        :class:`~chemcoord.Cartesian`.
    use_lookup (bool): Use a lookup variable for
        :meth:`~chemcoord.Cartesian.get_bonds`. The default is
        specified in ``settings['defaults']['use_lookup']``

Returns:
    list: List containing :class:`~chemcoord.Cartesian`.

def get_without(self, fragments, use_lookup=None):
    if use_lookup is None:
        use_lookup = settings['defaults']['use_lookup']
    if pd.api.types.is_list_like(fragments):
        for fragment in fragments:
            try:
                index_of_all_fragments |= fragment.index
            except NameError:
                index_of_all_fragments = fragment.index
    else:
        index_of_all_fragments = fragments.index
    missing_part = self.loc[self.index.difference(index_of_all_fragments)]
    missing_part = missing_part.fragmentate(use_lookup=use_lookup)
    return sorted(missing_part, key=len, reverse=True)
952,875
Calculate the shortest distance between self and other

Args:
    other (Cartesian):

Returns:
    tuple: Returns a tuple ``i, j, d`` with the following meaning:

    ``i``: The index on self that minimises the pairwise distance.

    ``j``: The index on other that minimises the pairwise distance.

    ``d``: The distance between self and other. (float)

def get_shortest_distance(self, other):
    coords = ['x', 'y', 'z']
    pos1 = self.loc[:, coords].values
    pos2 = other.loc[:, coords].values
    D = self._jit_pairwise_distances(pos1, pos2)
    i, j = np.unravel_index(D.argmin(), D.shape)
    d = D[i, j]
    i, j = dict(enumerate(self.index))[i], dict(enumerate(other.index))[j]
    return i, j, d
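``_jit_pairwise_distances`` is library-internal; as an illustration (not from the source), the same argmin pattern with a plain numpy distance matrix and assumed data:

import numpy as np

pos1 = np.array([[0., 0., 0.], [5., 0., 0.]])
pos2 = np.array([[1., 1., 0.], [9., 9., 9.]])

D = np.linalg.norm(pos1[:, None, :] - pos2[None, :, :], axis=-1)
i, j = np.unravel_index(D.argmin(), D.shape)
print(i, j, D[i, j])  # 0 0 1.414... -> indices and distance of the closest pair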
952,877
Return the reindexed version of Cartesian.

Args:
    rename_dict (dict): A dictionary mapping integers to integers.

Returns:
    Cartesian: A renamed copy according to the dictionary passed.

def change_numbering(self, rename_dict, inplace=False):
    output = self if inplace else self.copy()
    new_index = [rename_dict.get(key, key) for key in self.index]
    output.index = new_index
    if not inplace:
        return output
952,882
Returns the MD5 checksum of a file.

Args:
    fname (str): Filename
    chunk_size (Optional[int]): Size (in bytes) of the chunks that should
        be read in at once. Increasing chunk size reduces the number of
        reads required, but increases the memory usage. Defaults to 1024.

Returns:
    The MD5 checksum of the file, which is a string.

def get_md5sum(fname, chunk_size=1024):
    def iter_chunks(f):
        while True:
            chunk = f.read(chunk_size)
            if not chunk:
                break
            yield chunk

    sig = hashlib.md5()

    with open(fname, 'rb') as f:
        for chunk in iter_chunks(f):
            sig.update(chunk)

        # data = f.read()
        # return hashlib.md5(data).hexdigest()

    return sig.hexdigest()
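A small usage sketch (not from the source), hashing a throw-away file:

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b'hello world')

print(get_md5sum(tmp.name, chunk_size=4))
# '5eb63bbbe01eeed093cb22bb8f5acdc3' -- same as hashlib.md5(b'hello world')
os.unlink(tmp.name)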
952,941
Downloads a file.

Args:
    url (str): The URL to download.
    fname (Optional[str]): The filename to store the downloaded file in.
        If `None`, take the filename from the URL. Defaults to `None`.

Returns:
    The filename the URL was downloaded to.

Raises:
    requests.exceptions.HTTPError: There was a problem connecting to the
        URL.

def download(url, fname=None):
    # Determine the filename
    if fname is None:
        fname = url.split('/')[-1]

    # Stream the URL as a file, copying to local disk
    with contextlib.closing(requests.get(url, stream=True)) as r:
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError as error:
            print('Error connecting to URL: "{}"'.format(url))
            print(r.text)
            raise error

        with open(fname, 'wb') as f:
            shutil.copyfileobj(r.raw, f)

    return fname
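Usage sketch with a hypothetical URL; the filename falls back to the last path segment:

# fname = download('https://example.com/data/catalog.fits')
# print(fname)  # 'catalog.fits'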
952,944
Fetches metadata pertaining to a Digital Object Identifier (DOI) in the
Harvard Dataverse.

Args:
    doi (str): The Digital Object Identifier (DOI) of the entry in the
        Dataverse.

Raises:
    requests.exceptions.HTTPError: The given DOI does not exist, or there
        was a problem connecting to the Dataverse.

def dataverse_search_doi(doi):
    url = '{}/api/datasets/:persistentId?persistentId=doi:{}'.format(
        dataverse, doi)

    r = requests.get(url)

    try:
        r.raise_for_status()
    except requests.exceptions.HTTPError as error:
        print('Error looking up DOI "{}" in the Harvard Dataverse.'.format(
            doi))
        print(r.text)
        raise error

    return json.loads(r.text)
952,945
Serializes a :obj:`numpy.dtype`.

Args:
    o (:obj:`numpy.dtype`): :obj:`dtype` to be serialized.

Returns:
    A dictionary that can be passed to :obj:`json.dumps`.

def serialize_dtype(o):
    if len(o) == 0:
        return dict(
            _type='np.dtype',
            descr=str(o))
    return dict(
        _type='np.dtype',
        descr=o.descr)
952,953
Deserializes a JSONified :obj:`numpy.dtype`.

Args:
    d (:obj:`dict`): A dictionary representation of a :obj:`dtype` object.

Returns:
    A :obj:`dtype` object.

def deserialize_dtype(d):
    if isinstance(d['descr'], six.string_types):
        return np.dtype(d['descr'])
    descr = []
    for col in d['descr']:
        col_descr = []
        for c in col:
            if isinstance(c, six.string_types):
                col_descr.append(str(c))
            elif type(c) is list:
                col_descr.append(tuple(c))
            else:
                col_descr.append(c)
        descr.append(tuple(col_descr))
    return np.dtype(descr)
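A roundtrip sketch for the two dtype helpers above (illustrative; it skips the intermediate ``json.dumps``/``json.loads`` step):

import numpy as np

simple = np.dtype('f8')
structured = np.dtype([('x', 'f8'), ('label', 'U10')])

for dt in (simple, structured):
    d = serialize_dtype(dt)
    assert deserialize_dtype(d) == dt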
952,954
Serializes a :obj:`numpy.ndarray` in a format where the datatype and shape
are human-readable, but the array data itself is base64 encoded.

Args:
    o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

Returns:
    A dictionary that can be passed to :obj:`json.dumps`.

def serialize_ndarray_b64(o):
    if o.flags['C_CONTIGUOUS']:
        o_data = o.data
    else:
        o_data = np.ascontiguousarray(o).data
    data_b64 = base64.b64encode(o_data)
    return dict(
        _type='np.ndarray',
        data=data_b64.decode('utf-8'),
        dtype=o.dtype,
        shape=o.shape)
952,955
Serializes a :obj:`numpy.ndarray` in a human-readable format.

Args:
    o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

Returns:
    A dictionary that can be passed to :obj:`json.dumps`.

def serialize_ndarray_readable(o):
    return dict(
        _type='np.ndarray',
        dtype=o.dtype,
        value=hint_tuples(o.tolist()))
952,957
Serializes a :obj:`numpy.ndarray` using numpy's built-in :obj:`save`
function. This produces totally unreadable (and very un-JSON-like)
results (in "npy" format), but it's basically guaranteed to work in
100% of cases.

Args:
    o (:obj:`numpy.ndarray`): :obj:`ndarray` to be serialized.

Returns:
    A dictionary that can be passed to :obj:`json.dumps`.

def serialize_ndarray_npy(o):
    with io.BytesIO() as f:
        np.save(f, o)
        f.seek(0)
        serialized = json.dumps(f.read().decode('latin-1'))
    return dict(
        _type='np.ndarray',
        npy=serialized)
952,958
Deserializes a JSONified :obj:`numpy.ndarray` that was created using
numpy's :obj:`save` function.

Args:
    d (:obj:`dict`): A dictionary representation of an :obj:`ndarray`
        object, created using :obj:`numpy.save`.

Returns:
    An :obj:`ndarray` object.

def deserialize_ndarray_npy(d):
    with io.BytesIO() as f:
        f.write(json.loads(d['npy']).encode('latin-1'))
        f.seek(0)
        return np.load(f)
952,959
Deserializes a JSONified :obj:`numpy.ndarray`. Can handle arrays
serialized using any of the methods in this module: :obj:`"npy"`,
:obj:`"b64"`, :obj:`"readable"`.

Args:
    d (`dict`): A dictionary representation of an :obj:`ndarray` object.

Returns:
    An :obj:`ndarray` object.

def deserialize_ndarray(d):
    if 'data' in d:
        x = np.fromstring(
            base64.b64decode(d['data']),
            dtype=d['dtype'])
        x.shape = d['shape']
        return x
    elif 'value' in d:
        return np.array(d['value'], dtype=d['dtype'])
    elif 'npy' in d:
        return deserialize_ndarray_npy(d)
    else:
        raise ValueError('Malformed np.ndarray encoding.')
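A roundtrip sketch covering all three encodings above (illustrative; the b64 branch is exercised without the JSON step, so the stored dtype stays a real ``np.dtype``):

import numpy as np

x = np.arange(6, dtype='f8').reshape(2, 3)

for serialize in (serialize_ndarray_b64,
                  serialize_ndarray_readable,
                  serialize_ndarray_npy):
    d = serialize(x)
    assert np.array_equal(deserialize_ndarray(d), x)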
952,960
Serializes an :obj:`astropy.units.Quantity`, for JSONification.

Args:
    o (:obj:`astropy.units.Quantity`): :obj:`Quantity` to be serialized.

Returns:
    A dictionary that can be passed to :obj:`json.dumps`.

def serialize_quantity(o):
    return dict(
        _type='astropy.units.Quantity',
        value=o.value,
        unit=o.unit.to_string())
952,961
Serializes an :obj:`astropy.coordinates.SkyCoord`, for JSONification.

Args:
    o (:obj:`astropy.coordinates.SkyCoord`): :obj:`SkyCoord` to be
        serialized.

Returns:
    A dictionary that can be passed to :obj:`json.dumps`.

def serialize_skycoord(o):
    representation = o.representation.get_name()
    frame = o.frame.name

    r = o.represent_as('spherical')

    d = dict(
        _type='astropy.coordinates.SkyCoord',
        frame=frame,
        representation=representation,
        lon=r.lon,
        lat=r.lat)

    if len(o.distance.unit.to_string()):
        d['distance'] = r.distance

    return d
952,962
Deserializes a JSONified :obj:`astropy.coordinates.SkyCoord`.

Args:
    d (:obj:`dict`): A dictionary representation of a :obj:`SkyCoord`
        object.

Returns:
    A :obj:`SkyCoord` object.

def deserialize_skycoord(d):
    if 'distance' in d:
        args = (d['lon'], d['lat'], d['distance'])
    else:
        args = (d['lon'], d['lat'])

    return coords.SkyCoord(
        *args,
        frame=d['frame'],
        representation='spherical')
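A roundtrip sketch for the SkyCoord pair above (illustrative; assumes an astropy version that still exposes the older ``representation`` attribute used by the serializer):

import astropy.units as u
from astropy import coordinates as coords

c = coords.SkyCoord(l=120*u.deg, b=45*u.deg, frame='galactic')
d = serialize_skycoord(c)
c2 = deserialize_skycoord(d)
# c2 is an equivalent coordinate, rebuilt from frame + spherical lon/lat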
952,963
Apply the gradient for transformation to cartesian space onto zmat_dist.

Args:
    grad_X (:class:`numpy.ndarray`): A ``(3, n, n, 3)`` array.
        The mathematical details of the index layout are explained in
        :meth:`~chemcoord.Cartesian.get_grad_zmat()`.
    zmat_dist (:class:`~chemcoord.Zmat`): Distortions in Zmatrix space.

Returns:
    :class:`~chemcoord.Cartesian`: Distortions in cartesian space.

def apply_grad_cartesian_tensor(grad_X, zmat_dist):
    columns = ['bond', 'angle', 'dihedral']
    C_dist = zmat_dist.loc[:, columns].values.T
    try:
        C_dist = C_dist.astype('f8')
        C_dist[[1, 2], :] = np.radians(C_dist[[1, 2], :])
    except (TypeError, AttributeError):
        C_dist[[1, 2], :] = sympy.rad(C_dist[[1, 2], :])
    cart_dist = np.tensordot(grad_X, C_dist, axes=([3, 2], [0, 1])).T
    from chemcoord.cartesian_coordinates.cartesian_class_main import Cartesian
    return Cartesian(atoms=zmat_dist['atom'], coords=cart_dist,
                     index=zmat_dist.index)
952,973
Change numbering to a new index.

Changes the numbering of index and all dependent numbering
(bond_with...) to a new_index.
The user has to make sure that the new_index consists of distinct
elements.

Args:
    new_index (list): If None the new_index is taken from 1 to the
        number of atoms.

Returns:
    Zmat: Reindexed version of the zmatrix.

def change_numbering(self, new_index=None):
    if (new_index is None):
        new_index = range(len(self))
    elif len(new_index) != len(self):
        raise ValueError('len(new_index) has to be the same as len(self)')

    c_table = self.loc[:, ['b', 'a', 'd']]
    # Strange bug in pandas where .replace is transitive for object columns
    # and non-transitive for all other types.
    # (Remember that string columns are just object columns)
    # Example:
    # A = {1: 2, 2: 3}
    # Transitive [1].replace(A) gives [3]
    # Non-transitive [1].replace(A) gives [2]
    # https://github.com/pandas-dev/pandas/issues/5338
    # https://github.com/pandas-dev/pandas/issues/16051
    # https://github.com/pandas-dev/pandas/issues/5541
    # For this reason convert to int and replace then.
    c_table = c_table.replace(constants.int_label)
    try:
        c_table = c_table.astype('i8')
    except ValueError:
        raise ValueError('Due to a bug in pandas it is necessary to have '
                         'integer columns')
    c_table = c_table.replace(self.index, new_index)
    c_table = c_table.replace(
        {v: k for k, v in constants.int_label.items()})

    out = self.copy()
    out.unsafe_loc[:, ['b', 'a', 'd']] = c_table
    out._frame.index = new_index
    return out
953,005
Return the molecule in cartesian coordinates.

Raises an :class:`~exceptions.InvalidReference` exception if the
reference of the i-th atom is undefined.

Args:
    None

Returns:
    Cartesian: The molecule in cartesian coordinates.

def get_cartesian(self):
    def create_cartesian(positions, row):
        xyz_frame = pd.DataFrame(columns=['atom', 'x', 'y', 'z'],
                                 index=self.index[:row], dtype='f8')
        xyz_frame['atom'] = self.loc[xyz_frame.index, 'atom']
        xyz_frame.loc[:, ['x', 'y', 'z']] = positions[:row]
        from chemcoord.cartesian_coordinates.cartesian_class_main \
            import Cartesian
        cartesian = Cartesian(xyz_frame, metadata=self.metadata)
        return cartesian

    c_table = self.loc[:, ['b', 'a', 'd']]
    c_table = c_table.replace(constants.int_label)
    c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)})
    c_table = c_table.values.astype('i8').T

    C = self.loc[:, ['bond', 'angle', 'dihedral']].values.T
    C[[1, 2], :] = np.radians(C[[1, 2], :])

    err, row, positions = transformation.get_X(C, c_table)
    positions = positions.T

    if err == ERR_CODE_InvalidReference:
        rename = dict(enumerate(self.index))
        i = rename[row]
        b, a, d = self.loc[i, ['b', 'a', 'd']]
        cartesian = create_cartesian(positions, row)
        raise InvalidReference(i=i, b=b, a=a, d=d,
                               already_built_cartesian=cartesian)
    elif err == ERR_CODE_OK:
        return create_cartesian(positions, row + 1)
953,011
Returns the map value at the specified location(s) on the sky.

Args:
    coords (`astropy.coordinates.SkyCoord`): The coordinates to query.
    order (Optional[int]): Interpolation order to use. Defaults to `1`,
        for linear interpolation.

Returns:
    A float array containing the map value at every input coordinate.
    The shape of the output will be the same as the shape of the
    coordinates stored by `coords`.

def query(self, coords, order=1):
    out = np.full(len(coords.l.deg), np.nan, dtype='f4')

    for pole in self.poles:
        m = (coords.b.deg >= 0) if pole == 'ngp' else (coords.b.deg < 0)

        if np.any(m):
            data, w = self._data[pole]
            x, y = w.wcs_world2pix(coords.l.deg[m], coords.b.deg[m], 0)
            out[m] = map_coordinates(data, [y, x], order=order,
                                     mode='nearest')

    return out
953,059
Return a :class:`~Cartesian` where all members of a symmetry equivalence
class are inserted back in.

Args:
    None

Returns:
    Cartesian: A new cartesian instance.

def get_cartesian(self):
    coords = ['x', 'y', 'z']
    eq_sets = self._metadata['eq']['eq_sets']
    sym_ops = self._metadata['eq']['sym_ops']
    frame = pd.DataFrame(index=[i for v in eq_sets.values() for i in v],
                         columns=['atom', 'x', 'y', 'z'], dtype='f8')
    frame['atom'] = pd.Series(
        {i: self.loc[k, 'atom'] for k, v in eq_sets.items() for i in v})
    frame.loc[self.index, coords] = self.loc[:, coords]
    for i in eq_sets:
        for j in eq_sets[i]:
            frame.loc[j, coords] = np.dot(sym_ops[i][j],
                                          frame.loc[i, coords])
    return Cartesian(frame)
953,073
Reads a zmat file.

Lines beginning with ``#`` are ignored.

Args:
    inputfile (str):
    implicit_index (bool): If this option is true the first column
        has to be the element symbols for the atoms.
        The row number is used to determine the index.

Returns:
    Zmat:

def read_zmat(cls, inputfile, implicit_index=True):
    cols = ['atom', 'b', 'bond', 'a', 'angle', 'd', 'dihedral']
    if implicit_index:
        zmat_frame = pd.read_table(inputfile, comment='#',
                                   delim_whitespace=True, names=cols)
        zmat_frame.index = range(1, len(zmat_frame) + 1)
    else:
        zmat_frame = pd.read_table(inputfile, comment='#',
                                   delim_whitespace=True,
                                   names=['temp_index'] + cols)
        zmat_frame.set_index('temp_index', drop=True, inplace=True)
        zmat_frame.index.name = None
    if pd.isnull(zmat_frame.iloc[0, 1]):
        zmat_values = [1.27, 127., 127.]
        zmat_refs = [constants.int_label[x]
                     for x in ['origin', 'e_z', 'e_x']]
        for row, i in enumerate(zmat_frame.index[:3]):
            cols = ['b', 'a', 'd']
            zmat_frame.loc[:, cols] = zmat_frame.loc[:, cols].astype('O')
            if row < 2:
                zmat_frame.loc[i, cols[row:]] = zmat_refs[row:]
                zmat_frame.loc[i, ['bond', 'angle', 'dihedral'][row:]
                               ] = zmat_values[row:]
            else:
                zmat_frame.loc[i, 'd'] = zmat_refs[2]
                zmat_frame.loc[i, 'dihedral'] = zmat_values[2]
    elif zmat_frame.iloc[0, 1] in constants.int_label.keys():
        zmat_frame = zmat_frame.replace(
            {col: constants.int_label for col in ['b', 'a', 'd']})

    zmat_frame = cls._cast_correct_types(zmat_frame)
    try:
        Zmat = cls(zmat_frame)
    except InvalidReference:
        raise UndefinedCoordinateSystem(
            'Your zmatrix cannot be transformed to cartesian coordinates')
    return Zmat
953,149
Read a file of coordinate information.

Reads xyz-files.

Args:
    inputfile (str):
    start_index (int):
    get_bonds (bool):
    nrows (int): Number of rows of file to read.
        Note that the first two rows are implicitly excluded.
    engine (str): Wrapper for argument of :func:`pandas.read_csv`.

Returns:
    Cartesian:

def read_xyz(cls, buf, start_index=0, get_bonds=True,
             nrows=None, engine=None):
    frame = pd.read_table(buf, skiprows=2, comment='#',
                          nrows=nrows,
                          delim_whitespace=True,
                          names=['atom', 'x', 'y', 'z'],
                          engine=engine)

    remove_digits = partial(re.sub, r'[0-9]+', '')
    frame['atom'] = frame['atom'].apply(remove_digits)

    molecule = cls(frame)
    molecule.index = range(start_index, start_index + len(molecule))

    if get_bonds:
        molecule.get_bonds(use_lookup=False, set_lookup=True)
    return molecule
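Usage sketch with a hypothetical water molecule file (assuming the class is chemcoord's ``Cartesian``):

# water.xyz:
#   3
#   water geometry, Angstrom
#   O   0.000   0.000   0.000
#   H   0.758   0.586   0.000
#   H  -0.758   0.586   0.000
molecule = Cartesian.read_xyz('water.xyz', start_index=1)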
953,157
Read a cjson file or a dictionary.

The cjson format is specified
`here <https://github.com/OpenChemistry/chemicaljson>`_.

Args:
    buf (str, dict): If it is a filepath, the data is read from
        filepath. If it is a dictionary, the dictionary is interpreted
        as cjson.

Returns:
    Cartesian:

def read_cjson(cls, buf):
    if isinstance(buf, dict):
        data = buf.copy()
    else:
        with open(buf, 'r') as f:
            data = json.load(f)

    assert data['chemical json'] == 0

    n_atoms = len(data['atoms']['coords']['3d'])
    metadata = {}
    _metadata = {}

    coords = np.array(
        data['atoms']['coords']['3d']).reshape((n_atoms // 3, 3))

    atomic_number = constants.elements['atomic_number']
    elements = [dict(zip(atomic_number, atomic_number.index))[x]
                for x in data['atoms']['elements']['number']]

    try:
        connections = data['bonds']['connections']['index']
    except KeyError:
        pass
    else:
        bond_dict = defaultdict(set)
        for i, b in zip(connections[::2], connections[1::2]):
            bond_dict[i].add(b)
            bond_dict[b].add(i)
        _metadata['bond_dict'] = dict(bond_dict)

    try:
        metadata.update(data['properties'])
    except KeyError:
        pass

    out = cls(atoms=elements, coords=coords, _metadata=_metadata,
              metadata=metadata)
    return out
953,159
Create a Molecule instance of the pymatgen library

.. warning:: The `pymatgen library <http://pymatgen.org>`_ is imported
    locally in this function and will raise an ``ImportError`` exception
    if it is not installed.

Args:
    None

Returns:
    :class:`pymatgen.core.structure.Molecule`:

def get_pymatgen_molecule(self):
    from pymatgen import Molecule
    return Molecule(self['atom'].values,
                    self.loc[:, ['x', 'y', 'z']].values)
953,161
Create an instance of the own class from a pymatgen molecule

Args:
    molecule (:class:`pymatgen.core.structure.Molecule`):

Returns:
    Cartesian:

def from_pymatgen_molecule(cls, molecule):
    new = cls(atoms=[el.value for el in molecule.species],
              coords=molecule.cart_coords)
    return new._to_numeric()
953,162
Create an instance of the own class from an ase molecule

Args:
    molecule (:class:`ase.atoms.Atoms`):

Returns:
    Cartesian:

def from_ase_atoms(cls, atoms):
    return cls(atoms=atoms.get_chemical_symbols(),
               coords=atoms.positions)
953,163
Returns a PointGroup object for the molecule.

Args:
    tolerance (float): Tolerance to generate the full set of symmetry
        operations.

Returns:
    :class:`~PointGroupOperations`

def get_pointgroup(self, tolerance=0.3):
    PA = self._get_point_group_analyzer(tolerance=tolerance)
    return PointGroupOperations(PA.sch_symbol, PA.symmops)
953,165
Runs the 'configure' program in the working directory.

Args:
    mandatory (bool): Throw exception if 'configure' fails or a
        'configure' file is missing.

def run_configure(self, mandatory=True):
    if not has_file(self.working_dir, 'configure'):
        if mandatory:
            raise FileNotFoundError(
                "Could not find a configure script for execution.")
        else:
            return
    try:
        prog = RunningProgram(self, 'configure')
        prog.expect_exit_status(0)
    except Exception:
        if mandatory:
            raise
953,415
Runs a compiler in the working directory.

Args:
    compiler (tuple): The compiler program and its command-line
        arguments, including placeholders for output and input files.
    inputs (tuple): The list of input files for the compiler.
    output (str): The name of the output file.

def run_compiler(self, compiler=GCC, inputs=None, output=None):
    # Let exceptions travel through
    prog = RunningProgram(self, *compiler_cmdline(compiler=compiler,
                                                  inputs=inputs,
                                                  output=output))
    prog.expect_exit_status(0)
953,416
Scans the student files for text patterns.

Args:
    regex (str): Regular expression used for scanning inside the files.

Returns:
    tuple: Names of the matching files in the working directory.

def grep(self, regex):
    matches = []
    logger.debug("Searching student files for '{0}'".format(regex))
    for fname in self.student_files:
        if os.path.isfile(self.working_dir + fname):
            for line in open(self.working_dir + fname, 'br'):
                if re.search(regex.encode(), line):
                    logger.debug("{0} contains '{1}'".format(fname, regex))
                    matches.append(fname)
    return matches
953,420
Checks the student submission for specific files.

Args:
    filenames (tuple): The list of file names to be checked for.

Returns:
    bool: Indicator if all files are found in the student archive.

def ensure_files(self, filenames):
    logger.debug("Testing {0} for the following files: {1}".format(
        self.working_dir, filenames))
    dircontent = os.listdir(self.working_dir)
    for fname in filenames:
        if fname not in dircontent:
            return False
    return True
953,421
Sends an input line to the running program, including os.linesep.

Args:
    text (str): The input text to be sent.

Raises:
    TerminationException: The program terminated before / while / after
        sending the input.
    NestedException: An internal problem occurred while waiting for the
        output.

def sendline(self, text):
    logger.debug("Sending input '{0}' to '{1}'".format(text, self.name))
    try:
        return self._spawn.sendline(text)
    except pexpect.exceptions.EOF as e:
        logger.debug("Raising termination exception.")
        raise TerminationException(instance=self, real_exception=e,
                                   output=self.get_output())
    except pexpect.exceptions.TIMEOUT as e:
        logger.debug("Raising timeout exception.")
        raise TimeoutException(instance=self, real_exception=e,
                               output=self.get_output())
    except Exception as e:
        logger.debug("Sending input failed: " + str(e))
        raise NestedException(instance=self, real_exception=e,
                              output=self.get_output())
953,442
Wait for the running program to finish and expect some exit status.

Args:
    exit_status (int): The expected exit status.

Raises:
    WrongExitStatusException: The produced exit status is not the
        expected one.

def expect_exitstatus(self, exit_status):
    self.expect_end()
    logger.debug("Checking exit status of '{0}', output so far: {1}".format(
        self.name, self.get_output()))
    if self._spawn.exitstatus is None:
        raise WrongExitStatusException(
            instance=self, expected=exit_status, output=self.get_output())

    # Compare by value, not identity: "is not" only works by accident
    # for small cached integers.
    if self._spawn.exitstatus != exit_status:
        raise WrongExitStatusException(
            instance=self,
            expected=exit_status,
            got=self._spawn.exitstatus,
            output=self.get_output())
953,444
Creates a new config object.

Parameters:
    config_files: Dictionary with file_name: is_production setting

def __init__(self, config_files):
    for config_file, is_production in config_files:
        if os.path.isfile(config_file):
            self.config_file = config_file
            self.is_production = is_production
            self.config = configparser.SafeConfigParser()
            self.config.read([self.config_file], encoding='utf-8')
            return
    raise IOError("No configuration file found.")
953,445
Initializes the object with TAF/METAR report text.

Args:
    string: TAF/METAR report string

Raises:
    MalformedTAF: An error parsing the TAF/METAR report

def __init__(self, string):
    # Instance variables
    self._raw_taf = None
    self._taf_header = None
    self._raw_weather_groups = []
    self._weather_groups = []
    self._maintenance = None

    if isinstance(string, str) and string != "":
        self._raw_taf = string
    else:
        raise MalformedTAF("TAF/METAR string expected")

    # Patterns use ^ and $, so we don't want
    # leading/trailing spaces
    self._raw_taf = self._raw_taf.strip()

    # Initialize header part
    self._taf_header = self._init_header(self._raw_taf)

    if self._taf_header['form'] == 'metar':
        self._weather_groups.append(self._parse_group(self._raw_taf))
    else:
        # Get all TAF weather groups
        self._raw_weather_groups = self._init_groups(self._raw_taf)

        for group in self._raw_weather_groups:
            parsed_group = self._parse_group(group)
            self._weather_groups.append(parsed_group)

    self._maintenance = self._parse_maintenance(self._raw_taf)
953,575
Extracts header part from TAF/METAR string and populates header dict

Args:
    string: TAF/METAR report string

Raises:
    MalformedTAF: An error parsing the report

Returns:
    Header dictionary

def _init_header(self, string):
    # NOTE: the two header regex patterns were lost in this snippet's
    # source; the gaps are left as-is.
    taf_header_pattern =
    metar_header_pattern =

    header_taf = re.match(taf_header_pattern, string, re.VERBOSE)
    header_metar = re.match(metar_header_pattern, string, re.VERBOSE)

    # The difference between a METAR and TAF header isn't that big,
    # so it's likely to get both regex to match. TAF is a bit more
    # specific, so if both regex match then we're most likely dealing
    # with a TAF string.
    if header_taf:
        header_dict = header_taf.groupdict()
        header_dict['form'] = 'taf'
    elif header_metar:
        header_dict = header_metar.groupdict()
        header_dict['form'] = 'metar'
    else:
        raise MalformedTAF("No valid TAF/METAR header found")

    return header_dict
953,576
Extracts weather groups (FM, PROB etc.) and populates group list

Args:
    string: TAF report string

Raises:
    MalformedTAF: Group decoding error

def _init_groups(self, string):
    # NOTE: the group regex pattern was lost in this snippet's source;
    # the gap is left as-is.
    taf_group_pattern =

    group_list = []

    groups = re.findall(taf_group_pattern, string, re.VERBOSE)
    if not groups:
        raise MalformedTAF("No valid groups found")

    for group in groups:
        group_list.append(group)

    return group_list
953,577
Extract first image of input stream to jpg file.

Args:
    cam: Input stream of raw rosbag messages.

Returns:
    File instance for first image of input stream.

def image(cam):
    # Set output stream title and pull first message
    yield marv.set_header(title=cam.topic)
    msg = yield marv.pull(cam)
    if msg is None:
        return

    # Deserialize raw ros message
    pytype = get_message_type(cam)
    rosmsg = pytype()
    rosmsg.deserialize(msg.data)

    # Write image to jpeg and push it to output stream
    name = '{}.jpg'.format(cam.topic.replace('/', ':')[1:])
    imgfile = yield marv.make_file(name)
    img = imgmsg_to_cv2(rosmsg, "rgb8")
    cv2.imwrite(imgfile.path, img, (cv2.IMWRITE_JPEG_QUALITY, 60))
    yield marv.push(imgfile)
954,536
Create detail section with one image.

Args:
    title (str): Title to be displayed for detail section.
    image: marv image file.

Returns:
    One detail section.

def image_section(image, title):
    # pull first image
    img = yield marv.pull(image)
    if img is None:
        return

    # create image widget and section containing it
    widget = {'title': image.title, 'image': {'src': img.relpath}}
    section = {'title': title, 'widgets': [widget]}
    yield marv.push(section)
954,537
Extract images from input stream to jpg files.

Args:
    cam: Input stream of raw rosbag messages.

Returns:
    File instances for images of input stream.

def images(cam):
    # Set output stream title and pull first message
    yield marv.set_header(title=cam.topic)

    # Fetch and process first 20 image messages
    name_template = '%s-{}.jpg' % cam.topic.replace('/', ':')[1:]
    while True:
        idx, msg = yield marv.pull(cam, enumerate=True)
        if msg is None or idx >= 20:
            break

        # Deserialize raw ros message
        pytype = get_message_type(cam)
        rosmsg = pytype()
        rosmsg.deserialize(msg.data)

        # Write image to jpeg and push it to output stream
        img = imgmsg_to_cv2(rosmsg, "rgb8")
        name = name_template.format(idx)
        imgfile = yield marv.make_file(name)
        cv2.imwrite(imgfile.path, img)
        yield marv.push(imgfile)
954,538
Create detail section with gallery.

Args:
    title (str): Title to be displayed for detail section.
    images: stream of marv image files

Returns:
    One detail section.

def gallery_section(images, title):
    # pull all images
    imgs = []
    while True:
        img = yield marv.pull(images)
        if img is None:
            break
        imgs.append({'src': img.relpath})
    if not imgs:
        return

    # create gallery widget and section containing it
    widget = {'title': images.title, 'gallery': {'images': imgs}}
    section = {'title': title, 'widgets': [widget]}
    yield marv.push(section)
954,539
Stat filesize of files.

Args:
    images: stream of marv image files

Returns:
    Stream of filesizes

def filesizes(images):
    # Pull each image and push its filesize
    while True:
        img = yield marv.pull(images)
        if img is None:
            break
        yield marv.push(img.size)
954,540
Initialize mapping based on the concept mapping (smart initialization)

Arguments:
    candidate_mapping: candidate node match list
    instance1: instance triples of AMR 1
    instance2: instance triples of AMR 2

Returns:
    initialized node mapping between two AMRs

def smart_init_mapping(candidate_mapping, instance1, instance2):
    random.seed()
    matched_dict = {}
    result = []
    # list to store node indices that have no concept match
    no_word_match = []
    for i, candidates in enumerate(candidate_mapping):
        if not candidates:
            # no possible mapping
            result.append(-1)
            continue
        # node value in instance triples of AMR 1
        value1 = instance1[i][2]
        for node_index in candidates:
            value2 = instance2[node_index][2]
            # find the first instance triple match in the candidates
            # instance triple match is having the same concept value
            if value1 == value2:
                if node_index not in matched_dict:
                    result.append(node_index)
                    matched_dict[node_index] = 1
                    break
        if len(result) == i:
            no_word_match.append(i)
            result.append(-1)
    # if no concept match, generate a random mapping
    for i in no_word_match:
        candidates = list(candidate_mapping[i])
        while candidates:
            # get a random node index from candidates
            rid = random.randint(0, len(candidates) - 1)
            candidate = candidates[rid]
            if candidate in matched_dict:
                candidates.pop(rid)
            else:
                matched_dict[candidate] = 1
                result[i] = candidate
                break
    return result
955,079
Generate a random node mapping.

Args:
    candidate_mapping: candidate node match list

Returns:
    randomly-generated node mapping between two AMRs

def random_init_mapping(candidate_mapping):
    # if needed, a fixed seed could be passed here to generate same random
    # (to help debugging)
    random.seed()
    matched_dict = {}
    result = []
    for c in candidate_mapping:
        candidates = list(c)
        if not candidates:
            # -1 indicates no possible mapping
            result.append(-1)
            continue
        found = False
        while candidates:
            # randomly generate an index in [0, length of candidates)
            rid = random.randint(0, len(candidates) - 1)
            candidate = candidates[rid]
            # check if it has already been matched
            if candidate in matched_dict:
                candidates.pop(rid)
            else:
                matched_dict[candidate] = 1
                result.append(candidate)
                found = True
                break
        if not found:
            result.append(-1)
    return result
955,080
Given a node mapping, compute match number based on weight_dict.

Args:
    mapping: a list of node indices in AMR 2. The ith element (value j)
        means node i in AMR 1 maps to node j in AMR 2.

Returns:
    matching triple number

Complexity:
    O(m*n), m is the node number of AMR 1, n is the node number of AMR 2

def compute_match(mapping, weight_dict):
    # If this mapping has been investigated before, retrieve the value
    # instead of re-computing.
    if veryVerbose:
        print("Computing match for mapping", file=DEBUG_LOG)
        print(mapping, file=DEBUG_LOG)
    if tuple(mapping) in match_triple_dict:
        if veryVerbose:
            print("saved value", match_triple_dict[tuple(mapping)],
                  file=DEBUG_LOG)
        return match_triple_dict[tuple(mapping)]
    match_num = 0
    # i is node index in AMR 1, m is node index in AMR 2
    for i, m in enumerate(mapping):
        if m == -1:
            # no node maps to this node
            continue
        # node i in AMR 1 maps to node m in AMR 2
        current_node_pair = (i, m)
        if current_node_pair not in weight_dict:
            continue
        if veryVerbose:
            print("node_pair", current_node_pair, file=DEBUG_LOG)
        for key in weight_dict[current_node_pair]:
            if key == -1:
                # matching triple resulting from instance/attribute triples
                match_num += weight_dict[current_node_pair][key]
                if veryVerbose:
                    print("instance/attribute match",
                          weight_dict[current_node_pair][key],
                          file=DEBUG_LOG)
            # only consider node index larger than i to avoid duplicates
            # as we store both weight_dict[node_pair1][node_pair2] and
            # weight_dict[node_pair2][node_pair1] for a relation
            elif key[0] < i:
                continue
            elif mapping[key[0]] == key[1]:
                match_num += weight_dict[current_node_pair][key]
                if veryVerbose:
                    print("relation match with", key,
                          weight_dict[current_node_pair][key],
                          file=DEBUG_LOG)
    if veryVerbose:
        print("match computing complete, result:", match_num,
              file=DEBUG_LOG)
    # update match_triple_dict
    match_triple_dict[tuple(mapping)] = match_num
    return match_num
955,081
Compute the triple match number gain from the move operation

Arguments:
    mapping: current node mapping
    node_id: remapped node in AMR 1
    old_id: original node id in AMR 2 to which node_id is mapped
    new_id: new node id in AMR 2 to which node_id is mapped
    weight_dict: weight dictionary
    match_num: the original triple matching number

Returns:
    the triple match gain number (might be negative)

def move_gain(mapping, node_id, old_id, new_id, weight_dict, match_num):
    # new node mapping after moving
    new_mapping = (node_id, new_id)
    # node mapping before moving
    old_mapping = (node_id, old_id)
    # new nodes mapping list (all node pairs)
    new_mapping_list = mapping[:]
    new_mapping_list[node_id] = new_id
    # if this mapping has already been investigated, use the saved one to
    # avoid duplicate computing
    if tuple(new_mapping_list) in match_triple_dict:
        return match_triple_dict[tuple(new_mapping_list)] - match_num
    gain = 0
    # add the triple match incurred by new_mapping to gain
    if new_mapping in weight_dict:
        for key in weight_dict[new_mapping]:
            if key == -1:
                # instance/attribute triple match
                gain += weight_dict[new_mapping][-1]
            elif new_mapping_list[key[0]] == key[1]:
                # relation gain incurred by new_mapping and another node
                # pair in new_mapping_list
                gain += weight_dict[new_mapping][key]
    # deduct the triple match incurred by old_mapping from gain
    if old_mapping in weight_dict:
        for k in weight_dict[old_mapping]:
            if k == -1:
                gain -= weight_dict[old_mapping][-1]
            elif mapping[k[0]] == k[1]:
                gain -= weight_dict[old_mapping][k]
    # update match number dictionary
    match_triple_dict[tuple(new_mapping_list)] = match_num + gain
    return gain
955,082
Compute the triple match number gain from the swapping

Arguments:
    mapping: current node mapping list
    node_id1: node 1 index in AMR 1
    mapping_id1: the node index in AMR 2 node 1 maps to (in the current
        mapping)
    node_id2: node 2 index in AMR 1
    mapping_id2: the node index in AMR 2 node 2 maps to (in the current
        mapping)
    weight_dict: weight dictionary
    match_num: the original matching triple number

Returns:
    the gain number (might be negative)

def swap_gain(mapping, node_id1, mapping_id1, node_id2, mapping_id2,
              weight_dict, match_num):
    new_mapping_list = mapping[:]
    # Before swapping, node_id1 maps to mapping_id1, and node_id2 maps to
    # mapping_id2. After swapping, node_id1 maps to mapping_id2 and
    # node_id2 maps to mapping_id1.
    new_mapping_list[node_id1] = mapping_id2
    new_mapping_list[node_id2] = mapping_id1
    if tuple(new_mapping_list) in match_triple_dict:
        return match_triple_dict[tuple(new_mapping_list)] - match_num
    gain = 0
    new_mapping1 = (node_id1, mapping_id2)
    new_mapping2 = (node_id2, mapping_id1)
    old_mapping1 = (node_id1, mapping_id1)
    old_mapping2 = (node_id2, mapping_id2)
    if node_id1 > node_id2:
        new_mapping2 = (node_id1, mapping_id2)
        new_mapping1 = (node_id2, mapping_id1)
        old_mapping1 = (node_id2, mapping_id2)
        old_mapping2 = (node_id1, mapping_id1)
    if new_mapping1 in weight_dict:
        for key in weight_dict[new_mapping1]:
            if key == -1:
                gain += weight_dict[new_mapping1][-1]
            elif new_mapping_list[key[0]] == key[1]:
                gain += weight_dict[new_mapping1][key]
    if new_mapping2 in weight_dict:
        for key in weight_dict[new_mapping2]:
            if key == -1:
                gain += weight_dict[new_mapping2][-1]
            # to avoid duplicate
            elif key[0] == node_id1:
                continue
            elif new_mapping_list[key[0]] == key[1]:
                gain += weight_dict[new_mapping2][key]
    if old_mapping1 in weight_dict:
        for key in weight_dict[old_mapping1]:
            if key == -1:
                gain -= weight_dict[old_mapping1][-1]
            elif mapping[key[0]] == key[1]:
                gain -= weight_dict[old_mapping1][key]
    if old_mapping2 in weight_dict:
        for key in weight_dict[old_mapping2]:
            if key == -1:
                gain -= weight_dict[old_mapping2][-1]
            # to avoid duplicate
            elif key[0] == node_id1:
                continue
            elif mapping[key[0]] == key[1]:
                gain -= weight_dict[old_mapping2][key]
    match_triple_dict[tuple(new_mapping_list)] = match_num + gain
    return gain
955,083
Hill-climbing method to return the best gain swap/move can get

Arguments:
    mapping: current node mapping
    candidate_mappings: the candidate mapping list
    weight_dict: the weight dictionary
    instance_len: the number of the nodes in AMR 2
    cur_match_num: current triple match number

Returns:
    the best gain we can get via swap/move operation

def get_best_gain(mapping, candidate_mappings, weight_dict, instance_len,
                  cur_match_num):
    largest_gain = 0
    # True: using swap; False: using move
    use_swap = True
    # the node to be moved/swapped
    node1 = None
    # store the other node affected. In swap, this other node is the node
    # swapping with node1. In move, this other node is the node node1 will
    # move to.
    node2 = None
    # unmatched nodes in AMR 2
    unmatched = set(range(instance_len))
    # exclude nodes in current mapping
    # get unmatched nodes
    for nid in mapping:
        if nid in unmatched:
            unmatched.remove(nid)
    for i, nid in enumerate(mapping):
        # current node i in AMR 1 maps to node nid in AMR 2
        for nm in unmatched:
            if nm in candidate_mappings[i]:
                # remap i to another unmatched node (move)
                # (i, m) -> (i, nm)
                if veryVerbose:
                    print("Remap node", i, "from ", nid, "to", nm,
                          file=DEBUG_LOG)
                mv_gain = move_gain(mapping, i, nid, nm, weight_dict,
                                    cur_match_num)
                if veryVerbose:
                    print("Move gain:", mv_gain, file=DEBUG_LOG)
                    new_mapping = mapping[:]
                    new_mapping[i] = nm
                    new_match_num = compute_match(new_mapping, weight_dict)
                    if new_match_num != cur_match_num + mv_gain:
                        print(mapping, new_mapping, file=ERROR_LOG)
                        print("Inconsistency in computing: move gain",
                              cur_match_num, mv_gain, new_match_num,
                              file=ERROR_LOG)
                if mv_gain > largest_gain:
                    largest_gain = mv_gain
                    node1 = i
                    node2 = nm
                    use_swap = False
    # compute swap gain
    for i, m in enumerate(mapping):
        for j in range(i + 1, len(mapping)):
            m2 = mapping[j]
            # swap operation (i, m) (j, m2) -> (i, m2) (j, m)
            # j starts from i+1, to avoid duplicate swap
            if veryVerbose:
                print("Swap node", i, "and", j, file=DEBUG_LOG)
                print("Before swapping:", i, "-", m, ",", j, "-", m2,
                      file=DEBUG_LOG)
                print(mapping, file=DEBUG_LOG)
                print("After swapping:", i, "-", m2, ",", j, "-", m,
                      file=DEBUG_LOG)
            sw_gain = swap_gain(mapping, i, m, j, m2, weight_dict,
                                cur_match_num)
            if veryVerbose:
                print("Swap gain:", sw_gain, file=DEBUG_LOG)
                new_mapping = mapping[:]
                new_mapping[i] = m2
                new_mapping[j] = m
                print(new_mapping, file=DEBUG_LOG)
                new_match_num = compute_match(new_mapping, weight_dict)
                if new_match_num != cur_match_num + sw_gain:
                    print(mapping, new_mapping, file=ERROR_LOG)
                    print("Inconsistency in computing: swap gain",
                          cur_match_num, sw_gain, new_match_num,
                          file=ERROR_LOG)
            if sw_gain > largest_gain:
                largest_gain = sw_gain
                node1 = i
                node2 = j
                use_swap = True
    # generate a new mapping based on swap/move
    cur_mapping = mapping[:]
    if node1 is not None:
        if use_swap:
            if veryVerbose:
                print("Use swap gain", file=DEBUG_LOG)
            temp = cur_mapping[node1]
            cur_mapping[node1] = cur_mapping[node2]
            cur_mapping[node2] = temp
        else:
            if veryVerbose:
                print("Use move gain", file=DEBUG_LOG)
            cur_mapping[node1] = node2
    else:
        if veryVerbose:
            print("no move/swap gain found", file=DEBUG_LOG)
    if veryVerbose:
        print("Original mapping", mapping, file=DEBUG_LOG)
        print("Current mapping", cur_mapping, file=DEBUG_LOG)
    return largest_gain, cur_mapping
955,084
Print the alignment based on a node mapping

Args:
    mapping: current node mapping list
    instance1: nodes of AMR 1
    instance2: nodes of AMR 2

def print_alignment(mapping, instance1, instance2):
    result = []
    for instance1_item, m in zip(instance1, mapping):
        r = instance1_item[1] + "(" + instance1_item[2] + ")"
        if m == -1:
            r += "-Null"
        else:
            instance2_item = instance2[m]
            r += "-" + instance2_item[1] + "(" + instance2_item[2] + ")"
        result.append(r)
    return " ".join(result)
955,085
Compute the f-score based on the matching triple number, triple number of
AMR set 1, and triple number of AMR set 2

Args:
    match_num: matching triple number
    test_num: triple number of AMR 1 (test file)
    gold_num: triple number of AMR 2 (gold file)

Returns:
    precision: match_num/test_num
    recall: match_num/gold_num
    f_score: 2*precision*recall/(precision+recall)

def compute_f(match_num, test_num, gold_num):
    if test_num == 0 or gold_num == 0:
        return 0.00, 0.00, 0.00
    precision = float(match_num) / float(test_num)
    recall = float(match_num) / float(gold_num)
    if (precision + recall) != 0:
        f_score = 2 * precision * recall / (precision + recall)
        if veryVerbose:
            print("F-score:", f_score, file=DEBUG_LOG)
        return precision, recall, f_score
    else:
        if veryVerbose:
            print("F-score:", "0.0", file=DEBUG_LOG)
        return precision, recall, 0.00
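A quick worked example of the formula with made-up counts: 7 matching triples against 10 test and 14 gold triples.

p, r, f = compute_f(match_num=7, test_num=10, gold_num=14)
# precision = 7/10 = 0.70
# recall    = 7/14 = 0.50
# f_score   = 2 * 0.70 * 0.50 / (0.70 + 0.50) = 0.5833...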
955,086
Get the annotator name list based on a list of files

Args:
    file_dir: AMR file folder
    files: a list of AMR names, e.g. nw_wsj_0001_1

Returns:
    a list of user names who annotate all the files

def get_names(file_dir, files):
    # for each user, check if they have files available
    # return user name list
    total_list = []
    name_list = []
    get_sub = False
    for path, subdir, dir_files in os.walk(file_dir):
        if not get_sub:
            total_list = subdir[:]
            get_sub = True
        else:
            break
    for user in total_list:
        has_file = True
        for f in files:
            file_path = file_dir + user + "/" + f + ".txt"
            if not os.path.exists(file_path):
                has_file = False
                break
        if has_file:
            name_list.append(user)
    if len(name_list) == 0:
        print("********Error: Cannot find any user who completes the "
              "files*************", file=ERROR_LOG)
    return name_list
955,394
Compute the smatch scores for a file list between two users

Args:
    user1: user 1 name
    user2: user 2 name
    file_list: file list
    dir_pre: the file location prefix
    start_num: the number of restarts in smatch

Returns:
    smatch f score.

def compute_files(user1, user2, file_list, dir_pre, start_num):
    match_total = 0
    test_total = 0
    gold_total = 0
    for fi in file_list:
        file1 = dir_pre + user1 + "/" + fi + ".txt"
        file2 = dir_pre + user2 + "/" + fi + ".txt"
        if not os.path.exists(file1):
            print("*********Error: ", file1, "does not exist*********",
                  file=ERROR_LOG)
            return -1.00
        if not os.path.exists(file2):
            print("*********Error: ", file2, "does not exist*********",
                  file=ERROR_LOG)
            return -1.00
        try:
            file1_h = open(file1, "r")
            file2_h = open(file2, "r")
        except IOError:
            print("Cannot open the files", file1, file2, file=ERROR_LOG)
            break
        cur_amr1 = smatch.get_amr_line(file1_h)
        cur_amr2 = smatch.get_amr_line(file2_h)
        if cur_amr1 == "":
            print("AMR 1 is empty", file=ERROR_LOG)
            continue
        if cur_amr2 == "":
            print("AMR 2 is empty", file=ERROR_LOG)
            continue
        amr1 = amr.AMR.parse_AMR_line(cur_amr1)
        amr2 = amr.AMR.parse_AMR_line(cur_amr2)
        test_label = "a"
        gold_label = "b"
        amr1.rename_node(test_label)
        amr2.rename_node(gold_label)
        (test_inst, test_rel1, test_rel2) = amr1.get_triples()
        (gold_inst, gold_rel1, gold_rel2) = amr2.get_triples()
        if verbose:
            print("Instance triples of file 1:", len(test_inst),
                  file=DEBUG_LOG)
            print(test_inst, file=DEBUG_LOG)
            print("Attribute triples of file 1:", len(test_rel1),
                  file=DEBUG_LOG)
            print(test_rel1, file=DEBUG_LOG)
            print("Relation triples of file 1:", len(test_rel2),
                  file=DEBUG_LOG)
            print(test_rel2, file=DEBUG_LOG)
            print("Instance triples of file 2:", len(gold_inst),
                  file=DEBUG_LOG)
            print(gold_inst, file=DEBUG_LOG)
            print("Attribute triples of file 2:", len(gold_rel1),
                  file=DEBUG_LOG)
            print(gold_rel1, file=DEBUG_LOG)
            print("Relation triples of file 2:", len(gold_rel2),
                  file=DEBUG_LOG)
            print(gold_rel2, file=DEBUG_LOG)
        (best_match, best_match_num) = smatch.get_best_match(
            test_inst, test_rel1, test_rel2,
            gold_inst, gold_rel1, gold_rel2,
            test_label, gold_label)
        if verbose:
            print("best match number", best_match_num, file=DEBUG_LOG)
            print("Best Match:",
                  smatch.print_alignment(best_match, test_inst, gold_inst),
                  file=DEBUG_LOG)
        match_total += best_match_num
        test_total += (len(test_inst) + len(test_rel1) + len(test_rel2))
        gold_total += (len(gold_inst) + len(gold_rel1) + len(gold_rel2))
        smatch.match_triple_dict.clear()
    (precision, recall, f_score) = smatch.compute_f(match_total,
                                                    test_total, gold_total)
    return "%.2f" % f_score
955,395
Clear all comments in json_str.

Clear JS-style comments like // and /**/ in json_str. Accept a str or
unicode as input.

Args:
    json_str: A json string of str or unicode to clean up comment

Returns:
    str: The str without comments (or unicode if you pass in unicode)

def dispose(json_str):
    result_str = list(json_str)
    escaped = False
    normal = True
    sl_comment = False
    ml_comment = False
    quoted = False

    a_step_from_comment = False
    a_step_from_comment_away = False

    former_index = None

    for index, char in enumerate(json_str):
        if escaped:  # We have just met a '\'
            escaped = False
            continue

        if a_step_from_comment:  # We have just met a '/'
            if char != '/' and char != '*':
                a_step_from_comment = False
                normal = True
                continue

        if a_step_from_comment_away:  # We have just met a '*'
            if char != '/':
                a_step_from_comment_away = False

        if char == '"':
            if normal and not escaped:
                # We are now in a string
                quoted = True
                normal = False
            elif quoted and not escaped:
                # We are now out of a string
                quoted = False
                normal = True

        elif char == '\\':
            # '\' should not take effect in comment
            if normal or quoted:
                escaped = True

        elif char == '/':
            if a_step_from_comment:
                # Now we are in single line comment
                a_step_from_comment = False
                sl_comment = True
                normal = False
                former_index = index - 1
            elif a_step_from_comment_away:
                # Now we are out of comment
                a_step_from_comment_away = False
                normal = True
                ml_comment = False
                for i in range(former_index, index + 1):
                    result_str[i] = ""
            elif normal:
                # Now we are just one step away from comment
                a_step_from_comment = True
                normal = False

        elif char == '*':
            if a_step_from_comment:
                # We are now in multi-line comment
                a_step_from_comment = False
                ml_comment = True
                normal = False
                former_index = index - 1
            elif ml_comment:
                a_step_from_comment_away = True

        elif char == '\n':
            if sl_comment:
                sl_comment = False
                normal = True
                for i in range(former_index, index + 1):
                    result_str[i] = ""

        elif char == ']' or char == '}':
            if normal:
                _remove_last_comma(result_str, index)

    # Show respect to original input if we are in python2
    return ("" if isinstance(json_str, str) else u"").join(result_str)
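Usage sketch (illustrative): the cleaned string parses as ordinary JSON.

import json

dirty = '''{
    "a": 1, // inline comment
    "b": 2  /* block comment */
}'''
print(json.loads(dispose(dirty)))  # {'a': 1, 'b': 2}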
955,495
Clear the current line.

Arguments:
    mode:  | 0 | 'forward' | 'right'  - Clear cursor to end of line.
           | 1 | 'backward' | 'left'  - Clear cursor to beginning of line.
           | 2 | 'full'               - Clear entire line.

Note:
    Cursor position does not change.

def clear_line(mode=2):
    text = sc.erase_line(_mode_map.get(mode, mode))
    _write(text)
    return text
955,511
Clear the terminal/console screen. (Also aliased to clear.)

Arguments:
    mode:  | 0 | 'forward'  - Clear cursor to end of screen, cursor stays.
           | 1 | 'backward' - Clear cursor to beginning of screen, ""
           | 2 | 'full'     - Clear entire visible screen, cursor to 1,1.
           | 3 | 'history'  - Clear entire visible screen and scrollback
                              buffer (xterm).

def clear_screen(mode=2):
    text = sc.erase(_mode_map.get(mode, mode))
    _write(text)
    return text
955,512
Set the title of the terminal window/tab/icon.

Arguments:
    title:  str
    mode:  | 0 | 'both'   - Set icon/taskbar and window/tab title
           | 1 | 'icon'   - Set only icon/taskbar title
           | 2 | 'title'  - Set only window/tab title

def set_title(title, mode=0):
    if os.name == 'nt':
        from .windows import set_title
        return set_title(title)
    else:
        if _CHOSEN_PALETTE:
            text = f'{OSC}{_title_mode_map.get(mode, mode)};{title}{BEL}'
            _write(text)
            return text
955,514
Waits for a keypress at the console and returns it.
"Where's the any key?"

Arguments:
    keys - If passed, wait for this specific key, e.g. ESC.
           May be a tuple.

Returns:
    char or ESC - depending on key hit.
    None - immediately under i/o redirection, not an interactive tty.

def wait_key(keys=None):
    if is_a_tty():
        if keys:
            if not isinstance(keys, tuple):
                keys = (keys,)
            while True:
                key = _getch()
                if key in keys:
                    return key
        else:
            return _getch()
955,516
Analogous to the ancient
`DOS pause <https://en.wikipedia.org/wiki/List_of_DOS_commands#PAUSE>`_
command, with a modifiable message.

Arguments:
    message:  str

Returns:
    str, None:  One character or ESC - depending on key hit.
                None - immediately under i/o redirection, not an
                interactive tty.

def pause(message='Press any key to continue…'):
    key = None
    print(message, end=' ', flush=True)
    if is_a_tty():  # not sure if both of these should check
        key = wait_key()

    print()
    return key
955,517
Override new() to replace the class entirely on deactivation.

Arguments:
    palettes - The palette(s) to support, e.g. from:
               ('basic', 'extended', 'truecolor').
               - Set explicitly with: str or sequence,
               - Disable with: None
               - Ellipsis - Autodetect environment.

def __new__(cls, palettes=Ellipsis):
    self = super().__new__(cls)
    if palettes is Ellipsis:            # autodetecten-Sie
        if _CHOSEN_PALETTE:             # enable "up to" the chosen palette level:
            palettes = get_available_palettes(_CHOSEN_PALETTE)
        else:
            self = empty_bin            # None, deactivate
            palettes = ()               # skipen-Sie bitte
    elif type(palettes) in (list, tuple):   # carry on fine sir
        pass
    elif type(palettes) is str:         # make iterable
        palettes = (palettes,)
    elif palettes is None:              # Ah, Shaddap-a ya face
        self = empty_bin
        palettes = ()                   # skipen-Sie
    else:
        raise TypeError(f'{palettes!r} was unrecognized.')

    self._palette_support = palettes
    return self
955,593
Converts a palette index to the corresponding ANSI color.

Arguments:
    index - an int (from 0-15)

Returns:
    index as str in a list for compatibility with values.

def _index_to_ansi_values(self, index):
    if self.__class__.__name__[0] == 'F':   # Foreground
        if index < 8:
            index += ANSI_FG_LO_BASE
        else:
            index += (ANSI_FG_HI_BASE - 8)  # 82
    else:                                   # Background
        if index < 8:
            index += ANSI_BG_LO_BASE
        else:
            index += (ANSI_BG_HI_BASE - 8)  # 92
    return [str(index)]
955,601
Formats text. Not appropriate for huge input strings.

Arguments:
    text                Original text.
    *styles             Add "mix-in" styles, per invocation.
    original_length     bool - Save original string length for later use.

Note:
    Color sequences are terminated at newlines,
    so that paging the output works correctly.

def __call__(self, text, *styles, original_length=False):
    # when an empty string is passed, don't emit codes.
    if not text:
        return ''

    # if the category of styles is different,
    # copy uses fx.end instead of palette.default, see addition:
    for attr in styles:
        self += attr

    pos = text.find('\n', 0, MAX_NL_SEARCH)  # if '\n' in text, w/limit
    if pos != -1:  # found
        lines = text.splitlines()
        for i, line in enumerate(lines):
            lines[i] = f'{self}{line}{self.default}'  # add styles, see tip
        result = '\n'.join(lines)
    else:
        result = f'{self}{text}{self.default}'

    if original_length:
        return _LengthyString(len(text), result)
    else:
        return result
955,609
Override new() to replace the class entirely on deactivation.

Complies with palette detection, unless force is on:

Arguments:
    force - Force on.

def __new__(cls, force=False):
    self = super().__new__(cls)
    if not force:
        if not _CHOSEN_PALETTE:
            self = empty_bin  # None, deactivate completely
    # else: continue on unabated
    return self
955,632
Returns current colors of console.
https://docs.microsoft.com/en-us/windows/console/getconsolescreenbufferinfo

Arguments:
    name: one of ('background', 'bg', 'foreground', 'fg')
    stream: Handle to stdout, stderr, etc.

Returns:
    int: a color id from the conhost palette.
         Ids under 0x8 (8) are dark colors, above light.

def get_color(name, stream=STD_OUTPUT_HANDLE):
    stream = kernel32.GetStdHandle(stream)
    csbi = CONSOLE_SCREEN_BUFFER_INFO()
    kernel32.GetConsoleScreenBufferInfo(stream, byref(csbi))
    color_id = csbi.wAttributes & _mask_map.get(name, name)
    log.debug('color_id from conhost: %d', color_id)
    if name in ('background', 'bg'):
        # integer divide by 16 to shift the background bits down to the
        # low nibble (floating-point division would break the dict lookup)
        color_id //= 16
        log.debug('color_id divided: %d', color_id)

    # convert to ansi order
    color_id = _win_to_ansi_offset_map.get(color_id, color_id)
    log.debug('ansi color_id: %d', color_id)
    return color_id
955,696
Look for clues in environment, e.g.:

- https://bixense.com/clicolors/
- http://no-color.org/

Arguments:
    envars: Additional environment variables to check for equality,
        i.e. ``MYAPP_COLOR_DISABLED='1'``

Returns:
    None, Bool: Disabled

def color_is_disabled(**envars):
    result = None
    if 'NO_COLOR' in env:
        result = True
    elif env.CLICOLOR == '0':
        result = True
    log.debug('%r (NO_COLOR=%s, CLICOLOR=%s)',
              result,
              env.NO_COLOR or '',
              env.CLICOLOR or '')
    for name, value in envars.items():
        envar = getattr(env, name)
        if envar.value == value:
            result = True
        log.debug('%s == %r: %r', name, value, result)

    return result
955,752
Look for clues in environment, e.g.:

- https://bixense.com/clicolors/

Arguments:
    envars: Additional environment variables to check for equality,
        i.e. ``MYAPP_COLOR_FORCED='1'``

Returns:
    Bool: Forced

def color_is_forced(**envars):
    result = env.CLICOLOR_FORCE and env.CLICOLOR_FORCE != '0'
    log.debug('%s (CLICOLOR_FORCE=%s)', result, env.CLICOLOR_FORCE or '')
    for name, value in envars.items():
        envar = getattr(env, name)
        if envar.value == value:
            result = True
        log.debug('%s == %r: %r', name, value, result)

    return result
955,753
Parses a range header into a list of two-tuples (start, stop) where
`start` is the starting byte of the range (inclusive) and `stop` is the
ending byte position of the range (exclusive).

Args:
    header (str): The HTTP_RANGE request header.
    resource_size (int): The size of the file in bytes.

Returns:
    None if the value of the header is not syntactically valid.

def parse_range_header(self, header, resource_size):
    if not header or '=' not in header:
        return None

    ranges = []
    units, range_ = header.split('=', 1)
    units = units.strip().lower()

    if units != 'bytes':
        return None

    for val in range_.split(','):
        val = val.strip()
        if '-' not in val:
            return None

        if val.startswith('-'):
            # suffix-byte-range-spec: this form specifies the last N bytes
            # of an entity-body.
            start = resource_size + int(val)
            if start < 0:
                start = 0
            stop = resource_size
        else:
            # byte-range-spec: first-byte-pos "-" [last-byte-pos].
            start, stop = val.split('-', 1)
            start = int(start)
            # The +1 is here since we want the stopping point to be
            # exclusive, whereas in the HTTP spec, the last-byte-pos
            # is inclusive.
            stop = int(stop) + 1 if stop else resource_size
            if start >= stop:
                return None

        ranges.append((start, stop))

    return ranges
955,785
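A quick check of the parsing semantics for a 1000-byte resource, assuming `reader` is an instance of the surrounding class:

reader.parse_range_header('bytes=0-499', 1000)   # -> [(0, 500)]
reader.parse_range_header('bytes=500-', 1000)    # -> [(500, 1000)]
reader.parse_range_header('bytes=-500', 1000)    # -> [(500, 1000)]
reader.parse_range_header('bytes=0-0,-1', 1000)  # -> [(0, 1), (999, 1000)]
reader.parse_range_header('items=0-499', 1000)   # -> None (unknown unit)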
The RangedFileResponse constructor also requires a request, which is inspected to decide whether range headers should be added to the response. Args: request (WSGIRequest): The Django request object. file (File): A file-like object.
def __init__(self, request, file, *args, **kwargs): self.ranged_file = RangedFileReader(file) super(RangedFileResponse, self).__init__(self.ranged_file, *args, **kwargs) if 'HTTP_RANGE' in request.META: self.add_range_headers(request.META['HTTP_RANGE'])
955,786
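A typical Django view sketch; the file path and content type are illustrative:

def stream_audio(request):
    # RangedFileResponse inspects request.META['HTTP_RANGE'] itself.
    response = RangedFileResponse(
        request, open('/srv/media/track.mp3', 'rb'),
        content_type='audio/mpeg')
    response['Content-Disposition'] = 'inline; filename="track.mp3"'
    return response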
Adds several headers that are necessary for a streaming file response, in order for Safari to play audio files. Also sets the HTTP status_code to 206 (partial content). Args: range_header (str): Browser HTTP_RANGE request header.
def add_range_headers(self, range_header): self['Accept-Ranges'] = 'bytes' size = self.ranged_file.size try: ranges = self.ranged_file.parse_range_header(range_header, size) except ValueError: ranges = None # Only handle syntactically valid headers, that are simple (no # multipart byteranges). if ranges is not None and len(ranges) == 1: start, stop = ranges[0] if start >= size: # Requested range not satisfiable. self.status_code = 416 return if stop >= size: stop = size self.ranged_file.start = start self.ranged_file.stop = stop self['Content-Range'] = 'bytes %d-%d/%d' % (start, stop - 1, size) self['Content-Length'] = stop - start self.status_code = 206
955,787
Given three integers representing R, G, and B, return the nearest color index. Arguments: r: int - of range 0…255 g: int - of range 0…255 b: int - of range 0…255 Returns: int, None: index, or None on error.
def find_nearest_color_index(r, g, b, color_table=None, method='euclid'):
    shortest_distance = 257*257*3  # just above the max squared distance, 255**2 * 3
    index = 0  # default to black
    if not color_table:
        if not color_table8:
            build_color_tables()
        color_table = color_table8

    for i, values in enumerate(color_table):
        rd = r - values[0]
        gd = g - values[1]
        bd = b - values[2]

        this_distance = (rd * rd) + (gd * gd) + (bd * bd)

        if this_distance < shortest_distance:  # closer
            index = i
            shortest_distance = this_distance

    return index
955,790
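For example, against the default 8-color table:

idx = find_nearest_color_index(255, 0, 0)
print(idx)  # index of the table entry closest to pure red, e.g. 1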
Given a three- or six-character hex digit string, return the nearest color index. Arguments: hexdigits: a three- or six-digit hex string, e.g. 'b0b', '123456' Returns: int, None: index, or None on error.
def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'): triplet = [] try: if len(hexdigits) == 3: for digit in hexdigits: digit = int(digit, 16) triplet.append((digit * 16) + digit) elif len(hexdigits) == 6: triplet.extend(int(hexdigits[i:i+2], 16) for i in (0, 2, 4)) else: raise ValueError('wrong length: %r' % hexdigits) except ValueError: return None return find_nearest_color_index(*triplet, color_table=color_table, method=method)
955,791
Start a server which will watch .md and .rst files for changes. If an .md file changes, the Home Documentation is rebuilt. If an .rst file changes, the updated Sphinx project is rebuilt. Args: args (ArgumentParser): flags from the CLI
def serve(args):
    # Server's parameters
    port = args.serve_port or PORT
    host = "0.0.0.0"

    # Current working directory
    dir_path = Path().absolute()
    web_dir = dir_path / "site"

    # Update routes
    utils.set_routes()

    # Offline mode
    if args.offline:
        os.environ["MKINX_OFFLINE"] = "true"
        _ = subprocess.check_output("mkdocs build > /dev/null", shell=True)
        utils.make_offline()

    class MkinxHTTPHandler(SimpleHTTPRequestHandler):
        def translate_path(self, path):
            # default root -> cwd
            location = str(web_dir)
            route = location

            if len(path) != 0 and path != "/":
                for key, loc in utils.get_routes():
                    if path.startswith(key):
                        location = loc
                        path = path[len(key):]
                        break

            if location[-1] == "/" or not path or path[0] == "/":
                route = location + path
            else:
                route = location + "/" + path

            return route.split("?")[0]

    # Serve as daemon thread
    success = False
    count = 0
    print("Waiting for server port...")
    try:
        while not success:
            try:
                httpd = socketserver.TCPServer((host, port), MkinxHTTPHandler)
                success = True
            except OSError:
                count += 1
            finally:
                if not success and count > 20:
                    s = "port {} seems occupied. Try with {} ? (y/n)"
                    if "y" in input(s.format(port, port + 1)):
                        port += 1
                        count = 0
                    else:
                        print("You can specify a custom port with mkinx serve -s")
                        return
                time.sleep(0.5)
    except KeyboardInterrupt:
        print("Aborting.")
        return

    httpd.allow_reuse_address = True
    print("\nServing at http://{}:{}\n".format(host, port))
    thread = threading.Thread(target=httpd.serve_forever)
    thread.daemon = True
    thread.start()

    # Watch for changes
    event_handler = utils.MkinxFileHandler(
        patterns=["*.rst", "*.md", "*.yml", "*.yaml"]
    )
    observer = Observer()
    observer.schedule(event_handler, path=str(dir_path), recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        httpd.server_close()
    observer.join()
955,845
Initialize a Home Documentation's folder Args: args (ArgumentParser): Flags from the CLI
def init(args): # working directory dir_path = Path().absolute() if not args.project_name or args.project_name.find("/") >= 0: print( "{}You should specify a valid project name{}".format( utils.colors.FAIL, utils.colors.ENDC ) ) return project_path = dir_path / args.project_name # Create the Home Documentation's directory if not project_path.exists(): project_path.mkdir() else: print( "{}This project already exists{}".format( utils.colors.FAIL, utils.colors.ENDC ) ) return # Directory with the Home Documentation's source code home_doc_path = project_path / "docs" home_doc_path.mkdir() help_doc_path = home_doc_path / "help" help_doc_path.mkdir() file_path = Path(__file__).resolve().parent / "include" # Add initial files copyfile(file_path / "index.md", home_doc_path / "index.md") copyfile(file_path / "How_To_Use_Mkinx.md", help_doc_path / "How_To_Use_Mkinx.md") copyfile( file_path / "Writing_Sphinx_Documentation.md", help_doc_path / "Writing_Sphinx_Documentation.md", ) with open(file_path / "mkdocs.yml", "r") as f: lines = f.readlines() input_text = "What is your Documentation's name" input_text += " (it can be changed later in mkdocs.yml)?\n" input_text += "[Default: {} - Home Documentation]\n" site_name = input(input_text.format(args.project_name.capitalize())) if not site_name: site_name = "{} - Home Documentation".format(args.project_name.capitalize()) lines[0] = "site_name: {}\n".format(site_name) with open(project_path / "mkdocs.yml", "w") as f: f.writelines(lines) example_project_path = project_path / "example_project" / "example_project" windows = "y" if sys.platform in {"win32", "cygwin"} else "n" copytree(file_path / "example_project", example_project_path) move(str(example_project_path / "source"), str(project_path / "example_project")) move( str(project_path / "example_project" / "example_project" / "Makefile"), str(project_path / "example_project"), ) if windows == "y": move( str(project_path / "example_project" / "example_project" / "make.bat"), str(project_path / "example_project"), ) else: os.remove( str(project_path / "example_project" / "example_project" / "make.bat") ) static = project_path / "example_project" / "source" static /= "_static" if not static.exists(): static.mkdir() _ = subprocess.check_output( "cd {} && mkinx build -F -A > /dev/null".format(args.project_name), shell=True ) print( "\n\n", utils.colors.OKBLUE, "{}/{} created as a showcase of how mkinx works".format( args.project_name, "example_project" ), utils.colors.ENDC, ) print( "\n", utils.colors.OKGREEN, "Success!", utils.colors.ENDC, "You can now start your Docs in ./{}\n".format(args.project_name), utils.colors.HEADER, "$ cd ./{}".format(args.project_name), utils.colors.ENDC, ) print( " Start the server from within your Docs to see them \n (default", "port is 8443 but you can change it with the -s flag):", ) print( utils.colors.HEADER, " {} $ mkinx serve\n".format(args.project_name), utils.colors.ENDC, )
955,847
Train and validate the LR on a train and test dataset Args: X_train (np.array): Training data Y_train (np.array): Training labels X_test (np.array): Test data Y_test (np.array): Test labels
def train(self, X_train, Y_train, X_test, Y_test): while True: print(1) time.sleep(1) if random.randint(0, 9) >= 5: break
955,852
In the project's built index.html file, replace the top "source" link with a link to the documentation's home, which is mkdocs's home. Args: project (str): project to update dir_path (pathlib.Path): this file's path
def overwrite_view_source(project, dir_path): project_html_location = dir_path / project / HTML_LOCATION if not project_html_location.exists(): return files_to_overwrite = [ f for f in project_html_location.iterdir() if "html" in f.suffix ] for html_file in files_to_overwrite: with open(html_file, "r") as f: html = f.readlines() for i, l in enumerate(html): if TO_REPLACE_WITH_HOME in l: html[i] = NEW_HOME_LINK break with open(html_file, "w") as f: f.writelines(html)
955,910
Calculates the entropy of a string based on known frequency of English letters. Args: entropy_string: A str representing the string to calculate. Returns: A non-negative float with the total entropy of the string (higher is better).
def calculate_entropy(self, entropy_string): total = 0 for char in entropy_string: if char.isalpha(): prob = self.frequency[char.lower()] total += - math.log(prob) / math.log(2) logging.debug("Entropy score: {0}".format(total)) return total
956,337
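A self-contained sketch of the same scoring idea; the frequency table below is a small illustrative subset of approximate English letter frequencies, not the class's actual table:

import math

FREQUENCY = {'e': 0.127, 't': 0.091, 'a': 0.082, 'o': 0.075, 'z': 0.001}

def entropy_score(text):
    total = 0.0
    for char in text:
        prob = FREQUENCY.get(char.lower())
        if prob:
            total += -math.log(prob) / math.log(2)
    return total

print(entropy_score('eat'))  # common letters -> modest per-letter surprise
print(entropy_score('zzz'))  # rare letters -> much higher score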
Initialize the journal maker object. Appends the first lines in the journal (JrnObj variable and timestamp) to the _journal_contents. Args: permissive (bool): if True most errors in journal will not cause Revit to stop journal execution. Some still do.
def __init__(self, permissive=True): self._journal_contents = '' self._init_journal(permissive=permissive)
956,447
Add the initialization lines to the journal. By default adds JrnObj variable and timestamp to the journal contents. Args: permissive (bool): if True most errors in journal will not cause Revit to stop journal execution. Some still do.
def _init_journal(self, permissive=True): nowstamp = datetime.now().strftime("%d-%b-%Y %H:%M:%S.%f")[:-3] self._add_entry(templates.INIT .format(time_stamp=nowstamp)) if permissive: self._add_entry(templates.INIT_DEBUG)
956,448
Append a new file from .rft entry to the journal. This instructs Revit to create a new model based on the provided .rft template. Args: base_template (str): new file journal template from rmj.templates rft_file (str): full path to .rft template to be used
def _new_from_rft(self, base_template, rft_file): self._add_entry(base_template) self._add_entry(templates.NEW_FROM_RFT .format(rft_file_path=rft_file, rft_file_name=op.basename(rft_file)))
956,449
Append a new model from .rft entry to the journal. This instructs Revit to create a new model based on the provided .rft template. Args: template_name (str): optional full path to .rft template to be used. default value is <None>
def new_model(self, template_name='<None>'): self._add_entry(templates.NEW_MODEL .format(template_name=template_name))
956,450
Append a new template from .rft entry to the journal. This instructs Revit to create a new template model based on the provided .rft template. Args: template_name (str): optional full path to .rft template to be used. default value is <None>
def new_template(self, template_name='<None>'): self._add_entry(templates.NEW_MODEL_TEMPLATE .format(template_name=template_name))
956,451
Append an open workshared model entry to the journal. This instructs Revit to open a workshared model. Args: model_path (str): full path to workshared model central (bool): if True opens central model and not local detached (bool): if True opens a detached model keep_worksets (bool): if True keeps worksets when detaching audit (bool): if True audits the model when opening show_workset_config (int): workset configuration shown when opening (default 1)
def open_workshared_model(self, model_path, central=False, detached=False, keep_worksets=True, audit=False, show_workset_config=1): if detached: if audit: if keep_worksets: self._add_entry( templates.CENTRAL_OPEN_DETACH_AUDIT .format(model_path=model_path, workset_config=show_workset_config) ) else: self._add_entry( templates.CENTRAL_OPEN_DETACH_AUDIT_DISCARD .format(model_path=model_path, workset_config=show_workset_config) ) else: if keep_worksets: self._add_entry( templates.CENTRAL_OPEN_DETACH .format(model_path=model_path, workset_config=show_workset_config) ) else: self._add_entry( templates.CENTRAL_OPEN_DETACH_DISCARD .format(model_path=model_path, workset_config=show_workset_config) ) elif central: if audit: self._add_entry( templates.CENTRAL_OPEN_AUDIT .format(model_path=model_path, workset_config=show_workset_config) ) else: self._add_entry( templates.CENTRAL_OPEN .format(model_path=model_path, workset_config=show_workset_config) ) else: if audit: self._add_entry( templates.WORKSHARED_OPEN_AUDIT .format(model_path=model_path, workset_config=show_workset_config) ) else: self._add_entry( templates.WORKSHARED_OPEN .format(model_path=model_path, workset_config=show_workset_config) )
956,452
Append an open non-workshared model entry to the journal. This instructs Revit to open a non-workshared model. Args: model_path (str): full path to non-workshared model audit (bool): if True audits the model when opening
def open_model(self, model_path, audit=False): if audit: self._add_entry(templates.FILE_OPEN_AUDIT .format(model_path=model_path)) else: self._add_entry(templates.FILE_OPEN .format(model_path=model_path))
956,453
Append an import family entry to the journal. This instructs Revit to import a family into the opened model. Args: rfa_file (str): full path of the family file
def import_family(self, rfa_file): self._add_entry(templates.IMPORT_FAMILY .format(family_file=rfa_file))
956,456
Append an export warnings entry to the journal. This instructs Revit to export warnings from the opened model. Currently Revit will stop journal execution if the model does not have any warnings and the export warnings UI button is disabled. Args: export_file (str): full path of the output html file
def export_warnings(self, export_file): warn_filepath = op.dirname(export_file) warn_filename = op.splitext(op.basename(export_file))[0] self._add_entry(templates.EXPORT_WARNINGS .format(warnings_export_path=warn_filepath, warnings_export_file=warn_filename))
956,457
Append a purge model entry to the journal. This instructs Revit to purge the open model. Args: pass_count (int): number of times to execute the purge. default is 3
def purge_unused(self, pass_count=3):
    for _ in range(pass_count):
        self._add_entry(templates.PROJECT_PURGE)
956,458
Append a sync model entry to the journal. This instructs Revit to sync the currently open workshared model. Args: comment (str): comment to be provided for the sync step compact_central (bool): if True compacts the central file release_borrowed (bool): if True releases the borrowed elements release_workset (bool): if True releases the borrowed worksets save_local (bool): if True saves the local file as well
def sync_model(self, comment='', compact_central=False, release_borrowed=True, release_workset=True, save_local=False): self._add_entry(templates.FILE_SYNC_START) if compact_central: self._add_entry(templates.FILE_SYNC_COMPACT) if release_borrowed: self._add_entry(templates.FILE_SYNC_RELEASE_BORROWED) if release_workset: self._add_entry(templates.FILE_SYNC_RELEASE_USERWORKSETS) if save_local: self._add_entry(templates.FILE_SYNC_RELEASE_SAVELOCAL) self._add_entry(templates.FILE_SYNC_COMMENT_OK .format(sync_comment=comment))
956,459
Write the constructed journal in to the provided file. Args: journal_file_path (str): full path to output journal file
def write_journal(self, journal_file_path): # TODO: assert the extension is txt and not other with open(journal_file_path, "w") as jrn_file: jrn_file.write(self._journal_contents)
956,460
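An end-to-end sketch, assuming the surrounding class is named JournalMaker; the paths are illustrative:

jm = JournalMaker(permissive=True)
jm.open_workshared_model(r'C:\models\tower_central.rvt', audit=True)
jm.purge_unused(pass_count=2)
jm.sync_model(comment='Automated cleanup', compact_central=True)
jm.write_journal(r'C:\journals\cleanup.txt')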
Check whether the provided string exists in the journal file. Only checks the last 5 lines of the journal file. This method is usually used when tracking a journal from an active Revit session. Args: search_str (str): string to search for Returns: bool: True if the search string is found
def endswith(self, search_str):
    with open(self._jrnl_file, 'r') as jrn_file:
        last_entries = jrn_file.readlines()[-5:]
    for entry in reversed(last_entries):
        if search_str in entry:
            return True
    return False
956,461
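A polling sketch for tracking a live session; `watcher` is assumed to be an instance of the surrounding class pointed at an active session's journal, and the marker string is hypothetical:

import time

while not watcher.endswith('finished recording'):  # hypothetical marker
    time.sleep(1)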
Returns an HTML snippet for an environment variable. Args: key: A string representing an environment variable name. Returns: String HTML representing the value and variable.
def html_for_env_var(key): value = os.getenv(key) return KEY_VALUE_TEMPLATE.format(key, value)
957,151
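For instance, assuming KEY_VALUE_TEMPLATE is a simple '{}: {}<br>' format string (an assumption; the real template lives elsewhere in the module):

print(html_for_env_var('PATH'))
# -> 'PATH: /usr/local/bin:...<br>'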
Returns an HTML snippet for a CGI argument. Args: argument: A string representing a CGI argument name in a form. form: A CGI FieldStorage object. Returns: String HTML representing the CGI value and variable.
def html_for_cgi_argument(argument, form): value = form[argument].value if argument in form else None return KEY_VALUE_TEMPLATE.format(argument, value)
957,152
Returns an HTML snippet for a Modules API method. Args: method_name: A string containing a Modules API method. args: Positional arguments to be passed to the method. kwargs: Keyword arguments to be passed to the method. Returns: String HTML representing the Modules API method and value.
def html_for_modules_method(method_name, *args, **kwargs): method = getattr(modules, method_name) value = method(*args, **kwargs) return KEY_VALUE_TEMPLATE.format(method_name, value)
957,153
Youku errors should be returned in JSON form, like: HTTP 400 { "error":{ "code":120010223, "type":"UploadsException", "description":"Expired upload token" } } But the error may also appear in the response URL params or the response body. Content-Type may be application/json or text/plain, so don't rely on it. Args: expect_status: normally 200 or 201
def check_error(response, expect_status=200):
    json = None
    try:
        json = response.json()
    except ValueError:  # body is not valid JSON
        pass
    if (response.status_code != expect_status
            or response.status_code == 400
            or (json and 'error' in json)):
        if json and 'error' in json:
            error = json['error']
            raise YoukuError(error['code'], error['type'],
                             error['description'], response.status_code)
        else:
            # try to parse error from body
            error = parse_qs(response.text)
            raise YoukuError(error.get('code', [None])[0],
                             error.get('type', [None])[0],
                             error.get('description', [None])[0],
                             response.status_code)
957,338
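A usage sketch; the endpoint and parameters below are examples only:

import requests

response = requests.get('https://openapi.youku.com/v2/videos/show.json',
                        params={'client_id': 'xxxx', 'video_id': 'yyyy'})
check_error(response, expect_status=200)  # raises YoukuError on failure
data = response.json()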