Code: string (lengths 103 to 85.9k)
Summary: sequence (lengths 0 to 94)
Please provide a description of the function:
def _select_better_fit(self, matching_venvs):
    # keep the venvs in a separate array, to pick up the winner, and the (sorted, to compare
    # each dependency with its equivalent) in other structure to later compare
    venvs = []
    to_compare = []
    for matching, venv in matching_venvs:
        to_compare.append(sorted(matching, key=lambda req: getattr(req, 'key', '')))
        venvs.append(venv)

    # compare each n-tuple of dependencies to see which one is bigger, and add score to the
    # position of the winner
    scores = [0] * len(venvs)
    for dependencies in zip(*to_compare):
        if not isinstance(dependencies[0], Distribution):
            # only distribution URLs can be compared
            continue

        winner = dependencies.index(max(dependencies))
        scores[winner] = scores[winner] + 1

    # get the rightmost winner (in case of ties, to select the latest venv)
    winner_pos = None
    winner_score = -1
    for i, score in enumerate(scores):
        if score >= winner_score:
            winner_score = score
            winner_pos = i
    return venvs[winner_pos]
[ "Receive a list of matching venvs, and decide which one is the best fit." ]
Please provide a description of the function:
def _match_by_requirements(self, current_venvs, requirements, interpreter, options):
    matching_venvs = []
    for venv_str in current_venvs:
        venv = json.loads(venv_str)

        # simple filter, need to have exactly same options and interpreter
        if venv.get('options') != options or venv.get('interpreter') != interpreter:
            continue

        # requirements complying: result can be None (no comply) or a score to later sort
        matching = self._venv_match(venv['installed'], requirements)
        if matching is not None:
            matching_venvs.append((matching, venv))

    if not matching_venvs:
        return

    return self._select_better_fit(matching_venvs)
[ "Select a venv matching interpreter and options, complying with requirements.\n\n Several venvs can be found in this case, will return the better fit.\n " ]
Please provide a description of the function:
def _select(self, current_venvs, requirements=None, interpreter='', uuid='', options=None):
    if uuid:
        logger.debug("Searching a venv by uuid: %s", uuid)
        venv = self._match_by_uuid(current_venvs, uuid)
    else:
        logger.debug("Searching a venv for: reqs=%s interpreter=%s options=%s",
                     requirements, interpreter, options)
        venv = self._match_by_requirements(current_venvs, requirements, interpreter, options)

    if venv is None:
        logger.debug("No matching venv found :(")
        return

    logger.debug("Found a matching venv! %s", venv)
    return venv['metadata']
[ "Select which venv satisfy the received requirements." ]
Please provide a description of the function:
def get_venv(self, requirements=None, interpreter='', uuid='', options=None):
    lines = self._read_cache()
    return self._select(lines, requirements, interpreter, uuid=uuid, options=options)
[ "Find a venv that serves these requirements, if any." ]
Please provide a description of the function:
def store(self, installed_stuff, metadata, interpreter, options):
    new_content = {
        'timestamp': int(time.mktime(time.localtime())),
        'installed': installed_stuff,
        'metadata': metadata,
        'interpreter': interpreter,
        'options': options
    }
    logger.debug("Storing installed=%s metadata=%s interpreter=%s options=%s",
                 installed_stuff, metadata, interpreter, options)
    with filelock(self.lockpath):
        self._write_cache([json.dumps(new_content)], append=True)
[ "Store the virtualenv metadata for the indicated installed_stuff." ]
Please provide a description of the function:
def remove(self, env_path):
    with filelock(self.lockpath):
        cache = self._read_cache()
        logger.debug("Removing virtualenv from cache: %s" % env_path)
        lines = [
            line for line in cache
            if json.loads(line).get('metadata', {}).get('env_path') != env_path
        ]
        self._write_cache(lines)
[ "Remove metadata for a given virtualenv from cache." ]
Please provide a description of the function:
def _read_cache(self):
    if os.path.exists(self.filepath):
        with open(self.filepath, 'rt', encoding='utf8') as fh:
            lines = [x.strip() for x in fh]
    else:
        logger.debug("Index not found, starting empty")
        lines = []
    return lines
[ "Read virtualenv metadata from cache." ]
Please provide a description of the function:
def _write_cache(self, lines, append=False):
    mode = 'at' if append else 'wt'
    with open(self.filepath, mode, encoding='utf8') as fh:
        fh.writelines(line + '\n' for line in lines)
[ "Write virtualenv metadata to cache." ]
Please provide a description of the function:
def install(self, dependency):
    if not self.pip_installed:
        logger.info("Need to install a dependency with pip, but no builtin, "
                    "doing it manually (just wait a little, all should go well)")
        self._brute_force_install_pip()

    # split to pass several tokens on multiword dependency (this is very specific for '-e' on
    # external requirements, but implemented generically; note that this does not apply for
    # normal reqs, because even if it originally is 'foo > 1.2', after parsing it loses the
    # internal spaces)
    str_dep = str(dependency)
    args = [self.pip_exe, "install"] + str_dep.split()

    if self.options:
        for option in self.options:
            args.extend(option.split())
    logger.info("Installing dependency: %r", str_dep)
    try:
        helpers.logged_exec(args)
    except helpers.ExecutionError as error:
        error.dump_to_log(logger)
        raise error
    except Exception as error:
        logger.exception("Error installing %s: %s", str_dep, error)
        raise error
[ "Install a new dependency." ]
Please provide a description of the function:
def get_version(self, dependency):
    logger.debug("getting installed version for %s", dependency)
    stdout = helpers.logged_exec([self.pip_exe, "show", str(dependency)])
    version = [line for line in stdout if line.startswith('Version:')]
    if len(version) == 1:
        version = version[0].strip().split()[1]
        logger.debug("Installed version of %s is: %s", dependency, version)
        return version
    else:
        logger.error('Fades is having problems getting the installed version. '
                     'Run with -v or check the logs for details')
        return ''
[ "Return the installed version parsing the output of 'pip show'." ]
Please provide a description of the function:
def _brute_force_install_pip(self):
    if os.path.exists(self.pip_installer_fname):
        logger.debug("Using pip installer from %r", self.pip_installer_fname)
    else:
        logger.debug(
            "Installer for pip not found in %r, downloading it", self.pip_installer_fname)
        self._download_pip_installer()

    logger.debug("Installing PIP manually in the virtualenv")
    python_exe = os.path.join(self.env_bin_path, "python")
    helpers.logged_exec([python_exe, self.pip_installer_fname, '-I'])
    self.pip_installed = True
[ "A brute force install of pip itself." ]
Please provide a description of the function:
def _generate_configs_from_default(self, overrides=None):
    # type: (Dict[str, int]) -> Dict[str, int]
    config = DEFAULT_CONFIG.copy()
    if not overrides:
        overrides = {}
    for k, v in overrides.items():
        config[k] = v
    return config
[ " Generate configs by inheriting from defaults " ]
Please provide a description of the function:
def read_ical(self, ical_file_location):
    # type: (str) -> Calendar
    with open(ical_file_location, 'r') as ical_file:
        data = ical_file.read()
    self.cal = Calendar.from_ical(data)
    return self.cal
[ " Read the ical file " ]
Please provide a description of the function:
def read_csv(self, csv_location, csv_configs=None):
    # type: (str, Dict[str, int]) -> List[List[str]]
    csv_configs = self._generate_configs_from_default(csv_configs)
    with open(csv_location, 'r') as csv_file:
        csv_reader = csv.reader(csv_file)
        self.csv_data = list(csv_reader)
    self.csv_data = self.csv_data[csv_configs['HEADER_COLUMNS_TO_SKIP']:]
    return self.csv_data
[ " Read the csv file " ]
Please provide a description of the function:
def make_ical(self, csv_configs=None):
    # type: (Dict[str, int]) -> Calendar
    csv_configs = self._generate_configs_from_default(csv_configs)
    self.cal = Calendar()
    for row in self.csv_data:
        event = Event()
        event.add('summary', row[csv_configs['CSV_NAME']])
        event.add('dtstart', row[csv_configs['CSV_START_DATE']])
        event.add('dtend', row[csv_configs['CSV_END_DATE']])
        event.add('description', row[csv_configs['CSV_DESCRIPTION']])
        event.add('location', row[csv_configs['CSV_LOCATION']])
        self.cal.add_component(event)
    return self.cal
[ " Make iCal entries " ]
Please provide a description of the function:
def make_csv(self):
    # type: () -> None
    for event in self.cal.subcomponents:
        if event.name != 'VEVENT':
            continue
        row = [
            event.get('SUMMARY'),
            event.get('DTSTART').dt,
            event.get('DTEND').dt,
            event.get('DESCRIPTION'),
            event.get('LOCATION'),
        ]
        row = [str(x) for x in row]
        self.csv_data.append(row)
[ " Make CSV " ]
Please provide a description of the function:
def save_ical(self, ical_location):
    # type: (str) -> None
    data = self.cal.to_ical()
    with open(ical_location, 'w') as ical_file:
        ical_file.write(data.decode('utf-8'))
[ " Save the calendar instance to a file " ]
Please provide a description of the function:
def save_csv(self, csv_location):
    # type: (str) -> None
    with open(csv_location, 'w') as csv_handle:
        writer = csv.writer(csv_handle)
        for row in self.csv_data:
            writer.writerow(row)
[ " Save the csv to a file " ]
Please provide a description of the function:
def open(cls, filename):
    if filename.endswith('.gz'):
        fp = gzip.open(filename, 'rb')
        try:
            return cls(fp, filename, compression='gz')
        finally:
            fp.close()
    elif filename.endswith('.bz2'):
        fp = bz2.BZ2File(filename, 'rb')
        try:
            return cls(fp, filename, compression='bz2')
        finally:
            fp.close()
    else:
        with open(filename, 'rb') as fp:
            return cls(fp, filename)
[ " Read an image file from disk\n\n Parameters\n ----------\n filename : string\n Name of file to read as an image file. This file may be gzip\n (``.gz``) or bzip2 (``.bz2``) compressed.\n " ]
Please provide a description of the function:
def image(self):
    if self.bands == 1:
        return self.data.squeeze()
    elif self.bands == 3:
        return numpy.dstack(self.data)
[ "An Image like array of ``self.data`` convenient for image processing tasks\n\n * 2D array for single band, grayscale image data\n * 3D array for three band, RGB image data\n\n Enables working with ``self.data`` as if it were a PIL image.\n\n See https://planetaryimage.readthedocs.io/en/latest/usage.html to see\n how to open images to view them and make manipulations.\n\n " ]
Please provide a description of the function:
def apply_numpy_specials(self, copy=True):
    if copy:
        data = self.data.astype(numpy.float64)
    elif self.data.dtype != numpy.float64:
        data = self.data = self.data.astype(numpy.float64)
    else:
        data = self.data

    data[data == self.specials['Null']] = numpy.nan
    data[data < self.specials['Min']] = numpy.NINF
    data[data > self.specials['Max']] = numpy.inf

    return data
[ "Convert isis special pixel values to numpy special pixel values.\n\n ======= =======\n Isis Numpy\n ======= =======\n Null nan\n Lrs -inf\n Lis -inf\n His inf\n Hrs inf\n ======= =======\n\n Parameters\n ----------\n copy : bool [True]\n Whether to apply the new special values to a copy of the\n pixel data and leave the original unaffected\n\n Returns\n -------\n Numpy Array\n A numpy array with special values converted to numpy's nan, inf,\n and -inf\n " ]
Please provide a description of the function:
def parse(cls, value, record_bytes):
    if isinstance(value, six.string_types):
        return cls(value, 0)

    if isinstance(value, list):
        if len(value) == 1:
            return cls(value[0], 0)
        if len(value) == 2:
            return cls(value[0], cls._parse_bytes(value[1], record_bytes))
        raise ValueError('Unsupported pointer type')

    return cls(None, cls._parse_bytes(value, record_bytes))
[ "Parses the pointer label.\n\n Parameters\n ----------\n pointer_data\n Supported values for `pointer_data` are::\n\n ^PTR = nnn\n ^PTR = nnn <BYTES>\n ^PTR = \"filename\"\n ^PTR = (\"filename\")\n ^PTR = (\"filename\", nnn)\n ^PTR = (\"filename\", nnn <BYTES>)\n\n record_bytes\n Record multiplier value\n\n Returns\n -------\n Pointer object\n " ]
Please provide a description of the function:def _save(self, file_to_write, overwrite): if overwrite: file_to_write = self.filename elif os.path.isfile(file_to_write): msg = 'File ' + file_to_write + ' already exists !\n' + \ 'Call save() with "overwrite = True" to overwrite the file.' raise IOError(msg) encoder = pvl.encoder.PDSLabelEncoder serial_label = pvl.dumps(self.label, cls=encoder) label_sz = len(serial_label) image_pointer = int(label_sz / self.label['RECORD_BYTES']) + 1 self.label['^IMAGE'] = image_pointer + 1 if self._sample_bytes != self.label['IMAGE']['SAMPLE_BITS'] * 8: self.label['IMAGE']['SAMPLE_BITS'] = self.data.itemsize * 8 sample_type_to_save = self.DTYPES[self._sample_type[0] + self.dtype.kind] self.label['IMAGE']['SAMPLE_TYPE'] = sample_type_to_save if len(self.data.shape) == 3: self.label['IMAGE']['BANDS'] = self.data.shape[0] self.label['IMAGE']['LINES'] = self.data.shape[1] self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[2] else: self.label['IMAGE']['BANDS'] = 1 self.label['IMAGE']['LINES'] = self.data.shape[0] self.label['IMAGE']['LINE_SAMPLES'] = self.data.shape[1] diff = 0 if len(pvl.dumps(self.label, cls=encoder)) != label_sz: diff = abs(label_sz - len(pvl.dumps(self.label, cls=encoder))) pvl.dump(self.label, file_to_write, cls=encoder) offset = image_pointer * self.label['RECORD_BYTES'] - label_sz stream = open(file_to_write, 'a') for i in range(0, offset+diff): stream.write(" ") if (self._bands > 1 and self._format != 'BAND_SEQUENTIAL'): raise NotImplementedError else: self.data.tofile(stream, format='%' + self.dtype.kind) stream.close()
[ "Save PDS3Image object as PDS3 file.\n\n Parameters\n ----------\n filename: Set filename for the pds image to be saved.\n Overwrite: Use this keyword to save image with same filename.\n\n Usage: image.save('temp.IMG', overwrite=True)\n\n " ]
Please provide a description of the function:def _create_label(self, array): if len(array.shape) == 3: bands = array.shape[0] lines = array.shape[1] line_samples = array.shape[2] else: bands = 1 lines = array.shape[0] line_samples = array.shape[1] record_bytes = line_samples * array.itemsize label_module = pvl.PVLModule([ ('PDS_VERSION_ID', 'PDS3'), ('RECORD_TYPE', 'FIXED_LENGTH'), ('RECORD_BYTES', record_bytes), ('LABEL_RECORDS', 1), ('^IMAGE', 1), ('IMAGE', {'BANDS': bands, 'LINES': lines, 'LINE_SAMPLES': line_samples, 'MAXIMUM': 0, 'MEAN': 0, 'MEDIAN': 0, 'MINIMUM': 0, 'SAMPLE_BITS': array.itemsize * 8, 'SAMPLE_TYPE': 'MSB_INTEGER', 'STANDARD_DEVIATION': 0}) ]) return self._update_label(label_module, array)
[ "Create sample PDS3 label for NumPy Array.\n It is called by 'image.py' to create PDS3Image object\n from Numpy Array.\n\n Returns\n -------\n PVLModule label for the given NumPy array.\n\n Usage: self.label = _create_label(array)\n\n " ]
Please provide a description of the function:
def _update_label(self, label, array):
    maximum = float(numpy.max(array))
    mean = float(numpy.mean(array))
    median = float(numpy.median(array))
    minimum = float(numpy.min(array))
    stdev = float(numpy.std(array, ddof=1))

    encoder = pvl.encoder.PDSLabelEncoder
    serial_label = pvl.dumps(label, cls=encoder)
    label_sz = len(serial_label)
    image_pointer = int(label_sz / label['RECORD_BYTES']) + 1

    label['^IMAGE'] = image_pointer + 1
    label['LABEL_RECORDS'] = image_pointer

    label['IMAGE']['MEAN'] = mean
    label['IMAGE']['MAXIMUM'] = maximum
    label['IMAGE']['MEDIAN'] = median
    label['IMAGE']['MINIMUM'] = minimum
    label['IMAGE']['STANDARD_DEVIATION'] = stdev

    return label
[ "Update PDS3 label for NumPy Array.\n It is called by '_create_label' to update label values\n such as,\n - ^IMAGE, RECORD_BYTES\n - STANDARD_DEVIATION\n - MAXIMUM, MINIMUM\n - MEDIAN, MEAN\n\n Returns\n -------\n Update label module for the NumPy array.\n\n Usage: self.label = self._update_label(label, array)\n\n " ]
Please provide a description of the function:
def dtype(self):
    try:
        return self.data.dtype
    except AttributeError:
        return numpy.dtype('%s%d' % (self._sample_type, self._sample_bytes))
[ "Pixel data type." ]
Please provide a description of the function:def derive_key(mode, version, salt, key, private_key, dh, auth_secret, keyid, keylabel="P-256"): context = b"" keyinfo = "" nonceinfo = "" def build_info(base, info_context): return b"Content-Encoding: " + base + b"\0" + info_context def derive_dh(mode, version, private_key, dh, keylabel): def length_prefix(key): return struct.pack("!H", len(key)) + key if isinstance(dh, ec.EllipticCurvePublicKey): pubkey = dh dh = dh.public_bytes( Encoding.X962, PublicFormat.UncompressedPoint) else: pubkey = ec.EllipticCurvePublicKey.from_encoded_point( ec.SECP256R1(), dh ) encoded = private_key.public_key().public_bytes( Encoding.X962, PublicFormat.UncompressedPoint) if mode == "encrypt": sender_pub_key = encoded receiver_pub_key = dh else: sender_pub_key = dh receiver_pub_key = encoded if version == "aes128gcm": context = b"WebPush: info\x00" + receiver_pub_key + sender_pub_key else: context = (keylabel.encode('utf-8') + b"\0" + length_prefix(receiver_pub_key) + length_prefix(sender_pub_key)) return private_key.exchange(ec.ECDH(), pubkey), context if version not in versions: raise ECEException(u"Invalid version") if mode not in ['encrypt', 'decrypt']: raise ECEException(u"unknown 'mode' specified: " + mode) if salt is None or len(salt) != KEY_LENGTH: raise ECEException(u"'salt' must be a 16 octet value") if dh is not None: if private_key is None: raise ECEException(u"DH requires a private_key") (secret, context) = derive_dh(mode=mode, version=version, private_key=private_key, dh=dh, keylabel=keylabel) else: secret = key if secret is None: raise ECEException(u"unable to determine the secret") if version == "aesgcm": keyinfo = build_info(b"aesgcm", context) nonceinfo = build_info(b"nonce", context) elif version == "aesgcm128": keyinfo = b"Content-Encoding: aesgcm128" nonceinfo = b"Content-Encoding: nonce" elif version == "aes128gcm": keyinfo = b"Content-Encoding: aes128gcm\x00" nonceinfo = b"Content-Encoding: nonce\x00" if dh is None: # Only mix the authentication secret when using DH for aes128gcm auth_secret = None if auth_secret is not None: if version == "aes128gcm": info = context else: info = build_info(b'auth', b'') hkdf_auth = HKDF( algorithm=hashes.SHA256(), length=32, salt=auth_secret, info=info, backend=default_backend() ) secret = hkdf_auth.derive(secret) hkdf_key = HKDF( algorithm=hashes.SHA256(), length=KEY_LENGTH, salt=salt, info=keyinfo, backend=default_backend() ) hkdf_nonce = HKDF( algorithm=hashes.SHA256(), length=NONCE_LENGTH, salt=salt, info=nonceinfo, backend=default_backend() ) return hkdf_key.derive(secret), hkdf_nonce.derive(secret)
[ "Derive the encryption key\n\n :param mode: operational mode (encrypt or decrypt)\n :type mode: enumerate('encrypt', 'decrypt)\n :param salt: encryption salt value\n :type salt: str\n :param key: raw key\n :type key: str\n :param private_key: DH private key\n :type key: object\n :param dh: Diffie Helman public key value\n :type dh: str\n :param keyid: key identifier label\n :type keyid: str\n :param keylabel: label for aesgcm/aesgcm128\n :type keylabel: str\n :param auth_secret: authorization secret\n :type auth_secret: str\n :param version: Content Type identifier\n :type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')\n\n " ]
Please provide a description of the function:
def iv(base, counter):
    if (counter >> 64) != 0:
        raise ECEException(u"Counter too big")
    (mask,) = struct.unpack("!Q", base[4:])
    return base[:4] + struct.pack("!Q", counter ^ mask)
[ "Generate an initialization vector.\n\n " ]
Please provide a description of the function:def decrypt(content, salt=None, key=None, private_key=None, dh=None, auth_secret=None, keyid=None, keylabel="P-256", rs=4096, version="aes128gcm"): def parse_content_header(content): id_len = struct.unpack("!B", content[20:21])[0] return { "salt": content[:16], "rs": struct.unpack("!L", content[16:20])[0], "keyid": content[21:21 + id_len], "content": content[21 + id_len:], } def decrypt_record(key, nonce, counter, content): decryptor = Cipher( algorithms.AES(key), modes.GCM(iv(nonce, counter), tag=content[-TAG_LENGTH:]), backend=default_backend() ).decryptor() return decryptor.update(content[:-TAG_LENGTH]) + decryptor.finalize() def unpad_legacy(data): pad_size = versions[version]['pad'] pad = functools.reduce( lambda x, y: x << 8 | y, struct.unpack( "!" + ("B" * pad_size), data[0:pad_size]) ) if pad_size + pad > len(data) or \ data[pad_size:pad_size+pad] != (b"\x00" * pad): raise ECEException(u"Bad padding") return data[pad_size + pad:] def unpad(data, last): i = len(data) - 1 for i in range(len(data) - 1, -1, -1): v = struct.unpack('B', data[i:i+1])[0] if v != 0: if not last and v != 1: raise ECEException(u'record delimiter != 1') if last and v != 2: raise ECEException(u'last record delimiter != 2') return data[0:i] raise ECEException(u'all zero record plaintext') if version not in versions: raise ECEException(u"Invalid version") overhead = versions[version]['pad'] if version == "aes128gcm": try: content_header = parse_content_header(content) except Exception: raise ECEException("Could not parse the content header") salt = content_header['salt'] rs = content_header['rs'] keyid = content_header['keyid'] if private_key is not None and not dh: dh = keyid else: keyid = keyid.decode('utf-8') content = content_header['content'] overhead += 16 (key_, nonce_) = derive_key("decrypt", version=version, salt=salt, key=key, private_key=private_key, dh=dh, auth_secret=auth_secret, keyid=keyid, keylabel=keylabel) if rs <= overhead: raise ECEException(u"Record size too small") chunk = rs if version != "aes128gcm": chunk += 16 # account for tags in old versions if len(content) % chunk == 0: raise ECEException(u"Message truncated") result = b'' counter = 0 try: for i in list(range(0, len(content), chunk)): data = decrypt_record(key_, nonce_, counter, content[i:i + chunk]) if version == 'aes128gcm': last = (i + chunk) >= len(content) result += unpad(data, last) else: result += unpad_legacy(data) counter += 1 except InvalidTag as ex: raise ECEException("Decryption error: {}".format(repr(ex))) return result
[ "\n Decrypt a data block\n\n :param content: Data to be decrypted\n :type content: str\n :param salt: Encryption salt\n :type salt: str\n :param key: local public key\n :type key: str\n :param private_key: DH private key\n :type key: object\n :param keyid: Internal key identifier for private key info\n :type keyid: str\n :param dh: Remote Diffie Hellman sequence (omit for aes128gcm)\n :type dh: str\n :param rs: Record size\n :type rs: int\n :param auth_secret: Authorization secret\n :type auth_secret: str\n :param version: ECE Method version\n :type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')\n :return: Decrypted message content\n :rtype str\n\n ", "Parse an aes128gcm content body and extract the header values.\n\n :param content: The encrypted body of the message\n :type content: str\n\n " ]
Please provide a description of the function:def encrypt(content, salt=None, key=None, private_key=None, dh=None, auth_secret=None, keyid=None, keylabel="P-256", rs=4096, version="aes128gcm"): def encrypt_record(key, nonce, counter, buf, last): encryptor = Cipher( algorithms.AES(key), modes.GCM(iv(nonce, counter)), backend=default_backend() ).encryptor() if version == 'aes128gcm': data = encryptor.update(buf + (b'\x02' if last else b'\x01')) else: data = encryptor.update((b"\x00" * versions[version]['pad']) + buf) data += encryptor.finalize() data += encryptor.tag return data def compose_aes128gcm(salt, content, rs, keyid): if len(keyid) > 255: raise ECEException("keyid is too long") header = salt if rs > MAX_RECORD_SIZE: raise ECEException("Too much content") header += struct.pack("!L", rs) header += struct.pack("!B", len(keyid)) header += keyid return header + content if version not in versions: raise ECEException(u"Invalid version") if salt is None: salt = os.urandom(16) (key_, nonce_) = derive_key("encrypt", version=version, salt=salt, key=key, private_key=private_key, dh=dh, auth_secret=auth_secret, keyid=keyid, keylabel=keylabel) overhead = versions[version]['pad'] if version == 'aes128gcm': overhead += 16 end = len(content) else: end = len(content) + 1 if rs <= overhead: raise ECEException(u"Record size too small") chunk_size = rs - overhead result = b"" counter = 0 # the extra one on the loop ensures that we produce a padding only # record if the data length is an exact multiple of the chunk size for i in list(range(0, end, chunk_size)): result += encrypt_record(key_, nonce_, counter, content[i:i + chunk_size], (i + chunk_size) >= end) counter += 1 if version == "aes128gcm": if keyid is None and private_key is not None: kid = private_key.public_key().public_bytes( Encoding.X962, PublicFormat.UncompressedPoint) else: kid = (keyid or '').encode('utf-8') return compose_aes128gcm(salt, result, rs, keyid=kid) return result
[ "\n Encrypt a data block\n\n :param content: block of data to encrypt\n :type content: str\n :param salt: Encryption salt\n :type salt: str\n :param key: Encryption key data\n :type key: str\n :param private_key: DH private key\n :type key: object\n :param keyid: Internal key identifier for private key info\n :type keyid: str\n :param dh: Remote Diffie Hellman sequence\n :type dh: str\n :param rs: Record size\n :type rs: int\n :param auth_secret: Authorization secret\n :type auth_secret: str\n :param version: ECE Method version\n :type version: enumerate('aes128gcm', 'aesgcm', 'aesgcm128')\n :return: Encrypted message content\n :rtype str\n\n ", "Compose the header and content of an aes128gcm encrypted\n message body\n\n :param salt: The sender's salt value\n :type salt: str\n :param content: The encrypted body of the message\n :type content: str\n :param rs: Override for the content length\n :type rs: int\n :param keyid: The keyid to use for this message\n :type keyid: str\n\n " ]
Please provide a description of the function:
def parameters(self, namespaced=False):
    if namespaced:
        return json.loads(json.dumps(self.args[0]['parameters']),
                          object_hook=lambda d: SimpleNamespace(**d))
    else:
        return self.args[0].get('parameters')
[ "returns the exception varlink error parameters" ]
Please provide a description of the function:
def GetInfo(self):
    return {
        'vendor': self.vendor,
        'product': self.product,
        'version': self.version,
        'url': self.url,
        'interfaces': list(self.interfaces.keys())
    }
[ "The standardized org.varlink.service.GetInfo() varlink method." ]
Please provide a description of the function:
def GetInterfaceDescription(self, interface):
    try:
        i = self.interfaces[interface]
    except KeyError:
        raise InterfaceNotFound(interface)

    return {'description': i.description}
[ "The standardized org.varlink.service.GetInterfaceDescription() varlink method." ]
Please provide a description of the function:
def handle(self, message, _server=None, _request=None):
    if not message:
        return

    if message[-1] == 0:
        message = message[:-1]

    string = message.decode('utf-8')

    handle = self._handle(json.loads(string), message, _server, _request)
    for out in handle:
        if out == None:
            return
        try:
            yield json.dumps(out, cls=VarlinkEncoder).encode('utf-8')
        except ConnectionError as e:
            try:
                handle.throw(e)
            except StopIteration:
                pass
[ "This generator function handles any incoming message.\n\n Write any returned bytes to the output stream.\n\n >>> for outgoing_message in service.handle(incoming_message):\n >>> connection.write(outgoing_message)\n " ]
Please provide a description of the function:
def server_bind(self):
    if self.allow_reuse_address:
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    self.socket.setblocking(True)

    if not self.listen_fd:
        self.socket.bind(self.server_address)
        self.server_address = self.socket.getsockname()
        if self.server_address[0] == 0:
            self.server_address = '@' + self.server_address[1:].decode('utf-8')
            if self.mode:
                os.fchmod(self.socket.fileno(), mode=int(self.mode, 8))
        elif self.mode:
            os.chmod(self.server_address, mode=int(self.mode, 8))
[ "Called by constructor to bind the socket.\n\n May be overridden.\n\n " ]
Please provide a description of the function:
def server_close(self):
    if self.remove_file:
        try:
            os.remove(self.remove_file)
        except:
            pass
    self.socket.close()
[ "Called to clean-up the server.\n\n May be overridden.\n\n " ]
Please provide a description of the function:
def shutdown_request(self, request):
    try:
        # explicitly shutdown. socket.close() merely releases
        # the socket and waits for GC to perform the actual close.
        request.shutdown(socket.SHUT_RDWR)
    except:
        pass  # some platforms may raise ENOTCONN here
    self.close_request(request)
[ "Called to shutdown and close an individual request." ]
Please provide a description of the function:
def open(self, interface_name, namespaced=False, connection=None):
    if not connection:
        connection = self.open_connection()

    if interface_name not in self._interfaces:
        self.get_interface(interface_name, socket_connection=connection)

    if interface_name not in self._interfaces:
        raise InterfaceNotFound(interface_name)

    return self.handler(self._interfaces[interface_name], connection, namespaced=namespaced)
[ "Open a new connection and get a client interface handle with the varlink methods installed.\n\n :param interface_name: an interface name, which the service this client object is\n connected to, provides.\n :param namespaced: If arguments and return values are instances of SimpleNamespace\n rather than dictionaries.\n :param connection: If set, get the interface handle for an already opened connection.\n :exception InterfaceNotFound: if the interface is not found\n\n " ]
Please provide a description of the function:
def get_interfaces(self, socket_connection=None):
    if not socket_connection:
        socket_connection = self.open_connection()
        close_socket = True
    else:
        close_socket = False

    # noinspection PyUnresolvedReferences
    _service = self.handler(self._interfaces["org.varlink.service"], socket_connection)
    self.info = _service.GetInfo()

    if close_socket:
        socket_connection.close()

    return self.info['interfaces']
[ "Returns the a list of Interface objects the service implements." ]
Please provide a description of the function:
def add_interface(self, interface):
    if not isinstance(interface, Interface):
        raise TypeError

    self._interfaces[interface.name] = interface
[ "Manually add or overwrite an interface definition from an Interface object.\n\n :param interface: an Interface() object\n\n " ]
Please provide a description of the function:
def rematch_entry(envkernel, gamma = 0.1, threshold = 1e-6):
    n, m = envkernel.shape
    K = np.exp(-(1 - envkernel) / gamma)

    # initialisation
    u = np.ones((n,)) / n
    v = np.ones((m,)) / m

    en = np.ones((n,)) / float(n)
    em = np.ones((m,)) / float(m)

    Kp = (1 / en).reshape(-1, 1) * K

    # converge balancing vectors u and v
    itercount = 0
    error = 1
    while (error > threshold):
        uprev = u
        vprev = v

        v = np.divide(em, np.dot(K.T, u))
        u = np.divide(en, np.dot(K, v))

        # determine error every now and then
        if itercount % 5:
            error = np.sum((u - uprev) ** 2) / np.sum((u) ** 2) + np.sum((v - vprev) ** 2) / np.sum((v) ** 2)
        itercount += 1

    # using Tr(X.T Y) = Sum[ij](Xij * Yij)
    # P.T * C
    # P_ij = u_i * v_j * K_ij
    pity = np.multiply(np.multiply(K, u.reshape((-1, 1))), v)

    glosim = np.sum(np.multiply(pity, envkernel))

    return glosim
[ "\n Compute the global similarity between two structures A and B.\n It uses the Sinkhorn algorithm as reported in:\n Phys. Chem. Chem. Phys., 2016, 18, p. 13768\n Args:\n envkernel: NxM matrix of structure A with \n N and structure B with M atoms\n gamma: parameter to control between best match gamma = 0\n and average kernel gamma = inf.\n " ]
Please provide a description of the function:
def create(atoms_list, N, L, cutoff = 0, all_atomtypes=[]):
    myAlphas, myBetas = genBasis.getBasisFunc(cutoff, N)

    # get information about feature length
    n_datapoints = len(atoms_list)
    atoms = atoms_list[0]
    x = get_lastatom_soap(atoms, cutoff, myAlphas, myBetas, N, L, all_atomtypes=all_atomtypes)
    n_features = x.shape[1]
    print("soap first", x.shape)
    print(n_datapoints, n_features)
    soapmatrix = np.zeros((n_datapoints, n_features))

    i = -1
    for atoms in atoms_list:
        i += 1
        # atoms
        print("Processing " + str(atoms.info), " Run time: " + str(time.time() - t0_total), end="\r")
        soapmatrix[i, :] = get_lastatom_soap(atoms, cutoff, myAlphas, myBetas, N, L, all_atomtypes=all_atomtypes)
    print("")

    # infos
    print("shape", soapmatrix.shape)
    return soapmatrix
[ "Takes a trajectory xyz file and writes soap features\n " ]
Please provide a description of the function:
def getPoly(rCut, nMax):
    rCutVeryHard = rCut + 5.0
    rx = 0.5 * rCutVeryHard * (x + 1)

    basisFunctions = []
    for i in range(1, nMax + 1):
        basisFunctions.append(lambda rr, i=i, rCut=rCut: (rCut - np.clip(rr, 0, rCut))**(i + 2))

    # Calculate the overlap of the different polynomial functions in a
    # matrix S. These overlaps defined through the dot product over the
    # radial coordinate are analytically calculable: Integrate[(rc - r)^(a
    # + 2) (rc - r)^(b + 2) r^2, {r, 0, rc}]. Then the weights B that make
    # the basis orthonormal are given by B=S^{-1/2}
    S = np.zeros((nMax, nMax))
    for i in range(1, nMax + 1):
        for j in range(1, nMax + 1):
            S[i - 1, j - 1] = (2 * (rCut)**(7 + i + j)) / ((5 + i + j) * (6 + i + j) * (7 + i + j))

    betas = sqrtm(np.linalg.inv(S))

    # If the result is complex, the calculation is currently halted.
    if (betas.dtype == np.complex128):
        raise ValueError(
            "Could not calculate normalization factors for the polynomial basis"
            " in the domain of real numbers. Lowering the number of radial "
            "basis functions is advised."
        )

    fs = np.zeros([nMax, len(x)])
    for n in range(1, nMax + 1):
        fs[n - 1, :] = (rCut - np.clip(rx, 0, rCut))**(n + 2)

    gss = np.dot(betas, fs)

    return nMax, rx, gss
[ "Used to calculate discrete vectors for the polynomial basis functions.\n\n Args:\n rCut(float): Radial cutoff\n nMax(int): Number of polynomial radial functions\n " ]
Please provide a description of the function:
def _format_ase2clusgeo(obj, all_atomtypes=None):
    # atoms metadata
    totalAN = len(obj)
    if all_atomtypes is not None:
        atomtype_set = set(all_atomtypes)
    else:
        atomtype_set = set(obj.get_atomic_numbers())
    atomtype_lst = np.sort(list(atomtype_set))
    n_atoms_per_type_lst = []
    pos_lst = []
    for atomtype in atomtype_lst:
        condition = obj.get_atomic_numbers() == atomtype
        pos_onetype = obj.get_positions()[condition]
        n_onetype = pos_onetype.shape[0]

        # store data in lists
        pos_lst.append(pos_onetype)
        n_atoms_per_type_lst.append(n_onetype)

    typeNs = n_atoms_per_type_lst
    Ntypes = len(n_atoms_per_type_lst)
    atomtype_lst
    Apos = np.concatenate(pos_lst).ravel()
    return Apos, typeNs, Ntypes, atomtype_lst, totalAN
[ " Takes an ase Atoms object and returns numpy arrays and integers\n which are read by the internal clusgeo. Apos is currently a flattened\n out numpy array\n\n Args:\n obj():\n all_atomtypes():\n sort():\n " ]
Please provide a description of the function:
def _get_supercell(obj, rCut=5.0):
    rCutHard = rCut + 5  # Giving extra space for hard cutOff
    cell_vectors = obj.get_cell()
    a1, a2, a3 = cell_vectors[0], cell_vectors[1], cell_vectors[2]

    # vectors perpendicular to two cell vectors
    b1 = np.cross(a2, a3, axis=0)
    b2 = np.cross(a3, a1, axis=0)
    b3 = np.cross(a1, a2, axis=0)
    # projections onto perpendicular vectors
    p1 = np.dot(a1, b1) / np.dot(b1, b1) * b1
    p2 = np.dot(a2, b2) / np.dot(b2, b2) * b2
    p3 = np.dot(a3, b3) / np.dot(b3, b3) * b3
    xyz_arr = np.linalg.norm(np.array([p1, p2, p3]), axis=1)
    cell_images = np.ceil(rCutHard / xyz_arr)
    nx = int(cell_images[0])
    ny = int(cell_images[1])
    nz = int(cell_images[2])

    suce = obj * (1 + 2 * nx, 1 + 2 * ny, 1 + 2 * nz)
    shift = obj.get_cell()

    shifted_suce = suce.copy()
    shifted_suce.translate(-shift[0] * nx - shift[1] * ny - shift[2] * nz)

    return shifted_suce
[ " Takes atoms object (with a defined cell) and a radial cutoff.\n Returns a supercell centered around the original cell\n generously extended to contain all spheres with the given radial\n cutoff centered around the original atoms.\n " ]
Please provide a description of the function:def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0): rCutHard = rCut + 5 assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax) assert Lmax >= 0, "l cannot be negative.Lmax={}".format(Lmax) assert rCutHard < 17.0001, "hard radius cuttof cannot be larger than 17 Angs. rCut={}".format(rCutHard) assert rCutHard > 1.999, "hard redius cuttof cannot be lower than 1 Ang. rCut={}".format(rCutHard) assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax) assert nMax <= 13, "number of basis functions cannot exceed 12. nMax={}".format(nMax) assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta) # get clusgeo internal format for c-code Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes) Hpos = np.array(Hpos) py_Hsize = Hpos.shape[0] # flatten arrays Hpos = Hpos.flatten() alp = alp.flatten() bet = bet.flatten() # convert int to c_int lMax = c_int(Lmax) Hsize = c_int(py_Hsize) Ntypes = c_int(py_Ntypes) totalAN = c_int(totalAN) rCutHard = c_double(rCutHard) Nsize = c_int(nMax) c_eta = c_double(eta) #convert int array to c_int array typeNs = (c_int * len(typeNs))(*typeNs) # convert to c_double arrays # alphas alphas = (c_double * len(alp))(*alp.tolist()) # betas betas = (c_double * len(bet))(*bet.tolist()) #Apos axyz = (c_double * len(Apos))(*Apos.tolist()) #Hpos hxyz = (c_double * len(Hpos))(*Hpos.tolist()) ### START SOAP### #path_to_so = os.path.dirname(os.path.abspath(__file__)) _PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__)) _SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS if py_Ntypes == 1 or (not crossOver): substring = "lib/libsoapPySig." libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None)) libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double] libsoap.soap.restype = POINTER (c_double) c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))() libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta) else: substring = "lib/libsoapGTO." libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None)) libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double] libsoapGTO.soap.restype = POINTER (c_double) c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))() libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta) # return c; if crossOver: crosTypes = int((py_Ntypes*(py_Ntypes+1))/2) shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes) else: shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes) a = np.ctypeslib.as_array(c) a = a.reshape(shape) return a
[ "Get the RBF basis SOAP output for the given positions in a finite system.\n\n Args:\n obj(ase.Atoms): Atomic structure for which the SOAP output is\n calculated.\n Hpos: Positions at which to calculate SOAP\n alp: Alphas\n bet: Betas\n rCut: Radial cutoff.\n nMax: Maximum number of radial basis functions\n Lmax: Maximum spherical harmonics degree\n crossOver:\n all_atomtypes: Can be used to specify the atomic elements for which to\n calculate the output. If given the output is calculated only for the\n given species and is ordered by atomic number.\n eta: The gaussian smearing width.\n\n Returns:\n np.ndarray: SOAP output for the given positions.\n " ]
Please provide a description of the function:
def get_soap_structure(obj, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
    Hpos = obj.get_positions()
    arrsoap = get_soap_locals(obj, Hpos, alp, bet, rCut, nMax, Lmax, crossOver, all_atomtypes=all_atomtypes, eta=eta)

    return arrsoap
[ "Get the RBF basis SOAP output for atoms in a finite structure.\n\n Args:\n obj(ase.Atoms): Atomic structure for which the SOAP output is\n calculated.\n alp: Alphas\n bet: Betas\n rCut: Radial cutoff.\n nMax: Maximum nmber of radial basis functions\n Lmax: Maximum spherical harmonics degree\n crossOver:\n all_atomtypes: Can be used to specify the atomic elements for which to\n calculate the output. If given the output is calculated only for the\n given species.\n eta: The gaussian smearing width.\n\n Returns:\n np.ndarray: SOAP output for the given structure.\n " ]
Please provide a description of the function:
def get_periodic_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
    suce = _get_supercell(obj, rCut)
    arrsoap = get_soap_locals(suce, Hpos, alp, bet, rCut, nMax=nMax, Lmax=Lmax, crossOver=crossOver, all_atomtypes=all_atomtypes, eta=eta)

    return arrsoap
[ "Get the RBF basis SOAP output for the given position in a periodic system.\n\n Args:\n obj(ase.Atoms): Atomic structure for which the SOAP output is\n calculated.\n alp: Alphas\n bet: Betas\n rCut: Radial cutoff.\n nMax: Maximum nmber of radial basis functions\n Lmax: Maximum spherical harmonics degree\n crossOver:\n all_atomtypes: Can be used to specify the atomic elements for which to\n calculate the output. If given the output is calculated only for the\n given species.\n eta: The gaussian smearing width.\n\n Returns:\n np.ndarray: SOAP output for the given position.\n " ]
Please provide a description of the function:
def get_nnsoap(obj, first_shell, alphas, betas, rcut=6, nmax=10, lmax=9, all_atomtypes=[]):
    soap_vector = []
    nnn = len(first_shell)
    for tbh in range(0, 3):
        try:
            atom_idx = first_shell[tbh]
        except:
            soap_vector.append(soap_zero)
        else:
            Hpos = []
            print(atom_idx)
            pos = obj.get_positions()[atom_idx]
            Hpos.append(pos)
            x = soapPy.get_soap_locals(obj, Hpos, myAlphas, myBetas, rCut=rcut,
                                       NradBas=nmax, Lmax=lmax, crossOver=False,
                                       all_atomtypes=all_atomtypes)
            soap_zero = np.zeros(x.shape)
            soap_vector.append(x)
    print(len(soap_vector), soap_vector[0].shape, soap_vector[1].shape, soap_vector[2].shape)
    print("exemplary soapvalues", soap_vector[0][0, 1], soap_vector[1][0, 1], soap_vector[2][0, 1])
    soap_array = np.hstack(soap_vector)
    return soap_array
[ "Takes cluster structure and nearest neighbour information of a datapoint,\n Returns concatenated soap vectors for each nearest\n neighbour (up to 3). Top, bridge, hollow fill the initial\n zero soap vector from left to right.\n " ]
Please provide a description of the function:
def get_sitecenteredsoap(obj, first_shell, alphas, betas, rcut=6, nmax=10, lmax=9, all_atomtypes=[]):
    soap_vector = []
    nnn = len(first_shell)
    center_of_atoms = 1.0 / nnn * np.mean(obj.get_positions()[first_shell], axis=0)
    # print("center of atoms", center_of_atoms)
    Hpos = [center_of_atoms]
    soap_vector = soapPy.get_soap_locals(obj, Hpos, myAlphas, myBetas, rCut=rcut,
                                         NradBas=nmax, Lmax=lmax, crossOver=False,
                                         all_atomtypes=all_atomtypes)
    # print(len(soap_vector), soap_vector.shape)
    # print("exemplary soapvalues", soap_vector[0,0], soap_vector[0,1], soap_vector[0,2])
    return soap_vector
[ "Takes cluster structure and nearest neighbour information of a datapoint,\n Returns concatenated soap vectors for each nearest\n neighbour (up to 3). Top, bridge, hollow fill the initial\n zero soap vector from left to right.\n " ]
Please provide a description of the function:
def w(self, units=None):
    if units is None:
        if self.hamiltonian is None:
            units = dimensionless
        else:
            units = self.hamiltonian.units
    return super(Orbit, self).w(units=units)
[ "\n This returns a single array containing the phase-space positions.\n\n Parameters\n ----------\n units : `~gala.units.UnitSystem` (optional)\n The unit system to represent the position and velocity in\n before combining into the full array.\n\n Returns\n -------\n w : `~numpy.ndarray`\n A numpy array of all positions and velocities, without units.\n Will have shape ``(2*ndim,...)``.\n\n " ]
Please provide a description of the function:
def represent_as(self, new_pos, new_vel=None):
    o = super(Orbit, self).represent_as(new_pos=new_pos, new_vel=new_vel)
    return self.__class__(pos=o.pos, vel=o.vel, hamiltonian=self.hamiltonian)
[ "\n Represent the position and velocity of the orbit in an alternate\n coordinate system. Supports any of the Astropy coordinates\n representation classes.\n\n Parameters\n ----------\n new_pos : :class:`~astropy.coordinates.BaseRepresentation`\n The type of representation to generate. Must be a class (not an\n instance), or the string name of the representation class.\n new_vel : :class:`~astropy.coordinates.BaseDifferential` (optional)\n Class in which any velocities should be represented. Must be a class\n (not an instance), or the string name of the differential class. If\n None, uses the default differential for the new position class.\n\n Returns\n -------\n new_orbit : `gala.dynamics.Orbit`\n " ]
Please provide a description of the function:
def to_hdf5(self, f):
    f = super(Orbit, self).to_hdf5(f)

    if self.potential is not None:
        import yaml
        from ..potential.potential.io import to_dict
        f['potential'] = yaml.dump(to_dict(self.potential)).encode('utf-8')

    if self.t:
        quantity_to_hdf5(f, 'time', self.t)

    return f
[ "\n Serialize this object to an HDF5 file.\n\n Requires ``h5py``.\n\n Parameters\n ----------\n f : str, :class:`h5py.File`\n Either the filename or an open HDF5 file.\n " ]
Please provide a description of the function:def from_hdf5(cls, f): # TODO: this is duplicated code from PhaseSpacePosition if isinstance(f, str): import h5py f = h5py.File(f) pos = quantity_from_hdf5(f['pos']) vel = quantity_from_hdf5(f['vel']) time = None if 'time' in f: time = quantity_from_hdf5(f['time']) frame = None if 'frame' in f: g = f['frame'] frame_mod = g.attrs['module'] frame_cls = g.attrs['class'] frame_units = [u.Unit(x.decode('utf-8')) for x in g['units']] if u.dimensionless_unscaled in frame_units: units = DimensionlessUnitSystem() else: units = UnitSystem(*frame_units) pars = dict() for k in g['parameters']: pars[k] = quantity_from_hdf5(g['parameters/'+k]) exec("from {0} import {1}".format(frame_mod, frame_cls)) frame_cls = eval(frame_cls) frame = frame_cls(units=units, **pars) potential = None if 'potential' in f: import yaml from ..potential.potential.io import from_dict _dict = yaml.load(f['potential'][()].decode('utf-8')) potential = from_dict(_dict) return cls(pos=pos, vel=vel, t=time, frame=frame, potential=potential)
[ "\n Load an object from an HDF5 file.\n\n Requires ``h5py``.\n\n Parameters\n ----------\n f : str, :class:`h5py.File`\n Either the filename or an open HDF5 file.\n " ]
Please provide a description of the function:
def orbit_gen(self):
    if self.norbits == 1:
        yield self
    else:
        for i in range(self.norbits):
            yield self[:, i]
[ "\n Generator for iterating over each orbit.\n " ]
Please provide a description of the function:
def potential_energy(self, potential=None):
    if self.hamiltonian is None and potential is None:
        raise ValueError("To compute the potential energy, a potential"
                         " object must be provided!")
    if potential is None:
        potential = self.hamiltonian.potential

    return super(Orbit, self).potential_energy(potential)
[ "\n The potential energy *per unit mass*:\n\n .. math::\n\n E_\\Phi = \\Phi(\\boldsymbol{q})\n\n Returns\n -------\n E : :class:`~astropy.units.Quantity`\n The potential energy.\n " ]
Please provide a description of the function:
def energy(self, hamiltonian=None):
    if self.hamiltonian is None and hamiltonian is None:
        raise ValueError("To compute the total energy, a hamiltonian"
                         " object must be provided!")

    from ..potential import PotentialBase
    if isinstance(hamiltonian, PotentialBase):
        from ..potential import Hamiltonian

        warnings.warn("This function now expects a `Hamiltonian` instance "
                      "instead of a `PotentialBase` subclass instance. If "
                      "you are using a static reference frame, you just "
                      "need to pass your potential object in to the "
                      "Hamiltonian constructor to use, e.g., Hamiltonian"
                      "(potential).", DeprecationWarning)

        hamiltonian = Hamiltonian(hamiltonian)

    if hamiltonian is None:
        hamiltonian = self.hamiltonian

    return hamiltonian(self)
[ "\n The total energy *per unit mass*:\n\n Returns\n -------\n E : :class:`~astropy.units.Quantity`\n The total energy.\n " ]
Please provide a description of the function:def _max_helper(self, arr, approximate=False, interp_kwargs=None, minimize_kwargs=None): assert self.norbits == 1 assert self.t[-1] > self.t[0] # time must increase _ix = argrelmax(arr.value, mode='wrap')[0] _ix = _ix[(_ix != 0) & (_ix != (len(arr)-1))] # remove edges t = self.t.value approx_arr = arr[_ix] approx_t = t[_ix] if approximate: return approx_arr, approx_t * self.t.unit if interp_kwargs is None: interp_kwargs = dict() if minimize_kwargs is None: minimize_kwargs = dict() # default scipy function kwargs interp_kwargs.setdefault('k', 3) interp_kwargs.setdefault('ext', 3) # don't extrapolate, use boundary minimize_kwargs.setdefault('method', 'powell') # Interpolating function to upsample array: # Negative sign because we assume we're always finding the maxima interp_func = InterpolatedUnivariateSpline(t, -arr.value, **interp_kwargs) better_times = np.zeros(_ix.shape, dtype=float) for i, j in enumerate(_ix): res = minimize(interp_func, t[j], **minimize_kwargs) better_times[i] = res.x better_arr = -interp_func(better_times) return better_arr * arr.unit, better_times * self.t.unit
[ "\n Helper function for computing extrema (apocenter, pericenter, z_height)\n and times of extrema.\n\n Parameters\n ----------\n arr : `numpy.ndarray`\n " ]
Please provide a description of the function:def pericenter(self, return_times=False, func=np.mean, interp_kwargs=None, minimize_kwargs=None, approximate=False): if return_times and func is not None: raise ValueError("Cannot return times if reducing pericenters " "using an input function. Pass `func=None` if " "you want to return all individual pericenters " "and times.") if func is None: reduce = False func = lambda x: x else: reduce = True # time must increase if self.t[-1] < self.t[0]: self = self[::-1] vals = [] times = [] for orbit in self.orbit_gen(): v, t = orbit._max_helper(-orbit.physicsspherical.r, # pericenter interp_kwargs=interp_kwargs, minimize_kwargs=minimize_kwargs, approximate=approximate) vals.append(func(-v)) # negative for pericenter times.append(t) return self._max_return_helper(vals, times, return_times, reduce)
[ "\n Estimate the pericenter(s) of the orbit by identifying local minima in\n the spherical radius and interpolating between timesteps near the\n minima.\n\n By default, this returns the mean of all local minima (pericenters). To\n get, e.g., the minimum pericenter, pass in ``func=np.min``. To get\n all pericenters, pass in ``func=None``.\n\n Parameters\n ----------\n func : func (optional)\n A function to evaluate on all of the identified pericenter times.\n return_times : bool (optional)\n Also return the pericenter times.\n interp_kwargs : dict (optional)\n Keyword arguments to be passed to\n :class:`scipy.interpolate.InterpolatedUnivariateSpline`.\n minimize_kwargs : dict (optional)\n Keyword arguments to be passed to :class:`scipy.optimize.minimize`.\n approximate : bool (optional)\n Compute an approximate pericenter by skipping interpolation.\n\n Returns\n -------\n peri : float, :class:`~numpy.ndarray`\n Either a single number or an array of pericenters.\n times : :class:`~numpy.ndarray` (optional, see ``return_times``)\n If ``return_times=True``, also returns an array of the pericenter\n times.\n\n " ]
Please provide a description of the function:def zmax(self, return_times=False, func=np.mean, interp_kwargs=None, minimize_kwargs=None, approximate=False): if return_times and func is not None: raise ValueError("Cannot return times if reducing " "using an input function. Pass `func=None` if " "you want to return all individual values " "and times.") if func is None: reduce = False func = lambda x: x else: reduce = True # time must increase if self.t[-1] < self.t[0]: self = self[::-1] vals = [] times = [] for orbit in self.orbit_gen(): v, t = orbit._max_helper(np.abs(orbit.cylindrical.z), interp_kwargs=interp_kwargs, minimize_kwargs=minimize_kwargs, approximate=approximate) vals.append(func(v)) times.append(t) return self._max_return_helper(vals, times, return_times, reduce)
[ "\n Estimate the maximum ``z`` height of the orbit by identifying local\n maxima in the absolute value of the ``z`` position and interpolating\n between timesteps near the maxima.\n\n By default, this returns the mean of all local maxima. To get, e.g., the\n largest ``z`` excursion, pass in ``func=np.max``. To get all ``z``\n maxima, pass in ``func=None``.\n\n Parameters\n ----------\n func : func (optional)\n A function to evaluate on all of the identified z maximum times.\n return_times : bool (optional)\n Also return the times of maximum.\n interp_kwargs : dict (optional)\n Keyword arguments to be passed to\n :class:`scipy.interpolate.InterpolatedUnivariateSpline`.\n minimize_kwargs : dict (optional)\n Keyword arguments to be passed to :class:`scipy.optimize.minimize`.\n approximate : bool (optional)\n Compute approximate values by skipping interpolation.\n\n Returns\n -------\n zs : float, :class:`~numpy.ndarray`\n Either a single number or an array of maximum z heights.\n times : :class:`~numpy.ndarray` (optional, see ``return_times``)\n If ``return_times=True``, also returns an array of the apocenter\n times.\n\n " ]
Please provide a description of the function:
def eccentricity(self, **kw):
    ra = self.apocenter(**kw)
    rp = self.pericenter(**kw)
    return (ra - rp) / (ra + rp)
[ "\n Returns the eccentricity computed from the mean apocenter and\n mean pericenter.\n\n .. math::\n\n e = \\frac{r_{\\rm apo} - r_{\\rm per}}{r_{\\rm apo} + r_{\\rm per}}\n\n Parameters\n ----------\n **kw\n Any keyword arguments passed to ``apocenter()`` and\n ``pericenter()``. For example, ``approximate=True``.\n\n Returns\n -------\n ecc : float\n The orbital eccentricity.\n\n " ]
Please provide a description of the function:
def circulation(self):
    L = self.angular_momentum()

    # if only 2D, add another empty axis
    if L.ndim == 2:
        single_orbit = True
        L = L[..., None]
    else:
        single_orbit = False

    ndim, ntimes, norbits = L.shape

    # initial angular momentum
    L0 = L[:, 0]

    # see if at any timestep the sign has changed
    circ = np.ones((ndim, norbits))
    for ii in range(ndim):
        cnd = (np.sign(L0[ii]) != np.sign(L[ii, 1:])) | \
              (np.abs(L[ii, 1:]).value < 1E-13)
        ix = np.atleast_1d(np.any(cnd, axis=0))
        circ[ii, ix] = 0

    circ = circ.astype(int)

    if single_orbit:
        return circ.reshape((ndim,))
    else:
        return circ
[ "\n Determine which axes the Orbit circulates around by checking\n whether there is a change of sign of the angular momentum\n about an axis. Returns a 2D array with ``ndim`` integers per orbit\n point. If a box orbit, all integers will be 0. A 1 indicates\n circulation about the corresponding axis.\n\n TODO: clockwise / counterclockwise?\n\n For example, for a single 3D orbit:\n\n - Box and boxlet = [0,0,0]\n - z-axis (short-axis) tube = [0,0,1]\n - x-axis (long-axis) tube = [1,0,0]\n\n Returns\n -------\n circulation : :class:`numpy.ndarray`\n An array that specifies whether there is circulation about any of\n the axes of the input orbit. For a single orbit, will return a\n 1D array, but for multiple orbits, the shape will be\n ``(3, norbits)``.\n\n " ]
Please provide a description of the function:def align_circulation_with_z(self, circulation=None): if circulation is None: circulation = self.circulation() circulation = atleast_2d(circulation, insert_axis=1) cart = self.cartesian pos = cart.xyz vel = np.vstack((cart.v_x.value[None], cart.v_y.value[None], cart.v_z.value[None])) * cart.v_x.unit if pos.ndim < 3: pos = pos[...,np.newaxis] vel = vel[...,np.newaxis] if (circulation.shape[0] != self.ndim or circulation.shape[1] != pos.shape[2]): raise ValueError("Shape of 'circulation' array should match the " "shape of the position/velocity (minus the time " "axis).") new_pos = pos.copy() new_vel = vel.copy() for n in range(pos.shape[2]): if circulation[2,n] == 1 or np.all(circulation[:,n] == 0): # already circulating about z or box orbit continue if sum(circulation[:,n]) > 1: logger.warning("Circulation about multiple axes - are you sure " "the orbit has been integrated for long enough?") if circulation[0,n] == 1: circ = 0 elif circulation[1,n] == 1: circ = 1 else: raise RuntimeError("Should never get here...") new_pos[circ,:,n] = pos[2,:,n] new_pos[2,:,n] = pos[circ,:,n] new_vel[circ,:,n] = vel[2,:,n] new_vel[2,:,n] = vel[circ,:,n] return self.__class__(pos=new_pos.reshape(cart.xyz.shape), vel=new_vel.reshape(cart.xyz.shape), t=self.t, hamiltonian=self.hamiltonian)
[ "\n If the input orbit is a tube orbit, this function aligns the circulation\n axis with the z axis and returns a copy.\n\n Parameters\n ----------\n circulation : array_like (optional)\n Array of bits that specify the axis about which the orbit\n circulates. If not provided, will compute this using\n :meth:`~gala.dynamics.Orbit.circulation`. See that method for more\n information.\n\n Returns\n -------\n orb : :class:`~gala.dynamics.Orbit`\n A copy of the original orbit object with circulation aligned with\n the z axis.\n " ]
Please provide a description of the function:def to_frame(self, frame, current_frame=None, **kwargs): kw = kwargs.copy() # TODO: need a better way to do this! from ..potential.frame.builtin import ConstantRotatingFrame for fr in [frame, current_frame, self.frame]: if isinstance(fr, ConstantRotatingFrame): if 't' not in kw: kw['t'] = self.t psp = super(Orbit, self).to_frame(frame, current_frame, **kw) return Orbit(pos=psp.pos, vel=psp.vel, t=self.t, frame=frame, potential=self.potential)
[ "\n TODO:\n\n Parameters\n ----------\n frame : `gala.potential.CFrameBase`\n The frame to transform to.\n current_frame : `gala.potential.CFrameBase` (optional)\n If the Orbit has no associated Hamiltonian, this specifies the\n current frame of the orbit.\n\n Returns\n -------\n orbit : `gala.dynamics.Orbit`\n The orbit in the new reference frame.\n\n " ]
Please provide a description of the function:def greatcircle_to_greatcircle(from_greatcircle_coord, to_greatcircle_frame): # This transform goes through the parent frames on each side. # from_frame -> from_frame.origin -> to_frame.origin -> to_frame intermediate_from = from_greatcircle_coord.transform_to( from_greatcircle_coord.pole) intermediate_to = intermediate_from.transform_to( to_greatcircle_frame.pole) return intermediate_to.transform_to(to_greatcircle_frame)
[ "Transform between two greatcircle frames." ]
Please provide a description of the function:def reference_to_greatcircle(reference_frame, greatcircle_frame): # Define rotation matrices along the position angle vector, and # relative to the origin. pole = greatcircle_frame.pole.transform_to(coord.ICRS) ra0 = greatcircle_frame.ra0 center = greatcircle_frame.center R_rot = rotation_matrix(greatcircle_frame.rotation, 'z') if not np.isnan(ra0): xaxis = np.array([np.cos(ra0), np.sin(ra0), 0.]) zaxis = pole.cartesian.xyz.value if np.abs(zaxis[2]) >= 1e-15: xaxis[2] = -(zaxis[0]*xaxis[0] + zaxis[1]*xaxis[1]) / zaxis[2] # what? else: xaxis[2] = 0. xaxis = xaxis / np.sqrt(np.sum(xaxis**2)) yaxis = np.cross(zaxis, xaxis) R = np.stack((xaxis, yaxis, zaxis)) elif center is not None: R1 = rotation_matrix(pole.ra, 'z') R2 = rotation_matrix(90*u.deg - pole.dec, 'y') Rtmp = matrix_product(R2, R1) rot = center.cartesian.transform(Rtmp) rot_lon = rot.represent_as(coord.UnitSphericalRepresentation).lon R3 = rotation_matrix(rot_lon, 'z') R = matrix_product(R3, R2, R1) else: R1 = rotation_matrix(pole.ra, 'z') R2 = rotation_matrix(pole.dec, 'y') R = matrix_product(R2, R1) return matrix_product(R_rot, R)
[ "Convert a reference coordinate to a great circle frame." ]
Please provide a description of the function:def pole_from_endpoints(coord1, coord2): c1 = coord1.cartesian / coord1.cartesian.norm() coord2 = coord2.transform_to(coord1.frame) c2 = coord2.cartesian / coord2.cartesian.norm() pole = c1.cross(c2) pole = pole / pole.norm() return coord1.frame.realize_frame(pole)
[ "Compute the pole from a great circle that connects the two specified\n coordinates.\n\n This assumes a right-handed rule from coord1 to coord2: the pole is the\n north pole under that assumption.\n\n Parameters\n ----------\n coord1 : `~astropy.coordinates.SkyCoord`\n Coordinate of one point on a great circle.\n coord2 : `~astropy.coordinates.SkyCoord`\n Coordinate of the other point on a great circle.\n\n Returns\n -------\n pole : `~astropy.coordinates.SkyCoord`\n The coordinates of the pole.\n " ]
Please provide a description of the function:def sph_midpoint(coord1, coord2): c1 = coord1.cartesian / coord1.cartesian.norm() coord2 = coord2.transform_to(coord1.frame) c2 = coord2.cartesian / coord2.cartesian.norm() midpt = 0.5 * (c1 + c2) usph = midpt.represent_as(coord.UnitSphericalRepresentation) return coord1.frame.realize_frame(usph)
[ "Compute the midpoint between two points on the sphere.\n\n Parameters\n ----------\n coord1 : `~astropy.coordinates.SkyCoord`\n Coordinate of one point on a great circle.\n coord2 : `~astropy.coordinates.SkyCoord`\n Coordinate of the other point on a great circle.\n\n Returns\n -------\n midpt : `~astropy.coordinates.SkyCoord`\n The coordinates of the spherical midpoint.\n " ]
Please provide a description of the function:def get_uv_tan(c): l = c.spherical.lon b = c.spherical.lat p = np.array([-np.sin(l), np.cos(l), np.zeros_like(l.value)]).T q = np.array([-np.cos(l)*np.sin(b), -np.sin(l)*np.sin(b), np.cos(b)]).T return np.stack((p, q), axis=-1)
[ "Get tangent plane basis vectors on the unit sphere at the given\n spherical coordinates.\n " ]
Please provide a description of the function:def get_transform_matrix(from_frame, to_frame): path, distance = coord.frame_transform_graph.find_shortest_path( from_frame, to_frame) matrices = [] currsys = from_frame for p in path[1:]: # first element is fromsys so we skip it trans = coord.frame_transform_graph._graph[currsys][p] if isinstance(trans, coord.DynamicMatrixTransform): M = trans.matrix_func(currsys(), p) elif isinstance(trans, coord.StaticMatrixTransform): M = trans.matrix else: raise ValueError("Transform path contains a '{0}': cannot " "be composed into a single transformation " "matrix.".format(trans.__class__.__name__)) matrices.append(M) currsys = p M = None for Mi in reversed(matrices): if M is None: M = Mi else: M = matrix_product(M, Mi) return M
[ "Compose sequential matrix transformations (static or dynamic) to get a\n single transformation matrix from a given path through the Astropy\n transformation machinery.\n\n Parameters\n ----------\n from_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass\n The *class* of the frame you're transforming from.\n to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass\n The *class* of the frame you're transfrorming to.\n " ]
Please provide a description of the function:def transform_pm_cov(c, cov, to_frame): if c.isscalar and cov.shape != (2, 2): raise ValueError('If input coordinate object is a scalar coordinate, ' 'the proper motion covariance matrix must have shape ' '(2, 2), not {}'.format(cov.shape)) elif not c.isscalar and len(c) != cov.shape[0]: raise ValueError('Input coordinates and covariance matrix must have ' 'the same number of entries ({} vs {}).' .format(len(c), cov.shape[0])) # 3D rotation matrix, to be projected onto the tangent plane if hasattr(c, 'frame'): frame = c.frame else: frame = c R = get_transform_matrix(frame.__class__, to_frame) # Get input coordinates in the desired frame: c_to = c.transform_to(to_frame) # Get tangent plane coordinates: uv_in = get_uv_tan(c) uv_to = get_uv_tan(c_to) if not c.isscalar: G = np.einsum('nab,nac->nbc', uv_to, np.einsum('ji,nik->njk', R, uv_in)) # transform cov_to = np.einsum('nba,nac->nbc', G, np.einsum('nij,nki->njk', cov, G)) else: G = np.einsum('ab,ac->bc', uv_to, np.einsum('ji,ik->jk', R, uv_in)) # transform cov_to = np.einsum('ba,ac->bc', G, np.einsum('ij,ki->jk', cov, G)) return cov_to
[ "Transform a proper motion covariance matrix to a new frame.\n\n Parameters\n ----------\n c : `~astropy.coordinates.SkyCoord`\n The sky coordinates of the sources in the initial coordinate frame.\n cov : array_like\n The covariance matrix of the proper motions. Must have same length as\n the input coordinates.\n to_frame : `~astropy.coordinates.BaseCoordinateFrame` subclass\n The frame to transform to as an Astropy coordinate frame class or\n instance.\n\n Returns\n -------\n new_cov : array_like\n The transformed covariance matrix.\n\n " ]
Please provide a description of the function:def rodrigues_axis_angle_rotate(x, vec, theta): x = np.array(x).T vec = np.array(vec).T theta = np.array(theta).T[...,None] out = np.cos(theta)*x + np.sin(theta)*np.cross(vec, x) + \ (1 - np.cos(theta)) * (vec * x).sum(axis=-1)[...,None] * vec return out.T
[ "\n Rotated the input vector or set of vectors `x` around the axis\n `vec` by the angle `theta`.\n\n Parameters\n ----------\n x : array_like\n The vector or array of vectors to transform. Must have shape\n\n\n " ]
Please provide a description of the function:def z_angle_rotate(xy, theta): xy = np.array(xy).T theta = np.array(theta).T out = np.zeros_like(xy) out[...,0] = np.cos(theta)*xy[...,0] - np.sin(theta)*xy[...,1] out[...,1] = np.sin(theta)*xy[...,0] + np.cos(theta)*xy[...,1] return out.T
[ "\n Rotated the input vector or set of vectors `xy` by the angle `theta`.\n\n Parameters\n ----------\n xy : array_like\n The vector or array of vectors to transform. Must have shape\n\n\n " ]
Please provide a description of the function:def static_to_constantrotating(frame_i, frame_r, w, t=None): return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i, w=w, t=t, sign=1.)
[ "\n Transform from an inertial static frame to a rotating frame.\n\n Parameters\n ----------\n frame_i : `~gala.potential.StaticFrame`\n frame_r : `~gala.potential.ConstantRotatingFrame`\n w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`\n t : quantity_like (optional)\n Required if input coordinates are just a phase-space position.\n\n Returns\n -------\n pos : `~astropy.units.Quantity`\n Position in rotating frame.\n vel : `~astropy.units.Quantity`\n Velocity in rotating frame.\n " ]
Please provide a description of the function:def constantrotating_to_static(frame_r, frame_i, w, t=None): return _constantrotating_static_helper(frame_r=frame_r, frame_i=frame_i, w=w, t=t, sign=-1.)
[ "\n Transform from a constantly rotating frame to a static, inertial frame.\n\n Parameters\n ----------\n frame_i : `~gala.potential.StaticFrame`\n frame_r : `~gala.potential.ConstantRotatingFrame`\n w : `~gala.dynamics.PhaseSpacePosition`, `~gala.dynamics.Orbit`\n t : quantity_like (optional)\n Required if input coordinates are just a phase-space position.\n\n Returns\n -------\n pos : `~astropy.units.Quantity`\n Position in static, inertial frame.\n vel : `~astropy.units.Quantity`\n Velocity in static, inertial frame.\n " ]
Please provide a description of the function:def from_dict(d, module=None): # need this here for circular import from .. import potential as gala_potential if module is None: potential = gala_potential else: potential = module if 'type' in d and d['type'] == 'composite': p = getattr(potential, d['class'])() for i, component in enumerate(d['components']): c = _parse_component(component, module) name = component.get('name', str(i)) p[name] = c elif 'type' in d and d['type'] == 'custom': param_groups = dict() for i, component in enumerate(d['components']): c = _parse_component(component, module) try: name = component['name'] except KeyError: raise KeyError("For custom potentials, component specification " "must include the component name (e.g., name: " "'blah')") params = component.get('parameters', {}) params = _unpack_params(params) # unpack quantities param_groups[name] = params p = getattr(potential, d['class'])(**param_groups) else: p = _parse_component(d, module) return p
[ "\n Convert a dictionary potential specification into a\n :class:`~gala.potential.PotentialBase` subclass object.\n\n Parameters\n ----------\n d : dict\n Dictionary specification of a potential.\n module : namespace (optional)\n\n " ]
Please provide a description of the function:def to_dict(potential): from .. import potential as gp if isinstance(potential, gp.CompositePotential): d = dict() d['class'] = potential.__class__.__name__ d['components'] = [] for k, p in potential.items(): comp_dict = _to_dict_help(p) comp_dict['name'] = k d['components'].append(comp_dict) if potential.__class__.__name__ == 'CompositePotential' or \ potential.__class__.__name__ == 'CCompositePotential': d['type'] = 'composite' else: d['type'] = 'custom' else: d = _to_dict_help(potential) return d
[ "\n Turn a potential object into a dictionary that fully specifies the\n state of the object.\n\n Parameters\n ----------\n potential : :class:`~gala.potential.PotentialBase`\n The instantiated :class:`~gala.potential.PotentialBase` object.\n\n " ]
Please provide a description of the function:def load(f, module=None): if hasattr(f, 'read'): p_dict = yaml.load(f.read()) else: with open(os.path.abspath(f), 'r') as fil: p_dict = yaml.load(fil.read()) return from_dict(p_dict, module=module)
[ "\n Read a potential specification file and return a\n :class:`~gala.potential.PotentialBase` object instantiated with parameters\n specified in the spec file.\n\n Parameters\n ----------\n f : str, file_like\n A block of text, filename, or file-like object to parse and read\n a potential from.\n module : namespace (optional)\n\n " ]
Please provide a description of the function:def save(potential, f): d = to_dict(potential) if hasattr(f, 'write'): yaml.dump(d, f, default_flow_style=False) else: with open(f, 'w') as f2: yaml.dump(d, f2, default_flow_style=False)
[ "\n Write a :class:`~gala.potential.PotentialBase` object out to a text (YAML)\n file.\n\n Parameters\n ----------\n potential : :class:`~gala.potential.PotentialBase`\n The instantiated :class:`~gala.potential.PotentialBase` object.\n f : str, file_like\n A filename or file-like object to write the input potential object to.\n\n " ]
Please provide a description of the function:def _prepare_ws(self, w0, mmap, n_steps): from ..dynamics import PhaseSpacePosition if not isinstance(w0, PhaseSpacePosition): w0 = PhaseSpacePosition.from_w(w0) arr_w0 = w0.w(self._func_units) self.ndim, self.norbits = arr_w0.shape self.ndim = self.ndim//2 return_shape = (2*self.ndim, n_steps+1, self.norbits) if mmap is None: # create the return arrays ws = np.zeros(return_shape, dtype=float) else: if mmap.shape != return_shape: raise ValueError("Shape of memory-mapped array doesn't match " "expected shape of return array ({} vs {})" .format(mmap.shape, return_shape)) if not mmap.flags.writeable: raise TypeError("Memory-mapped array must be a writable mode, " " not '{}'".format(mmap.mode)) ws = mmap return w0, arr_w0, ws
[ "\n Decide how to make the return array. If mmap is False, this returns a\n full array of zeros, but with the correct shape as the output. If mmap\n is True, return a pointer to a memory-mapped array. The latter is\n particularly useful for integrating a large number of orbits or\n integrating a large number of time steps.\n " ]
Please provide a description of the function:def fast_lyapunov_max(w0, hamiltonian, dt, n_steps, d0=1e-5, n_steps_per_pullback=10, noffset_orbits=2, t1=0., atol=1E-10, rtol=1E-10, nmax=0, return_orbit=True): from .lyapunov import dop853_lyapunov_max, dop853_lyapunov_max_dont_save # TODO: remove in v1.0 if isinstance(hamiltonian, PotentialBase): from ..potential import Hamiltonian hamiltonian = Hamiltonian(hamiltonian) if not hamiltonian.c_enabled: raise TypeError("Input Hamiltonian must contain a C-implemented " "potential and frame.") if not isinstance(w0, PhaseSpacePosition): w0 = np.asarray(w0) ndim = w0.shape[0]//2 w0 = PhaseSpacePosition(pos=w0[:ndim], vel=w0[ndim:]) _w0 = np.squeeze(w0.w(hamiltonian.units)) if _w0.ndim > 1: raise ValueError("Can only compute fast Lyapunov exponent for a single orbit.") if return_orbit: t,w,l = dop853_lyapunov_max(hamiltonian, _w0, dt, n_steps+1, t1, d0, n_steps_per_pullback, noffset_orbits, atol, rtol, nmax) w = np.rollaxis(w, -1) try: tunit = hamiltonian.units['time'] except (TypeError, AttributeError): tunit = u.dimensionless_unscaled orbit = Orbit.from_w(w=w, units=hamiltonian.units, t=t*tunit, hamiltonian=hamiltonian) return l/tunit, orbit else: l = dop853_lyapunov_max_dont_save(hamiltonian, _w0, dt, n_steps+1, t1, d0, n_steps_per_pullback, noffset_orbits, atol, rtol, nmax) try: tunit = hamiltonian.units['time'] except (TypeError, AttributeError): tunit = u.dimensionless_unscaled return l/tunit
[ "\n Compute the maximum Lyapunov exponent using a C-implemented estimator\n that uses the DOPRI853 integrator.\n\n Parameters\n ----------\n w0 : `~gala.dynamics.PhaseSpacePosition`, array_like\n Initial conditions.\n hamiltonian : `~gala.potential.Hamiltonian`\n dt : numeric\n Timestep.\n n_steps : int\n Number of steps to run for.\n d0 : numeric (optional)\n The initial separation.\n n_steps_per_pullback : int (optional)\n Number of steps to run before re-normalizing the offset vectors.\n noffset_orbits : int (optional)\n Number of offset orbits to run.\n t1 : numeric (optional)\n Time of initial conditions. Assumed to be t=0.\n return_orbit : bool (optional)\n Store the full orbit for the parent and all offset orbits.\n\n Returns\n -------\n LEs : :class:`~astropy.units.Quantity`\n Lyapunov exponents calculated from each offset / deviation orbit.\n orbit : `~gala.dynamics.Orbit` (optional)\n\n " ]
Please provide a description of the function:def surface_of_section(orbit, plane_ix, interpolate=False): w = orbit.w() if w.ndim == 2: w = w[...,None] ndim,ntimes,norbits = w.shape H_dim = ndim // 2 p_ix = plane_ix + H_dim if interpolate: raise NotImplementedError("Not yet implemented, sorry!") # record position on specified plane when orbit crosses all_sos = np.zeros((ndim,norbits), dtype=object) for n in range(norbits): cross_ix = argrelmin(w[plane_ix,:,n]**2)[0] cross_ix = cross_ix[w[p_ix,cross_ix,n] > 0.] sos = w[:,cross_ix,n] for j in range(ndim): all_sos[j,n] = sos[j,:] return all_sos
[ "\n Generate and return a surface of section from the given orbit.\n\n .. warning::\n\n This is an experimental function and the API may change.\n\n Parameters\n ----------\n orbit : `~gala.dynamics.Orbit`\n plane_ix : int\n Integer that represents the coordinate to record crossings in. For\n example, for a 2D Hamiltonian where you want to make a SoS in\n :math:`y-p_y`, you would specify ``plane_ix=0`` (crossing the\n :math:`x` axis), and this will only record crossings for which\n :math:`p_x>0`.\n interpolate : bool (optional)\n Whether or not to interpolate on to the plane of interest. This\n makes it much slower, but will work for orbits with a coarser\n sampling.\n\n Returns\n -------\n\n Examples\n --------\n If your orbit of interest is a tube orbit, it probably conserves (at\n least approximately) some equivalent to angular momentum in the direction\n of the circulation axis. Therefore, a surface of section in R-z should\n be instructive for classifying these orbits. TODO...show how to convert\n an orbit to Cylindrical..etc...\n\n " ]
Please provide a description of the function:def _remove_units(self, x): if hasattr(x, 'unit'): x = x.decompose(self.units).value else: x = np.array(x) return x
[ "\n Always returns an array. If a Quantity is passed in, it converts to the\n units associated with this object and returns the value.\n " ]
Please provide a description of the function:def energy(self, q, t=0.): q = self._remove_units_prepare_shape(q) orig_shape, q = self._get_c_valid_arr(q) t = self._validate_prepare_time(t, q) ret_unit = self.units['energy'] / self.units['mass'] return self._energy(q, t=t).T.reshape(orig_shape[1:]) * ret_unit
[ "\n Compute the potential energy at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n E : `~astropy.units.Quantity`\n The potential energy per unit mass or value of the potential.\n " ]
Please provide a description of the function:def gradient(self, q, t=0.): q = self._remove_units_prepare_shape(q) orig_shape, q = self._get_c_valid_arr(q) t = self._validate_prepare_time(t, q) ret_unit = self.units['length'] / self.units['time']**2 return (self._gradient(q, t=t).T.reshape(orig_shape) * ret_unit).to(self.units['acceleration'])
[ "\n Compute the gradient of the potential at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n grad : `~astropy.units.Quantity`\n The gradient of the potential. Will have the same shape as\n the input position.\n " ]
Please provide a description of the function:def density(self, q, t=0.): q = self._remove_units_prepare_shape(q) orig_shape, q = self._get_c_valid_arr(q) t = self._validate_prepare_time(t, q) ret_unit = self.units['mass'] / self.units['length']**3 return (self._density(q, t=t).T * ret_unit).to(self.units['mass density'])
[ "\n Compute the density value at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n dens : `~astropy.units.Quantity`\n The potential energy or value of the potential. If the input\n position has shape ``q.shape``, the output energy will have\n shape ``q.shape[1:]``.\n " ]
Please provide a description of the function:def hessian(self, q, t=0.): if (self.R is not None and not np.allclose(np.diag(self.R), 1., atol=1e-15, rtol=0)): raise NotImplementedError("Computing Hessian matrices for rotated " "potentials is currently not supported.") q = self._remove_units_prepare_shape(q) orig_shape,q = self._get_c_valid_arr(q) t = self._validate_prepare_time(t, q) ret_unit = 1 / self.units['time']**2 hess = np.moveaxis(self._hessian(q, t=t), 0, -1) return hess.reshape((orig_shape[0], orig_shape[0]) + orig_shape[1:]) * ret_unit
[ "\n Compute the Hessian of the potential at the given position(s).\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n The position to compute the value of the potential. If the\n input position object has no units (i.e. is an `~numpy.ndarray`),\n it is assumed to be in the same unit system as the potential.\n\n Returns\n -------\n hess : `~astropy.units.Quantity`\n The Hessian matrix of second derivatives of the potential. If the input\n position has shape ``q.shape``, the output energy will have shape\n ``(q.shape[0],q.shape[0]) + q.shape[1:]``. That is, an ``n_dim`` by\n ``n_dim`` array (matrix) for each position.\n " ]
Please provide a description of the function:def mass_enclosed(self, q, t=0.): q = self._remove_units_prepare_shape(q) orig_shape, q = self._get_c_valid_arr(q) t = self._validate_prepare_time(t, q) # small step-size in direction of q h = 1E-3 # MAGIC NUMBER # Radius r = np.sqrt(np.sum(q**2, axis=1)) epsilon = h*q/r[:, np.newaxis] dPhi_dr_plus = self._energy(q + epsilon, t=t) dPhi_dr_minus = self._energy(q - epsilon, t=t) diff = (dPhi_dr_plus - dPhi_dr_minus) if isinstance(self.units, DimensionlessUnitSystem): Gee = 1. else: Gee = G.decompose(self.units).value Menc = np.abs(r*r * diff / Gee / (2.*h)) Menc = Menc.reshape(orig_shape[1:]) sgn = 1. if 'm' in self.parameters and self.parameters['m'] < 0: sgn = -1. return sgn * Menc * self.units['mass']
[ "\n Estimate the mass enclosed within the given position by assuming the potential\n is spherical.\n\n Parameters\n ----------\n q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like\n Position(s) to estimate the enclossed mass.\n\n Returns\n -------\n menc : `~astropy.units.Quantity`\n Mass enclosed at the given position(s). If the input position\n has shape ``q.shape``, the output energy will have shape\n ``q.shape[1:]``.\n " ]
Please provide a description of the function:def circular_velocity(self, q, t=0.): q = self._remove_units_prepare_shape(q) # Radius r = np.sqrt(np.sum(q**2, axis=0)) * self.units['length'] dPhi_dxyz = self.gradient(q, t=t) dPhi_dr = np.sum(dPhi_dxyz * q/r.value, axis=0) return self.units.decompose(np.sqrt(r * np.abs(dPhi_dr)))
[ "\n Estimate the circular velocity at the given position assuming the\n potential is spherical.\n\n Parameters\n ----------\n q : array_like, numeric\n Position(s) to estimate the circular velocity.\n\n Returns\n -------\n vcirc : `~astropy.units.Quantity`\n Circular velocity at the given position(s). If the input position\n has shape ``q.shape``, the output energy will have shape\n ``q.shape[1:]``.\n\n " ]
Please provide a description of the function:def plot_contours(self, grid, filled=True, ax=None, labels=None, subplots_kw=dict(), **kwargs): import matplotlib.pyplot as plt from matplotlib import cm # figure out which elements are iterable, which are numeric _grids = [] _slices = [] for ii, g in enumerate(grid): if isiterable(g): _grids.append((ii, g)) else: _slices.append((ii, g)) # figure out the dimensionality ndim = len(_grids) # if ndim > 2, don't know how to handle this! if ndim > 2: raise ValueError("ndim > 2: you can only make contours on a 2D grid. For other " "dimensions, you have to specify values to slice.") if ax is None: # default figsize fig, ax = plt.subplots(1, 1, **subplots_kw) else: fig = ax.figure if ndim == 1: # 1D curve x1 = _grids[0][1] r = np.zeros((len(_grids) + len(_slices), len(x1))) r[_grids[0][0]] = x1 for ii, slc in _slices: r[ii] = slc Z = self.energy(r*self.units['length']).value ax.plot(x1, Z, **kwargs) if labels is not None: ax.set_xlabel(labels[0]) ax.set_ylabel("potential") else: # 2D contours x1, x2 = np.meshgrid(_grids[0][1], _grids[1][1]) shp = x1.shape x1, x2 = x1.ravel(), x2.ravel() r = np.zeros((len(_grids) + len(_slices), len(x1))) r[_grids[0][0]] = x1 r[_grids[1][0]] = x2 for ii, slc in _slices: r[ii] = slc Z = self.energy(r*self.units['length']).value # make default colormap not suck cmap = kwargs.pop('cmap', cm.Blues) if filled: cs = ax.contourf(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp), cmap=cmap, **kwargs) else: cs = ax.contour(x1.reshape(shp), x2.reshape(shp), Z.reshape(shp), cmap=cmap, **kwargs) if labels is not None: ax.set_xlabel(labels[0]) ax.set_ylabel(labels[1]) return fig
[ "\n Plot equipotentials contours. Computes the potential energy on a grid\n (specified by the array `grid`).\n\n .. warning:: Right now the grid input must be arrays and must already\n be in the unit system of the potential. Quantity support is coming...\n\n Parameters\n ----------\n grid : tuple\n Coordinate grids or slice value for each dimension. Should be a\n tuple of 1D arrays or numbers.\n filled : bool (optional)\n Use :func:`~matplotlib.pyplot.contourf` instead of\n :func:`~matplotlib.pyplot.contour`. Default is ``True``.\n ax : matplotlib.Axes (optional)\n labels : iterable (optional)\n List of axis labels.\n subplots_kw : dict\n kwargs passed to matplotlib's subplots() function if an axes object\n is not specified.\n kwargs : dict\n kwargs passed to either contourf() or plot().\n\n Returns\n -------\n fig : `~matplotlib.Figure`\n\n " ]
Please provide a description of the function:def total_energy(self, x, v): warnings.warn("Use the energy methods on Orbit objects instead. In a future " "release this will be removed.", DeprecationWarning) v = atleast_2d(v, insert_axis=1) return self.energy(x) + 0.5*np.sum(v**2, axis=0)
[ "\n Compute the total energy (per unit mass) of a point in phase-space\n in this potential. Assumes the last axis of the input position /\n velocity is the dimension axis, e.g., for 100 points in 3-space,\n the arrays should have shape (100,3).\n\n Parameters\n ----------\n x : array_like, numeric\n Position.\n v : array_like, numeric\n Velocity.\n " ]
Please provide a description of the function:def replace_units(self, units, copy=True): if copy: pot = pycopy.deepcopy(self) else: pot = self PotentialBase.__init__(pot, parameters=self.parameters, origin=self.origin, R=self.R, ndim=self.ndim, units=units) return pot
[ "Change the unit system of this potential.\n\n Parameters\n ----------\n units : `~gala.units.UnitSystem`\n Set of non-reducable units that specify (at minimum) the\n length, mass, time, and angle units.\n copy : bool (optional)\n If True, returns a copy, if False, changes this object.\n " ]
Please provide a description of the function:def replace_units(self, units, copy=True): _lock = self.lock if copy: pots = self.__class__() else: pots = self pots._units = None pots.lock = False for k, v in self.items(): pots[k] = v.replace_units(units, copy=copy) pots.lock = _lock return pots
[ "Change the unit system of this potential.\n\n Parameters\n ----------\n units : `~gala.units.UnitSystem`\n Set of non-reducable units that specify (at minimum) the\n length, mass, time, and angle units.\n copy : bool (optional)\n If True, returns a copy, if False, changes this object.\n " ]
Please provide a description of the function:def from_equation(expr, vars, pars, name=None, hessian=False): r try: import sympy from sympy.utilities.lambdify import lambdify except ImportError: raise ImportError("sympy is required to use 'from_equation()' " "potential class creation.") # convert all input to Sympy objects expr = sympy.sympify(expr) vars = [sympy.sympify(v) for v in vars] var_names = [v.name for v in vars] pars = [sympy.sympify(p) for p in pars] par_names = [p.name for p in pars] ndim = len(vars) # Energy / value energyfunc = lambdify(vars + pars, expr, dummify=False, modules='numpy') # Gradient gradfuncs = [] for var in vars: gradfuncs.append(lambdify(vars + pars, sympy.diff(expr,var), dummify=False, modules='numpy')) class CustomPotential(PotentialBase): def __init__(self, units=None, **kwargs): for par in par_names: if par not in kwargs: raise ValueError("You must specify a value for " "parameter '{}'.".format(par)) super(CustomPotential,self).__init__(units=units, parameters=kwargs, ndim=ndim) def _energy(self, w, t=0.): kw = self.parameters.copy() for k,v in kw.items(): kw[k] = v.value for i,name in enumerate(var_names): kw[name] = w[:,i] return np.array(energyfunc(**kw)) def _gradient(self, w, t=0.): kw = self.parameters.copy() for k,v in kw.items(): kw[k] = v.value for i,name in enumerate(var_names): kw[name] = w[:,i] grad = np.vstack([f(**kw)[np.newaxis] for f in gradfuncs]) return grad.T if name is not None: # name = _classnamify(name) if "potential" not in name.lower(): name = name + "Potential" CustomPotential.__name__ = str(name) # Hessian if hessian: hessfuncs = [] for var1 in vars: for var2 in vars: hessfuncs.append(lambdify(vars + pars, sympy.diff(expr,var1,var2), dummify=False, modules='numpy')) def _hessian(self, w, t): kw = self.parameters.copy() for k,v in kw.items(): kw[k] = v.value for i,name in enumerate(var_names): kw[name] = w[:,i] # expand = [np.newaxis] * w[i].ndim # This ain't pretty, bub arrs = [] for f in hessfuncs: hess_arr = np.array(f(**kw)) if hess_arr.shape != w[:,i].shape: hess_arr = np.tile(hess_arr, reps=w[:,i].shape) arrs.append(hess_arr) hess = np.vstack(arrs) return hess.reshape((ndim,ndim,len(w[:,i]))) CustomPotential._hessian = _hessian CustomPotential.save = None return CustomPotential
[ "\n Create a potential class from an expression for the potential.\n\n .. note::\n\n This utility requires having `Sympy <http://www.sympy.org/>`_ installed.\n\n .. warning::\n\n These potentials are *not* pickle-able and cannot be written\n out to YAML files (using `~gala.potential.PotentialBase.save()`)\n\n Parameters\n ----------\n expr : :class:`sympy.core.expr.Expr`, str\n Either a ``Sympy`` expression, or a string that can be converted to\n a ``Sympy`` expression.\n vars : iterable\n An iterable of variable names in the expression.\n pars : iterable\n An iterable of parameter names in the expression.\n name : str (optional)\n The name of the potential class returned.\n hessian : bool (optional)\n Generate a function to compute the Hessian.\n\n Returns\n -------\n CustomPotential : `~gala.potential.PotentialBase`\n A potential class that represents the input equation. To instantiate the\n potential, use just like a normal class with parameters.\n\n Examples\n --------\n Here we'll create a potential class for the harmonic oscillator\n potential, :math:`\\Phi(x) = \\frac{1}{2}\\,k\\,x^2`::\n\n >>> Potential = from_equation(\"1/2*k*x**2\", vars=\"x\", pars=\"k\",\n ... name='HarmonicOscillator')\n >>> p1 = Potential(k=1.)\n >>> p1\n <HarmonicOscillatorPotential: k=1.00 (dimensionless)>\n\n The potential class (and object) is a fully-fledged subclass of\n `~gala.potential.PotentialBase` and therefore has many useful methods.\n For example, to integrate an orbit::\n\n >>> orbit = p1.integrate_orbit([1.,0], dt=0.01, n_steps=1000)\n\n " ]
Please provide a description of the function:def format_doc(*args, **kwargs): def set_docstring(obj): # None means: use the objects __doc__ doc = obj.__doc__ # Delete documentation in this case so we don't end up with # awkwardly self-inserted docs. obj.__doc__ = None # If the original has a not-empty docstring append it to the format # kwargs. kwargs['__doc__'] = obj.__doc__ or '' obj.__doc__ = doc.format(*args, **kwargs) return obj return set_docstring
[ "\n Replaces the docstring of the decorated object and then formats it.\n\n Modeled after astropy.utils.decorators.format_doc\n " ]
Please provide a description of the function:def quantity_from_hdf5(dset): if 'unit' in dset.attrs and dset.attrs['unit'] is not None: unit = u.Unit(dset.attrs['unit']) else: unit = 1. return dset[:] * unit
[ "\n Return an Astropy Quantity object from a key in an HDF5 file,\n group, or dataset. This checks to see if the input file/group/dataset\n contains a ``'unit'`` attribute (e.g., in `f.attrs`).\n\n Parameters\n ----------\n dset : :class:`h5py.DataSet`\n\n Returns\n -------\n q : `astropy.units.Quantity`, `numpy.ndarray`\n If a unit attribute exists, this returns a Quantity. Otherwise, it\n returns a numpy array.\n " ]
Please provide a description of the function:def quantity_to_hdf5(f, key, q): if hasattr(q, 'unit'): f[key] = q.value f[key].attrs['unit'] = str(q.unit) else: f[key] = q f[key].attrs['unit'] = ""
[ "\n Turn an Astropy Quantity object into something we can write out to\n an HDF5 file.\n\n Parameters\n ----------\n f : :class:`h5py.File`, :class:`h5py.Group`, :class:`h5py.DataSet`\n key : str\n The name.\n q : float, `astropy.units.Quantity`\n The quantity.\n\n " ]
Please provide a description of the function:def decompose(self, q): try: ptype = q.unit.physical_type except AttributeError: raise TypeError("Object must be an astropy.units.Quantity, not " "a '{}'.".format(q.__class__.__name__)) if ptype in self._registry: return q.to(self._registry[ptype]) else: return q.decompose(self)
[ "\n A thin wrapper around :meth:`astropy.units.Quantity.decompose` that\n knows how to handle Quantities with physical types with non-default\n representations.\n\n Parameters\n ----------\n q : :class:`~astropy.units.Quantity`\n An instance of an astropy Quantity object.\n\n Returns\n -------\n q : :class:`~astropy.units.Quantity`\n A new quantity, decomposed to represented in this unit system.\n " ]
Please provide a description of the function:def get_constant(self, name): try: c = getattr(const, name) except AttributeError: raise ValueError("Constant name '{}' doesn't exist in astropy.constants".format(name)) return c.decompose(self._core_units).value
[ "\n Retrieve a constant with specified name in this unit system.\n\n Parameters\n ----------\n name : str\n The name of the constant, e.g., G.\n\n Returns\n -------\n const : float\n The value of the constant represented in this unit system.\n\n Examples\n --------\n\n >>> usys = UnitSystem(u.kpc, u.Myr, u.radian, u.Msun)\n >>> usys.get_constant('c')\n 306.6013937879527\n\n " ]
Please provide a description of the function:def rolling_window(arr, window_size, stride=1, return_idx=False): window_size = int(window_size) stride = int(stride) if window_size < 0 or stride < 1: raise ValueError arr_len = len(arr) if arr_len < window_size: if return_idx: yield (0,arr_len),arr else: yield arr ix1 = 0 while ix1 < arr_len: ix2 = ix1 + window_size result = arr[ix1:ix2] if return_idx: yield (ix1,ix2),result else: yield result if len(result) < window_size or ix2 >= arr_len: break ix1 += stride
[ "\n There is an example of an iterator for pure-Python objects in:\n http://stackoverflow.com/questions/6822725/rolling-or-sliding-window-iterator-in-python\n This is a rolling-window iterator Numpy arrays, with window size and\n stride control. See examples below for demos.\n\n Parameters\n ----------\n arr : array_like\n Input numpy array.\n window_size : int\n Width of the window.\n stride : int (optional)\n Number of indices to advance the window each iteration step.\n return_idx : bool (optional)\n Whether to return the slice indices alone with the array segment.\n\n Examples\n --------\n >>> a = np.array([1,2,3,4,5,6])\n >>> for x in rolling_window(a, 3):\n ... print(x)\n [1 2 3]\n [2 3 4]\n [3 4 5]\n [4 5 6]\n >>> for x in rolling_window(a, 2, stride=2):\n ... print(x)\n [1 2]\n [3 4]\n [5 6]\n >>> for (i1,i2),x in rolling_window(a, 2, stride=2, return_idx=True): # doctest: +SKIP\n ... print(i1, i2, x)\n (0, 2, array([1, 2]))\n (2, 4, array([3, 4]))\n (4, 6, array([5, 6]))\n\n " ]