Dataset schema: docstring (string, lengths 52 to 499), function (string, lengths 67 to 35.2k), __index_level_0__ (int64, values 52.6k to 1.16M)
Sets time in seconds since Epoch Args: time (:obj:`float`): time in seconds since Epoch (see time.time()) Returns: None
def set(self, time):
    self._time = time
    self._pb.sec = int(self._time)
    self._pb.nsec = int((self._time - self._pb.sec) * 10 ** 9)
875,238
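The seconds/nanoseconds split above is easy to verify in isolation. A minimal standalone sketch (split_epoch is a hypothetical helper, not part of the library):

import time

def split_epoch(t):
    # Split a float epoch time into whole seconds and remaining nanoseconds
    sec = int(t)
    nsec = int((t - sec) * 10 ** 9)
    return sec, nsec

sec, nsec = split_epoch(time.time())
assert 0 <= nsec < 10 ** 9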
Deregisters a local check Parameters: check (ObjectID): Check ID Returns: bool: ``True`` on success The agent will take care of deregistering the check from the Catalog.
async def deregister(self, check):
    check_id = extract_attr(check, keys=["CheckID", "ID"])
    response = await self._api.get("/v1/agent/check/deregister", check_id)
    return response.status == 200
875,272
Fetches existing prepared query Parameters: query (ObjectID): Query ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. watch (Blocking): Do a blocking query consistency (Consistency): Force consistency Returns: Object: Query definition Raises: NotFound: Query does not exist
async def read(self, query, *, dc=None, watch=None, consistency=None):
    query_id = extract_attr(query, keys=["ID"])
    response = await self._api.get("/v1/query", query_id,
                                   params={"dc": dc},
                                   watch=watch,
                                   consistency=consistency)
    result = response.body[0]
    return result
875,421
Updates an existing prepared query Parameters: query (Object): Query definition dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: bool: ``True`` on success
async def update(self, query, *, dc=None):
    query_id = extract_attr(query, keys=["ID"])
    response = await self._api.put("/v1/query", query_id,
                                   params={"dc": dc}, data=query)
    return response.status == 200
875,422
Deletes an existing prepared query Parameters: query (ObjectID): Query ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. Returns: bool: ``True`` on success
async def delete(self, query, *, dc=None):
    query_id = extract_attr(query, keys=["ID"])
    response = await self._api.delete("/v1/query", query_id, params={"dc": dc})
    return response.status == 200
875,423
Explains an existing prepared query Parameters: query (ObjectID): Query ID dc (str): Specify datacenter that will be used. Defaults to the agent's local datacenter. consistency (Consistency): Force consistency Returns: Object: the query explanation Raises: NotFound: the query does not exist
async def explain(self, query, *, dc=None, consistency=None):
    query_id = extract_attr(query, keys=["ID"])
    path = "/v1/query/%s/explain" % query_id
    response = await self._api.get(path, consistency=consistency,
                                   params={"dc": dc})
    result = response.body
    return result
875,425
Add multiple command line flags Arguments: flags (:obj:`list` of :obj:`tuple`): List of flags as tuples (name, flag_type, description, (optional) default) Raises: TypeError: if the provided arguments are of the wrong types
def add_multiple(self, flags):
    if not isinstance(flags, list):
        raise TypeError("Expected list of flags, got object of type {}".format(type(flags)))
    for flag in flags:
        if isinstance(flag, Flag):
            self.add_item(flag)
        elif isinstance(flag, tuple):
            try:
                item = Flag(*flag)
                self.add_item(item)
            except TypeError:
                # original format string referenced index {3} with only two
                # arguments supplied, which raises IndexError instead of the
                # intended message; pass the offending flag as {2}
                raise TypeError(
                    "Invalid arguments to initialize a flag definition, "
                    "expected ({0} [, {1}]) but got {2}".format(
                        ", ".join(Flag.REQUIRED_FIELDS),
                        ", ".join(Flag.OPTIONAL_FIELDS),
                        flag))
875,445
Validates incoming data Args: data(dict): the incoming data Returns: True if the data is valid Raises: ValueError: the data is not valid
def validate(data):
    text = data.get('text')
    if not isinstance(text, _string_types) or len(text) == 0:
        raise ValueError('text field is required and should not be empty')
    if 'markdown' in data and not isinstance(data['markdown'], bool):
        raise ValueError('markdown field should be bool')
    if 'attachments' in data:
        if not isinstance(data['attachments'], (list, tuple)):
            raise ValueError('attachments field should be list or tuple')
        for attachment in data['attachments']:
            if 'text' not in attachment and 'title' not in attachment:
                raise ValueError('text or title is required in attachment')
    return True
875,563
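For illustration, the same validation contract restated as a self-contained Python 3 sketch (validate_payload is a hypothetical stand-in for the module's validate; str replaces the _string_types shim):

def validate_payload(data):
    # text is required and must be a non-empty string
    text = data.get('text')
    if not isinstance(text, str) or not text:
        raise ValueError('text field is required and should not be empty')
    # markdown, when present, must be a bool
    if 'markdown' in data and not isinstance(data['markdown'], bool):
        raise ValueError('markdown field should be bool')
    # every attachment needs a text or a title
    for attachment in data.get('attachments', []):
        if 'text' not in attachment and 'title' not in attachment:
            raise ValueError('text or title is required in attachment')
    return True

assert validate_payload({'text': 'hi', 'attachments': [{'title': 't'}]})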
Sends an incoming message Args: url(str): the incoming hook url data(dict): the sending data Returns: requests.Response
def send(url, data):
    validate(data)
    return requests.post(url, json=data)
875,564
Sends an RTMMessage Should be called after starting the loop Args: message(RTMMessage): the message to send Raises: WebSocketConnectionClosedException: if the loop is closed
def send(self, message):
    if "call_id" not in message:
        message["call_id"] = self.gen_call_id()
    self._ws.send(message.to_json())
875,628
Removes and returns a RTMMessage from self._inbox Args: block(bool): if True block until a RTMMessage is available, else it will return None when self._inbox is empty timeout(int): it blocks at most timeout seconds Returns: RTMMessage if self._inbox is not empty, else None
def get_message(self, block=False, timeout=None):
    try:
        message = self._inbox.get(block=block, timeout=timeout)
        return message
    except Exception:
        return None
875,629
Removes and returns an error from self._errors Args: block(bool): if True block until an error is available, else it will return None when self._errors is empty timeout(int): it blocks at most timeout seconds Returns: error if self._errors is not empty, else None
def get_error(self, block=False, timeout=None):
    try:
        error = self._errors.get(block=block, timeout=timeout)
        return error
    except Exception:
        return None
875,630
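Both getters wrap the standard library queue pattern. A self-contained sketch of the same non-blocking get; catching queue.Empty is tighter than the bare except Exception used above:

import queue

def get_or_none(q, block=False, timeout=None):
    # Return the next item, or None if the queue is empty (mirrors get_message/get_error)
    try:
        return q.get(block=block, timeout=timeout)
    except queue.Empty:
        return None

inbox = queue.Queue()
inbox.put('hello')
assert get_or_none(inbox) == 'hello'
assert get_or_none(inbox) is None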
Deregisters a local service Parameters: service (ObjectID): Service ID Returns: bool: ``True`` on success The deregister endpoint is used to remove a service from the local agent. The agent will take care of deregistering the service with the Catalog. If there is an associated check, that is also deregistered.
async def deregister(self, service):
    service_id = extract_attr(service, keys=["ServiceID", "ID"])
    response = await self._api.get(
        "/v1/agent/service/deregister", service_id)
    return response.status == 200
875,873
Resumes normal operation for service Parameters: service (ObjectID): Service ID reason (str): Text string explaining the reason for placing the service into normal mode. Returns: bool: ``True`` on success
async def enable(self, service, *, reason=None):
    return await self.maintenance(service, False, reason=reason)
875,875
Enters maintenance mode / Resumes normal operation for service Parameters: service (ObjectID): Service ID enable (bool): Enter or exit maintenance mode reason (str): Text string explaining the reason for placing the service into maintenance mode or back into normal operation. Returns: bool: ``True`` on success
async def maintenance(self, service, enable, *, reason=None):
    service_id = extract_attr(service, keys=["ServiceID", "ID"])
    response = await self._api.put(
        "/v1/agent/service/maintenance", service_id,
        params={"enable": enable, "reason": reason})
    return response.status == 200
875,876
Creates a MercedesMeOAuth object Parameters: - client_id - the client id of your app - client_secret - the client secret of your app - redirect_uri - the redirect URI of your app - scope - the desired scope of the request - cache_path - path to location to save tokens
def __init__(self, client_id, client_secret, redirect_uri, scope, cache_path):
    self.client_id = client_id
    self.client_secret = client_secret
    self.redirect_uri = redirect_uri
    self.cache_path = cache_path
    self.scope = scope
875,922
Gets the access token for the app given the authorization code Parameters: - code - the authorization code returned by the OAuth redirect
def get_access_token(self, code):
    payload = {'redirect_uri': self.redirect_uri,
               'code': code,
               'grant_type': 'authorization_code'}
    headers = self._make_authorization_headers()
    response = requests.post(self.OAUTH_TOKEN_URL, data=payload,
                             headers=headers, verify=LOGIN_VERIFY_SSL_CERT)
    if response.status_code != 200:  # 'is not' compares identity, not value
        raise MercedesMeAuthError(response.reason)
    token_info = response.json()
    token_info = self._add_custom_values_to_token_info(token_info)
    self._save_token_info(token_info)
    return token_info
875,926
Gets user information by user id Args: user_id(int): the id of user Returns: User Raises: RTMServiceError: when the request fails
def info(self, user_id):
    resp = self._rtm_client.get('v1/user.info?user_id={}'.format(user_id))
    if resp.is_fail():
        raise RTMServiceError('Failed to get user information', resp)
    return resp.data['result']
876,263
Gets channel information by channel id Args: channel_id(int): the id of channel Returns: Channel Raises: RTMServiceError: when the request fails
def info(self, channel_id):
    resource = 'v1/channel.info?channel_id={}'.format(channel_id)
    resp = self._rtm_client.get(resource)
    if resp.is_fail():
        raise RTMServiceError("Failed to get channel information", resp)
    return resp.data['result']
876,264
Prepare check for catalog endpoint Parameters: data (Object or ObjectID): Check ID or check definition Returns: Tuple[str, dict]: where first is ID and second is check definition
def prepare_check(data):
    if not data:
        return None, {}
    if isinstance(data, str):
        return data, {}
    result = {}
    if "ID" in data:
        result["CheckID"] = data["ID"]
    for k in ("Node", "CheckID", "Name", "Notes", "Status", "ServiceID"):
        if k in data:
            result[k] = data[k]
    if list(result) == ["CheckID"]:
        return result["CheckID"], {}
    return result.get("CheckID"), result
876,629
Replies with a text message Args: text(str): message content Returns: RTMMessage
def reply(self, text):
    data = {'text': text, 'vchannel_id': self['vchannel_id']}
    if self.is_p2p():
        data['type'] = RTMMessageType.P2PMessage
        data['to_uid'] = self['uid']
    else:
        data['type'] = RTMMessageType.ChannelMessage
        data['channel_id'] = self['channel_id']
    return RTMMessage(data)
876,695
Refers to the current message and replies with a new message Args: text(str): message content Returns: RTMMessage
def refer(self, text):
    data = self.reply(text)
    data['refer_key'] = self['key']
    return data
876,696
Performs the HTTP request Args: resource(str): resource uri (relative path) method(str): HTTP method params(dict): uri queries data(dict): HTTP body (form) json(dict): HTTP body (json) headers(dict): HTTP headers Returns: RTMResponse
def do(self, resource, method, params=None, data=None, json=None, headers=None):
    uri = "{0}/{1}".format(self._api_base, resource)
    if not params:
        params = {}
    params.update({'token': self._token})
    req = Request(method=method, url=uri, params=params,
                  headers=headers, data=data, json=json)
    s = Session()
    prepped = s.prepare_request(req)
    resp = s.send(prepped)
    return RTMResponse(resp)
876,928
Searches across Google Image Search with the specified image query and downloads the specified count of images Arguments: imageQuery {[str]} -- [Image Search Query] Keyword Arguments: imageCount {[int]} -- [Count of images that need to be downloaded] destinationFolder {[str]} -- [Download Destination Folder] threadCount {[int]} -- [Count of Threads, to parallelize download of images]
def extract_images(self, imageQuery, imageCount=100, destinationFolder='./', threadCount=4):
    # Initialize the chrome driver
    self._initialize_chrome_driver()
    # Initialize the image download parameters
    self._imageQuery = imageQuery
    self._imageCount = imageCount
    self._destinationFolder = destinationFolder
    self._threadCount = threadCount
    self._get_image_urls()
    self._create_storage_folder()
    self._download_images()
    # Print the final message specifying the total count of images downloaded
    print(colored('\n\nImages Downloaded: ' + str(self._imageCounter) +
                  ' of ' + str(self._imageCount) + ' in ' +
                  format_timespan(self._downloadProgressBar.data()['total_seconds_elapsed']) +
                  '\n', 'green'))
    # Terminate the chrome instance
    self._chromeDriver.close()
    self._reset_helper_variables()
877,365
Downloads an image file from the given image URL Arguments: imageURL {[str]} -- [Image URL]
def _download_image(self, imageURL):
    # If the required count of images has been downloaded,
    # refrain from downloading the remainder of the images
    if self._imageCounter >= self._imageCount:
        return
    try:
        imageResponse = requests.get(imageURL)
        # Generate image file name as <_imageQuery>_<_imageCounter>.<extension>
        imageType, imageEncoding = mimetypes.guess_type(imageURL)
        if imageType is not None:
            imageExtension = mimetypes.guess_extension(imageType)
        else:
            imageExtension = mimetypes.guess_extension(
                imageResponse.headers['Content-Type'])
        imageFileName = self._imageQuery.replace(' ', '_') + '_' + \
            str(self._imageCounter) + imageExtension
        imageFileName = os.path.join(self._storageFolder, imageFileName)
        image = Image.open(BytesIO(imageResponse.content))
        image.save(imageFileName)
        self._imageCounter += 1
        self._downloadProgressBar.update(self._imageCounter)
    except Exception:
        pass
877,371
Update ConfigMap from mapping/iterable. If the key exists the entry is updated else it is added. Args: *args: variable length argument list. A valid argument is a two item tuple/list. The first item is the key and the second is the value. **kwargs: Arbitrary keyword arguments representing the config.
def update(self, *args, **kwargs):
    for k, v in args:
        self[k] = v
    for k, v in kwargs.items():
        self[k] = v
877,699
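The semantics mirror dict.update with two-item pairs. A standalone sketch against a plain dict (update_map is a hypothetical free function, not the ConfigMap method):

def update_map(mapping, *args, **kwargs):
    # positional two-item pairs first, then keyword overrides
    for k, v in args:
        mapping[k] = v
    for k, v in kwargs.items():
        mapping[k] = v

config = {}
update_map(config, ('host', 'localhost'), port=5432)
assert config == {'host': 'localhost', 'port': 5432}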
Enters maintenance mode Parameters: reason (str): Reason of disabling Returns: bool: ``True`` on success
async def disable(self, reason=None):
    # entering maintenance mode means enabling the maintenance flag
    params = {"enable": True, "reason": reason}
    response = await self._api.put("/v1/agent/maintenance", params=params)
    return response.status == 200
877,883
Resumes normal operation Parameters: reason (str): Reason of enabling Returns: bool: ``True`` on success
async def enable(self, reason=None):
    # resuming normal operation means disabling the maintenance flag
    params = {"enable": False, "reason": reason}
    response = await self._api.put("/v1/agent/maintenance", params=params)
    return response.status == 200
877,884
Triggers the local agent to join a node Parameters: address (str): Address of node wan (bool): Attempt to join using the WAN pool Returns: bool: ``True`` on success This endpoint is used to instruct the agent to attempt to connect to a given address. For agents running in server mode, providing ``wan`` parameter causes the agent to attempt to join using the WAN pool.
async def join(self, address, *, wan=None):
    response = await self._api.get("/v1/agent/join", address,
                                   params={"wan": wan})
    return response.status == 200
878,054
Metric data Args: value (:obj:`bool` or :obj:`int` or :obj:`long` or :obj:`float` or :obj:`basestring` or :obj:`bytes`) Returns: value Raises: :obj:`TypeError`
def data(self):
    if self._data_type == int:
        if self._pb.HasField("int64_data"):
            return self._pb.int64_data
        if self._pb.HasField("int32_data"):
            return self._pb.int32_data
        if self._pb.HasField("uint64_data"):
            return self._pb.uint64_data
        if self._pb.HasField("uint32_data"):
            return self._pb.uint32_data
    elif self._data_type == float:
        if self._pb.HasField("float32_data"):
            return self._pb.float32_data
        if self._pb.HasField("float64_data"):
            return self._pb.float64_data
    elif self._data_type == str:
        return self._pb.string_data
    elif self._data_type == bool:
        return self._pb.bool_data
    elif self._data_type == bytes:
        return self._pb.bytes_data
    return None
878,078
Destroys a given token. Parameters: token (ObjectID): Token ID Returns: bool: ``True`` on success
async def destroy(self, token):
    token_id = extract_attr(token, keys=["ID"])
    response = await self._api.put("/v1/acl/destroy", token_id)
    return response.body
878,125
Draw molecule structure image. Args: canvas: draw.drawable.Drawable mol: model.graphmol.Compound
def draw(canvas, mol):
    mol.require("ScaleAndCenter")
    mlb = mol.size2d[2]
    if not mol.atom_count():
        return
    bond_type_fn = {
        1: {0: single_bond, 1: wedged_single,
            2: dashed_wedged_single, 3: wave_single},
        2: {0: cw_double, 1: counter_cw_double,
            2: double_bond, 3: cross_double},
        3: {0: triple_bond}
    }
    # Draw bonds
    for u, v, bond in mol.bonds_iter():
        if not bond.visible:
            continue
        if (u < v) == bond.is_lower_first:
            f, s = (u, v)
        else:
            s, f = (u, v)
        p1 = mol.atom(f).coords
        p2 = mol.atom(s).coords
        if p1 == p2:
            continue  # avoid zero division
        if mol.atom(f).visible:
            p1 = gm.t_seg(p1, p2, F_AOVL, 2)[0]
        if mol.atom(s).visible:
            p2 = gm.t_seg(p1, p2, F_AOVL, 1)[1]
        color1 = mol.atom(f).color
        color2 = mol.atom(s).color
        bond_type_fn[bond.order][bond.type](
            canvas, p1, p2, color1, color2, mlb)
    # Draw atoms
    for n, atom in mol.atoms_iter():
        if not atom.visible:
            continue
        p = atom.coords
        color = atom.color
        # Determine text direction
        if atom.H_count:
            cosnbrs = []
            hrzn = (p[0] + 1, p[1])
            for nbr in mol.graph.neighbors(n):
                pnbr = mol.atom(nbr).coords
                try:
                    cosnbrs.append(gm.dot_product(hrzn, pnbr, p) /
                                   gm.distance(p, pnbr))
                except ZeroDivisionError:
                    pass
            if not cosnbrs or min(cosnbrs) > 0:
                # [atom]< or isolated node (ex. H2O, HCl)
                text = atom.formula_html(True)
                canvas.draw_text(p, text, color, "right")
                continue
            elif max(cosnbrs) < 0:
                # >[atom]
                text = atom.formula_html()
                canvas.draw_text(p, text, color, "left")
                continue
        # -[atom]- or no hydrogens
        text = atom.formula_html()
        canvas.draw_text(p, text, color, "center")
878,381
Base query for an url and xpath Args: url (str): URL to search xpath (str): xpath to search (may be ``None``)
def _query(self, url, xpath):
    return self.session.query(CachedRequest) \
        .filter(CachedRequest.url == url) \
        .filter(CachedRequest.xpath == xpath)
878,417
Clear cache Args: url (str): If given, clear specific item only. Otherwise remove the DB file. xpath (str): xpath to search (may be ``None``)
def clear(self, url=None, xpath=None):
    if url is not None:
        query = self._query(url, xpath)
        if query.count() > 0:
            query.delete()
            self.session.commit()
        else:
            raise KeyError("Cannot clear URL, not in cache: " + str(url) +
                           " xpath:" + str(xpath))
    else:
        # remove the DB file
        self.close()
        if path.exists(self.db_path):
            remove(self.db_path)
878,419
Check if a URL (and xpath) exists in the cache If DB has not been initialized yet, returns ``False`` for any URL. Args: url (str): URL to look up xpath (str): xpath to search (may be ``None``) Returns: bool: ``True`` if URL exists, ``False`` otherwise
def has(self, url, xpath=None):
    if not path.exists(self.db_path):
        return False
    return self._query(url, xpath).count() > 0
878,420
Get time stamp of cached query result. If DB has not yet been initialized or url/xpath has not been queried yet, return None. Args: url (str): URL to look up xpath (str): xpath to search (may be ``None``) Returns: datetime.datetime: cached response timestamp, None if not available
def get_timestamp(self, url, xpath=None):
    if not path.exists(self.db_path):
        return None
    if self._query(url, xpath).count() > 0:
        return self._query(url, xpath).one().queried_on
878,421
Save molecules to a file in SDFile format Args: mols: list of molecule objects path: file path to save
def mols_to_file(mols, path):
    with open(path, 'w') as f:
        f.write(mols_to_text(mols))
878,449
Starts the client listener to listen for server responses. Args: None Returns: None
def listen(self):
    logger.info("Listening on port " + str(self.listener.listen_port))
    self.listener.listen()
878,451
Processes messages that have been delivered from the transport protocol. Args: data (dict): A dictionary containing the packet data to resend. Returns: None Examples: >>> data {'method': 'REGISTER', 'address': ('192.168.0.20', 40080)}
def retransmit(self, data):
    # Handle retransmitting REGISTER requests if we don't hear back from
    # the server.
    if data["method"] == "REGISTER":
        if not self.registered and self.register_retries < self.max_retries:
            logger.debug("<%s> Timeout exceeded. " % str(self.cuuid) +
                         "Retransmitting REGISTER request.")
            self.register_retries += 1
            self.register(data["address"], retry=False)
        else:
            logger.debug("<%s> No need to retransmit." % str(self.cuuid))
    if data["method"] == "EVENT":
        if data["euuid"] in self.event_uuids:
            # Increment the current retry count of the euuid
            self.event_uuids[data["euuid"]]["retry"] += 1
            if self.event_uuids[data["euuid"]]["retry"] > self.max_retries:
                logger.debug("<%s> Max retries exceeded. Timed out waiting "
                             "for server for event: %s" % (data["cuuid"],
                                                           data["euuid"]))
                logger.debug("<%s> <euuid:%s> Deleting event from currently "
                             "processing event uuids" % (data["cuuid"],
                                                         str(data["euuid"])))
                del self.event_uuids[data["euuid"]]
            else:
                # Retransmit the event
                self.listener.send_datagram(
                    serialize_data(data, self.compression,
                                   self.encryption, self.server_key),
                    self.server)
                # Then we set another schedule to check again
                logger.debug("<%s> <euuid:%s> Scheduling to retry in %s "
                             "seconds" % (data["cuuid"],
                                          str(data["euuid"]),
                                          str(self.timeout)))
                self.listener.call_later(
                    self.timeout, self.retransmit, data)
        else:
            logger.debug("<%s> <euuid:%s> No need to "
                         "retransmit." % (str(self.cuuid),
                                          str(data["euuid"])))
878,452
This function will send a register packet to the discovered Neteria server. Args: address (tuple): A tuple of the (address, port) to send the register request to. retry (boolean): Whether or not we want to reset the current number of registration retries to 0. Returns: None Examples: >>> address ('192.168.0.20', 40080)
def register(self, address, retry=True):
    logger.debug("<%s> Sending REGISTER request to: %s" % (str(self.cuuid),
                                                           str(address)))
    if not self.listener.listening:
        logger.warning("Neteria client is not listening.")
    # Construct the message to send
    message = {"method": "REGISTER", "cuuid": str(self.cuuid)}
    # If we have encryption enabled, send our public key with our REGISTER
    # request
    if self.encryption:
        message["encryption"] = [self.encryption.n, self.encryption.e]
    # Send a REGISTER to the server
    self.listener.send_datagram(
        serialize_data(message, self.compression, encryption=False),
        address)
    if retry:
        # Reset the current number of REGISTER retries
        self.register_retries = 0
    # Schedule a task to run in x seconds to check to see if we've timed
    # out in receiving a response from the server
    self.listener.call_later(
        self.timeout, self.retransmit,
        {"method": "REGISTER", "address": address})
878,455
This method handles event legality check messages from the server. Args: message (dict): The unserialized legality dictionary received from the server. Returns: None Examples: >>> message
def legal_check(self, message):
    # If the event was legal, remove it from our event buffer
    if message["method"] == "LEGAL":
        logger.debug("<%s> <euuid:%s> Event LEGAL" % (str(self.cuuid),
                                                      message["euuid"]))
        logger.debug("<%s> <euuid:%s> Removing event from event "
                     "buffer." % (str(self.cuuid), message["euuid"]))
        # If the message was a high priority, then we keep track of legal
        # events too
        if message["priority"] == "high":
            self.event_confirmations[
                message["euuid"]] = self.event_uuids[message["euuid"]]
            logger.debug("<%s> <euuid:%s> Event was high priority. Adding "
                         "to confirmations buffer." % (str(self.cuuid),
                                                       message["euuid"]))
            logger.debug("<%s> <euuid:%s> Current event confirmation "
                         "buffer: %s" % (str(self.cuuid), message["euuid"],
                                         pformat(self.event_confirmations)))
        # Try and remove the event from the currently processing events
        try:
            del self.event_uuids[message["euuid"]]
        except KeyError:
            logger.warning("<%s> <euuid:%s> Euuid does not exist in event "
                           "buffer. Key was removed before we could process "
                           "it." % (str(self.cuuid), message["euuid"]))
    # If the event was illegal, remove it from our event buffer and add it
    # to our rollback list
    elif message["method"] == "ILLEGAL":
        logger.debug("<%s> <euuid:%s> Event ILLEGAL" % (str(self.cuuid),
                                                        message["euuid"]))
        logger.debug("<%s> <euuid:%s> Removing event from event buffer and "
                     "adding to rollback buffer." % (str(self.cuuid),
                                                     message["euuid"]))
        self.event_rollbacks[
            message["euuid"]] = self.event_uuids[message["euuid"]]
        del self.event_uuids[message["euuid"]]
878,457
Score examples from a new matrix X Args: estimator: an sklearn estimator object X: design matrix with the same features that the estimator was trained on Returns: a vector of scores of the same length as X Note that estimator.predict_proba is preferred but when unavailable (e.g. SVM without probability calibration) decision_function is used.
def y_score(estimator, X):
    try:
        y = estimator.predict_proba(X)
        return y[:, 1]
    except AttributeError:
        return estimator.decision_function(X)
878,462
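The fallback is easy to demonstrate with scikit-learn's LinearSVC, which exposes decision_function but not predict_proba; a minimal sketch, assuming scikit-learn is installed:

from sklearn.datasets import make_classification
from sklearn.svm import LinearSVC

X, y = make_classification(n_samples=100, random_state=0)
clf = LinearSVC().fit(X, y)
try:
    scores = clf.predict_proba(X)[:, 1]
except AttributeError:
    # LinearSVC has no predict_proba without probability calibration
    scores = clf.decision_function(X)
assert len(scores) == len(X)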
Locu Venue Details API Call Wrapper Args: ids: id(s) of particular venues to get insights about; accepts a single id or a list of up to 5 ids
def get_details(self, ids):
    if isinstance(ids, list):
        if len(ids) > 5:
            ids = ids[:5]
        id_param = ';'.join(ids) + '/'
    else:
        ids = str(ids)
        id_param = ids + '/'
    header, content = self._http_request(id_param)
    resp = json.loads(content)
    if not self._is_http_response_ok(header):
        error = resp.get('error_message', 'Unknown Error')
        raise HttpException(header.status, header.reason, error)
    return resp
878,506
Query all entities of a specific type, with their attributes Args: type_to_query (str): type of entity to query client: DB client to perform query with Returns: pandas.DataFrame: table of entities, with attributes as columns
def query_with_attributes(type_to_query, client):
    session = client.create_session()
    # query all data
    query = session.query(Attribute.name, Attribute.value, Entity.id) \
        .join(Entity) \
        .filter(Entity.type == type_to_query)
    df = client.df_query(query)
    session.close()
    # don't store NaN values
    df = df.dropna(how='any')
    # pivot attribute names to columns, drop column names to one level
    # ('unstack' generated multi-level names)
    df = df.set_index(['id', 'name']).unstack().reset_index()
    # noinspection PyUnresolvedReferences
    df.columns = ['id'] + list(df.columns.get_level_values(1)[1:])
    return df
878,529
Convert a GeoJSON polygon feature to a numpy array Args: feature (pygeoj.Feature): polygon feature to draw shape (tuple(int, int)): shape of 2D target numpy array to draw polygon in lat_idx (func): function converting a latitude to the (fractional) row index in the map lon_idx (func): function converting a longitude to the (fractional) column index in the map Returns: np.array: mask, background is zero, foreground is one
def geojson_polygon_to_mask(feature, shape, lat_idx, lon_idx):
    import matplotlib
    # specify 'agg' renderer, Mac renderer does not support what we want to do below
    matplotlib.use('agg')
    import matplotlib.pyplot as plt
    from matplotlib import patches
    import numpy as np

    # we can only do polygons right now
    if feature.geometry.type not in ('Polygon', 'MultiPolygon'):
        raise ValueError("Cannot handle feature of type " + feature.geometry.type)

    # fictional dpi - doesn't matter in the end
    dpi = 100

    # -- start documentation include: poly-setup
    # make a new figure with no frame, no axes, with the correct size, black background
    fig = plt.figure(frameon=False, dpi=dpi)
    fig.set_size_inches(shape[1] / float(dpi), shape[0] / float(dpi))
    ax = plt.Axes(fig, [0., 0., 1., 1.])
    ax.set_axis_off()
    # noinspection PyTypeChecker
    ax.set_xlim([0, shape[1]])
    # noinspection PyTypeChecker
    ax.set_ylim([0, shape[0]])
    fig.add_axes(ax)
    # -- end documentation include: poly-setup

    # for normal polygons make coordinates iterable
    if feature.geometry.type == 'Polygon':
        coords = [feature.geometry.coordinates]
    else:
        coords = feature.geometry.coordinates

    for poly_coords in coords:
        # the polygon may contain multiple outlines; the first is
        # always the outer one, the others are 'holes'
        for i, outline in enumerate(poly_coords):
            # inside/outside fill value: figure background is white by
            # default, draw inverted polygon and invert again later
            value = 0. if i == 0 else 1.
            # convert lats/lons to row/column indices in the map
            outline = np.array(outline)
            xs = lon_idx(outline[:, 0])
            ys = lat_idx(outline[:, 1])
            # draw the polygon
            poly = patches.Polygon(list(zip(xs, ys)),
                                   facecolor=(value, value, value),
                                   edgecolor='none',
                                   antialiased=True)
            ax.add_patch(poly)

    # -- start documentation include: poly-extract
    # extract the figure to a numpy array
    fig.canvas.draw()
    data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep='')
    # reshape to a proper numpy array, keep one channel only
    data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,))[:, :, 0]
    # -- end documentation include: poly-extract

    # make sure we get the right shape back
    assert data.shape[0] == shape[0]
    assert data.shape[1] == shape[1]

    # convert from uints back to floats and invert to get black background
    data = 1. - data.astype(float) / 255.  # type: np.array

    # image is flipped horizontally w.r.t. map
    data = data[::-1, :]

    # done, clean up
    plt.close('all')

    return data
878,552
Load data table from tsv file, from default location Args: key_filter (str): additional filter for key column - regex matching key values to include; None for no filter header_preproc (func): function to apply to column headers to extract year numbers (as strings) Returns: pd.DataFrame: data
def load(self, key_filter=None, header_preproc=None):
    # read file, keep all values as strings
    df = pd.read_csv(self.input_file, sep='\t', dtype=object)
    if key_filter is not None:
        # filter on key column (first column)
        df = df[df[df.columns[0]].str.match(key_filter)]
    # first column contains metadata, with NUTS2 region key as last (comma-separated) value
    meta_col = df.columns[0]
    df[meta_col] = df[meta_col].str.split(',').str[-1]
    # convert columns to numbers, skip first column (containing metadata)
    for col_name in df.columns[1:]:
        # some values have lower-case characters indicating footnotes, strip them
        stripped = df[col_name].str.replace(r'[a-z]', '')
        # convert to numbers, convert any remaining empty values
        # (indicated by ':' in the input table) to NaN
        df[col_name] = pd.to_numeric(stripped, errors='coerce')
    # preprocess headers
    if header_preproc is not None:
        df.columns = list(df.columns[:1]) + [header_preproc(c) for c in df.columns[1:]]
    # rename columns, convert years to integers
    # noinspection PyTypeChecker
    df.columns = ['key'] + [int(y) for y in df.columns[1:]]
    return df
878,557
Apply function to each step object in the index Args: fn: function to apply. If a list then each function is applied pairwise: whether to apply the function to pairs of steps symmetric, diagonal, block: passed to apply_pairwise when pairwise=True kwargs: keyword arguments to pass to each function. Arguments with list values are grid searched using util.dict_product. Returns: a StepFrame or StepSeries
def dapply(self, fn, pairwise=False, symmetric=True, diagonal=False,
           block=None, **kwargs):
    search_keys = [k for k, v in kwargs.items()
                   if isinstance(v, list) and len(v) > 1]
    functions = util.make_list(fn)
    search = list(product(functions, util.dict_product(kwargs)))
    results = []
    for fn, kw in search:
        if not pairwise:
            r = self.index.to_series().apply(lambda step: fn(step, **kw))
        else:
            r = apply_pairwise(self, fn, symmetric=symmetric,
                               diagonal=diagonal, block=block, **kw)
        name = [] if len(functions) == 1 else [fn.__name__]
        name += util.dict_subset(kw, search_keys).values()
        if isinstance(r, pd.DataFrame):
            columns = pd.MultiIndex.from_tuples(
                [tuple(name + util.make_list(c)) for c in r.columns])
            r.columns = columns
        else:
            r.name = tuple(name)
        results.append(r)
    if len(results) > 1:
        result = pd.concat(results, axis=1)
        # get subset of parameters that were searched over
        column_names = [] if len(functions) == 1 else [None]
        column_names += search_keys
        column_names += [None] * (len(result.columns.names) - len(column_names))
        result.columns.names = column_names
        return StepFrame(result)
    else:
        result = results[0]
        if isinstance(result, pd.DataFrame):
            return StepFrame(result)
        else:
            result.name = functions[0].__name__
            return StepSeries(result)
878,608
Helper function for pairwise apply. Args: steps: an ordered collection of steps function: function to apply, first two positional arguments are steps symmetric: whether function is symmetric in the two steps diagonal: whether to apply on the diagonal block: apply only when the given columns match kwargs: keyword arguments to pass to the function Returns: DataFrame with index and columns equal to the steps argument
def apply_pairwise(self, function, symmetric=True, diagonal=False, block=None, **kwargs):
    steps = self.index
    r = pd.DataFrame(index=steps, columns=steps)
    for i, s1 in enumerate(steps):
        # materialize as a list: range objects have no remove()
        j = list(range(i + 1 if symmetric else len(steps)))
        if not diagonal:
            j.remove(i)
        other = set(steps[j])
        if block is not None:
            df = self.reset_index()
            df = df.merge(df, on=block)
            other &= set(df[df.index_x == s1].index_y)
        for s2 in other:
            r.loc[s1, s2] = function(s1, s2, **kwargs)  # .ix is deprecated
    return r
878,609
Replace unhashable values in a DataFrame with their string repr Args: df: DataFrame columns: columns to replace, if necessary. Default None replaces all columns.
def _print_unhashable(df, columns=None):
    for c in df.columns if columns is None else columns:
        if df.dtypes[c] == object:
            try:
                df[c].apply(hash)
            except TypeError:
                df[c] = df[c].dropna().apply(pformat).ix[df.index]
    return df
878,611
Extracts a LinkableClass from a jar. Args: jar: An open ZipFile instance. name: A string containing the binary name of a class. Raises: KeyError: The class does not exist in the jar.
def extract_class(jar, name):
    with jar.open(name) as entry:
        return LinkableClass(javatools.unpack_class(entry))
878,662
Creates a package-list URL and a link base from a docroot element. Args: app: the global app object root: the docroot element [string or dictionary]
def normalize_docroot(app, root):
    srcdir = app.env.srcdir
    default_version = app.config.javalink_default_version
    if isinstance(root, basestring):
        (url, base) = _parse_docroot_str(srcdir, root)
        return {'root': url, 'base': base, 'version': default_version}
    else:
        normalized = {}
        normalized['root'] = _parse_docroot_str(srcdir, root['root'])[0]
        if 'base' in root:
            normalized['base'] = _parse_docroot_str(srcdir, root['base'])[1]
        else:
            normalized['base'] = _parse_docroot_str(srcdir, root['root'])[1]
        if 'version' in root:
            normalized['version'] = root['version']
        else:
            normalized['version'] = default_version
        return normalized
878,747
Helper function: parse attribute data from a wiki html doc Args: doc (document parsed with lxml.html): parsed wiki page Returns: dict: attributes values and listed links, format ``{<key>: {'value': <value>, 'link': <link>}}``; only the first hyperlink listed in each attribute value is included
def get_attribute_data(doc):
    attributes = dict()
    for attribute_node in doc.xpath("//div[contains(@class, 'pi-data ')]"):
        # label node
        node = attribute_node.xpath(".//*[contains(@class, 'pi-data-label')]")[0]
        label = " ".join(node.itertext()).strip()
        # value node
        node = attribute_node.xpath(".//*[contains(@class, 'pi-data-value')]")[0]
        # get value, first link, and the link text
        value = " ".join(node.itertext()).strip()
        link_node = node.find('a')
        if link_node is not None:
            link = link_node.get('href')
            link_text = link_node.text
        else:
            link = None
            link_text = None
        # store result
        attributes[label] = dict(value=value, link=link, link_text=link_text)
    return attributes
878,779
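A self-contained illustration of the same lxml pattern against an inline snippet (the snippet mimics the portable-infobox markup the function expects):

from lxml import html

snippet = '''
<div class="pi-data ">
  <span class="pi-data-label">Country</span>
  <span class="pi-data-value"><a href="/wiki/France">France</a></span>
</div>
'''
doc = html.fromstring(snippet)
node = doc.xpath("//div[contains(@class, 'pi-data ')]")[0]
label_node = node.xpath(".//*[contains(@class, 'pi-data-label')]")[0]
value_node = node.xpath(".//*[contains(@class, 'pi-data-value')]")[0]
link = value_node.find('a')
assert " ".join(label_node.itertext()).strip() == 'Country'
assert (link.get('href'), link.text) == ('/wiki/France', 'France')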
Rotate and return an image according to its Exif information. ROTATION_NEEDED = { 1: 0, 2: 0 (Mirrored), 3: 180, 4: 180 (Mirrored), 5: -90 (Mirrored), 6: -90, 7: 90 (Mirrored), 8: 90, } Args: image (PIL.Image.Image): PIL image to rotate orientation (int): Optional orientation value in [1, 8] Returns: A :py:class:`~PIL.Image.Image` image.
def autorotate(image, orientation=None):
    orientation_value = orientation if orientation else \
        image._getexif().get(EXIF_KEYS.get('Orientation'))
    if orientation_value is None:
        raise ImDirectException("No orientation available in Exif "
                                "tag or given explicitly.")
    if orientation_value in (1, 2):
        i = image
    elif orientation_value in (3, 4):
        i = image.transpose(Image.ROTATE_180)
    elif orientation_value in (5, 6):
        i = image.transpose(Image.ROTATE_270)
    elif orientation_value in (7, 8):
        i = image.transpose(Image.ROTATE_90)
    else:
        i = image
    if orientation_value in (2, 4, 5, 7):
        i = i.transpose(Image.FLIP_LEFT_RIGHT)
    return i
878,801
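As a quick sanity check of the orientation mapping, a 90-degree transpose swaps an image's dimensions; a minimal Pillow sketch (newer Pillow spells the constant Image.Transpose.ROTATE_270, older versions use Image.ROTATE_270 as in the function above):

from PIL import Image

img = Image.new('RGB', (4, 2))  # landscape 4x2
rotated = img.transpose(Image.Transpose.ROTATE_270)  # orientation 6 case
assert rotated.size == (2, 4)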
Modifies the Exif tag if rotation has been performed. 0th, 1st -------- ImageWidth = 256 ImageLength = 257 XResolution = 282 YResolution = 283 TileWidth = 322 TileLength = 323 Exif ---- PixelXDimension = 40962 PixelYDimension = 40963 Args: exif (dict): The parsed Exif tag Returns: The modified Exif dict.
def update_exif_for_rotated_image(exif):
    orientation_value = exif.get('0th', {}).get(
        piexif.ImageIFD.Orientation, exif.get('1st', {}).get(
            piexif.ImageIFD.Orientation, None))
    if orientation_value is not None:
        # Update orientation.
        exif['0th'][piexif.ImageIFD.Orientation] = 1
        if exif.get('1st', {}).get(piexif.ImageIFD.Orientation) is not None:
            exif['1st'][piexif.ImageIFD.Orientation] = 1
        # If 90 or 270 degree rotation, x dimensions are now y dimensions,
        # so flip all such properties.
        if orientation_value > 4:
            for exif_tag in ['0th', '1st']:
                if exif.get(exif_tag) is not None:
                    x, y = (exif.get(exif_tag).get(piexif.ImageIFD.ImageWidth),
                            exif.get(exif_tag).get(piexif.ImageIFD.ImageLength))
                    if x is not None and y is not None:
                        exif[exif_tag][piexif.ImageIFD.ImageWidth] = y
                        exif[exif_tag][piexif.ImageIFD.ImageLength] = x
                    x, y = (exif.get(exif_tag).get(piexif.ImageIFD.XResolution),
                            exif.get(exif_tag).get(piexif.ImageIFD.YResolution))
                    if x is not None and y is not None:
                        exif[exif_tag][piexif.ImageIFD.XResolution] = y
                        exif[exif_tag][piexif.ImageIFD.YResolution] = x
                    x, y = (exif.get(exif_tag).get(piexif.ImageIFD.TileWidth),
                            exif.get(exif_tag).get(piexif.ImageIFD.TileLength))
                    if x is not None and y is not None:
                        exif[exif_tag][piexif.ImageIFD.TileWidth] = y
                        exif[exif_tag][piexif.ImageIFD.TileLength] = x
            if exif.get('Exif') is not None:
                x, y = (exif.get('Exif').get(piexif.ExifIFD.PixelXDimension),
                        exif.get('Exif').get(piexif.ExifIFD.PixelYDimension))
                if x is not None and y is not None:
                    exif['Exif'][piexif.ExifIFD.PixelXDimension] = y
                    exif['Exif'][piexif.ExifIFD.PixelYDimension] = x
        if exif.get('thumbnail') is not None:
            try:
                thumbnail = pil_open(io.BytesIO(exif.get('thumbnail')))
                thumbnail = autorotate(thumbnail, orientation=orientation_value)
                with io.BytesIO() as bio:
                    thumbnail.save(bio, format='jpeg')
                    bio.seek(0)
                    exif['thumbnail'] = bio.read()
            except Exception:
                warnings.warn("Could not rotate Exif thumbnail.")
    return exif
878,802
Monkey patching PIL.Image.open method Args: enabled (bool): If the monkey patch should be activated or deactivated.
def monkey_patch(enabled=True):
    if enabled:
        Image.open = imdirect_open
    else:
        Image.open = pil_open
878,804
Saves an image using PIL, preserving the exif information. Args: img (PIL.Image.Image): *args: The arguments for the `save` method of the Image class. **kwargs: The keywords for the `save` method of the Image class.
def save_with_exif_info(img, *args, **kwargs):
    if 'exif' in kwargs:
        exif = kwargs.pop('exif')
    else:
        exif = img.info.get('exif')
    img.save(*args, exif=exif, **kwargs)
878,805
scale vector Args: p: point (x, y) factor: scaling factor o: origin (x, y)
def scale(p, factor, o=(0, 0)):
    v = vector(o, p)
    sv = v[0] * factor, v[1] * factor
    return translate(sv, o)
878,892
unit vector Args: v: vector (x, y) lg: length Raises: ValueError: Null vector was given
def unit(v, lg=1):
    try:
        res = scale(v, lg / distance((0, 0), v))
    except ZeroDivisionError:
        raise ValueError("Null vector was given")
    return res
878,893
rotate vector Args: p: point (x, y) rad: angle(radian) o: origin (x, y)
def rotate(p, rad, o=(0, 0)):
    v = vector(o, p)
    fx = lambda x, y, d: x * cos(d) - y * sin(d)
    fy = lambda x, y, d: x * sin(d) + y * cos(d)
    rv = fx(v[0], v[1], rad), fy(v[0], v[1], rad)
    return translate(rv, o)
878,894
Returns cross product Args: p1, p2: point (x, y) o: origin
def cross_product(p1, p2, o=(0, 0)):
    v1 = vector(o, p1)
    v2 = vector(o, p2)
    return v1[0] * v2[1] - v1[1] * v2[0]
878,895
Returns dot product Args: p1, p2: point (x, y) o: origin
def dot_product(p1, p2, o=(0, 0)):
    v1 = vector(o, p1)
    v2 = vector(o, p2)
    return v1[0] * v2[0] + v1[1] * v2[1]
878,896
Returns interior angle of two vectors (0 <= θ <= pi) Args: p1, p2: point (x, y) o: origin Raises: ValueError: p1 or p2 overlaps with the origin
def interior_angle(p1, p2, o=(0, 0)):
    v1 = vector(o, p1)
    v2 = vector(o, p2)
    len1 = distance(o, p1)
    len2 = distance(o, p2)
    try:
        return acos(dot_product(v1, v2) / (len1 * len2))
    except ZeroDivisionError:
        raise ValueError("p1 or p2 is overlapped with origin")
878,897
move segment by distance Args: p1, p2: point (x, y) rad: relative direction angle (radian) dist: distance Returns: translated segment (p1, p2)
def m_seg(p1, p2, rad, dist):
    v = vector(p1, p2)
    m = unit(rotate(v, rad), dist)
    return translate(p1, m), translate(p2, m)
878,898
trim segment Args: p1, p2: point (x, y) t: scaling factor (1 - trimmed segment / original segment) align: 1: trim p2, 2: trim p1, 0: both sides Returns: trimmed segment (p1, p2)
def t_seg(p1, p2, t, align=0):
    v = vector(p1, p2)
    result = {
        1: lambda a, b: (a, translate(b, scale(v, -t))),
        2: lambda a, b: (translate(a, scale(v, t)), b),
        0: lambda a, b: (translate(a, scale(v, t / 2)),
                         translate(b, scale(v, -t / 2)))
    }
    return result[align](p1, p2)
878,899
parallel segment Args: p1, p2: point(x, y) cw: m_seg rad True: -π/2, False: π/2 interval: m_seg dist trim: t_seg trim align: t_seg align
def p_seg(p1, p2, cw, interval, trim=0, align=0):
    case = {True: pi / -2, False: pi / 2}
    p1m, p2m = m_seg(p1, p2, case[cw], interval)
    return t_seg(p1m, p2m, trim, align)
878,900
Evaluate whether vertices are in clockwise order. Args: vertices: list of vertices (x, y) in polygon. Returns: True: clockwise, False: counter-clockwise Raises: ValueError: the polygon is complex or overlapped.
def is_clockwise(vertices):
    it = iterator.consecutive(cycle(vertices), 3)
    clockwise = 0
    counter = 0
    for _ in range(len(vertices)):
        p0, p1, p2 = next(it)
        cross = cross_product(p1, p2, p0)
        int_angle = interior_angle(p0, p2, p1)  # raises ValueError
        if cross < 0:
            clockwise += int_angle
            counter += 2 * pi - int_angle
        else:
            clockwise += 2 * pi - int_angle
            counter += int_angle
    if round(clockwise / pi) == len(vertices) - 2:
        return True
    elif round(counter / pi) == len(vertices) - 2:
        return False
    else:
        raise ValueError("the polygon is complex or overlapped")
878,901
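For simple polygons, the shoelace formula answers the same clockwise/counter-clockwise question more cheaply; a standalone sketch (unlike the angle-sum check above, it does not detect complex or overlapped polygons):

def signed_area(vertices):
    # Shoelace formula: positive for counter-clockwise in a y-up coordinate system
    area = 0.0
    n = len(vertices)
    for i in range(n):
        x1, y1 = vertices[i]
        x2, y2 = vertices[(i + 1) % n]
        area += x1 * y2 - x2 * y1
    return area / 2.0

square_ccw = [(0, 0), (1, 0), (1, 1), (0, 1)]
assert signed_area(square_ccw) > 0  # counter-clockwise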
Convenient wrapper around functions that should exit or raise an exception Example: assert "Can't create folder" in verify_abort(ensure_folder, "/dev/null/not-there") Args: func (callable): Function to execute *args: Args to pass to 'func' **kwargs: Named args to pass to 'func' Returns: (str): Chatter from call to 'func', if it did indeed raise
def verify_abort(func, *args, **kwargs):
    expected_exception = kwargs.pop("expected_exception", runez.system.AbortException)
    with CaptureOutput() as logged:
        try:
            value = func(*args, **kwargs)
            assert False, "%s did not raise, but returned %s" % (func, value)
        except expected_exception:
            return str(logged)
878,909
Save SVG as a file (.svg) Args: path (str): destination file path
def save(self, path):
    with open(path, 'w') as f:
        f.write(self.contents())
878,927
Instantiate the task and build it with luigi Args: local_scheduler (bool): use a local scheduler (True, default) or a remote scheduler task_params: parameters to pass to task for instantiation
def build(cls, local_scheduler=True, **task_params):
    luigi.build([cls(**task_params)], local_scheduler=local_scheduler)
878,998
Commit and close the DB session associated with this task (no error is raised if None is open) Args: commit (bool): commit session before closing (default=True)
def close_session(self, commit=True):
    if self._session is not None:
        if commit:
            self._session.commit()
        self._session.close()
        self._session = None
879,002
Adds hydrogens Args: num (int): number of hydrogens
def add_hydrogen(self, num):
    self.H_count = num
    if num > 0 and self.symbol in ("N", "O"):
        self.H_donor = 1
    else:
        self.H_donor = 0
879,030
Chemical formula HTML Args: reversed (bool): reversed text for leftmost atom groups
def formula_html(self, reversed_=False):
    if self.H_count == 1:
        text = "H"
    elif self.H_count > 1:
        text = "H<sub>{}</sub>".format(self.H_count)
    else:
        text = ""
    seq = [self.symbol, text, self.charge_sign_html()]
    if reversed_:
        seq = reversed(seq)
    return "".join(seq)
879,031
Return True if mol has exactly the same structure as the query Args: mol: Compound query: Compound largest_only: compare only the largest connected graph ignore_hydrogen: ignore explicit hydrogens
def equal(mol, query, largest_only=True, ignore_hydrogen=True):
    m = molutil.clone(mol)
    q = molutil.clone(query)
    if largest_only:
        m = molutil.largest_graph(m)
        q = molutil.largest_graph(q)
    if ignore_hydrogen:
        m = molutil.make_Hs_implicit(m)
        q = molutil.make_Hs_implicit(q)
    if molutil.mw(m) == molutil.mw(q):
        gm = GraphMatcher(q.graph, m.graph, node_match=atom_match)
        return gm.is_isomorphic()
    return False
879,036
Return True if mol is a substructure of the query Args: mol: Compound query: Compound largest_only: compare only the largest connected graph ignore_hydrogen: ignore explicit hydrogens
def substructure(mol, query, largest_only=True, ignore_hydrogen=True):
    def subset_filter(cnt1, cnt2):
        diff = cnt2
        diff.subtract(cnt1)
        if any(v < 0 for v in diff.values()):
            return True
    if not (len(mol) and len(query)):
        return False  # two blank molecules are not isomorphic
    m = molutil.clone(mol)
    q = molutil.clone(query)
    if largest_only:
        m = molutil.largest_graph(m)
        q = molutil.largest_graph(q)
    if ignore_hydrogen:
        m = molutil.make_Hs_implicit(m)
        q = molutil.make_Hs_implicit(q)
    if filter_(m, q, f=subset_filter):
        gm = GraphMatcher(q.graph, m.graph, node_match=atom_match)
        return gm.subgraph_is_isomorphic()
    return False
879,037
Instantiate a client object A client can be configured either from a parameters dictionary ``params`` or directly from an :mod:`sqlalchemy` connection string ``connection_string``. Exactly one of the two must be provided. Args: params (dict): database configuration, as defined in :mod:`ozelot.config` connection_string (str): :mod:`sqlalchemy` connection string
def __init__(self, params=None, connection_string=None):
    if params is None and connection_string is None:
        raise RuntimeError("Please provide either 'params' or 'connection_string'")
    if params is not None and connection_string is not None:
        raise RuntimeError("Please provide only one of 'params' or 'connection_string'")
    if params is not None:
        # log connection string with password hidden
        connection_string_no_pw = self.get_connection_string(params=params, hide_password=True)
        config.logger.info("Client connecting to: " + connection_string_no_pw)
        connection_string = self.get_connection_string(params=params, hide_password=False)
    else:
        config.logger.info("Client connecting to: " + connection_string)
    # create the engine
    self.engine = sa.create_engine(connection_string)
    # turn on foreign key support for SQLite (required for cascading deletes etc.)
    if connection_string.startswith('sqlite://'):
        def on_connect(conn, _):
            conn.execute('pragma foreign_keys=ON')
        from sqlalchemy import event
        event.listen(self.engine, 'connect', on_connect)
    # create a session factory
    self.session_maker = orm.sessionmaker(bind=self.get_engine())
879,079
Store the password for a database connection using :mod:`keyring` Use the ``user`` field as the user name and ``<host>:<driver>`` as service name. Args: params (dict): database configuration, as defined in :mod:`ozelot.config` password (str): password to store
def store_password(params, password):
    user_name = params['user']
    service_name = params['host'] + ':' + params['driver']
    keyring.set_password(service_name=service_name,
                         username=user_name,
                         password=password)
879,080
Get the password for a database connection from :mod:`keyring` Args: params (dict): database configuration, as defined in :mod:`ozelot.config` Returns: str: password
def _get_password(params):
    user_name = params['user']
    service_name = params['host'] + ':' + params['driver']
    return keyring.get_password(service_name=service_name,
                                username=user_name)
879,081
Get a database connection string Args: params (dict): database configuration, as defined in :mod:`ozelot.config` hide_password (bool): if True, the password is hidden in the returned string (use this for logging purposes). Returns: str: connection string
def get_connection_string(params, hide_password=True):
    connection_string = params['driver'] + '://'
    user = params.get('user', None)
    password = params.get('password', None)
    host = params.get('host', None)
    port = params.get('port', None)
    database = params.get('database', None)
    if database is None:
        raise ValueError("Field 'database' of connection parameters cannot be None.")
    # if password is not set, try to get it from keyring
    if password is None and user is not None:
        password = Client._get_password(params)
        if password is None:
            raise RuntimeError("Password not defined and not available in keyring.")
    # don't add host/port/user/password if no host given
    if host is not None:
        # don't add user/password if user not given
        if user is not None:
            connection_string += user
            # omit zero-length passwords
            if len(password) > 0:
                if hide_password:
                    connection_string += ":[password hidden]"
                else:
                    connection_string += ":" + password
            connection_string += "@"
        connection_string += host
        if port is not None:
            connection_string += ':' + str(port)
    connection_string += '/' + database
    return connection_string
879,082
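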
Initialize the class. Arguments: api_key -- your Gemini API key secret_key -- your Gemini API secret key for signatures live -- use the live API? otherwise, use the sandbox (default False)
def __init__(self, api_key='', secret_key='', live=False):
    self.api_key = api_key
    self.secret_key = secret_key
    if live:
        self.base_url = self.live_url
879,087
Send a request to get the public order book, return the response. Arguments: symbol -- currency symbol (default 'btcusd') limit_bids -- limit the number of bids returned (default 0) limit_asks -- limit the number of asks returned (default 0)
def book(self, symbol='btcusd', limit_bids=0, limit_asks=0):
    url = self.base_url + '/v1/book/' + symbol
    params = {'limit_bids': limit_bids, 'limit_asks': limit_asks}
    return requests.get(url, params)
879,089
Send a request to get all public trades, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return trades after this unix timestamp (default 0) limit_trades -- maximum number of trades to return (default 50). include_breaks -- whether to display broken trades (default False)
def trades(self, symbol='btcusd', since=0, limit_trades=50, include_breaks=0):
    url = self.base_url + '/v1/trades/' + symbol
    params = {'since': since,
              'limit_trades': limit_trades,
              'include_breaks': include_breaks}
    return requests.get(url, params)
879,090
Send a request for auction history info, return the response. Arguments: symbol -- currency symbol (default 'btcusd') since -- only return auction events after this timestamp (default 0) limit_auction_results -- maximum number of auction events to return (default 50). include_indicative -- whether to include publication of indicative prices and quantities. (default True)
def auction_history(self, symbol='btcusd', since=0,
                    limit_auction_results=50, include_indicative=1):
    url = self.base_url + '/v1/auction/' + symbol + '/history'
    params = {'since': since,
              'limit_auction_results': limit_auction_results,
              'include_indicative': include_indicative}
    return requests.get(url, params)
879,092
Send a request to place an order, return the response. Arguments: amount -- quoted decimal amount of BTC to purchase price -- quoted decimal amount of USD to spend per BTC side -- 'buy' or 'sell' client_order_id -- an optional client-specified order id (default None) symbol -- currency symbol (default 'btcusd') type -- the order type (default 'exchange limit')
def new_order(self, amount, price, side, client_order_id=None,
              symbol='btcusd', type='exchange limit', options=None):
    request = '/v1/order/new'
    url = self.base_url + request
    params = {'request': request,
              'nonce': self.get_nonce(),
              'symbol': symbol,
              'amount': amount,
              'price': price,
              'side': side,
              'type': type}
    if client_order_id is not None:
        params['client_order_id'] = client_order_id
    if options is not None:
        params['options'] = options
    return requests.post(url, headers=self.prepare(params))
879,093
Send a request to cancel an order, return the response. Arguments: order_id - the order id to cancel
def cancel_order(self, order_id):
    request = '/v1/order/cancel'
    url = self.base_url + request
    params = {'request': request,
              'nonce': self.get_nonce(),
              'order_id': order_id}
    return requests.post(url, headers=self.prepare(params))
879,094
Prepare and return the required HTTP headers. Base64-encode the parameters, sign them with the secret key, and return the headers as the payload. Arguments: params -- a dictionary of parameters
def prepare(self, params):
    jsonparams = json.dumps(params)
    payload = base64.b64encode(jsonparams.encode())
    signature = hmac.new(self.secret_key.encode(), payload,
                         hashlib.sha384).hexdigest()
    return {'X-GEMINI-APIKEY': self.api_key,
            'X-GEMINI-PAYLOAD': payload,
            'X-GEMINI-SIGNATURE': signature}
879,098
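The signing scheme in prepare() is plain HMAC-SHA384 over a base64-encoded JSON payload; a self-contained sketch with dummy values:

import base64
import hashlib
import hmac
import json

params = {'request': '/v1/order/new', 'nonce': 1}
payload = base64.b64encode(json.dumps(params).encode())
signature = hmac.new(b'secret-key', payload, hashlib.sha384).hexdigest()
assert len(signature) == 96  # SHA-384 hex digest length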
Follow a graph to find the nodes connected to a given node. Args: id: the id of the starting node edges: a pandas DataFrame of edges. Each row is an edge with two columns containing the ids of the vertices. directed: If True, edges are directed from first column to second column. Otherwise edges are undirected. _visited: used internally for recursion Returns: the set of all nodes connected to the starting node.
def follow(id, edges, directed=False, _visited=None):
    if _visited is None:
        _visited = set()
    _visited.add(id)
    for row in edges[edges.iloc[:, 0] == id].values:  # .ix is deprecated
        if row[1] not in _visited:
            follow(row[1], edges, directed, _visited)
    if not directed:
        for row in edges[edges.iloc[:, 1] == id].values:
            if row[0] not in _visited:
                follow(row[0], edges, directed, _visited)
    return _visited
879,121
Return connected components from graph determined by edges matrix Args: edges: DataFrame of (undirected) edges. vertices: set of vertices in graph. Defaults to union of all vertices in edges. Returns: list of connected components, each of which is a set of vertices.
def get_components(edges, vertices=None):
    if vertices is None:
        vertices = set(chain(edges.iloc[:, 0], edges.iloc[:, 1]))  # .ix is deprecated
    visited = set()
    components = []
    for id in vertices:
        if id not in visited:
            c = follow(id, edges)
            visited.update(c)
            components.append(c)
    return components
879,122
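follow() recurses once per vertex and can hit Python's recursion limit on long chains; an iterative BFS over a plain edge list computes the same components (components_of is a hypothetical standalone helper):

from collections import defaultdict, deque

def components_of(edges):
    # adjacency sets for an undirected edge list
    adj = defaultdict(set)
    for a, b in edges:
        adj[a].add(b)
        adj[b].add(a)
    seen, result = set(), []
    for start in adj:
        if start in seen:
            continue
        comp, todo = set(), deque([start])
        while todo:
            node = todo.popleft()
            if node not in comp:
                comp.add(node)
                todo.extend(adj[node] - comp)
        seen |= comp
        result.append(comp)
    return result

assert components_of([(1, 2), (2, 3), (4, 5)]) == [{1, 2, 3}, {4, 5}]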
Safely load steps in place, excluding those that fail Args: steps: the steps to load reload: if True, clear the step cache before loading
def load(steps, reload=False):
    # work on collections by default for fewer isinstance() calls per call to load()
    if reload:
        _STEP_CACHE.clear()
    if callable(steps):
        steps = steps()
    if not isinstance(steps, collections.Iterable):
        return load([steps])[0]
    loaded = []
    for s in steps:
        digest = s._digest
        if digest in _STEP_CACHE:
            loaded.append(_STEP_CACHE[digest])
        else:
            try:
                s.load()
                _STEP_CACHE[digest] = s
                loaded.append(s)
            except Exception:
                logging.warn('Error during step load:\n%s' %
                             util.indent(traceback.format_exc()))
    return loaded
879,209
Merges results to form arguments to run(). There are two cases for each result: - dictionary: dictionaries get merged and passed as keyword arguments - list: lists get concatenated to positional arguments - Arguments: kwargs gets merged and args gets appended - else: concatenated and passed as positional arguments Args: inputs: the inputs whose results to merge arguments: an optional existing Arguments object to merge into
def merge_results(inputs, arguments=None):
    if arguments is None:
        arguments = Arguments()
    args = arguments.args
    kwargs = arguments.kwargs
    for i in inputs:
        # without a mapping we handle two cases:
        # when the result is a dict, merge it with a global dict
        if isinstance(i.result, dict):
            # but do not override
            kwargs.update({k: v for k, v in i.result.items() if k not in kwargs})
        elif isinstance(i.result, list):
            args.extend(i.result)
        elif isinstance(i.result, Arguments):
            args.extend(i.result.args)
            kwargs.update({k: v for k, v in i.result.kwargs.items()
                           if k not in kwargs})
        # otherwise use it as a positional argument
        else:
            args.append(i.result)
    return arguments
879,211
Run this step, recursively running or loading inputs. Used in bin/run_step.py which is run by drake. Args: inputs: collection of steps that should be loaded output: step that should be dumped after it is run load_targets (boolean): load all steps which are targets. This argument is not used by run_step.py because target does not get serialized. But it can be useful for running steps directly.
def execute(self, inputs=None, output=None, load_targets=False):
    if self == output:
        if os.path.exists(self._dump_dirname):
            shutil.rmtree(self._dump_dirname)
        if os.path.exists(self._target_filename):
            os.remove(self._target_filename)
        os.makedirs(self._dump_dirname)
    if inputs is None:
        inputs = []
    if not hasattr(self, 'result'):
        if self in inputs or (load_targets and self.target):
            logging.info('Loading\n%s' % util.indent(str(self)))
            self.load()
        else:
            for i in self.inputs:
                i.execute(inputs=inputs, output=output,
                          load_targets=load_targets)
            args = merge_results(self.inputs)
            logging.info('Running\n%s' % util.indent(str(self)))
            self.result = self.run(*args.args, **args.kwargs)
            if self == output:
                logging.info('Dumping\n%s' % util.indent(str(self)))
                self.dump()
                util.touch(self._target_filename)
879,217
Searches the tree for a step Args: value: The value to search for. If value is a string then the search looks for a step of that name. If the value is a type, it looks for a step of that type. Returns: The first step found via a depth-first search.
def get_input(self, value, _search=None):
    if _search is None:
        if isinstance(value, string_types):
            _search = lambda s: s.name  # noqa: E731
        elif isinstance(value, type):
            _search = type
    for i in self.inputs:
        step = i.get_input(value, _search)
        if step is not None:
            return step
    if _search(self) == value:
        return self
879,218
Fetches the row-aggregated input columns for this ColumnFunction. Args: aggregator (Aggregator) Returns: pd.DataFrame: The dataframe has columns with names self.names that were created by this ColumnFunction, and is indexed by the index that was passed to aggregator.aggregate(index).
def apply_and_name(self, aggregator):
    reduced_df = self._apply(aggregator)
    if len(self.names) != len(reduced_df.columns):
        # the check is for any mismatch, not just too many columns
        raise IndexError("ColumnFunction creates a different number of "
                         "columns than it has names for.")
    reduced_df.columns = self.names
    return reduced_df
879,252
This function gets called by ColumnFunction._apply(). After a ColumnFunction has been passed to Aggregator's constructor, the ColumnFunction can use this function to request the populated, aggregated columns that correspond to its ColumnReductions. Args: column_reductions (list[ColumnReduction]) Returns: pd.DataFrame: A dataframe, where the column names are ColumnReductions.
def get_reduced(self, column_reductions):
    for cr in column_reductions:
        if cr not in self.column_reductions:
            raise ValueError("Column reduction %r is not known to this Aggregator!" % cr)
    return self.reduced_df[column_reductions]
879,255
Performs a groupby of the unique Columns by index, as constructed from self.df. Args: index (str, or pd.Index): Index or column name of self.df. Returns: pd.DataFrame: A dataframe, aggregated by index, that contains the result of the various ColumnFunctions, and named accordingly.
def aggregate(self, index):
    # deal with index as a string vs index as an Index/MultiIndex
    if isinstance(index, string_types):
        col_df_grouped = self.col_df.groupby(self.df[index])
    else:
        self.col_df.index = pd.MultiIndex.from_arrays([self.df[i] for i in index])
        col_df_grouped = self.col_df.groupby(level=index)
        self.col_df.index = self.df.index
    # perform the actual aggregation
    self.reduced_df = pd.DataFrame({
        colred: col_df_grouped[colred.column].agg(colred.agg_func)
        for colred in self.column_reductions
    })
    # then apply the functions to produce the final dataframe
    reduced_dfs = []
    for cf in self.column_functions:
        # each apply_and_name() calls get_reduced() with the column reductions it wants
        reduced_dfs.append(cf.apply_and_name(self))
    return pd.concat(reduced_dfs, axis=1)
879,256
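At its core, aggregate() is a pandas groupby/agg per column reduction; a minimal standalone sketch of that building block:

import pandas as pd

df = pd.DataFrame({'id': [1, 1, 2], 'value': [10, 20, 30]})
reduced = df.groupby('id')['value'].agg('sum')
assert reduced.to_dict() == {1: 30, 2: 30}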
Parse molfile part into molecule object Args: lines (list): lines of molfile part Raises: ValueError: Symbol not defined in periodictable.yaml (Polymer expression not supported yet)
def molecule(lines):
    count_line = lines[3]
    num_atoms = int(count_line[0:3])
    num_bonds = int(count_line[3:6])
    # chiral_flag = int(count_line[12:15])  # Not used
    # num_prop = int(count_line[30:33])  # "No longer supported"
    compound = Compound()
    compound.graph._node = atoms(lines[4: num_atoms + 4])
    compound.graph._adj = bonds(lines[num_atoms + 4: num_atoms + num_bonds + 4],
                                compound.graph._node.keys())
    props = properties(lines[num_atoms + num_bonds + 4:])
    add_properties(props, compound)
    return compound
879,284
Yields molecules generated from CTAB text Args: lines (iterable): CTAB text lines no_halt (boolean): True: shows warning messages for invalid format and go on. False: throws an exception for it and stop parsing. assign_descriptors (boolean): if True, default descriptors are automatically assigned.
def mol_supplier(lines, no_halt, assign_descriptors):
    def sdf_block(lns):
        mol = []
        opt = []
        is_mol = True
        for line in lns:
            if line.startswith("$$$$"):
                yield mol[:], opt[:]
                is_mol = True
                mol.clear()
                opt.clear()
            elif line.startswith("M  END"):
                is_mol = False
            elif is_mol:
                mol.append(line.rstrip())
            else:
                opt.append(line.rstrip())
        if mol:
            yield mol, opt

    for i, (mol, opt) in enumerate(sdf_block(lines)):
        try:
            c = molecule(mol)
            if assign_descriptors:
                molutil.assign_descriptors(c)
        except ValueError as err:
            if no_halt:
                print("Unsupported symbol: {} (#{} in v2000reader)".format(
                    err, i + 1))
                c = molutil.null_molecule(assign_descriptors)
            else:
                raise ValueError("Unsupported symbol: {}".format(err))
        except RuntimeError as err:
            if no_halt:
                print("Failed to minimize ring: {} (#{} in v2000reader)".format(
                    err, i + 1))
            else:
                raise RuntimeError("Failed to minimize ring: {}".format(err))
        except Exception:
            if no_halt:
                print("Unexpected error (#{} in v2000reader)".format(i + 1))
                c = molutil.null_molecule(assign_descriptors)
                c.data = optional_data(opt)
                yield c
                continue
            else:
                print(traceback.format_exc())
                raise Exception("Unsupported Error")
        c.data = optional_data(opt)
        yield c
879,285
Python 2/3 friendly decoding of output. Args: value (str | unicode | bytes | None): The value to decode. strip (bool): If True, `strip()` the returned string. (Default value = False) Returns: str: Decoded value, if applicable.
def decode(value, strip=False):
    if value is None:
        return None
    if isinstance(value, bytes) and not isinstance(value, unicode):
        value = value.decode("utf-8")
    if strip:
        return unicode(value).strip()
    return unicode(value)
879,379
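On Python 3, where unicode is just str, the same helper collapses to a few lines; a hypothetical simplified sketch:

def decode3(value, strip=False):
    # bytes -> str, optionally stripped; None passes through
    if value is None:
        return None
    if isinstance(value, bytes):
        value = value.decode('utf-8')
    return value.strip() if strip else value

assert decode3(b'  hello \n', strip=True) == 'hello'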
Conveniently set one or more fields at a time. Args: *args: Optionally set from other objects, available fields from the passed object are used in order **kwargs: Set from given key/value pairs (only names defined in __slots__ are used)
def set(self, *args, **kwargs):
    if args:
        for arg in args:
            if arg is not None:
                for name in self.__slots__:
                    self._set(name, getattr(arg, name, UNSET))
    for name in kwargs:
        self._set(name, kwargs.get(name, UNSET))
879,384
Load step from yaml file Args: filename: a target or step.yaml filename
def load(filename):
    yaml_filename = os.path.join(os.path.dirname(filename), 'step.yaml')
    with open(yaml_filename) as f:
        return yaml.load(f)  # note: PyYAML >= 5 expects an explicit Loader argument
879,456
connect atom group (for SMILES parser) May require recalculation of 2D coordinates for drawing Args: mol: graphmol.Compound() the original object will be copied. bond: Bond object to be connected. the original will not be copied so be careful. base: index of atom in self to connect target: index of atom in group to be connected Raises: TypeError
def add_molecule(self, mol, bond=None, base=None, target=None):
    ai = self.available_idx()
    mapping = {n: n + ai - 1 for n, _ in mol.atoms_iter()}
    relabeled = nx.relabel_nodes(mol.graph, mapping)  # copy=True
    self.graph.add_nodes_from(relabeled.nodes(data=True))
    self.graph.add_edges_from(relabeled.edges(data=True))
    if bond:
        self.add_bond(base, mapping[target], bond)
879,661