Dataset columns (value statistics as reported by the dataset viewer):

- repo: string, 7–55 chars
- path: string, 4–223 chars
- url: string, 87–315 chars
- code: string, 75–104k chars
- code_tokens: list
- docstring: string, 1–46.9k chars
- docstring_tokens: list
- language: string, 1 distinct value
- partition: string, 3 distinct values
- avg_line_len: float64, 7.91–980
repo: HDI-Project/BTB
path: btb/tuning/gp.py
url: https://github.com/HDI-Project/BTB/blob/7f489ebc5591bd0886652ef743098c022d7f7460/btb/tuning/gp.py#L97-L115
code:
```python
def fit(self, X, y):
    """
    Train a gaussian process like normal, then compute a "Probability Of
    Uniform selection" (POU) value.
    """
    # first, train a gaussian process like normal
    super(GPEiVelocity, self).fit(X, y)

    # probability of uniform
    self.POU = 0
    if len(y) >= self.r_minimum:
        # get the best few scores so far, and compute the average distance
        # between them.
        top_y = sorted(y)[-self.N_BEST_Y:]
        velocities = [top_y[i + 1] - top_y[i] for i in range(len(top_y) - 1)]

        # the probability of returning random parameters scales inversely with
        # the "velocity" of top scores.
        self.POU = np.exp(self.MULTIPLIER * np.mean(velocities))
```
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "# first, train a gaussian process like normal", "super", "(", "GPEiVelocity", ",", "self", ")", ".", "fit", "(", "X", ",", "y", ")", "# probability of uniform", "self", ".", "POU", "=", "0", "if", "len", "(", "y", ")", ">=", "self", ".", "r_minimum", ":", "# get the best few scores so far, and compute the average distance", "# between them.", "top_y", "=", "sorted", "(", "y", ")", "[", "-", "self", ".", "N_BEST_Y", ":", "]", "velocities", "=", "[", "top_y", "[", "i", "+", "1", "]", "-", "top_y", "[", "i", "]", "for", "i", "in", "range", "(", "len", "(", "top_y", ")", "-", "1", ")", "]", "# the probability of returning random parameters scales inversely with", "# the \"velocity\" of top scores.", "self", ".", "POU", "=", "np", ".", "exp", "(", "self", ".", "MULTIPLIER", "*", "np", ".", "mean", "(", "velocities", ")", ")" ]
Train a gaussian process like normal, then compute a "Probability Of Uniform selection" (POU) value.
[ "Train", "a", "gaussian", "process", "like", "normal", "then", "compute", "a", "Probability", "Of", "Uniform", "selection", "(", "POU", ")", "value", "." ]
language: python | partition: train | avg_line_len: 40.315789
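In the sample above, `POU` becomes the probability of ignoring the GP and proposing uniformly random parameters once top scores plateau. A minimal, runnable sketch of that gating idea (the candidate structure and selection helpers here are invented for illustration, not BTB's actual API):

```python
import numpy as np

rng = np.random.default_rng(0)

def propose(pou, candidates):
    """Pick a candidate: uniform-random with probability `pou`, else best EI."""
    if rng.random() < pou:
        # explore: uniform selection over candidates
        return candidates[rng.integers(len(candidates))]
    # exploit: stand-in for the GP expected-improvement proposal
    return max(candidates, key=lambda c: c["ei"])

candidates = [{"x": 0.1, "ei": 0.3}, {"x": 0.7, "ei": 0.9}]
print(propose(0.05, candidates))  # usually the highest-EI candidate
```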
repo: ARMmbed/mbed-cloud-sdk-python
path: src/mbed_cloud/_backends/device_directory/apis/default_api.py
url: https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/device_directory/apis/default_api.py#L535-L559
code:
```python
def device_log_list(self, **kwargs):  # noqa: E501
    """DEPRECATED: List all device events.  # noqa: E501

    DEPRECATED: List all device events. Use `/v3/device-events/` instead.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass asynchronous=True

    >>> thread = api.device_log_list(asynchronous=True)
    >>> result = thread.get()

    :param asynchronous bool
    :param int limit: How many objects to retrieve in the page.
    :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`.
    :param str after: The ID of the item after which to retrieve the next page.
    :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`.
    :param str filter: URL encoded query string parameter to filter returned data.

        ##### Filtering
        ```?filter={URL encoded query string}```

        The query string is made up of key/value pairs separated by ampersands. So for a query of
        ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows:
        ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3```

        ###### Filterable fields:
        The below table lists all the fields that can be filtered on with certain filters:

        <table>
          <thead>
            <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> </tr>
          </thead>
          <tbody>
            <tr> <td>date_time</td> <td>✓</td> <td>✓</td> <td>✓</td> </tr>
            <tr> <td>description</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr>
            <tr> <td>id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr>
            <tr> <td>device_id</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr>
            <tr> <td>event_type</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr>
            <tr> <td>state_change</td> <td>✓</td> <td>✓</td> <td>&nbsp;</td> </tr>
          </tbody>
        </table>

        The examples below show the queries in *unencoded* form.

        ###### By id:
        ```id={id}```

        ###### By state change:
        ```state_change=[True|False]```

        ###### By event type:
        ```event_type={value}```

        ###### On date-time fields:
        Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```.
        There are three permitted variations:

        * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z
        * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z
        * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z

        Date-time filtering supports three operators:

        * equality
        * greater than or equal to &ndash; field name suffixed with ```__gte```
        * less than or equal to &ndash; field name suffixed with ```__lte```

        Lower and upper limits to a date-time range may be specified by including both the
        ```__gte``` and ```__lte``` forms in the filter.
        ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}```

        ##### Multi-field example
        ```id=0158d38771f70000000000010010038c&state_change=True&date_time__gte=2016-11-30T16:25:12.1234Z```
        Encoded:
        ```?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z```

        ##### Filtering with filter operators
        String field filtering supports the following operators:

        * equality: `__eq`
        * non-equality: `__neq`
        * in : `__in`
        * not in: `__nin`

        For `__in` and `__nin` filters list of parameters must be comma-separated:
        `event_type__in=update.device.device-created,update.device.device-updated`
    :return: DeviceEventPage
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.device_log_list_with_http_info(**kwargs)  # noqa: E501
    else:
        (data) = self.device_log_list_with_http_info(**kwargs)  # noqa: E501
        return data
```
[ "def", "device_log_list", "(", "self", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "self", ".", "device_log_list_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "else", ":", "(", "data", ")", "=", "self", ".", "device_log_list_with_http_info", "(", "*", "*", "kwargs", ")", "# noqa: E501", "return", "data" ]
DEPRECATED: List all device events. # noqa: E501 DEPRECATED: List all device events. Use `/v3/device-events/` instead. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.device_log_list(asynchronous=True) >>> result = thread.get() :param asynchronous bool :param int limit: How many objects to retrieve in the page. :param str order: The order of the records based on creation time, `ASC` or `DESC`; by default `ASC`. :param str after: The ID of The item after which to retrieve the next page. :param str include: Comma-separated list of data fields to return. Currently supported: `total_count`. :param str filter: URL encoded query string parameter to filter returned data. ##### Filtering ```?filter={URL encoded query string}``` The query string is made up of key/value pairs separated by ampersands. So for a query of ```key1=value1&key2=value2&key3=value3``` this would be encoded as follows: ```?filter=key1%3Dvalue1%26key2%3Dvalue2%26key3%3Dvalue3``` ###### Filterable fields: The below table lists all the fields that can be filtered on with certain filters: <table> <thead> <tr> <th>Field</th> <th>= / __eq / __neq</th> <th>__in / __nin</th> <th>__lte / __gte</th> <tr> <thead> <tbody> <tr> <td>date_time</td> <td>βœ“</td> <td>βœ“</td> <td>βœ“</td> </tr> <tr> <td>description</td> <td>βœ“</td> <td>βœ“</td> <td>&nbsp;</td> </tr> <tr> <td>id</td> <td>βœ“</td> <td>βœ“</td> <td>&nbsp;</td> </tr> <tr> <td>device_id</td> <td>βœ“</td> <td>βœ“</td> <td>&nbsp;</td> </tr> <tr> <td>event_type</td> <td>βœ“</td> <td>βœ“</td> <td>&nbsp;</td> </tr> <tr> <td>state_change</td> <td>βœ“</td> <td>βœ“</td> <td>&nbsp;</td> </tr> </tbody> </table> &nbsp; The examples below show the queries in *unencoded* form. ###### By id: ```id={id}``` ###### By state change: ```state_change=[True|False]``` ###### By event type: ```event_type={value}``` ###### On date-time fields: Date-time fields should be specified in UTC RFC3339 format ```YYYY-MM-DDThh:mm:ss.msZ```. There are three permitted variations: * UTC RFC3339 with milliseconds e.g. 2016-11-30T16:25:12.1234Z * UTC RFC3339 without milliseconds e.g. 2016-11-30T16:25:12Z * UTC RFC3339 shortened - without milliseconds and punctuation e.g. 20161130T162512Z Date-time filtering supports three operators: * equality * greater than or equal to &ndash; field name suffixed with ```__gte``` * less than or equal to &ndash; field name suffixed with ```__lte``` Lower and upper limits to a date-time range may be specified by including both the ```__gte``` and ```__lte``` forms in the filter. ```{field name}[|__lte|__gte]={UTC RFC3339 date-time}``` ##### Multi-field example ```id=0158d38771f70000000000010010038c&state_change=True&date_time__gte=2016-11-30T16:25:12.1234Z``` Encoded: ```?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z``` ##### Filtering with filter operators String field filtering supports the following operators: * equality: `__eq` * non-equality: `__neq` * in : `__in` * not in: `__nin` For `__in` and `__nin` filters list of parameters must be comma-separated: `event_type__in=update.device.device-created,update.device.device-updated` :return: DeviceEventPage If the method is called asynchronously, returns the request thread.
[ "DEPRECATED", ":", "List", "all", "device", "events", ".", "#", "noqa", ":", "E501" ]
language: python | partition: train | avg_line_len: 162.64
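The `filter` parameter in the sample above expects the query string itself to be URL-encoded. The encoded multi-field example from the docstring can be reproduced with the standard library alone:

```python
from urllib.parse import quote

# Unencoded multi-field filter from the docstring example.
raw = ("id=0158d38771f70000000000010010038c"
       "&state_change=True"
       "&date_time__gte=2016-11-30T16:25:12.1234Z")

# safe="" forces '=', '&', and ':' to be percent-encoded so the whole
# expression travels as a single query-parameter value.
print("?filter=" + quote(raw, safe=""))
# ?filter=id%3D0158d38771f70000000000010010038c%26state_change%3DTrue%26date_time__gte%3D2016-11-30T16%3A25%3A12.1234Z
```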
repo: Miserlou/Zappa
path: zappa/cli.py
url: https://github.com/Miserlou/Zappa/blob/3ccf7490a8d8b8fa74a61ee39bf44234f3567739/zappa/cli.py#L2443-L2455
code:
```python
def remove_local_zip(self):
    """
    Remove our local zip file.
    """
    if self.stage_config.get('delete_local_zip', True):
        try:
            if os.path.isfile(self.zip_path):
                os.remove(self.zip_path)
            if self.handler_path and os.path.isfile(self.handler_path):
                os.remove(self.handler_path)
        except Exception as e:  # pragma: no cover
            sys.exit(-1)
```
[ "def", "remove_local_zip", "(", "self", ")", ":", "if", "self", ".", "stage_config", ".", "get", "(", "'delete_local_zip'", ",", "True", ")", ":", "try", ":", "if", "os", ".", "path", ".", "isfile", "(", "self", ".", "zip_path", ")", ":", "os", ".", "remove", "(", "self", ".", "zip_path", ")", "if", "self", ".", "handler_path", "and", "os", ".", "path", ".", "isfile", "(", "self", ".", "handler_path", ")", ":", "os", ".", "remove", "(", "self", ".", "handler_path", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "sys", ".", "exit", "(", "-", "1", ")" ]
Remove our local zip file.
[ "Remove", "our", "local", "zip", "file", "." ]
language: python | partition: train | avg_line_len: 35
repo: Clinical-Genomics/trailblazer
path: trailblazer/store/api.py
url: https://github.com/Clinical-Genomics/trailblazer/blob/27f3cd21043a1077bd7029e85783459a50a7b798/trailblazer/store/api.py#L26-L33
code:
```python
def find_analysis(self, family, started_at, status):
    """Find a single analysis."""
    query = self.Analysis.query.filter_by(
        family=family,
        started_at=started_at,
        status=status,
    )
    return query.first()
```
[ "def", "find_analysis", "(", "self", ",", "family", ",", "started_at", ",", "status", ")", ":", "query", "=", "self", ".", "Analysis", ".", "query", ".", "filter_by", "(", "family", "=", "family", ",", "started_at", "=", "started_at", ",", "status", "=", "status", ",", ")", "return", "query", ".", "first", "(", ")" ]
Find a single analysis.
[ "Find", "a", "single", "analysis", "." ]
language: python | partition: train | avg_line_len: 32.25
repo: chaoss/grimoirelab-perceval
path: perceval/backends/core/slack.py
url: https://github.com/chaoss/grimoirelab-perceval/blob/41c908605e88b7ebc3a536c643fa0f212eaf9e0e/perceval/backends/core/slack.py#L98-L158
code:
```python
def fetch_items(self, category, **kwargs):
    """Fetch the messages

    :param category: the category of items to fetch
    :param kwargs: backend arguments

    :returns: a generator of items
    """
    from_date = kwargs['from_date']
    latest = kwargs['latest']

    logger.info("Fetching messages of '%s' channel from %s",
                self.channel, str(from_date))

    raw_info = self.client.channel_info(self.channel)
    channel_info = self.parse_channel_info(raw_info)
    channel_info['num_members'] = self.client.conversation_members(self.channel)

    oldest = datetime_to_utc(from_date).timestamp()

    # Minimum value supported by Slack is 0 not 0.0
    if oldest == 0.0:
        oldest = 0

    # Slack does not include on its result the lower limit
    # of the search if it has the same date as 'oldest'. To get
    # these messages too, we subtract a low value to be sure
    # the dates are not the same. To avoid precision problems
    # it is subtracted by five decimals and not by six.
    if oldest > 0.0:
        oldest -= .00001

    fetching = True
    nmsgs = 0

    while fetching:
        raw_history = self.client.history(self.channel,
                                          oldest=oldest, latest=latest)
        messages, fetching = self.parse_history(raw_history)

        for message in messages:
            # Fetch user data
            user_id = None
            if 'user' in message:
                user_id = message['user']
            elif 'comment' in message:
                user_id = message['comment']['user']

            if user_id:
                message['user_data'] = self.__get_or_fetch_user(user_id)

            message['channel_info'] = channel_info
            yield message

            nmsgs += 1

            if fetching:
                latest = float(message['ts'])

    logger.info("Fetch process completed: %s message fetched", nmsgs)
```
[ "def", "fetch_items", "(", "self", ",", "category", ",", "*", "*", "kwargs", ")", ":", "from_date", "=", "kwargs", "[", "'from_date'", "]", "latest", "=", "kwargs", "[", "'latest'", "]", "logger", ".", "info", "(", "\"Fetching messages of '%s' channel from %s\"", ",", "self", ".", "channel", ",", "str", "(", "from_date", ")", ")", "raw_info", "=", "self", ".", "client", ".", "channel_info", "(", "self", ".", "channel", ")", "channel_info", "=", "self", ".", "parse_channel_info", "(", "raw_info", ")", "channel_info", "[", "'num_members'", "]", "=", "self", ".", "client", ".", "conversation_members", "(", "self", ".", "channel", ")", "oldest", "=", "datetime_to_utc", "(", "from_date", ")", ".", "timestamp", "(", ")", "# Minimum value supported by Slack is 0 not 0.0", "if", "oldest", "==", "0.0", ":", "oldest", "=", "0", "# Slack does not include on its result the lower limit", "# of the search if it has the same date of 'oldest'. To get", "# this messages too, we substract a low value to be sure", "# the dates are not the same. To avoid precision problems", "# it is substracted by five decimals and not by six.", "if", "oldest", ">", "0.0", ":", "oldest", "-=", ".00001", "fetching", "=", "True", "nmsgs", "=", "0", "while", "fetching", ":", "raw_history", "=", "self", ".", "client", ".", "history", "(", "self", ".", "channel", ",", "oldest", "=", "oldest", ",", "latest", "=", "latest", ")", "messages", ",", "fetching", "=", "self", ".", "parse_history", "(", "raw_history", ")", "for", "message", "in", "messages", ":", "# Fetch user data", "user_id", "=", "None", "if", "'user'", "in", "message", ":", "user_id", "=", "message", "[", "'user'", "]", "elif", "'comment'", "in", "message", ":", "user_id", "=", "message", "[", "'comment'", "]", "[", "'user'", "]", "if", "user_id", ":", "message", "[", "'user_data'", "]", "=", "self", ".", "__get_or_fetch_user", "(", "user_id", ")", "message", "[", "'channel_info'", "]", "=", "channel_info", "yield", "message", "nmsgs", "+=", "1", "if", "fetching", ":", "latest", "=", "float", "(", "message", "[", "'ts'", "]", ")", "logger", ".", "info", "(", "\"Fetch process completed: %s message fetched\"", ",", "nmsgs", ")" ]
Fetch the messages :param category: the category of items to fetch :param kwargs: backend arguments :returns: a generator of items
[ "Fetch", "the", "messages" ]
language: python | partition: test | avg_line_len: 33.213115
repo: BerkeleyAutomation/perception
path: perception/kinect2_sensor.py
url: https://github.com/BerkeleyAutomation/perception/blob/03d9b37dd6b66896cdfe173905c9413c8c3c5df6/perception/kinect2_sensor.py#L168-L213
code:
```python
def start(self):
    """Starts the Kinect v2 sensor stream.

    Raises
    ------
    IOError
        If the Kinect v2 is not detected.
    """
    # open packet pipeline
    if self._packet_pipeline_mode == Kinect2PacketPipelineMode.OPENGL:
        self._pipeline = lf2.OpenGLPacketPipeline()
    elif self._packet_pipeline_mode == Kinect2PacketPipelineMode.CPU:
        self._pipeline = lf2.CpuPacketPipeline()

    # setup logger
    self._logger = lf2.createConsoleLogger(lf2.LoggerLevel.Warning)
    lf2.setGlobalLogger(self._logger)

    # check devices
    self._fn_handle = lf2.Freenect2()
    self._num_devices = self._fn_handle.enumerateDevices()
    if self._num_devices == 0:
        raise IOError('Failed to start stream. No Kinect2 devices available!')
    if self._num_devices <= self._device_num:
        raise IOError('Failed to start stream. Device num %d unavailable!' % (self._device_num))

    # open device
    self._serial = self._fn_handle.getDeviceSerialNumber(self._device_num)
    self._device = self._fn_handle.openDevice(self._serial, pipeline=self._pipeline)

    # add device sync modes
    self._listener = lf2.SyncMultiFrameListener(
        lf2.FrameType.Color | lf2.FrameType.Ir | lf2.FrameType.Depth)
    self._device.setColorFrameListener(self._listener)
    self._device.setIrAndDepthFrameListener(self._listener)

    # start device
    self._device.start()

    # open registration
    self._registration = None
    if self._registration_mode == Kinect2RegistrationMode.COLOR_TO_DEPTH:
        logging.debug('Using color to depth registration')
        self._registration = lf2.Registration(self._device.getIrCameraParams(),
                                              self._device.getColorCameraParams())
    self._running = True
```
[ "def", "start", "(", "self", ")", ":", "# open packet pipeline", "if", "self", ".", "_packet_pipeline_mode", "==", "Kinect2PacketPipelineMode", ".", "OPENGL", ":", "self", ".", "_pipeline", "=", "lf2", ".", "OpenGLPacketPipeline", "(", ")", "elif", "self", ".", "_packet_pipeline_mode", "==", "Kinect2PacketPipelineMode", ".", "CPU", ":", "self", ".", "_pipeline", "=", "lf2", ".", "CpuPacketPipeline", "(", ")", "# setup logger", "self", ".", "_logger", "=", "lf2", ".", "createConsoleLogger", "(", "lf2", ".", "LoggerLevel", ".", "Warning", ")", "lf2", ".", "setGlobalLogger", "(", "self", ".", "_logger", ")", "# check devices", "self", ".", "_fn_handle", "=", "lf2", ".", "Freenect2", "(", ")", "self", ".", "_num_devices", "=", "self", ".", "_fn_handle", ".", "enumerateDevices", "(", ")", "if", "self", ".", "_num_devices", "==", "0", ":", "raise", "IOError", "(", "'Failed to start stream. No Kinect2 devices available!'", ")", "if", "self", ".", "_num_devices", "<=", "self", ".", "_device_num", ":", "raise", "IOError", "(", "'Failed to start stream. Device num %d unavailable!'", "%", "(", "self", ".", "_device_num", ")", ")", "# open device", "self", ".", "_serial", "=", "self", ".", "_fn_handle", ".", "getDeviceSerialNumber", "(", "self", ".", "_device_num", ")", "self", ".", "_device", "=", "self", ".", "_fn_handle", ".", "openDevice", "(", "self", ".", "_serial", ",", "pipeline", "=", "self", ".", "_pipeline", ")", "# add device sync modes", "self", ".", "_listener", "=", "lf2", ".", "SyncMultiFrameListener", "(", "lf2", ".", "FrameType", ".", "Color", "|", "lf2", ".", "FrameType", ".", "Ir", "|", "lf2", ".", "FrameType", ".", "Depth", ")", "self", ".", "_device", ".", "setColorFrameListener", "(", "self", ".", "_listener", ")", "self", ".", "_device", ".", "setIrAndDepthFrameListener", "(", "self", ".", "_listener", ")", "# start device", "self", ".", "_device", ".", "start", "(", ")", "# open registration", "self", ".", "_registration", "=", "None", "if", "self", ".", "_registration_mode", "==", "Kinect2RegistrationMode", ".", "COLOR_TO_DEPTH", ":", "logging", ".", "debug", "(", "'Using color to depth registration'", ")", "self", ".", "_registration", "=", "lf2", ".", "Registration", "(", "self", ".", "_device", ".", "getIrCameraParams", "(", ")", ",", "self", ".", "_device", ".", "getColorCameraParams", "(", ")", ")", "self", ".", "_running", "=", "True" ]
Starts the Kinect v2 sensor stream. Raises ------ IOError If the Kinect v2 is not detected.
[ "Starts", "the", "Kinect", "v2", "sensor", "stream", "." ]
language: python | partition: train | avg_line_len: 40.847826
repo: vals/umis
path: umis/umis.py
url: https://github.com/vals/umis/blob/e8adb8486d9e9134ab8a6cad9811a7e74dcc4a2c/umis/umis.py#L334-L348
code:
```python
def _extract_readnum(read_dict):
    """Extract read numbers from old-style fastqs.

    Handles read 1 and 2 specifications where naming is
    readname/1 readname/2
    """
    pat = re.compile(r"(?P<readnum>/\d+)$")
    parts = pat.split(read_dict["name"])
    if len(parts) == 3:
        name, readnum, endofline = parts
        read_dict["name"] = name
        read_dict["readnum"] = readnum
    else:
        read_dict["readnum"] = ""
    return read_dict
```
[ "def", "_extract_readnum", "(", "read_dict", ")", ":", "pat", "=", "re", ".", "compile", "(", "r\"(?P<readnum>/\\d+)$\"", ")", "parts", "=", "pat", ".", "split", "(", "read_dict", "[", "\"name\"", "]", ")", "if", "len", "(", "parts", ")", "==", "3", ":", "name", ",", "readnum", ",", "endofline", "=", "parts", "read_dict", "[", "\"name\"", "]", "=", "name", "read_dict", "[", "\"readnum\"", "]", "=", "readnum", "else", ":", "read_dict", "[", "\"readnum\"", "]", "=", "\"\"", "return", "read_dict" ]
Extract read numbers from old-style fastqs. Handles read 1 and 2 specifications where naming is readname/1 readname/2
[ "Extract", "read", "numbers", "from", "old", "-", "style", "fastqs", "." ]
language: python | partition: train | avg_line_len: 29.8
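The three-way unpack in the sample above works because splitting on a pattern that contains a capture group makes `re.split()` keep the delimiter in the result. A quick check with an invented read name:

```python
import re

pat = re.compile(r"(?P<readnum>/\d+)$")

# Trailing '/1' matches: the captured delimiter is kept, giving 3 parts.
print(pat.split("HWI-ST1001:1:read42/1"))  # ['HWI-ST1001:1:read42', '/1', '']
# No match: the name comes back unchanged as a single element.
print(pat.split("HWI-ST1001:1:read42"))    # ['HWI-ST1001:1:read42']
```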
repo: brocade/pynos
path: pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py
url: https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_aaa.py#L161-L169
code:
```python
def service_password_encryption(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    service = ET.SubElement(config, "service",
                            xmlns="urn:brocade.com:mgmt:brocade-aaa")
    password_encryption = ET.SubElement(service, "password-encryption")

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
```
[ "def", "service_password_encryption", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "service", "=", "ET", ".", "SubElement", "(", "config", ",", "\"service\"", ",", "xmlns", "=", "\"urn:brocade.com:mgmt:brocade-aaa\"", ")", "password_encryption", "=", "ET", ".", "SubElement", "(", "service", ",", "\"password-encryption\"", ")", "callback", "=", "kwargs", ".", "pop", "(", "'callback'", ",", "self", ".", "_callback", ")", "return", "callback", "(", "config", ")" ]
Auto Generated Code
[ "Auto", "Generated", "Code" ]
language: python | partition: train | avg_line_len: 42.333333
repo: pyapi-gitlab/pyapi-gitlab
path: gitlab/base.py
url: https://github.com/pyapi-gitlab/pyapi-gitlab/blob/f74b6fb5c13cecae9524997847e928905cc60acf/gitlab/base.py#L55-L74
code:
```python
def get(self, uri, default_response=None, **kwargs):
    """
    Call GET on the Gitlab server

    >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False)
    >>> gitlab.login(user='root', password='5iveL!fe')
    >>> gitlab.get('/users/5')

    :param uri: String with the URI for the endpoint to GET from
    :param default_response: Return value if JSONDecodeError
    :param kwargs: Key word arguments to use as GET arguments
    :return: Dictionary containing response data
    :raise: HttpError: If invalid response returned
    """
    url = self.api_url + uri
    response = requests.get(url, params=kwargs, headers=self.headers,
                            verify=self.verify_ssl, auth=self.auth,
                            timeout=self.timeout)

    return self.success_or_raise(response, default_response=default_response)
```
[ "def", "get", "(", "self", ",", "uri", ",", "default_response", "=", "None", ",", "*", "*", "kwargs", ")", ":", "url", "=", "self", ".", "api_url", "+", "uri", "response", "=", "requests", ".", "get", "(", "url", ",", "params", "=", "kwargs", ",", "headers", "=", "self", ".", "headers", ",", "verify", "=", "self", ".", "verify_ssl", ",", "auth", "=", "self", ".", "auth", ",", "timeout", "=", "self", ".", "timeout", ")", "return", "self", ".", "success_or_raise", "(", "response", ",", "default_response", "=", "default_response", ")" ]
Call GET on the Gitlab server >>> gitlab = Gitlab(host='http://localhost:10080', verify_ssl=False) >>> gitlab.login(user='root', password='5iveL!fe') >>> gitlab.get('/users/5') :param uri: String with the URI for the endpoint to GET from :param default_response: Return value if JSONDecodeError :param kwargs: Key word arguments to use as GET arguments :return: Dictionary containing response data :raise: HttpError: If invalid response returned
[ "Call", "GET", "on", "the", "Gitlab", "server" ]
language: python | partition: train | avg_line_len: 44.65
repo: tango-controls/pytango
path: tango/tango_object.py
url: https://github.com/tango-controls/pytango/blob/9cf78c517c9cdc1081ff6d080a9646a740cc1d36/tango/tango_object.py#L476-L501
code:
```python
def get_devices(self):
    """
    Helper that returns a dict of devices for this server.

    :return:
        Returns a tuple of two elements:
          - dict<tango class name : list of device names>
          - dict<device names : tango class name>
    :rtype: tuple<dict, dict>
    """
    if self.__util is None:
        import tango
        db = tango.Database()
    else:
        db = self.__util.get_database()
    server = self.server_instance
    dev_list = db.get_device_class_list(server)
    class_map, dev_map = {}, {}
    for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]):
        dev_names = class_map.get(class_name)
        if dev_names is None:
            class_map[class_name] = dev_names = []
        dev_name = dev_name.lower()
        dev_names.append(dev_name)
        dev_map[dev_name] = class_name
    return class_map, dev_map
```
[ "def", "get_devices", "(", "self", ")", ":", "if", "self", ".", "__util", "is", "None", ":", "import", "tango", "db", "=", "tango", ".", "Database", "(", ")", "else", ":", "db", "=", "self", ".", "__util", ".", "get_database", "(", ")", "server", "=", "self", ".", "server_instance", "dev_list", "=", "db", ".", "get_device_class_list", "(", "server", ")", "class_map", ",", "dev_map", "=", "{", "}", ",", "{", "}", "for", "class_name", ",", "dev_name", "in", "zip", "(", "dev_list", "[", "1", ":", ":", "2", "]", ",", "dev_list", "[", ":", ":", "2", "]", ")", ":", "dev_names", "=", "class_map", ".", "get", "(", "class_name", ")", "if", "dev_names", "is", "None", ":", "class_map", "[", "class_name", "]", "=", "dev_names", "=", "[", "]", "dev_name", "=", "dev_name", ".", "lower", "(", ")", "dev_names", ".", "append", "(", "dev_name", ")", "dev_map", "[", "dev_name", "]", "=", "class_name", "return", "class_map", ",", "dev_map" ]
Helper that retuns a dict of devices for this server. :return: Returns a tuple of two elements: - dict<tango class name : list of device names> - dict<device names : tango class name> :rtype: tuple<dict, dict>
[ "Helper", "that", "retuns", "a", "dict", "of", "devices", "for", "this", "server", "." ]
language: python | partition: train | avg_line_len: 36.076923
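The zip of `dev_list[1::2]` with `dev_list[::2]` in the sample above works because `get_device_class_list` returns a flat list alternating device name and class name. A standalone illustration of the pairing trick (device and class names invented):

```python
# Flat [device, class, device, class, ...] list, as returned by
# Database.get_device_class_list (names here are made up).
dev_list = ["sys/tg_test/1", "TangoTest", "sys/tg_test/2", "TangoTest"]

class_map = {}
for class_name, dev_name in zip(dev_list[1::2], dev_list[::2]):
    class_map.setdefault(class_name, []).append(dev_name.lower())

print(class_map)  # {'TangoTest': ['sys/tg_test/1', 'sys/tg_test/2']}
```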
repo: uber/doubles
path: doubles/target.py
url: https://github.com/uber/doubles/blob/15e68dcf98f709b19a581915fa6af5ef49ebdd8a/doubles/target.py#L64-L74
code:
```python
def _determine_doubled_obj_type(self):
    """Returns the type (class) of the target object.

    :return: The type (class) of the target.
    :rtype: type, classobj
    """
    if isclass(self.doubled_obj) or ismodule(self.doubled_obj):
        return self.doubled_obj

    return self.doubled_obj.__class__
```
[ "def", "_determine_doubled_obj_type", "(", "self", ")", ":", "if", "isclass", "(", "self", ".", "doubled_obj", ")", "or", "ismodule", "(", "self", ".", "doubled_obj", ")", ":", "return", "self", ".", "doubled_obj", "return", "self", ".", "doubled_obj", ".", "__class__" ]
Returns the type (class) of the target object. :return: The type (class) of the target. :rtype: type, classobj
[ "Returns", "the", "type", "(", "class", ")", "of", "the", "target", "object", "." ]
language: python | partition: train | avg_line_len: 29.727273
repo: Opentrons/opentrons
path: api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py
url: https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/drivers/smoothie_drivers/driver_3_0.py#L630-L654
code:
```python
def set_active_current(self, settings):
    '''
    Sets the amperage of each motor for when it is activated by driver.
    Values are initialized from the `robot_config.high_current` values,
    and can then be changed through this method by other parts of the API.
    For example, `Pipette` setting the active-current of its pipette,
    depending on what model pipette it is, and what action it is performing.

    settings
        Dict with axes as keys (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C')
        and floating point number for current (generally between 0.1 and 2)
    '''
    self._active_current_settings['now'].update(settings)

    # if an axis specified in the `settings` is currently active,
    # reset its current to the new active-current value
    active_axes_to_update = {
        axis: amperage
        for axis, amperage in self._active_current_settings['now'].items()
        if self._active_axes.get(axis) is True
        if self.current[axis] != amperage
    }

    if active_axes_to_update:
        self._save_current(active_axes_to_update, axes_active=True)
```
[ "def", "set_active_current", "(", "self", ",", "settings", ")", ":", "self", ".", "_active_current_settings", "[", "'now'", "]", ".", "update", "(", "settings", ")", "# if an axis specified in the `settings` is currently active,", "# reset it's current to the new active-current value", "active_axes_to_update", "=", "{", "axis", ":", "amperage", "for", "axis", ",", "amperage", "in", "self", ".", "_active_current_settings", "[", "'now'", "]", ".", "items", "(", ")", "if", "self", ".", "_active_axes", ".", "get", "(", "axis", ")", "is", "True", "if", "self", ".", "current", "[", "axis", "]", "!=", "amperage", "}", "if", "active_axes_to_update", ":", "self", ".", "_save_current", "(", "active_axes_to_update", ",", "axes_active", "=", "True", ")" ]
Sets the amperage of each motor for when it is activated by driver. Values are initialized from the `robot_config.high_current` values, and can then be changed through this method by other parts of the API. For example, `Pipette` setting the active-current of it's pipette, depending on what model pipette it is, and what action it is performing settings Dict with axes as valies (e.g.: 'X', 'Y', 'Z', 'A', 'B', or 'C') and floating point number for current (generally between 0.1 and 2)
[ "Sets", "the", "amperage", "of", "each", "motor", "for", "when", "it", "is", "activated", "by", "driver", ".", "Values", "are", "initialized", "from", "the", "robot_config", ".", "high_current", "values", "and", "can", "then", "be", "changed", "through", "this", "method", "by", "other", "parts", "of", "the", "API", "." ]
language: python | partition: train | avg_line_len: 45.92
repo: CloudGenix/sdk-python
path: cloudgenix/__init__.py
url: https://github.com/CloudGenix/sdk-python/blob/1b2f92582b6a19769134914793bfd00e4caa074b/cloudgenix/__init__.py#L755-L890
code:
```python
def rest_call(self, url, method, data=None, sensitive=False, timeout=None, content_json=True,
              retry=None, max_retry=None, retry_sleep=None):
    """
    Generic REST call worker function

    **Parameters:**

      - **url:** URL for the REST call
      - **method:** METHOD for the REST call
      - **data:** Optional DATA for the call (for POST/PUT/etc.)
      - **sensitive:** Flag if content request/response should be hidden from logging functions
      - **timeout:** Requests Timeout
      - **content_json:** Bool on whether the Content-Type header should be set to application/json
      - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead.
      - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead.
      - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead.

    **Returns:** Requests.Response object, extended with:

      - **cgx_status**: Bool, True if a successful CloudGenix response, False if error.
      - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid
        responses will be converted to a Dict response.
    """
    # pull retry related items from Constructor if not specified.
    if timeout is None:
        timeout = self.rest_call_timeout
    if retry is not None:
        # Someone using deprecated retry code. Notify.
        sys.stderr.write("WARNING: 'retry' option of rest_call() has been deprecated. "
                         "Please use 'API.modify_rest_retry()' instead.")
    if max_retry is not None:
        # Someone using deprecated retry code. Notify.
        sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. "
                         "Please use 'API.modify_rest_retry()' instead.")
    if retry_sleep is not None:
        # Someone using deprecated retry code. Notify.
        sys.stderr.write("WARNING: 'max_retry' option of rest_call() has been deprecated. "
                         "Please use 'API.modify_rest_retry()' instead.")

    # Get logging level, use this to bypass logging functions with possible large content if not set.
    logger_level = api_logger.getEffectiveLevel()

    # populate headers and cookies from session.
    if content_json and method.lower() not in ['get', 'delete']:
        headers = {
            'Content-Type': 'application/json'
        }
    else:
        headers = {}

    # add session headers
    headers.update(self._session.headers)
    cookie = self._session.cookies.get_dict()

    # make sure data is populated if present.
    if isinstance(data, (list, dict)):
        data = json.dumps(data)

    api_logger.debug('REST_CALL URL = %s', url)

    # make request
    try:
        if not sensitive:
            api_logger.debug('\n\tREQUEST: %s %s\n\tHEADERS: %s\n\tCOOKIES: %s\n\tDATA: %s\n',
                             method.upper(), url, headers, cookie, data)

        # Actual request
        response = self._session.request(method, url, data=data,
                                         verify=self.ca_verify_filename, stream=True,
                                         timeout=timeout, headers=headers,
                                         allow_redirects=False)

        # Request complete - lets parse.
        # if it's a non-CGX-good response, return with cgx_status = False
        if response.status_code not in [requests.codes.ok, requests.codes.no_content,
                                        requests.codes.found, requests.codes.moved]:

            # Simple JSON debug
            if not sensitive:
                try:
                    api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps(
                        json.loads(text_type(response.headers)), indent=4))
                except ValueError:
                    api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers))
                try:
                    api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4))
                except ValueError:
                    api_logger.debug('RESPONSE: %s\n', text_type(response.text))
            else:
                api_logger.debug('RESPONSE NOT LOGGED (sensitive content)')

            api_logger.debug("Error, non-200 response received: %s", response.status_code)

            # CGX extend requests.Response for return
            response.cgx_status = False
            response.cgx_content = self._catch_nonjson_streamresponse(response.text)
            return response

        else:
            # Simple JSON debug
            if not sensitive and (logger_level <= logging.DEBUG and logger_level != logging.NOTSET):
                try:
                    api_logger.debug('RESPONSE HEADERS: %s\n', json.dumps(
                        json.loads(text_type(response.headers)), indent=4))
                    api_logger.debug('RESPONSE: %s\n', json.dumps(response.json(), indent=4))
                except ValueError:
                    api_logger.debug('RESPONSE HEADERS: %s\n', text_type(response.headers))
                    api_logger.debug('RESPONSE: %s\n', text_type(response.text))
            elif sensitive:
                api_logger.debug('RESPONSE NOT LOGGED (sensitive content)')

            # CGX extend requests.Response for return
            response.cgx_status = True
            response.cgx_content = self._catch_nonjson_streamresponse(response.text)
            return response

    except (requests.exceptions.Timeout, requests.exceptions.ConnectionError,
            urllib3.exceptions.MaxRetryError) as e:
        api_logger.info("Error, %s.", text_type(e))

        # make a requests.Response object for return since we didn't get one.
        response = requests.Response

        # CGX extend requests.Response for return
        response.cgx_status = False
        response.cgx_content = {
            '_error': [
                {
                    'message': 'REST Request Exception: {}'.format(e),
                    'data': {},
                }
            ]
        }
        return response
```
[ "def", "rest_call", "(", "self", ",", "url", ",", "method", ",", "data", "=", "None", ",", "sensitive", "=", "False", ",", "timeout", "=", "None", ",", "content_json", "=", "True", ",", "retry", "=", "None", ",", "max_retry", "=", "None", ",", "retry_sleep", "=", "None", ")", ":", "# pull retry related items from Constructor if not specified.", "if", "timeout", "is", "None", ":", "timeout", "=", "self", ".", "rest_call_timeout", "if", "retry", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "if", "max_retry", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'max_retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "if", "retry_sleep", "is", "not", "None", ":", "# Someone using deprecated retry code. Notify.", "sys", ".", "stderr", ".", "write", "(", "\"WARNING: 'max_retry' option of rest_call() has been deprecated. \"", "\"Please use 'API.modify_rest_retry()' instead.\"", ")", "# Get logging level, use this to bypass logging functions with possible large content if not set.", "logger_level", "=", "api_logger", ".", "getEffectiveLevel", "(", ")", "# populate headers and cookies from session.", "if", "content_json", "and", "method", ".", "lower", "(", ")", "not", "in", "[", "'get'", ",", "'delete'", "]", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "else", ":", "headers", "=", "{", "}", "# add session headers", "headers", ".", "update", "(", "self", ".", "_session", ".", "headers", ")", "cookie", "=", "self", ".", "_session", ".", "cookies", ".", "get_dict", "(", ")", "# make sure data is populated if present.", "if", "isinstance", "(", "data", ",", "(", "list", ",", "dict", ")", ")", ":", "data", "=", "json", ".", "dumps", "(", "data", ")", "api_logger", ".", "debug", "(", "'REST_CALL URL = %s'", ",", "url", ")", "# make request", "try", ":", "if", "not", "sensitive", ":", "api_logger", ".", "debug", "(", "'\\n\\tREQUEST: %s %s\\n\\tHEADERS: %s\\n\\tCOOKIES: %s\\n\\tDATA: %s\\n'", ",", "method", ".", "upper", "(", ")", ",", "url", ",", "headers", ",", "cookie", ",", "data", ")", "# Actual request", "response", "=", "self", ".", "_session", ".", "request", "(", "method", ",", "url", ",", "data", "=", "data", ",", "verify", "=", "self", ".", "ca_verify_filename", ",", "stream", "=", "True", ",", "timeout", "=", "timeout", ",", "headers", "=", "headers", ",", "allow_redirects", "=", "False", ")", "# Request complete - lets parse.", "# if it's a non-CGX-good response, return with cgx_status = False", "if", "response", ".", "status_code", "not", "in", "[", "requests", ".", "codes", ".", "ok", ",", "requests", ".", "codes", ".", "no_content", ",", "requests", ".", "codes", ".", "found", ",", "requests", ".", "codes", ".", "moved", "]", ":", "# Simple JSON debug", "if", "not", "sensitive", ":", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "text_type", "(", "response", ".", "headers", ")", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "text_type", "(", "response", ".", "headers", ")", ")", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", 
"json", ".", "dumps", "(", "response", ".", "json", "(", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "text_type", "(", "response", ".", "text", ")", ")", "else", ":", "api_logger", ".", "debug", "(", "'RESPONSE NOT LOGGED (sensitive content)'", ")", "api_logger", ".", "debug", "(", "\"Error, non-200 response received: %s\"", ",", "response", ".", "status_code", ")", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "False", "response", ".", "cgx_content", "=", "self", ".", "_catch_nonjson_streamresponse", "(", "response", ".", "text", ")", "return", "response", "else", ":", "# Simple JSON debug", "if", "not", "sensitive", "and", "(", "logger_level", "<=", "logging", ".", "DEBUG", "and", "logger_level", "!=", "logging", ".", "NOTSET", ")", ":", "try", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "json", ".", "dumps", "(", "json", ".", "loads", "(", "text_type", "(", "response", ".", "headers", ")", ")", ",", "indent", "=", "4", ")", ")", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "json", ".", "dumps", "(", "response", ".", "json", "(", ")", ",", "indent", "=", "4", ")", ")", "except", "ValueError", ":", "api_logger", ".", "debug", "(", "'RESPONSE HEADERS: %s\\n'", ",", "text_type", "(", "response", ".", "headers", ")", ")", "api_logger", ".", "debug", "(", "'RESPONSE: %s\\n'", ",", "text_type", "(", "response", ".", "text", ")", ")", "elif", "sensitive", ":", "api_logger", ".", "debug", "(", "'RESPONSE NOT LOGGED (sensitive content)'", ")", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "True", "response", ".", "cgx_content", "=", "self", ".", "_catch_nonjson_streamresponse", "(", "response", ".", "text", ")", "return", "response", "except", "(", "requests", ".", "exceptions", ".", "Timeout", ",", "requests", ".", "exceptions", ".", "ConnectionError", ",", "urllib3", ".", "exceptions", ".", "MaxRetryError", ")", "as", "e", ":", "api_logger", ".", "info", "(", "\"Error, %s.\"", ",", "text_type", "(", "e", ")", ")", "# make a requests.Response object for return since we didn't get one.", "response", "=", "requests", ".", "Response", "# CGX extend requests.Response for return", "response", ".", "cgx_status", "=", "False", "response", ".", "cgx_content", "=", "{", "'_error'", ":", "[", "{", "'message'", ":", "'REST Request Exception: {}'", ".", "format", "(", "e", ")", ",", "'data'", ":", "{", "}", ",", "}", "]", "}", "return", "response" ]
Generic REST call worker function **Parameters:** - **url:** URL for the REST call - **method:** METHOD for the REST call - **data:** Optional DATA for the call (for POST/PUT/etc.) - **sensitive:** Flag if content request/response should be hidden from logging functions - **timeout:** Requests Timeout - **content_json:** Bool on whether the Content-Type header should be set to application/json - **retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **max_retry:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. - **retry_sleep:** DEPRECATED - please use `cloudgenix.API.modify_rest_retry` instead. **Returns:** Requests.Response object, extended with: - **cgx_status**: Bool, True if a successful CloudGenix response, False if error. - **cgx_content**: Content of the response, guaranteed to be in Dict format. Empty/invalid responses will be converted to a Dict response.
[ "Generic", "REST", "call", "worker", "function" ]
language: python | partition: train | avg_line_len: 47.154412
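Callers of the sample above branch on the `cgx_status` / `cgx_content` extensions rather than on raw `requests` status handling. A minimal usage sketch, assuming an already-authenticated `cloudgenix.API()` instance named `api` and a placeholder URL:

```python
# Hypothetical call; the URL and the `api` object are placeholders.
resp = api.rest_call("https://api.example.com/v2.0/api/sites", "get")

if resp.cgx_status:
    print(resp.cgx_content)  # response body, always parsed into a dict
else:
    # On non-2xx responses or transport errors, cgx_content is still a dict
    # (the parsed error body, or a synthesized '_error' structure on exceptions).
    print(resp.cgx_content)
```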
repo: Apstra/aeon-venos
path: pylib/aeon/nxos/autoload/install_os.py
url: https://github.com/Apstra/aeon-venos/blob/4d4f73d5904831ddc78c30922a8a226c90cf7d90/pylib/aeon/nxos/autoload/install_os.py#L56-L76
code:
```python
def copy_from(self, location, timeout=10 * 60):
    """
    This method will fetch the image; the fetch will happen from the
    device-side using the 'copy' command.  Note that the NXAPI appears
    to be single-threaded, so the code needs to wait until this
    operation has completed before attempting another API call.
    Therefore the :timeout: value is set very high (10min)

    :param location: URL to the location of the file.  This URL must be
                     a valid source field to the NXOS 'copy' command

    :keyword timeout: Timeout in seconds

    :return:
    """
    cmd = 'copy {location} {dir}: vrf {vrf_name}'.format(
        location=location, dir=self.DESTDIR, vrf_name=self.VRF_NAME)

    run = self.device.api.exec_opcmd
    run(cmd, msg_type='cli_show_ascii', timeout=timeout)
```
[ "def", "copy_from", "(", "self", ",", "location", ",", "timeout", "=", "10", "*", "60", ")", ":", "cmd", "=", "'copy {location} {dir}: vrf {vrf_name}'", ".", "format", "(", "location", "=", "location", ",", "dir", "=", "self", ".", "DESTDIR", ",", "vrf_name", "=", "self", ".", "VRF_NAME", ")", "run", "=", "self", ".", "device", ".", "api", ".", "exec_opcmd", "run", "(", "cmd", ",", "msg_type", "=", "'cli_show_ascii'", ",", "timeout", "=", "timeout", ")" ]
This method will fetch the image; the fetch will happen from the device-side using the 'copy' command. Note that the NXAPI appears to be single-threaded, so the code needs to wait until this operation has completed before attempting another API call. Therefore the :timeout: value is set very high (10min) :param location: URL to the location of the file. This URL must be a valid source field to the NXOS 'copy' command :keyword timeout: Timeout in seconds :return:
[ "This", "method", "will", "fetch", "the", "image", ";", "the", "fetch", "will", "happen", "from", "the", "device", "-", "side", "using", "the", "copy", "command", ".", "Note", "that", "the", "NXAPI", "appears", "to", "be", "single", "-", "threaded", "so", "the", "code", "needs", "to", "wait", "until", "this", "operation", "has", "completed", "before", "attempting", "another", "API", "call", ".", "Therefore", "the", ":", "timeout", ":", "value", "is", "set", "very", "high", "(", "10min", ")" ]
language: python | partition: train | avg_line_len: 39.761905
repo: koszullab/metaTOR
path: metator/metator.py
url: https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/metator.py#L97-L111
code:
```python
def main():
    """This module just acts as an entry point to the bulk of the
    pipeline. All argument parsing is delegated to metator.sh
    """
    metator_args = sys.argv[1:]
    entry_point = pkg_resources.resource_filename("metator", "bin/metator.sh")
    try:
        metator_process = subprocess.Popen((entry_point, *metator_args))
    except PermissionError:
        # some issues occurred for non-bash users
        metator_process = subprocess.Popen(
            (entry_point, *metator_args), shell=True
        )
    metator_process.wait()
```
[ "def", "main", "(", ")", ":", "metator_args", "=", "sys", ".", "argv", "[", "1", ":", "]", "entry_point", "=", "pkg_resources", ".", "resource_filename", "(", "\"metator\"", ",", "\"bin/metator.sh\"", ")", "try", ":", "metator_process", "=", "subprocess", ".", "Popen", "(", "(", "entry_point", ",", "*", "metator_args", ")", ")", "except", "PermissionError", ":", "# some issues occured for non-bash users", "metator_process", "=", "subprocess", ".", "Popen", "(", "(", "entry_point", ",", "*", "metator_args", ")", ",", "shell", "=", "True", ")", "metator_process", ".", "wait", "(", ")" ]
This module just acts as an entry point to the bulk of the pipeline. All argument parsing is delegated to metator.sh
[ "This", "module", "just", "acts", "as", "an", "entry", "point", "to", "the", "bulk", "of", "the", "pipeline", ".", "All", "argument", "parsing", "is", "delegated", "to", "metator", ".", "sh" ]
language: python | partition: train | avg_line_len: 35.466667
repo: noahbenson/neuropythy
path: neuropythy/geometry/mesh.py
url: https://github.com/noahbenson/neuropythy/blob/b588889f6db36ddb9602ae4a72c1c0d3f41586b2/neuropythy/geometry/mesh.py#L3912-L3932
code:
```python
def close_path_traces(*args):
    '''
    close_path_traces(pt1, pt2...) yields the path-trace formed by joining the
      list of path traces at their intersection points in the order given. Note
      that the direction in which each individual path trace's coordinates are
      specified is ultimately ignored by this function--the only ordering that
      matters is the order in which the list of paths is given.

    Each path argument may alternately be a curve-spline object or coordinate
      matrix, so long as all paths and curves track the same 2D space.
    '''
    pts = [x for x in args if is_path_trace(x)]
    if len(pts) == 0:
        if len(args) == 1 and hasattr(args[0], '__iter__'):
            return close_path_traces(*args[0])
        raise ValueError('at least one argument to close_path_traces must be a path trace')
    mp0 = pts[0].map_projection
    if not all(mp is None or mp.normalize() == mp0.normalize()
               for x in pts[1:]
               for mp in [x.map_projection]):
        warnings.warn('path traces do not share a map projection')
    crvs = [x.curve if is_path_trace(x) else to_curve_spline(x) for x in args]
    loop = close_curves(*crvs)
    return path_trace(mp0, loop.coordinates, closed=True)
```
[ "def", "close_path_traces", "(", "*", "args", ")", ":", "pts", "=", "[", "x", "for", "x", "in", "args", "if", "is_path_trace", "(", "x", ")", "]", "if", "len", "(", "pts", ")", "==", "0", ":", "if", "len", "(", "args", ")", "==", "1", "and", "hasattr", "(", "args", "[", "0", "]", ",", "'__iter__'", ")", ":", "return", "close_path_traces", "(", "*", "args", "[", "0", "]", ")", "raise", "ValueError", "(", "'at least one argument to close_path_traces must be a path trace'", ")", "mp0", "=", "pts", "[", "0", "]", ".", "map_projection", "if", "not", "all", "(", "mp", "is", "None", "or", "mp", ".", "normalize", "(", ")", "==", "mp0", ".", "normalize", "(", ")", "for", "x", "in", "pts", "[", "1", ":", "]", "for", "mp", "in", "[", "x", ".", "map_projection", "]", ")", ":", "warnings", ".", "warn", "(", "'path traces do not share a map projection'", ")", "crvs", "=", "[", "x", ".", "curve", "if", "is_path_trace", "(", "x", ")", "else", "to_curve_spline", "(", "x", ")", "for", "x", "in", "args", "]", "loop", "=", "close_curves", "(", "*", "crvs", ")", "return", "path_trace", "(", "mp0", ",", "loop", ".", "coordinates", ",", "closed", "=", "True", ")" ]
close_path_traces(pt1, pt2...) yields the path-trace formed by joining the list of path traces at their intersection points in the order given. Note that the direction in which each individual path trace's coordinates are specified is ultimately ignored by this function--the only ordering that matters is the order in which the list of paths is given. Each path argument may alternately be a curve-spline object or coordinate matrix, so long as all paths and curves track the same 2D space.
[ "close_path_traces", "(", "pt1", "pt2", "...", ")", "yields", "the", "path", "-", "trace", "formed", "by", "joining", "the", "list", "of", "path", "traces", "at", "their", "intersection", "points", "in", "the", "order", "given", ".", "Note", "that", "the", "direction", "in", "which", "each", "individual", "path", "trace", "s", "coordinates", "are", "specified", "is", "ultimately", "ignored", "by", "this", "function", "--", "the", "only", "ordering", "that", "matters", "is", "the", "order", "in", "which", "the", "list", "of", "paths", "is", "given", "." ]
language: python | partition: train | avg_line_len: 57.047619
repo: materialsproject/pymatgen
path: pymatgen/analysis/eos.py
url: https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/analysis/eos.py#L311-L318
code:
```python
def _func(self, volume, params):
    """
    Poirier-Tarantola equation from PRB 70, 224107
    """
    e0, b0, b1, v0 = tuple(params)
    eta = (volume / v0) ** (1. / 3.)
    squiggle = -3. * np.log(eta)
    return e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2))
```
[ "def", "_func", "(", "self", ",", "volume", ",", "params", ")", ":", "e0", ",", "b0", ",", "b1", ",", "v0", "=", "tuple", "(", "params", ")", "eta", "=", "(", "volume", "/", "v0", ")", "**", "(", "1.", "/", "3.", ")", "squiggle", "=", "-", "3.", "*", "np", ".", "log", "(", "eta", ")", "return", "e0", "+", "b0", "*", "v0", "*", "squiggle", "**", "2", "/", "6.", "*", "(", "3.", "+", "squiggle", "*", "(", "b1", "-", "2", ")", ")" ]
Pourier-Tarantola equation from PRB 70, 224107
[ "Pourier", "-", "Tarantola", "equation", "from", "PRB", "70", "224107" ]
language: python | partition: train | avg_line_len: 37.125
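Written out from the code above (a transcription of the implementation, not quoted from the cited paper), with $\eta = (V/V_0)^{1/3}$ and $s = -3\ln\eta = \ln(V_0/V)$, the energy it evaluates is:

$$
E(V) = E_0 + \frac{B_0 V_0}{6}\, s^2 \left[\, 3 + (B_1 - 2)\, s \,\right],
\qquad s = \ln\frac{V_0}{V}
$$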
repo: sosy-lab/benchexec
path: benchexec/cgroups.py
url: https://github.com/sosy-lab/benchexec/blob/44428f67f41384c03aea13e7e25f884764653617/benchexec/cgroups.py#L312-L318
code:
```python
def get_all_tasks(self, subsystem):
    """
    Return a generator of all PIDs currently in this cgroup for the given subsystem.
    """
    with open(os.path.join(self.per_subsystem[subsystem], 'tasks'), 'r') as tasksFile:
        for line in tasksFile:
            yield int(line)
```
[ "def", "get_all_tasks", "(", "self", ",", "subsystem", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "per_subsystem", "[", "subsystem", "]", ",", "'tasks'", ")", ",", "'r'", ")", "as", "tasksFile", ":", "for", "line", "in", "tasksFile", ":", "yield", "int", "(", "line", ")" ]
Return a generator of all PIDs currently in this cgroup for the given subsystem.
[ "Return", "a", "generator", "of", "all", "PIDs", "currently", "in", "this", "cgroup", "for", "the", "given", "subsystem", "." ]
language: python | partition: train | avg_line_len: 42.857143
repo: twisted/txaws
path: txaws/route53/client.py
url: https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/route53/client.py#L60-L73
code:
```python
def get_route53_client(agent, region, cooperator=None):
    """
    Get a non-registration Route53 client.
    """
    if cooperator is None:
        cooperator = task
    return region.get_client(
        _Route53Client,
        agent=agent,
        creds=region.creds,
        region=REGION_US_EAST_1,
        endpoint=AWSServiceEndpoint(_OTHER_ENDPOINT),
        cooperator=cooperator,
    )
```
[ "def", "get_route53_client", "(", "agent", ",", "region", ",", "cooperator", "=", "None", ")", ":", "if", "cooperator", "is", "None", ":", "cooperator", "=", "task", "return", "region", ".", "get_client", "(", "_Route53Client", ",", "agent", "=", "agent", ",", "creds", "=", "region", ".", "creds", ",", "region", "=", "REGION_US_EAST_1", ",", "endpoint", "=", "AWSServiceEndpoint", "(", "_OTHER_ENDPOINT", ")", ",", "cooperator", "=", "cooperator", ",", ")" ]
Get a non-registration Route53 client.
[ "Get", "a", "non", "-", "registration", "Route53", "client", "." ]
language: python | partition: train | avg_line_len: 27.214286
repo: Diviyan-Kalainathan/CausalDiscoveryToolbox
path: cdt/causality/graph/CGNN.py
url: https://github.com/Diviyan-Kalainathan/CausalDiscoveryToolbox/blob/be228b078ba9eb76c01b3ccba9a1c0ad9e9e5ed1/cdt/causality/graph/CGNN.py#L127-L145
code:
```python
def run(self, data, train_epochs=1000, test_epochs=1000, verbose=None,
        idx=0, lr=0.01, **kwargs):
    """Run the CGNN on a given graph."""
    verbose = SETTINGS.get_default(verbose=verbose)
    optim = th.optim.Adam(self.parameters(), lr=lr)
    self.score.zero_()

    with trange(train_epochs + test_epochs, disable=not verbose) as t:
        for epoch in t:
            optim.zero_grad()
            generated_data = self.forward()
            mmd = self.criterion(generated_data, data)
            if not epoch % 200:
                t.set_postfix(idx=idx, epoch=epoch, loss=mmd.item())
            mmd.backward()
            optim.step()
            if epoch >= test_epochs:
                self.score.add_(mmd.data)

    return self.score.cpu().numpy() / test_epochs
```
[ "def", "run", "(", "self", ",", "data", ",", "train_epochs", "=", "1000", ",", "test_epochs", "=", "1000", ",", "verbose", "=", "None", ",", "idx", "=", "0", ",", "lr", "=", "0.01", ",", "*", "*", "kwargs", ")", ":", "verbose", "=", "SETTINGS", ".", "get_default", "(", "verbose", "=", "verbose", ")", "optim", "=", "th", ".", "optim", ".", "Adam", "(", "self", ".", "parameters", "(", ")", ",", "lr", "=", "lr", ")", "self", ".", "score", ".", "zero_", "(", ")", "with", "trange", "(", "train_epochs", "+", "test_epochs", ",", "disable", "=", "not", "verbose", ")", "as", "t", ":", "for", "epoch", "in", "t", ":", "optim", ".", "zero_grad", "(", ")", "generated_data", "=", "self", ".", "forward", "(", ")", "mmd", "=", "self", ".", "criterion", "(", "generated_data", ",", "data", ")", "if", "not", "epoch", "%", "200", ":", "t", ".", "set_postfix", "(", "idx", "=", "idx", ",", "epoch", "=", "epoch", ",", "loss", "=", "mmd", ".", "item", "(", ")", ")", "mmd", ".", "backward", "(", ")", "optim", ".", "step", "(", ")", "if", "epoch", ">=", "test_epochs", ":", "self", ".", "score", ".", "add_", "(", "mmd", ".", "data", ")", "return", "self", ".", "score", ".", "cpu", "(", ")", ".", "numpy", "(", ")", "/", "test_epochs" ]
Run the CGNN on a given graph.
[ "Run", "the", "CGNN", "on", "a", "given", "graph", "." ]
language: python | partition: valid | avg_line_len: 43.684211
repo: petl-developers/petlx
path: petlx/bio/gff3.py
url: https://github.com/petl-developers/petlx/blob/54039e30388c7da12407d6b5c3cb865b00436004/petlx/bio/gff3.py#L17-L33
code:
```python
def gff3_parse_attributes(attributes_string):
    """
    Parse a string of GFF3 attributes ('key=value' pairs delimited by ';')
    and return a dictionary.
    """
    attributes = dict()
    fields = attributes_string.split(';')
    for f in fields:
        if '=' in f:
            key, value = f.split('=')
            attributes[unquote_plus(key).strip()] = unquote_plus(value.strip())
        elif len(f) > 0:
            # not strictly kosher
            attributes[unquote_plus(f).strip()] = True
    return attributes
```
[ "def", "gff3_parse_attributes", "(", "attributes_string", ")", ":", "attributes", "=", "dict", "(", ")", "fields", "=", "attributes_string", ".", "split", "(", "';'", ")", "for", "f", "in", "fields", ":", "if", "'='", "in", "f", ":", "key", ",", "value", "=", "f", ".", "split", "(", "'='", ")", "attributes", "[", "unquote_plus", "(", "key", ")", ".", "strip", "(", ")", "]", "=", "unquote_plus", "(", "value", ".", "strip", "(", ")", ")", "elif", "len", "(", "f", ")", ">", "0", ":", "# not strictly kosher", "attributes", "[", "unquote_plus", "(", "f", ")", ".", "strip", "(", ")", "]", "=", "True", "return", "attributes" ]
Parse a string of GFF3 attributes ('key=value' pairs delimited by ';') and return a dictionary.
[ "Parse", "a", "string", "of", "GFF3", "attributes", "(", "key", "=", "value", "pairs", "delimited", "by", ";", ")", "and", "return", "a", "dictionary", "." ]
language: python | partition: train | avg_line_len: 31.294118
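Assuming the function above is in scope (with `unquote_plus` imported from `urllib.parse` on Python 3), a quick round-trip on a made-up GFF3 ninth-column value:

```python
from urllib.parse import unquote_plus

# Percent-encoded values are decoded; bare tokens become True flags.
print(gff3_parse_attributes("ID=gene00001;Name=EDEN;note=protein%20kinase"))
# {'ID': 'gene00001', 'Name': 'EDEN', 'note': 'protein kinase'}
```

Note that the two-way unpacking of `f.split('=')` means a value containing its own `=` raises ValueError, so this parser only handles single-`=` pairs.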
repo: KnowledgeLinks/rdfframework
path: rdfframework/connections/connmanager.py
url: https://github.com/KnowledgeLinks/rdfframework/blob/9ec32dcc4bed51650a4b392cc5c15100fef7923a/rdfframework/connections/connmanager.py#L135-L149
code:
```python
def get(self, conn_name, default=None, **kwargs):
    """ returns the specified connection

    args:
        conn_name: the name of the connection
    """
    if isinstance(conn_name, RdfwConnections):
        return conn_name
    try:
        return self.conns[conn_name]
    except KeyError:
        if default:
            return self.get(default, **kwargs)
        raise LookupError("'%s' connection has not been set" % conn_name)
```
[ "def", "get", "(", "self", ",", "conn_name", ",", "default", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "conn_name", ",", "RdfwConnections", ")", ":", "return", "conn_name", "try", ":", "return", "self", ".", "conns", "[", "conn_name", "]", "except", "KeyError", ":", "if", "default", ":", "return", "self", ".", "get", "(", "default", ",", "*", "*", "kwargs", ")", "raise", "LookupError", "(", "\"'%s' connection has not been set\"", "%", "conn_name", ")" ]
returns the specified connection args: conn_name: the name of the connection
[ "returns", "the", "specified", "connection" ]
python
train
31.333333
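The lookup falls back to a named default connection, recursing once before raising. A stand-alone illustration of just that fallback logic (the pass-through for objects that are already RdfwConnections is omitted; names are illustrative):

class ConnManager:
    def __init__(self):
        self.conns = {}

    def get(self, conn_name, default=None, **kwargs):
        try:
            return self.conns[conn_name]
        except KeyError:
            if default:
                return self.get(default, **kwargs)
            raise LookupError("'%s' connection has not been set" % conn_name)

mgr = ConnManager()
mgr.conns['main'] = object()
assert mgr.get('missing', default='main') is mgr.conns['main']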
jonashaag/django-addanother
django_addanother/widgets.py
https://github.com/jonashaag/django-addanother/blob/83dc0c8cc7665cc481dd58da0b9a746972264046/django_addanother/widgets.py#L28-L31
def build_attrs(self, extra_attrs=None, **kwargs):
    "Helper function for building an attribute dictionary."
    self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
    return self.attrs
[ "def", "build_attrs", "(", "self", ",", "extra_attrs", "=", "None", ",", "*", "*", "kwargs", ")", ":", "self", ".", "attrs", "=", "self", ".", "widget", ".", "build_attrs", "(", "extra_attrs", "=", "extra_attrs", ",", "*", "*", "kwargs", ")", "return", "self", ".", "attrs" ]
Helper function for building an attribute dictionary.
[ "Helper", "function", "for", "building", "an", "attribute", "dictionary", "." ]
python
test
52.5
tensorflow/tensor2tensor
tensor2tensor/data_generators/mnist.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/mnist.py#L69-L83
def _extract_mnist_labels(filename, num_labels):
  """Extract labels from an MNIST file into integers.

  Args:
    filename: The path to an MNIST labels file.
    num_labels: The number of labels in the file.

  Returns:
    An int64 numpy array of shape [num_labels]
  """
  with gzip.open(filename) as bytestream:
    bytestream.read(8)
    buf = bytestream.read(num_labels)
    labels = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)
    return labels
[ "def", "_extract_mnist_labels", "(", "filename", ",", "num_labels", ")", ":", "with", "gzip", ".", "open", "(", "filename", ")", "as", "bytestream", ":", "bytestream", ".", "read", "(", "8", ")", "buf", "=", "bytestream", ".", "read", "(", "num_labels", ")", "labels", "=", "np", ".", "frombuffer", "(", "buf", ",", "dtype", "=", "np", ".", "uint8", ")", ".", "astype", "(", "np", ".", "int64", ")", "return", "labels" ]
Extract labels from an MNIST file into integers.

  Args:
    filename: The path to an MNIST labels file.
    num_labels: The number of labels in the file.

  Returns:
    An int64 numpy array of shape [num_labels]
[ "Extract", "labels", "from", "an", "MNIST", "file", "into", "integers", "." ]
python
train
29.533333
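The IDX label file begins with an 8-byte header (a magic number and an item count) that the reader skips before consuming num_labels uint8 values. A self-contained check against a synthetic file:

import gzip
import struct
import numpy as np

labels = np.array([5, 0, 4, 1], dtype=np.uint8)
with gzip.open('fake-labels.gz', 'wb') as f:
    f.write(struct.pack('>II', 2049, len(labels)))   # magic number, count
    f.write(labels.tobytes())

with gzip.open('fake-labels.gz') as bytestream:
    bytestream.read(8)                                # skip the header
    buf = bytestream.read(len(labels))
    out = np.frombuffer(buf, dtype=np.uint8).astype(np.int64)

print(out)   # [5 0 4 1]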
DataDog/integrations-core
tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py
https://github.com/DataDog/integrations-core/blob/ebd41c873cf9f97a8c51bf9459bc6a7536af8acd/tokumx/datadog_checks/tokumx/vendor/pymongo/cursor.py#L1033-L1080
def _refresh(self): """Refreshes the cursor with more data from Mongo. Returns the length of self.__data after refresh. Will exit early if self.__data is already non-empty. Raises OperationFailure when the cursor cannot be refreshed due to an error on the query. """ if len(self.__data) or self.__killed: return len(self.__data) if self.__id is None: # Query self.__send_message(_Query(self.__query_flags, self.__collection.database.name, self.__collection.name, self.__skip, self.__query_spec(), self.__projection, self.__codec_options, self.__read_preference, self.__limit, self.__batch_size, self.__read_concern, self.__collation)) if not self.__id: self.__killed = True elif self.__id: # Get More if self.__limit: limit = self.__limit - self.__retrieved if self.__batch_size: limit = min(limit, self.__batch_size) else: limit = self.__batch_size # Exhaust cursors don't send getMore messages. if self.__exhaust: self.__send_message(None) else: self.__send_message(_GetMore(self.__collection.database.name, self.__collection.name, limit, self.__id, self.__codec_options, self.__max_await_time_ms)) else: # Cursor id is zero nothing else to return self.__killed = True return len(self.__data)
[ "def", "_refresh", "(", "self", ")", ":", "if", "len", "(", "self", ".", "__data", ")", "or", "self", ".", "__killed", ":", "return", "len", "(", "self", ".", "__data", ")", "if", "self", ".", "__id", "is", "None", ":", "# Query", "self", ".", "__send_message", "(", "_Query", "(", "self", ".", "__query_flags", ",", "self", ".", "__collection", ".", "database", ".", "name", ",", "self", ".", "__collection", ".", "name", ",", "self", ".", "__skip", ",", "self", ".", "__query_spec", "(", ")", ",", "self", ".", "__projection", ",", "self", ".", "__codec_options", ",", "self", ".", "__read_preference", ",", "self", ".", "__limit", ",", "self", ".", "__batch_size", ",", "self", ".", "__read_concern", ",", "self", ".", "__collation", ")", ")", "if", "not", "self", ".", "__id", ":", "self", ".", "__killed", "=", "True", "elif", "self", ".", "__id", ":", "# Get More", "if", "self", ".", "__limit", ":", "limit", "=", "self", ".", "__limit", "-", "self", ".", "__retrieved", "if", "self", ".", "__batch_size", ":", "limit", "=", "min", "(", "limit", ",", "self", ".", "__batch_size", ")", "else", ":", "limit", "=", "self", ".", "__batch_size", "# Exhaust cursors don't send getMore messages.", "if", "self", ".", "__exhaust", ":", "self", ".", "__send_message", "(", "None", ")", "else", ":", "self", ".", "__send_message", "(", "_GetMore", "(", "self", ".", "__collection", ".", "database", ".", "name", ",", "self", ".", "__collection", ".", "name", ",", "limit", ",", "self", ".", "__id", ",", "self", ".", "__codec_options", ",", "self", ".", "__max_await_time_ms", ")", ")", "else", ":", "# Cursor id is zero nothing else to return", "self", ".", "__killed", "=", "True", "return", "len", "(", "self", ".", "__data", ")" ]
Refreshes the cursor with more data from Mongo. Returns the length of self.__data after refresh. Will exit early if self.__data is already non-empty. Raises OperationFailure when the cursor cannot be refreshed due to an error on the query.
[ "Refreshes", "the", "cursor", "with", "more", "data", "from", "Mongo", "." ]
python
train
44.020833
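Callers never invoke _refresh directly; iterating a cursor drains the buffered batch and triggers it, with limit and batch_size shaping the query and getMore messages it sends. A hedged sketch against upstream pymongo's public API (this record is a vendored copy; the connection string and names are placeholders):

from pymongo import MongoClient

client = MongoClient('mongodb://localhost:27017')
coll = client.testdb.events

# Each exhausted batch makes the cursor call _refresh, which issues a
# getMore capped at min(remaining limit, batch_size).
for doc in coll.find({}).limit(250).batch_size(100):
    pass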
saltstack/salt
salt/cloud/clouds/msazure.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/msazure.py#L901-L1016
def destroy(name, conn=None, call=None, kwargs=None): ''' Destroy a VM CLI Examples: .. code-block:: bash salt-cloud -d myminion salt-cloud -a destroy myminion service_name=myservice ''' if call == 'function': raise SaltCloudSystemExit( 'The destroy action must be called with -d, --destroy, ' '-a or --action.' ) if not conn: conn = get_conn() if kwargs is None: kwargs = {} instance_data = show_instance(name, call='action') service_name = instance_data['deployment']['name'] disk_name = instance_data['role_info']['os_virtual_hard_disk']['disk_name'] ret = {} # TODO: Add the ability to delete or not delete a hosted service when # deleting a VM try: log.debug('Deleting role') result = conn.delete_role(service_name, service_name, name) delete_type = 'delete_role' except AzureException: log.debug('Failed to delete role, deleting deployment') try: result = conn.delete_deployment(service_name, service_name) except AzureConflictHttpError as exc: log.error(exc.message) raise SaltCloudSystemExit('{0}: {1}'.format(name, exc.message)) delete_type = 'delete_deployment' _wait_for_async(conn, result.request_id) ret[name] = { delete_type: {'request_id': result.request_id}, } if __opts__.get('update_cachedir', False) is True: __utils__['cloud.delete_minion_cachedir'](name, __active_provider_name__.split(':')[0], __opts__) cleanup_disks = config.get_cloud_config_value( 'cleanup_disks', get_configured_provider(), __opts__, search_global=False, default=False, ) if cleanup_disks: cleanup_vhds = kwargs.get('delete_vhd', config.get_cloud_config_value( 'cleanup_vhds', get_configured_provider(), __opts__, search_global=False, default=False, )) log.debug('Deleting disk %s', disk_name) if cleanup_vhds: log.debug('Deleting vhd') def wait_for_destroy(): ''' Wait for the VM to be deleted ''' try: data = delete_disk(kwargs={'name': disk_name, 'delete_vhd': cleanup_vhds}, call='function') return data except AzureConflictHttpError: log.debug('Waiting for VM to be destroyed...') time.sleep(5) return False data = salt.utils.cloud.wait_for_fun( wait_for_destroy, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', {}, __opts__, default=15 * 60), ) ret[name]['delete_disk'] = { 'name': disk_name, 'delete_vhd': cleanup_vhds, 'data': data } # Services can't be cleaned up unless disks are too cleanup_services = config.get_cloud_config_value( 'cleanup_services', get_configured_provider(), __opts__, search_global=False, default=False ) if cleanup_services: log.debug('Deleting service %s', service_name) def wait_for_disk_delete(): ''' Wait for the disk to be deleted ''' try: data = delete_service(kwargs={'name': service_name}, call='function') return data except AzureConflictHttpError: log.debug('Waiting for disk to be deleted...') time.sleep(5) return False data = salt.utils.cloud.wait_for_fun( wait_for_disk_delete, timeout=config.get_cloud_config_value( 'wait_for_fun_timeout', {}, __opts__, default=15 * 60), ) ret[name]['delete_services'] = { 'name': service_name, 'data': data } return ret
[ "def", "destroy", "(", "name", ",", "conn", "=", "None", ",", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "call", "==", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The destroy action must be called with -d, --destroy, '", "'-a or --action.'", ")", "if", "not", "conn", ":", "conn", "=", "get_conn", "(", ")", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "instance_data", "=", "show_instance", "(", "name", ",", "call", "=", "'action'", ")", "service_name", "=", "instance_data", "[", "'deployment'", "]", "[", "'name'", "]", "disk_name", "=", "instance_data", "[", "'role_info'", "]", "[", "'os_virtual_hard_disk'", "]", "[", "'disk_name'", "]", "ret", "=", "{", "}", "# TODO: Add the ability to delete or not delete a hosted service when", "# deleting a VM", "try", ":", "log", ".", "debug", "(", "'Deleting role'", ")", "result", "=", "conn", ".", "delete_role", "(", "service_name", ",", "service_name", ",", "name", ")", "delete_type", "=", "'delete_role'", "except", "AzureException", ":", "log", ".", "debug", "(", "'Failed to delete role, deleting deployment'", ")", "try", ":", "result", "=", "conn", ".", "delete_deployment", "(", "service_name", ",", "service_name", ")", "except", "AzureConflictHttpError", "as", "exc", ":", "log", ".", "error", "(", "exc", ".", "message", ")", "raise", "SaltCloudSystemExit", "(", "'{0}: {1}'", ".", "format", "(", "name", ",", "exc", ".", "message", ")", ")", "delete_type", "=", "'delete_deployment'", "_wait_for_async", "(", "conn", ",", "result", ".", "request_id", ")", "ret", "[", "name", "]", "=", "{", "delete_type", ":", "{", "'request_id'", ":", "result", ".", "request_id", "}", ",", "}", "if", "__opts__", ".", "get", "(", "'update_cachedir'", ",", "False", ")", "is", "True", ":", "__utils__", "[", "'cloud.delete_minion_cachedir'", "]", "(", "name", ",", "__active_provider_name__", ".", "split", "(", "':'", ")", "[", "0", "]", ",", "__opts__", ")", "cleanup_disks", "=", "config", ".", "get_cloud_config_value", "(", "'cleanup_disks'", ",", "get_configured_provider", "(", ")", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "False", ",", ")", "if", "cleanup_disks", ":", "cleanup_vhds", "=", "kwargs", ".", "get", "(", "'delete_vhd'", ",", "config", ".", "get_cloud_config_value", "(", "'cleanup_vhds'", ",", "get_configured_provider", "(", ")", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "False", ",", ")", ")", "log", ".", "debug", "(", "'Deleting disk %s'", ",", "disk_name", ")", "if", "cleanup_vhds", ":", "log", ".", "debug", "(", "'Deleting vhd'", ")", "def", "wait_for_destroy", "(", ")", ":", "'''\n Wait for the VM to be deleted\n '''", "try", ":", "data", "=", "delete_disk", "(", "kwargs", "=", "{", "'name'", ":", "disk_name", ",", "'delete_vhd'", ":", "cleanup_vhds", "}", ",", "call", "=", "'function'", ")", "return", "data", "except", "AzureConflictHttpError", ":", "log", ".", "debug", "(", "'Waiting for VM to be destroyed...'", ")", "time", ".", "sleep", "(", "5", ")", "return", "False", "data", "=", "salt", ".", "utils", ".", "cloud", ".", "wait_for_fun", "(", "wait_for_destroy", ",", "timeout", "=", "config", ".", "get_cloud_config_value", "(", "'wait_for_fun_timeout'", ",", "{", "}", ",", "__opts__", ",", "default", "=", "15", "*", "60", ")", ",", ")", "ret", "[", "name", "]", "[", "'delete_disk'", "]", "=", "{", "'name'", ":", "disk_name", ",", "'delete_vhd'", ":", "cleanup_vhds", ",", "'data'", ":", "data", "}", "# Services can't be cleaned up 
unless disks are too", "cleanup_services", "=", "config", ".", "get_cloud_config_value", "(", "'cleanup_services'", ",", "get_configured_provider", "(", ")", ",", "__opts__", ",", "search_global", "=", "False", ",", "default", "=", "False", ")", "if", "cleanup_services", ":", "log", ".", "debug", "(", "'Deleting service %s'", ",", "service_name", ")", "def", "wait_for_disk_delete", "(", ")", ":", "'''\n Wait for the disk to be deleted\n '''", "try", ":", "data", "=", "delete_service", "(", "kwargs", "=", "{", "'name'", ":", "service_name", "}", ",", "call", "=", "'function'", ")", "return", "data", "except", "AzureConflictHttpError", ":", "log", ".", "debug", "(", "'Waiting for disk to be deleted...'", ")", "time", ".", "sleep", "(", "5", ")", "return", "False", "data", "=", "salt", ".", "utils", ".", "cloud", ".", "wait_for_fun", "(", "wait_for_disk_delete", ",", "timeout", "=", "config", ".", "get_cloud_config_value", "(", "'wait_for_fun_timeout'", ",", "{", "}", ",", "__opts__", ",", "default", "=", "15", "*", "60", ")", ",", ")", "ret", "[", "name", "]", "[", "'delete_services'", "]", "=", "{", "'name'", ":", "service_name", ",", "'data'", ":", "data", "}", "return", "ret" ]
Destroy a VM CLI Examples: .. code-block:: bash salt-cloud -d myminion salt-cloud -a destroy myminion service_name=myservice
[ "Destroy", "a", "VM" ]
python
train
33.637931
uchicago-cs/deepdish
deepdish/image.py
https://github.com/uchicago-cs/deepdish/blob/01af93621fe082a3972fe53ba7375388c02b0085/deepdish/image.py#L135-L157
def load(path, dtype=np.float64): """ Loads an image from file. Parameters ---------- path : str Path to image file. dtype : np.dtype Defaults to ``np.float64``, which means the image will be returned as a float with values between 0 and 1. If ``np.uint8`` is specified, the values will be between 0 and 255 and no conversion cost will be incurred. """ _import_skimage() import skimage.io im = skimage.io.imread(path) if dtype == np.uint8: return im elif dtype in {np.float16, np.float32, np.float64}: return im.astype(dtype) / 255 else: raise ValueError('Unsupported dtype')
[ "def", "load", "(", "path", ",", "dtype", "=", "np", ".", "float64", ")", ":", "_import_skimage", "(", ")", "import", "skimage", ".", "io", "im", "=", "skimage", ".", "io", ".", "imread", "(", "path", ")", "if", "dtype", "==", "np", ".", "uint8", ":", "return", "im", "elif", "dtype", "in", "{", "np", ".", "float16", ",", "np", ".", "float32", ",", "np", ".", "float64", "}", ":", "return", "im", ".", "astype", "(", "dtype", ")", "/", "255", "else", ":", "raise", "ValueError", "(", "'Unsupported dtype'", ")" ]
Loads an image from file. Parameters ---------- path : str Path to image file. dtype : np.dtype Defaults to ``np.float64``, which means the image will be returned as a float with values between 0 and 1. If ``np.uint8`` is specified, the values will be between 0 and 255 and no conversion cost will be incurred.
[ "Loads", "an", "image", "from", "file", "." ]
python
train
29.130435
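A usage sketch for the loader, assuming scikit-image is installed and the filename (a placeholder here) exists:

import numpy as np
from deepdish import image

im = image.load('photo.png')                    # float64 in [0, 1]
raw = image.load('photo.png', dtype=np.uint8)   # uint8 in [0, 255], no conversion cost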
CITGuru/PyInquirer
PyInquirer/color_print.py
https://github.com/CITGuru/PyInquirer/blob/10d53723b36ebc7bba311457ec4afd9747a5c777/PyInquirer/color_print.py#L10-L27
def _print_token_factory(col): """Internal helper to provide color names.""" def _helper(msg): style = style_from_dict({ Token.Color: col, }) tokens = [ (Token.Color, msg) ] print_tokens(tokens, style=style) def _helper_no_terminal(msg): # workaround if we have no terminal print(msg) if sys.stdout.isatty(): return _helper else: return _helper_no_terminal
[ "def", "_print_token_factory", "(", "col", ")", ":", "def", "_helper", "(", "msg", ")", ":", "style", "=", "style_from_dict", "(", "{", "Token", ".", "Color", ":", "col", ",", "}", ")", "tokens", "=", "[", "(", "Token", ".", "Color", ",", "msg", ")", "]", "print_tokens", "(", "tokens", ",", "style", "=", "style", ")", "def", "_helper_no_terminal", "(", "msg", ")", ":", "# workaround if we have no terminal", "print", "(", "msg", ")", "if", "sys", ".", "stdout", ".", "isatty", "(", ")", ":", "return", "_helper", "else", ":", "return", "_helper_no_terminal" ]
Internal helper to provide color names.
[ "Internal", "helper", "to", "provide", "color", "names", "." ]
python
test
25.333333
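The factory returns a printer bound to one color, falling back to a plain print when stdout is not a TTY. A hedged usage sketch, assuming the prompt_toolkit helpers imported by this module are available (the hex color is illustrative):

print_red = _print_token_factory('#ff0000')
print_red('validation failed')   # colored on a terminal, plain print otherwise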
datalib/StatsCounter
statscounter/_stats.py
https://github.com/datalib/StatsCounter/blob/5386c967808bbe451025af1d550f060cd7f86669/statscounter/_stats.py#L154-L200
def median_grouped(data, interval=1):
    """Return the 50th percentile (median) of grouped continuous data.

    >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
    3.7
    >>> median_grouped([52, 52, 53, 54])
    52.5

    This calculates the median as the 50th percentile, and should be
    used when your data is continuous and grouped. In the above example,
    the values 1, 2, 3, etc. actually represent the midpoint of classes
    0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in
    class 3.5-4.5, and interpolation is used to estimate it.

    Optional argument ``interval`` represents the class interval, and
    defaults to 1. Changing the class interval naturally will change the
    interpolated 50th percentile value:

    >>> median_grouped([1, 3, 3, 5, 7], interval=1)
    3.25
    >>> median_grouped([1, 3, 3, 5, 7], interval=2)
    3.5

    This function does not check whether the data points are at least
    ``interval`` apart.
    """
    data = sorted(data)
    n = len(data)
    if n == 0:
        raise StatisticsError("no median for empty data")
    elif n == 1:
        return data[0]
    # Find the value at the midpoint. Remember this corresponds to the
    # centre of the class interval.
    x = data[n//2]
    for obj in (x, interval):
        if isinstance(obj, (str, bytes)):
            raise TypeError('expected number but got %r' % obj)
    try:
        L = x - interval/2  # The lower limit of the median interval.
    except TypeError:
        # Mixed type. For now we just coerce to float.
        L = float(x) - float(interval)/2
    cf = data.index(x)  # Number of values below the median interval.
    # FIXME The following line could be more efficient for big lists.
    f = data.count(x)  # Number of data points in the median interval.
    return L + interval*(n/2 - cf)/f
[ "def", "median_grouped", "(", "data", ",", "interval", "=", "1", ")", ":", "data", "=", "sorted", "(", "data", ")", "n", "=", "len", "(", "data", ")", "if", "n", "==", "0", ":", "raise", "StatisticsError", "(", "\"no median for empty data\"", ")", "elif", "n", "==", "1", ":", "return", "data", "[", "0", "]", "# Find the value at the midpoint. Remember this corresponds to the", "# centre of the class interval.", "x", "=", "data", "[", "n", "//", "2", "]", "for", "obj", "in", "(", "x", ",", "interval", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "str", ",", "bytes", ")", ")", ":", "raise", "TypeError", "(", "'expected number but got %r'", "%", "obj", ")", "try", ":", "L", "=", "x", "-", "interval", "/", "2", "# The lower limit of the median interval.", "except", "TypeError", ":", "# Mixed type. For now we just coerce to float.", "L", "=", "float", "(", "x", ")", "-", "float", "(", "interval", ")", "/", "2", "cf", "=", "data", ".", "index", "(", "x", ")", "# Number of values below the median interval.", "# FIXME The following line could be more efficient for big lists.", "f", "=", "data", ".", "count", "(", "x", ")", "# Number of data points in the median interval.", "return", "L", "+", "interval", "*", "(", "n", "/", "2", "-", "cf", ")", "/", "f" ]
Return the 50th percentile (median) of grouped continuous data. >>> median_grouped([1, 2, 2, 3, 4, 4, 4, 4, 4, 5]) 3.7 >>> median_grouped([52, 52, 53, 54]) 52.5 This calculates the median as the 50th percentile, and should be used when your data is continuous and grouped. In the above example, the values 1, 2, 3, etc. actually represent the midpoint of classes 0.5-1.5, 1.5-2.5, 2.5-3.5, etc. The middle value falls somewhere in class 3.5-4.5, and interpolation is used to estimate it. Optional argument ``interval`` represents the class interval, and defaults to 1. Changing the class interval naturally will change the interpolated 50th percentile value: >>> median_grouped([1, 3, 3, 5, 7], interval=1) 3.25 >>> median_grouped([1, 3, 3, 5, 7], interval=2) 3.5 This function does not check whether the data points are at least ``interval`` apart.
[ "Return", "the", "50th", "percentile", "(", "median", ")", "of", "grouped", "continuous", "data", "." ]
python
train
38.212766
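Working the first doctest by hand: for [1, 2, 2, 3, 4, 4, 4, 4, 4, 5], n = 10, the midpoint value is x = data[5] = 4, so L = 3.5, cf = 4 values fall below the median class and f = 5 values fall inside it, giving 3.5 + 1*(10/2 - 4)/5 = 3.7. The same computation as straight-line code:

data = sorted([1, 2, 2, 3, 4, 4, 4, 4, 4, 5])
interval = 1
n = len(data)
x = data[n // 2]                  # midpoint value: 4
L = x - interval / 2              # lower limit of the median class: 3.5
cf = data.index(x)                # values below the class: 4
f = data.count(x)                 # values inside the class: 5
print(L + interval * (n / 2 - cf) / f)   # 3.7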
saltstack/salt
salt/cache/etcd_cache.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cache/etcd_cache.py#L90-L119
def _init_client(): '''Setup client and init datastore. ''' global client, path_prefix if client is not None: return etcd_kwargs = { 'host': __opts__.get('etcd.host', '127.0.0.1'), 'port': __opts__.get('etcd.port', 2379), 'protocol': __opts__.get('etcd.protocol', 'http'), 'allow_reconnect': __opts__.get('etcd.allow_reconnect', True), 'allow_redirect': __opts__.get('etcd.allow_redirect', False), 'srv_domain': __opts__.get('etcd.srv_domain', None), 'read_timeout': __opts__.get('etcd.read_timeout', 60), 'username': __opts__.get('etcd.username', None), 'password': __opts__.get('etcd.password', None), 'cert': __opts__.get('etcd.cert', None), 'ca_cert': __opts__.get('etcd.ca_cert', None), } path_prefix = __opts__.get('etcd.path_prefix', _DEFAULT_PATH_PREFIX) if path_prefix != "": path_prefix = '/{0}'.format(path_prefix.strip('/')) log.info("etcd: Setting up client with params: %r", etcd_kwargs) client = etcd.Client(**etcd_kwargs) try: client.read(path_prefix) except etcd.EtcdKeyNotFound: log.info("etcd: Creating dir %r", path_prefix) client.write(path_prefix, None, dir=True)
[ "def", "_init_client", "(", ")", ":", "global", "client", ",", "path_prefix", "if", "client", "is", "not", "None", ":", "return", "etcd_kwargs", "=", "{", "'host'", ":", "__opts__", ".", "get", "(", "'etcd.host'", ",", "'127.0.0.1'", ")", ",", "'port'", ":", "__opts__", ".", "get", "(", "'etcd.port'", ",", "2379", ")", ",", "'protocol'", ":", "__opts__", ".", "get", "(", "'etcd.protocol'", ",", "'http'", ")", ",", "'allow_reconnect'", ":", "__opts__", ".", "get", "(", "'etcd.allow_reconnect'", ",", "True", ")", ",", "'allow_redirect'", ":", "__opts__", ".", "get", "(", "'etcd.allow_redirect'", ",", "False", ")", ",", "'srv_domain'", ":", "__opts__", ".", "get", "(", "'etcd.srv_domain'", ",", "None", ")", ",", "'read_timeout'", ":", "__opts__", ".", "get", "(", "'etcd.read_timeout'", ",", "60", ")", ",", "'username'", ":", "__opts__", ".", "get", "(", "'etcd.username'", ",", "None", ")", ",", "'password'", ":", "__opts__", ".", "get", "(", "'etcd.password'", ",", "None", ")", ",", "'cert'", ":", "__opts__", ".", "get", "(", "'etcd.cert'", ",", "None", ")", ",", "'ca_cert'", ":", "__opts__", ".", "get", "(", "'etcd.ca_cert'", ",", "None", ")", ",", "}", "path_prefix", "=", "__opts__", ".", "get", "(", "'etcd.path_prefix'", ",", "_DEFAULT_PATH_PREFIX", ")", "if", "path_prefix", "!=", "\"\"", ":", "path_prefix", "=", "'/{0}'", ".", "format", "(", "path_prefix", ".", "strip", "(", "'/'", ")", ")", "log", ".", "info", "(", "\"etcd: Setting up client with params: %r\"", ",", "etcd_kwargs", ")", "client", "=", "etcd", ".", "Client", "(", "*", "*", "etcd_kwargs", ")", "try", ":", "client", ".", "read", "(", "path_prefix", ")", "except", "etcd", ".", "EtcdKeyNotFound", ":", "log", ".", "info", "(", "\"etcd: Creating dir %r\"", ",", "path_prefix", ")", "client", ".", "write", "(", "path_prefix", ",", "None", ",", "dir", "=", "True", ")" ]
Setup client and init datastore.
[ "Setup", "client", "and", "init", "datastore", "." ]
python
train
42.566667
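The same read-or-create idiom, sketched with python-etcd outside of Salt (host, port, and prefix are placeholders):

import etcd

client = etcd.Client(host='127.0.0.1', port=2379)
path_prefix = '/salt_cache'
try:
    client.read(path_prefix)
except etcd.EtcdKeyNotFound:
    client.write(path_prefix, None, dir=True)   # create the directory node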
tensorflow/tensor2tensor
tensor2tensor/models/research/transformer_vae.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/transformer_vae.py#L907-L916
def imagetransformer_ae_imagenet(): """For 64x64 ImageNet. ~56M trainable variables.""" hparams = imagetransformer_ae_cifar() hparams.max_length = int(64 * 64 * 3) hparams.img_len = 64 hparams.num_heads = 4 # Heads are expensive on TPUs. # Reduce architecture from 32x32 CIFAR-10 in order to fit in memory. hparams.num_decoder_layers = 8 hparams.num_compress_steps = 2 return hparams
[ "def", "imagetransformer_ae_imagenet", "(", ")", ":", "hparams", "=", "imagetransformer_ae_cifar", "(", ")", "hparams", ".", "max_length", "=", "int", "(", "64", "*", "64", "*", "3", ")", "hparams", ".", "img_len", "=", "64", "hparams", ".", "num_heads", "=", "4", "# Heads are expensive on TPUs.", "# Reduce architecture from 32x32 CIFAR-10 in order to fit in memory.", "hparams", ".", "num_decoder_layers", "=", "8", "hparams", ".", "num_compress_steps", "=", "2", "return", "hparams" ]
For 64x64 ImageNet. ~56M trainable variables.
[ "For", "64x64", "ImageNet", ".", "~56M", "trainable", "variables", "." ]
python
train
39.3
saltstack/salt
salt/states/zk_concurrency.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/zk_concurrency.py#L115-L155
def unlock(name, zk_hosts=None, # in case you need to unlock without having run lock (failed execution for example) identifier=None, max_concurrency=1, ephemeral_lease=False, profile=None, scheme=None, username=None, password=None, default_acl=None): ''' Remove lease from semaphore. ''' ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''} conn_kwargs = {'profile': profile, 'scheme': scheme, 'username': username, 'password': password, 'default_acl': default_acl} if __opts__['test']: ret['result'] = None ret['comment'] = 'Released lock if it is here' return ret if identifier is None: identifier = __grains__['id'] unlocked = __salt__['zk_concurrency.unlock'](name, zk_hosts=zk_hosts, identifier=identifier, max_concurrency=max_concurrency, ephemeral_lease=ephemeral_lease, **conn_kwargs) if unlocked: ret['result'] = True else: ret['comment'] = 'Unable to find lease for path {0}'.format(name) return ret
[ "def", "unlock", "(", "name", ",", "zk_hosts", "=", "None", ",", "# in case you need to unlock without having run lock (failed execution for example)", "identifier", "=", "None", ",", "max_concurrency", "=", "1", ",", "ephemeral_lease", "=", "False", ",", "profile", "=", "None", ",", "scheme", "=", "None", ",", "username", "=", "None", ",", "password", "=", "None", ",", "default_acl", "=", "None", ")", ":", "ret", "=", "{", "'name'", ":", "name", ",", "'changes'", ":", "{", "}", ",", "'result'", ":", "False", ",", "'comment'", ":", "''", "}", "conn_kwargs", "=", "{", "'profile'", ":", "profile", ",", "'scheme'", ":", "scheme", ",", "'username'", ":", "username", ",", "'password'", ":", "password", ",", "'default_acl'", ":", "default_acl", "}", "if", "__opts__", "[", "'test'", "]", ":", "ret", "[", "'result'", "]", "=", "None", "ret", "[", "'comment'", "]", "=", "'Released lock if it is here'", "return", "ret", "if", "identifier", "is", "None", ":", "identifier", "=", "__grains__", "[", "'id'", "]", "unlocked", "=", "__salt__", "[", "'zk_concurrency.unlock'", "]", "(", "name", ",", "zk_hosts", "=", "zk_hosts", ",", "identifier", "=", "identifier", ",", "max_concurrency", "=", "max_concurrency", ",", "ephemeral_lease", "=", "ephemeral_lease", ",", "*", "*", "conn_kwargs", ")", "if", "unlocked", ":", "ret", "[", "'result'", "]", "=", "True", "else", ":", "ret", "[", "'comment'", "]", "=", "'Unable to find lease for path {0}'", ".", "format", "(", "name", ")", "return", "ret" ]
Remove lease from semaphore.
[ "Remove", "lease", "from", "semaphore", "." ]
python
train
33.609756
codelv/enaml-native
src/enamlnative/core/eventloop/ioloop.py
https://github.com/codelv/enaml-native/blob/c33986e9eda468c508806e0a3e73c771401e5718/src/enamlnative/core/eventloop/ioloop.py#L679-L692
def handle_callback_exception(self, callback): """This method is called whenever a callback run by the `IOLoop` throws an exception. By default simply logs the exception as an error. Subclasses may override this method to customize reporting of exceptions. The exception itself is not passed explicitly, but is available in `sys.exc_info`. """ if self._error_handler: self._error_handler(callback) else: app_log.error("Exception in callback %r", callback, exc_info=True)
[ "def", "handle_callback_exception", "(", "self", ",", "callback", ")", ":", "if", "self", ".", "_error_handler", ":", "self", ".", "_error_handler", "(", "callback", ")", "else", ":", "app_log", ".", "error", "(", "\"Exception in callback %r\"", ",", "callback", ",", "exc_info", "=", "True", ")" ]
This method is called whenever a callback run by the `IOLoop` throws an exception. By default simply logs the exception as an error. Subclasses may override this method to customize reporting of exceptions. The exception itself is not passed explicitly, but is available in `sys.exc_info`.
[ "This", "method", "is", "called", "whenever", "a", "callback", "run", "by", "the", "IOLoop", "throws", "an", "exception", "." ]
python
train
39.714286
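Subclassing to customize reporting, as the docstring suggests; a hedged sketch assuming the IOLoop class from this module is importable (class and message format are illustrative):

import sys

class LoggingIOLoop(IOLoop):
    def handle_callback_exception(self, callback):
        exc_type, exc, _ = sys.exc_info()
        print('callback %r raised %s: %s' % (callback, exc_type.__name__, exc))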
spacetelescope/stsci.tools
lib/stsci/tools/fileutil.py
https://github.com/spacetelescope/stsci.tools/blob/9a022503ad24ca54ce83331482dfa3ff6de9f403/lib/stsci/tools/fileutil.py#L308-L343
def verifyWriteMode(files):
    """ Checks whether files are writable. It is up to the calling routine
        to raise an Exception, if desired.

        This function returns True, if all files are writable and False,
        if any are not writable. In addition, for all files found to not
        be writable, it will print out the list of names of affected files.
    """
    # Start by ensuring that input is a list of filenames,
    # if only a single filename has been given as input,
    # convert it to a list with len == 1.
    if not isinstance(files, list):
        files = [files]

    # Keep track of the name of each file which is not writable
    not_writable = []
    writable = True

    # Check each file in input list
    for fname in files:
        try:
            f = open(fname,'a')
            f.close()
            del f
        except:
            not_writable.append(fname)
            writable = False

    if not writable:
        print('The following file(s) do not have write permission!')
        for fname in not_writable:
            print('    ', fname)

    return writable
[ "def", "verifyWriteMode", "(", "files", ")", ":", "# Start by insuring that input is a list of filenames,", "# if only a single filename has been given as input,", "# convert it to a list with len == 1.", "if", "not", "isinstance", "(", "files", ",", "list", ")", ":", "files", "=", "[", "files", "]", "# Keep track of the name of each file which is not writable", "not_writable", "=", "[", "]", "writable", "=", "True", "# Check each file in input list", "for", "fname", "in", "files", ":", "try", ":", "f", "=", "open", "(", "fname", ",", "'a'", ")", "f", ".", "close", "(", ")", "del", "f", "except", ":", "not_writable", ".", "append", "(", "fname", ")", "writable", "=", "False", "if", "not", "writable", ":", "print", "(", "'The following file(s) do not have write permission!'", ")", "for", "fname", "in", "not_writable", ":", "print", "(", "' '", ",", "fname", ")", "return", "writable" ]
Checks whether files are writable. It is up to the calling routine to raise an Exception, if desired. This function returns True, if all files are writable and False, if any are not writable. In addition, for all files found to not be writable, it will print out the list of names of affected files.
[ "Checks", "whether", "files", "are", "writable", ".", "It", "is", "up", "to", "the", "calling", "routine", "to", "raise", "an", "Exception", "if", "desired", "." ]
python
train
29.611111
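A quick pre-flight check before an in-place update, per the record's module path (filenames are placeholders):

from stsci.tools import fileutil

if not fileutil.verifyWriteMode(['image1.fits', 'image2.fits']):
    raise IOError('input files must be writable')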
fermiPy/fermipy
fermipy/castro.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/castro.py#L1162-L1200
def create_from_tables(cls, norm_type='eflux', tab_s="SCANDATA", tab_e="EBOUNDS"): """Create a CastroData object from two tables Parameters ---------- norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) tab_s : str table scan data tab_e : str table energy binning and normalization data Returns ------- castro : `~fermipy.castro.CastroData` """ if norm_type in ['flux', 'eflux', 'dnde']: norm_vals = np.array(tab_s['norm_scan'] * tab_e['ref_%s' % norm_type][:, np.newaxis]) elif norm_type == "norm": norm_vals = np.array(tab_s['norm_scan']) else: raise Exception('Unrecognized normalization type: %s' % norm_type) nll_vals = -np.array(tab_s['dloglike_scan']) rs = ReferenceSpec.create_from_table(tab_e) return cls(norm_vals, nll_vals, rs, norm_type)
[ "def", "create_from_tables", "(", "cls", ",", "norm_type", "=", "'eflux'", ",", "tab_s", "=", "\"SCANDATA\"", ",", "tab_e", "=", "\"EBOUNDS\"", ")", ":", "if", "norm_type", "in", "[", "'flux'", ",", "'eflux'", ",", "'dnde'", "]", ":", "norm_vals", "=", "np", ".", "array", "(", "tab_s", "[", "'norm_scan'", "]", "*", "tab_e", "[", "'ref_%s'", "%", "norm_type", "]", "[", ":", ",", "np", ".", "newaxis", "]", ")", "elif", "norm_type", "==", "\"norm\"", ":", "norm_vals", "=", "np", ".", "array", "(", "tab_s", "[", "'norm_scan'", "]", ")", "else", ":", "raise", "Exception", "(", "'Unrecognized normalization type: %s'", "%", "norm_type", ")", "nll_vals", "=", "-", "np", ".", "array", "(", "tab_s", "[", "'dloglike_scan'", "]", ")", "rs", "=", "ReferenceSpec", ".", "create_from_table", "(", "tab_e", ")", "return", "cls", "(", "norm_vals", ",", "nll_vals", ",", "rs", ",", "norm_type", ")" ]
Create a CastroData object from two tables Parameters ---------- norm_type : str Type of normalization to use. Valid options are: * norm : Normalization w.r.t. to test source * flux : Flux of the test source ( ph cm^-2 s^-1 ) * eflux: Energy Flux of the test source ( MeV cm^-2 s^-1 ) * npred: Number of predicted photons (Not implemented) * dnde : Differential flux of the test source ( ph cm^-2 s^-1 MeV^-1 ) tab_s : str table scan data tab_e : str table energy binning and normalization data Returns ------- castro : `~fermipy.castro.CastroData`
[ "Create", "a", "CastroData", "object", "from", "two", "tables" ]
python
train
35.179487
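A hedged sketch of the column access the classmethod performs, with made-up astropy tables (shapes and values are invented; constructing the full ReferenceSpec from a real EBOUNDS table is omitted):

import numpy as np
from astropy.table import Table

# 2 energy bins, 3 scan points per bin; values are made up
tab_s = Table({'norm_scan': [[0.5, 1.0, 2.0], [0.5, 1.0, 2.0]],
               'dloglike_scan': [[-1.0, 0.0, -0.5], [-2.0, 0.0, -1.0]]})
tab_e = Table({'ref_eflux': [1e-6, 2e-6]})

# the 'eflux' branch scales each bin's scan by its reference energy flux
norm_vals = np.array(tab_s['norm_scan'] * tab_e['ref_eflux'][:, np.newaxis])
nll_vals = -np.array(tab_s['dloglike_scan'])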
python-openxml/python-docx
docx/opc/pkgreader.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/opc/pkgreader.py#L49-L58
def iter_srels(self): """ Generate a 2-tuple `(source_uri, srel)` for each of the relationships in the package. """ for srel in self._pkg_srels: yield (PACKAGE_URI, srel) for spart in self._sparts: for srel in spart.srels: yield (spart.partname, srel)
[ "def", "iter_srels", "(", "self", ")", ":", "for", "srel", "in", "self", ".", "_pkg_srels", ":", "yield", "(", "PACKAGE_URI", ",", "srel", ")", "for", "spart", "in", "self", ".", "_sparts", ":", "for", "srel", "in", "spart", ".", "srels", ":", "yield", "(", "spart", ".", "partname", ",", "srel", ")" ]
Generate a 2-tuple `(source_uri, srel)` for each of the relationships in the package.
[ "Generate", "a", "2", "-", "tuple", "(", "source_uri", "srel", ")", "for", "each", "of", "the", "relationships", "in", "the", "package", "." ]
python
train
33
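A small consumer of the generator above, assuming an OpcPackage-style reader is already constructed (names are illustrative):

for source_uri, srel in pkg_reader.iter_srels():
    print(source_uri, srel.reltype)   # package-level rels first, then per-part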
Nachtfeuer/pipeline
spline/components/tasks.py
https://github.com/Nachtfeuer/pipeline/blob/04ca18c4e95e4349532bb45b768206393e1f2c13/spline/components/tasks.py#L230-L249
def process_shell(self, creator, entry, config): """Processing a shell entry.""" self.logger.info("Processing Bash code: start") output = [] shell = creator(entry, config) for line in shell.process(): output.append(line) self.logger.info(" | %s", line) if shell.success: self.logger.info("Processing Bash code: finished") return {'success': True, 'output': output} for line in self.run_cleanup(config.env, shell.exit_code): output.append(line) self.logger.error("Pipeline has failed: leaving as soon as possible!") self.event.failed() return {'success': False, 'output': output}
[ "def", "process_shell", "(", "self", ",", "creator", ",", "entry", ",", "config", ")", ":", "self", ".", "logger", ".", "info", "(", "\"Processing Bash code: start\"", ")", "output", "=", "[", "]", "shell", "=", "creator", "(", "entry", ",", "config", ")", "for", "line", "in", "shell", ".", "process", "(", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "info", "(", "\" | %s\"", ",", "line", ")", "if", "shell", ".", "success", ":", "self", ".", "logger", ".", "info", "(", "\"Processing Bash code: finished\"", ")", "return", "{", "'success'", ":", "True", ",", "'output'", ":", "output", "}", "for", "line", "in", "self", ".", "run_cleanup", "(", "config", ".", "env", ",", "shell", ".", "exit_code", ")", ":", "output", ".", "append", "(", "line", ")", "self", ".", "logger", ".", "error", "(", "\"Pipeline has failed: leaving as soon as possible!\"", ")", "self", ".", "event", ".", "failed", "(", ")", "return", "{", "'success'", ":", "False", ",", "'output'", ":", "output", "}" ]
Processing a shell entry.
[ "Processing", "a", "shell", "entry", "." ]
python
train
35.15
gwpy/gwpy
gwpy/cli/timeseries.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/cli/timeseries.py#L35-L47
def get_ylabel(self): """Text for y-axis label, check if channel defines it """ units = self.units if len(units) == 1 and str(units[0]) == '': # dimensionless return '' if len(units) == 1 and self.usetex: return units[0].to_string('latex') elif len(units) == 1: return units[0].to_string() elif len(units) > 1: return 'Multiple units' return super(TimeSeries, self).get_ylabel()
[ "def", "get_ylabel", "(", "self", ")", ":", "units", "=", "self", ".", "units", "if", "len", "(", "units", ")", "==", "1", "and", "str", "(", "units", "[", "0", "]", ")", "==", "''", ":", "# dimensionless", "return", "''", "if", "len", "(", "units", ")", "==", "1", "and", "self", ".", "usetex", ":", "return", "units", "[", "0", "]", ".", "to_string", "(", "'latex'", ")", "elif", "len", "(", "units", ")", "==", "1", ":", "return", "units", "[", "0", "]", ".", "to_string", "(", ")", "elif", "len", "(", "units", ")", ">", "1", ":", "return", "'Multiple units'", "return", "super", "(", "TimeSeries", ",", "self", ")", ".", "get_ylabel", "(", ")" ]
Text for y-axis label, check if channel defines it
[ "Text", "for", "y", "-", "axis", "label", "check", "if", "channel", "defines", "it" ]
python
train
36.923077
jfear/sramongo
sramongo/services/entrez.py
https://github.com/jfear/sramongo/blob/82a9a157e44bda4100be385c644b3ac21be66038/sramongo/services/entrez.py#L118-L171
def elink(db: str, dbfrom: str, ids=False, webenv=False, query_key=False,
          api_key=False, email=False, **kwargs) -> Optional[ElinkResult]:
    """Get links between Entrez databases using the ELink API.

    Parameters
    ----------
    db : str
        Entrez database to get ids from.
    dbfrom : str
        Entrez database the provided ids are from.
    ids : list or str
        List of IDs to submit to the server.
    webenv : str
        An Entrez WebEnv to use saved history.
    query_key : str
        An Entrez query_key to use saved history.
    api_key : str
        A user's API key which allows more requests per second.
    email : str
        A user's email which is required if not using an API key.

    Returns
    -------
    ElinkResult
        An ElinkResult with values [dbfrom, dbto, webenv, query_key], or
        None if there was a server error.
    """
    url = BASE_URL + f'elink.fcgi?dbfrom={dbfrom}&db={db}&retmode=json&cmd=neighbor_history'
    url = check_webenv(webenv, url)
    url = check_query_key(query_key, url)
    url = check_api_key(api_key, url)
    url = check_email(email, url)

    if ids:
        if isinstance(ids, str):
            id = ids
        else:
            id = ','.join(ids)
        url += f'&id={id}'

    time.sleep(PAUSE)
    resp = requests.get(url)
    if resp.status_code != 200:
        print('There was a server error')
        return

    text = resp.json()
    time.sleep(.5)
    return ElinkResult(
        text['linksets'][0].get('dbfrom', ''),
        text['linksets'][0].get('linksetdbhistories', [{'dbto': ''}])[0].get('dbto', ''),
        text['linksets'][0].get('webenv', ''),
        text['linksets'][0].get('linksetdbhistories', [{'querykey': ''}])[0].get('querykey', ''),
    )
[ "def", "elink", "(", "db", ":", "str", ",", "dbfrom", ":", "str", ",", "ids", "=", "False", ",", "webenv", "=", "False", ",", "query_key", "=", "False", ",", "api_key", "=", "False", ",", "email", "=", "False", ",", "*", "*", "kwargs", ")", "->", "Optional", "[", "ElinkResult", "]", ":", "url", "=", "BASE_URL", "+", "f'elink.fcgi?dbfrom={dbfrom}&db={db}&retmode=json&cmd=neighbor_history'", "url", "=", "check_webenv", "(", "webenv", ",", "url", ")", "url", "=", "check_query_key", "(", "query_key", ",", "url", ")", "url", "=", "check_api_key", "(", "api_key", ",", "url", ")", "url", "=", "check_email", "(", "email", ",", "url", ")", "if", "ids", ":", "if", "isinstance", "(", "ids", ",", "str", ")", ":", "id", "=", "ids", "else", ":", "id", "=", "','", ".", "join", "(", "ids", ")", "url", "+=", "f'&id={id}'", "time", ".", "sleep", "(", "PAUSE", ")", "resp", "=", "requests", ".", "get", "(", "url", ")", "if", "resp", ".", "status_code", "!=", "200", ":", "print", "(", "'There was a server error'", ")", "return", "text", "=", "resp", ".", "json", "(", ")", "time", ".", "sleep", "(", ".5", ")", "return", "ElinkResult", "(", "text", "[", "'linksets'", "]", "[", "0", "]", ".", "get", "(", "'dbfrom'", ",", "''", ")", ",", "text", "[", "'linksets'", "]", "[", "0", "]", ".", "get", "(", "'linksetdbhistories'", ",", "[", "{", "'dbto'", ":", "''", "}", "]", ")", "[", "0", "]", ".", "get", "(", "'dbto'", ",", "''", ")", ",", "text", "[", "'linksets'", "]", "[", "0", "]", ".", "get", "(", "'webenv'", ",", "''", ")", ",", "text", "[", "'linksets'", "]", "[", "0", "]", ".", "get", "(", "'linksetdbhistories'", ",", "[", "{", "'querykey'", ":", "''", "}", "]", ")", "[", "0", "]", ".", "get", "(", "'querykey'", ",", "''", ")", ",", ")" ]
Get links between Entrez databases using the ELink API.

    Parameters
    ----------
    db : str
        Entrez database to get ids from.
    dbfrom : str
        Entrez database the provided ids are from.
    ids : list or str
        List of IDs to submit to the server.
    webenv : str
        An Entrez WebEnv to use saved history.
    query_key : str
        An Entrez query_key to use saved history.
    api_key : str
        A user's API key which allows more requests per second.
    email : str
        A user's email which is required if not using an API key.

    Returns
    -------
    ElinkResult
        An ElinkResult with values [dbfrom, dbto, webenv, query_key], or
        None if there was a server error.
[ "Get", "document", "summaries", "using", "the", "Entrez", "ESearch", "API", "." ]
python
train
30.722222
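A hedged call mapping BioProject ids to SRA via saved history (the id and email are placeholders; NCBI asks for an email or api_key):

result = elink(db='sra', dbfrom='bioproject', ids=['257743'],
               email='you@example.com')
if result is not None:
    dbfrom, dbto, webenv, query_key = result   # fields per the Returns section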
Genida/django-appsettings
src/appsettings/settings.py
https://github.com/Genida/django-appsettings/blob/f98867d133558af7dc067f12b44fc1ee4edd4239/src/appsettings/settings.py#L522-L531
def run_validators(self, value): """Run the validators on the setting value.""" errors = [] for validator in self.validators: try: validator(value) except ValidationError as error: errors.extend(error.messages) if errors: raise ValidationError(errors)
[ "def", "run_validators", "(", "self", ",", "value", ")", ":", "errors", "=", "[", "]", "for", "validator", "in", "self", ".", "validators", ":", "try", ":", "validator", "(", "value", ")", "except", "ValidationError", "as", "error", ":", "errors", ".", "extend", "(", "error", ".", "messages", ")", "if", "errors", ":", "raise", "ValidationError", "(", "errors", ")" ]
Run the validators on the setting value.
[ "Run", "the", "validators", "on", "the", "setting", "value", "." ]
python
train
34.2
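The method aggregates every validator's messages before raising, rather than stopping at the first failure. A self-contained illustration of that pattern with hypothetical validator functions (plain-string messages avoid any Django settings requirement):

from django.core.exceptions import ValidationError

def not_negative(value):
    if value < 0:
        raise ValidationError('must be non-negative')

def small_enough(value):
    if value > 10:
        raise ValidationError('must be <= 10')

errors = []
for validator in (not_negative, small_enough):
    try:
        validator(42)
    except ValidationError as error:
        errors.extend(error.messages)
if errors:
    raise ValidationError(errors)   # a single error carrying every message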
aloetesting/aloe_webdriver
aloe_webdriver/util.py
https://github.com/aloetesting/aloe_webdriver/blob/65d847da4bdc63f9c015cb19d4efdee87df8ffad/aloe_webdriver/util.py#L142-L154
def _select(self): """Fetch the elements from the browser.""" for element in self.browser.find_elements_by_xpath(self.xpath): if self.filter_displayed: if not element.is_displayed(): continue if self.filter_enabled: if not element.is_enabled(): continue yield element
[ "def", "_select", "(", "self", ")", ":", "for", "element", "in", "self", ".", "browser", ".", "find_elements_by_xpath", "(", "self", ".", "xpath", ")", ":", "if", "self", ".", "filter_displayed", ":", "if", "not", "element", ".", "is_displayed", "(", ")", ":", "continue", "if", "self", ".", "filter_enabled", ":", "if", "not", "element", ".", "is_enabled", "(", ")", ":", "continue", "yield", "element" ]
Fetch the elements from the browser.
[ "Fetch", "the", "elements", "from", "the", "browser", "." ]
python
train
29.384615
PmagPy/PmagPy
programs/vgpmap_magic2.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/programs/vgpmap_magic2.py#L13-L251
def main(): """ NAME vgpmap_magic.py DESCRIPTION makes a map of vgps and a95/dp,dm for site means in a pmag_results table SYNTAX vgpmap_magic.py [command line options] OPTIONS -h prints help and quits -eye ELAT ELON [specify eyeball location], default is 90., 0. -f FILE pmag_results format file, [default is pmag_results.txt] -res [c,l,i,h] specify resolution (crude, low, intermediate, high] -etp plot the etopo20 topographpy data (requires high resolution data set) -prj PROJ, specify one of the following: ortho = orthographic lcc = lambert conformal moll = molweide merc = mercator -sym SYM SIZE: choose a symbol and size, examples: ro 5 : small red circles bs 10 : intermediate blue squares g^ 20 : large green triangles -ell plot dp/dm or a95 ellipses -rev RSYM RSIZE : flip reverse poles to normal antipode -S: plot antipodes of all poles -age : plot the ages next to the poles -crd [g,t] : choose coordinate system, default is to plot all site VGPs -fmt [pdf, png, eps...] specify output format, default is pdf -sav save and quit DEFAULTS FILE: pmag_results.txt res: c prj: ortho ELAT,ELON = 0,0 SYM SIZE: ro 8 RSYM RSIZE: g^ 8 """ dir_path = '.' res, ages = 'c', 0 plot = 0 proj = 'ortho' results_file = 'pmag_results.txt' ell, flip = 0, 0 lat_0, lon_0 = 90., 0. fmt = 'pdf' sym, size = 'ro', 8 rsym, rsize = 'g^', 8 anti = 0 fancy = 0 coord = "" if '-WD' in sys.argv: ind = sys.argv.index('-WD') dir_path = sys.argv[ind+1] if '-h' in sys.argv: print(main.__doc__) sys.exit() if '-S' in sys.argv: anti = 1 if '-fmt' in sys.argv: ind = sys.argv.index('-fmt') fmt = sys.argv[ind+1] if '-sav' in sys.argv: plot = 1 if '-res' in sys.argv: ind = sys.argv.index('-res') res = sys.argv[ind+1] if '-etp' in sys.argv: fancy = 1 if '-prj' in sys.argv: ind = sys.argv.index('-prj') proj = sys.argv[ind+1] if '-rev' in sys.argv: flip = 1 ind = sys.argv.index('-rev') rsym = (sys.argv[ind+1]) rsize = int(sys.argv[ind+2]) if '-sym' in sys.argv: ind = sys.argv.index('-sym') sym = (sys.argv[ind+1]) size = int(sys.argv[ind+2]) if '-eye' in sys.argv: ind = sys.argv.index('-eye') lat_0 = float(sys.argv[ind+1]) lon_0 = float(sys.argv[ind+2]) if '-ell' in sys.argv: ell = 1 if '-age' in sys.argv: ages = 1 if '-f' in sys.argv: ind = sys.argv.index('-f') results_file = sys.argv[ind+1] if '-crd' in sys.argv: ind = sys.argv.index('-crd') crd = sys.argv[ind+1] if crd == 'g': coord = '0' if crd == 't': coord = '100' results_file = dir_path+'/'+results_file data, file_type = pmag.magic_read(results_file) if file_type != 'pmag_results': print("bad results file") sys.exit() FIG = {'map': 1} pmagplotlib.plot_init(FIG['map'], 6, 6) # read in er_sites file lats, lons, dp, dm, a95 = [], [], [], [], [] Pars = [] dates, rlats, rlons = [], [], [] if 'data_type' in data[0].keys(): # get all site level data Results = pmag.get_dictitem(data, 'data_type', 'i', 'T') else: Results = data # get all non-blank latitudes Results = pmag.get_dictitem(Results, 'vgp_lat', '', 'F') # get all non-blank longitudes Results = pmag.get_dictitem(Results, 'vgp_lon', '', 'F') if coord != "": # get specified coordinate system Results = pmag.get_dictitem(Results, 'tilt_correction', coord, 'T') location = "" for rec in Results: if rec['er_location_names'] not in location: location = location+':'+rec['er_location_names'] if 'average_age' in rec.keys() and rec['average_age'] != "" and ages == 1: dates.append(rec['average_age']) lat = float(rec['vgp_lat']) lon = float(rec['vgp_lon']) if flip == 0: lats.append(lat) lons.append(lon) elif flip == 1: if lat < 0: rlats.append(-lat) 
lon = lon+180. if lon > 360: lon = lon-360. rlons.append(lon) else: lats.append(lat) lons.append(lon) elif anti == 1: lats.append(-lat) lon = lon+180. if lon > 360: lon = lon-360. lons.append(lon) ppars = [] ppars.append(lon) ppars.append(lat) ell1, ell2 = "", "" if 'vgp_dm' in rec.keys() and rec['vgp_dm'] != "": ell1 = float(rec['vgp_dm']) if 'vgp_dp' in rec.keys() and rec['vgp_dp'] != "": ell2 = float(rec['vgp_dp']) if 'vgp_alpha95' in rec.keys() and rec['vgp_alpha95'] != "": ell1, ell2 = float(rec['vgp_alpha95']), float(rec['vgp_alpha95']) if ell1 != "" and ell2 != "": ppars = [] ppars.append(lons[-1]) ppars.append(lats[-1]) ppars.append(ell1) ppars.append(lons[-1]) isign = abs(lats[-1])/lats[-1] ppars.append(lats[-1]-isign*90.) ppars.append(ell2) ppars.append(lons[-1]+90.) ppars.append(0.) Pars.append(ppars) location = location.strip(':') Opts = {'latmin': -90, 'latmax': 90, 'lonmin': 0., 'lonmax': 360., 'lat_0': lat_0, 'lon_0': lon_0, 'proj': proj, 'sym': 'bs', 'symsize': 3, 'pltgrid': 0, 'res': res, 'boundinglat': 0.} Opts['details'] = {'coasts': 1, 'rivers': 0, 'states': 0, 'countries': 0, 'ocean': 1, 'fancy': fancy} # make the base map with a blue triangle at the pole` pmagplotlib.plot_map(FIG['map'], [90.], [0.], Opts) Opts['pltgrid'] = -1 Opts['sym'] = sym Opts['symsize'] = size if len(dates) > 0: Opts['names'] = dates if len(lats) > 0: # add the lats and lons of the poles pmagplotlib.plot_map(FIG['map'], lats, lons, Opts) Opts['names'] = [] if len(rlats) > 0: Opts['sym'] = rsym Opts['symsize'] = rsize # add the lats and lons of the poles pmagplotlib.plot_map(FIG['map'], rlats, rlons, Opts) if plot == 0: pmagplotlib.draw_figs(FIG) if ell == 1: # add ellipses if desired. Opts['details'] = {'coasts': 0, 'rivers': 0, 'states': 0, 'countries': 0, 'ocean': 0} Opts['pltgrid'] = -1 # turn off meridian replotting Opts['symsize'] = 2 Opts['sym'] = 'g-' for ppars in Pars: if ppars[2] != 0: PTS = pmagplotlib.plot_ell(FIG['map'], ppars, 'g.', 0, 0) elats, elons = [], [] for pt in PTS: elons.append(pt[0]) elats.append(pt[1]) # make the base map with a blue triangle at the pole` pmagplotlib.plot_map(FIG['map'], elats, elons, Opts) if plot == 0: pmagplotlib.draw_figs(FIG) files = {} for key in FIG.keys(): if pmagplotlib.isServer: # use server plot naming convention files[key] = 'LO:_'+location+'_VGP_map.'+fmt else: # use more readable plot naming convention files[key] = '{}_VGP_map.{}'.format( location.replace(' ', '_'), fmt) if pmagplotlib.isServer: black = '#000000' purple = '#800080' titles = {} titles['eq'] = 'LO:_'+location+'_VGP_map' FIG = pmagplotlib.add_borders(FIG, titles, black, purple) pmagplotlib.save_plots(FIG, files) elif plot == 0: pmagplotlib.draw_figs(FIG) ans = input(" S[a]ve to save plot, Return to quit: ") if ans == "a": pmagplotlib.save_plots(FIG, files) else: print("Good bye") sys.exit() else: pmagplotlib.save_plots(FIG, files)
[ "def", "main", "(", ")", ":", "dir_path", "=", "'.'", "res", ",", "ages", "=", "'c'", ",", "0", "plot", "=", "0", "proj", "=", "'ortho'", "results_file", "=", "'pmag_results.txt'", "ell", ",", "flip", "=", "0", ",", "0", "lat_0", ",", "lon_0", "=", "90.", ",", "0.", "fmt", "=", "'pdf'", "sym", ",", "size", "=", "'ro'", ",", "8", "rsym", ",", "rsize", "=", "'g^'", ",", "8", "anti", "=", "0", "fancy", "=", "0", "coord", "=", "\"\"", "if", "'-WD'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-WD'", ")", "dir_path", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-h'", "in", "sys", ".", "argv", ":", "print", "(", "main", ".", "__doc__", ")", "sys", ".", "exit", "(", ")", "if", "'-S'", "in", "sys", ".", "argv", ":", "anti", "=", "1", "if", "'-fmt'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-fmt'", ")", "fmt", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-sav'", "in", "sys", ".", "argv", ":", "plot", "=", "1", "if", "'-res'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-res'", ")", "res", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-etp'", "in", "sys", ".", "argv", ":", "fancy", "=", "1", "if", "'-prj'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-prj'", ")", "proj", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-rev'", "in", "sys", ".", "argv", ":", "flip", "=", "1", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-rev'", ")", "rsym", "=", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "rsize", "=", "int", "(", "sys", ".", "argv", "[", "ind", "+", "2", "]", ")", "if", "'-sym'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-sym'", ")", "sym", "=", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "size", "=", "int", "(", "sys", ".", "argv", "[", "ind", "+", "2", "]", ")", "if", "'-eye'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-eye'", ")", "lat_0", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "1", "]", ")", "lon_0", "=", "float", "(", "sys", ".", "argv", "[", "ind", "+", "2", "]", ")", "if", "'-ell'", "in", "sys", ".", "argv", ":", "ell", "=", "1", "if", "'-age'", "in", "sys", ".", "argv", ":", "ages", "=", "1", "if", "'-f'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-f'", ")", "results_file", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "'-crd'", "in", "sys", ".", "argv", ":", "ind", "=", "sys", ".", "argv", ".", "index", "(", "'-crd'", ")", "crd", "=", "sys", ".", "argv", "[", "ind", "+", "1", "]", "if", "crd", "==", "'g'", ":", "coord", "=", "'0'", "if", "crd", "==", "'t'", ":", "coord", "=", "'100'", "results_file", "=", "dir_path", "+", "'/'", "+", "results_file", "data", ",", "file_type", "=", "pmag", ".", "magic_read", "(", "results_file", ")", "if", "file_type", "!=", "'pmag_results'", ":", "print", "(", "\"bad results file\"", ")", "sys", ".", "exit", "(", ")", "FIG", "=", "{", "'map'", ":", "1", "}", "pmagplotlib", ".", "plot_init", "(", "FIG", "[", "'map'", "]", ",", "6", ",", "6", ")", "# read in er_sites file", "lats", ",", "lons", ",", "dp", ",", "dm", ",", "a95", "=", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", ",", "[", "]", "Pars", "=", "[", "]", "dates", ",", "rlats", ",", "rlons", "=", "[", "]", ",", "[", "]", ",", "[", "]", "if", "'data_type'", "in", "data", "[", "0", "]", ".", 
"keys", "(", ")", ":", "# get all site level data", "Results", "=", "pmag", ".", "get_dictitem", "(", "data", ",", "'data_type'", ",", "'i'", ",", "'T'", ")", "else", ":", "Results", "=", "data", "# get all non-blank latitudes", "Results", "=", "pmag", ".", "get_dictitem", "(", "Results", ",", "'vgp_lat'", ",", "''", ",", "'F'", ")", "# get all non-blank longitudes", "Results", "=", "pmag", ".", "get_dictitem", "(", "Results", ",", "'vgp_lon'", ",", "''", ",", "'F'", ")", "if", "coord", "!=", "\"\"", ":", "# get specified coordinate system", "Results", "=", "pmag", ".", "get_dictitem", "(", "Results", ",", "'tilt_correction'", ",", "coord", ",", "'T'", ")", "location", "=", "\"\"", "for", "rec", "in", "Results", ":", "if", "rec", "[", "'er_location_names'", "]", "not", "in", "location", ":", "location", "=", "location", "+", "':'", "+", "rec", "[", "'er_location_names'", "]", "if", "'average_age'", "in", "rec", ".", "keys", "(", ")", "and", "rec", "[", "'average_age'", "]", "!=", "\"\"", "and", "ages", "==", "1", ":", "dates", ".", "append", "(", "rec", "[", "'average_age'", "]", ")", "lat", "=", "float", "(", "rec", "[", "'vgp_lat'", "]", ")", "lon", "=", "float", "(", "rec", "[", "'vgp_lon'", "]", ")", "if", "flip", "==", "0", ":", "lats", ".", "append", "(", "lat", ")", "lons", ".", "append", "(", "lon", ")", "elif", "flip", "==", "1", ":", "if", "lat", "<", "0", ":", "rlats", ".", "append", "(", "-", "lat", ")", "lon", "=", "lon", "+", "180.", "if", "lon", ">", "360", ":", "lon", "=", "lon", "-", "360.", "rlons", ".", "append", "(", "lon", ")", "else", ":", "lats", ".", "append", "(", "lat", ")", "lons", ".", "append", "(", "lon", ")", "elif", "anti", "==", "1", ":", "lats", ".", "append", "(", "-", "lat", ")", "lon", "=", "lon", "+", "180.", "if", "lon", ">", "360", ":", "lon", "=", "lon", "-", "360.", "lons", ".", "append", "(", "lon", ")", "ppars", "=", "[", "]", "ppars", ".", "append", "(", "lon", ")", "ppars", ".", "append", "(", "lat", ")", "ell1", ",", "ell2", "=", "\"\"", ",", "\"\"", "if", "'vgp_dm'", "in", "rec", ".", "keys", "(", ")", "and", "rec", "[", "'vgp_dm'", "]", "!=", "\"\"", ":", "ell1", "=", "float", "(", "rec", "[", "'vgp_dm'", "]", ")", "if", "'vgp_dp'", "in", "rec", ".", "keys", "(", ")", "and", "rec", "[", "'vgp_dp'", "]", "!=", "\"\"", ":", "ell2", "=", "float", "(", "rec", "[", "'vgp_dp'", "]", ")", "if", "'vgp_alpha95'", "in", "rec", ".", "keys", "(", ")", "and", "rec", "[", "'vgp_alpha95'", "]", "!=", "\"\"", ":", "ell1", ",", "ell2", "=", "float", "(", "rec", "[", "'vgp_alpha95'", "]", ")", ",", "float", "(", "rec", "[", "'vgp_alpha95'", "]", ")", "if", "ell1", "!=", "\"\"", "and", "ell2", "!=", "\"\"", ":", "ppars", "=", "[", "]", "ppars", ".", "append", "(", "lons", "[", "-", "1", "]", ")", "ppars", ".", "append", "(", "lats", "[", "-", "1", "]", ")", "ppars", ".", "append", "(", "ell1", ")", "ppars", ".", "append", "(", "lons", "[", "-", "1", "]", ")", "isign", "=", "abs", "(", "lats", "[", "-", "1", "]", ")", "/", "lats", "[", "-", "1", "]", "ppars", ".", "append", "(", "lats", "[", "-", "1", "]", "-", "isign", "*", "90.", ")", "ppars", ".", "append", "(", "ell2", ")", "ppars", ".", "append", "(", "lons", "[", "-", "1", "]", "+", "90.", ")", "ppars", ".", "append", "(", "0.", ")", "Pars", ".", "append", "(", "ppars", ")", "location", "=", "location", ".", "strip", "(", "':'", ")", "Opts", "=", "{", "'latmin'", ":", "-", "90", ",", "'latmax'", ":", "90", ",", "'lonmin'", ":", "0.", ",", "'lonmax'", ":", "360.", ",", "'lat_0'", ":", "lat_0", ",", 
"'lon_0'", ":", "lon_0", ",", "'proj'", ":", "proj", ",", "'sym'", ":", "'bs'", ",", "'symsize'", ":", "3", ",", "'pltgrid'", ":", "0", ",", "'res'", ":", "res", ",", "'boundinglat'", ":", "0.", "}", "Opts", "[", "'details'", "]", "=", "{", "'coasts'", ":", "1", ",", "'rivers'", ":", "0", ",", "'states'", ":", "0", ",", "'countries'", ":", "0", ",", "'ocean'", ":", "1", ",", "'fancy'", ":", "fancy", "}", "# make the base map with a blue triangle at the pole`", "pmagplotlib", ".", "plot_map", "(", "FIG", "[", "'map'", "]", ",", "[", "90.", "]", ",", "[", "0.", "]", ",", "Opts", ")", "Opts", "[", "'pltgrid'", "]", "=", "-", "1", "Opts", "[", "'sym'", "]", "=", "sym", "Opts", "[", "'symsize'", "]", "=", "size", "if", "len", "(", "dates", ")", ">", "0", ":", "Opts", "[", "'names'", "]", "=", "dates", "if", "len", "(", "lats", ")", ">", "0", ":", "# add the lats and lons of the poles", "pmagplotlib", ".", "plot_map", "(", "FIG", "[", "'map'", "]", ",", "lats", ",", "lons", ",", "Opts", ")", "Opts", "[", "'names'", "]", "=", "[", "]", "if", "len", "(", "rlats", ")", ">", "0", ":", "Opts", "[", "'sym'", "]", "=", "rsym", "Opts", "[", "'symsize'", "]", "=", "rsize", "# add the lats and lons of the poles", "pmagplotlib", ".", "plot_map", "(", "FIG", "[", "'map'", "]", ",", "rlats", ",", "rlons", ",", "Opts", ")", "if", "plot", "==", "0", ":", "pmagplotlib", ".", "draw_figs", "(", "FIG", ")", "if", "ell", "==", "1", ":", "# add ellipses if desired.", "Opts", "[", "'details'", "]", "=", "{", "'coasts'", ":", "0", ",", "'rivers'", ":", "0", ",", "'states'", ":", "0", ",", "'countries'", ":", "0", ",", "'ocean'", ":", "0", "}", "Opts", "[", "'pltgrid'", "]", "=", "-", "1", "# turn off meridian replotting", "Opts", "[", "'symsize'", "]", "=", "2", "Opts", "[", "'sym'", "]", "=", "'g-'", "for", "ppars", "in", "Pars", ":", "if", "ppars", "[", "2", "]", "!=", "0", ":", "PTS", "=", "pmagplotlib", ".", "plot_ell", "(", "FIG", "[", "'map'", "]", ",", "ppars", ",", "'g.'", ",", "0", ",", "0", ")", "elats", ",", "elons", "=", "[", "]", ",", "[", "]", "for", "pt", "in", "PTS", ":", "elons", ".", "append", "(", "pt", "[", "0", "]", ")", "elats", ".", "append", "(", "pt", "[", "1", "]", ")", "# make the base map with a blue triangle at the pole`", "pmagplotlib", ".", "plot_map", "(", "FIG", "[", "'map'", "]", ",", "elats", ",", "elons", ",", "Opts", ")", "if", "plot", "==", "0", ":", "pmagplotlib", ".", "draw_figs", "(", "FIG", ")", "files", "=", "{", "}", "for", "key", "in", "FIG", ".", "keys", "(", ")", ":", "if", "pmagplotlib", ".", "isServer", ":", "# use server plot naming convention", "files", "[", "key", "]", "=", "'LO:_'", "+", "location", "+", "'_VGP_map.'", "+", "fmt", "else", ":", "# use more readable plot naming convention", "files", "[", "key", "]", "=", "'{}_VGP_map.{}'", ".", "format", "(", "location", ".", "replace", "(", "' '", ",", "'_'", ")", ",", "fmt", ")", "if", "pmagplotlib", ".", "isServer", ":", "black", "=", "'#000000'", "purple", "=", "'#800080'", "titles", "=", "{", "}", "titles", "[", "'eq'", "]", "=", "'LO:_'", "+", "location", "+", "'_VGP_map'", "FIG", "=", "pmagplotlib", ".", "add_borders", "(", "FIG", ",", "titles", ",", "black", ",", "purple", ")", "pmagplotlib", ".", "save_plots", "(", "FIG", ",", "files", ")", "elif", "plot", "==", "0", ":", "pmagplotlib", ".", "draw_figs", "(", "FIG", ")", "ans", "=", "input", "(", "\" S[a]ve to save plot, Return to quit: \"", ")", "if", "ans", "==", "\"a\"", ":", "pmagplotlib", ".", "save_plots", "(", "FIG", ",", "files", ")", "else", ":", 
"print", "(", "\"Good bye\"", ")", "sys", ".", "exit", "(", ")", "else", ":", "pmagplotlib", ".", "save_plots", "(", "FIG", ",", "files", ")" ]
NAME vgpmap_magic.py DESCRIPTION makes a map of vgps and a95/dp,dm for site means in a pmag_results table SYNTAX vgpmap_magic.py [command line options] OPTIONS -h prints help and quits -eye ELAT ELON [specify eyeball location], default is 90., 0. -f FILE pmag_results format file, [default is pmag_results.txt] -res [c,l,i,h] specify resolution (crude, low, intermediate, high) -etp plot the etopo20 topography data (requires high resolution data set) -prj PROJ, specify one of the following: ortho = orthographic lcc = lambert conformal moll = Mollweide merc = mercator -sym SYM SIZE: choose a symbol and size, examples: ro 5 : small red circles bs 10 : intermediate blue squares g^ 20 : large green triangles -ell plot dp/dm or a95 ellipses -rev RSYM RSIZE : flip reverse poles to normal antipode -S: plot antipodes of all poles -age : plot the ages next to the poles -crd [g,t] : choose coordinate system, default is to plot all site VGPs -fmt [pdf, png, eps...] specify output format, default is pdf -sav save and quit DEFAULTS FILE: pmag_results.txt res: c prj: ortho ELAT,ELON = 90,0 SYM SIZE: ro 8 RSYM RSIZE: g^ 8
[ "NAME", "vgpmap_magic", ".", "py" ]
python
train
34.179916
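A hedged sketch of driving the vgpmap_magic.py record above from Python; the module import name and the data file are assumptions, and the flags are taken from the OPTIONS list in the docstring (the script is normally run from the shell with the same arguments):

import sys
import vgpmap_magic  # assumed importable module name for the script above

sys.argv = ['vgpmap_magic.py', '-f', 'pmag_results.txt',
            '-prj', 'moll', '-sym', 'bs', '10', '-sav', '-fmt', 'png']
vgpmap_magic.main()  # parses sys.argv exactly as documented above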
neon-jungle/wagtailmodelchooser
wagtailmodelchooser/views.py
https://github.com/neon-jungle/wagtailmodelchooser/blob/8dd1e33dd61418a726ff3acf67a956626c8b7ba1/wagtailmodelchooser/views.py#L15-L39
def instance_from_str(instance_str): """ Given an instance string in the form "app.Model:pk", returns a tuple of ``(model, instance)``. If the pk part is empty, ``instance`` will be ``None``. Raises ``ValueError`` on invalid model strings or missing instances. """ match = instance_str_re.match(instance_str) if not match: raise ValueError("Invalid instance string") model_string = match.group(1) try: model = apps.get_model(model_string) except (LookupError, ValueError): raise ValueError("Invalid instance string") pk = match.group(2) if pk: try: return model, model._default_manager.get(pk=pk) except model.DoesNotExist: raise ValueError("Invalid instance string") return model, None
[ "def", "instance_from_str", "(", "instance_str", ")", ":", "match", "=", "instance_str_re", ".", "match", "(", "instance_str", ")", "if", "not", "match", ":", "raise", "ValueError", "(", "\"Invalid instance string\"", ")", "model_string", "=", "match", ".", "group", "(", "1", ")", "try", ":", "model", "=", "apps", ".", "get_model", "(", "model_string", ")", "except", "(", "LookupError", ",", "ValueError", ")", ":", "raise", "ValueError", "(", "\"Invalid instance string\"", ")", "pk", "=", "match", ".", "group", "(", "2", ")", "if", "pk", ":", "try", ":", "return", "model", ",", "model", ".", "_default_manager", ".", "get", "(", "pk", "=", "pk", ")", "except", "model", ".", "DoesNotExist", ":", "raise", "ValueError", "(", "\"Invalid instance string\"", ")", "return", "model", ",", "None" ]
Given an instance string in the form "app.Model:pk", returns a tuple of ``(model, instance)``. If the pk part is empty, ``instance`` will be ``None``. Raises ``ValueError`` on invalid model strings or missing instances.
[ "Given", "an", "instance", "string", "in", "the", "form", "app", ".", "Model", ":", "pk", "returns", "a", "tuple", "of", "(", "model", "instance", ")", ".", "If", "the", "pk", "part", "is", "empty", "instance", "will", "be", "None", ".", "Raises", "ValueError", "on", "invalid", "model", "strings", "or", "missing", "instances", "." ]
python
valid
31.4
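A minimal usage sketch for instance_from_str above; 'auth.User' and the pk are illustrative values that assume Django's auth app is installed:

model, instance = instance_from_str('auth.User:1')  # fetches the User with pk=1
model, instance = instance_from_str('auth.User:')   # empty pk: instance is None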
allenai/allennlp
allennlp/common/params.py
https://github.com/allenai/allennlp/blob/648a36f77db7e45784c047176074f98534c76636/allennlp/common/params.py#L396-L404
def assert_empty(self, class_name: str): """ Raises a ``ConfigurationError`` if ``self.params`` is not empty. We take ``class_name`` as an argument so that the error message gives some idea of where an error happened, if there was one. ``class_name`` should be the name of the `calling` class, the one that got extra parameters (if there are any). """ if self.params: raise ConfigurationError("Extra parameters passed to {}: {}".format(class_name, self.params))
[ "def", "assert_empty", "(", "self", ",", "class_name", ":", "str", ")", ":", "if", "self", ".", "params", ":", "raise", "ConfigurationError", "(", "\"Extra parameters passed to {}: {}\"", ".", "format", "(", "class_name", ",", "self", ".", "params", ")", ")" ]
Raises a ``ConfigurationError`` if ``self.params`` is not empty. We take ``class_name`` as an argument so that the error message gives some idea of where an error happened, if there was one. ``class_name`` should be the name of the `calling` class, the one that got extra parameters (if there are any).
[ "Raises", "a", "ConfigurationError", "if", "self", ".", "params", "is", "not", "empty", ".", "We", "take", "class_name", "as", "an", "argument", "so", "that", "the", "error", "message", "gives", "some", "idea", "of", "where", "an", "error", "happened", "if", "there", "was", "one", ".", "class_name", "should", "be", "the", "name", "of", "the", "calling", "class", "the", "one", "that", "got", "extra", "parameters", "(", "if", "there", "are", "any", ")", "." ]
python
train
58.111111
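A short sketch of the intended call pattern for assert_empty; the Params contents and the class name are hypothetical:

params = Params({'hidden_size': 128})
hidden_size = params.pop('hidden_size')  # consume every expected key first
params.assert_empty('MyModel')           # raises ConfigurationError if keys remain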
csparpa/pyowm
pyowm/uvindexapi30/uvindex.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/uvindexapi30/uvindex.py#L123-L143
def to_XML(self, xml_declaration=True, xmlns=True): """ Dumps object fields to an XML-formatted string. The 'xml_declaration' switch enables printing of a leading standard XML line containing XML version and encoding. The 'xmlns' switch enables printing of qualified XMLNS prefixes. :param XML_declaration: if ``True`` (default) prints a leading XML declaration line :type XML_declaration: bool :param xmlns: if ``True`` (default) prints full XMLNS prefixes :type xmlns: bool :returns: an XML-formatted string """ root_node = self._to_DOM() if xmlns: xmlutils.annotate_with_XMLNS(root_node, UVINDEX_XMLNS_PREFIX, UVINDEX_XMLNS_URL) return xmlutils.DOM_node_to_XML(root_node, xml_declaration)
[ "def", "to_XML", "(", "self", ",", "xml_declaration", "=", "True", ",", "xmlns", "=", "True", ")", ":", "root_node", "=", "self", ".", "_to_DOM", "(", ")", "if", "xmlns", ":", "xmlutils", ".", "annotate_with_XMLNS", "(", "root_node", ",", "UVINDEX_XMLNS_PREFIX", ",", "UVINDEX_XMLNS_URL", ")", "return", "xmlutils", ".", "DOM_node_to_XML", "(", "root_node", ",", "xml_declaration", ")" ]
Dumps object fields to an XML-formatted string. The 'xml_declaration' switch enables printing of a leading standard XML line containing XML version and encoding. The 'xmlns' switch enables printing of qualified XMLNS prefixes. :param xml_declaration: if ``True`` (default) prints a leading XML declaration line :type xml_declaration: bool :param xmlns: if ``True`` (default) prints full XMLNS prefixes :type xmlns: bool :returns: an XML-formatted string
[ "Dumps", "object", "fields", "to", "an", "XML", "-", "formatted", "string", ".", "The", "xml_declaration", "switch", "enables", "printing", "of", "a", "leading", "standard", "XML", "line", "containing", "XML", "version", "and", "encoding", ".", "The", "xmlns", "switch", "enables", "printing", "of", "qualified", "XMLNS", "prefixes", "." ]
python
train
42.47619
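Hypothetical usage of to_XML; uvi stands for a UVIndex instance obtained elsewhere:

xml_full = uvi.to_XML()                                    # declaration and XMLNS prefixes
xml_bare = uvi.to_XML(xml_declaration=False, xmlns=False)  # neither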
viniciuschiele/flask-io
flask_io/io.py
https://github.com/viniciuschiele/flask-io/blob/4e559419b3d8e6859f83fa16557b00542d5f3aa7/flask_io/io.py#L334-L369
def __make_response(self, data, default_renderer=None): """ Creates a Flask response object from the specified data. The appropriated encoder is taken based on the request header Accept. If there is not data to be serialized the response status code is 204. :param data: The Python object to be serialized. :return: A Flask response object. """ status = headers = None if isinstance(data, tuple): data, status, headers = unpack(data) if data is None: data = self.__app.response_class(status=204) elif not isinstance(data, self.__app.response_class): renderer, mimetype = self.content_negotiation.select_renderer(request, self.default_renderers) if not renderer: if not default_renderer: raise NotAcceptable() renderer = default_renderer mimetype = default_renderer.mimetype data_bytes = renderer.render(data, mimetype) data = self.__app.response_class(data_bytes, mimetype=str(mimetype)) if status is not None: data.status_code = status if headers: data.headers.extend(headers) return data
[ "def", "__make_response", "(", "self", ",", "data", ",", "default_renderer", "=", "None", ")", ":", "status", "=", "headers", "=", "None", "if", "isinstance", "(", "data", ",", "tuple", ")", ":", "data", ",", "status", ",", "headers", "=", "unpack", "(", "data", ")", "if", "data", "is", "None", ":", "data", "=", "self", ".", "__app", ".", "response_class", "(", "status", "=", "204", ")", "elif", "not", "isinstance", "(", "data", ",", "self", ".", "__app", ".", "response_class", ")", ":", "renderer", ",", "mimetype", "=", "self", ".", "content_negotiation", ".", "select_renderer", "(", "request", ",", "self", ".", "default_renderers", ")", "if", "not", "renderer", ":", "if", "not", "default_renderer", ":", "raise", "NotAcceptable", "(", ")", "renderer", "=", "default_renderer", "mimetype", "=", "default_renderer", ".", "mimetype", "data_bytes", "=", "renderer", ".", "render", "(", "data", ",", "mimetype", ")", "data", "=", "self", ".", "__app", ".", "response_class", "(", "data_bytes", ",", "mimetype", "=", "str", "(", "mimetype", ")", ")", "if", "status", "is", "not", "None", ":", "data", ".", "status_code", "=", "status", "if", "headers", ":", "data", ".", "headers", ".", "extend", "(", "headers", ")", "return", "data" ]
Creates a Flask response object from the specified data. The appropriate encoder is taken based on the request header Accept. If there is no data to be serialized the response status code is 204. :param data: The Python object to be serialized. :return: A Flask response object.
[ "Creates", "a", "Flask", "response", "object", "from", "the", "specified", "data", ".", "The", "appropriated", "encoder", "is", "taken", "based", "on", "the", "request", "header", "Accept", ".", "If", "there", "is", "not", "data", "to", "be", "serialized", "the", "response", "status", "code", "is", "204", "." ]
python
train
34.416667
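The unpack() call above lets a view hand back either the payload alone or a tuple carrying status and headers; a hedged sketch with an illustrative payload (how two-element tuples are treated depends on unpack(), which is not shown):

def create_user():  # hypothetical flask-io view
    payload = {'id': 7}
    return payload, 201, {'X-Request-Id': 'abc'}  # __make_response applies status and headers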
materialsproject/pymatgen
pymatgen/io/vasp/inputs.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/vasp/inputs.py#L1620-L1632
def element(self): """ Attempt to return the atomic symbol based on the VRHFIN keyword. """ element = self.keywords["VRHFIN"].split(":")[0].strip() try: return Element(element).symbol except ValueError: # VASP incorrectly gives the element symbol for Xe as "X" # Some potentials, e.g., Zr_sv, gives the symbol as r. if element == "X": return "Xe" return Element(self.symbol.split("_")[0]).symbol
[ "def", "element", "(", "self", ")", ":", "element", "=", "self", ".", "keywords", "[", "\"VRHFIN\"", "]", ".", "split", "(", "\":\"", ")", "[", "0", "]", ".", "strip", "(", ")", "try", ":", "return", "Element", "(", "element", ")", ".", "symbol", "except", "ValueError", ":", "# VASP incorrectly gives the element symbol for Xe as \"X\"", "# Some potentials, e.g., Zr_sv, gives the symbol as r.", "if", "element", "==", "\"X\"", ":", "return", "\"Xe\"", "return", "Element", "(", "self", ".", "symbol", ".", "split", "(", "\"_\"", ")", "[", "0", "]", ")", ".", "symbol" ]
Attempt to return the atomic symbol based on the VRHFIN keyword.
[ "Attempt", "to", "return", "the", "atomic", "symbol", "based", "on", "the", "VRHFIN", "keyword", "." ]
python
train
39
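A stand-alone sketch of the VRHFIN parsing performed by element above; the keyword value is illustrative:

keywords = {'VRHFIN': 'Fe: d7 s1'}                 # as read from a POTCAR header
symbol = keywords['VRHFIN'].split(':')[0].strip()  # -> 'Fe'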
OpenHumans/open-humans-api
ohapi/utils_fs.py
https://github.com/OpenHumans/open-humans-api/blob/ca2a28cf5d55cfdae13dd222ba58c25565bdb86e/ohapi/utils_fs.py#L176-L206
def load_metadata_csv(input_filepath): """ Return dict of metadata. Format is either dict (filenames are keys) or dict-of-dicts (project member IDs as top level keys, then filenames as keys). :param input_filepath: This field is the filepath of the csv file. """ with open(input_filepath) as f: csv_in = csv.reader(f) header = next(csv_in) if 'tags' in header: tags_idx = header.index('tags') else: raise ValueError('"tags" is a compulsory column in metadata file.') if header[0] == 'project_member_id': if header[1] == 'filename': metadata = load_metadata_csv_multi_user(csv_in, header, tags_idx) else: raise ValueError('The second column must be "filename"') elif header[0] == 'filename': metadata = load_metadata_csv_single_user(csv_in, header, tags_idx) else: raise ValueError('Incorrect Formatting of metadata. The first' + ' column for single user upload should be' + ' "filename". For multiuser uploads the first ' + 'column should be "project member id" and the' + ' second column should be "filename"') return metadata
[ "def", "load_metadata_csv", "(", "input_filepath", ")", ":", "with", "open", "(", "input_filepath", ")", "as", "f", ":", "csv_in", "=", "csv", ".", "reader", "(", "f", ")", "header", "=", "next", "(", "csv_in", ")", "if", "'tags'", "in", "header", ":", "tags_idx", "=", "header", ".", "index", "(", "'tags'", ")", "else", ":", "raise", "ValueError", "(", "'\"tags\" is a compulsory column in metadata file.'", ")", "if", "header", "[", "0", "]", "==", "'project_member_id'", ":", "if", "header", "[", "1", "]", "==", "'filename'", ":", "metadata", "=", "load_metadata_csv_multi_user", "(", "csv_in", ",", "header", ",", "tags_idx", ")", "else", ":", "raise", "ValueError", "(", "'The second column must be \"filename\"'", ")", "elif", "header", "[", "0", "]", "==", "'filename'", ":", "metadata", "=", "load_metadata_csv_single_user", "(", "csv_in", ",", "header", ",", "tags_idx", ")", "else", ":", "raise", "ValueError", "(", "'Incorrect Formatting of metadata. The first'", "+", "' column for single user upload should be'", "+", "' \"filename\". For multiuser uploads the first '", "+", "'column should be \"project member id\" and the'", "+", "' second column should be \"filename\"'", ")", "return", "metadata" ]
Return dict of metadata. Format is either dict (filenames are keys) or dict-of-dicts (project member IDs as top level keys, then filenames as keys). :param input_filepath: This field is the filepath of the csv file.
[ "Return", "dict", "of", "metadata", "." ]
python
train
43.967742
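Illustrative CSV headers accepted by load_metadata_csv; the file name is hypothetical, and a 'tags' column must be present in either layout:

# single-user file:  filename,tags,description
# multi-user file:   project_member_id,filename,tags,description
metadata = load_metadata_csv('metadata.csv')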
thefab/tornadis
tornadis/pool.py
https://github.com/thefab/tornadis/blob/f9dc883e46eb5971b62eab38346319757e5f900f/tornadis/pool.py#L180-L195
def preconnect(self, size=-1): """(pre)Connects some or all redis clients inside the pool. Args: size (int): number of redis clients to build and to connect (-1 means all clients if pool max_size > -1) Raises: ClientError: when size == -1 and pool max_size == -1 """ if size == -1 and self.max_size == -1: raise ClientError("size=-1 not allowed with pool max_size=-1") limit = min(size, self.max_size) if size != -1 else self.max_size clients = yield [self.get_connected_client() for _ in range(0, limit)] for client in clients: self.release_client(client)
[ "def", "preconnect", "(", "self", ",", "size", "=", "-", "1", ")", ":", "if", "size", "==", "-", "1", "and", "self", ".", "max_size", "==", "-", "1", ":", "raise", "ClientError", "(", "\"size=-1 not allowed with pool max_size=-1\"", ")", "limit", "=", "min", "(", "size", ",", "self", ".", "max_size", ")", "if", "size", "!=", "-", "1", "else", "self", ".", "max_size", "clients", "=", "yield", "[", "self", ".", "get_connected_client", "(", ")", "for", "_", "in", "range", "(", "0", ",", "limit", ")", "]", "for", "client", "in", "clients", ":", "self", ".", "release_client", "(", "client", ")" ]
(pre)Connects some or all redis clients inside the pool. Args: size (int): number of redis clients to build and to connect (-1 means all clients if pool max_size > -1) Raises: ClientError: when size == -1 and pool max_size == -1
[ "(", "pre", ")", "Connects", "some", "or", "all", "redis", "clients", "inside", "the", "pool", "." ]
python
train
41.9375
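A hedged coroutine sketch for preconnect; constructing the pool is assumed to happen elsewhere:

import tornado.gen

@tornado.gen.coroutine
def warm_up(pool):
    # builds, connects, and immediately releases ten clients back to the pool
    yield pool.preconnect(size=10)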
VisTrails/tej
tej/submission.py
https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L52-L62
def escape_queue(s): """Escapes the path to a queue, e.g. preserves ~ at the begining. """ if isinstance(s, PosixPath): s = unicode_(s) elif isinstance(s, bytes): s = s.decode('utf-8') if s.startswith('~/'): return '~/' + shell_escape(s[2:]) else: return shell_escape(s)
[ "def", "escape_queue", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "PosixPath", ")", ":", "s", "=", "unicode_", "(", "s", ")", "elif", "isinstance", "(", "s", ",", "bytes", ")", ":", "s", "=", "s", ".", "decode", "(", "'utf-8'", ")", "if", "s", ".", "startswith", "(", "'~/'", ")", ":", "return", "'~/'", "+", "shell_escape", "(", "s", "[", "2", ":", "]", ")", "else", ":", "return", "shell_escape", "(", "s", ")" ]
Escapes the path to a queue, e.g. preserves ~ at the beginning.
[ "Escapes", "the", "path", "to", "a", "queue", "e", ".", "g", ".", "preserves", "~", "at", "the", "begining", "." ]
python
train
28.727273
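Behaviour sketch for escape_queue; the exact quoting depends on shell_escape, so results are shown schematically in comments:

escape_queue('~/job queue')  # -> '~/' + shell_escape('job queue'), leading ~ preserved
escape_queue('/var/queues')  # -> shell_escape('/var/queues')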
IdentityPython/pysaml2
src/saml2/s2repoze/plugins/sp.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/s2repoze/plugins/sp.py#L51-L59
def construct_came_from(environ): """ The URL that the user used when the process where interupted for single-sign-on processing. """ came_from = environ.get("PATH_INFO") qstr = environ.get("QUERY_STRING", "") if qstr: came_from += "?" + qstr return came_from
[ "def", "construct_came_from", "(", "environ", ")", ":", "came_from", "=", "environ", ".", "get", "(", "\"PATH_INFO\"", ")", "qstr", "=", "environ", ".", "get", "(", "\"QUERY_STRING\"", ",", "\"\"", ")", "if", "qstr", ":", "came_from", "+=", "\"?\"", "+", "qstr", "return", "came_from" ]
The URL that the user used when the process was interrupted for single-sign-on processing.
[ "The", "URL", "that", "the", "user", "used", "when", "the", "process", "where", "interupted", "for", "single", "-", "sign", "-", "on", "processing", "." ]
python
train
31.555556
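A minimal illustration of construct_came_from with a hand-built WSGI environ:

environ = {'PATH_INFO': '/protected/page', 'QUERY_STRING': 'next=1'}
came_from = construct_came_from(environ)  # -> '/protected/page?next=1'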
petl-developers/petl
petl/io/csv.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/io/csv.py#L115-L131
def appendcsv(table, source=None, encoding=None, errors='strict', write_header=False, **csvargs): """ Append data rows to an existing CSV file. As :func:`petl.io.csv.tocsv` but the file is opened in append mode and the table header is not written by default. Note that no attempt is made to check that the fields or row lengths are consistent with the existing data, the data rows from the table are simply appended to the file. """ source = write_source_from_arg(source) csvargs.setdefault('dialect', 'excel') appendcsv_impl(table, source=source, encoding=encoding, errors=errors, write_header=write_header, **csvargs)
[ "def", "appendcsv", "(", "table", ",", "source", "=", "None", ",", "encoding", "=", "None", ",", "errors", "=", "'strict'", ",", "write_header", "=", "False", ",", "*", "*", "csvargs", ")", ":", "source", "=", "write_source_from_arg", "(", "source", ")", "csvargs", ".", "setdefault", "(", "'dialect'", ",", "'excel'", ")", "appendcsv_impl", "(", "table", ",", "source", "=", "source", ",", "encoding", "=", "encoding", ",", "errors", "=", "errors", ",", "write_header", "=", "write_header", ",", "*", "*", "csvargs", ")" ]
Append data rows to an existing CSV file. As :func:`petl.io.csv.tocsv` but the file is opened in append mode and the table header is not written by default. Note that no attempt is made to check that the fields or row lengths are consistent with the existing data; the data rows from the table are simply appended to the file.
[ "Append", "data", "rows", "to", "an", "existing", "CSV", "file", ".", "As", ":", "func", ":", "petl", ".", "io", ".", "csv", ".", "tocsv", "but", "the", "file", "is", "opened", "in", "append", "mode", "and", "the", "table", "header", "is", "not", "written", "by", "default", "." ]
python
train
40.176471
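A hedged petl usage sketch for appendcsv; the table contents and output path are illustrative:

import petl as etl

table = [('foo', 'bar'), ('a', 1), ('b', 2)]
etl.appendcsv(table, 'out.csv')                     # data rows only, header skipped
etl.appendcsv(table, 'out.csv', write_header=True)  # header written as well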
hyperledger/indy-sdk
wrappers/python/indy/did.py
https://github.com/hyperledger/indy-sdk/blob/55240dc170308d7883c48f03f308130a6d077be6/wrappers/python/indy/did.py#L245-L275
async def get_key_metadata(wallet_handle: int, verkey: str) -> str: """ Retrieves the meta information for the giving key in the wallet. :param wallet_handle: Wallet handle (created by open_wallet). :param verkey: The key (verkey, key id) to retrieve metadata. :return: metadata: The meta information stored with the key; Can be null if no metadata was saved for this key. """ logger = logging.getLogger(__name__) logger.debug("get_key_metadata: >>> wallet_handle: %r, verkey: %r", wallet_handle, verkey) if not hasattr(get_key_metadata, "cb"): logger.debug("get_key_metadata: Creating callback") get_key_metadata.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p)) c_wallet_handle = c_int32(wallet_handle) c_verkey = c_char_p(verkey.encode('utf-8')) metadata = await do_call('indy_get_key_metadata', c_wallet_handle, c_verkey, get_key_metadata.cb) res = metadata.decode() logger.debug("get_key_metadata: <<< res: %r", res) return res
[ "async", "def", "get_key_metadata", "(", "wallet_handle", ":", "int", ",", "verkey", ":", "str", ")", "->", "str", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "\"get_key_metadata: >>> wallet_handle: %r, verkey: %r\"", ",", "wallet_handle", ",", "verkey", ")", "if", "not", "hasattr", "(", "get_key_metadata", ",", "\"cb\"", ")", ":", "logger", ".", "debug", "(", "\"get_key_metadata: Creating callback\"", ")", "get_key_metadata", ".", "cb", "=", "create_cb", "(", "CFUNCTYPE", "(", "None", ",", "c_int32", ",", "c_int32", ",", "c_char_p", ")", ")", "c_wallet_handle", "=", "c_int32", "(", "wallet_handle", ")", "c_verkey", "=", "c_char_p", "(", "verkey", ".", "encode", "(", "'utf-8'", ")", ")", "metadata", "=", "await", "do_call", "(", "'indy_get_key_metadata'", ",", "c_wallet_handle", ",", "c_verkey", ",", "get_key_metadata", ".", "cb", ")", "res", "=", "metadata", ".", "decode", "(", ")", "logger", ".", "debug", "(", "\"get_key_metadata: <<< res: %r\"", ",", "res", ")", "return", "res" ]
Retrieves the meta information for the given key in the wallet. :param wallet_handle: Wallet handle (created by open_wallet). :param verkey: The key (verkey, key id) to retrieve metadata for. :return: metadata: The meta information stored with the key; can be null if no metadata was saved for this key.
[ "Retrieves", "the", "meta", "information", "for", "the", "giving", "key", "in", "the", "wallet", "." ]
python
train
36.870968
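A hedged asyncio sketch for get_key_metadata; wallet_handle and verkey are assumed to come from earlier indy calls such as open_wallet and create_key:

from indy import did

async def read_key_metadata(wallet_handle, verkey):
    return await did.get_key_metadata(wallet_handle, verkey)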
lmcinnes/umap
umap/rp_tree.py
https://github.com/lmcinnes/umap/blob/bbb01c03ba49f7bff8f77fd662d00e50d6686c77/umap/rp_tree.py#L596-L605
def max_sparse_hyperplane_size(tree): """Determine the most number on non zeros in a hyperplane entry""" if tree.is_leaf: return 0 else: return max( tree.hyperplane.shape[1], max_sparse_hyperplane_size(tree.left_child), max_sparse_hyperplane_size(tree.right_child), )
[ "def", "max_sparse_hyperplane_size", "(", "tree", ")", ":", "if", "tree", ".", "is_leaf", ":", "return", "0", "else", ":", "return", "max", "(", "tree", ".", "hyperplane", ".", "shape", "[", "1", "]", ",", "max_sparse_hyperplane_size", "(", "tree", ".", "left_child", ")", ",", "max_sparse_hyperplane_size", "(", "tree", ".", "right_child", ")", ",", ")" ]
Determine the largest number of non zeros in a hyperplane entry
[ "Determine", "the", "most", "number", "on", "non", "zeros", "in", "a", "hyperplane", "entry" ]
python
train
33
Erotemic/ubelt
ubelt/util_hash.py
https://github.com/Erotemic/ubelt/blob/db802f3ad8abba025db74b54f86e6892b8927325/ubelt/util_hash.py#L381-L447
def _register_numpy_extensions(self): """ Numpy extensions are builtin """ # system checks import numpy as np numpy_floating_types = (np.float16, np.float32, np.float64) if hasattr(np, 'float128'): # nocover numpy_floating_types = numpy_floating_types + (np.float128,) @self.add_iterable_check def is_object_ndarray(data): # ndarrays of objects cannot be hashed directly. return isinstance(data, np.ndarray) and data.dtype.kind == 'O' @self.register(np.ndarray) def hash_numpy_array(data): """ Example: >>> import ubelt as ub >>> if not ub.modname_to_modpath('numpy'): ... raise pytest.skip() >>> import numpy as np >>> data_f32 = np.zeros((3, 3, 3), dtype=np.float64) >>> data_i64 = np.zeros((3, 3, 3), dtype=np.int64) >>> data_i32 = np.zeros((3, 3, 3), dtype=np.int32) >>> hash_f64 = _hashable_sequence(data_f32, types=True) >>> hash_i64 = _hashable_sequence(data_i64, types=True) >>> hash_i32 = _hashable_sequence(data_i64, types=True) >>> assert hash_i64 != hash_f64 >>> assert hash_i64 != hash_i32 """ if data.dtype.kind == 'O': msg = 'directly hashing ndarrays with dtype=object is unstable' raise TypeError(msg) else: # tobytes() views the array in 1D (via ravel()) # encode the shape as well header = b''.join(_hashable_sequence((len(data.shape), data.shape))) dtype = b''.join(_hashable_sequence(data.dtype.descr)) hashable = header + dtype + data.tobytes() prefix = b'NDARR' return prefix, hashable @self.register((np.int64, np.int32, np.int16, np.int8) + (np.uint64, np.uint32, np.uint16, np.uint8)) def _hash_numpy_int(data): return _convert_to_hashable(int(data)) @self.register(numpy_floating_types) def _hash_numpy_float(data): return _convert_to_hashable(float(data)) @self.register(np.random.RandomState) def _hash_numpy_random_state(data): """ Example: >>> import ubelt as ub >>> if not ub.modname_to_modpath('numpy'): ... raise pytest.skip() >>> import numpy as np >>> rng = np.random.RandomState(0) >>> _hashable_sequence(rng, types=True) """ hashable = b''.join(_hashable_sequence(data.get_state())) prefix = b'RNG' return prefix, hashable
[ "def", "_register_numpy_extensions", "(", "self", ")", ":", "# system checks", "import", "numpy", "as", "np", "numpy_floating_types", "=", "(", "np", ".", "float16", ",", "np", ".", "float32", ",", "np", ".", "float64", ")", "if", "hasattr", "(", "np", ",", "'float128'", ")", ":", "# nocover", "numpy_floating_types", "=", "numpy_floating_types", "+", "(", "np", ".", "float128", ",", ")", "@", "self", ".", "add_iterable_check", "def", "is_object_ndarray", "(", "data", ")", ":", "# ndarrays of objects cannot be hashed directly.", "return", "isinstance", "(", "data", ",", "np", ".", "ndarray", ")", "and", "data", ".", "dtype", ".", "kind", "==", "'O'", "@", "self", ".", "register", "(", "np", ".", "ndarray", ")", "def", "hash_numpy_array", "(", "data", ")", ":", "\"\"\"\n Example:\n >>> import ubelt as ub\n >>> if not ub.modname_to_modpath('numpy'):\n ... raise pytest.skip()\n >>> import numpy as np\n >>> data_f32 = np.zeros((3, 3, 3), dtype=np.float64)\n >>> data_i64 = np.zeros((3, 3, 3), dtype=np.int64)\n >>> data_i32 = np.zeros((3, 3, 3), dtype=np.int32)\n >>> hash_f64 = _hashable_sequence(data_f32, types=True)\n >>> hash_i64 = _hashable_sequence(data_i64, types=True)\n >>> hash_i32 = _hashable_sequence(data_i64, types=True)\n >>> assert hash_i64 != hash_f64\n >>> assert hash_i64 != hash_i32\n \"\"\"", "if", "data", ".", "dtype", ".", "kind", "==", "'O'", ":", "msg", "=", "'directly hashing ndarrays with dtype=object is unstable'", "raise", "TypeError", "(", "msg", ")", "else", ":", "# tobytes() views the array in 1D (via ravel())", "# encode the shape as well", "header", "=", "b''", ".", "join", "(", "_hashable_sequence", "(", "(", "len", "(", "data", ".", "shape", ")", ",", "data", ".", "shape", ")", ")", ")", "dtype", "=", "b''", ".", "join", "(", "_hashable_sequence", "(", "data", ".", "dtype", ".", "descr", ")", ")", "hashable", "=", "header", "+", "dtype", "+", "data", ".", "tobytes", "(", ")", "prefix", "=", "b'NDARR'", "return", "prefix", ",", "hashable", "@", "self", ".", "register", "(", "(", "np", ".", "int64", ",", "np", ".", "int32", ",", "np", ".", "int16", ",", "np", ".", "int8", ")", "+", "(", "np", ".", "uint64", ",", "np", ".", "uint32", ",", "np", ".", "uint16", ",", "np", ".", "uint8", ")", ")", "def", "_hash_numpy_int", "(", "data", ")", ":", "return", "_convert_to_hashable", "(", "int", "(", "data", ")", ")", "@", "self", ".", "register", "(", "numpy_floating_types", ")", "def", "_hash_numpy_float", "(", "data", ")", ":", "return", "_convert_to_hashable", "(", "float", "(", "data", ")", ")", "@", "self", ".", "register", "(", "np", ".", "random", ".", "RandomState", ")", "def", "_hash_numpy_random_state", "(", "data", ")", ":", "\"\"\"\n Example:\n >>> import ubelt as ub\n >>> if not ub.modname_to_modpath('numpy'):\n ... raise pytest.skip()\n >>> import numpy as np\n >>> rng = np.random.RandomState(0)\n >>> _hashable_sequence(rng, types=True)\n \"\"\"", "hashable", "=", "b''", ".", "join", "(", "_hashable_sequence", "(", "data", ".", "get_state", "(", ")", ")", ")", "prefix", "=", "b'RNG'", "return", "prefix", ",", "hashable" ]
Numpy extensions are builtin
[ "Numpy", "extensions", "are", "builtin" ]
python
valid
41.537313
tonioo/sievelib
sievelib/parser.py
https://github.com/tonioo/sievelib/blob/88822d1f1daf30ef3dd9ac74911301b0773ef3c8/sievelib/parser.py#L124-L138
def __reset_parser(self): """Reset parser's internal variables Restore the parser to an initial state. Useful when creating a new parser or reusing an existing one. """ self.result = [] self.hash_comments = [] self.__cstate = None self.__curcommand = None self.__curstringlist = None self.__expected = None self.__opened_blocks = 0 RequireCommand.loaded_extensions = []
[ "def", "__reset_parser", "(", "self", ")", ":", "self", ".", "result", "=", "[", "]", "self", ".", "hash_comments", "=", "[", "]", "self", ".", "__cstate", "=", "None", "self", ".", "__curcommand", "=", "None", "self", ".", "__curstringlist", "=", "None", "self", ".", "__expected", "=", "None", "self", ".", "__opened_blocks", "=", "0", "RequireCommand", ".", "loaded_extensions", "=", "[", "]" ]
Reset parser's internal variables Restore the parser to an initial state. Useful when creating a new parser or reusing an existing one.
[ "Reset", "parser", "s", "internal", "variables" ]
python
train
30.2
aouyar/PyMunin
pymunin/plugins/phpopcstats.py
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpopcstats.py#L148-L177
def retrieveVals(self): """Retrieve values for graphs.""" opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = opcinfo.getAllStats() if self.hasGraph('php_opc_memory') and stats: mem = stats['memory_usage'] keys = ('used_memory', 'wasted_memory', 'free_memory') map(lambda k:self.setGraphVal('php_opc_memory',k,mem[k]), keys) if self.hasGraph('php_opc_opcache_statistics') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_statistics', 'hits', st['hits']) self.setGraphVal('php_opc_opcache_statistics', 'misses', st['misses']) if self.hasGraph('php_opc_opcache_hitrate') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_hitrate', 'opcache_hit_rate', st['opcache_hit_rate']) if self.hasGraph('php_opc_key_status') and stats: st = stats['opcache_statistics'] wasted = st['num_cached_keys'] - st['num_cached_scripts'] free = st['max_cached_keys'] - st['num_cached_keys'] self.setGraphVal('php_opc_key_status', 'num_cached_scripts', st['num_cached_scripts']) self.setGraphVal('php_opc_key_status', 'num_wasted_keys', wasted) self.setGraphVal('php_opc_key_status', 'num_free_keys', free)
[ "def", "retrieveVals", "(", "self", ")", ":", "opcinfo", "=", "OPCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "stats", "=", "opcinfo", ".", "getAllStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_memory'", ")", "and", "stats", ":", "mem", "=", "stats", "[", "'memory_usage'", "]", "keys", "=", "(", "'used_memory'", ",", "'wasted_memory'", ",", "'free_memory'", ")", "map", "(", "lambda", "k", ":", "self", ".", "setGraphVal", "(", "'php_opc_memory'", ",", "k", ",", "mem", "[", "k", "]", ")", ",", "keys", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_opcache_statistics'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "self", ".", "setGraphVal", "(", "'php_opc_opcache_statistics'", ",", "'hits'", ",", "st", "[", "'hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_opc_opcache_statistics'", ",", "'misses'", ",", "st", "[", "'misses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_opcache_hitrate'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "self", ".", "setGraphVal", "(", "'php_opc_opcache_hitrate'", ",", "'opcache_hit_rate'", ",", "st", "[", "'opcache_hit_rate'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_key_status'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "wasted", "=", "st", "[", "'num_cached_keys'", "]", "-", "st", "[", "'num_cached_scripts'", "]", "free", "=", "st", "[", "'max_cached_keys'", "]", "-", "st", "[", "'num_cached_keys'", "]", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_cached_scripts'", ",", "st", "[", "'num_cached_scripts'", "]", ")", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_wasted_keys'", ",", "wasted", ")", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_free_keys'", ",", "free", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
python
train
50.433333
sentinel-hub/eo-learn
features/eolearn/features/interpolation.py
https://github.com/sentinel-hub/eo-learn/blob/b8c390b9f553c561612fe9eb64e720611633a035/features/eolearn/features/interpolation.py#L182-L209
def _copy_old_features(new_eopatch, old_eopatch, copy_features): """ Copy features from old EOPatch :param new_eopatch: New EOPatch container where the old features will be copied to :type new_eopatch: EOPatch :param old_eopatch: Old EOPatch container where the old features are located :type old_eopatch: EOPatch :param copy_features: List of tuples of type (FeatureType, str) or (FeatureType, str, str) that are copied over into the new EOPatch. The first string is the feature name, and the second one (optional) is a new name to be used for the feature :type copy_features: list((FeatureType, str) or (FeatureType, str, str)) """ if copy_features: existing_features = set(new_eopatch.get_feature_list()) for copy_feature_type, copy_feature_name, copy_new_feature_name in copy_features: new_feature = copy_feature_type, copy_new_feature_name if new_feature in existing_features: raise ValueError('Feature {} of {} already exists in the new EOPatch! ' 'Use a different name!'.format(copy_new_feature_name, copy_feature_type)) else: existing_features.add(new_feature) new_eopatch[copy_feature_type][copy_new_feature_name] = \ old_eopatch[copy_feature_type][copy_feature_name] return new_eopatch
[ "def", "_copy_old_features", "(", "new_eopatch", ",", "old_eopatch", ",", "copy_features", ")", ":", "if", "copy_features", ":", "existing_features", "=", "set", "(", "new_eopatch", ".", "get_feature_list", "(", ")", ")", "for", "copy_feature_type", ",", "copy_feature_name", ",", "copy_new_feature_name", "in", "copy_features", ":", "new_feature", "=", "copy_feature_type", ",", "copy_new_feature_name", "if", "new_feature", "in", "existing_features", ":", "raise", "ValueError", "(", "'Feature {} of {} already exists in the new EOPatch! '", "'Use a different name!'", ".", "format", "(", "copy_new_feature_name", ",", "copy_feature_type", ")", ")", "else", ":", "existing_features", ".", "add", "(", "new_feature", ")", "new_eopatch", "[", "copy_feature_type", "]", "[", "copy_new_feature_name", "]", "=", "old_eopatch", "[", "copy_feature_type", "]", "[", "copy_feature_name", "]", "return", "new_eopatch" ]
Copy features from old EOPatch :param new_eopatch: New EOPatch container where the old features will be copied to :type new_eopatch: EOPatch :param old_eopatch: Old EOPatch container where the old features are located :type old_eopatch: EOPatch :param copy_features: List of tuples of type (FeatureType, str) or (FeatureType, str, str) that are copied over into the new EOPatch. The first string is the feature name, and the second one (optional) is a new name to be used for the feature :type copy_features: list((FeatureType, str) or (FeatureType, str, str))
[ "Copy", "features", "from", "old", "EOPatch" ]
python
train
52.5
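An illustrative copy_features argument for _copy_old_features; the feature names are hypothetical, and per the loop above each entry is unpacked as a three-element tuple (feature type, old name, new name):

from eolearn.core import FeatureType

copy_features = [
    (FeatureType.DATA, 'NDVI', 'NDVI'),         # keep the same name
    (FeatureType.MASK, 'CLM', 'CLM_ORIGINAL'),  # rename in the new EOPatch
]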
swans-one/django-kittens
settings.py
https://github.com/swans-one/django-kittens/blob/31e1ff54737c8ba3e99880dbff285a730ddac851/settings.py#L6-L33
def configure_settings(): """ Configures settings for manage.py and for run_tests.py. """ if not settings.configured: db_config = { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'django_kittens_db.sqlite3', } settings.configure( TEST_RUNNER='django_nose.NoseTestSuiteRunner', NOSE_ARGS=['--nocapture', '--nologcapture', '--verbosity=1'], DATABASES={ 'default': db_config, }, INSTALLED_APPS=( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.admin', 'django_kittens', 'django_kittens.tests', ), ROOT_URLCONF='django_kittens.urls', DEBUG=True, MIDDLEWARE_CLASSES=(), )
[ "def", "configure_settings", "(", ")", ":", "if", "not", "settings", ".", "configured", ":", "db_config", "=", "{", "'ENGINE'", ":", "'django.db.backends.sqlite3'", ",", "'NAME'", ":", "'django_kittens_db.sqlite3'", ",", "}", "settings", ".", "configure", "(", "TEST_RUNNER", "=", "'django_nose.NoseTestSuiteRunner'", ",", "NOSE_ARGS", "=", "[", "'--nocapture'", ",", "'--nologcapture'", ",", "'--verbosity=1'", "]", ",", "DATABASES", "=", "{", "'default'", ":", "db_config", ",", "}", ",", "INSTALLED_APPS", "=", "(", "'django.contrib.auth'", ",", "'django.contrib.contenttypes'", ",", "'django.contrib.sessions'", ",", "'django.contrib.admin'", ",", "'django_kittens'", ",", "'django_kittens.tests'", ",", ")", ",", "ROOT_URLCONF", "=", "'django_kittens.urls'", ",", "DEBUG", "=", "True", ",", "MIDDLEWARE_CLASSES", "=", "(", ")", ",", ")" ]
Configures settings for manage.py and for run_tests.py.
[ "Configures", "settings", "for", "manage", ".", "py", "and", "for", "run_tests", ".", "py", "." ]
python
train
31.5
santosjorge/cufflinks
cufflinks/colors.py
https://github.com/santosjorge/cufflinks/blob/ca1cbf93998dc793d0b1f8ac30fe1f2bd105f63a/cufflinks/colors.py#L198-L281
def color_table(color, N=1, sort=False, sort_values=False, inline=False, as_html=False): """ Generates a colour table Parameters: ----------- color : string | list | dict Color representation in rgba|rgb|hex If a list of colors is passed then these are displayed in a table N : int number of colours to generate When color is not a list then it generaes a range of N colors sort : bool if True then items are sorted sort_values : bool if True then items are sorted by color values. Only applies if color is a dictionary inline : bool if True it returns single line color blocks as_html : bool if True it returns the HTML code Example: color_table('#ff9933') color_table(cufflinks.cnames) color_table(['pink','salmon','yellow']) Note: This function only works in iPython Notebook """ if isinstance(color, list): c_ = '' rgb_tup = [normalize(c) for c in color] if sort: rgb_tup.sort() elif isinstance(color, dict): c_ = '' items = [(k, normalize(v), hex_to_hsv(normalize(v))) for k, v in list(color.items())] if sort_values: items = sorted(items, key=operator.itemgetter(2)) elif sort: items = sorted(items, key=operator.itemgetter(0)) rgb_tup = [(k, v) for k, v, _ in items] else: c_ = normalize(color) if N > 1: rgb_tup = np.array(color_range(c_, N))[::-1] else: rgb_tup = [c_] def _color(c): if hex_to_hsv(c)[2] < .5: color = "#ffffff" shadow = '0 1px 0 #000' else: color = "#000000" shadow = '0 1px 0 rgba(255,255,255,0.6)' if c == c_: border = " border: 1px solid #ffffff;" else: border = '' return color, shadow, border s = '<ul style="list-style-type: none;">' if not inline else '' for c in rgb_tup: if isinstance(c, tuple): k, c = c k += ' : ' else: k = '' if inline: s += '<div style="background-color:{0};height:20px;width:20px;display:inline-block;"></div>'.format( c) else: color, shadow, border = _color(c) s += """<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">""" + k + c.upper() + """</span> </li>""" s += '</ul>' if not inline else '' if as_html: return s return display(HTML(s))
[ "def", "color_table", "(", "color", ",", "N", "=", "1", ",", "sort", "=", "False", ",", "sort_values", "=", "False", ",", "inline", "=", "False", ",", "as_html", "=", "False", ")", ":", "if", "isinstance", "(", "color", ",", "list", ")", ":", "c_", "=", "''", "rgb_tup", "=", "[", "normalize", "(", "c", ")", "for", "c", "in", "color", "]", "if", "sort", ":", "rgb_tup", ".", "sort", "(", ")", "elif", "isinstance", "(", "color", ",", "dict", ")", ":", "c_", "=", "''", "items", "=", "[", "(", "k", ",", "normalize", "(", "v", ")", ",", "hex_to_hsv", "(", "normalize", "(", "v", ")", ")", ")", "for", "k", ",", "v", "in", "list", "(", "color", ".", "items", "(", ")", ")", "]", "if", "sort_values", ":", "items", "=", "sorted", "(", "items", ",", "key", "=", "operator", ".", "itemgetter", "(", "2", ")", ")", "elif", "sort", ":", "items", "=", "sorted", "(", "items", ",", "key", "=", "operator", ".", "itemgetter", "(", "0", ")", ")", "rgb_tup", "=", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", ",", "_", "in", "items", "]", "else", ":", "c_", "=", "normalize", "(", "color", ")", "if", "N", ">", "1", ":", "rgb_tup", "=", "np", ".", "array", "(", "color_range", "(", "c_", ",", "N", ")", ")", "[", ":", ":", "-", "1", "]", "else", ":", "rgb_tup", "=", "[", "c_", "]", "def", "_color", "(", "c", ")", ":", "if", "hex_to_hsv", "(", "c", ")", "[", "2", "]", "<", ".5", ":", "color", "=", "\"#ffffff\"", "shadow", "=", "'0 1px 0 #000'", "else", ":", "color", "=", "\"#000000\"", "shadow", "=", "'0 1px 0 rgba(255,255,255,0.6)'", "if", "c", "==", "c_", ":", "border", "=", "\" border: 1px solid #ffffff;\"", "else", ":", "border", "=", "''", "return", "color", ",", "shadow", ",", "border", "s", "=", "'<ul style=\"list-style-type: none;\">'", "if", "not", "inline", "else", "''", "for", "c", "in", "rgb_tup", ":", "if", "isinstance", "(", "c", ",", "tuple", ")", ":", "k", ",", "c", "=", "c", "k", "+=", "' : '", "else", ":", "k", "=", "''", "if", "inline", ":", "s", "+=", "'<div style=\"background-color:{0};height:20px;width:20px;display:inline-block;\"></div>'", ".", "format", "(", "c", ")", "else", ":", "color", ",", "shadow", ",", "border", "=", "_color", "(", "c", ")", "s", "+=", "\"\"\"<li style=\"text-align:center;\"\"\"", "+", "border", "+", "\"\"\"line-height:30px;background-color:\"\"\"", "+", "c", "+", "\"\"\";\"> \n <span style=\" text-shadow:\"\"\"", "+", "shadow", "+", "\"\"\"; color:\"\"\"", "+", "color", "+", "\"\"\";\">\"\"\"", "+", "k", "+", "c", ".", "upper", "(", ")", "+", "\"\"\"</span>\n </li>\"\"\"", "s", "+=", "'</ul>'", "if", "not", "inline", "else", "''", "if", "as_html", ":", "return", "s", "return", "display", "(", "HTML", "(", "s", ")", ")" ]
Generates a colour table Parameters: ----------- color : string | list | dict Color representation in rgba|rgb|hex If a list of colors is passed then these are displayed in a table N : int number of colours to generate When color is not a list then it generates a range of N colors sort : bool if True then items are sorted sort_values : bool if True then items are sorted by color values. Only applies if color is a dictionary inline : bool if True it returns single line color blocks as_html : bool if True it returns the HTML code Example: color_table('#ff9933') color_table(cufflinks.cnames) color_table(['pink','salmon','yellow']) Note: This function only works in IPython Notebook
[ "Generates", "a", "colour", "table" ]
python
train
34.27381
sveetch/boussole
boussole/conf/model.py
https://github.com/sveetch/boussole/blob/22cc644e9d633f41ebfc167d427a71c1726cee21/boussole/conf/model.py#L77-L96
def update(self, settings): """ Update object attributes from given settings Args: settings (dict): Dictionnary of elements to update settings. Returns: dict: Dictionnary of all current saved settings. """ settings = self.clean(settings) # Update internal dict self._settings.update(settings) # Push every setting items as class object attributes self.set_settings(settings) return self._settings
[ "def", "update", "(", "self", ",", "settings", ")", ":", "settings", "=", "self", ".", "clean", "(", "settings", ")", "# Update internal dict", "self", ".", "_settings", ".", "update", "(", "settings", ")", "# Push every setting items as class object attributes", "self", ".", "set_settings", "(", "settings", ")", "return", "self", ".", "_settings" ]
Update object attributes from given settings Args: settings (dict): Dictionary of elements to update settings. Returns: dict: Dictionary of all currently saved settings.
[ "Update", "object", "attributes", "from", "given", "settings" ]
python
train
24.75
numenta/htmresearch
htmresearch/frameworks/union_temporal_pooling/activation/excite_functions/excite_functions_all.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/union_temporal_pooling/activation/excite_functions/excite_functions_all.py#L70-L82
def plot(self): """ plot the activation function """ plt.ion() plt.show() x = numpy.linspace(0, 15, 100) y = numpy.zeros(x.shape) y = self.excite(y, x) plt.plot(x, y) plt.xlabel('Input') plt.ylabel('Persistence') plt.title('Sigmoid Activation Function')
[ "def", "plot", "(", "self", ")", ":", "plt", ".", "ion", "(", ")", "plt", ".", "show", "(", ")", "x", "=", "numpy", ".", "linspace", "(", "0", ",", "15", ",", "100", ")", "y", "=", "numpy", ".", "zeros", "(", "x", ".", "shape", ")", "y", "=", "self", ".", "excite", "(", "y", ",", "x", ")", "plt", ".", "plot", "(", "x", ",", "y", ")", "plt", ".", "xlabel", "(", "'Input'", ")", "plt", ".", "ylabel", "(", "'Persistence'", ")", "plt", ".", "title", "(", "'Sigmoid Activation Function'", ")" ]
plot the activation function
[ "plot", "the", "activation", "function" ]
python
train
22.230769
RJT1990/pyflux
pyflux/gas/gasrank.py
https://github.com/RJT1990/pyflux/blob/297f2afc2095acd97c12e827dd500e8ea5da0c0f/pyflux/gas/gasrank.py#L294-L335
def _model_abilities_two_components(self,beta): """ Creates the structure of the model - store abilities Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- theta : np.array Contains the predicted values for the time series Y : np.array Contains the length-adjusted time series (accounting for lags) scores : np.array Contains the scores for the time series """ parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])]) scale, shape, skewness = self._get_scale_and_shape(parm) state_vectors = np.zeros(shape=(self.max_team+1)) state_vectors_2 = np.zeros(shape=(self.max_team_2+1)) state_vectors_store_1 = np.zeros(shape=(int(np.max(self.home_count)+50),int(self.max_team+1))) state_vectors_store_2 = np.zeros(shape=(int(np.max(self.home_2_count)+50),int(self.max_team_2+1))) theta = np.zeros(shape=(self.data.shape[0])) for t in range(0,self.data.shape[0]): theta[t] = parm[0] + state_vectors_2[self.home_2_id[t]] - state_vectors_2[self.away_2_id[t]] + state_vectors[self.home_id[t]] - state_vectors[self.away_id[t]] state_vectors[self.home_id[t]] += parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors[self.away_id[t]] += -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors_2[self.home_2_id[t]] += parm[2]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors_2[self.away_2_id[t]] += -parm[2]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors_store_1[int(self.home_count[t]), self.home_id[t]] = state_vectors_store_1[max(0,int(self.home_count[t])-1), self.home_id[t]] + parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors_store_1[int(self.away_count[t]), self.away_id[t]] = state_vectors_store_1[max(0,int(self.away_count[t])-1), self.away_id[t]] -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors_store_2[int(self.home_2_count[t]), self.home_2_id[t]] = state_vectors_store_2[max(0,int(self.home_2_count[t])-1), self.home_2_id[t]] + parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) state_vectors_store_2[int(self.away_2_count[t]), self.away_2_id[t]] = state_vectors_store_2[max(0,int(self.away_2_count[t])-1), self.away_2_id[t]] -parm[1]*self.family.score_function(self.data[t], self.link(theta[t]), scale, shape, skewness) return state_vectors_store_1, state_vectors_store_2
[ "def", "_model_abilities_two_components", "(", "self", ",", "beta", ")", ":", "parm", "=", "np", ".", "array", "(", "[", "self", ".", "latent_variables", ".", "z_list", "[", "k", "]", ".", "prior", ".", "transform", "(", "beta", "[", "k", "]", ")", "for", "k", "in", "range", "(", "beta", ".", "shape", "[", "0", "]", ")", "]", ")", "scale", ",", "shape", ",", "skewness", "=", "self", ".", "_get_scale_and_shape", "(", "parm", ")", "state_vectors", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "max_team", "+", "1", ")", ")", "state_vectors_2", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "max_team_2", "+", "1", ")", ")", "state_vectors_store_1", "=", "np", ".", "zeros", "(", "shape", "=", "(", "int", "(", "np", ".", "max", "(", "self", ".", "home_count", ")", "+", "50", ")", ",", "int", "(", "self", ".", "max_team", "+", "1", ")", ")", ")", "state_vectors_store_2", "=", "np", ".", "zeros", "(", "shape", "=", "(", "int", "(", "np", ".", "max", "(", "self", ".", "home_2_count", ")", "+", "50", ")", ",", "int", "(", "self", ".", "max_team_2", "+", "1", ")", ")", ")", "theta", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "data", ".", "shape", "[", "0", "]", ")", ")", "for", "t", "in", "range", "(", "0", ",", "self", ".", "data", ".", "shape", "[", "0", "]", ")", ":", "theta", "[", "t", "]", "=", "parm", "[", "0", "]", "+", "state_vectors_2", "[", "self", ".", "home_2_id", "[", "t", "]", "]", "-", "state_vectors_2", "[", "self", ".", "away_2_id", "[", "t", "]", "]", "+", "state_vectors", "[", "self", ".", "home_id", "[", "t", "]", "]", "-", "state_vectors", "[", "self", ".", "away_id", "[", "t", "]", "]", "state_vectors", "[", "self", ".", "home_id", "[", "t", "]", "]", "+=", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors", "[", "self", ".", "away_id", "[", "t", "]", "]", "+=", "-", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_2", "[", "self", ".", "home_2_id", "[", "t", "]", "]", "+=", "parm", "[", "2", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_2", "[", "self", ".", "away_2_id", "[", "t", "]", "]", "+=", "-", "parm", "[", "2", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_store_1", "[", "int", "(", "self", ".", "home_count", "[", "t", "]", ")", ",", "self", ".", "home_id", "[", "t", "]", "]", "=", "state_vectors_store_1", "[", "max", "(", "0", ",", "int", "(", "self", ".", "home_count", "[", "t", "]", ")", "-", "1", ")", ",", "self", ".", "home_id", "[", "t", "]", "]", "+", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_store_1", "[", "int", "(", "self", ".", "away_count", "[", "t", "]", ")", ",", "self", ".", "away_id", "[", 
"t", "]", "]", "=", "state_vectors_store_1", "[", "max", "(", "0", ",", "int", "(", "self", ".", "away_count", "[", "t", "]", ")", "-", "1", ")", ",", "self", ".", "away_id", "[", "t", "]", "]", "-", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_store_2", "[", "int", "(", "self", ".", "home_2_count", "[", "t", "]", ")", ",", "self", ".", "home_2_id", "[", "t", "]", "]", "=", "state_vectors_store_2", "[", "max", "(", "0", ",", "int", "(", "self", ".", "home_2_count", "[", "t", "]", ")", "-", "1", ")", ",", "self", ".", "home_2_id", "[", "t", "]", "]", "+", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "state_vectors_store_2", "[", "int", "(", "self", ".", "away_2_count", "[", "t", "]", ")", ",", "self", ".", "away_2_id", "[", "t", "]", "]", "=", "state_vectors_store_2", "[", "max", "(", "0", ",", "int", "(", "self", ".", "away_2_count", "[", "t", "]", ")", "-", "1", ")", ",", "self", ".", "away_2_id", "[", "t", "]", "]", "-", "parm", "[", "1", "]", "*", "self", ".", "family", ".", "score_function", "(", "self", ".", "data", "[", "t", "]", ",", "self", ".", "link", "(", "theta", "[", "t", "]", ")", ",", "scale", ",", "shape", ",", "skewness", ")", "return", "state_vectors_store_1", ",", "state_vectors_store_2" ]
Creates the structure of the model - store abilities

Parameters
----------
beta : np.array
    Contains untransformed starting values for latent variables

Returns
----------
state_vectors_store_1 : np.array
    Contains the stored per-match ability states for the first component

state_vectors_store_2 : np.array
    Contains the stored per-match ability states for the second component
[ "Creates", "the", "structure", "of", "the", "model", "-", "store", "abilities" ]
python
train
69.833333
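The function above implements a score-driven (GAS-style) rating update: the match prediction theta is a sum of latent ability differences across two components, and each ability moves by a learning rate times the family's score function. A minimal sketch of that update rule under simplifying assumptions — a single component, a logistic link, and outcome-minus-probability standing in for the model's score function (none of these are the library's actual choices):

import numpy as np

def update_abilities(matches, n_teams, lr=0.05):
    """matches: iterable of (home_id, away_id, outcome) with outcome in {0, 1}."""
    ability = np.zeros(n_teams)
    history = []  # per-round snapshot of abilities, like state_vectors_store_1
    for home, away, outcome in matches:
        theta = ability[home] - ability[away]   # predicted log-odds
        p_home = 1.0 / (1.0 + np.exp(-theta))   # logistic link
        score = outcome - p_home                # score-function residual (an assumption)
        ability[home] += lr * score             # home ability moves with the residual
        ability[away] -= lr * score             # away ability moves against it
        history.append(ability.copy())
    return ability, np.array(history)

abilities, trace = update_abilities([(0, 1, 1), (1, 2, 0), (0, 2, 1)], n_teams=3)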
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/ultratb.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/ultratb.py#L138-L211
def findsource(object): """Return the entire source file and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of all the lines in the file and the line number indexes a line in that list. An IOError is raised if the source code cannot be retrieved. FIXED version with which we monkeypatch the stdlib to work around a bug.""" file = getsourcefile(object) or getfile(object) # If the object is a frame, then trying to get the globals dict from its # module won't work. Instead, the frame object itself has the globals # dictionary. globals_dict = None if inspect.isframe(object): # XXX: can this ever be false? globals_dict = object.f_globals else: module = getmodule(object, file) if module: globals_dict = module.__dict__ lines = linecache.getlines(file, globals_dict) if not lines: raise IOError('could not get source code') if ismodule(object): return lines, 0 if isclass(object): name = object.__name__ pat = re.compile(r'^(\s*)class\s*' + name + r'\b') # make some effort to find the best matching class definition: # use the one with the least indentation, which is the one # that's most probably not inside a function definition. candidates = [] for i in range(len(lines)): match = pat.match(lines[i]) if match: # if it's at toplevel, it's already the best one if lines[i][0] == 'c': return lines, i # else add whitespace to candidate list candidates.append((match.group(1), i)) if candidates: # this will sort by whitespace, and by line number, # less whitespace first candidates.sort() return lines, candidates[0][1] else: raise IOError('could not find class definition') if ismethod(object): object = object.im_func if isfunction(object): object = object.func_code if istraceback(object): object = object.tb_frame if isframe(object): object = object.f_code if iscode(object): if not hasattr(object, 'co_firstlineno'): raise IOError('could not find function definition') pat = re.compile(r'^(\s*def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)') pmatch = pat.match # fperez - fix: sometimes, co_firstlineno can give a number larger than # the length of lines, which causes an error. Safeguard against that. lnum = min(object.co_firstlineno,len(lines))-1 while lnum > 0: if pmatch(lines[lnum]): break lnum -= 1 return lines, lnum raise IOError('could not find code object')
[ "def", "findsource", "(", "object", ")", ":", "file", "=", "getsourcefile", "(", "object", ")", "or", "getfile", "(", "object", ")", "# If the object is a frame, then trying to get the globals dict from its", "# module won't work. Instead, the frame object itself has the globals", "# dictionary.", "globals_dict", "=", "None", "if", "inspect", ".", "isframe", "(", "object", ")", ":", "# XXX: can this ever be false?", "globals_dict", "=", "object", ".", "f_globals", "else", ":", "module", "=", "getmodule", "(", "object", ",", "file", ")", "if", "module", ":", "globals_dict", "=", "module", ".", "__dict__", "lines", "=", "linecache", ".", "getlines", "(", "file", ",", "globals_dict", ")", "if", "not", "lines", ":", "raise", "IOError", "(", "'could not get source code'", ")", "if", "ismodule", "(", "object", ")", ":", "return", "lines", ",", "0", "if", "isclass", "(", "object", ")", ":", "name", "=", "object", ".", "__name__", "pat", "=", "re", ".", "compile", "(", "r'^(\\s*)class\\s*'", "+", "name", "+", "r'\\b'", ")", "# make some effort to find the best matching class definition:", "# use the one with the least indentation, which is the one", "# that's most probably not inside a function definition.", "candidates", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "lines", ")", ")", ":", "match", "=", "pat", ".", "match", "(", "lines", "[", "i", "]", ")", "if", "match", ":", "# if it's at toplevel, it's already the best one", "if", "lines", "[", "i", "]", "[", "0", "]", "==", "'c'", ":", "return", "lines", ",", "i", "# else add whitespace to candidate list", "candidates", ".", "append", "(", "(", "match", ".", "group", "(", "1", ")", ",", "i", ")", ")", "if", "candidates", ":", "# this will sort by whitespace, and by line number,", "# less whitespace first", "candidates", ".", "sort", "(", ")", "return", "lines", ",", "candidates", "[", "0", "]", "[", "1", "]", "else", ":", "raise", "IOError", "(", "'could not find class definition'", ")", "if", "ismethod", "(", "object", ")", ":", "object", "=", "object", ".", "im_func", "if", "isfunction", "(", "object", ")", ":", "object", "=", "object", ".", "func_code", "if", "istraceback", "(", "object", ")", ":", "object", "=", "object", ".", "tb_frame", "if", "isframe", "(", "object", ")", ":", "object", "=", "object", ".", "f_code", "if", "iscode", "(", "object", ")", ":", "if", "not", "hasattr", "(", "object", ",", "'co_firstlineno'", ")", ":", "raise", "IOError", "(", "'could not find function definition'", ")", "pat", "=", "re", ".", "compile", "(", "r'^(\\s*def\\s)|(.*(?<!\\w)lambda(:|\\s))|^(\\s*@)'", ")", "pmatch", "=", "pat", ".", "match", "# fperez - fix: sometimes, co_firstlineno can give a number larger than", "# the length of lines, which causes an error. Safeguard against that.", "lnum", "=", "min", "(", "object", ".", "co_firstlineno", ",", "len", "(", "lines", ")", ")", "-", "1", "while", "lnum", ">", "0", ":", "if", "pmatch", "(", "lines", "[", "lnum", "]", ")", ":", "break", "lnum", "-=", "1", "return", "lines", ",", "lnum", "raise", "IOError", "(", "'could not find code object'", ")" ]
Return the entire source file and starting line number for an object. The argument may be a module, class, method, function, traceback, frame, or code object. The source code is returned as a list of all the lines in the file and the line number indexes a line in that list. An IOError is raised if the source code cannot be retrieved. FIXED version with which we monkeypatch the stdlib to work around a bug.
[ "Return", "the", "entire", "source", "file", "and", "starting", "line", "number", "for", "an", "object", "." ]
python
test
38.445946
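The patched findsource above resolves any live object down to a code object and scans the cached file lines for its definition. A small sketch of the same stdlib machinery it wraps — plain inspect and linecache, no monkeypatching:

import inspect
import linecache

def demo():
    return 42

# all file lines plus the 0-based index where the definition starts
lines, lnum = inspect.findsource(demo)
print(lnum, lines[lnum].rstrip())   # e.g. "def demo():"

# linecache serves the raw lines that findsource searches through
print(linecache.getline(inspect.getsourcefile(demo), lnum + 1).rstrip())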
twitterdev/tweet_parser
tweet_parser/getter_methods/tweet_entities.py
https://github.com/twitterdev/tweet_parser/blob/3435de8367d36b483a6cfd8d46cc28694ee8a42e/tweet_parser/getter_methods/tweet_entities.py#L215-L245
def get_hashtags(tweet): """ Get a list of hashtags in the Tweet Note that in the case of a quote-tweet, this does not return the hashtags in the quoted status. Args: tweet (Tweet or dict): A Tweet object or dictionary Returns: list (a list of strings): list of all of the hashtags in the Tweet Example: >>> from tweet_parser.getter_methods.tweet_entities import get_hashtags >>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017", ... "entities": {"hashtags": [{"text":"1hashtag"}]}} >>> get_hashtags(original) ['1hashtag'] >>> activity = {"postedTime": "2017-05-24T20:17:19.000Z", ... "verb": "post", ... "twitter_entities": {"hashtags": [ ... {"text":"1hashtag"}, ... {"text": "moreHashtags"}]}} >>> get_hashtags(activity) ['1hashtag', 'moreHashtags'] """ entities = get_entities(tweet) hashtags = entities.get("hashtags") hashtags = [tag["text"] for tag in hashtags] if hashtags else [] return hashtags
[ "def", "get_hashtags", "(", "tweet", ")", ":", "entities", "=", "get_entities", "(", "tweet", ")", "hashtags", "=", "entities", ".", "get", "(", "\"hashtags\"", ")", "hashtags", "=", "[", "tag", "[", "\"text\"", "]", "for", "tag", "in", "hashtags", "]", "if", "hashtags", "else", "[", "]", "return", "hashtags" ]
Get a list of hashtags in the Tweet Note that in the case of a quote-tweet, this does not return the hashtags in the quoted status. Args: tweet (Tweet or dict): A Tweet object or dictionary Returns: list (a list of strings): list of all of the hashtags in the Tweet Example: >>> from tweet_parser.getter_methods.tweet_entities import get_hashtags >>> original = {"created_at": "Wed May 24 20:17:19 +0000 2017", ... "entities": {"hashtags": [{"text":"1hashtag"}]}} >>> get_hashtags(original) ['1hashtag'] >>> activity = {"postedTime": "2017-05-24T20:17:19.000Z", ... "verb": "post", ... "twitter_entities": {"hashtags": [ ... {"text":"1hashtag"}, ... {"text": "moreHashtags"}]}} >>> get_hashtags(activity) ['1hashtag', 'moreHashtags']
[ "Get", "a", "list", "of", "hashtags", "in", "the", "Tweet", "Note", "that", "in", "the", "case", "of", "a", "quote", "-", "tweet", "this", "does", "not", "return", "the", "hashtags", "in", "the", "quoted", "status", "." ]
python
train
36.225806
programa-stic/barf-project
barf/arch/x86/translator.py
https://github.com/programa-stic/barf-project/blob/18ed9e5eace55f7bf6015ec57f037c364099021c/barf/arch/x86/translator.py#L261-L322
def resolve_memory_access(self, tb, x86_mem_operand): """Return operand memory access translation. """ size = self.__get_memory_access_size(x86_mem_operand) addr = None if x86_mem_operand.base: addr = ReilRegisterOperand(x86_mem_operand.base, size) if x86_mem_operand.index and x86_mem_operand.scale != 0x0: index = ReilRegisterOperand(x86_mem_operand.index, size) scale = ReilImmediateOperand(x86_mem_operand.scale, size) scaled_index = tb.temporal(size) tb.add(tb._builder.gen_mul(index, scale, scaled_index)) if addr: tmp = tb.temporal(size) tb.add(tb._builder.gen_add(addr, scaled_index, tmp)) addr = tmp else: addr = scaled_index if x86_mem_operand.displacement != 0x0: disp = ReilImmediateOperand(x86_mem_operand.displacement, size) if addr: tmp = tb.temporal(size) tb.add(tb._builder.gen_add(addr, disp, tmp)) addr = tmp else: addr = disp else: if not addr: disp = ReilImmediateOperand(x86_mem_operand.displacement, size) addr = disp # TODO Improve this code and add support for the rest of the segments. if x86_mem_operand.segment in ["gs", "fs"]: seg_base_addr_map = { "gs": "gs_base_addr", "fs": "fs_base_addr", } seg_base = ReilRegisterOperand(seg_base_addr_map[x86_mem_operand.segment], size) if addr: tmp = tb.temporal(size) tb.add(tb._builder.gen_add(addr, seg_base, tmp)) addr = tmp else: addr = seg_base return addr
[ "def", "resolve_memory_access", "(", "self", ",", "tb", ",", "x86_mem_operand", ")", ":", "size", "=", "self", ".", "__get_memory_access_size", "(", "x86_mem_operand", ")", "addr", "=", "None", "if", "x86_mem_operand", ".", "base", ":", "addr", "=", "ReilRegisterOperand", "(", "x86_mem_operand", ".", "base", ",", "size", ")", "if", "x86_mem_operand", ".", "index", "and", "x86_mem_operand", ".", "scale", "!=", "0x0", ":", "index", "=", "ReilRegisterOperand", "(", "x86_mem_operand", ".", "index", ",", "size", ")", "scale", "=", "ReilImmediateOperand", "(", "x86_mem_operand", ".", "scale", ",", "size", ")", "scaled_index", "=", "tb", ".", "temporal", "(", "size", ")", "tb", ".", "add", "(", "tb", ".", "_builder", ".", "gen_mul", "(", "index", ",", "scale", ",", "scaled_index", ")", ")", "if", "addr", ":", "tmp", "=", "tb", ".", "temporal", "(", "size", ")", "tb", ".", "add", "(", "tb", ".", "_builder", ".", "gen_add", "(", "addr", ",", "scaled_index", ",", "tmp", ")", ")", "addr", "=", "tmp", "else", ":", "addr", "=", "scaled_index", "if", "x86_mem_operand", ".", "displacement", "!=", "0x0", ":", "disp", "=", "ReilImmediateOperand", "(", "x86_mem_operand", ".", "displacement", ",", "size", ")", "if", "addr", ":", "tmp", "=", "tb", ".", "temporal", "(", "size", ")", "tb", ".", "add", "(", "tb", ".", "_builder", ".", "gen_add", "(", "addr", ",", "disp", ",", "tmp", ")", ")", "addr", "=", "tmp", "else", ":", "addr", "=", "disp", "else", ":", "if", "not", "addr", ":", "disp", "=", "ReilImmediateOperand", "(", "x86_mem_operand", ".", "displacement", ",", "size", ")", "addr", "=", "disp", "# TODO Improve this code and add support for the rest of the segments.", "if", "x86_mem_operand", ".", "segment", "in", "[", "\"gs\"", ",", "\"fs\"", "]", ":", "seg_base_addr_map", "=", "{", "\"gs\"", ":", "\"gs_base_addr\"", ",", "\"fs\"", ":", "\"fs_base_addr\"", ",", "}", "seg_base", "=", "ReilRegisterOperand", "(", "seg_base_addr_map", "[", "x86_mem_operand", ".", "segment", "]", ",", "size", ")", "if", "addr", ":", "tmp", "=", "tb", ".", "temporal", "(", "size", ")", "tb", ".", "add", "(", "tb", ".", "_builder", ".", "gen_add", "(", "addr", ",", "seg_base", ",", "tmp", ")", ")", "addr", "=", "tmp", "else", ":", "addr", "=", "seg_base", "return", "addr" ]
Return operand memory access translation.
[ "Return", "operand", "memory", "access", "translation", "." ]
python
train
29.419355
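resolve_memory_access above emits REIL adds and a multiply for an x86 memory operand. The arithmetic it lowers is the standard effective-address formula, which a few lines of plain Python can sanity-check (the keyword names here are illustrative, not the translator's API):

def effective_address(base=0, index=0, scale=0, displacement=0, seg_base=0):
    """EA = base + index*scale + displacement (+ fs/gs segment base)."""
    addr = base
    if scale:                       # scaled index is only added when scale != 0
        addr += index * scale
    addr += displacement
    return addr + seg_base

# mov eax, [ebx + esi*4 + 0x10] with ebx=0x1000, esi=3
assert effective_address(base=0x1000, index=3, scale=4, displacement=0x10) == 0x101c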
abseil/abseil-py
absl/logging/__init__.py
https://github.com/abseil/abseil-py/blob/9d73fdaa23a6b6726aa5731390f388c0c6250ee5/absl/logging/__init__.py#L389-L411
def _seconds_have_elapsed(token, num_seconds): """Tests if 'num_seconds' have passed since 'token' was requested. Not strictly thread-safe - may log with the wrong frequency if called concurrently from multiple threads. Accuracy depends on resolution of 'timeit.default_timer()'. Always returns True on the first call for a given 'token'. Args: token: The token for which to look up the count. num_seconds: The number of seconds to test for. Returns: Whether it has been >= 'num_seconds' since 'token' was last requested. """ now = timeit.default_timer() then = _log_timer_per_token.get(token, None) if then is None or (now - then) >= num_seconds: _log_timer_per_token[token] = now return True else: return False
[ "def", "_seconds_have_elapsed", "(", "token", ",", "num_seconds", ")", ":", "now", "=", "timeit", ".", "default_timer", "(", ")", "then", "=", "_log_timer_per_token", ".", "get", "(", "token", ",", "None", ")", "if", "then", "is", "None", "or", "(", "now", "-", "then", ")", ">=", "num_seconds", ":", "_log_timer_per_token", "[", "token", "]", "=", "now", "return", "True", "else", ":", "return", "False" ]
Tests if 'num_seconds' have passed since 'token' was requested. Not strictly thread-safe - may log with the wrong frequency if called concurrently from multiple threads. Accuracy depends on resolution of 'timeit.default_timer()'. Always returns True on the first call for a given 'token'. Args: token: The token for which to look up the count. num_seconds: The number of seconds to test for. Returns: Whether it has been >= 'num_seconds' since 'token' was last requested.
[ "Tests", "if", "num_seconds", "have", "passed", "since", "token", "was", "requested", "." ]
python
train
32.26087
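The helper above is the core of once-per-N-seconds logging: one stored timestamp per token, and the first call for a token always passes. A self-contained sketch of the same pattern (the module-level dict name is an assumption):

import timeit

_last_seen = {}

def seconds_have_elapsed(token, num_seconds):
    now = timeit.default_timer()
    then = _last_seen.get(token)
    if then is None or (now - then) >= num_seconds:
        _last_seen[token] = now   # not thread-safe, same caveat as the original
        return True
    return False

for _ in range(3):
    print(seconds_have_elapsed("startup", 60))   # True, False, False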
jasonrbriggs/stomp.py
stomp/utils.py
https://github.com/jasonrbriggs/stomp.py/blob/643843c5fbf25fd24339dd0e69a9411c3d8b94c7/stomp/utils.py#L103-L122
def parse_headers(lines, offset=0): """ Parse the headers in a STOMP response :param list(str) lines: the lines received in the message response :param int offset: the starting line number :rtype: dict(str,str) """ headers = {} for header_line in lines[offset:]: header_match = HEADER_LINE_RE.match(header_line) if header_match: key = header_match.group('key') key = re.sub(r'\\.', _unescape_header, key) if key not in headers: value = header_match.group('value') value = re.sub(r'\\.', _unescape_header, value) headers[key] = value return headers
[ "def", "parse_headers", "(", "lines", ",", "offset", "=", "0", ")", ":", "headers", "=", "{", "}", "for", "header_line", "in", "lines", "[", "offset", ":", "]", ":", "header_match", "=", "HEADER_LINE_RE", ".", "match", "(", "header_line", ")", "if", "header_match", ":", "key", "=", "header_match", ".", "group", "(", "'key'", ")", "key", "=", "re", ".", "sub", "(", "r'\\\\.'", ",", "_unescape_header", ",", "key", ")", "if", "key", "not", "in", "headers", ":", "value", "=", "header_match", ".", "group", "(", "'value'", ")", "value", "=", "re", ".", "sub", "(", "r'\\\\.'", ",", "_unescape_header", ",", "value", ")", "headers", "[", "key", "]", "=", "value", "return", "headers" ]
Parse the headers in a STOMP response :param list(str) lines: the lines received in the message response :param int offset: the starting line number :rtype: dict(str,str)
[ "Parse", "the", "headers", "in", "a", "STOMP", "response" ]
python
train
33.3
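parse_headers above pairs a key:value regex with backslash unescaping and keeps only the first value seen for a repeated key, as the STOMP spec requires. A compact stand-in showing the same first-wins parse — the regex and escape table here are assumptions, since the library's HEADER_LINE_RE and _unescape_header are not shown:

import re

HEADER_LINE_RE = re.compile(r'(?P<key>[^:]+):(?P<value>.*)')   # assumed shape
_UNESCAPES = {r'\n': '\n', r'\c': ':', r'\\': '\\'}            # assumed table

def parse_headers(lines, offset=0):
    headers = {}
    for line in lines[offset:]:
        m = HEADER_LINE_RE.match(line)
        if not m:
            continue
        key = m.group('key')
        if key not in headers:              # first value wins per STOMP spec
            value = m.group('value')
            for esc, raw in _UNESCAPES.items():
                value = value.replace(esc, raw)
            headers[key] = value
    return headers

print(parse_headers(['MESSAGE', 'destination:/queue/a', 'destination:/queue/b'], offset=1))
# {'destination': '/queue/a'}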
ejeschke/ginga
ginga/rv/plugins/ColorMapPicker.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/rv/plugins/ColorMapPicker.py#L168-L215
def rebuild_cmaps(self): """Builds a color RGB image containing color bars of all the possible color maps and their labels. """ self.logger.info("building color maps image") ht, wd, sep = self._cmht, self._cmwd, self._cmsep viewer = self.p_view # put the canvas into pick mode canvas = viewer.get_canvas() canvas.delete_all_objects() # get the list of color maps cm_names = self.cm_names num_cmaps = len(cm_names) viewer.configure_surface(500, (ht + sep) * num_cmaps) # create a bunch of color bars and make one large compound object # with callbacks for clicking on individual color bars l2 = [] ColorBar = canvas.get_draw_class('drawablecolorbar') Text = canvas.get_draw_class('text') #ch_rgbmap = chviewer.get_rgbmap() #dist = ch_rgbmap.get_dist() dist = None #imap = ch_rgbmap.get_imap() logger = viewer.get_logger() for i, name in enumerate(cm_names): rgbmap = RGBMap.RGBMapper(logger, dist=dist) rgbmap.set_cmap(cmap.get_cmap(name)) #rgbmap.set_imap(imap) x1, y1 = self._cmxoff, i * (ht + sep) x2, y2 = x1 + wd, y1 + ht cbar = ColorBar(x1, y1, x2, y2, cm_name=name, showrange=False, rgbmap=rgbmap, coord='window') l2.append(cbar) l2.append(Text(x2 + sep, y2, name, color='white', fontsize=16, coord='window')) Compound = canvas.get_draw_class('compoundobject') obj = Compound(*l2) canvas.add(obj) self._max_y = y2 rgb_img = self.p_view.get_image_as_array() self.r_image.set_data(rgb_img)
[ "def", "rebuild_cmaps", "(", "self", ")", ":", "self", ".", "logger", ".", "info", "(", "\"building color maps image\"", ")", "ht", ",", "wd", ",", "sep", "=", "self", ".", "_cmht", ",", "self", ".", "_cmwd", ",", "self", ".", "_cmsep", "viewer", "=", "self", ".", "p_view", "# put the canvas into pick mode", "canvas", "=", "viewer", ".", "get_canvas", "(", ")", "canvas", ".", "delete_all_objects", "(", ")", "# get the list of color maps", "cm_names", "=", "self", ".", "cm_names", "num_cmaps", "=", "len", "(", "cm_names", ")", "viewer", ".", "configure_surface", "(", "500", ",", "(", "ht", "+", "sep", ")", "*", "num_cmaps", ")", "# create a bunch of color bars and make one large compound object", "# with callbacks for clicking on individual color bars", "l2", "=", "[", "]", "ColorBar", "=", "canvas", ".", "get_draw_class", "(", "'drawablecolorbar'", ")", "Text", "=", "canvas", ".", "get_draw_class", "(", "'text'", ")", "#ch_rgbmap = chviewer.get_rgbmap()", "#dist = ch_rgbmap.get_dist()", "dist", "=", "None", "#imap = ch_rgbmap.get_imap()", "logger", "=", "viewer", ".", "get_logger", "(", ")", "for", "i", ",", "name", "in", "enumerate", "(", "cm_names", ")", ":", "rgbmap", "=", "RGBMap", ".", "RGBMapper", "(", "logger", ",", "dist", "=", "dist", ")", "rgbmap", ".", "set_cmap", "(", "cmap", ".", "get_cmap", "(", "name", ")", ")", "#rgbmap.set_imap(imap)", "x1", ",", "y1", "=", "self", ".", "_cmxoff", ",", "i", "*", "(", "ht", "+", "sep", ")", "x2", ",", "y2", "=", "x1", "+", "wd", ",", "y1", "+", "ht", "cbar", "=", "ColorBar", "(", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "cm_name", "=", "name", ",", "showrange", "=", "False", ",", "rgbmap", "=", "rgbmap", ",", "coord", "=", "'window'", ")", "l2", ".", "append", "(", "cbar", ")", "l2", ".", "append", "(", "Text", "(", "x2", "+", "sep", ",", "y2", ",", "name", ",", "color", "=", "'white'", ",", "fontsize", "=", "16", ",", "coord", "=", "'window'", ")", ")", "Compound", "=", "canvas", ".", "get_draw_class", "(", "'compoundobject'", ")", "obj", "=", "Compound", "(", "*", "l2", ")", "canvas", ".", "add", "(", "obj", ")", "self", ".", "_max_y", "=", "y2", "rgb_img", "=", "self", ".", "p_view", ".", "get_image_as_array", "(", ")", "self", ".", "r_image", ".", "set_data", "(", "rgb_img", ")" ]
Builds a color RGB image containing color bars of all the possible color maps and their labels.
[ "Builds", "a", "color", "RGB", "image", "containing", "color", "bars", "of", "all", "the", "possible", "color", "maps", "and", "their", "labels", "." ]
python
train
36.333333
jtpaasch/simplygithub
simplygithub/branches.py
https://github.com/jtpaasch/simplygithub/blob/b77506275ec276ce90879bf1ea9299a79448b903/simplygithub/branches.py#L72-L95
def create_branch(profile, name, branch_off): """Create a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the new branch. branch_off The name of a branch to create the new branch off of. Returns: A dict with data about the new branch. """ branch_off_sha = get_branch_sha(profile, branch_off) ref = "heads/" + name data = refs.create_ref(profile, ref, branch_off_sha) return data
[ "def", "create_branch", "(", "profile", ",", "name", ",", "branch_off", ")", ":", "branch_off_sha", "=", "get_branch_sha", "(", "profile", ",", "branch_off", ")", "ref", "=", "\"heads/\"", "+", "name", "data", "=", "refs", ".", "create_ref", "(", "profile", ",", "ref", ",", "branch_off_sha", ")", "return", "data" ]
Create a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. name The name of the new branch. branch_off The name of a branch to create the new branch off of. Returns: A dict with data about the new branch.
[ "Create", "a", "branch", "." ]
python
train
26.875
assamite/creamas
creamas/ds.py
https://github.com/assamite/creamas/blob/54dc3e31c97a3f938e58272f8ab80b6bcafeff58/creamas/ds.py#L262-L280
def get_slave_managers(self, as_coro=False):
        """Return all slave environment manager addresses.

        :param bool as_coro:
            If ``True`` returns awaitable coroutine, otherwise runs the calls
            to the slave managers asynchronously in the event loop.

        This method returns the addresses of the true slave environment
        managers, i.e. managers derived from :class:`~creamas.mp.EnvManager`,
        not multi-environment managers. For example, if this node environment
        has two nodes with four slave environments in each, then this method
        returns 8 addresses.
        """
        async def slave_task(addr):
            r_manager = await self.env.connect(addr)
            return await r_manager.get_slave_managers()

        tasks = create_tasks(slave_task, self.addrs)
        return run_or_coro(tasks, as_coro)
[ "def", "get_slave_managers", "(", "self", ",", "as_coro", "=", "False", ")", ":", "async", "def", "slave_task", "(", "addr", ")", ":", "r_manager", "=", "await", "self", ".", "env", ".", "connect", "(", "addr", ")", "return", "await", "r_manager", ".", "get_slave_managers", "(", ")", "tasks", "=", "create_tasks", "(", "slave_task", ",", "self", ".", "addrs", ")", "return", "run_or_coro", "(", "tasks", ",", "as_coro", ")" ]
Return all slave environment manager addresses.

:param bool as_coro:
    If ``True`` returns awaitable coroutine, otherwise runs the calls
    to the slave managers asynchronously in the event loop.

This method returns the addresses of the true slave environment
managers, i.e. managers derived from :class:`~creamas.mp.EnvManager`,
not multi-environment managers. For example, if this node environment
has two nodes with four slave environments in each, then this method
returns 8 addresses.
[ "Return", "all", "slave", "environment", "manager", "addresses", "." ]
python
train
44.789474
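get_slave_managers above fans one remote call out to every node manager and flattens the per-node results. The same gather-then-flatten shape in plain asyncio, with a sleep standing in for the connect-and-RPC plumbing:

import asyncio

async def query_manager(addr):
    await asyncio.sleep(0)   # stand-in for connect + remote get_slave_managers()
    return [f"{addr}/slave0", f"{addr}/slave1"]

async def get_all_slaves(addrs):
    nested = await asyncio.gather(*(query_manager(a) for a in addrs))
    return [s for group in nested for s in group]   # flatten per-node lists

print(asyncio.run(get_all_slaves(["tcp://node1:5555", "tcp://node2:5555"])))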
dwavesystems/dwave-cloud-client
dwave/cloud/client.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/client.py#L965-L1025
def _do_submit_problems(self): """Pull problems from the submission queue and submit them. Note: This method is always run inside of a daemon thread. """ try: while True: # Pull as many problems as we can, block on the first one, # but once we have one problem, switch to non-blocking then # submit without blocking again. # `None` task is used to signal thread termination item = self._submission_queue.get() if item is None: break ready_problems = [item] while len(ready_problems) < self._SUBMIT_BATCH_SIZE: try: ready_problems.append(self._submission_queue.get_nowait()) except queue.Empty: break # Submit the problems _LOGGER.debug("Submitting %d problems", len(ready_problems)) body = '[' + ','.join(mess.body for mess in ready_problems) + ']' try: try: response = self.session.post(posixpath.join(self.endpoint, 'problems/'), body) localtime_of_response = epochnow() except requests.exceptions.Timeout: raise RequestTimeout if response.status_code == 401: raise SolverAuthenticationError() response.raise_for_status() message = response.json() _LOGGER.debug("Finished submitting %d problems", len(ready_problems)) except BaseException as exception: _LOGGER.debug("Submit failed for %d problems", len(ready_problems)) if not isinstance(exception, SolverAuthenticationError): exception = IOError(exception) for mess in ready_problems: mess.future._set_error(exception, sys.exc_info()) self._submission_queue.task_done() continue # Pass on the information for submission, res in zip(ready_problems, message): submission.future._set_clock_diff(response, localtime_of_response) self._handle_problem_status(res, submission.future) self._submission_queue.task_done() # this is equivalent to a yield to scheduler in other threading libraries time.sleep(0) except BaseException as err: _LOGGER.exception(err)
[ "def", "_do_submit_problems", "(", "self", ")", ":", "try", ":", "while", "True", ":", "# Pull as many problems as we can, block on the first one,", "# but once we have one problem, switch to non-blocking then", "# submit without blocking again.", "# `None` task is used to signal thread termination", "item", "=", "self", ".", "_submission_queue", ".", "get", "(", ")", "if", "item", "is", "None", ":", "break", "ready_problems", "=", "[", "item", "]", "while", "len", "(", "ready_problems", ")", "<", "self", ".", "_SUBMIT_BATCH_SIZE", ":", "try", ":", "ready_problems", ".", "append", "(", "self", ".", "_submission_queue", ".", "get_nowait", "(", ")", ")", "except", "queue", ".", "Empty", ":", "break", "# Submit the problems", "_LOGGER", ".", "debug", "(", "\"Submitting %d problems\"", ",", "len", "(", "ready_problems", ")", ")", "body", "=", "'['", "+", "','", ".", "join", "(", "mess", ".", "body", "for", "mess", "in", "ready_problems", ")", "+", "']'", "try", ":", "try", ":", "response", "=", "self", ".", "session", ".", "post", "(", "posixpath", ".", "join", "(", "self", ".", "endpoint", ",", "'problems/'", ")", ",", "body", ")", "localtime_of_response", "=", "epochnow", "(", ")", "except", "requests", ".", "exceptions", ".", "Timeout", ":", "raise", "RequestTimeout", "if", "response", ".", "status_code", "==", "401", ":", "raise", "SolverAuthenticationError", "(", ")", "response", ".", "raise_for_status", "(", ")", "message", "=", "response", ".", "json", "(", ")", "_LOGGER", ".", "debug", "(", "\"Finished submitting %d problems\"", ",", "len", "(", "ready_problems", ")", ")", "except", "BaseException", "as", "exception", ":", "_LOGGER", ".", "debug", "(", "\"Submit failed for %d problems\"", ",", "len", "(", "ready_problems", ")", ")", "if", "not", "isinstance", "(", "exception", ",", "SolverAuthenticationError", ")", ":", "exception", "=", "IOError", "(", "exception", ")", "for", "mess", "in", "ready_problems", ":", "mess", ".", "future", ".", "_set_error", "(", "exception", ",", "sys", ".", "exc_info", "(", ")", ")", "self", ".", "_submission_queue", ".", "task_done", "(", ")", "continue", "# Pass on the information", "for", "submission", ",", "res", "in", "zip", "(", "ready_problems", ",", "message", ")", ":", "submission", ".", "future", ".", "_set_clock_diff", "(", "response", ",", "localtime_of_response", ")", "self", ".", "_handle_problem_status", "(", "res", ",", "submission", ".", "future", ")", "self", ".", "_submission_queue", ".", "task_done", "(", ")", "# this is equivalent to a yield to scheduler in other threading libraries", "time", ".", "sleep", "(", "0", ")", "except", "BaseException", "as", "err", ":", "_LOGGER", ".", "exception", "(", "err", ")" ]
Pull problems from the submission queue and submit them. Note: This method is always run inside of a daemon thread.
[ "Pull", "problems", "from", "the", "submission", "queue", "and", "submit", "them", "." ]
python
train
43.262295
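The worker above uses a standard batching idiom: block on the first queue item, then drain more with get_nowait up to a batch cap, so a burst of submissions becomes one POST. The idiom in isolation, as a sketch — the None sentinel and the batch cap mirror the code above, while the mid-batch sentinel check is an addition:

import queue

def drain_batches(q, batch_size=3):
    while True:
        item = q.get()              # block for the first item
        if item is None:            # sentinel: shut the worker down
            return
        batch = [item]
        while len(batch) < batch_size:
            try:
                nxt = q.get_nowait()   # opportunistic, non-blocking drain
            except queue.Empty:
                break
            if nxt is None:            # sentinel may arrive mid-batch
                yield batch
                return
            batch.append(nxt)
        yield batch

q = queue.Queue()
for x in [1, 2, 3, 4, 5, None]:
    q.put(x)
print(list(drain_batches(q)))       # [[1, 2, 3], [4, 5]]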
optimizely/python-sdk
optimizely/optimizely.py
https://github.com/optimizely/python-sdk/blob/ec028d9efcf22498c3820f2650fa10f5c30bec90/optimizely/optimizely.py#L112-L134
def _validate_user_inputs(self, attributes=None, event_tags=None): """ Helper method to validate user inputs. Args: attributes: Dict representing user attributes. event_tags: Dict representing metadata associated with an event. Returns: Boolean True if inputs are valid. False otherwise. """ if attributes and not validator.are_attributes_valid(attributes): self.logger.error('Provided attributes are in an invalid format.') self.error_handler.handle_error(exceptions.InvalidAttributeException(enums.Errors.INVALID_ATTRIBUTE_FORMAT)) return False if event_tags and not validator.are_event_tags_valid(event_tags): self.logger.error('Provided event tags are in an invalid format.') self.error_handler.handle_error(exceptions.InvalidEventTagException(enums.Errors.INVALID_EVENT_TAG_FORMAT)) return False return True
[ "def", "_validate_user_inputs", "(", "self", ",", "attributes", "=", "None", ",", "event_tags", "=", "None", ")", ":", "if", "attributes", "and", "not", "validator", ".", "are_attributes_valid", "(", "attributes", ")", ":", "self", ".", "logger", ".", "error", "(", "'Provided attributes are in an invalid format.'", ")", "self", ".", "error_handler", ".", "handle_error", "(", "exceptions", ".", "InvalidAttributeException", "(", "enums", ".", "Errors", ".", "INVALID_ATTRIBUTE_FORMAT", ")", ")", "return", "False", "if", "event_tags", "and", "not", "validator", ".", "are_event_tags_valid", "(", "event_tags", ")", ":", "self", ".", "logger", ".", "error", "(", "'Provided event tags are in an invalid format.'", ")", "self", ".", "error_handler", ".", "handle_error", "(", "exceptions", ".", "InvalidEventTagException", "(", "enums", ".", "Errors", ".", "INVALID_EVENT_TAG_FORMAT", ")", ")", "return", "False", "return", "True" ]
Helper method to validate user inputs. Args: attributes: Dict representing user attributes. event_tags: Dict representing metadata associated with an event. Returns: Boolean True if inputs are valid. False otherwise.
[ "Helper", "method", "to", "validate", "user", "inputs", "." ]
python
train
38.173913
tensorflow/datasets
tensorflow_datasets/core/features/feature.py
https://github.com/tensorflow/datasets/blob/46ceb0cf7b4690f38ecbbc689e4d659a903d08dc/tensorflow_datasets/core/features/feature.py#L654-L657
def _assert_keys_match(keys1, keys2): """Ensure the two list of keys matches.""" if set(keys1) != set(keys2): raise ValueError('{} {}'.format(list(keys1), list(keys2)))
[ "def", "_assert_keys_match", "(", "keys1", ",", "keys2", ")", ":", "if", "set", "(", "keys1", ")", "!=", "set", "(", "keys2", ")", ":", "raise", "ValueError", "(", "'{} {}'", ".", "format", "(", "list", "(", "keys1", ")", ",", "list", "(", "keys2", ")", ")", ")" ]
Ensure the two list of keys matches.
[ "Ensure", "the", "two", "list", "of", "keys", "matches", "." ]
python
train
43.25
Jammy2211/PyAutoLens
autolens/data/array/grids.py
https://github.com/Jammy2211/PyAutoLens/blob/91e50369c7a9c048c83d217625578b72423cd5a7/autolens/data/array/grids.py#L1034-L1067
def relocated_grid_from_grid_jit(grid, border_grid):
    """ Relocate the coordinates of a grid to its border if they are outside the border. This is performed as \
    follows:

    1) Use the mean value of the grid's y and x coordinates to determine the origin of the grid.

    2) Compute the radial distance of every grid coordinate from the origin.

    3) For every coordinate, find its nearest pixel in the border.

    4) Determine if it is outside the border, by comparing its radial distance from the origin to its paired \
    border pixel's radial distance.

    5) If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border \
    (if it's inside the border, do nothing).
    """
    border_origin = np.zeros(2)
    border_origin[0] = np.mean(border_grid[:, 0])
    border_origin[1] = np.mean(border_grid[:, 1])
    border_grid_radii = np.sqrt(np.add(np.square(np.subtract(border_grid[:, 0], border_origin[0])),
                                       np.square(np.subtract(border_grid[:, 1], border_origin[1]))))
    border_min_radii = np.min(border_grid_radii)

    grid_radii = np.sqrt(np.add(np.square(np.subtract(grid[:, 0], border_origin[0])),
                                np.square(np.subtract(grid[:, 1], border_origin[1]))))

    for pixel_index in range(grid.shape[0]):

        if grid_radii[pixel_index] > border_min_radii:

            closest_pixel_index = np.argmin(np.square(grid[pixel_index, 0] - border_grid[:, 0]) +
                                            np.square(grid[pixel_index, 1] - border_grid[:, 1]))

            move_factor = border_grid_radii[closest_pixel_index] / grid_radii[pixel_index]

            if move_factor < 1.0:
                grid[pixel_index, :] = move_factor * (grid[pixel_index, :] - border_origin[:]) + border_origin[:]

    return grid
[ "def", "relocated_grid_from_grid_jit", "(", "grid", ",", "border_grid", ")", ":", "border_origin", "=", "np", ".", "zeros", "(", "2", ")", "border_origin", "[", "0", "]", "=", "np", ".", "mean", "(", "border_grid", "[", ":", ",", "0", "]", ")", "border_origin", "[", "1", "]", "=", "np", ".", "mean", "(", "border_grid", "[", ":", ",", "1", "]", ")", "border_grid_radii", "=", "np", ".", "sqrt", "(", "np", ".", "add", "(", "np", ".", "square", "(", "np", ".", "subtract", "(", "border_grid", "[", ":", ",", "0", "]", ",", "border_origin", "[", "0", "]", ")", ")", ",", "np", ".", "square", "(", "np", ".", "subtract", "(", "border_grid", "[", ":", ",", "1", "]", ",", "border_origin", "[", "1", "]", ")", ")", ")", ")", "border_min_radii", "=", "np", ".", "min", "(", "border_grid_radii", ")", "grid_radii", "=", "np", ".", "sqrt", "(", "np", ".", "add", "(", "np", ".", "square", "(", "np", ".", "subtract", "(", "grid", "[", ":", ",", "0", "]", ",", "border_origin", "[", "0", "]", ")", ")", ",", "np", ".", "square", "(", "np", ".", "subtract", "(", "grid", "[", ":", ",", "1", "]", ",", "border_origin", "[", "1", "]", ")", ")", ")", ")", "for", "pixel_index", "in", "range", "(", "grid", ".", "shape", "[", "0", "]", ")", ":", "if", "grid_radii", "[", "pixel_index", "]", ">", "border_min_radii", ":", "closest_pixel_index", "=", "np", ".", "argmin", "(", "np", ".", "square", "(", "grid", "[", "pixel_index", ",", "0", "]", "-", "border_grid", "[", ":", ",", "0", "]", ")", "+", "np", ".", "square", "(", "grid", "[", "pixel_index", ",", "1", "]", "-", "border_grid", "[", ":", ",", "1", "]", ")", ")", "move_factor", "=", "border_grid_radii", "[", "closest_pixel_index", "]", "/", "grid_radii", "[", "pixel_index", "]", "if", "move_factor", "<", "1.0", ":", "grid", "[", "pixel_index", ",", ":", "]", "=", "move_factor", "*", "(", "grid", "[", "pixel_index", ",", ":", "]", "-", "border_origin", "[", ":", "]", ")", "+", "border_origin", "[", ":", "]", "return", "grid" ]
Relocate the coordinates of a grid to its border if they are outside the border. This is performed as \
follows:

1) Use the mean value of the grid's y and x coordinates to determine the origin of the grid.

2) Compute the radial distance of every grid coordinate from the origin.

3) For every coordinate, find its nearest pixel in the border.

4) Determine if it is outside the border, by comparing its radial distance from the origin to its paired \
border pixel's radial distance.

5) If its radial distance is larger, use the ratio of radial distances to move the coordinate to the border \
(if it's inside the border, do nothing).
[ "Relocate", "the", "coordinates", "of", "a", "grid", "to", "its", "border", "if", "they", "are", "outside", "the", "border", ".", "This", "is", "performed", "as", "\\", "follows", ":" ]
python
valid
56.411765
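The jitted relocation above is pure geometry: measure each point's radius from the border centroid, find its nearest border pixel, and scale the point inward when its radius exceeds that pixel's. A small numpy sketch of the same steps, assuming a unit-circle border for the demo:

import numpy as np

def relocate_to_border(points, border):
    origin = border.mean(axis=0)
    border_r = np.linalg.norm(border - origin, axis=1)
    out = points.copy()
    for i, p in enumerate(points):
        r = np.linalg.norm(p - origin)
        if r > border_r.min():                          # only these can be outside
            nearest = np.argmin(((border - p) ** 2).sum(axis=1))
            factor = border_r[nearest] / r              # <1 means p overshoots the border
            if factor < 1.0:
                out[i] = factor * (p - origin) + origin
    return out

border = np.array([[np.cos(t), np.sin(t)] for t in np.linspace(0, 2 * np.pi, 100)])
print(relocate_to_border(np.array([[0.2, 0.1], [3.0, 0.0]]), border))
# first point untouched, second pulled (approximately) onto the unit circle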
trailofbits/manticore
manticore/native/cpu/x86.py
https://github.com/trailofbits/manticore/blob/54c5a15b1119c523ae54c09972413e8b97f11629/manticore/native/cpu/x86.py#L1013-L1059
def AAA(cpu):
    """
    ASCII adjust after addition.

    Adjusts the sum of two unpacked BCD values to create an unpacked BCD
    result. The AL register is the implied source and destination operand
    for this instruction. The AAA instruction is only useful when it follows
    an ADD instruction that adds (binary addition) two unpacked BCD values
    and stores a byte result in the AL register. The AAA instruction then
    adjusts the contents of the AL register to contain the correct 1-digit
    unpacked BCD result.
    If the addition produces a decimal carry, the AH register is incremented
    by 1, and the CF and AF flags are set. If there was no decimal carry,
    the CF and AF flags are cleared and the AH register is unchanged. In either
    case, bits 4 through 7 of the AL register are cleared to 0.
    This instruction executes as described in compatibility mode and legacy mode.
    It is not valid in 64-bit mode.
    ::
            IF ((AL AND 0FH) > 9) OR (AF = 1)
            THEN
                AL = (AL + 6);
                AH = AH + 1;
                AF = 1;
                CF = 1;
            ELSE
                AF = 0;
                CF = 0;
            FI;
            AL = AL AND 0FH;
    :param cpu: current CPU.
    """
    cpu.AF = Operators.OR(cpu.AL & 0x0F > 9, cpu.AF)
    cpu.CF = cpu.AF
    cpu.AH = Operators.ITEBV(8, cpu.AF, cpu.AH + 1, cpu.AH)
    cpu.AL = Operators.ITEBV(8, cpu.AF, cpu.AL + 6, cpu.AL)

    """
    if (cpu.AL & 0x0F > 9) or cpu.AF == 1:
        cpu.AL = cpu.AL + 6
        cpu.AH = cpu.AH + 1
        cpu.AF = True
        cpu.CF = True
    else:
        cpu.AF = False
        cpu.CF = False
    """
    cpu.AL = cpu.AL & 0x0f
[ "def", "AAA", "(", "cpu", ")", ":", "cpu", ".", "AF", "=", "Operators", ".", "OR", "(", "cpu", ".", "AL", "&", "0x0F", ">", "9", ",", "cpu", ".", "AF", ")", "cpu", ".", "CF", "=", "cpu", ".", "AF", "cpu", ".", "AH", "=", "Operators", ".", "ITEBV", "(", "8", ",", "cpu", ".", "AF", ",", "cpu", ".", "AH", "+", "1", ",", "cpu", ".", "AH", ")", "cpu", ".", "AL", "=", "Operators", ".", "ITEBV", "(", "8", ",", "cpu", ".", "AF", ",", "cpu", ".", "AL", "+", "6", ",", "cpu", ".", "AL", ")", "\"\"\"\n if (cpu.AL & 0x0F > 9) or cpu.AF == 1:\n cpu.AL = cpu.AL + 6\n cpu.AH = cpu.AH + 1\n cpu.AF = True\n cpu.CF = True\n else:\n cpu.AF = False\n cpu.CF = False\n \"\"\"", "cpu", ".", "AL", "=", "cpu", ".", "AL", "&", "0x0f" ]
ASCII adjust after addition.

Adjusts the sum of two unpacked BCD values to create an unpacked BCD
result. The AL register is the implied source and destination operand
for this instruction. The AAA instruction is only useful when it follows
an ADD instruction that adds (binary addition) two unpacked BCD values
and stores a byte result in the AL register. The AAA instruction then
adjusts the contents of the AL register to contain the correct 1-digit
unpacked BCD result.
If the addition produces a decimal carry, the AH register is incremented
by 1, and the CF and AF flags are set. If there was no decimal carry,
the CF and AF flags are cleared and the AH register is unchanged. In either
case, bits 4 through 7 of the AL register are cleared to 0.
This instruction executes as described in compatibility mode and legacy mode.
It is not valid in 64-bit mode.
::
        IF ((AL AND 0FH) > 9) OR (AF = 1)
        THEN
            AL = (AL + 6);
            AH = AH + 1;
            AF = 1;
            CF = 1;
        ELSE
            AF = 0;
            CF = 0;
        FI;
        AL = AL AND 0FH;
:param cpu: current CPU.
[ "ASCII", "adjust", "after", "addition", "." ]
python
valid
39.510638
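The symbolic AAA translation above mirrors the Intel pseudocode with ITE bitvectors. Its concrete semantics fit in a few lines of plain Python, which doubles as a check against the docstring's pseudocode:

def aaa(al, ah, af):
    """ASCII adjust AL after addition; returns (al, ah, af, cf)."""
    if (al & 0x0F) > 9 or af:
        al = (al + 6) & 0xFF
        ah = (ah + 1) & 0xFF
        af = cf = True
    else:
        af = cf = False
    return al & 0x0F, ah, af, cf   # bits 4-7 of AL are always cleared

# 8 + 7 = 15 -> AL=0x0F, which AAA turns into unpacked BCD digits 1 and 5
print(aaa(0x0F, 0x00, False))   # (5, 1, True, True)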
pyQode/pyqode.core
pyqode/core/managers/panels.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/managers/panels.py#L212-L232
def _update(self, rect, delta_y, force_update_margins=False): """ Updates panels """ helper = TextHelper(self.editor) if not self: return for zones_id, zone in self._panels.items(): if zones_id == Panel.Position.TOP or \ zones_id == Panel.Position.BOTTOM: continue panels = list(zone.values()) for panel in panels: if panel.scrollable and delta_y: panel.scroll(0, delta_y) line, col = helper.cursor_position() oline, ocol = self._cached_cursor_pos if line != oline or col != ocol or panel.scrollable: panel.update(0, rect.y(), panel.width(), rect.height()) self._cached_cursor_pos = helper.cursor_position() if (rect.contains(self.editor.viewport().rect()) or force_update_margins): self._update_viewport_margins()
[ "def", "_update", "(", "self", ",", "rect", ",", "delta_y", ",", "force_update_margins", "=", "False", ")", ":", "helper", "=", "TextHelper", "(", "self", ".", "editor", ")", "if", "not", "self", ":", "return", "for", "zones_id", ",", "zone", "in", "self", ".", "_panels", ".", "items", "(", ")", ":", "if", "zones_id", "==", "Panel", ".", "Position", ".", "TOP", "or", "zones_id", "==", "Panel", ".", "Position", ".", "BOTTOM", ":", "continue", "panels", "=", "list", "(", "zone", ".", "values", "(", ")", ")", "for", "panel", "in", "panels", ":", "if", "panel", ".", "scrollable", "and", "delta_y", ":", "panel", ".", "scroll", "(", "0", ",", "delta_y", ")", "line", ",", "col", "=", "helper", ".", "cursor_position", "(", ")", "oline", ",", "ocol", "=", "self", ".", "_cached_cursor_pos", "if", "line", "!=", "oline", "or", "col", "!=", "ocol", "or", "panel", ".", "scrollable", ":", "panel", ".", "update", "(", "0", ",", "rect", ".", "y", "(", ")", ",", "panel", ".", "width", "(", ")", ",", "rect", ".", "height", "(", ")", ")", "self", ".", "_cached_cursor_pos", "=", "helper", ".", "cursor_position", "(", ")", "if", "(", "rect", ".", "contains", "(", "self", ".", "editor", ".", "viewport", "(", ")", ".", "rect", "(", ")", ")", "or", "force_update_margins", ")", ":", "self", ".", "_update_viewport_margins", "(", ")" ]
Updates panels
[ "Updates", "panels" ]
python
train
45.761905
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mslink.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/mslink.py#L215-L227
def embedManifestDllCheck(target, source, env): """Function run by embedManifestDllCheckAction to check for existence of manifest and other conditions, and embed the manifest by calling embedManifestDllAction if so.""" if env.get('WINDOWS_EMBED_MANIFEST', 0): manifestSrc = target[0].get_abspath() + '.manifest' if os.path.exists(manifestSrc): ret = (embedManifestDllAction) ([target[0]],None,env) if ret: raise SCons.Errors.UserError("Unable to embed manifest into %s" % (target[0])) return ret else: print('(embed: no %s.manifest found; not embedding.)'%str(target[0])) return 0
[ "def", "embedManifestDllCheck", "(", "target", ",", "source", ",", "env", ")", ":", "if", "env", ".", "get", "(", "'WINDOWS_EMBED_MANIFEST'", ",", "0", ")", ":", "manifestSrc", "=", "target", "[", "0", "]", ".", "get_abspath", "(", ")", "+", "'.manifest'", "if", "os", ".", "path", ".", "exists", "(", "manifestSrc", ")", ":", "ret", "=", "(", "embedManifestDllAction", ")", "(", "[", "target", "[", "0", "]", "]", ",", "None", ",", "env", ")", "if", "ret", ":", "raise", "SCons", ".", "Errors", ".", "UserError", "(", "\"Unable to embed manifest into %s\"", "%", "(", "target", "[", "0", "]", ")", ")", "return", "ret", "else", ":", "print", "(", "'(embed: no %s.manifest found; not embedding.)'", "%", "str", "(", "target", "[", "0", "]", ")", ")", "return", "0" ]
Function run by embedManifestDllCheckAction to check for existence of manifest and other conditions, and embed the manifest by calling embedManifestDllAction if so.
[ "Function", "run", "by", "embedManifestDllCheckAction", "to", "check", "for", "existence", "of", "manifest", "and", "other", "conditions", "and", "embed", "the", "manifest", "by", "calling", "embedManifestDllAction", "if", "so", "." ]
python
train
52.307692
Celeo/Preston
preston/preston.py
https://github.com/Celeo/Preston/blob/7c94bf0b7dabecad0bd8b66229b2906dabdb8e79/preston/preston.py#L173-L201
def authenticate(self, code: str) -> 'Preston':
    """Authenticates using the code from the EVE SSO.

    A new Preston object is returned; this object is not modified.

    The intended usage is:

        auth = preston.authenticate('some_code_here')

    Args:
        code: SSO code

    Returns:
        new Preston, authenticated
    """
    headers = self._get_authorization_headers()
    data = {
        'grant_type': 'authorization_code',
        'code': code
    }
    r = self.session.post(self.TOKEN_URL, headers=headers, data=data)
    if not r.status_code == 200:
        raise Exception(f'Could not authenticate, got response code {r.status_code}')
    new_kwargs = dict(self._kwargs)
    response_data = r.json()
    new_kwargs['access_token'] = response_data['access_token']
    new_kwargs['access_expiration'] = time.time() + float(response_data['expires_in'])
    new_kwargs['refresh_token'] = response_data['refresh_token']
    return Preston(**new_kwargs)
[ "def", "authenticate", "(", "self", ",", "code", ":", "str", ")", "->", "'Preston'", ":", "headers", "=", "self", ".", "_get_authorization_headers", "(", ")", "data", "=", "{", "'grant_type'", ":", "'authorization_code'", ",", "'code'", ":", "code", "}", "r", "=", "self", ".", "session", ".", "post", "(", "self", ".", "TOKEN_URL", ",", "headers", "=", "headers", ",", "data", "=", "data", ")", "if", "not", "r", ".", "status_code", "==", "200", ":", "raise", "Exception", "(", "f'Could not authenticate, got repsonse code {r.status_code}'", ")", "new_kwargs", "=", "dict", "(", "self", ".", "_kwargs", ")", "response_data", "=", "r", ".", "json", "(", ")", "new_kwargs", "[", "'access_token'", "]", "=", "response_data", "[", "'access_token'", "]", "new_kwargs", "[", "'access_expiration'", "]", "=", "time", ".", "time", "(", ")", "+", "float", "(", "response_data", "[", "'expires_in'", "]", ")", "new_kwargs", "[", "'refresh_token'", "]", "=", "response_data", "[", "'refresh_token'", "]", "return", "Preston", "(", "*", "*", "new_kwargs", ")" ]
Authenticates using the code from the EVE SSO. A new Preston object is returned; this object is not modified. The intended usage is: auth = preston.authenticate('some_code_here') Args: code: SSO code Returns: new Preston, authenticated
[ "Authenticates", "using", "the", "code", "from", "the", "EVE", "SSO", "." ]
python
train
35.896552
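authenticate above is a textbook OAuth2 authorization-code exchange: POST the code with grant_type=authorization_code, then keep the access token, its computed expiry, and the refresh token. A generic sketch with requests — the token URL and the prepared Authorization header are placeholders, not EVE SSO specifics:

import time
import requests

TOKEN_URL = "https://auth.example.com/v2/oauth/token"   # placeholder endpoint

def exchange_code(code, basic_auth_header):
    resp = requests.post(
        TOKEN_URL,
        headers={"Authorization": basic_auth_header},   # e.g. "Basic <base64 id:secret>"
        data={"grant_type": "authorization_code", "code": code},
    )
    if resp.status_code != 200:
        raise RuntimeError(f"token exchange failed: {resp.status_code}")
    payload = resp.json()
    return {
        "access_token": payload["access_token"],
        "access_expiration": time.time() + float(payload["expires_in"]),
        "refresh_token": payload["refresh_token"],
    }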
WoLpH/python-statsd
statsd/timer.py
https://github.com/WoLpH/python-statsd/blob/a757da04375c48d03d322246405b33382d37f03f/statsd/timer.py#L50-L65
def send(self, subname, delta):
    '''Send the data to statsd via self.connection

    :keyword subname: The subname to report the data to (appended to the
        client name)
    :type subname: str
    :keyword delta: The time delta in seconds (end time minus start time,
        as measured by two ``time.time()`` calls) to report
    :type delta: float
    '''
    ms = delta * 1000
    if ms > self.min_send_threshold:
        name = self._get_name(self.name, subname)
        self.logger.info('%s: %0.08fms', name, ms)
        return statsd.Client._send(self, {name: '%0.08f|ms' % ms})
    else:
        return True
[ "def", "send", "(", "self", ",", "subname", ",", "delta", ")", ":", "ms", "=", "delta", "*", "1000", "if", "ms", ">", "self", ".", "min_send_threshold", ":", "name", "=", "self", ".", "_get_name", "(", "self", ".", "name", ",", "subname", ")", "self", ".", "logger", ".", "info", "(", "'%s: %0.08fms'", ",", "name", ",", "ms", ")", "return", "statsd", ".", "Client", ".", "_send", "(", "self", ",", "{", "name", ":", "'%0.08f|ms'", "%", "ms", "}", ")", "else", ":", "return", "True" ]
Send the data to statsd via self.connection

:keyword subname: The subname to report the data to (appended to the
    client name)
:type subname: str
:keyword delta: The time delta in seconds (end time minus start time,
    as measured by two ``time.time()`` calls) to report
:type delta: float
[ "Send", "the", "data", "to", "statsd", "via", "self", ".", "connection" ]
python
train
37.625
dereneaton/ipyrad
ipyrad/analysis/twiist.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/analysis/twiist.py#L129-L155
def run_tree_inference(self, nexus, idx):
    """
    Writes nexus to a tmpfile, runs raxml tree inference, and parses
    and returns the resulting tree.
    """
    ## create a tmpdir for this test
    tmpdir = tempfile.gettempdir()
    tmpfile = tempfile.NamedTemporaryFile(
        delete=False,
        prefix=str(idx),
        dir=tmpdir,
    )

    ## write nexus to tmpfile
    tmpfile.write(nexus)
    tmpfile.flush()

    ## infer the tree
    rax = raxml(name=str(idx), data=tmpfile.name, workdir=tmpdir, N=1, T=2)
    rax.run(force=True, block=True, quiet=True)

    ## clean up
    tmpfile.close()

    ## return tree order
    order = get_order(toytree.tree(rax.trees.bestTree))
    return "".join(order)
[ "def", "run_tree_inference", "(", "self", ",", "nexus", ",", "idx", ")", ":", "## create a tmpdir for this test", "tmpdir", "=", "tempfile", ".", "tempdir", "tmpfile", "=", "os", ".", "path", ".", "join", "(", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ",", "prefix", "=", "str", "(", "idx", ")", ",", "dir", "=", "tmpdir", ",", ")", ")", "## write nexus to tmpfile", "tmpfile", ".", "write", "(", "nexus", ")", "tmpfile", ".", "flush", "(", ")", "## infer the tree", "rax", "=", "raxml", "(", "name", "=", "str", "(", "idx", ")", ",", "data", "=", "tmpfile", ".", "name", ",", "workdir", "=", "tmpdir", ",", "N", "=", "1", ",", "T", "=", "2", ")", "rax", ".", "run", "(", "force", "=", "True", ",", "block", "=", "True", ",", "quiet", "=", "True", ")", "## clean up", "tmpfile", ".", "close", "(", ")", "## return tree order", "order", "=", "get_order", "(", "toytree", ".", "tree", "(", "rax", ".", "trees", ".", "bestTree", ")", ")", "return", "\"\"", ".", "join", "(", "order", ")" ]
Writes nexus to a tmpfile, runs raxml tree inference, and parses and returns the resulting tree.
[ "Write", "nexus", "to", "tmpfile", "runs", "phyml", "tree", "inference", "and", "parses", "and", "returns", "the", "resulting", "tree", "." ]
python
valid
29.148148
pandas-dev/pandas
pandas/core/generic.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/pandas/core/generic.py#L3065-L3081
def get(self, key, default=None): """ Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object """ try: return self[key] except (KeyError, ValueError, IndexError): return default
[ "def", "get", "(", "self", ",", "key", ",", "default", "=", "None", ")", ":", "try", ":", "return", "self", "[", "key", "]", "except", "(", "KeyError", ",", "ValueError", ",", "IndexError", ")", ":", "return", "default" ]
Get item from object for given key (DataFrame column, Panel slice, etc.). Returns default value if not found. Parameters ---------- key : object Returns ------- value : same type as items contained in object
[ "Get", "item", "from", "object", "for", "given", "key", "(", "DataFrame", "column", "Panel", "slice", "etc", ".", ")", ".", "Returns", "default", "value", "if", "not", "found", "." ]
python
train
25.588235
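get above is just item lookup behind a broad except net, returning the default on KeyError, ValueError, or IndexError. A tiny usage illustration with the real pandas API:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]})
print(df.get("a").tolist())        # [1, 2]
print(df.get("missing", "n/a"))    # default returned instead of raising KeyError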
andreasjansson/head-in-the-clouds
headintheclouds/dependencies/PyDbLite/PyDbLite_SQL.py
https://github.com/andreasjansson/head-in-the-clouds/blob/32c1d00d01036834dc94368e7f38b0afd3f7a82f/headintheclouds/dependencies/PyDbLite/PyDbLite_SQL.py#L59-L70
def open(self): """Open an existing database""" if self._table_exists(): self.mode = "open" # get table info self._get_table_info() self.types = dict([ (f[0],self.conv_func[f[1].upper()]) for f in self.fields if f[1].upper() in self.conv_func ]) return self else: # table not found raise IOError,"Table %s doesn't exist" %self.name
[ "def", "open", "(", "self", ")", ":", "if", "self", ".", "_table_exists", "(", ")", ":", "self", ".", "mode", "=", "\"open\"", "# get table info\r", "self", ".", "_get_table_info", "(", ")", "self", ".", "types", "=", "dict", "(", "[", "(", "f", "[", "0", "]", ",", "self", ".", "conv_func", "[", "f", "[", "1", "]", ".", "upper", "(", ")", "]", ")", "for", "f", "in", "self", ".", "fields", "if", "f", "[", "1", "]", ".", "upper", "(", ")", "in", "self", ".", "conv_func", "]", ")", "return", "self", "else", ":", "# table not found\r", "raise", "IOError", ",", "\"Table %s doesn't exist\"", "%", "self", ".", "name" ]
Open an existing database
[ "Open", "an", "existing", "database" ]
python
train
38
biocommons/biocommons.seqrepo
biocommons/seqrepo/fastadir/fabgz.py
https://github.com/biocommons/biocommons.seqrepo/blob/fb6d88682cb73ee6971cfa47d4dcd90a9c649167/biocommons/seqrepo/fastadir/fabgz.py#L32-L38
def _get_bgzip_version(exe): """return bgzip version as string""" p = subprocess.Popen([exe, "-h"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True) output = p.communicate() version_line = output[0].splitlines()[1] version = re.match(r"(?:Version:|bgzip \(htslib\))\s+(\d+\.\d+(\.\d+)?)", version_line).group(1) return version
[ "def", "_get_bgzip_version", "(", "exe", ")", ":", "p", "=", "subprocess", ".", "Popen", "(", "[", "exe", ",", "\"-h\"", "]", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "universal_newlines", "=", "True", ")", "output", "=", "p", ".", "communicate", "(", ")", "version_line", "=", "output", "[", "0", "]", ".", "splitlines", "(", ")", "[", "1", "]", "version", "=", "re", ".", "match", "(", "r\"(?:Version:|bgzip \\(htslib\\))\\s+(\\d+\\.\\d+(\\.\\d+)?)\"", ",", "version_line", ")", ".", "group", "(", "1", ")", "return", "version" ]
return bgzip version as string
[ "return", "bgzip", "version", "as", "string" ]
python
train
52.857143
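_get_bgzip_version above shells out to bgzip -h and regex-matches the second line of the banner, tolerating both the "Version:" and "bgzip (htslib)" layouts. The capture pattern itself can be exercised without spawning a process:

import re

VERSION_RE = re.compile(r"(?:Version:|bgzip \(htslib\))\s+(\d+\.\d+(\.\d+)?)")

for banner in ("Version: 1.2.1", "bgzip (htslib) 1.9"):
    print(VERSION_RE.match(banner).group(1))   # 1.2.1, then 1.9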
lisael/fastidious
fastidious/parser_base.py
https://github.com/lisael/fastidious/blob/2542db9de779ddabc3a64e9eb19a4e2de99741dc/fastidious/parser_base.py#L74-L77
def p_debug(self, message): "Format and print debug messages" print("{}{} `{}`".format(self._debug_indent * " ", message, repr(self.p_suffix(10))))
[ "def", "p_debug", "(", "self", ",", "message", ")", ":", "print", "(", "\"{}{} `{}`\"", ".", "format", "(", "self", ".", "_debug_indent", "*", "\" \"", ",", "message", ",", "repr", "(", "self", ".", "p_suffix", "(", "10", ")", ")", ")", ")" ]
Format and print debug messages
[ "Format", "and", "print", "debug", "messages" ]
python
train
48.25
ramses-tech/nefertari
nefertari/view.py
https://github.com/ramses-tech/nefertari/blob/c7caffe11576c11aa111adbdbadeff70ce66b1dd/nefertari/view.py#L160-L167
def set_override_rendered(self): """ Set self.request.override_renderer if needed. """ if '' in self.request.accept: self.request.override_renderer = self._default_renderer elif 'application/json' in self.request.accept: self.request.override_renderer = 'nefertari_json' elif 'text/plain' in self.request.accept: self.request.override_renderer = 'string'
[ "def", "set_override_rendered", "(", "self", ")", ":", "if", "''", "in", "self", ".", "request", ".", "accept", ":", "self", ".", "request", ".", "override_renderer", "=", "self", ".", "_default_renderer", "elif", "'application/json'", "in", "self", ".", "request", ".", "accept", ":", "self", ".", "request", ".", "override_renderer", "=", "'nefertari_json'", "elif", "'text/plain'", "in", "self", ".", "request", ".", "accept", ":", "self", ".", "request", ".", "override_renderer", "=", "'string'" ]
Set self.request.override_renderer if needed.
[ "Set", "self", ".", "request", ".", "override_renderer", "if", "needed", "." ]
python
train
51.875
Kozea/wdb
client/wdb/__init__.py
https://github.com/Kozea/wdb/blob/6af7901b02e866d76f8b0a697a8c078e5b70d1aa/client/wdb/__init__.py#L946-L953
def breaks(self, frame, no_remove=False): """Return True if there's a breakpoint at frame""" for breakpoint in set(self.breakpoints): if breakpoint.breaks(frame): if breakpoint.temporary and not no_remove: self.breakpoints.remove(breakpoint) return True return False
[ "def", "breaks", "(", "self", ",", "frame", ",", "no_remove", "=", "False", ")", ":", "for", "breakpoint", "in", "set", "(", "self", ".", "breakpoints", ")", ":", "if", "breakpoint", ".", "breaks", "(", "frame", ")", ":", "if", "breakpoint", ".", "temporary", "and", "not", "no_remove", ":", "self", ".", "breakpoints", ".", "remove", "(", "breakpoint", ")", "return", "True", "return", "False" ]
Return True if there's a breakpoint at frame
[ "Return", "True", "if", "there", "s", "a", "breakpoint", "at", "frame" ]
python
train
43.375
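breaks above iterates a snapshot (set(...)) of the breakpoints so temporary one-shot breakpoints can remove themselves during the scan without corrupting the iteration. The same pattern in miniature — Breakpoint here is a stand-in class, not wdb's:

class Breakpoint:
    def __init__(self, line, temporary=False):
        self.line, self.temporary = line, temporary

    def breaks(self, frame_line):
        return frame_line == self.line

def hits(breakpoints, frame_line, no_remove=False):
    for bp in set(breakpoints):              # snapshot allows safe removal below
        if bp.breaks(frame_line):
            if bp.temporary and not no_remove:
                breakpoints.remove(bp)       # one-shot breakpoints disarm themselves
            return True
    return False

bps = {Breakpoint(10, temporary=True)}
print(hits(bps, 10), hits(bps, 10))   # True False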
wdecoster/nanoget
nanoget/extraction_functions.py
https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L290-L297
def process_fastq_plain(fastq, **kwargs): """Combine metrics extracted from a fastq file.""" logging.info("Nanoget: Starting to collect statistics from plain fastq file.") inputfastq = handle_compressed_input(fastq) return ut.reduce_memory_usage(pd.DataFrame( data=[res for res in extract_from_fastq(inputfastq) if res], columns=["quals", "lengths"] ).dropna())
[ "def", "process_fastq_plain", "(", "fastq", ",", "*", "*", "kwargs", ")", ":", "logging", ".", "info", "(", "\"Nanoget: Starting to collect statistics from plain fastq file.\"", ")", "inputfastq", "=", "handle_compressed_input", "(", "fastq", ")", "return", "ut", ".", "reduce_memory_usage", "(", "pd", ".", "DataFrame", "(", "data", "=", "[", "res", "for", "res", "in", "extract_from_fastq", "(", "inputfastq", ")", "if", "res", "]", ",", "columns", "=", "[", "\"quals\"", ",", "\"lengths\"", "]", ")", ".", "dropna", "(", ")", ")" ]
Combine metrics extracted from a fastq file.
[ "Combine", "metrics", "extracted", "from", "a", "fastq", "file", "." ]
python
train
48.75
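Call-site sketch (the filename is hypothetical; per handle_compressed_input, a gzipped fastq works too):

df = process_fastq_plain("reads.fastq.gz")
print(df["lengths"].median(), df["quals"].mean())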
nion-software/nionswift
nion/swift/model/HardwareSource.py
https://github.com/nion-software/nionswift/blob/d43693eaf057b8683b9638e575000f055fede452/nion/swift/model/HardwareSource.py#L1226-L1232
def pause(self) -> None:
    """Pause recording.

    Thread safe and UI safe.
    """
    with self.__state_lock:
        if self.__state == DataChannelBuffer.State.started:
            self.__state = DataChannelBuffer.State.paused
[ "def", "pause", "(", "self", ")", "->", "None", ":", "with", "self", ".", "__state_lock", ":", "if", "self", ".", "__state", "==", "DataChannelBuffer", ".", "State", ".", "started", ":", "self", ".", "__state", "=", "DataChannelBuffer", ".", "State", ".", "paused" ]
Pause recording. Thread safe and UI safe.
[ "Pause", "recording", "." ]
python
train
34.428571
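The guarded compare-and-set, in isolation (State and the lock here are generic stand-ins for the buffer's internals):

import enum
import threading

class State(enum.Enum):
    started = 1
    paused = 2

state_lock = threading.RLock()
state = State.started
with state_lock:
    if state == State.started:
        state = State.paused  # only a started buffer can be paused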
pantsbuild/pants
src/python/pants/goal/products.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/goal/products.py#L100-L113
def get_product_target_mappings_for_targets(self, targets):
  """Gets the product-target associations for the given targets, preserving the input order.

  :API: public
  :param targets: The targets to lookup products for.
  :returns: The ordered (product, target) tuples.
  """
  product_target_mappings = []
  for target in targets:
    for product in self._products_by_target[target]:
      product_target_mappings.append((product, target))
  return product_target_mappings
[ "def", "get_product_target_mappings_for_targets", "(", "self", ",", "targets", ")", ":", "product_target_mappings", "=", "[", "]", "for", "target", "in", "targets", ":", "for", "product", "in", "self", ".", "_products_by_target", "[", "target", "]", ":", "product_target_mappings", ".", "append", "(", "(", "product", ",", "target", ")", ")", "return", "product_target_mappings" ]
Gets the product-target associations for the given targets, preserving the input order.

:API: public
:param targets: The targets to lookup products for.
:returns: The ordered (product, target) tuples.
[ "Gets", "the", "product", "-", "target", "associations", "for", "the", "given", "targets", "preserving", "the", "input", "order", "." ]
python
train
34.642857
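The ordering guarantee, shown with a plain dict in place of the product registry (all names invented):

products_by_target = {"a": ["p1", "p2"], "b": ["p3"]}
mappings = []
for target in ["b", "a"]:
    for product in products_by_target[target]:
        mappings.append((product, target))
print(mappings)  # [('p3', 'b'), ('p1', 'a'), ('p2', 'a')] -- follows input order, not dict order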
VIVelev/PyDojoML
dojo/metrics/classification.py
https://github.com/VIVelev/PyDojoML/blob/773fdce6866aa6decd306a5a85f94129fed816eb/dojo/metrics/classification.py#L145-L166
def precision(y, y_pred):
    """Precision score

    precision = true_positives / (true_positives + false_positives)

    Parameters:
    -----------
    y : vector, shape (n_samples,)
        The target labels.

    y_pred : vector, shape (n_samples,)
        The predicted labels.

    Returns:
    --------
    precision : float
    """

    tp = true_positives(y, y_pred)
    fp = false_positives(y, y_pred)
    return tp / (tp + fp)
[ "def", "precision", "(", "y", ",", "y_pred", ")", ":", "tp", "=", "true_positives", "(", "y", ",", "y_pred", ")", "fp", "=", "false_positives", "(", "y", ",", "y_pred", ")", "return", "tp", "/", "(", "tp", "+", "fp", ")" ]
Precision score

precision = true_positives / (true_positives + false_positives)

Parameters:
-----------
y : vector, shape (n_samples,)
    The target labels.

y_pred : vector, shape (n_samples,)
    The predicted labels.

Returns:
--------
precision : float
[ "Precision", "score" ]
python
train
19.227273
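A worked example (assuming the true_positives/false_positives helpers count the positive class, label 1):

y      = [1, 0, 1, 1, 0]
y_pred = [1, 1, 1, 0, 0]
# tp = 2 (indices 0 and 2), fp = 1 (index 1)
# precision = 2 / (2 + 1) = 0.666...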
tensorflow/tensor2tensor
tensor2tensor/utils/yellowfin.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/utils/yellowfin.py#L308-L349
def _prepare_variables(self):
  """Prepare Variables for YellowFin.

  Returns:
    Grad**2, Norm, Norm**2, Mean(Norm**2) ops
  """
  self._moving_averager = tf.train.ExponentialMovingAverage(
      decay=self._beta, zero_debias=self._zero_debias)
  # assert self._grad is not None and len(self._grad) > 0
  # List for the returned Operations
  prepare_variables_op = []
  # Get per var g**2 and norm**2
  self._grad_squared = []
  self._grad_norm_squared = []
  # Gradient squared
  for v, g in zip(self._vars, self._grad):
    if g is None:
      continue
    with tf.colocate_with(v):
      self._grad_squared.append(tf.square(g))
  # Norm squared.
  self._grad_norm_squared = [tf.reduce_sum(g_sq)
                             for g_sq in self._grad_squared]
  if self._sparsity_debias:
    avg_op_sparsity = self._grad_sparsity()
    prepare_variables_op.append(avg_op_sparsity)
  # The following running average on squared norm of gradient
  # is shared by grad_var and dist_to_opt
  avg_op = self._moving_averager.apply(self._grad_norm_squared)
  with tf.control_dependencies([avg_op]):
    self._grad_norm_squared_avg = [self._moving_averager.average(val)
                                   for val in self._grad_norm_squared]
    self._grad_norm_squared = tf.add_n(self._grad_norm_squared)
    self._grad_norm_squared_avg = tf.add_n(self._grad_norm_squared_avg)
  prepare_variables_op.append(avg_op)
  return tf.group(*prepare_variables_op)
[ "def", "_prepare_variables", "(", "self", ")", ":", "self", ".", "_moving_averager", "=", "tf", ".", "train", ".", "ExponentialMovingAverage", "(", "decay", "=", "self", ".", "_beta", ",", "zero_debias", "=", "self", ".", "_zero_debias", ")", "# assert self._grad is not None and len(self._grad) > 0", "# List for the returned Operations", "prepare_variables_op", "=", "[", "]", "# Get per var g**2 and norm**2", "self", ".", "_grad_squared", "=", "[", "]", "self", ".", "_grad_norm_squared", "=", "[", "]", "# Gradient squared", "for", "v", ",", "g", "in", "zip", "(", "self", ".", "_vars", ",", "self", ".", "_grad", ")", ":", "if", "g", "is", "None", ":", "continue", "with", "tf", ".", "colocate_with", "(", "v", ")", ":", "self", ".", "_grad_squared", ".", "append", "(", "tf", ".", "square", "(", "g", ")", ")", "# Norm squared.", "self", ".", "_grad_norm_squared", "=", "[", "tf", ".", "reduce_sum", "(", "g_sq", ")", "for", "g_sq", "in", "self", ".", "_grad_squared", "]", "if", "self", ".", "_sparsity_debias", ":", "avg_op_sparsity", "=", "self", ".", "_grad_sparsity", "(", ")", "prepare_variables_op", ".", "append", "(", "avg_op_sparsity", ")", "# The following running average on squared norm of gradient", "# is shared by grad_var and dist_to_opt", "avg_op", "=", "self", ".", "_moving_averager", ".", "apply", "(", "self", ".", "_grad_norm_squared", ")", "with", "tf", ".", "control_dependencies", "(", "[", "avg_op", "]", ")", ":", "self", ".", "_grad_norm_squared_avg", "=", "[", "self", ".", "_moving_averager", ".", "average", "(", "val", ")", "for", "val", "in", "self", ".", "_grad_norm_squared", "]", "self", ".", "_grad_norm_squared", "=", "tf", ".", "add_n", "(", "self", ".", "_grad_norm_squared", ")", "self", ".", "_grad_norm_squared_avg", "=", "tf", ".", "add_n", "(", "self", ".", "_grad_norm_squared_avg", ")", "prepare_variables_op", ".", "append", "(", "avg_op", ")", "return", "tf", ".", "group", "(", "*", "prepare_variables_op", ")" ]
Prepare Variables for YellowFin.

Returns:
  Grad**2, Norm, Norm**2, Mean(Norm**2) ops
[ "Prepare", "Variables", "for", "YellowFin", "." ]
python
train
35.095238
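The running average this op graph maintains on the squared gradient norm is easier to see outside TensorFlow; the same arithmetic in plain Python (beta and the gradients are made-up values, and the zero-debias correction is omitted):

import numpy as np

beta = 0.999
avg_norm_sq = 0.0
for g in [np.array([0.1, -0.2]), np.array([0.05, 0.3])]:
    norm_sq = float(np.sum(np.square(g)))
    avg_norm_sq = beta * avg_norm_sq + (1 - beta) * norm_sq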
cloudsmith-io/cloudsmith-cli
cloudsmith_cli/core/api/user.py
https://github.com/cloudsmith-io/cloudsmith-cli/blob/5bc245ca5d0bfa85380be48e7c206b4c86cc6c8e/cloudsmith_cli/core/api/user.py#L34-L42
def get_user_brief():
    """Retrieve brief for current user (if any)."""
    client = get_user_api()

    with catch_raise_api_exception():
        data, _, headers = client.user_self_with_http_info()

    ratelimits.maybe_rate_limit(client, headers)

    return data.authenticated, data.slug, data.email, data.name
[ "def", "get_user_brief", "(", ")", ":", "client", "=", "get_user_api", "(", ")", "with", "catch_raise_api_exception", "(", ")", ":", "data", ",", "_", ",", "headers", "=", "client", ".", "user_self_with_http_info", "(", ")", "ratelimits", ".", "maybe_rate_limit", "(", "client", ",", "headers", ")", "return", "data", ".", "authenticated", ",", "data", ".", "slug", ",", "data", ".", "email", ",", "data", ".", "name" ]
Retrieve brief for current user (if any).
[ "Retrieve", "brief", "for", "current", "user", "(", "if", "any", ")", "." ]
python
train
34.111111
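Call-site sketch (output handling is illustrative):

authenticated, slug, email, name = get_user_brief()
if not authenticated:
    print("not authenticated")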
matthewdeanmartin/jiggle_version
jiggle_version/central_module_finder.py
https://github.com/matthewdeanmartin/jiggle_version/blob/963656a0a47b7162780a5f6c8f4b8bbbebc148f5/jiggle_version/central_module_finder.py#L171-L198
def remove_likely_non_central(self, candidates):  # type: (List[str]) -> List[str]
    """
    Stuff that is likely to be in find_packages(exclude...)
    :param candidates:
    :return:
    """
    if len(candidates) > 1:
        for unlikely in [
            "test",
            "tests",
            "example",
            "examples",
            "demo",
            "demos",
            "test_files",
            "doc",
            "docs",
        ]:
            if unlikely in candidates:
                logger.warning("Assuming {0} is not the project".format(unlikely))
                candidates.remove(unlikely)
            for candidate in candidates:
                if candidate.startswith(unlikely + "."):
                    logger.warning(
                        "Assuming {0} is not the project".format(candidate)
                    )
                    candidates.remove(candidate)
    return candidates
[ "def", "remove_likely_non_central", "(", "self", ",", "candidates", ")", ":", "# type: (List[str]) -> List[str]", "if", "len", "(", "candidates", ")", ">", "1", ":", "for", "unlikely", "in", "[", "\"test\"", ",", "\"tests\"", ",", "\"example\"", ",", "\"examples\"", ",", "\"demo\"", ",", "\"demos\"", ",", "\"test_files\"", ",", "\"doc\"", ",", "\"docs\"", ",", "]", ":", "if", "unlikely", "in", "candidates", ":", "logger", ".", "warning", "(", "\"Assuming {0} is not the project\"", ".", "format", "(", "unlikely", ")", ")", "candidates", ".", "remove", "(", "unlikely", ")", "for", "candidate", "in", "candidates", ":", "if", "candidate", ".", "startswith", "(", "unlikely", "+", "\".\"", ")", ":", "logger", ".", "warning", "(", "\"Assuming {0} is not the project\"", ".", "format", "(", "candidate", ")", ")", "candidates", ".", "remove", "(", "candidate", ")", "return", "candidates" ]
Stuff that is likely to be in find_packages(exclude...)
:param candidates:
:return:
[ "Stuff", "that", "is", "likely", "to", "be", "in", "find_packages", "(", "exclude", "...", ")", ":", "param", "candidates", ":", ":", "return", ":" ]
python
train
35.821429
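Illustrative input and result for the filter above (package names invented):

candidates = ["mypkg", "tests", "tests.unit", "docs"]
# returns ["mypkg"]: "tests" and "docs" hit the deny list directly,
# and "tests.unit" is dropped by the startswith("tests.") check.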
PSPC-SPAC-buyandsell/von_anchor
von_anchor/anchor/holderprover.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/anchor/holderprover.py#L720-L771
async def get_cred_infos_by_filter(self, filt: dict = None) -> str:
    """
    Return cred-info (json list) from wallet by input filter for
    schema identifier and/or credential definition identifier components;
    return info of all credentials for no filter.

    Raise WalletState if the wallet is closed.

    :param filt: indy-sdk filter for credentials; i.e.,

    ::

        {
            "schema_id": string,  # optional
            "schema_issuer_did": string,  # optional
            "schema_name": string,  # optional
            "schema_version": string,  # optional
            "issuer_did": string,  # optional
            "cred_def_id": string  # optional
        }

    :return: credential infos as json list; i.e.,

    ::

        [
            {
                "referent": string,  # credential identifier in the wallet
                "attrs": {
                    "attr1" : {"raw": "value1", "encoded": "value1_as_int" },
                    "attr2" : {"raw": "value2", "encoded": "value2_as_int" },
                    ...
                }
                "schema_id": string,
                "cred_def_id": string,
                "rev_reg_id": Optional<string>,
                "cred_rev_id": Optional<string>
            },
            ...
        ]

    """
    LOGGER.debug('HolderProver.get_cred_infos_by_filter >>> filt: %s', filt)

    if not self.wallet.handle:
        LOGGER.debug('HolderProver.get_cred_infos_by_filter <!< Wallet %s is closed', self.name)
        raise WalletState('Wallet {} is closed'.format(self.name))

    rv_json = await anoncreds.prover_get_credentials(self.wallet.handle, json.dumps(filt or {}))
    LOGGER.debug('HolderProver.get_cred_infos_by_filter <<< %s', rv_json)
    return rv_json
[ "async", "def", "get_cred_infos_by_filter", "(", "self", ",", "filt", ":", "dict", "=", "None", ")", "->", "str", ":", "LOGGER", ".", "debug", "(", "'HolderProver.get_cred_infos_by_filter >>> filt: %s'", ",", "filt", ")", "if", "not", "self", ".", "wallet", ".", "handle", ":", "LOGGER", ".", "debug", "(", "'HolderProver.get_cred_infos_by_filter <!< Wallet %s is closed'", ",", "self", ".", "name", ")", "raise", "WalletState", "(", "'Wallet {} is closed'", ".", "format", "(", "self", ".", "name", ")", ")", "rv_json", "=", "await", "anoncreds", ".", "prover_get_credentials", "(", "self", ".", "wallet", ".", "handle", ",", "json", ".", "dumps", "(", "filt", "or", "{", "}", ")", ")", "LOGGER", ".", "debug", "(", "'HolderProver.get_cred_infos_by_filter <<< %s'", ",", "rv_json", ")", "return", "rv_json" ]
Return cred-info (json list) from wallet by input filter for
schema identifier and/or credential definition identifier components;
return info of all credentials for no filter.

Raise WalletState if the wallet is closed.

:param filt: indy-sdk filter for credentials; i.e.,

::

    {
        "schema_id": string,  # optional
        "schema_issuer_did": string,  # optional
        "schema_name": string,  # optional
        "schema_version": string,  # optional
        "issuer_did": string,  # optional
        "cred_def_id": string  # optional
    }

:return: credential infos as json list; i.e.,

::

    [
        {
            "referent": string,  # credential identifier in the wallet
            "attrs": {
                "attr1" : {"raw": "value1", "encoded": "value1_as_int" },
                "attr2" : {"raw": "value2", "encoded": "value2_as_int" },
                ...
            }
            "schema_id": string,
            "cred_def_id": string,
            "rev_reg_id": Optional<string>,
            "cred_rev_id": Optional<string>
        },
        ...
    ]
[ "Return", "cred", "-", "info", "(", "json", "list", ")", "from", "wallet", "by", "input", "filter", "for", "schema", "identifier", "and", "/", "or", "credential", "definition", "identifier", "components", ";", "return", "info", "of", "all", "credentials", "for", "no", "filter", "." ]
python
train
35.865385
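Example filter, matching the shape documented in the docstring (the DID and names are placeholders; the call is shown commented because it needs an event loop and an open wallet):

filt = {
    "schema_name": "drivers-license",
    "schema_version": "1.0",
    "issuer_did": "WgWxqztrNooG92RXvxSTWv",
}
# infos_json = await holder_prover.get_cred_infos_by_filter(filt)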
lawsie/guizero
guizero/ButtonGroup.py
https://github.com/lawsie/guizero/blob/84c7f0b314fa86f9fc88eb11c9a0f6c4b57155e2/guizero/ButtonGroup.py#L290-L310
def update_command(self, command, args=None):
    """
    Updates the callback command which is called when the ButtonGroup
    changes.

    Setting to `None` stops the callback.

    :param callback command:
        The callback function to call.

    :param callback args:
        A list of arguments to pass to the widgets `command`, defaults to
        `None`.
    """
    if command is None:
        self._command = lambda: None
    else:
        if args is None:
            self._command = command
        else:
            self._command = utils.with_args(command, *args)
[ "def", "update_command", "(", "self", ",", "command", ",", "args", "=", "None", ")", ":", "if", "command", "is", "None", ":", "self", ".", "_command", "=", "lambda", ":", "None", "else", ":", "if", "args", "is", "None", ":", "self", ".", "_command", "=", "command", "else", ":", "self", ".", "_command", "=", "utils", ".", "with_args", "(", "command", ",", "*", "args", ")" ]
Updates the callback command which is called when the ButtonGroup
changes.

Setting to `None` stops the callback.

:param callback command:
    The callback function to call.

:param callback args:
    A list of arguments to pass to the widgets `command`, defaults to
    `None`.
[ "Updates", "the", "callback", "command", "which", "is", "called", "when", "the", "ButtonGroup", "changes", "." ]
python
train
29.857143
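utils.with_args is not shown in this record; it evidently behaves like functools.partial, which makes the last branch readable (a sketch, not guizero's actual implementation):

from functools import partial

def on_change(value):
    print("changed:", value)

command = partial(on_change, "size")  # stand-in for utils.with_args(on_change, "size")
command()  # -> changed: size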
OpenKMIP/PyKMIP
kmip/core/messages/payloads/revoke.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/revoke.py#L172-L193
def read(self, istream, kmip_version=enums.KMIPVersion.KMIP_1_0):
    """
    Read the data encoding the RevokeResponsePayload object and decode it
    into its constituent parts.

    Args:
        istream (Stream): A data stream containing encoded object data,
            supporting a read method; usually a BytearrayStream object.
        kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be decoded. Optional,
            defaults to KMIP 1.0.
    """
    super(RevokeResponsePayload, self).read(
        istream,
        kmip_version=kmip_version
    )
    tstream = BytearrayStream(istream.read(self.length))

    self.unique_identifier = attributes.UniqueIdentifier()
    self.unique_identifier.read(tstream, kmip_version=kmip_version)

    self.is_oversized(tstream)
    self.validate()
[ "def", "read", "(", "self", ",", "istream", ",", "kmip_version", "=", "enums", ".", "KMIPVersion", ".", "KMIP_1_0", ")", ":", "super", "(", "RevokeResponsePayload", ",", "self", ")", ".", "read", "(", "istream", ",", "kmip_version", "=", "kmip_version", ")", "tstream", "=", "BytearrayStream", "(", "istream", ".", "read", "(", "self", ".", "length", ")", ")", "self", ".", "unique_identifier", "=", "attributes", ".", "UniqueIdentifier", "(", ")", "self", ".", "unique_identifier", ".", "read", "(", "tstream", ",", "kmip_version", "=", "kmip_version", ")", "self", ".", "is_oversized", "(", "tstream", ")", "self", ".", "validate", "(", ")" ]
Read the data encoding the RevokeResponsePayload object and decode it
into its constituent parts.

Args:
    istream (Stream): A data stream containing encoded object data,
        supporting a read method; usually a BytearrayStream object.
    kmip_version (KMIPVersion): An enumeration defining the KMIP
        version with which the object will be decoded. Optional,
        defaults to KMIP 1.0.
[ "Read", "the", "data", "encoding", "the", "RevokeResponsePayload", "object", "and", "decode", "it", "into", "its", "constituent", "parts", ".", "Args", ":", "istream", "(", "Stream", ")", ":", "A", "data", "stream", "containing", "encoded", "object", "data", "supporting", "a", "read", "method", ";", "usually", "a", "BytearrayStream", "object", ".", "kmip_version", "(", "KMIPVersion", ")", ":", "An", "enumeration", "defining", "the", "KMIP", "version", "with", "which", "the", "object", "will", "be", "decoded", ".", "Optional", "defaults", "to", "KMIP", "1", ".", "0", "." ]
python
test
41.227273
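The decode pattern -- carve out a length-delimited sub-stream, decode children from it, then verify nothing is left -- in generic form (the bytes are invented; io.BytesIO stands in for BytearrayStream):

import io

stream = io.BytesIO(b"\x01\x02\x03rest")
length = 3
sub = io.BytesIO(stream.read(length))  # children are decoded from sub only
payload = sub.read()
assert sub.read() == b""  # is_oversized-style check: no bytes left over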
jwhitlock/drf-cached-instances
drf_cached_instances/models.py
https://github.com/jwhitlock/drf-cached-instances/blob/ec4e8a6e1e83eeea6ec0b924b2eaa40a38d5963a/drf_cached_instances/models.py#L95-L100
def pks(self):
    """Lazy-load the primary keys."""
    if self._primary_keys is None:
        self._primary_keys = list(
            self.queryset.values_list('pk', flat=True))
    return self._primary_keys
[ "def", "pks", "(", "self", ")", ":", "if", "self", ".", "_primary_keys", "is", "None", ":", "self", ".", "_primary_keys", "=", "list", "(", "self", ".", "queryset", ".", "values_list", "(", "'pk'", ",", "flat", "=", "True", ")", ")", "return", "self", ".", "_primary_keys" ]
Lazy-load the primary keys.
[ "Lazy", "-", "load", "the", "primary", "keys", "." ]
python
train
37.166667
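The lazy-load idiom in isolation (the range call stands in for the one-time queryset hit):

class PkCache:
    def __init__(self):
        self._primary_keys = None

    @property
    def pks(self):
        if self._primary_keys is None:
            self._primary_keys = list(range(3))  # the one-time "query"
        return self._primary_keys

cache = PkCache()
assert cache.pks is cache.pks  # second access reuses the cached list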
EventTeam/beliefs
src/beliefs/cells/lists.py
https://github.com/EventTeam/beliefs/blob/c07d22b61bebeede74a72800030dde770bf64208/src/beliefs/cells/lists.py#L84-L91
def is_entailed_by(self, other):
    """ Other is more specific than self. Other is bounded within self.
    """
    other = self.coerce(other)
    to_i = self.to_i
    return to_i(other.low) >= to_i(self.low) and \
           to_i(other.high) <= to_i(self.high)
[ "def", "is_entailed_by", "(", "self", ",", "other", ")", ":", "other", "=", "self", ".", "coerce", "(", "other", ")", "to_i", "=", "self", ".", "to_i", "return", "to_i", "(", "other", ".", "low", ")", ">=", "to_i", "(", "self", ".", "low", ")", "and", "to_i", "(", "other", ".", "high", ")", "<=", "to_i", "(", "self", ".", "high", ")" ]
Other is more specific than self. Other is bounded within self.
[ "Other", "is", "more", "specific", "than", "self", ".", "Other", "is", "bounded", "within", "self", "." ]
python
train
36.25
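In numbers (values invented; to_i is assumed to be an order-preserving map):

low, high = 1, 10        # self
o_low, o_high = 3, 7     # other
print(o_low >= low and o_high <= high)  # -> True: other is bounded within self, so other entails self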
mpapi/lazylights
lazylights.py
https://github.com/mpapi/lazylights/blob/536dbd3ce75c28b3545cf66f25fc72589488063f/lazylights.py#L319-L339
def run(self):
    """
    Process all outgoing packets, until `stop()` is called. Intended to
    run in its own thread.
    """
    while True:
        to_send = self._queue.get()
        if to_send is _SHUTDOWN:
            break

        # If we get a gateway object, connect to it. Otherwise, assume
        # it's a bytestring and send it out on the socket.
        if isinstance(to_send, Gateway):
            self._gateway = to_send
            self._connected.set()
        else:
            if not self._gateway:
                raise SendException('no gateway')
            dest = (self._gateway.addr, self._gateway.port)
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.sendto(to_send, dest)
[ "def", "run", "(", "self", ")", ":", "while", "True", ":", "to_send", "=", "self", ".", "_queue", ".", "get", "(", ")", "if", "to_send", "is", "_SHUTDOWN", ":", "break", "# If we get a gateway object, connect to it. Otherwise, assume", "# it's a bytestring and send it out on the socket.", "if", "isinstance", "(", "to_send", ",", "Gateway", ")", ":", "self", ".", "_gateway", "=", "to_send", "self", ".", "_connected", ".", "set", "(", ")", "else", ":", "if", "not", "self", ".", "_gateway", ":", "raise", "SendException", "(", "'no gateway'", ")", "dest", "=", "(", "self", ".", "_gateway", ".", "addr", ",", "self", ".", "_gateway", ".", "port", ")", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_INET", ",", "socket", ".", "SOCK_DGRAM", ")", "sock", ".", "sendto", "(", "to_send", ",", "dest", ")" ]
Process all outgoing packets, until `stop()` is called. Intended to run in its own thread.
[ "Process", "all", "outgoing", "packets", "until", "stop", "()", "is", "called", ".", "Intended", "to", "run", "in", "its", "own", "thread", "." ]
python
train
37.857143
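The send path itself is plain one-shot UDP; in isolation (address and payload are invented -- a real LIFX packet would be packed upstream):

import socket

sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(b"\x00" * 36, ("192.168.1.50", 56700))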