388,200
def get_review_requests(self):
    return (
        github.PaginatedList.PaginatedList(
            github.NamedUser.NamedUser,
            self._requester,
            self.url + "/requested_reviewers",
            None,
            list_item="users",
        ),
        github.PaginatedList.PaginatedList(
            github.Team.Team,
            self._requester,
            self.url + "/requested_reviewers",
            None,
            list_item="teams",
        ),
    )
:calls: `GET /repos/:owner/:repo/pulls/:number/requested_reviewers <https://developer.github.com/v3/pulls/review_requests/>`_ :rtype: tuple of :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser` and of :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
388,201
def list_dfu_devices(*args, **kwargs):
    devices = get_dfu_devices(*args, **kwargs)
    if not devices:
        print("No DFU capable devices found")
        return
    for device in devices:
        print("Bus {} Device {:03d}: ID {:04x}:{:04x}"
              .format(device.bus, device.address,
                      device.idVendor, device.idProduct))
        layout = get_memory_layout(device)
        print("Memory Layout")
        for entry in layout:
            print("    0x{:x} {:2d} pages of {:3d}K bytes"
                  .format(entry["addr"], entry["num_pages"],
                          entry["page_size"] // 1024))
Prints a list of devices detected in DFU mode.
388,202
def delete_resource(resource_name, key, identifier_fields, profile="pagerduty",
                    subdomain=None, api_key=None):
    # The "pagerduty" default profile and the __opts__["test"] key are
    # reconstructions; the original string literals were stripped.
    resource = get_resource(resource_name, key, identifier_fields, profile,
                            subdomain, api_key)
    if resource:
        if __opts__["test"]:
            return
        else:
            return True
Delete any PagerDuty resource. Helper method for absent(). Example: delete_resource("users", key, ["id", "name", "email"])  # delete by id, name, or email
388,203
def __make_points(self, measurement, additional_tags, ts, fields): tags = self.tags.copy() tags.update(additional_tags) return { "measurement": measurement, "tags": tags, "time": int(ts), "fields": fields, }
Parameters ---------- measurement : string measurement type (e.g. monitoring, overall_meta, net_codes, proto_codes, overall_quantiles) additional_tags : dict custom additional tags for this points ts : integer timestamp fields : dict influxdb columns Returns ------- dict points for InfluxDB client
388,204
def reset(self): self.grid = [[0 for dummy_l in range(self.grid_width)] for dummy_l in range(self.grid_height)]
Reset the game so the grid is zeros (or default items)
388,205
def stabilize(self, test_func, error, timeoutSecs=10, retryDelaySecs=0.5):
    start = time.time()
    numberOfRetries = 0
    while h2o_args.no_timeout or (time.time() - start < timeoutSecs):
        if test_func(self, tries=numberOfRetries, timeoutSecs=timeoutSecs):
            break
        time.sleep(retryDelaySecs)
        numberOfRetries += 1
    else:
        timeTakenSecs = time.time() - start
        if isinstance(error, str):
            # Message text reconstructed; the original literal was stripped.
            raise Exception('%s failed after %.2f seconds having retried %d times' % (
                error, timeTakenSecs, numberOfRetries))
        else:
            msg = error(self, timeTakenSecs, numberOfRetries)
            raise Exception(msg)
Repeatedly test a function waiting for it to return True. Arguments: test_func -- A function that will be run repeatedly error -- A function that will be run to produce an error message it will be called with (node, timeTakenSecs, numberOfRetries) OR -- A string that will be interpolated with a dictionary of { 'timeTakenSecs', 'numberOfRetries' } timeoutSecs -- How long in seconds to keep trying before declaring a failure retryDelaySecs -- How long to wait between retry attempts
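A minimal usage sketch showing the two `error` styles the docstring describes; `node` and the `job_done` helper are hypothetical:

def job_done(node, tries, timeoutSecs):
    return node.poll_job()  # assumed helper returning True when finished

# error as a plain string:
node.stabilize(job_done, error='job completion', timeoutSecs=30)

# error as a callable that builds a custom message:
node.stabilize(
    job_done,
    error=lambda node, secs, retries: 'gave up after %.1fs and %d retries' % (secs, retries),
    timeoutSecs=30)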
388,206
def confirm_authorization_request(self):
    # Request-parameter keys follow the standard OAuth2 flow; the log message
    # texts are reconstructions (the original literals were stripped).
    server = self.server
    scope = request.values.get('scope') or ''
    scopes = scope.split()
    credentials = dict(
        client_id=request.values.get('client_id'),
        redirect_uri=request.values.get('redirect_uri', None),
        response_type=request.values.get('response_type', None),
        state=request.values.get('state', None)
    )
    log.debug('Fetched credentials from request %r.', credentials)
    redirect_uri = credentials.get('redirect_uri')
    log.debug('Found redirect_uri %s.', redirect_uri)
    uri, http_method, body, headers = extract_params()
    try:
        ret = server.create_authorization_response(
            uri, http_method, body, headers, scopes, credentials)
        log.debug('Authorization successful.')
        return create_response(*ret)
    except oauth2.FatalClientError as e:
        log.debug('Fatal client error %r', e, exc_info=True)
        return self._on_exception(e, e.in_uri(self.error_uri))
    except oauth2.OAuth2Error as e:
        log.debug('OAuth2Error: %r', e, exc_info=True)
        return self._on_exception(e, e.in_uri(redirect_uri or self.error_uri))
Called when the consumer confirms the authorization request.
388,207
def serialize_operator_less_than(self, op):
    elem = etree.Element('less-than')
    return self.serialize_value_list(elem, op.args)
Serializer for :meth:`SpiffWorkflow.operators.LessThan`. Example:: <less-than> <value>text</value> <value><attribute>foobar</attribute></value> </less-than>
388,208
def GetRawDevice(path): path = CanonicalPathToLocalPath(path) try: path = win32file.GetLongPathName(path) except pywintypes.error: pass try: mount_point = win32file.GetVolumePathName(path) except pywintypes.error as details: logging.info("path not found. %s", details) raise IOError("No mountpoint for path: %s" % path) if not path.startswith(mount_point): stripped_mp = mount_point.rstrip("\\") if not path.startswith(stripped_mp): raise IOError("path %s is not mounted under %s" % (path, mount_point)) corrected_path = LocalPathToCanonicalPath(path[len(mount_point):]) corrected_path = utils.NormalizePath(corrected_path) volume = win32file.GetVolumeNameForVolumeMountPoint(mount_point).rstrip("\\") volume = LocalPathToCanonicalPath(volume) result = rdf_paths.PathSpec( path=volume, pathtype=rdf_paths.PathSpec.PathType.OS, mount_point=mount_point.rstrip("\\")) return result, corrected_path
Resolves the raw device that contains the path. Args: path: A path to examine. Returns: A pathspec to read the raw device as well as the modified path to read within the raw device. This is usually the path without the mount point. Raises: IOError: if the path does not exist or some unexpected behaviour occurs.
388,209
def v1_label_negative_inference(request, response, visid_to_dbid, dbid_to_visid,
                                label_store, cid):
    lab_to_json = partial(label_to_json, dbid_to_visid)
    labs = imap(lab_to_json,
                label_store.negative_inference(visid_to_dbid(cid)))
    return list(paginate(request, response, labs))
Return inferred negative labels. The route for this endpoint is: ``/dossier/v1/label/<cid>/negative-inference``. Negative labels are inferred by first getting all other content ids connected to ``cid`` through a negative label. For each directly adjacent ``cid'``, the connected components of ``cid`` and ``cid'`` are traversed to find negative labels. The data returned is a JSON list of labels. Each label is a dictionary with the following keys: ``content_id1``, ``content_id2``, ``subtopic_id1``, ``subtopic_id2``, ``annotator_id``, ``epoch_ticks`` and ``value``.
388,210
def get_or_default(func=None, default=None): def decorator(func): @wraps(func) def wrapper(*args, **kwargs): try: return func(*args, **kwargs) except ObjectDoesNotExist: if callable(default): return default() else: return default return wrapper if func is None: return decorator else: return decorator(func)
Wrapper around Django's ORM `get` functionality. Wrap anything that raises an ObjectDoesNotExist exception and provide a default value if necessary. `default` is None by default. `default` can be any callable; if it is callable, it is called when the ObjectDoesNotExist exception is raised.
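A short usage sketch; `Profile` is a hypothetical Django model:

@get_or_default(default=None)
def get_profile(user_id):
    return Profile.objects.get(user_id=user_id)

# Or wrap the manager method directly, with a callable default that is
# evaluated lazily when ObjectDoesNotExist is raised:
get_profile_or_new = get_or_default(Profile.objects.get, default=Profile)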
388,211
def _get_os(osvi=None):
    # Return constants reconstructed from the documented return values below.
    if not osvi:
        osvi = GetVersionEx()
    if osvi.dwPlatformId == VER_PLATFORM_WIN32_NT and osvi.dwMajorVersion > 4:
        if osvi.dwMajorVersion == 6:
            if osvi.dwMinorVersion == 0:
                if osvi.wProductType == VER_NT_WORKSTATION:
                    if bits == 64 or wow64:
                        return OS_VISTA_64
                    return OS_VISTA
                else:
                    if bits == 64 or wow64:
                        return OS_W2K8_64
                    return OS_W2K8
            if osvi.dwMinorVersion == 1:
                if osvi.wProductType == VER_NT_WORKSTATION:
                    if bits == 64 or wow64:
                        return OS_W7_64
                    return OS_W7
                else:
                    if bits == 64 or wow64:
                        return OS_W2K8R2_64
                    return OS_W2K8R2
        if osvi.dwMajorVersion == 5:
            if osvi.dwMinorVersion == 2:
                if GetSystemMetrics(SM_SERVERR2):
                    if bits == 64 or wow64:
                        return OS_W2K3R2_64
                    return OS_W2K3R2
                if osvi.wSuiteMask in (VER_SUITE_STORAGE_SERVER, VER_SUITE_WH_SERVER):
                    if bits == 64 or wow64:
                        return OS_W2K3_64
                    return OS_W2K3
                if osvi.wProductType == VER_NT_WORKSTATION and arch == ARCH_AMD64:
                    return OS_XP_64
                else:
                    if bits == 64 or wow64:
                        return OS_W2K3_64
                    return OS_W2K3
            if osvi.dwMinorVersion == 1:
                return OS_XP
            if osvi.dwMinorVersion == 0:
                return OS_W2K
        if osvi.dwMajorVersion == 4:
            return OS_NT
    return OS_UNKNOWN
Determines the current operating system. This function allows you to quickly tell apart major OS differences. For more detailed information call L{GetVersionEx} instead. @note: Wine reports itself as Windows XP 32 bits (even if the Linux host is 64 bits). ReactOS may report itself as Windows 2000 or Windows XP, depending on the version of ReactOS. @type osvi: L{OSVERSIONINFOEXA} @param osvi: Optional. The return value from L{GetVersionEx}. @rtype: str @return: One of the following values: - L{OS_UNKNOWN} (C{"Unknown"}) - L{OS_NT} (C{"Windows NT"}) - L{OS_W2K} (C{"Windows 2000"}) - L{OS_XP} (C{"Windows XP"}) - L{OS_XP_64} (C{"Windows XP (64 bits)"}) - L{OS_W2K3} (C{"Windows 2003"}) - L{OS_W2K3_64} (C{"Windows 2003 (64 bits)"}) - L{OS_W2K3R2} (C{"Windows 2003 R2"}) - L{OS_W2K3R2_64} (C{"Windows 2003 R2 (64 bits)"}) - L{OS_W2K8} (C{"Windows 2008"}) - L{OS_W2K8_64} (C{"Windows 2008 (64 bits)"}) - L{OS_W2K8R2} (C{"Windows 2008 R2"}) - L{OS_W2K8R2_64} (C{"Windows 2008 R2 (64 bits)"}) - L{OS_VISTA} (C{"Windows Vista"}) - L{OS_VISTA_64} (C{"Windows Vista (64 bits)"}) - L{OS_W7} (C{"Windows 7"}) - L{OS_W7_64} (C{"Windows 7 (64 bits)"})
388,212
def _get_event_cls(view_obj, events_map):
    request = view_obj.request
    view_method = getattr(view_obj, request.action)
    # The '_event_action' attribute name is a reconstruction; the original
    # literal was stripped.
    event_action = (
        getattr(view_method, '_event_action', None) or
        request.action)
    return events_map[event_action]
Helper function to get event class. :param view_obj: Instance of View that processes the request. :param events_map: Map of events from which event class should be picked. :returns: Found event class.
388,213
def construct_message(self, email=None):
    # Header names and the 'from' config key are reconstructions; the
    # original string literals were stripped.
    self.multipart['Subject'] = self.subject
    self.multipart['From'] = self.config['from']
    self.multipart['Date'] = formatdate(localtime=True)
    if email is None and self.send_as_one:
        self.multipart['To'] = ", ".join(self.addresses)
    elif email is not None and self.send_as_one is False:
        self.multipart['To'] = email
    if self.ccs is not None and self.ccs:
        self.multipart['CC'] = ", ".join(self.ccs)
    html = MIMEText(self.html, 'html')
    alt_text = MIMEText(self.text, 'plain')
    self.multipart.attach(html)
    self.multipart.attach(alt_text)
    for file in self.files:
        self.multipart.attach(file)
construct the email message
388,214
def _is_paste(keys): text_count = 0 newline_count = 0 for k in keys: if isinstance(k.key, six.text_type): text_count += 1 if k.key == Keys.ControlJ: newline_count += 1 return newline_count >= 1 and text_count > 1
Return `True` when we should consider this list of keys as a paste event. Pasted text on windows will be turned into a `Keys.BracketedPaste` event. (It's not 100% correct, but it is probably the best possible way to detect pasting of text and handle that correctly.)
388,215
def setVisible(self, state): super(XLineEdit, self).setVisible(state) self.adjustStyleSheet() self.adjustTextMargins()
Sets the visible state for this line edit. :param state | <bool>
388,216
def handle_failed_login(self, login_result):
    # The 'error' key, the 2FA marker string, and the user-facing messages
    # below are assumptions based on the docstring; the original string
    # literals were stripped.
    error_code = login_result.get('error')
    if 'TwoStepAuth' in error_code:
        utils.error_message('Two-step authentication required.')
        self.trigger_two_step_login(login_result)
        self.finish_two_step_login()
    else:
        utils.error_message_and_exit('Login failed:', login_result)
If Two Factor Authentication (2FA/2SV) is enabled, the initial login will fail with a predictable error. Catching this error allows us to begin the authentication process. Other types of errors can be treated in a similar way.
388,217
def right_click(self, x, y, n=1, pre_dl=None, post_dl=None): self.delay(pre_dl) self.m.click(x, y, 2, n) self.delay(post_dl)
Right click at ``(x, y)`` on screen ``n`` times. ``pre_dl`` and ``post_dl`` are optional delays before and after clicking. (Chinese documentation, translated: right-click ``n`` times at the screen coordinate ``(x, y)``.)
388,218
def check_selection(self): if self.current_prompt_pos is None: self.set_cursor_position() else: self.truncate_selection(self.current_prompt_pos)
Check if selected text is r/w, otherwise remove read-only parts of selection
388,219
def get_composition_query_session_for_repository(self, repository_id, proxy):
    if repository_id is None:
        raise NullArgument()
    if not self.supports_composition_query():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.CompositionQuerySession(repository_id, proxy,
                                                   runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
Gets a composition query session for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg: proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionQuerySession) - a CompositionQuerySession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_query() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_query() and supports_visible_federation() are true.
388,220
def convert_bbox_to_albumentations(bbox, source_format, rows, cols, check_validity=False):
    if source_format not in {'coco', 'pascal_voc'}:
        raise ValueError(
            "Unknown source_format {}. Supported formats are: 'coco' and 'pascal_voc'".format(source_format)
        )
    if source_format == 'coco':
        x_min, y_min, width, height = bbox[:4]
        x_max = x_min + width
        y_max = y_min + height
    else:
        x_min, y_min, x_max, y_max = bbox[:4]
    bbox = [x_min, y_min, x_max, y_max] + list(bbox[4:])
    bbox = normalize_bbox(bbox, rows, cols)
    if check_validity:
        check_bbox(bbox)
    return bbox
Convert a bounding box from a format specified in `source_format` to the format used by albumentations: normalized coordinates of bottom-left and top-right corners of the bounding box in a form of `[x_min, y_min, x_max, y_max]` e.g. `[0.15, 0.27, 0.67, 0.5]`. Args: bbox (list): bounding box source_format (str): format of the bounding box. Should be 'coco' or 'pascal_voc'. check_validity (bool): check if all boxes are valid boxes rows (int): image height cols (int): image width Note: The `coco` format of a bounding box looks like `[x_min, y_min, width, height]`, e.g. [97, 12, 150, 200]. The `pascal_voc` format of a bounding box looks like `[x_min, y_min, x_max, y_max]`, e.g. [97, 12, 247, 212]. Raises: ValueError: if `source_format` is not equal to `coco` or `pascal_voc`.
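A worked example of the conversion, assuming `normalize_bbox` divides x-coordinates by `cols` and y-coordinates by `rows`: the `coco` box `[97, 12, 150, 200]` in a 300x400 (rows x cols) image first becomes the corner form, then is normalized.

>>> bbox = [97, 12, 150, 200]            # coco: x_min, y_min, width, height
>>> x_min, y_min, w, h = bbox
>>> corners = [x_min, y_min, x_min + w, y_min + h]
>>> corners
[97, 12, 247, 212]
>>> rows, cols = 300, 400                # image height and width
>>> [round(v, 4) for v in (corners[0] / cols, corners[1] / rows,
...                        corners[2] / cols, corners[3] / rows)]
[0.2425, 0.04, 0.6175, 0.7067]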
388,221
def unregister(self, token_to_remove): for i, (_, token, _) in enumerate(self._filter_order): if token == token_to_remove: break else: raise ValueError("unregistered token: {!r}".format( token_to_remove)) del self._filter_order[i]
Unregister a filter function. :param token_to_remove: The token as returned by :meth:`register`. Unregister a function from the filter chain using the token returned by :meth:`register`.
388,222
def fromexportreg(cls, bundle, export_reg): exc = export_reg.get_exception() if exc: return RemoteServiceAdminEvent( RemoteServiceAdminEvent.EXPORT_ERROR, bundle, export_reg.get_export_container_id(), export_reg.get_remoteservice_id(), None, None, exc, export_reg.get_description(), ) return RemoteServiceAdminEvent( RemoteServiceAdminEvent.EXPORT_REGISTRATION, bundle, export_reg.get_export_container_id(), export_reg.get_remoteservice_id(), None, export_reg.get_export_reference(), None, export_reg.get_description(), )
Creates a RemoteServiceAdminEvent object from an ExportRegistration
388,223
def delete_intent(self,
                  name,
                  retry=google.api_core.gapic_v1.method.DEFAULT,
                  timeout=google.api_core.gapic_v1.method.DEFAULT,
                  metadata=None):
    if 'delete_intent' not in self._inner_api_calls:
        self._inner_api_calls[
            'delete_intent'] = google.api_core.gapic_v1.method.wrap_method(
                self.transport.delete_intent,
                default_retry=self._method_configs['DeleteIntent'].retry,
                default_timeout=self._method_configs['DeleteIntent'].timeout,
                client_info=self._client_info,
            )
    request = intent_pb2.DeleteIntentRequest(name=name)
    self._inner_api_calls['delete_intent'](
        request, retry=retry, timeout=timeout, metadata=metadata)
Deletes the specified intent. Example: >>> import dialogflow_v2 >>> >>> client = dialogflow_v2.IntentsClient() >>> >>> name = client.intent_path('[PROJECT]', '[INTENT]') >>> >>> client.delete_intent(name) Args: name (str): Required. The name of the intent to delete. Format: ``projects/<Project ID>/agent/intents/<Intent ID>``. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
388,224
def save_state(self, fname):
    log.info("Saving state to %s", fname)
    data = {
        'ksize': self.ksize,
        'alpha': self.alpha,
        'id': self.node.id,
        'neighbors': self.bootstrappable_neighbors()
    }
    if not data['neighbors']:
        log.warning("No known neighbors, so not writing to cache.")
        return
    with open(fname, 'wb') as file:
        pickle.dump(data, file)
Save the state of this node (the alpha/ksize/id/immediate neighbors) to a cache file with the given fname.
388,225
def RgbToWebSafe(r, g, b, alt=False):
    webSafeComponent = Color._WebSafeComponent
    return tuple((webSafeComponent(v, alt) for v in (r, g, b)))
Convert the color from RGB to 'web safe' RGB Parameters: :r: The Red component value [0...1] :g: The Green component value [0...1] :b: The Blue component value [0...1] :alt: If True, use the alternative color instead of the nearest one. Can be used for dithering. Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] >>> '(%g, %g, %g)' % Color.RgbToWebSafe(1, 0.55, 0.0) '(1, 0.6, 0)'
388,226
def apply_widget_css_class(self, field_name):
    field = self.fields[field_name]
    class_name = self.get_widget_css_class(field_name, field)
    if class_name:
        field.widget.attrs['class'] = join_css_class(
            field.widget.attrs.get('class', None), class_name)
Applies CSS classes to widgets if available. The method uses the `get_widget_css_class` method to determine if the widget CSS class should be changed. If a CSS class is returned, it is appended to the current value of the class property of the widget instance. :param field_name: A field name of the form.
388,227
def labels(self, include_missing=False, include_transforms_for_dims=False): return [ dim.labels(include_missing, include_transforms_for_dims) for dim in self.dimensions ]
Gets labels for each cube's dimension. Args include_missing (bool): Include labels for missing values Returns labels (list of lists): Labels for each dimension
388,228
def analyze_all(self, analysis_directory = None): for analysis_set in self.analysis_sets: self.analyze(analysis_set, analysis_directory = analysis_directory)
This function runs the analysis and creates the plots and summary file.
388,229
def get_rate_limits(self): resp, body = self.method_get("/limits") rate_limits = body.get("limits", {}).get("rate") ret = [] for rate_limit in rate_limits: limits = rate_limit["limit"] uri_limits = {"uri": rate_limit["uri"], "limits": limits} ret.append(uri_limits) return ret
Returns a list of dicts with the current rate limit information for domain and status requests.
388,230
def validate(self, skip_utf8_validation=False): if self.rsv1 or self.rsv2 or self.rsv3: raise WebSocketProtocolException("rsv is not implemented, yet") if self.opcode not in ABNF.OPCODES: raise WebSocketProtocolException("Invalid opcode %r", self.opcode) if self.opcode == ABNF.OPCODE_PING and not self.fin: raise WebSocketProtocolException("Invalid ping frame.") if self.opcode == ABNF.OPCODE_CLOSE: l = len(self.data) if not l: return if l == 1 or l >= 126: raise WebSocketProtocolException("Invalid close frame.") if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]): raise WebSocketProtocolException("Invalid close frame.") code = 256 * \ six.byte2int(self.data[0:1]) + six.byte2int(self.data[1:2]) if not self._is_valid_close_status(code): raise WebSocketProtocolException("Invalid close opcode.")
validate the ABNF frame. skip_utf8_validation: skip utf8 validation.
388,231
def normalized_energy_at_conditions(self, pH, V): return self.energy_at_conditions(pH, V) * self.normalization_factor
Energy at an electrochemical condition, compatible with numpy arrays for pH/V input Args: pH (float): pH at condition V (float): applied potential at condition Returns: energy normalized by number of non-O/H atoms at condition
388,232
def create_default_units_and_dimensions():
    # The path components and the 'abbr'/'name'/'lf'/'cf' JSON keys are
    # assumptions; the original string literals were stripped.
    default_units_file_location = os.path.realpath(
        os.path.join(os.path.dirname(os.path.realpath(__file__)),
                     '../', 'static', 'default_units_and_dimensions.json'))
    d = None
    with open(default_units_file_location) as json_data:
        d = json.load(json_data)
        json_data.close()
    for json_dimension in d["dimension"]:
        new_dimension = None
        dimension_name = get_utf8_encoded_string(json_dimension["name"])
        db_dimensions_by_name = db.DBSession.query(Dimension).filter(
            Dimension.name == dimension_name).all()
        if len(db_dimensions_by_name) == 0:
            log.debug("Adding Dimension `{}`".format(dimension_name))
            new_dimension = Dimension()
            if "id" in json_dimension:
                new_dimension.id = json_dimension["id"]
            new_dimension.name = dimension_name
            db.DBSession.add(new_dimension)
            db.DBSession.flush()
        new_dimension = get_dimension_from_db_by_name(dimension_name)
        for json_unit in json_dimension["unit"]:
            db_units_by_name = db.DBSession.query(Unit).filter(
                Unit.abbreviation == get_utf8_encoded_string(json_unit['abbr'])).all()
            if len(db_units_by_name) == 0:
                log.debug("Adding Unit %s in %s", json_unit['abbr'], json_dimension["name"])
                new_unit = Unit()
                if "id" in json_unit:
                    new_unit.id = json_unit["id"]
                new_unit.dimension_id = new_dimension.id
                new_unit.name = get_utf8_encoded_string(json_unit['name'])
                new_unit.abbreviation = get_utf8_encoded_string(json_unit['abbr'])
                new_unit.lf = get_utf8_encoded_string(json_unit['lf'])
                new_unit.cf = get_utf8_encoded_string(json_unit['cf'])
                if "description" in json_unit:
                    new_unit.description = get_utf8_encoded_string(json_unit["description"])
                db.DBSession.add(new_unit)
                db.DBSession.flush()
    try:
        db.DBSession.commit()
    except Exception:
        pass
    return
Adds the units and the dimensions by reading a JSON file. It adds only dimensions and units that are not already in the db. New dimensions and units can be added to the DB just by modifying the JSON file.
388,233
def callCount(cls, spy, number): cls.__is_spy(spy) if not (spy.callCount == number): raise cls.failException(cls.message)
Check that the spy was called exactly `number` times. Args: spy (SinonSpy), number (int)
388,234
def run(self, clock, generalLedger):
    if not self._meet_execution_criteria(clock.timestep_ix):
        return
    generalLedger.create_transaction(
        self.description if self.description is not None else self.name,
        description='',
        tx_date=clock.get_datetime(),
        dt_account=self.dt_account,
        cr_account=self.cr_account,
        source=self.path,
        amount=self.amount)
Execute the activity at the current clock cycle. :param clock: The clock containing the current execution time and period information. :param generalLedger: The general ledger into which to create the transactions.
388,235
def _children(self): if self.declarations: yield self.declarations if isinstance(self.condition, CodeExpression): yield self.condition if self.increment: yield self.increment for codeobj in self.body._children(): yield codeobj
Yield all direct children of this object.
388,236
def protect(self, password=None, read_protect=False, protect_from=0): return super(NTAG203, self).protect( password, read_protect, protect_from)
Set lock bits to disable future memory modifications. If *password* is None, all memory pages except the 16-bit counter in page 41 are protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found in page 4, protect() also sets the NDEF write flag to read-only. The NTAG203 can not be password protected. If a *password* argument is provided, the protect() method always returns False.
388,237
def deprecated(msg=''):
    @decorator.decorator
    def wrap(function, *args, **kwargs):
        # The kwarg name used to silence the warning is an assumption; the
        # original literal was stripped.
        if not kwargs.pop('no_deprecation_warning', False):
            warn(msg, DeprecationWarning)
        return function(*args, **kwargs)
    return wrap
Deprecate decorated method.
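A small usage sketch; the silencing kwarg name follows the reconstruction above and is an assumption:

@deprecated('old_sum() is deprecated; use sum() instead.')
def old_sum(xs):
    return sum(xs)

old_sum([1, 2, 3])                               # emits a DeprecationWarning
old_sum([1, 2, 3], no_deprecation_warning=True)  # suppresses the warning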
388,238
def clear(self): for slot in self._slots: slot.grid_forget() slot.destroy() self._slots = []
Clear out the multi-frame :return:
388,239
def match_agent_id(self, agent_id, match):
    self._add_match('agentId', str(agent_id), bool(match))
Matches the agent identified by the given ``Id``. arg: agent_id (osid.id.Id): the Id of the ``Agent`` arg: match (boolean): ``true`` if a positive match, ``false`` for a negative match raise: NullArgument - ``agent_id`` is ``null`` *compliance: mandatory -- This method must be implemented.*
388,240
def member_create(self, params, member_id):
    # The 'rsParams'/'procParams'/'replSet' keys and the 'mongod' name follow
    # mongo-orchestration conventions; the original literals were stripped.
    member_config = params.get('rsParams', {})
    server_id = params.pop('server_id', None)
    version = params.pop('version', self._version)
    proc_params = {'replSet': self.repl_id}
    proc_params.update(params.get('procParams', {}))
    if self.enable_ipv6:
        enable_ipv6_single(proc_params)
    server_id = self._servers.create(
        name='mongod',
        procParams=proc_params,
        sslParams=self.sslParams,
        version=version,
        server_id=server_id
    )
    member_config.update({"_id": member_id,
                          "host": self._servers.hostname(server_id)})
    return member_config
start new mongod instances as part of replica set Args: params - member params member_id - member index return member config
388,241
def sync_next_id(self): if self.next_id is not None: if len(self): n = max(self.getColumnByName(self.next_id.column_name)) + 1 else: n = type(self.next_id)(0) if n > self.next_id: self.set_next_id(n) return self.next_id
Determines the highest-numbered ID in this table, and sets the table's .next_id attribute to the next highest ID in sequence. If the .next_id attribute is already set to a value greater than the highest value found, then it is left unmodified. The return value is the ID identified by this method. If the table's .next_id attribute is None, then this function is a no-op. Note that tables of the same name typically share a common .next_id attribute (it is a class attribute, not an attribute of each instance) so that IDs can be generated that are unique across all tables in the document. Running sync_next_id() on all the tables in a document that are of the same type will have the effect of setting the ID to the next ID higher than any ID in any of those tables. Example: >>> import lsctables >>> tbl = lsctables.New(lsctables.ProcessTable) >>> print tbl.sync_next_id() process:process_id:0
388,242
def gp_ccX(): inDir, outDir = getWorkDirs() data, alldata = OrderedDict(), None for infile in os.listdir(inDir): key = os.path.splitext(infile)[0].replace(, ) data_import = np.loadtxt(open(os.path.join(inDir, infile), )) data_import[:,3] = 0.434*(data_import[:,3]/data_import[:,1]) data_import[:,(0,1)] = np.log10(data_import[:,(0,1)]) data_import[:,2] = 0 data[key] = data_import alldata = data[key] if alldata is None else np.vstack((alldata, data[key])) lindata = alldata[alldata[:,0]>2.5] m = (lindata[-1,1]-lindata[0,1])/(lindata[-1,0]-lindata[0,0]) t = lindata[0,1] - m * lindata[0,0] popt1, pcov = curve_fit( linear, lindata[:,0], lindata[:,1], p0=[m, t], sigma=lindata[:,3], absolute_sigma=True ) popt2, pcov = curve_fit( lambda x, c, d: fitfunc(x, popt1[0], popt1[1], c, d), alldata[:,0], alldata[:,1], sigma=alldata[:,3], absolute_sigma=True, ) popt = np.hstack((popt1, popt2)) model = lambda x: fitfunc(x, *popt) yfit = np.array([model(x) for x in alldata[:,0]]) stddev = 1.5*np.sqrt( np.average((alldata[:,1]-yfit)**2, weights=1./alldata[:,3]) ) print % stddev errorband = np.array([[x, model(x), 0, 0, stddev] for x in np.linspace(1,4)]) fitdata = np.array([[x, model(x), 0, 0, 0] for x in np.linspace(1,4)]) par_names = [, , , ] energies = [19.6, 27, 39, 62.4, 200] labels = dict( ( % (par_name, popt[i]), [3.3, 3-i*0.2, True]) for i,par_name in enumerate(par_names) ) ccX_vals = [10**model(np.log10(energy)) for energy in energies] ccX = [.join([ % energy, .format(ufloat(ccX_vals[i], stddev/0.434*ccX_vals[i])), ]) for i,energy in enumerate(energies)] print ccX make_plot( data = [errorband] + data.values() + [fitdata], properties = [ % default_colors[8] ] + [ % (default_colors[i]) for i in xrange(len(data)) ] + [], titles = [] + data.keys() + [], xlabel = , ylabel = , name = os.path.join(outDir, ), size = , xr = [1, 4], yr = [0.5,4.5], key = [, , ], tmargin = 0.98, rmargin = 0.99, bmargin = 0.13, lines = dict( ( % (np.log10(energy)), ) for energy in energies ), labels = labels, )
fit experimental data
388,243
def get_file_info_web(self, fname, delim='<br>'):
    # The delimiter default and the label strings are assumptions; the
    # original string literals were stripped.
    txt = ''
    f = mod_file.File(fname[0])
    txt += 'name: ' + f.name + ' ' + delim
    txt += 'folder: ' + fname[1] + ' ' + delim
    txt += 'modified: ' + f.GetDateAsString(f.date_modified)[2:10] + ' ' + delim
    txt += 'size: ' + str(f.size) + ' ' + delim
    return txt + '\n'
Gathers info on a Python program (passed as a list) and formats it as a string.
388,244
def qurl(url, add=None, exclude=None, remove=None): urlp = list(urlparse(url)) qp = parse_qsl(urlp[4]) add = add if add else {} for name, value in add.items(): if isinstance(value, (list, tuple)): value = [smart_str(v) for v in value] qp = [p for p in qp if p[0] != name or p[1] not in value] qp.extend([(name, smart_str(val)) for val in value]) else: qp = [p for p in qp if p[0] != name] qp.append((name, smart_str(value))) exclude = exclude if exclude else {} for name, value in exclude.items(): if not isinstance(value, (list, tuple)): value = [value] value = [smart_str(v) for v in value] qp = [p for p in qp if p[0] != name or p[1] not in value] remove = remove if remove else [] for name in remove: qp = [p for p in qp if p[0] != name] urlp[4] = urlencode(qp, True) return urlunparse(urlp)
Returns the url with changed parameters
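Some illustrative calls, assuming the surrounding helpers behave as shown; the exact ordering of query parameters in the output may differ:

>>> qurl('http://example.com/?page=2&q=py', add={'sort': 'date'})
'http://example.com/?page=2&q=py&sort=date'
>>> qurl('http://example.com/?page=2&q=py', remove=['page'])
'http://example.com/?q=py'
>>> qurl('http://example.com/?tag=a&tag=b', exclude={'tag': 'a'})
'http://example.com/?tag=b'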
388,245
def sigmask(self, sigsetsize=None):
    if self._sigmask is None:
        # The BVS name/key 'initial_sigmask' is an assumption; the original
        # literals were stripped.
        if sigsetsize is not None:
            sc = self.state.solver.eval(sigsetsize)
            self.state.add_constraints(sc == sigsetsize)
            self._sigmask = self.state.solver.BVS(
                'initial_sigmask', sc * self.state.arch.byte_width,
                key=('initial_sigmask',), eternal=True)
        else:
            self._sigmask = self.state.solver.BVS(
                'initial_sigmask', self.sigmask_bits,
                key=('initial_sigmask',), eternal=True)
    return self._sigmask
Gets the current sigmask. If it's blank, a new one is created (of sigsetsize). :param sigsetsize: the size (in *bytes*) of the sigmask set :return: the sigmask
388,246
def monitor_module(module, summary_writer,
                   track_data=True, track_grad=True,
                   track_update=True, track_update_ratio=False, bins=51):
    # Attribute names in the hasattr() checks are inferred from the
    # assignments that follow each check.
    module.track_data = track_data
    module.track_grad = track_grad
    module.track_update = track_update
    module.track_update_ratio = track_update_ratio
    if not hasattr(module, 'global_step'):
        module.global_step = 0
    if not hasattr(module, 'is_monitoring'):
        module.is_monitoring = True
    if not hasattr(module, 'monitoring'):
        set_monitoring(module)
    if not hasattr(module, 'last_state_dict'):
        module.last_state_dict = dict()
    if not hasattr(module, 'var_hooks'):
        module.var_hooks = dict()
    if not hasattr(module, 'param_hooks'):
        module.param_hooks = dict()
    for name, mod in module.named_modules():
        if not hasattr(mod, 'monitor'):
            set_monitor(mod)
        if not hasattr(mod, 'monitored_vars'):
            mod.monitored_vars = dict()
    module.monitoring(True)
    module.register_forward_pre_hook(remove_grad_hooks)
    monitor_forward_and_backward = get_monitor_forward_and_backward(summary_writer, bins)
    module.register_forward_hook(monitor_forward_and_backward)
Allows for remote monitoring of a module's params and buffers. The following may be monitored: 1. Forward Values - Histograms of the values for parameter and buffer tensors 2. Gradient Values - Histograms of the gradients for parameter and buffer tensors 3. Update Values - Histograms of the change in parameter and buffer tensor values from the last recorded forward pass 4. Update Ratios - Histograms of the ratio of change in value for parameter and value tensors from the last iteration to the actual values. I.e., what is the relative size of the update. Generally we like to see values of about .001. See [cite Andrej Karpathy's babysitting dnn's blog post]
388,247
def shell(filepath, wsgiapp, interpreter, models):
    model_base_classes = get_model_base_classes()
    imported_objects = {}
    if models and model_base_classes:
        insert_import_path_to_sys_modules(filepath)
        for module in find_modules_from_path(filepath):
            for name in dir(module):
                if name.startswith('_'):
                    continue
                obj = getattr(module, name)
                if isinstance(obj, model_base_classes):
                    key = name.split('.')[-1] if '.' in name else name
                    if key in imported_objects:
                        continue
                    imported_objects[key] = obj
    module = SourceFileLoader('module', filepath).load_module()
    imported_objects['app'] = getattr(module, wsgiapp)
    for key in imported_objects.keys():
        click.secho("import {}".format(key), fg='green')
    run_python(interpreter, imported_objects)
Runs a python shell. Usage: $ wsgicli shell app.py app -i ipython
388,248
def _authenticated_call_geocoder(self, url, timeout=DEFAULT_SENTINEL): if self.token is None or int(time()) > self.token_expiry: self._refresh_authentication_token() request = Request( "&".join((url, urlencode({"token": self.token}))), headers={"Referer": self.referer} ) return self._base_call_geocoder(request, timeout=timeout)
Wrap self._call_geocoder, handling tokens.
388,249
def main(argv: Sequence[str] = SYS_ARGV) -> int: args = default_parser().parse_args(argv) try: seq = POPULATIONS[args.population] except KeyError: try: with open(args.population, , encoding=args.encoding) as file_: seq = list(file_) except (OSError, UnicodeError) as ex: print(ex, file=sys.stderr) return 1 main_key = key(seq=seq, nteeth=args.nteeth, delimiter=args.delimiter) print(main_key) if args.stats: print(, len(main_key), ) print(, args.nteeth, , len(seq)) print( .format( sign= if args.delimiter else , nbits=round(math.log(len(seq), 2) * args.nteeth, 2), ), ) return 0
Execute CLI commands.
388,250
def map_E_to_height(self, alat, alon, height, newheight, E):
    return self._map_EV_to_height(alat, alon, height, newheight, E, 'E')
Performs mapping of electric field along the magnetic field. It is assumed that the electric field is perpendicular to B. Parameters ========== alat : (N,) array_like or float Modified apex latitude alon : (N,) array_like or float Modified apex longitude height : (N,) array_like or float Source altitude in km newheight : (N,) array_like or float Destination altitude in km E : (3,) or (3, N) array_like Electric field (at `alat`, `alon`, `height`) in geodetic east, north, and up components Returns ======= E : (3, N) or (3,) ndarray The electric field at `newheight` (geodetic east, north, and up components)
388,251
def get_module_functions(modules): module_fns = set() for module in modules: for key in dir(module): attr = getattr(module, key) if isinstance( attr, (types.BuiltinFunctionType, types.FunctionType, numpy.ufunc)): module_fns.add(attr) return module_fns
Finds functions that do not have implemented derivatives. Args: modules: A list of Python modules. Functions contained in these modules will be checked for membership in 'implemented', and if not found, will be added to an 'unimplemented' set implemented: A Python object containing implemented derivatives. A function should be checkable for membership using the `fn in implemented` syntax. Returns: module_fns: A set of functions, builtins or ufuncs in `modules`.
388,252
def iter_schemas(self, schema: Schema) -> Iterable[Tuple[str, Any]]: if not schema: return yield self.to_tuple(schema) for name, field in self.iter_fields(schema): if isinstance(field, Nested): yield self.to_tuple(field.schema) yield from self.iter_schemas(field.schema) if isinstance(field, List) and isinstance(field.container, Nested): yield self.to_tuple(field.container.schema) yield from self.iter_schemas(field.container.schema)
Build zero or more JSON schemas for a marshmallow schema. Generates: name, schema pairs.
388,253
def get_field_label(self, field_name, field=None):
    label = None
    if field is not None:
        # Trying 'label' then 'verbose_name' is an assumption; the original
        # attribute-name literals were stripped.
        label = getattr(field, 'label', None)
        if label is None:
            label = getattr(field, 'verbose_name', None)
    if label is None:
        label = field_name
    return label.capitalize()
Return a label to display for a field
388,254
def apply(self, q, bindings, cuts):
    info = []
    for (ref, operator, value) in self.parse(cuts):
        if map_is_class and isinstance(value, map):
            value = list(value)
        self._check_type(ref, value)
        info.append({'ref': ref, 'operator': operator, 'value': value})
        table, column = self.cube.model[ref].bind(self.cube)
        bindings.append(Binding(table, ref))
        q = q.where(column.in_(value))
    return info, q, bindings
Apply a set of filters, which can be given as a set of tuples in the form (ref, operator, value), or as a string in query form. If it is ``None``, no filter will be applied.
388,255
def make_url_absolute(self, url, resolve_base=False):
    if self.config['url']:
        if resolve_base:
            ubody = self.doc.unicode_body()
            base_url = find_base_url(ubody)
            if base_url:
                return urljoin(base_url, url)
        return urljoin(self.config['url'], url)
    else:
        return url
Make url absolute using previous request url as base url.
388,256
def _init_usrgos(self, goids): usrgos = set() goids_missing = set() _go2obj = self.gosubdag.go2obj for goid in goids: if goid in _go2obj: usrgos.add(goid) else: goids_missing.add(goid) if goids_missing: print("MISSING GO IDs: {GOs}".format(GOs=goids_missing)) print("{N} of {M} GO IDs ARE MISSING".format(N=len(goids_missing), M=len(goids))) return usrgos
Return user GO IDs which have GO Terms.
388,257
def text_height(text): (d1, d2, ymin, ymax) = get_dimension(text) return (ymax - ymin, ymax)
Return the total height of the <text> and the length from the base point to the top of the text box.
388,258
def read_links_file(self, file_path):
    articles = []
    with open(file_path) as f:
        for line in f:
            line = line.strip()
            if len(line) != 0:
                link, category = line.split()
                articles.append((category.rstrip(), link.strip()))
    return articles
Read links and associated categories for specified articles in a text file, separated by a space Args: file_path (str): The path to text file with news article links and category Returns: articles: Array of tuples that contains article link & category ex. [('IPO','www.cs.columbia.edu')]
388,259
def parse_keypair_lines(content, delim='|', kv_sep='='):
    # The '|' and '=' defaults are reconstructions; the original literals
    # were stripped.
    r = []
    if content:
        for row in [line for line in content if line]:
            item_dict = {}
            for item in row.split(delim):
                key, value = [i.strip("'\"").strip() for i in item.strip().split(kv_sep)]
                item_dict[key] = value
            r.append(item_dict)
    return r
Parses a set of entities, where each entity is a set of key-value pairs contained all on one line. Each entity is parsed into a dictionary and added to the list returned from this function.
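A worked example, using the delimiter defaults as reconstructed above:

>>> content = ['name=app1 | port=8080', 'name=app2 | port=9090']
>>> parse_keypair_lines(content)
[{'name': 'app1', 'port': '8080'}, {'name': 'app2', 'port': '9090'}]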
388,260
def render_pdf_file_to_image_files__ghostscript_png(pdf_file_name, root_output_file_path,
                                                    res_x=150, res_y=150):
    if not gs_executable:
        init_and_test_gs_executable(exit_on_fail=True)
    # str() conversion added: res_x/res_y are ints and cannot be concatenated
    # to strings directly.
    command = [gs_executable, "-dBATCH", "-dNOPAUSE", "-sDEVICE=pnggray",
               "-r" + str(res_x) + "x" + str(res_y),
               "-sOutputFile=" + root_output_file_path + "-%06d.png",
               pdf_file_name]
    comm_output = get_external_subprocess_output(command, env=gs_environment)
    return comm_output
Use Ghostscript to render a PDF file to .png images. The root_output_file_path is prepended to all the output files, which have numbers and extensions added. Return the command output.
388,261
def exchange_additional_URL(self, handle, old, new):
    # The log and error message texts and the op label are reconstructions;
    # the original string literals were stripped.
    LOGGER.debug('exchange_additional_URL...')
    handlerecord_json = self.retrieve_handle_record_json(handle)
    if handlerecord_json is None:
        msg = 'Cannot exchange URLs in nonexistent handle'
        raise HandleNotFoundException(
            handle=handle,
            msg=msg
        )
    list_of_entries = handlerecord_json['values']
    if not self.is_URL_contained_in_10320LOC(handle, old, handlerecord_json):
        LOGGER.debug('exchange_additional_URL: No URLs exchanged, as the old URL was not in the record.')
    else:
        self.__exchange_URL_in_13020loc(old, new, list_of_entries, handle)
        op = 'exchanging URLs'
        resp, put_payload = self.__send_handle_put_request(
            handle,
            list_of_entries,
            overwrite=True,
            op=op
        )
        if hsresponses.handle_success(resp):
            pass
        else:
            msg = 'Could not exchange URL ' + str(old) + ' against ' + str(new)
            raise GenericHandleError(
                operation=op,
                handle=handle,
                reponse=resp,
                msg=msg,
                payload=put_payload
            )
Exchange an URL in the 10320/LOC entry against another, keeping the same id and other attributes. :param handle: The handle to modify. :param old: The URL to replace. :param new: The URL to set as new URL.
388,262
def render(self, template=None, additional=None):
    template_path = template or self.get_template_path()
    # The 'object' context key is an assumption; the original literal was
    # stripped.
    template_vars = {'object': self}
    if additional:
        template_vars.update(additional)
    rendered = render_to_string(template_path, template_vars)
    return mark_safe(rendered)
Render single model to its html representation. You may set template path in render function argument, or model's variable named 'template_path', or get default name: $app_label$/models/$model_name$.html Settings: * MODEL_RENDER_DEFAULT_EXTENSION set default template extension. Usable if you use jinja or others. :param template: custom template_path :return: rendered model html string
388,263
def set_item(filename, item): with atomic_write(os.fsencode(str(filename))) as temp_file: with open(os.fsencode(str(filename))) as products_file: products_data = json.load(products_file) uuid_list = [i for i in filter( lambda z: z["uuid"] == str(item["uuid"]), products_data)] if len(uuid_list) == 0: products_data.append(item) json.dump(products_data, temp_file) return True return None
Save entry to JSON file
388,264
def _get_measure_outcome(self, qubit):
    axis = list(range(self._number_of_qubits))
    axis.remove(self._number_of_qubits - 1 - qubit)
    probabilities = np.sum(np.abs(self._statevector) ** 2, axis=tuple(axis))
    random_number = self._local_random.rand()
    if random_number < probabilities[0]:
        return '0', probabilities[0]
    return '1', probabilities[1]
Simulate the outcome of measurement of a qubit. Args: qubit (int): the qubit to measure Return: tuple: pair (outcome, probability) where outcome is '0' or '1' and probability is the probability of the returned outcome.
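The probability marginalisation can be checked standalone: summing |amplitude|^2 over every axis except the measured qubit's yields that qubit's outcome distribution. A self-contained two-qubit sketch:

import numpy as np

# |psi> = (|00> + |11>)/sqrt(2), stored with one axis per qubit
psi = np.zeros((2, 2), dtype=complex)
psi[0, 0] = psi[1, 1] = 1 / np.sqrt(2)

# marginal probabilities for the qubit on axis 0 (sum over the other axis)
probs = np.sum(np.abs(psi) ** 2, axis=(1,))
print(probs)  # [0.5 0.5]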
388,265
def server_list_detailed(self): nt_ks = self.compute_conn ret = {} for item in nt_ks.servers.list(): try: ret[item.name] = { : {}, : {}, : item.accessIPv4, : item.accessIPv6, : item.addresses, : item.created, : {: item.flavor[], : item.flavor[]}, : item.hostId, : item.id, : {: item.image[] if item.image else , : item.image[] if item.image else }, : item.key_name, : item.links, : item.metadata, : item.name, : item.status, : item.tenant_id, : item.updated, : item.user_id, } except TypeError: continue ret[item.name][] = getattr(item, , ) if hasattr(item.__dict__, ): ret[item.name][] = { : item.__dict__[] } if hasattr(item.__dict__, ): ret[item.name][][] = \ item.__dict__[] if hasattr(item.__dict__, ): ret[item.name][][] = \ item.__dict__[] if hasattr(item.__dict__, ): ret[item.name][][] = \ item.__dict__[] if hasattr(item.__dict__, ): ret[item.name][][] = \ item.__dict__[] if hasattr(item.__dict__, ): ret[item.name][][] = \ item.__dict__[] if hasattr(item.__dict__, ): ret[item.name][][] = \ item.__dict__[] if hasattr(item.__dict__, ): ret[item.name][] = \ item.__dict__[] return ret
Detailed list of servers
388,266
def match(self, path):
    this = self.segments
    that = path.split('/')
    current_var = None
    bindings = {}
    segment_count = self.segment_count
    j = 0
    for i in range(0, len(this)):
        if j >= len(that):
            break
        if this[i].kind == _TERMINAL:
            if this[i].literal == '*':
                bindings[current_var] = that[j]
                j += 1
            elif this[i].literal == '**':
                until = j + len(that) - segment_count + 1
                segment_count += len(that) - segment_count
                bindings[current_var] = '/'.join(that[j:until])
                j = until
            elif this[i].literal != that[j]:
                raise ValidationException(
                    'mismatched literal: \'%s\' != \'%s\'' % (
                        this[i].literal, that[j]))
            else:
                j += 1
        elif this[i].kind == _BINDING:
            current_var = this[i].literal
    if j != len(that) or j != segment_count:
        raise ValidationException(
            'match error: could not match path: {}'.format(path))
    return bindings
Matches a fully qualified path template string. Args: path (str): A fully qualified path template string. Returns: dict: Var names to matched binding values. Raises: ValidationException: If path can't be matched to the template.
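A usage sketch, assuming a hypothetical `PathTemplate` parser that assigns positional variable names (`$0`, `$1`, ...) to unbound wildcards:

>>> template = PathTemplate('shelves/*/books/**')
>>> template.match('shelves/s1/books/b1/pages/p1')
{'$0': 's1', '$1': 'b1/pages/p1'}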
388,267
def bft(self): queue = deque([self]) while queue: node = queue.pop() yield node if hasattr(node, "childs"): queue.extendleft(node.childs)
Generator that returns each element of the tree in Breadth-first order
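A self-contained sketch of the traversal order, re-implementing the generator on a toy node class (any object with an optional `childs` list works):

from collections import deque

class Node:
    def __init__(self, name, childs=None):
        self.name = name
        if childs is not None:
            self.childs = childs

    def bft(self):
        queue = deque([self])
        while queue:
            node = queue.pop()
            yield node
            if hasattr(node, "childs"):
                queue.extendleft(node.childs)

root = Node('a', [Node('b', [Node('d')]), Node('c')])
print([n.name for n in root.bft()])  # ['a', 'b', 'c', 'd']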
388,268
def build_subtree_strut(self, result, *args, **kwargs): return self.service.build_subtree_strut(result=result, *args, **kwargs)
Returns a dictionary in form of {node:Resource, children:{node_id: Resource}} :param result: :return:
388,269
def get_group_summary(self, group_id, **kwargs):
    kwargs['_return_http_data_only'] = True
    if kwargs.get('asynchronous'):
        return self.get_group_summary_with_http_info(group_id, **kwargs)
    else:
        (data) = self.get_group_summary_with_http_info(group_id, **kwargs)
        return data
Get group information. # noqa: E501 An endpoint for getting general information about the group. **Example usage:** `curl https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.get_group_summary(group_id, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str group_id: The ID of the group to be retrieved. (required) :return: GroupSummary If the method is called asynchronously, returns the request thread.
388,270
def load_remotes(extra_path=None, load_user=True):
    from os.path import getmtime
    try:
        remotes_file = find_config_file(REMOTES_FILE, extra_path=extra_path,
                                        load_user=load_user)
    except ConfigurationError:
        remotes_file = None
    if remotes_file is not None and os.path.exists(remotes_file):
        config = AttrDict()
        config.update_yaml(remotes_file)
        if 'remotes' not in config:
            config.remotes = AttrDict()
        config.remotes.loaded = [remotes_file, getmtime(remotes_file)]
        return config
    else:
        return None
Load the YAML remotes file, which sort of combines the Accounts file with part of the remotes sections from the main config :return: An `AttrDict`
388,271
def addKwdArgsToSig(sigStr, kwArgsDict):
    retval = sigStr
    if len(kwArgsDict) > 0:
        # Stripping the closing paren before appending and re-adding it is a
        # reconstruction; the original literals were stripped.
        retval = retval.strip(')')
        for k in kwArgsDict:
            if retval[-1] != '(':
                retval += ", "
            retval += str(k) + "=" + str(kwArgsDict[k])
        retval += ')'
    return retval
Alter the passed function signature string to add the given keywords
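Two worked examples, given the reconstruction above:

>>> addKwdArgsToSig('foo(a, b)', {'retries': 3})
'foo(a, b, retries=3)'
>>> addKwdArgsToSig('bar()', {'x': 1})
'bar(x=1)'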
388,272
def _generate_union(self, union_type): union_name = fmt_type_name(union_type) self._emit_jsdoc_header(union_type.doc) self.emit( % union_name) variant_types = [] for variant in union_type.all_fields: variant_types.append("" % variant.name) variant_data_type, _, _ = unwrap(variant.data_type)
Emits a JSDoc @typedef for a union type.
388,273
def snapshot(self, filename="tmp.png"): if not filename: filename = "tmp.png" if self.handle: try: screenshot(filename, self.handle) except win32gui.error: self.handle = None screenshot(filename) else: screenshot(filename) img = aircv.imread(filename) os.remove(filename) return img
Take a screenshot and save it to `tmp.png` filename by default Args: filename: name of file where to store the screenshot Returns: display the screenshot
388,274
def render_field_errors(field):
    if field.errors:
        # The markup and the error separator are assumptions; the original
        # string literals were stripped.
        html = '<p class="help-block">{errors}</p>'.format(
            errors='. '.join(field.errors)
        )
        return HTMLString(html)
    return None
Render field errors as html.
388,275
def detect(self, app):
    # 'bin/detect' follows the standard buildpack layout; the original path
    # literals were stripped.
    script = os.path.join(self.folder, 'bin', 'detect')
    cmd = '%s %s' % (script, app.folder)
    result = run(cmd)
    return result.status_code == 0
Given an app, run detect script on it to determine whether it can be built with this pack. Return True/False.
388,276
def get_model_spec_ting(atomic_number): DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age" temp = np.load("%s/X_u_template_KGh_res=1800.npz" %DATA_DIR) X_u_template = temp["X_u_template"] wl = temp["wavelength"] grad_spec = X_u_template[:,atomic_number] return wl, grad_spec
X_u_template[0:2] are teff, logg, vturb in km/s X_u_template[:,3] -> onward, put atomic number atomic_number is 6 for C, 7 for N
388,277
def event(self, *topics, **kwargs): workers = kwargs.pop("workers", 1) multi = kwargs.pop("multi", False) queue_limit = kwargs.pop("queue_limit", 10000) def wrapper(func): for topic in topics: queues = [Queue() for _ in range(workers)] hash_ring = ketama.Continuum() for q in queues: hash_ring[str(hash(q))] = q self.worker_queues[topic] = hash_ring self.workers[topic] = WorkerPool( queues, topic, func, multi=multi, queue_limit=queue_limit, logger_name="%s.%s" % (self.name, topic)) self.socket.setsockopt(zmq.SUBSCRIBE, asbytes(topic)) return func return wrapper
Topic callback registry. callback func should receive two args: topic and pk, and then process the replication job. Note: The callback func must return True/False. When passed a list of pks, the func should return a list of True/False with the same length of pks. :param topics: a list of topics :param workers: how many workers to process this topic :param multi: whether pass multiple pks :param queue_limit: when queue size is larger than the limit, the worker should run deduplicate procedure
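A hedged usage sketch; `app` is assumed to be an instance of the class defining event(), and the callback contract follows the docstring:

@app.event('user_updated', workers=2)
def on_user_updated(topic, pk):
    # ... replicate the row identified by pk ...
    return True  # must report success/failure

@app.event('order_created', multi=True)
def on_orders(topic, pks):
    # when multi=True the callback receives a list of pks and must return
    # one success flag per pk
    return [True] * len(pks)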
388,278
def launch_image( player: Player, nth_player: int, num_players: int, headless: bool, game_name: str, map_name: str, game_type: GameType, game_speed: int, timeout: Optional[int], hide_names: bool, random_names: bool, drop_players: bool, allow_input: bool, auto_launch: bool, game_dir: str, bot_dir: str, map_dir: str, bwapi_data_bwta_dir: str, bwapi_data_bwta2_dir: str, vnc_base_port: int, vnc_host: int, capture_movement: bool, docker_image: str, docker_opts: List[str] ) -> None: container_name = f"{game_name}_{nth_player}_{player.name.replace(, )}" log_dir = f"{game_dir}/{game_name}/logs_{nth_player}" crashes_dir = f"{game_dir}/{game_name}/crashes_{nth_player}" os.makedirs(log_dir, mode=0o777, exist_ok=True) os.makedirs(crashes_dir, mode=0o777, exist_ok=True) volumes = { xoscmounts(log_dir): {"bind": LOG_DIR, "mode": "rw"}, xoscmounts(map_dir): {"bind": MAP_DIR, "mode": "rw"}, xoscmounts(crashes_dir): {"bind": ERRORS_DIR, "mode": "rw"}, xoscmounts(bwapi_data_bwta_dir): {"bind": BWAPI_DATA_BWTA_DIR, "mode": "rw"}, xoscmounts(bwapi_data_bwta2_dir): {"bind": BWAPI_DATA_BWTA2_DIR, "mode": "rw"}, } ports = {} if not headless: ports.update({"5900/tcp": vnc_base_port + nth_player}) env = dict( PLAYER_NAME=player.name if not random_names else random_string(8), PLAYER_RACE=player.race.value, NTH_PLAYER=nth_player, NUM_PLAYERS=num_players, GAME_NAME=game_name, MAP_NAME=f"/app/sc/maps/{map_name}", GAME_TYPE=game_type.value, SPEED_OVERRIDE=game_speed, HIDE_NAMES="1" if hide_names else "0", DROP_PLAYERS="1" if drop_players else "0", TM_LOG_RESULTS=f"../logs/scores.json", TM_LOG_FRAMETIMES=f"../logs/frames.csv", TM_SPEED_OVERRIDE=game_speed, TM_ALLOW_USER_INPUT="1" if isinstance(player, HumanPlayer) or allow_input else "0", EXIT_CODE_REALTIME_OUTED=EXIT_CODE_REALTIME_OUTED, CAPTURE_MOUSE_MOVEMENT="1" if capture_movement else "0", HEADFUL_AUTO_LAUNCH="1" if auto_launch else "0", JAVA_DEBUG="0" ) if timeout is not None: env["PLAY_TIMEOUT"] = timeout if isinstance(player, BotPlayer): bot_data_write_dir = f"{game_dir}/{game_name}/write_{nth_player}/" os.makedirs(bot_data_write_dir, mode=0o777, exist_ok=True) volumes.update({ xoscmounts(bot_data_write_dir): {"bind": BOT_DATA_WRITE_DIR, "mode": "rw"}, xoscmounts(player.bot_dir): {"bind": BOT_DIR, "mode": "ro"}, }) env["BOT_FILE"] = player.bot_basefilename env["BOT_BWAPI"] = player.bwapi_version env["JAVA_DEBUG"] = "0" env["JAVA_DEBUG_PORT"] = "" env["JAVA_OPTS"] = "" command = ["/app/play_bot.sh"] if player.meta.javaDebugPort is not None: ports.update({"player.meta.javaDebugPort/tcp": player.meta.javaDebugPort}) env["JAVA_DEBUG"] = "1" env["JAVA_DEBUG_PORT"] = player.meta.javaDebugPort if player.meta.javaOpts is not None: env["JAVA_OPTS"] = player.meta.javaOpts if player.meta.port is not None: if isinstance(player.meta.port, int) or player.meta.port.isdigit(): ports.update({str(player.meta.port) + : int(player.meta.port)}) else: forward, local = [int(x) for x in player.meta.port.split()] ports.update({str(local) + : forward}) else: command = ["/app/play_human.sh"] is_server = nth_player == 0 entrypoint_opts = ["--headful"] if headless: entrypoint_opts = [ "--game", game_name, "--name", player.name, "--race", player.race.value, "--lan" ] if is_server: entrypoint_opts += ["--host", "--map", f"/app/sc/maps/{map_name}"] else: entrypoint_opts += ["--join"] command += entrypoint_opts logger.debug( "\n" f"docker_image={docker_image}\n" f"command={pformat(command, indent=4)}\n" f"name={container_name}\n" f"detach={True}\n" f"environment={pformat(env, indent=4)}\n" 
f"privileged={True}\n" f"volumes={pformat(volumes, indent=4)}\n" f"network={DOCKER_STARCRAFT_NETWORK}\n" f"ports={ports}\n" ) container = docker_client.containers.run( docker_image, command=command, name=container_name, detach=True, environment=env, privileged=True, volumes=volumes, network=DOCKER_STARCRAFT_NETWORK, ports=ports ) if container: container_id = running_containers(container_name) logger.info(f"launched {player}") logger.debug(f"container name = , container id = ") else: raise DockerException(f"could not launch {player} in container {container_name}")
:raises docker.errors.APIError :raises DockerException
388,279
def data_parallelism(daisy_chain_variables=True, all_workers=False, ps_replicas=0, ps_job="/job:ps", ps_gpu=0, schedule="continuous_train_and_eval", sync=False, worker_gpu=1, worker_replicas=1, worker_id=0, gpu_order="", worker_job="/job:localhost", no_data_parallelism=False): tf.logging.info("schedule=%s" % schedule) tf.logging.info("worker_gpu=%s" % worker_gpu) tf.logging.info("sync=%s" % sync) def _ps_replicas(all_workers=False): if all_workers: return list(range(ps_replicas)) num_replicas = ps_replicas // worker_replicas return [d + worker_id * num_replicas for d in range(num_replicas)] def _gpu_order(num_gpus): if gpu_order: ret = [int(s) for s in gpu_order.split(" ")] if len(ret) == num_gpus: return ret return list(range(num_gpus)) def _ps_gpus(all_workers=False): ps_gpus = [] for d in _ps_replicas(all_workers=all_workers): ps_gpus.extend([(d, gpu) for gpu in _gpu_order(ps_gpu)]) return ps_gpus def ps_devices(all_workers=False): if ps_replicas > 0: if ps_gpu > 0: return [ ps_job + "/task:%d/GPU:%d" % (d, gpu) for (d, gpu) in _ps_gpus(all_workers=all_workers) ] else: return [ ps_job + "/task:%d" % d for d in _ps_replicas(all_workers=all_workers) ] else: if worker_gpu > 0: return ["gpu:%d" % d for d in _gpu_order(worker_gpu)] else: return [""] def _replica_device_setter(worker_device): if ps_replicas == 0: return worker_device return tf.train.replica_device_setter( worker_device=worker_device, ps_tasks=ps_replicas, ps_device=ps_job + "/GPU:0" if ps_gpu > 0 else ps_job) is_single_machine = ps_replicas == 0 and worker_replicas == 1 if no_data_parallelism: datashard_devices = [""] caching_devices = None elif is_single_machine: tf.logging.warn( "Schedule=%s. Assuming that training is running on a single machine.", schedule) datashard_devices = ["gpu:%d" % d for d in _gpu_order(worker_gpu)] if worker_gpu < 1: datashard_devices += ["cpu:0"] caching_devices = None elif sync and ps_replicas > 0: datashard_devices = [ _replica_device_setter(d) for d in ps_devices(all_workers=all_workers) ] if ps_gpu > 0 and ps_replicas > 1: caching_devices = [ ps_job + "/task:%d/cpu:0" % d for (d, _) in _ps_gpus(all_workers=all_workers) ] else: caching_devices = None else: if worker_gpu > 1: datashard_devices = [ _replica_device_setter(worker_job + "/GPU:%d" % d) for d in _gpu_order(worker_gpu) ] caching_devices = None else: datashard_devices = [_replica_device_setter(worker_job)] caching_devices = None tf.logging.info("datashard_devices: %s", datashard_devices) tf.logging.info("caching_devices: %s", caching_devices) tf.logging.info("ps_devices: %s", ps_devices(all_workers=all_workers)) return eu.Parallelism( datashard_devices, caching_devices=caching_devices, daisy_chain_variables=daisy_chain_variables, ps_devices=ps_devices(all_workers=all_workers))
See data_parallelism_from_flags.
388,280
def op(name, data, bucket_count=None, display_name=None, description=None,
       collections=None):
    import tensorflow.compat.v1 as tf
    if display_name is None:
        display_name = name
    summary_metadata = metadata.create_summary_metadata(
        display_name=display_name, description=description)
    with tf.name_scope(name):
        tensor = _buckets(data, bucket_count=bucket_count)
        return tf.summary.tensor_summary(name='histogram_summary',
                                         tensor=tensor,
                                         collections=collections,
                                         summary_metadata=summary_metadata)
Create a legacy histogram summary op. Arguments: name: A unique name for the generated summary node. data: A `Tensor` of any shape. Must be castable to `float64`. bucket_count: Optional positive `int`. The output will have this many buckets, except in two edge cases. If there is no data, then there are no buckets. If there is data but all points have the same value, then there is one bucket whose left and right endpoints are the same. display_name: Optional name for this summary in TensorBoard, as a constant `str`. Defaults to `name`. description: Optional long-form description for this summary, as a constant `str`. Markdown is supported. Defaults to empty. collections: Optional list of graph collections keys. The new summary op is added to these collections. Defaults to `[GraphKeys.SUMMARIES]`. Returns: A TensorFlow summary op.
388,281
def _wait_for_result(self): basetime = 0.018 if self._low_res else 0.128 sleep(basetime * (self._mtreg / 69.0) + self._delay)
Wait for the sensor to be ready for measurement.
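A worked example of the sleep computation above, with values assumed for illustration: in high-resolution mode the base integration time is 0.128 s, and the wait scales linearly with the sensitivity register relative to its default of 69.

basetime, mtreg, delay = 0.128, 69, 0.0
# 0.128 * (69 / 69.0) + 0 = 0.128 s; doubling mtreg to 138 doubles the wait.
assert abs(basetime * (mtreg / 69.0) + delay - 0.128) < 1e-9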
388,282
def del_team(self, team, sync=True):
    LOGGER.debug("OSInstance.del_team")
    if not sync:
        self.team_2_rm.append(team)
    else:
        if team.id is None:
            team.sync()
        if self.id is not None and team.id is not None:
            # Dict keys and string values below were stripped in extraction;
            # the names are reconstructed from the surrounding API and may differ.
            params = {
                'id': self.id,
                'teamID': team.id
            }
            args = {'http_operation': 'GET',
                    'operation_path': 'update/teams/delete',
                    'parameters': params}
            response = OSInstanceService.requester.call(args)
            if response.rc != 0:
                LOGGER.warning(
                    'OSInstance.del_team - Problem while updating OS instance ' + self.name +
                    '. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
                    " (" + str(response.rc) + ")"
                )
            else:
                self.team_ids.remove(team.id)
                team.osi_ids.remove(self.id)
        else:
            LOGGER.warning(
                'OSInstance.del_team - Problem while updating OS instance ' + self.name +
                '. Reason: team ' + team.name + ' id is None or self.id is None'
            )
Delete a team from this OS instance. :param team: the team to be removed from this OS instance :param sync: if sync=True (default), synchronize with the Ariane server immediately; if sync=False, queue the team for removal on the next save(). :return:
388,283
def _build_id_tuple(params, spec):
    # Handle situations where the spec is not known at setup time
    if spec is None:
        return (None, None)

    required_class = spec.class_
    required_tag = spec.tag

    _tag_type_to_explicit_implicit(params)

    # Param keys below were stripped in extraction; 'explicit'/'implicit' and
    # the 'class_'/'tag' overrides are restored from the library's conventions.
    if 'explicit' in params:
        if isinstance(params['explicit'], tuple):
            required_class, required_tag = params['explicit']
        else:
            required_class = 2
            required_tag = params['explicit']
    elif 'implicit' in params:
        if isinstance(params['implicit'], tuple):
            required_class, required_tag = params['implicit']
        else:
            required_class = 2
            required_tag = params['implicit']
    if required_class is not None and not isinstance(required_class, int_types):
        required_class = CLASS_NAME_TO_NUM_MAP[required_class]

    required_class = params.get('class_', required_class)
    required_tag = params.get('tag', required_tag)

    return (required_class, required_tag)
Builds a 2-element tuple used to identify fields by grabbing the class_ and tag from an Asn1Value class and the params dict being passed to it :param params: A dict of params to pass to spec :param spec: An Asn1Value class :return: A 2-element integer tuple in the form (class_, tag)
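A simplified sketch of the tagging logic above, ignoring the spec defaults: a bare integer for 'explicit'/'implicit' implies the context-specific class (2), while a (class_, tag) tuple is taken as-is.

def id_tuple_for(params):
    # Toy reimplementation for illustration; the real helper also consults
    # the spec's class_/tag and the 'class_'/'tag' param overrides.
    for key in ('explicit', 'implicit'):
        if key in params:
            v = params[key]
            return v if isinstance(v, tuple) else (2, v)
    return (None, None)

assert id_tuple_for({'implicit': 3}) == (2, 3)
assert id_tuple_for({'explicit': (1, 5)}) == (1, 5)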
388,284
def format_output(old_maps, new_maps): if isinstance(old_maps, record.FieldArray): keys = new_maps.keys() values = [new_maps[key] for key in keys] for key, vals in zip(keys, values): try: old_maps = old_maps.add_fields([vals], [key]) except ValueError: old_maps[key] = vals return old_maps elif isinstance(old_maps, dict): out = old_maps.copy() out.update(new_maps) return out else: raise TypeError("Input type must be FieldArray or dict.")
This function takes the returned dict from `transform` and converts it to the same datatype as the input. Parameters ---------- old_maps : {FieldArray, dict} The mapping object to add new maps to. new_maps : dict A dict with key as parameter name and value is numpy.array. Returns ------- {FieldArray, dict} The old_maps object with new keys from new_maps.
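A minimal usage sketch of the dict branch (the FieldArray branch behaves analogously); the parameter names are illustrative, and the function's own module-level imports (record, numpy) are assumed:

import numpy as np

old_maps = {'mass1': np.array([10.0, 20.0])}
new_maps = {'mchirp': np.array([8.7, 17.4])}
merged = format_output(old_maps, new_maps)
assert set(merged) == {'mass1', 'mchirp'}  # original keys plus the new ones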
388,285
def subsite_upcoming_events(context):
    # Dict keys were stripped in extraction; 'request' and 'events' are the
    # conventional names for this template tag and are assumed here.
    request = context['request']
    home = request.site.root_page
    return {'request': request,
            'events': getAllUpcomingEvents(request, home=home)}
Displays a list of all upcoming events in this site.
388,286
def get_student_item_dict(self, anonymous_user_id=None):
    item_id = self._serialize_opaque_key(self.scope_ids.usage_id)
    if hasattr(self, "xmodule_runtime"):
        course_id = self.get_course_id()
        if anonymous_user_id:
            student_id = anonymous_user_id
        else:
            student_id = self.xmodule_runtime.anonymous_student_id
    else:
        course_id = "edX/Enchantment_101/April_1"
        if self.scope_ids.user_id is None:
            student_id = ''
        else:
            student_id = unicode(self.scope_ids.user_id)
    student_item_dict = dict(
        student_id=student_id,
        item_id=item_id,
        course_id=course_id,
        # item_type literal stripped in extraction; 'openassessment' restored.
        item_type='openassessment'
    )
    return student_item_dict
Create a student_item_dict from our surrounding context. See also: submissions.api for details. Args: anonymous_user_id(str): A unique anonymous_user_id for (user, course) pair. Returns: (dict): The student item associated with this XBlock instance. This includes the student id, item id, and course id.
388,287
def rotate(self, img):
    # The orientation lookup was lost in extraction; the EXIF read below is a
    # reconstruction and the tag name is assumed.
    try:
        exif = image2exif.get_exif(img)
        orientation = exif.get('Orientation')
    except AttributeError:
        return img  # no EXIF data available
    landscape = img.height < img.width
    if orientation == 6 and landscape:
        print("ROTATING")
        return img.rotate(-90)
    return img
Rotate the image if its EXIF orientation metadata says it needs it.
388,288
def all_hosts(self):
    return set(imap(common.clean_node, itertools.chain(
        self._doc.get('hosts', []),
        self._doc.get('passives', []),
        self._doc.get('arbiters', []))))
List of hosts, passives, and arbiters known to this server.
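An illustrative sketch of the three fields consulted above, using a fabricated config document (hostnames are made up; the real method also normalizes each entry through clean_node):

doc = {'hosts': ['db1:27017'], 'passives': ['db2:27017'], 'arbiters': ['db3:27017']}
members = set(doc.get('hosts', []) + doc.get('passives', []) + doc.get('arbiters', []))
assert members == {'db1:27017', 'db2:27017', 'db3:27017'}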
388,289
def _generate_union_class(self, ns, data_type):
    self.emit(self._class_declaration_for_type(ns, data_type))
    with self.indent():
        self.emit()
        self.emit()
        self._generate_union_class_vars(data_type)
        self._generate_union_class_variant_creators(ns, data_type)
        self._generate_union_class_is_set(data_type)
        self._generate_union_class_get_helpers(ns, data_type)
        self._generate_union_class_custom_annotations(ns, data_type)
        self._generate_union_class_repr(data_type)
    # The emitted literal was stripped in extraction; the validator assignment
    # below is reconstructed from the generator's usual output and may differ.
    self.emit('{0}_validator = bv.Union({0})'.format(
        class_name_for_data_type(data_type)
    ))
    self.emit()
Defines a Python class that represents a union in Stone.
388,290
def aitoffImageToSphere(x, y): x = x - 360.*(x>180) x = np.asarray(np.radians(x)) y = np.asarray(np.radians(y)) z = np.sqrt(1. - (x / 4.)**2 - (y / 2.)**2) lon = 2. * np.arctan2((2. * z**2) - 1, (z / 2.) * x) lat = np.arcsin( y * z) return ((180. - np.degrees(lon)) % 360.), np.degrees(lat)
Inverse Hammer-Aitoff projection (deg).
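A quick sanity check of the inverse projection above, a sketch rather than anything from the source: the image-plane origin should map back to (0, 0) on the sphere.

import numpy as np

lon, lat = aitoffImageToSphere(0.0, 0.0)
assert np.isclose(lon, 0.0) and np.isclose(lat, 0.0)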
388,291
def sortedbyAge(self): ageAll = numpy.zeros(self.length) for i in range(self.length): ageAll[i] = self.Ind[i].age ageSorted = ageAll.argsort() return ageSorted[::-1]
Sort the population based on age, returning the indices of individuals from oldest to youngest.
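The descending-argsort idiom used above, shown on plain data:

import numpy as np

ages = np.array([30.0, 10.0, 20.0])
order = ages.argsort()[::-1]          # indices of individuals, oldest first
assert list(order) == [0, 2, 1]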
388,292
def _print_checker_doc(checker_name, info, stream=None): if not stream: stream = sys.stdout doc = info.get("doc") module = info.get("module") msgs = info.get("msgs") options = info.get("options") reports = info.get("reports") checker_title = "%s checker" % (checker_name.replace("_", " ").title()) if module: print(".. _%s:\n" % module, file=stream) print(checker_title, file=stream) print("~" * len(checker_title), file=stream) print("", file=stream) if module: print("This checker is provided by ``%s``." % module, file=stream) print("Verbatim name of the checker is ``%s``." % checker_name, file=stream) print("", file=stream) if doc: title = "{} Documentation".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) print(cleandoc(doc), file=stream) print("", file=stream) if options: title = "{} Options".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) _rest_format_section(stream, None, options) print("", file=stream) if msgs: title = "{} Messages".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) for msgid, msg in sorted( msgs.items(), key=lambda kv: (_MSG_ORDER.index(kv[0][0]), kv[1]) ): msg = build_message_definition(checker_name, msgid, msg) print(msg.format_help(checkerref=False), file=stream) print("", file=stream) if reports: title = "{} Reports".format(checker_title) print(title, file=stream) print("^" * len(title), file=stream) for report in reports: print(":%s: %s" % report[:2], file=stream) print("", file=stream) print("", file=stream)
Helper method for print_full_documentation. Also used by doc/exts/pylint_extensions.py.
388,293
def sort_by_ref(vcf_file, data):
    out_file = "%s-prep.vcf.gz" % utils.splitext_plus(vcf_file)[0]
    if not utils.file_uptodate(out_file, vcf_file):
        with file_transaction(data, out_file) as tx_out_file:
            header_file = "%s-header.txt" % utils.splitext_plus(tx_out_file)[0]
            with open(header_file, "w") as out_handle:
                for region in ref.file_contigs(dd.get_ref_file(data), data["config"]):
                    # The header string lost its '#'-prefixed content in
                    # extraction; the standard VCF contig line is assumed.
                    out_handle.write("##contig=<ID=%s,length=%s>\n" % (region.name, region.size))
            cat_cmd = "zcat" if vcf_file.endswith("vcf.gz") else "cat"
            # The command also lost its '#'-prefixed text; stripping old contig
            # headers and injecting the rebuilt ones is the assumed intent.
            cmd = ("{cat_cmd} {vcf_file} | grep -v ^##contig | "
                   "bcftools annotate -h {header_file} | "
                   "vt sort -m full -o {tx_out_file} -")
            with utils.chdir(os.path.dirname(tx_out_file)):
                do.run(cmd.format(**locals()), "Sort VCF by reference")
    return bgzip_and_index(out_file, data["config"])
Sort a VCF file by genome reference and position, adding contig information.
388,294
def do_POST(self):
    if not self.is_rpc_path_valid():
        self.report_404()
        return
    try:
        # Read the request body in chunks to avoid straining socket.read()
        # with very large requests.
        max_chunk_size = 10 * 1024 * 1024
        size_remaining = int(self.headers["content-length"])
        L = []
        while size_remaining:
            chunk_size = min(size_remaining, max_chunk_size)
            L.append(self.rfile.read(chunk_size))
            size_remaining -= len(L[-1])
        data = ''.join(L)
        response = self.server._marshaled_dispatch(
            data, getattr(self, '_dispatch', None)
        )
    except Exception:
        # Internal error; report it back to the client as HTTP 500.
        self.send_response(500)
        self.end_headers()
    else:
        self.send_response(200)
        self.send_header("Content-type", "text/xml")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
        self.wfile.flush()
        self.connection.shutdown(1)
Handles the HTTP POST request. Attempts to interpret all HTTP POST requests as XML-RPC calls, which are forwarded to the server's _dispatch method for handling.
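The chunked-read pattern above, demonstrated on an in-memory stream (sizes shrunk for illustration; the handler itself uses 10 MiB chunks):

import io

stream = io.BytesIO(b"x" * 25)
max_chunk_size, size_remaining, parts = 10, 25, []
while size_remaining:
    parts.append(stream.read(min(size_remaining, max_chunk_size)))
    size_remaining -= len(parts[-1])
assert b"".join(parts) == b"x" * 25   # reassembled request body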
388,295
def act(self, cmd_name, params=None): command = getattr(self, cmd_name) if params: command(params) else: command()
Run the specified command with its parameters.
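The getattr-dispatch idiom act() relies on, shown on a toy class (all names here are hypothetical):

class Robot:
    def wave(self):
        return "waving"

    def say(self, words):
        return "saying %s" % words

    def act(self, cmd_name, params=None):
        # Look up the method by name, then call with or without params.
        command = getattr(self, cmd_name)
        return command(params) if params else command()

assert Robot().act("wave") == "waving"
assert Robot().act("say", "hi") == "saying hi"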
388,296
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False,
                     plot=False):
    import random
    from scipy import interpolate

    if usepreset:
        if 0 < N <= 5:
            cmap = cm.gist_rainbow
        elif N <= 20:
            cmap = cm.Set1
        else:
            sys.exit(discrete_rainbow.__doc__)

    cdict = cmap._segmentdata.copy()
    colors_i = np.linspace(0, 1., N)        # N sample points
    indices = np.linspace(0, 1., N + 1)     # N+1 segment boundaries
    rgbs = []
    # Channel keys were stripped in extraction; 'red', 'green', 'blue' are the
    # standard segmentdata keys.
    for key in ('red', 'green', 'blue'):
        D = np.array(cdict[key])
        I = interpolate.interp1d(D[:, 0], D[:, 1])
        colors = I(colors_i)
        rgbs.append(colors)
        A = np.zeros((N + 1, 3), float)
        A[:, 0] = indices
        A[1:, 1] = colors
        A[:-1, 2] = colors
        L = []
        for l in A:
            L.append(tuple(l))
        cdict[key] = tuple(L)
    palette = list(zip(*rgbs))
    if shuffle:
        random.shuffle(palette)
    if plot:
        print_colors(palette)
    # The colormap name literal was stripped in extraction; any string works.
    return mpl.colors.LinearSegmentedColormap('colormap', cdict, 1024), palette
Return a discrete colormap and the set of colors. Modified from <http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations> cmap: colormap instance, e.g. cm.jet. N: number of colors. Example >>> x = resize(arange(100), (5,100)) >>> djet = cmap_discretize(cm.jet, 5) >>> imshow(x, cmap=djet) See available matplotlib colormaps at: <http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/> If N > 20 the sampled colors might not be very distinctive. To try anyway, set usepreset=False.
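A hedged usage sketch (the function's module-level imports — numpy as np, matplotlib as mpl, matplotlib.cm as cm, sys — are assumed):

cmap, palette = discrete_rainbow(N=5)   # preset picks gist_rainbow for N <= 5
colors = list(palette)                  # one RGB tuple per class
# cmap can then be passed to matplotlib, e.g. imshow(data, cmap=cmap)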
388,297
def normalize_unicode(text):
    if isinstance(text, six.text_type):
        # Decompose (NFKD), drop non-ASCII code points, and decode back to str.
        return unicodedata.normalize('NFKD', text).encode('ascii', 'ignore').decode()
    else:
        return text
Normalize any unicode characters to ascii equivalent https://docs.python.org/2/library/unicodedata.html#unicodedata.normalize
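A usage sketch: accented characters fold to their ASCII equivalents, and non-text input passes through untouched.

assert normalize_unicode(u"caf\u00e9") == "cafe"   # 'café' -> 'cafe'
assert normalize_unicode(42) == 42                 # not text: returned as-is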
388,298
def convert_tkinter_size_to_Wx(size):
    # Sizes whose height is below the cutoff are assumed to be in characters
    # and are scaled to pixels; larger sizes are passed through unchanged.
    wxsize = size
    if size[1] is not None and size[1] < DEFAULT_PIXEL_TO_CHARS_CUTOFF:
        wxsize = (size[0] * DEFAULT_PIXELS_TO_CHARS_SCALING[0],
                  size[1] * DEFAULT_PIXELS_TO_CHARS_SCALING[1])
    return wxsize
Convert a size given in characters to a size in pixels. :param size: (width, height) where width is in characters and height in rows :return: (width, height) in pixels
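A worked example of the conversion above. The scaling factors come from DEFAULT_PIXELS_TO_CHARS_SCALING; the values here are assumed for illustration:

DEFAULT_PIXELS_TO_CHARS_SCALING = (10, 26)   # hypothetical px per char (width, height)
size = (20, 4)                               # 20 characters wide, 4 rows tall
pixels = (size[0] * DEFAULT_PIXELS_TO_CHARS_SCALING[0],
          size[1] * DEFAULT_PIXELS_TO_CHARS_SCALING[1])
assert pixels == (200, 104)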
388,299
def iterate(t_table, wordlist, stanzas, schemes, rprobs, maxsteps):
    data_probs = numpy.zeros(len(stanzas))
    old_data_probs = None
    probs = None
    num_words = len(wordlist)
    ctr = 0
    for ctr in range(maxsteps):
        logging.info("Iteration {}".format(ctr))
        old_data_probs = data_probs
        logging.info("Expectation step")
        probs = expectation_step(t_table, stanzas, schemes, rprobs)
        logging.info("Maximization step")
        t_table, rprobs = maximization_step(num_words, stanzas, schemes, probs)
        # Marginalize over schemes in log space for per-stanza data probabilities.
        data_probs = numpy.logaddexp.reduce(probs, axis=1)
        if ctr == maxsteps - 1 and not numpy.allclose(data_probs, old_data_probs):
            logging.warning("Warning: EM did not converge")
    logging.info("Stopped after {} iterations".format(ctr))
    return probs
Iterate EM and return final probabilities
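The log-space marginalization used above, in isolation: summing per-scheme probabilities stored as logs without leaving log space.

import numpy as np

log_probs = np.log(np.array([[0.2, 0.3], [0.1, 0.4]]))   # rows: stanzas, cols: schemes
data_probs = np.logaddexp.reduce(log_probs, axis=1)      # log(sum of probs) per stanza
assert np.allclose(np.exp(data_probs), [0.5, 0.5])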