python
def calculate2d(self, force=True):
    """
    Recalculate 2D coordinates. Currently rings can be calculated badly.

    :param force: ignore existing coordinates of atoms
    """
    for ml in (self.__reagents, self.__reactants, self.__products):
        for m in ml:
            m.calculate2d(force)
    self.fix_positions()
python
def connection(self) -> Iterator[amqp.Connection]:
    """Returns a new connection as a context manager."""
    TCP_USER_TIMEOUT = 18  # constant is available on Python 3.6+.
    socket_settings = {TCP_USER_TIMEOUT: self.config.TCP_USER_TIMEOUT}
    if sys.platform.startswith('darwin'):
        del socket_settings[TCP_USER_TIMEOUT]
    conn = amqp.Connection(
        host="%s:%s" % (self.config.RABBIT_HOST, self.config.RABBIT_PORT),
        userid=self.config.RABBIT_USER,
        password=self.config.RABBIT_PASSWORD,
        virtual_host=self.config.RABBIT_VIRTUAL_HOST,
        connect_timeout=self.config.RABBIT_CONNECT_TIMEOUT,
        read_timeout=self.config.RABBIT_READ_TIMEOUT,
        write_timeout=self.config.RABBIT_WRITE_TIMEOUT,
        socket_settings=socket_settings,
        heartbeat=self.config.RABBIT_HEARTBEAT,
    )
    conn.connect()
    logger.info('Connected to RabbitMQ')
    with _safe_close(conn):
        yield conn
java
public void marshall(EventDescription eventDescription, ProtocolMarshaller protocolMarshaller) {
    if (eventDescription == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(eventDescription.getLatestDescription(), LATESTDESCRIPTION_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
@Override
public String sendEmailMessage(final EmailMessage emailMessage) throws MessagingException {
    Transport.send(emailMessage);
    return emailMessage.getMessageID();
}
python
def set_ipcsem_params(self, ftok=None, persistent=None):
    """Sets ipcsem lock engine params.

    :param str|unicode ftok: Set the ipcsem key via ftok() for avoiding duplicates.

    :param bool persistent: Do not remove ipcsem's on shutdown.
    """
    self._set('ftok', ftok)
    self._set('persistent-ipcsem', persistent, cast=bool)

    return self._section
python
def bed(args):
    """
    %prog bed bedfile bamfiles

    Convert bam files to bed.
    """
    p = OptionParser(bed.__doc__)
    opts, args = p.parse_args(args)

    if len(args) < 2:
        sys.exit(not p.print_help())

    bedfile = args[0]
    bamfiles = args[1:]
    for bamfile in bamfiles:
        cmd = "bamToBed -i {0}".format(bamfile)
        sh(cmd, outfile=bedfile, append=True)
python
def build_body(self, template_file=INDEX):
    """
    Params:
        template_file (text): Path to an index.html template

    Returns:
        body (bytes): The utf-8 encoded document body
    """
    if self.params['error'] == 'access_denied':
        message = docs.OAUTH_ACCESS_DENIED
    elif self.params['error'] is not None:
        message = docs.OAUTH_ERROR.format(error=self.params['error'])
    elif self.params['state'] is None or self.params['code'] is None:
        message = docs.OAUTH_INVALID
    else:
        message = docs.OAUTH_SUCCESS

    with codecs.open(template_file, 'r', 'utf-8') as fp:
        index_text = fp.read()

    body = string.Template(index_text).substitute(message=message)
    body = codecs.encode(body, 'utf-8')
    return body
java
private void pbOpenActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_pbOpenActionPerformed
{//GEN-HEADEREND:event_pbOpenActionPerformed
    // Add your handling code here:
    pbTestActionPerformed(evt);
    if (theConnection != null) {
        synchronized (Main.getProperties()) {
            int i = 0;
            while (Main.getProperties().getProperty(Main.PROPERTY_JDBCDRIVER + i) != null) {
                Main.getProperties().remove(Main.PROPERTY_JDBCDRIVER + i);
                i++;
            }
            while (Main.getProperties().getProperty(Main.PROPERTY_JDBCURL + i) != null) {
                Main.getProperties().remove(Main.PROPERTY_JDBCURL + i);
                i++;
            }
            for (i = 0; i < cmbJDBCDriver.getModel().getSize(); i++)
                Main.getProperties().setProperty(Main.PROPERTY_JDBCDRIVER + i,
                        cmbJDBCDriver.getModel().getElementAt(i).toString());
            for (i = 0; i < cmbJDBCURL.getModel().getSize(); i++)
                Main.getProperties().setProperty(Main.PROPERTY_JDBCURL + i,
                        cmbJDBCURL.getModel().getElementAt(i).toString());
            Main.getProperties().setProperty(Main.PROPERTY_JDBCUSER, tfUsername.getText());
            Main.getProperties().storeProperties("");
        }
        dispose();
    }
}
python
def _histogram_equalization_helper(valid_data, number_of_bins, clip_limit=None, slope_limit=None):
    """Calculate the simplest possible histogram equalization, using only valid data.

    Returns: cumulative distribution function and bin information
    """
    # bucket all the selected data using np's histogram function
    temp_histogram, temp_bins = np.histogram(valid_data, number_of_bins)

    # if we have a clip limit and we should do our clipping before building
    # the cumulative distribution function, clip off our histogram
    if clip_limit is not None:
        # clip our histogram and remember how much we removed
        pixels_to_clip_at = int(clip_limit * (valid_data.size / float(number_of_bins)))
        mask_to_clip = temp_histogram > clip_limit
        # num_bins_clipped = sum(mask_to_clip)
        # num_pixels_clipped = sum(temp_histogram[mask_to_clip]) - (num_bins_clipped * pixels_to_clip_at)
        temp_histogram[mask_to_clip] = pixels_to_clip_at

    # calculate the cumulative distribution function
    cumulative_dist_function = temp_histogram.cumsum()

    # if we have a clip limit and we should do our clipping after building the
    # cumulative distribution function, clip off our cdf
    if slope_limit is not None:
        # clip our cdf and remember how much we removed
        pixel_height_limit = int(slope_limit * (valid_data.size / float(number_of_bins)))
        cumulative_excess_height = 0
        num_clipped_pixels = 0
        weight_metric = np.zeros(cumulative_dist_function.shape, dtype=float)

        for pixel_index in range(1, cumulative_dist_function.size):
            current_pixel_count = cumulative_dist_function[pixel_index]

            diff_from_acceptable = (current_pixel_count -
                                    cumulative_dist_function[pixel_index - 1] -
                                    pixel_height_limit -
                                    cumulative_excess_height)
            if diff_from_acceptable < 0:
                weight_metric[pixel_index] = abs(diff_from_acceptable)
            cumulative_excess_height += max(diff_from_acceptable, 0)
            cumulative_dist_function[pixel_index] = current_pixel_count - cumulative_excess_height
            num_clipped_pixels = num_clipped_pixels + cumulative_excess_height

    # now normalize the overall distribution function
    cumulative_dist_function = (number_of_bins - 1) * cumulative_dist_function / cumulative_dist_function[-1]

    # return what someone else will need in order to apply the equalization later
    return cumulative_dist_function, temp_bins
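For context, a minimal sketch of how a caller typically applies the returned CDF and bins to equalize the data afterwards; the variable names and the np.interp application are illustrative assumptions, not part of this module:

import numpy as np

valid_data = np.random.rand(10000)  # stand-in for real image data
cdf, bins = _histogram_equalization_helper(valid_data, number_of_bins=256)
# np.histogram returns number_of_bins + 1 edges; pair each left edge with its CDF value
equalized = np.interp(valid_data, bins[:-1], cdf)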
python
def check_perf():
    "Suggest how to improve the setup to speed things up"

    from PIL import features, Image
    from packaging import version

    print("Running performance checks.")

    # libjpeg_turbo check
    print("\n*** libjpeg-turbo status")
    if version.parse(Image.PILLOW_VERSION) >= version.parse("5.3.9"):
        if features.check_feature('libjpeg_turbo'):
            print("✔ libjpeg-turbo is on")
        else:
            print("✘ libjpeg-turbo is not on. It's recommended you install libjpeg-turbo to speed up JPEG decoding. See https://docs.fast.ai/performance.html#libjpeg-turbo")
    else:
        print(f"❓ libjpeg-turbo's status can't be derived - need Pillow(-SIMD)? >= 5.4.0 to tell, current version {Image.PILLOW_VERSION}")
        # XXX: remove this check/note once Pillow and Pillow-SIMD 5.4.0 is available
        pillow_ver_5_4_is_avail = pypi_module_version_is_available("Pillow", "5.4.0")
        if pillow_ver_5_4_is_avail == False:
            print("5.4.0 is not yet available, other than the dev version on github, which can be installed via pip from git+https://github.com/python-pillow/Pillow. See https://docs.fast.ai/performance.html#libjpeg-turbo")

    # Pillow-SIMD check
    print("\n*** Pillow-SIMD status")
    if re.search(r'\.post\d+', Image.PILLOW_VERSION):
        print(f"✔ Running Pillow-SIMD {Image.PILLOW_VERSION}")
    else:
        print(f"✘ Running Pillow {Image.PILLOW_VERSION}; It's recommended you install Pillow-SIMD to speed up image resizing and other operations. See https://docs.fast.ai/performance.html#pillow-simd")

    # CUDA version check
    # compatibility table: k: min nvidia ver is required for v: cuda ver
    # note: windows nvidia driver version is slightly higher, see:
    # https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html
    # note: add new entries if pytorch starts supporting new cudaXX
    nvidia2cuda = {
        "410.00": "10.0",
        "384.81": "9.0",
        "367.48": "8.0",
    }
    print("\n*** CUDA status")
    if torch.cuda.is_available():
        pynvml = load_pynvml_env()
        nvidia_ver = (pynvml.nvmlSystemGetDriverVersion().decode('utf-8')
                      if platform.system() != "Darwin" else "Cannot be determined on OSX yet")
        cuda_ver = torch.version.cuda
        max_cuda = "8.0"
        for k in sorted(nvidia2cuda.keys()):
            if version.parse(nvidia_ver) > version.parse(k):
                max_cuda = nvidia2cuda[k]
        if version.parse(str(max_cuda)) <= version.parse(cuda_ver):
            print(f"✔ Running the latest CUDA {cuda_ver} with NVIDIA driver {nvidia_ver}")
        else:
            print(f"✘ You are running pytorch built against cuda {cuda_ver}, your NVIDIA driver {nvidia_ver} supports cuda10. See https://pytorch.org/get-started/locally/ to install pytorch built against the faster CUDA version.")
    else:
        print(f"❓ Running cpu-only torch version, CUDA check is not relevant")

    print("\nRefer to https://docs.fast.ai/performance.html to make sense out of these checks and suggestions.")
python
def do_lmfit(data, params, B=None, errs=None, dojac=True):
    """
    Fit the model to the data
    data may contain 'flagged' or 'masked' data with the value of np.NaN

    Parameters
    ----------
    data : 2d-array
        Image data

    params : lmfit.Parameters
        Initial model guess.

    B : 2d-array
        B matrix to be used in residual calculations.
        Default = None.

    errs : 1d-array

    dojac : bool
        If true then an analytic jacobian will be passed to the fitting routine.

    Returns
    -------
    result : ?
        lmfit.minimize result.

    params : lmfit.Params
        Fitted model.

    See Also
    --------
    :func:`AegeanTools.fitting.lmfit_jacobian`
    """
    # copy the params so as not to change the initial conditions
    # in case we want to use them elsewhere
    params = copy.deepcopy(params)
    data = np.array(data)
    mask = np.where(np.isfinite(data))

    def residual(params, **kwargs):
        """
        The residual function required by lmfit

        Parameters
        ----------
        params: lmfit.Params
            The parameters of the model being fit

        Returns
        -------
        result : numpy.ndarray
            Model - Data
        """
        f = ntwodgaussian_lmfit(params)  # A function describing the model
        model = f(*mask)  # The actual model
        if B is None:
            return model - data[mask]
        else:
            return (model - data[mask]).dot(B)

    if dojac:
        result = lmfit.minimize(residual, params,
                                kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs},
                                Dfun=lmfit_jacobian)
    else:
        result = lmfit.minimize(residual, params,
                                kws={'x': mask[0], 'y': mask[1], 'B': B, 'errs': errs})

    # Remake the residual so that it is once again (model - data)
    if B is not None:
        result.residual = result.residual.dot(inv(B))

    return result, params
python
def resources(self):
    """Returns a list of all :class:`~plexapi.myplex.MyPlexResource` objects connected to the server."""
    data = self.query(MyPlexResource.key)
    return [MyPlexResource(self, elem) for elem in data]
java
public IssueCategory getByID(String categoryID) {
    IssueCategory issueCategory = this.issueCategories.get(categoryID);
    if (issueCategory == null) {
        // We do not have this one yet, so store it as a placeholder. It will presumably be loaded later on.
        issueCategory = new IssueCategory(categoryID, "placeholder", categoryID, categoryID, 0, true);
        this.issueCategories.put(categoryID, issueCategory);
    }
    return issueCategory;
}
python
def asML(self):
    """
    Convert this vector to the new mllib-local representation.
    This does NOT copy the data; it copies references.

    :return: :py:class:`pyspark.ml.linalg.SparseVector`

    .. versionadded:: 2.0.0
    """
    return newlinalg.SparseVector(self.size, self.indices, self.values)
python
def webui_schematics_panels_panel_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    webui = ET.SubElement(config, "webui", xmlns="http://tail-f.com/ns/webui")
    schematics = ET.SubElement(webui, "schematics")
    panels = ET.SubElement(schematics, "panels")
    panel = ET.SubElement(panels, "panel")
    name = ET.SubElement(panel, "name")
    name.text = kwargs.pop('name')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def set_rotation(self, rotation):
    """Set the rotation of the stereonet in degrees clockwise from North."""
    self._rotation = np.radians(rotation)
    self._polar.set_theta_offset(self._rotation + np.pi / 2.0)
    self.transData.invalidate()
    self.transAxes.invalidate()
    self._set_lim_and_transforms()
java
@Deprecated
public <T> Class<T> getPropertyAsClass(String key, Class<T> targetType) {
    Optional<String> property = propertyResolver.getProperty(NameUtils.hyphenate(key), String.class);
    if (property.isPresent()) {
        Optional<Class> aClass = ClassUtils.forName(property.get(), Thread.currentThread().getContextClassLoader());
        if (aClass.isPresent()) {
            //noinspection unchecked
            return aClass.get();
        }
    }
    return null;
}
python
def radlToSimple(radl_data):
    """
    Return a list of maps whose values are only other maps or lists.
    """
    aspects = (radl_data.ansible_hosts + radl_data.networks + radl_data.systems +
               radl_data.configures + radl_data.deploys)
    if radl_data.contextualize.items is not None:
        aspects.append(radl_data.contextualize)

    return [aspectToSimple(a) for a in aspects]
python
def append(self, bs):
    """Append a bitstring to the current bitstring.

    bs -- The bitstring to append.

    """
    # The offset is a hint to make bs easily appendable.
    bs = self._converttobitstring(bs, offset=(self.len + self._offset) % 8)
    self._append(bs)
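A minimal usage sketch, assuming this method lives on the mutable BitArray class of the bitstring package (the public API that exposes append):

from bitstring import BitArray

a = BitArray('0b1100')
a.append('0b01')  # goes through _converttobitstring + _append internally
assert a.bin == '110001'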
java
public static NodeImpl setTypeParameter(final NodeImpl node, final Class<?> containerClass,
        final Integer typeArgumentIndex) {
    return new NodeImpl( //
        node.name, //
        node.parent, //
        node.isIterableValue, //
        node.index, //
        node.key, //
        node.kind, //
        node.parameterTypes, //
        node.parameterIndex, //
        node.value, //
        containerClass, //
        typeArgumentIndex //
    );
}
python
def unload(module):
    '''
    Unload specified fault manager module

    module: string
        module to unload

    CLI Example:

    .. code-block:: bash

        salt '*' fmadm.unload software-response
    '''
    fmadm = _check_fmadm()
    cmd = '{cmd} unload {module}'.format(
        cmd=fmadm,
        module=module
    )
    res = __salt__['cmd.run_all'](cmd)
    retcode = res['retcode']
    result = {}
    if retcode != 0:
        result['Error'] = res['stderr']
    else:
        result = True

    return result
java
public MessageToSend prepareMessageToSend() {
    originalMessage.getParts().clear();
    originalMessage.getParts().addAll(publicParts);
    return originalMessage;
}
java
public final BatchCreateNotesResponse batchCreateNotes(ProjectName parent, Map<String, Note> notes) {
    BatchCreateNotesRequest request =
        BatchCreateNotesRequest.newBuilder()
            .setParent(parent == null ? null : parent.toString())
            .putAllNotes(notes)
            .build();
    return batchCreateNotes(request);
}
python
def serialize(ms, version=_default_version, properties=True,
              pretty_print=False, color=False):
    """Serialize an MRS structure into a SimpleMRS string."""
    delim = '\n' if pretty_print else _default_mrs_delim
    output = delim.join(
        _serialize_mrs(m, properties=properties, version=version,
                       pretty_print=pretty_print)
        for m in ms
    )
    if color:
        output = highlight(output)
    return output
java
public BoxAuthenticationInfo getAuthInfo(String userId, Context context) {
    return userId == null ? null : getAuthInfoMap(context).get(userId);
}
java
public static Properties splitArrayElementsIntoProperties(String[] array, String delimiter) {
    return splitArrayElementsIntoProperties(array, delimiter, null);
}
python
def _display_choices(self, choices):
    """Prints a mapping of numbers to choices and returns the
    mapping as a dictionary.
    """
    print("Choose the number of the correct choice:")
    choice_map = {}
    for i, choice in enumerate(choices):
        i = str(i)
        print('{}) {}'.format(i, format.indent(choice,
                                               ' ' * (len(i) + 2)).strip()))
        choice = format.normalize(choice)
        choice_map[i] = choice
    return choice_map
python
def _convert_service_properties_to_xml(logging, hour_metrics, minute_metrics,
                                       cors, target_version=None,
                                       delete_retention_policy=None, static_website=None):
    '''
    <?xml version="1.0" encoding="utf-8"?>
    <StorageServiceProperties>
        <Logging>
            <Version>version-number</Version>
            <Delete>true|false</Delete>
            <Read>true|false</Read>
            <Write>true|false</Write>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </Logging>
        <HourMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </HourMetrics>
        <MinuteMetrics>
            <Version>version-number</Version>
            <Enabled>true|false</Enabled>
            <IncludeAPIs>true|false</IncludeAPIs>
            <RetentionPolicy>
                <Enabled>true|false</Enabled>
                <Days>number-of-days</Days>
            </RetentionPolicy>
        </MinuteMetrics>
        <Cors>
            <CorsRule>
                <AllowedOrigins>comma-separated-list-of-allowed-origins</AllowedOrigins>
                <AllowedMethods>comma-separated-list-of-HTTP-verb</AllowedMethods>
                <MaxAgeInSeconds>max-caching-age-in-seconds</MaxAgeInSeconds>
                <ExposedHeaders>comma-separated-list-of-response-headers</ExposedHeaders>
                <AllowedHeaders>comma-separated-list-of-request-headers</AllowedHeaders>
            </CorsRule>
        </Cors>
        <DeleteRetentionPolicy>
            <Enabled>true|false</Enabled>
            <Days>number-of-days</Days>
        </DeleteRetentionPolicy>
        <StaticWebsite>
            <Enabled>true|false</Enabled>
            <IndexDocument></IndexDocument>
            <ErrorDocument404Path></ErrorDocument404Path>
        </StaticWebsite>
    </StorageServiceProperties>
    '''
    service_properties_element = ETree.Element('StorageServiceProperties')

    # Logging
    if logging:
        logging_element = ETree.SubElement(service_properties_element, 'Logging')
        ETree.SubElement(logging_element, 'Version').text = logging.version
        ETree.SubElement(logging_element, 'Delete').text = str(logging.delete)
        ETree.SubElement(logging_element, 'Read').text = str(logging.read)
        ETree.SubElement(logging_element, 'Write').text = str(logging.write)
        retention_element = ETree.SubElement(logging_element, 'RetentionPolicy')
        _convert_retention_policy_to_xml(logging.retention_policy, retention_element)

    # HourMetrics
    if hour_metrics:
        hour_metrics_element = ETree.SubElement(service_properties_element, 'HourMetrics')
        _convert_metrics_to_xml(hour_metrics, hour_metrics_element)

    # MinuteMetrics
    if minute_metrics:
        minute_metrics_element = ETree.SubElement(service_properties_element, 'MinuteMetrics')
        _convert_metrics_to_xml(minute_metrics, minute_metrics_element)

    # CORS
    # Make sure to still serialize empty list
    if cors is not None:
        cors_element = ETree.SubElement(service_properties_element, 'Cors')
        for rule in cors:
            cors_rule = ETree.SubElement(cors_element, 'CorsRule')
            ETree.SubElement(cors_rule, 'AllowedOrigins').text = ",".join(rule.allowed_origins)
            ETree.SubElement(cors_rule, 'AllowedMethods').text = ",".join(rule.allowed_methods)
            ETree.SubElement(cors_rule, 'MaxAgeInSeconds').text = str(rule.max_age_in_seconds)
            ETree.SubElement(cors_rule, 'ExposedHeaders').text = ",".join(rule.exposed_headers)
            ETree.SubElement(cors_rule, 'AllowedHeaders').text = ",".join(rule.allowed_headers)

    # Target version
    if target_version:
        ETree.SubElement(service_properties_element, 'DefaultServiceVersion').text = target_version

    # DeleteRetentionPolicy
    if delete_retention_policy:
        policy_element = ETree.SubElement(service_properties_element, 'DeleteRetentionPolicy')
        ETree.SubElement(policy_element, 'Enabled').text = str(delete_retention_policy.enabled)
        if delete_retention_policy.enabled:
            ETree.SubElement(policy_element, 'Days').text = str(delete_retention_policy.days)

    # StaticWebsite
    if static_website:
        static_website_element = ETree.SubElement(service_properties_element, 'StaticWebsite')
        ETree.SubElement(static_website_element, 'Enabled').text = str(static_website.enabled)
        if static_website.enabled:
            if static_website.index_document is not None:
                ETree.SubElement(static_website_element, 'IndexDocument').text = str(static_website.index_document)
            if static_website.error_document_404_path is not None:
                ETree.SubElement(static_website_element, 'ErrorDocument404Path').text = \
                    str(static_website.error_document_404_path)

    # Add xml declaration and serialize
    try:
        stream = BytesIO()
        ETree.ElementTree(service_properties_element).write(stream, xml_declaration=True,
                                                            encoding='utf-8', method='xml')
    except:
        raise
    finally:
        output = stream.getvalue()
        stream.close()

    return output
python
def permute(self, idx):
    """Permutes the columns of the factor matrices inplace
    """
    # Check that input is a true permutation
    if set(idx) != set(range(self.rank)):
        raise ValueError('Invalid permutation specified.')

    # Update factors
    self.factors = [f[:, idx] for f in self.factors]

    return self.factors
java
private void selectNode(final Object key, final Handler<AsyncResult<String>> doneHandler) {
    context.execute(new Action<String>() {
        @Override
        public String perform() {
            String address = nodeSelectors.get(key);
            if (address != null) {
                return address;
            }
            Set<String> nodes = new HashSet<>();
            for (String group : groups.keySet()) {
                nodes.addAll(groups.get(group));
            }
            int index = new Random().nextInt(nodes.size());
            int i = 0;
            for (String node : nodes) {
                if (i == index) {
                    nodeSelectors.put(key, node);
                    return node;
                }
                i++;
            }
            return null;
        }
    }, doneHandler);
}
java
public IScheduler getScheduler(final String schedName) throws SchedulerException {
    final SchedulerRepository schedRep = SchedulerRepository.getInstance();
    return schedRep.lookup(schedName);
}
java
public void setCollections(java.util.Collection<CurrentMetricData> collections) {
    if (collections == null) {
        this.collections = null;
        return;
    }
    this.collections = new java.util.ArrayList<CurrentMetricData>(collections);
}
python
def _GetEventIdentifiers(self, event):
    """Retrieves different identifiers of the event.

    Every event contains event data, which consists of attributes and values.
    These attributes and values can be represented as a string and used for
    sorting and uniquely identifying events. This function determines multiple
    identifiers:
    * an identifier of the attributes and values without the timestamp
      description (or usage). This is referred to as the MACB group
      identifier.
    * an identifier of the attributes and values including the timestamp
      description (or usage). This is referred to as the event content
      identifier.

    The identifier without the timestamp description can be used to group
    events that have the same MACB (modification, access, change, birth)
    timestamps. The PsortEventHeap will store these events individually and
    relies on PsortMultiProcessEngine to do the actual grouping of events.

    Args:
        event (EventObject): event.

    Returns:
        tuple: containing:

            str: identifier of the event MACB group or None if the event
                cannot be grouped.
            str: identifier of the event content.
    """
    attributes = []

    attribute_string = 'data_type: {0:s}'.format(event.data_type)
    attributes.append(attribute_string)

    for attribute_name, attribute_value in sorted(event.GetAttributes()):
        if attribute_name in self._IDENTIFIER_EXCLUDED_ATTRIBUTES:
            continue

        if not attribute_value:
            continue

        if attribute_name == 'pathspec':
            attribute_value = attribute_value.comparable
        elif isinstance(attribute_value, dict):
            attribute_value = sorted(attribute_value.items())
        elif isinstance(attribute_value, set):
            attribute_value = sorted(list(attribute_value))
        elif isinstance(attribute_value, py2to3.BYTES_TYPE):
            attribute_value = repr(attribute_value)

        try:
            attribute_string = '{0:s}: {1!s}'.format(
                attribute_name, attribute_value)
        except UnicodeDecodeError:
            logger.error('Failed to decode attribute {0:s}'.format(
                attribute_name))
        attributes.append(attribute_string)

    # The 'atime', 'ctime', 'crtime', 'mtime' are included for backwards
    # compatibility with the filestat parser.
    if event.timestamp_desc in (
            'atime', 'ctime', 'crtime', 'mtime',
            definitions.TIME_DESCRIPTION_LAST_ACCESS,
            definitions.TIME_DESCRIPTION_CHANGE,
            definitions.TIME_DESCRIPTION_CREATION,
            definitions.TIME_DESCRIPTION_MODIFICATION):
        macb_group_identifier = ', '.join(attributes)
    else:
        macb_group_identifier = None

    attributes.insert(0, event.timestamp_desc)
    content_identifier = ', '.join(attributes)

    return macb_group_identifier, content_identifier
python
def bind_extensions(app):
    """Configure extensions.

    Args:
        app (Flask): initialized Flask app instance
    """
    # bind plugin to app object
    app.db = app.config['PUZZLE_BACKEND']
    app.db.init_app(app)

    # bind bootstrap blueprints
    bootstrap.init_app(app)
    markdown(app)

    @app.template_filter('islist')
    def islist(object):
        return isinstance(object, (tuple, list))
python
def gaussian_distribution(mean, stdev, num_pts=50):
    """ get an x and y numpy.ndarray that spans the +/- 4
    standard deviation range of a gaussian distribution with
    a given mean and standard deviation. useful for plotting

    Parameters
    ----------
    mean : float
        the mean of the distribution
    stdev : float
        the standard deviation of the distribution
    num_pts : int
        the number of points in the returned ndarrays.
        Default is 50

    Returns
    -------
    x : numpy.ndarray
        the x-values of the distribution
    y : numpy.ndarray
        the y-values of the distribution
    """
    warnings.warn("pyemu.helpers.gaussian_distribution() has moved to plot_utils", PyemuWarning)
    from pyemu import plot_utils
    return plot_utils.gaussian_distribution(mean=mean, stdev=stdev, num_pts=num_pts)
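A short usage sketch of the relocated helper (assumes pyemu and matplotlib are installed):

import matplotlib.pyplot as plt
from pyemu import plot_utils

x, y = plot_utils.gaussian_distribution(mean=0.0, stdev=1.0, num_pts=100)
plt.plot(x, y)  # bell curve spanning mean +/- 4 standard deviations
plt.show()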
java
public HostName toCanonicalHostName() {
    HostName host = canonicalHost;
    if (host == null) {
        if (isMultiple()) {
            throw new IncompatibleAddressException(this, "ipaddress.error.unavailable.numeric");
        }
        InetAddress inetAddress = toInetAddress();
        String hostStr = inetAddress.getCanonicalHostName(); //note: this does not return ipv6 addresses enclosed in brackets []
        if (hostStr.equals(inetAddress.getHostAddress())) {
            //we got back the address, so the host is me
            host = new HostName(hostStr, new ParsedHost(hostStr, getProvider()));
            host.resolvedAddress = this;
        } else {
            //the reverse lookup succeeded in finding a host string
            //we might not be the default resolved address for the host, so we don't set that field
            host = new HostName(hostStr);
        }
    }
    return host;
}
java
private JTextField getTextFieldContextName() {
    if (txtContextName == null) {
        txtContextName = new JTextField();
        txtContextName.setText(SOABase.DEFAULT_NAME);
        txtContextName.setColumns(10);
    }
    return txtContextName;
}
java
private int findSmallestFrom(int startId, List<String> sortFlds, List<Integer> sortDirs) {
    int minId = startId;
    moveToId(startId);
    while (super.next()) {
        int id = currentId();
        if (minId < 0 || compareRecords(minId, id, sortFlds, sortDirs) > 0)
            minId = id;
        moveToId(id);
    }
    return minId;
}
java
public static <T> T max(Iterator<T> self) {
    return max((Iterable<T>) toList(self));
}
python
def wind_shear(shear: str, unit_alt: str = 'ft', unit_wind: str = 'kt') -> str:
    """
    Format wind shear string into a spoken word string
    """
    unit_alt = SPOKEN_UNITS.get(unit_alt, unit_alt)
    unit_wind = SPOKEN_UNITS.get(unit_wind, unit_wind)
    return translate.wind_shear(shear, unit_alt, unit_wind, spoken=True) or 'Wind shear unknown'
java
public void setCustomRules(java.util.Collection<CustomRule> customRules) {
    if (customRules == null) {
        this.customRules = null;
        return;
    }
    this.customRules = new java.util.ArrayList<CustomRule>(customRules);
}
python
def _prune_edges(G, X, traj_lengths, pruning_thresh=0.1, verbose=False):
    '''Prune edges in graph G via cosine distance with trajectory edges.'''
    W = G.matrix('dense', copy=True)
    degree = G.degree(kind='out', weighted=False)
    i = 0
    num_bad = 0
    for n in traj_lengths:
        s, t = np.nonzero(W[i:i+n-1])
        graph_edges = X[t] - X[s+i]
        traj_edges = np.diff(X[i:i+n], axis=0)
        traj_edges = np.repeat(traj_edges, degree[i:i+n-1], axis=0)
        theta = paired_distances(graph_edges, traj_edges, 'cosine')
        bad_edges = theta > pruning_thresh
        s, t = s[bad_edges], t[bad_edges]
        if verbose:  # pragma: no cover
            num_bad += np.count_nonzero(W[s, t])
        W[s, t] = 0
        i += n
    if verbose:  # pragma: no cover
        print('removed %d bad edges' % num_bad)
    return Graph.from_adj_matrix(W)
java
public EClass getIfcCompositeCurveSegment() {
    if (ifcCompositeCurveSegmentEClass == null) {
        ifcCompositeCurveSegmentEClass = (EClass) EPackage.Registry.INSTANCE.getEPackage(Ifc2x3tc1Package.eNS_URI)
                .getEClassifiers().get(93);
    }
    return ifcCompositeCurveSegmentEClass;
}
python
def prompt_gui(path):
    """Prompt for a new filename via GUI."""
    import subprocess

    filepath, extension = os.path.splitext(path)
    basename = os.path.basename(filepath)
    dirname = os.path.dirname(filepath)
    retry_text = 'Sorry, please try again...'
    icon = 'video-x-generic'

    # detect and configure dialog program
    if have('yad'):
        args = ['yad', '--borders=5', '--entry', '--entry-label=Filename:',
                '--entry-text=' + basename, '--title=Batch Tool',
                '--window-icon=' + icon]
        retry_args = args + ['--text=<b>' + retry_text + '</b>',
                             '--text-align=center']
    elif have('zenity'):
        base = ['zenity', '--entry', '--entry-text=' + basename,
                '--title=Batch Tool', '--window-icon=info']
        args = base + ['--text=Filename:']
        retry_args = base + ['--text=' + retry_text]
    else:
        fatal('Please install yad (or zenity)')

    # display filename prompt
    try:
        new_basename = subprocess.check_output(
            args, universal_newlines=True).strip()
    except subprocess.CalledProcessError:
        sys.exit(1)

    # retry prompt if new filename already exists
    while os.path.exists(os.path.join(dirname, new_basename + extension)) and \
            new_basename != basename:
        try:
            new_basename = subprocess.check_output(
                retry_args, universal_newlines=True).strip()
        except subprocess.CalledProcessError:
            sys.exit(1)

    if new_basename == '':
        new_basename = basename

    return os.path.join(dirname, new_basename + extension)
python
def rename_categories(self, key: str, categories: Sequence[Any]):
    """Rename categories of annotation ``key`` in :attr:`obs`, :attr:`var`
    and :attr:`uns`.

    Only supports passing a list/array-like ``categories`` argument.

    Besides calling ``self.obs[key].cat.categories = categories`` -
    similar for :attr:`var` - this also renames categories in unstructured
    annotation that uses the categorical annotation ``key``.

    Parameters
    ----------
    key
        Key for observations or variables annotation.
    categories
        New categories, the same number as the old categories.
    """
    if isinstance(categories, Mapping):
        raise ValueError('Only list-like `categories` is supported.')
    if key in self.obs:
        old_categories = self.obs[key].cat.categories.tolist()
        self.obs[key].cat.rename_categories(categories, inplace=True)
    elif key in self.var:
        old_categories = self.var[key].cat.categories.tolist()
        self.var[key].cat.rename_categories(categories, inplace=True)
    else:
        raise ValueError('{} is neither in `.obs` nor in `.var`.'
                         .format(key))
    # this is not a good solution
    # but depends on the scanpy conventions for storing the categorical key
    # as `groupby` in the `params` slot
    for k1, v1 in self.uns.items():
        if isinstance(v1, Mapping):
            if 'params' in v1 and 'groupby' in v1['params']:
                if v1['params']['groupby'] == key:
                    for k2, v2 in v1.items():
                        # picks out the recarrays that are named according to
                        # the old categories
                        if isinstance(v2, np.ndarray) and v2.dtype.names is not None:
                            if list(v2.dtype.names) == old_categories:
                                self.uns[k1][k2].dtype.names = categories
                            else:
                                logger.warning(
                                    'Omitting {}/{} as old categories do not match.'
                                    .format(k1, k2))
java
synchronized void register(LogWithPatternAndLevel log, Duration period) {
    // if we haven't seen this period before, we'll need to add a schedule to the ScheduledExecutorService
    // to perform a counter reset with that periodicity, otherwise we can count on the existing schedule
    // taking care of it.
    boolean needToScheduleReset = false;
    ConcurrentHashMap<LogWithPatternAndLevel, Boolean> logLinesForPeriod = registry.get(period);
    if (logLinesForPeriod == null) {
        needToScheduleReset = true;
        logLinesForPeriod = new ConcurrentHashMap<>();
        registry.put(period, logLinesForPeriod);
    } else {
        if (logLinesForPeriod.get(log) != null) {
            return; // this has already been registered
        }
    }
    logLinesForPeriod.put(log, Boolean.TRUE);

    if (needToScheduleReset) {
        final ConcurrentHashMap<LogWithPatternAndLevel, Boolean> finalLogLinesForPeriod = logLinesForPeriod;
        resetScheduler.scheduleWithFixedDelay(() -> {
            try {
                resetAllCounters(finalLogLinesForPeriod);
            } catch (Exception e) {
                logger.warn("failed to reset counters: " + e, e);
                // but carry on in the next iteration
            }
        }, period.toMillis(), period.toMillis(), TimeUnit.MILLISECONDS);
    }
}
python
def _filterByPaddingNum(cls, iterable, num):
    """
    Yield only path elements from iterable which have a frame
    padding that matches the given target padding number

    Args:
        iterable (collections.Iterable):
        num (int):

    Yields:
        str:
    """
    _check = DISK_RE.match

    for item in iterable:
        # Add a filter for paths that don't match the frame
        # padding of a given number
        matches = _check(item)
        if not matches:
            if num <= 0:
                # Not a sequence pattern, but we were asked
                # to match on a zero padding
                yield item
            continue

        frame = matches.group(3) or ''

        if not frame:
            if num <= 0:
                # No frame value was parsed, but we were asked
                # to match on a zero padding
                yield item
            continue

        # We have a frame number
        if frame[0] == '0' or frame[:2] == '-0':
            if len(frame) == num:
                # A frame leading with '0' is explicitly
                # padded and can only be a match if it's exactly
                # the target padding number
                yield item
            continue

        if len(frame) >= num:
            # A frame that does not lead with '0' can match
            # a padding width >= to the target padding number
            yield item
        continue
python
def membership_vector(clusterer, points_to_predict):
    """Predict soft cluster membership. The result produces a vector
    for each point in ``points_to_predict`` that gives a probability that
    the given point is a member of a cluster for each of the selected clusters
    of the ``clusterer``.

    Parameters
    ----------
    clusterer : HDBSCAN
        A clustering object that has been fit to the data and
        either had ``prediction_data=True`` set, or called the
        ``generate_prediction_data`` method after the fact.

    points_to_predict : array, or array-like (n_samples, n_features)
        The new data points to predict cluster labels for. They should
        have the same dimensionality as the original dataset over which
        clusterer was fit.

    Returns
    -------
    membership_vectors : array (n_samples, n_clusters)
        The probability that point ``i`` is a member of cluster ``j`` is
        in ``membership_vectors[i, j]``.

    See Also
    --------
    :py:func:`hdbscan.predict.predict`
    :py:func:`hdbscan.predict.all_points_membership_vectors`
    """
    points_to_predict = points_to_predict.astype(np.float64)
    clusters = np.array(
        sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)

    result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
                      dtype=np.float64)

    min_samples = clusterer.min_samples or clusterer.min_cluster_size
    neighbor_distances, neighbor_indices = \
        clusterer.prediction_data_.tree.query(points_to_predict,
                                              k=2 * min_samples)

    for i in range(points_to_predict.shape[0]):
        # We need to find where in the tree the new point would go
        # for the purposes of outlier membership approximation
        nearest_neighbor, lambda_ = \
            _find_neighbor_and_lambda(
                neighbor_indices[i],
                neighbor_distances[i],
                clusterer.prediction_data_.core_distances,
                min_samples)

        neighbor_tree_row = get_tree_row_with_child(
            clusterer.condensed_tree_._raw_tree, nearest_neighbor)

        if neighbor_tree_row['lambda_val'] <= lambda_:
            lambda_ = neighbor_tree_row['lambda_val']

        distance_vec = dist_membership_vector(
            points_to_predict[i],
            clusterer.prediction_data_.exemplars,
            clusterer.prediction_data_.dist_metric)
        outlier_vec = outlier_membership_vector(
            nearest_neighbor,
            lambda_,
            clusters,
            clusterer.condensed_tree_._raw_tree,
            clusterer.prediction_data_.leaf_max_lambdas,
            clusterer.prediction_data_.cluster_tree)

        result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
        result[i] /= result[i].sum()

        result[i] *= prob_in_some_cluster(
            nearest_neighbor,
            lambda_,
            clusters,
            clusterer.condensed_tree_._raw_tree,
            clusterer.prediction_data_.leaf_max_lambdas,
            clusterer.prediction_data_.cluster_tree)

    return result
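A minimal usage sketch with the hdbscan package; note that prediction_data=True must be set at fit time (or generate_prediction_data called afterwards) before soft-membership queries:

import numpy as np
import hdbscan

data = np.random.randn(200, 2)
clusterer = hdbscan.HDBSCAN(min_cluster_size=10, prediction_data=True).fit(data)
new_points = np.random.randn(5, 2)
memberships = hdbscan.membership_vector(clusterer, new_points)  # shape (5, n_clusters)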
python
def doublewell_eigs(n_grid, lag_time=1):
    """Analytic eigenvalues/eigenvectors for the doublewell system

    TODO: DOCUMENT ME
    """
    return _brownian_eigs(n_grid, lag_time, DOUBLEWELL_GRAD_POTENTIAL,
                          -np.pi, np.pi, reflect_bc=True)
python
def apply(db, op):
    """
    Apply operation in db
    """
    dbname = op['ns'].split('.')[0] or "admin"
    opts = bson.CodecOptions(uuid_representation=bson.binary.STANDARD)
    db[dbname].command("applyOps", [op], codec_options=opts)
python
def flip_ctrlpts2d_file(file_in='', file_out='ctrlpts_flip.txt'):
    """ Flips u and v directions of a 2D control points file and saves flipped coordinates to a file.

    :param file_in: name of the input file (to be read)
    :type file_in: str
    :param file_out: name of the output file (to be saved)
    :type file_out: str
    :raises IOError: an error occurred reading or writing the file
    """
    # Read control points
    ctrlpts2d, size_u, size_v = _read_ctrltps2d_file(file_in)

    # Flip control points array
    new_ctrlpts2d = flip_ctrlpts2d(ctrlpts2d, size_u, size_v)

    # Save new control points
    _save_ctrlpts2d_file(new_ctrlpts2d, size_u, size_v, file_out)
python
def _get_ssl_sock(self):
    """Get raw SSL socket."""
    assert self.scheme == u"https", self
    raw_connection = self.url_connection.raw._connection
    if raw_connection.sock is None:
        # sometimes the socket is not yet connected
        # see https://github.com/kennethreitz/requests/issues/1966
        raw_connection.connect()
    return raw_connection.sock
python
def onMessageUnsent(
    self,
    mid=None,
    author_id=None,
    thread_id=None,
    thread_type=None,
    ts=None,
    msg=None,
):
    """
    Called when the client is listening, and someone unsends (deletes for everyone) a message

    :param mid: ID of the unsent message
    :param author_id: The ID of the person who unsent the message
    :param thread_id: Thread ID that the action was sent to. See :ref:`intro_threads`
    :param thread_type: Type of thread that the action was sent to. See :ref:`intro_threads`
    :param ts: A timestamp of the action
    :param msg: A full set of the data received
    :type thread_type: models.ThreadType
    """
    log.info(
        "{} unsent the message {} in {} ({}) at {}s".format(
            author_id, repr(mid), thread_id, thread_type.name, ts / 1000
        )
    )
python
def _generate_signature_for_function(self, func):
    """Given a function, returns a string representing its args."""
    args_list = []
    argspec = inspect.getargspec(func)
    first_arg_with_default = (
        len(argspec.args or []) - len(argspec.defaults or []))
    for arg in argspec.args[:first_arg_with_default]:
        if arg == "self":
            # Python documentation typically skips `self` when printing method
            # signatures.
            continue
        args_list.append(arg)

    # TODO(mrry): This is a workaround for documenting signature of
    # functions that have the @contextlib.contextmanager decorator.
    # We should do something better.
    if argspec.varargs == "args" and argspec.keywords == "kwds":
        original_func = func.__closure__[0].cell_contents
        return self._generate_signature_for_function(original_func)

    if argspec.defaults:
        for arg, default in zip(
                argspec.args[first_arg_with_default:], argspec.defaults):
            if callable(default):
                args_list.append("%s=%s" % (arg, default.__name__))
            else:
                args_list.append("%s=%r" % (arg, default))
    if argspec.varargs:
        args_list.append("*" + argspec.varargs)
    if argspec.keywords:
        args_list.append("**" + argspec.keywords)
    return "(" + ", ".join(args_list) + ")"
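An illustration of the string this produces, relying on the legacy inspect.getargspec semantics the code above uses (Python 2-era; `self` is skipped):

def example(self, a, b=3, *args, **kwargs):
    pass

# _generate_signature_for_function(example) would yield:
# "(a, b=3, *args, **kwargs)"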
java
public FieldValue evaluate(int index) {
    if (this.indexedNames == null) {
        throw new IllegalStateException();
    }
    FieldValue value = this.indexedValues[index];
    if (value == EvaluationContext.UNDECLARED_VALUE) {
        FieldName name = this.indexedNames[index];
        value = evaluate(name);
        this.indexedValues[index] = value;
    }
    return value;
}
python
def should_see_link(self, link_url):
    """Assert a link with the provided URL is visible on the page."""
    elements = ElementSelector(
        world.browser,
        str('//a[@href="%s"]' % link_url),
        filter_displayed=True,
    )
    if not elements:
        raise AssertionError("Expected link not found.")
python
def seek_to(self, position):
    """Move the Shard's iterator to the earliest record after the
    :class:`~datetime.datetime` time.

    Returns the first records at or past ``position``.  If the list is
    empty, the seek failed to find records, either because the Shard is
    exhausted or it reached the HEAD of an open Shard.

    :param position: The position in time to move to.
    :type position: :class:`~datetime.datetime`
    :returns: A list of the first records found after ``position``.
        May be empty.
    """
    # 0) We have no way to associate the date with a position,
    #    so we have to scan the shard from the beginning.
    self.jump_to(iterator_type="trim_horizon")

    position = int(position.timestamp())

    while (not self.exhausted) and (self.empty_responses < CALLS_TO_REACH_HEAD):
        records = self.get_records()
        # We can skip the whole record set if the newest (last) record isn't new enough.
        if records and records[-1]["meta"]["created_at"].timestamp() >= position:
            # Looking for the first number *below* the position.
            for offset, record in enumerate(reversed(records)):
                if record["meta"]["created_at"].timestamp() < position:
                    index = len(records) - offset
                    return records[index:]
            return records
    # Either exhausted the Shard or caught up to HEAD.
    return []
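A hypothetical usage sketch; how the Shard is constructed is omitted as an assumption, and only the seek_to contract documented above is relied on:

from datetime import datetime, timedelta, timezone

one_hour_ago = datetime.now(timezone.utc) - timedelta(hours=1)
records = shard.seek_to(one_hour_ago)  # `shard` is an already-built Shard
for record in records:
    print(record["meta"]["created_at"])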
java
private void adaptForInsert(final ITreeData paramNewNode, final boolean addAsFirstChild) throws TTException {
    assert paramNewNode != null;

    if (paramNewNode instanceof ITreeStructData) {
        final ITreeStructData strucNode = (ITreeStructData) paramNewNode;
        final ITreeStructData parent = (ITreeStructData) getPtx().getData(paramNewNode.getParentKey());
        parent.incrementChildCount();
        if (addAsFirstChild) {
            parent.setFirstChildKey(paramNewNode.getDataKey());
        }
        getPtx().setData(parent);

        if (strucNode.hasRightSibling()) {
            final ITreeStructData rightSiblingNode =
                (ITreeStructData) getPtx().getData(strucNode.getRightSiblingKey());
            rightSiblingNode.setLeftSiblingKey(paramNewNode.getDataKey());
            getPtx().setData(rightSiblingNode);
        }
        if (strucNode.hasLeftSibling()) {
            final ITreeStructData leftSiblingNode =
                (ITreeStructData) getPtx().getData(strucNode.getLeftSiblingKey());
            leftSiblingNode.setRightSiblingKey(paramNewNode.getDataKey());
            getPtx().setData(leftSiblingNode);
        }
    }
}
java
@BetaApi
public final Operation patchNetwork(
        ProjectGlobalNetworkName network, Network networkResource, List<String> fieldMask) {
    PatchNetworkHttpRequest request =
        PatchNetworkHttpRequest.newBuilder()
            .setNetwork(network == null ? null : network.toString())
            .setNetworkResource(networkResource)
            .addAllFieldMask(fieldMask)
            .build();
    return patchNetwork(request);
}
python
def draw_image(self, metric, limit=5):
    """Display a series of images at different time steps."""
    rows = 1
    cols = limit
    self.ax.axis("off")
    # Take the Axes gridspec and divide it into a grid
    gs = matplotlib.gridspec.GridSpecFromSubplotSpec(
        rows, cols, subplot_spec=self.gs)
    # Loop through images in last few steps
    for i, image in enumerate(metric.data[-cols:]):
        ax = self.figure.add_subplot(gs[0, i])
        ax.axis('off')
        ax.set_title(metric.formatted_steps[-cols:][i])
        ax.imshow(norm(image))
python
def get_distributed_session_creator(server):
    """
    Args:
        server (tf.train.Server):

    Returns:
        tf.train.SessionCreator
    """
    server_def = server.server_def
    is_chief = (server_def.job_name == 'worker') and (server_def.task_index == 0)

    init_op = tf.global_variables_initializer()
    local_init_op = tf.local_variables_initializer()
    ready_op = tf.report_uninitialized_variables()
    ready_for_local_init_op = tf.report_uninitialized_variables(tf.global_variables())
    sm = tf.train.SessionManager(
        local_init_op=local_init_op,
        ready_op=ready_op,
        ready_for_local_init_op=ready_for_local_init_op,
        graph=tf.get_default_graph())

    # to debug wrong variable collection
    # from pprint import pprint
    # print("GLOBAL:")
    # pprint([(k.name, k.device) for k in tf.global_variables()])
    # print("LOCAL:")
    # pprint([(k.name, k.device) for k in tf.local_variables()])

    class _Creator(tf.train.SessionCreator):
        def create_session(self):
            if is_chief:
                return sm.prepare_session(master=server.target, init_op=init_op)
            else:
                tf.logging.set_verbosity(tf.logging.INFO)  # print message about uninitialized vars
                ret = sm.wait_for_session(master=server.target)
                tf.logging.set_verbosity(tf.logging.WARN)
                return ret

    return _Creator()
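A hedged sketch of wiring this into a TF 1.x MonitoredSession; the single-worker cluster spec is an assumption for illustration only:

import tensorflow as tf

cluster_spec = tf.train.ClusterSpec({'worker': ['localhost:2222']})
server = tf.train.Server(cluster_spec, job_name='worker', task_index=0)
with tf.train.MonitoredSession(
        session_creator=get_distributed_session_creator(server)) as sess:
    pass  # run training ops here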
java
public static Method[] getAllMethods(Class<?> clz) {
    Set<Method> set = new HashSet<>();
    List<Class<?>> classes = new ArrayList<>();
    classes.add(clz);
    classes.addAll(Arrays.asList(getAllSuperClasses(clz)));
    classes.addAll(Arrays.asList(getAllInterfaces(clz)));
    for (Class<?> c : classes) {
        set.addAll(Arrays.asList(c.getDeclaredMethods()));
    }
    return set.toArray(new Method[set.size()]);
}
java
boolean shouldExecuteOnProject() {
    File report = pathResolver.relativeFile(fileSystem.baseDir(), configuration.getItReportPath());
    boolean foundReport = report.exists() && report.isFile();
    boolean shouldExecute = configuration.shouldExecuteOnProject(foundReport);
    if (!foundReport && shouldExecute) {
        JaCoCoExtensions.logger().info(this.toString() + ": JaCoCo IT report not found.");
    }
    return shouldExecute;
}
java
private static Class<?> getInvalidBusinessExtends(Class<?> wrapperInterface) {
    if ((EJBLocalObject.class).isAssignableFrom(wrapperInterface))
        return EJBLocalObject.class;

    if ((EJBLocalHome.class).isAssignableFrom(wrapperInterface))
        return EJBLocalHome.class;

    if ((EJBObject.class).isAssignableFrom(wrapperInterface))
        return EJBObject.class;

    if ((EJBHome.class).isAssignableFrom(wrapperInterface))
        return EJBHome.class;

    return null;
}
python
def AddClientKeywords(self, client_id, keywords):
    """Associates the provided keywords with the client."""
    if client_id not in self.metadatas:
        raise db.UnknownClientError(client_id)

    for kw in keywords:
        self.keywords.setdefault(kw, {})
        self.keywords[kw][client_id] = rdfvalue.RDFDatetime.Now()
python
def add_children_gos(self, gos):
    """Return children of input gos plus input gos."""
    lst = []
    obo_dag = self.obo_dag
    get_children = lambda go_obj: list(go_obj.get_all_children()) + [go_obj.id]
    for go_id in gos:
        go_obj = obo_dag[go_id]
        lst.extend(get_children(go_obj))
    return set(lst)
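A hedged usage sketch with goatools; the owner class name is hypothetical, and only the obo_dag attribute plus the method contract above are assumed:

from goatools.obo_parser import GODag

godag = GODag('go-basic.obo')
helper = GoChildrenExpander(godag)  # hypothetical class holding obo_dag
expanded = helper.add_children_gos({'GO:0008150'})
# `expanded` contains GO:0008150 plus every descendant GO term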
java
@Override
public void stage3CreateKAMstore(final DBConnection db, String schemaName) throws CreateKAMFailure {
    if (db == null) {
        throw new InvalidArgument("db", db);
    }

    try {
        ksss.setupKAMStoreSchema(db, schemaName);
    } catch (IOException e) {
        throw new CreateKAMFailure(db, e.getMessage());
    } catch (SQLException e) {
        throw new CreateKAMFailure(db, e.getMessage());
    }
}
java
public boolean isNamespacePresent(final String requestedNamespace) {
    checkArgument(requestedNamespace.length() > 0);
    checkArgument(!requestedNamespace.endsWith(DELIM));
    final String probe = requestedNamespace + DELIM;
    return Iterables.any(params.keySet(), StringUtils.startsWith(probe));
}
python
def _add_sections(self):
    '''Add the found and required sections to the templ_dict.'''
    for section in self.template_sections:
        try:
            sec_start = self._get_section_start_index(section)
        except NonextantSectionException:
            if section in self.sections_not_required:
                continue
            raise
        sec_end = self._get_section_end_index(section, sec_start)
        section_value = self.template_str[sec_start+1:sec_end].strip()
        section, section_value = self._transform_key_value(
            section,
            section_value,
            self.section_map
        )
        self.templ_dict['actions']['definition'][section] = section_value
        self.template_str = self.template_str[:sec_start+1] + \
            self.template_str[sec_end:]
python
def getStates(self):
    '''
    Calculates updated values of normalized market resources and permanent income level.
    Uses pLvlNow, aNrmNow, PermShkNow, TranShkNow.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    pLvlPrev = self.pLvlNow
    aNrmPrev = self.aNrmNow

    # Calculate new states: normalized market resources and permanent income level
    self.pLvlNow = pLvlPrev*self.PermShkNow  # Same as in IndShockConsType
    self.kNrmNow = aNrmPrev/self.PermShkNow
    self.yNrmNow = self.kNrmNow**self.CapShare*self.TranShkNow**(1.-self.CapShare)
    self.Rfree = 1. + self.CapShare*self.kNrmNow**(self.CapShare-1.)*self.TranShkNow**(1.-self.CapShare) - self.DeprFac
    self.wRte = (1.-self.CapShare)*self.kNrmNow**self.CapShare*self.TranShkNow**(-self.CapShare)
    self.mNrmNow = self.Rfree*self.kNrmNow + self.wRte*self.TranShkNow
python
def jsLocal(self, time_zone=''):
    '''
    a method to report a javascript string from a labDT object

    :param time_zone: [optional] string with timezone to report in
    :return: string with date and time info
    '''
    # validate inputs
    js_format = '%a %b %d %Y %H:%M:%S GMT%z (%Z)'
    title = 'Timezone input for labDT.jsLocal'
    get_tz = get_localzone()
    if time_zone:
        # if time_zone.lower() in ('utc', 'uct', 'universal', 'zulu'):
        #     raise ValueError('time_zone cannot be UTC. %s requires a local timezone value. Try:\nfor tz in pytz.all_timezones:\n    print tz' % title)
        try:
            get_tz = tz.gettz(time_zone)
        except:
            raise ValueError('\n%s is not a valid timezone format. Try:\nfor tz in pytz.all_timezones:\n    print tz' % title)

    # construct javascript datetime from labDT
    dtLocal = self.astimezone(get_tz)
    return format(dtLocal, js_format)
java
public CompletableFuture<SubscriptionRuntimeInfo> getSubscriptionRuntimeInfoAsync(String topicPath, String subscriptionName) {
    EntityNameHelper.checkValidTopicName(topicPath);
    EntityNameHelper.checkValidSubscriptionName(subscriptionName);

    String path = EntityNameHelper.formatSubscriptionPath(topicPath, subscriptionName);
    CompletableFuture<String> contentFuture = getEntityAsync(path, null, true);
    CompletableFuture<SubscriptionRuntimeInfo> sdFuture = new CompletableFuture<>();

    contentFuture.handleAsync((content, ex) -> {
        if (ex != null) {
            sdFuture.completeExceptionally(ex);
        } else {
            try {
                sdFuture.complete(SubscriptionRuntimeInfoSerializer.parseFromContent(topicPath, content));
            } catch (MessagingEntityNotFoundException e) {
                sdFuture.completeExceptionally(e);
            }
        }
        return null;
    }, MessagingFactory.INTERNAL_THREAD_POOL);

    return sdFuture;
}
python
def _create_board_image_cv(self, board=None):
    """Return a cv image of the board or empty board if not provided."""
    board = board or base.Board()  # empty board by default
    tile_h, tile_w = self._TILE_SHAPE[0:2]
    board_shape = tile_h * 8, tile_w * 8, 3
    board_image = numpy.zeros(board_shape, dtype=numpy.uint8)
    # place each tile on the image
    for (row, col), tile in board.positions_with_tile():
        tile_image = self._tile_images[tile._type]
        t, l = row * tile_h, col * tile_w
        b, r = t + tile_h, l + tile_w
        board_image[t:b, l:r] = tile_image
    return board_image
java
public OvhSubscription subscription_subscriptionType_GET(String subscriptionType) throws IOException {
    String qPath = "/me/subscription/{subscriptionType}";
    StringBuilder sb = path(qPath, subscriptionType);
    String resp = exec(qPath, "GET", sb.toString(), null);
    return convertTo(resp, OvhSubscription.class);
}
java
public Config setWanReplicationConfigs(Map<String, WanReplicationConfig> wanReplicationConfigs) {
    this.wanReplicationConfigs.clear();
    this.wanReplicationConfigs.putAll(wanReplicationConfigs);
    for (final Entry<String, WanReplicationConfig> entry : this.wanReplicationConfigs.entrySet()) {
        entry.getValue().setName(entry.getKey());
    }
    return this;
}
python
def send(self, strict_validation=True):
    """Send the request and return the response or raise SearchAPIError.

    Calling this method blocks the program until the response is returned,
    if you want the request to be sent asynchronously please refer to the
    send_async method.

    The response is returned as a SearchAPIResponse object.

    `strict_validation` is a bool argument that's passed to the
    validate_query_params method.

    Raises ValueError (raised from validate_query_params),
    HttpError/URLError and SearchAPIError (when the response is returned
    but contains an error).

    Example:

    >>> from osrframework.thirdparties.pipl_com.lib.search import SearchAPIRequest, SearchAPIError
    >>> request = SearchAPIRequest('samplekey', email='[email protected]')
    >>> try:
    ...     response = request.send()
    ... except SearchAPIError as e:
    ...     print e.http_status_code, e
    """
    self.validate_query_params(strict=strict_validation)
    query = {
        'key': self.api_key or default_api_key,
        'person': self.person.to_json(),
        'query_params_mode': self.query_params_mode,
        'exact_name': self.exact_name,
        'prioritize_records_by': ','.join(self._prioritize_records_by),
        'filter_records_by': self._filter_records_by,
    }
    request = urllib2.Request(url=SearchAPIRequest.BASE_URL,
                              data=urllib.urlencode(query, True),
                              headers=SearchAPIRequest.HEADERS)
    try:
        json_response = urllib2.urlopen(request).read()
    except urllib2.HTTPError as e:
        json_error = e.read()
        if not json_error:
            raise e
        try:
            raise SearchAPIError.from_json(json_error)
        except ValueError:
            raise e
    return SearchAPIResponse.from_json(json_response)
java
public void unregisterWorkflowDef(String name, Integer version) {
    Preconditions.checkArgument(StringUtils.isNotBlank(name), "Workflow name cannot be blank");
    Preconditions.checkNotNull(version, "Version cannot be null");
    delete("metadata/workflow/{name}/{version}", name, version);
}
java
public static Speed of(final double speed, final Unit unit) {
    requireNonNull(unit);
    return new Speed(Unit.METERS_PER_SECOND.convert(speed, unit));
}
java
@Override
public Pair<T, I> get(int index) {
    return indexToPair(matrix.get(index));
}
java
public void marshall(UpdatePartitionRequest updatePartitionRequest, ProtocolMarshaller protocolMarshaller) {
    if (updatePartitionRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(updatePartitionRequest.getCatalogId(), CATALOGID_BINDING);
        protocolMarshaller.marshall(updatePartitionRequest.getDatabaseName(), DATABASENAME_BINDING);
        protocolMarshaller.marshall(updatePartitionRequest.getTableName(), TABLENAME_BINDING);
        protocolMarshaller.marshall(updatePartitionRequest.getPartitionValueList(), PARTITIONVALUELIST_BINDING);
        protocolMarshaller.marshall(updatePartitionRequest.getPartitionInput(), PARTITIONINPUT_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
public void fatalError(SAXParseException e) throws SAXException {
    if (null != m_errorHandler) {
        try {
            m_errorHandler.fatalError(e);
        } catch (SAXParseException se) {
            // ignore
        }
        // clearCoRoutine(e);
    }

    // This is not great, but we really would rather have the error
    // handler be the error listener if it is a error handler. Coroutine's fatalError
    // can't really be configured, so I think this is the best thing right now
    // for error reporting. Possibly another JAXP 1.1 hole. -sb
    javax.xml.transform.ErrorListener errorListener = m_transformer.getErrorListener();
    if (errorListener instanceof ErrorHandler) {
        ((ErrorHandler) errorListener).fatalError(e);
        if (null != m_errorHandler)
            m_errorHandler.fatalError(e); // may not be called.
    } else {
        try {
            errorListener.fatalError(new javax.xml.transform.TransformerException(e));
            if (null != m_errorHandler)
                m_errorHandler.fatalError(e); // may not be called.
        } catch (javax.xml.transform.TransformerException te) {
            throw e;
        }
    }
}
java
public static <T> T implement(Class<T> type, Object object) {
    if (type.isInstance(object))
        return type.cast(object);
    return type.cast(Proxy.newProxyInstance(type.getClassLoader(),
            new Class[]{type}, new DuckType(object)));
}
python
def move_in(self, session, space, offset, length, width, extended=False):
    """Moves a block of data to local memory from the specified address space and offset.

    Corresponds to viMoveIn* functions of the VISA library.

    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register from which to read.
    :param length: Number of elements to transfer, where the data width of
                   the elements to transfer is identical to the source data width.
    :param width: Number of bits to read per element.
    :param extended: Use 64 bits offset independent of the platform.
    :return: Data read from the bus, return value of the library call.
    :rtype: list, :class:`pyvisa.constants.StatusCode`
    """
    if width == 8:
        return self.move_in_8(session, space, offset, length, extended)
    elif width == 16:
        return self.move_in_16(session, space, offset, length, extended)
    elif width == 32:
        return self.move_in_32(session, space, offset, length, extended)
    elif width == 64:
        return self.move_in_64(session, space, offset, length, extended)

    raise ValueError('%s is not a valid size. Valid values are 8, 16, 32 or 64' % width)
java
public Object getValue(InternalWorkingMemory workingMemory, Object object) {
    return MVELSafeHelper.getEvaluator().executeExpression(mvelExpression, object);
}
java
public static SingleInputSemanticProperties readSingleConstantAnnotations(UserCodeWrapper<?> udf) {
    // get constantSet annotation from stub
    AllFieldsConstants allConstants = udf.getUserCodeAnnotation(AllFieldsConstants.class);
    ConstantFields constantSet = udf.getUserCodeAnnotation(ConstantFields.class);
    ConstantFieldsExcept notConstantSet = udf.getUserCodeAnnotation(ConstantFieldsExcept.class);

    if (notConstantSet != null && (constantSet != null || allConstants != null)) {
        throw new RuntimeException("Either ConstantFields or ConstantFieldsExcept can be specified, not both.");
    }

    // extract notConstantSet from annotation
    if (notConstantSet != null) {
        FieldSet nonConstant = new FieldSet(notConstantSet.value());
        return new ImplicitlyForwardingSingleInputSemanticProperties(nonConstant);
    }

    // extract allConstants from annotation
    if (allConstants != null) {
        FieldSet nonConstant = new FieldSet();
        return new ImplicitlyForwardingSingleInputSemanticProperties(nonConstant);
    }

    SingleInputSemanticProperties semanticProperties = new SingleInputSemanticProperties();

    // extract constantSet from annotation
    if (constantSet != null) {
        for (int value : constantSet.value()) {
            semanticProperties.addForwardedField(value, value);
        }
    }

    return semanticProperties;
}
java
private static Element execute(SourceRange sourceRange, Property a, Property b) {

    assert (a != null);
    assert (b != null);

    Element result = null;

    if ((a instanceof LongProperty) && (b instanceof LongProperty)) {
        long l1 = ((Long) a.getValue()).longValue();
        long l2 = ((Long) b.getValue()).longValue();
        result = LongProperty.getInstance(l1 + l2);
    } else if ((a instanceof NumberProperty) && (b instanceof NumberProperty)) {
        double d1 = ((NumberProperty) a).doubleValue();
        double d2 = ((NumberProperty) b).doubleValue();
        result = DoubleProperty.getInstance(d1 + d2);
    } else if ((a instanceof StringProperty) && (b instanceof StringProperty)) {
        String s1 = (String) a.getValue();
        String s2 = (String) b.getValue();
        result = StringProperty.getInstance(s1 + s2);
    } else {
        throw new EvaluationException(MessageUtils.format(MSG_MISMATCHED_ARGS_ADD), sourceRange);
    }

    return result;
}
python
def get_distribution_dir(catalog_id, dataset_id, distribution_id,
                         catalogs_dir=CATALOGS_DIR, use_short_path=False):
    """Generates the standard filesystem path of a distribution within a catalog."""
    if use_short_path:
        catalog_path = os.path.join(catalogs_dir, "catalog", catalog_id)
        distribution_dir = os.path.join(catalog_path, dataset_id)
    else:
        catalog_path = os.path.join(catalogs_dir, "catalog", catalog_id)
        dataset_path = os.path.join(catalog_path, "dataset", dataset_id)
        distribution_dir = os.path.join(
            dataset_path, "distribution", distribution_id)

    return os.path.abspath(distribution_dir)
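An illustrative call (the ids and base directory are made up); with the default use_short_path=False the full catalog/dataset/distribution hierarchy is produced:

path = get_distribution_dir("example-catalog", "dataset-1", "dist-1",
                            catalogs_dir="/tmp/catalogs")
# /tmp/catalogs/catalog/example-catalog/dataset/dataset-1/distribution/dist-1
print(path)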
python
def handle(self, *args, **options):
    """
    By default, we're going to do this in chunks. This way, if there ends up being an error,
    we can check log messages and continue from that point after fixing the issue.
    """
    # Note that by taking last_id here, we're going to miss any submissions created *during* the command execution
    # But that's okay! All new entries have already been created using the new style, no action needed there
    last_id = Submission._objects.all().aggregate(Max('id'))['id__max']
    log.info("Beginning uuid update")
    current = options['start']
    while current < last_id:
        end_chunk = current + options['chunk'] if last_id - options['chunk'] >= current else last_id
        log.info("Updating entries in range [{}, {}]".format(current, end_chunk))
        with transaction.atomic():
            for submission in Submission._objects.filter(id__gte=current, id__lte=end_chunk).iterator():
                submission.save(update_fields=['uuid'])
        time.sleep(options['wait'])
        current = end_chunk + 1
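A stripped-down sketch of the same chunked-backfill pattern; fetch_rows and resave are hypothetical stand-ins for the Django ORM calls above:

import time

def fetch_rows(start, end):
    # Hypothetical fetcher standing in for the queryset above.
    return range(start, end + 1)

def resave(row):
    # Hypothetical writer standing in for submission.save(...).
    pass

def backfill(last_id, start=0, chunk=1000, wait=0.1):
    current = start
    while current < last_id:
        end_chunk = current + chunk if last_id - chunk >= current else last_id
        for row in fetch_rows(current, end_chunk):
            resave(row)
        time.sleep(wait)  # throttle between chunks, as the command above does
        current = end_chunk + 1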
python
def get_field(brain_or_object, name, default=None):
    """Return the named field of the given brain or object, or `default`
    if no such field exists.
    """
    fields = get_fields(brain_or_object)
    return fields.get(name, default)
python
def decode(cls, line): """Remove backslash escaping from line.value.""" if line.encoded: encoding = getattr(line, 'encoding_param', None) if encoding and encoding.upper() == cls.base64string: line.value = b64decode(line.value) else: line.value = stringToTextValues(line.value)[0] line.encoded = False
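The base64 branch above reduces to a b64decode call; a minimal round-trip showing just that step (the payload is made up):

from base64 import b64decode, b64encode

encoded = b64encode(b'binary payload')   # what an encoded line.value holds
assert b64decode(encoded) == b'binary payload'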
java
public static String getShortName(final ZoneId self, Locale locale) {
    return self.getDisplayName(TextStyle.SHORT, locale);
}
java
public void setReplyToAddresses(java.util.Collection<String> replyToAddresses) {
    if (replyToAddresses == null) {
        this.replyToAddresses = null;
        return;
    }

    this.replyToAddresses = new com.amazonaws.internal.SdkInternalList<String>(replyToAddresses);
}
java
public static <T> T last(Iterable<T> iterable) {
    if (iterable instanceof List<?>) {
        List<T> list = (List<T>) iterable;
        if (list.isEmpty())
            return null;
        return list.get(list.size() - 1);
    } else if (iterable instanceof SortedSet) {
        SortedSet<T> sortedSet = (SortedSet<T>) iterable;
        if (sortedSet.isEmpty())
            return null;
        return sortedSet.last();
    } else {
        return IteratorExtensions.last(iterable.iterator());
    }
}
python
def get_injectable_func_source_data(name): """ Return data about an injectable function's source, including file name, line number, and source code. Parameters ---------- name : str Returns ------- filename : str lineno : int The line number on which the function starts. source : str """ if injectable_type(name) != 'function': raise ValueError('injectable {!r} is not a function'.format(name)) inj = get_raw_injectable(name) if isinstance(inj, _InjectableFuncWrapper): return utils.func_source_data(inj._func) elif hasattr(inj, '__wrapped__'): return utils.func_source_data(inj.__wrapped__) else: return utils.func_source_data(inj)
python
def render_GET(self, request): """Renders a GET request, by showing this nodes stats and children.""" fullPath = request.path.split('/') if not fullPath[-1]: fullPath = fullPath[:-1] parts = fullPath[2:] statDict = util.lookup(scales.getStats(), parts) if statDict is None: request.setResponseCode(404) return "Path not found." if 'query' in request.args: query = request.args['query'][0] else: query = None if 'format' in request.args and request.args['format'][0] == 'json': request.headers['content-type'] = 'text/javascript; charset=UTF-8' formats.jsonFormat(request, statDict, query) elif 'format' in request.args and request.args['format'][0] == 'prettyjson': request.headers['content-type'] = 'text/javascript; charset=UTF-8' formats.jsonFormat(request, statDict, query, pretty=True) else: formats.htmlHeader(request, '/' + '/'.join(parts), self.serverName, query) formats.htmlFormat(request, tuple(parts), statDict, query) return ''
python
def drop_duplicates(self, subset=None, keep='first', inplace=False): """ Return DataFrame with duplicate rows removed, optionally only considering certain columns. Indexes, including time indexes are ignored. Parameters ---------- subset : column label or sequence of labels, optional Only consider certain columns for identifying duplicates, by default use all of the columns keep : {'first', 'last', False}, default 'first' - ``first`` : Drop duplicates except for the first occurrence. - ``last`` : Drop duplicates except for the last occurrence. - False : Drop all duplicates. inplace : boolean, default False Whether to drop duplicates in place or to return a copy Returns ------- DataFrame """ if self.empty: return self.copy() inplace = validate_bool_kwarg(inplace, 'inplace') duplicated = self.duplicated(subset, keep=keep) if inplace: inds, = (-duplicated)._ndarray_values.nonzero() new_data = self._data.take(inds) self._update_inplace(new_data) else: return self[-duplicated]
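Typical usage of the method above (the data values are made up):

import pandas as pd

df = pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'x', 'y']})
df.drop_duplicates()                           # drops the duplicate row, keeps the first
df.drop_duplicates(subset=['a'], keep='last')  # dedupe on column 'a' only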
java
public boolean remove(Object key, Object value) {
    Set<V> values = map.get(key);

    // If the key was not mapped to any values
    if (values == null)
        return false;

    boolean removed = values.remove(value);
    if (removed)
        range--;

    // if this was the last value mapping for this key, remove the
    // key altogether
    if (values.size() == 0)
        map.remove(key);

    return removed;
}
python
def log_entry_generator(log_instance): """ :yield: The next LogEntry from the REST API :raise: StopIteration when there are no more log entries to show, please note that if you call this again at a later time the REST API could have different results and more data could be returned """ current_page_num = 0 while True: has_results = False for log_entry in log_instance.get_page(current_page_num): has_results = True yield log_entry if not has_results: break current_page_num += 1
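A usage sketch with a hypothetical log object exposing get_page(page_num); an empty page signals exhaustion, matching the generator's stop condition:

class FakeLog(object):
    pages = [['a', 'b'], ['c'], []]

    def get_page(self, page_num):
        return self.pages[page_num] if page_num < len(self.pages) else []

for entry in log_entry_generator(FakeLog()):
    print(entry)  # prints a, b, c, then stops on the empty page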
python
def getPopUpURL(self, CorpNum, NTSConfirmNum, UserID=None):
    """ Popup URL for viewing a HomeTax electronic tax invoice.
        args
            CorpNum : Popbill member's business registration number
            NTSConfirmNum : NTS (National Tax Service) approval number
            UserID : Popbill member user ID
        return
            popup URL for viewing the electronic tax invoice
        raise
            PopbillException
    """
    if NTSConfirmNum is None or len(NTSConfirmNum) != 24:
        raise PopbillException(-99999999, "The NTS approval number (NTSConfirmNum) is not valid.")

    return self._httpget('/HomeTax/Taxinvoice/' + NTSConfirmNum + '/PopUp', CorpNum, UserID).url
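Hypothetical usage, assuming taxinvoiceService is an instance of the class above; both identifiers are made up (the NTS approval number must be 24 characters):

url = taxinvoiceService.getPopUpURL("1234567890",
                                    "2023010112345678901234AB")
print(url)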
python
def set_orthogonal_selection(self, selection, value, fields=None): """Modify data via a selection for each dimension of the array. Parameters ---------- selection : tuple A selection for each dimension of the array. May be any combination of int, slice, integer array or Boolean array. value : scalar or array-like Value to be stored into the array. fields : str or sequence of str, optional For arrays with a structured dtype, one or more fields can be specified to set data for. Examples -------- Setup a 2-dimensional array:: >>> import zarr >>> import numpy as np >>> z = zarr.zeros((5, 5), dtype=int) Set data for a selection of rows:: >>> z.set_orthogonal_selection(([1, 4], slice(None)), 1) >>> z[...] array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [1, 1, 1, 1, 1]]) Set data for a selection of columns:: >>> z.set_orthogonal_selection((slice(None), [1, 4]), 2) >>> z[...] array([[0, 2, 0, 0, 2], [1, 2, 1, 1, 2], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 2, 1, 1, 2]]) Set data for a selection of rows and columns:: >>> z.set_orthogonal_selection(([1, 4], [1, 4]), 3) >>> z[...] array([[0, 2, 0, 0, 2], [1, 3, 1, 1, 3], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 3, 1, 1, 3]]) For convenience, this functionality is also available via the `oindex` property. E.g.:: >>> z.oindex[[1, 4], [1, 4]] = 4 >>> z[...] array([[0, 2, 0, 0, 2], [1, 4, 1, 1, 4], [0, 2, 0, 0, 2], [0, 2, 0, 0, 2], [1, 4, 1, 1, 4]]) Notes ----- Orthogonal indexing is also known as outer indexing. Slices with step > 1 are supported, but slices with negative step are not. See Also -------- get_basic_selection, set_basic_selection, get_mask_selection, set_mask_selection, get_coordinate_selection, set_coordinate_selection, get_orthogonal_selection, vindex, oindex, __getitem__, __setitem__ """ # guard conditions if self._read_only: err_read_only() # refresh metadata if not self._cache_metadata: self._load_metadata_nosync() # setup indexer indexer = OrthogonalIndexer(selection, self) self._set_selection(indexer, value, fields=fields)