Dataset columns:
language (stringclasses, 2 values)
func_code_string (stringlengths, 63 to 466k)
python
def pop(self) -> Square:
    """
    Removes a square from the set and returns it.

    :raises: :exc:`KeyError` on an empty set.
    """
    if not self.mask:
        raise KeyError("pop from empty SquareSet")

    square = lsb(self.mask)
    self.mask &= (self.mask - 1)
    return square
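The key step above is the classic lowest-set-bit trick. A minimal standalone sketch of it, using a plain integer in place of the library's SquareSet/lsb helpers (so the names here are illustrative only):

# Squares modeled as bit positions in a plain int, mirroring pop() above.
def lowest_set_bit(bb: int) -> int:
    return (bb & -bb).bit_length() - 1

mask = 0b10010100            # three "squares": 2, 4 and 7
while mask:
    square = lowest_set_bit(mask)
    mask &= mask - 1         # clear the lowest set bit, exactly as pop() does
    print(square)            # 2, then 4, then 7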
java
protected void setObjects(PreparedStatement ps, Object... objects) throws SQLException {
    int index = 1;
    for (Object obj : objects) {
        ps.setObject(index, obj);
        index++;
    }
}
java
@Nonnull
public final <B> ImmutableList<B> bind(@Nonnull F<A, ImmutableList<B>> f) {
    return this.flatMap(f);
}
java
public void setAddresses(java.util.Collection<Address> addresses) {
    if (addresses == null) {
        this.addresses = null;
        return;
    }
    this.addresses = new com.amazonaws.internal.SdkInternalList<Address>(addresses);
}
java
public static void copyFile(File fromFile, File toFile) throws IOException {
    copyFile(new FileInputStream(fromFile), new FileOutputStream(toFile));
}
java
@Override
public void close() throws TTIOException {
    try {
        mService.shutdown();
        mService.awaitTermination(10, TimeUnit.SECONDS);
    } catch (InterruptedException exc) {
        throw new TTIOException(exc);
    }
    mFirstWriter.close();
    mSecondWriter.close();
}
python
def parse_bed(bed_file):
    """Import a BED file (where the data entries are analogous to what may be
    expected in an info_frags.txt file) and return a scaffold dictionary,
    similarly to parse_info_frags.
    """
    new_scaffolds = {}
    with open(bed_file) as bed_handle:
        for line in bed_handle:
            # Six fields are unpacked, so only take the first six columns.
            chrom, start, end, query, qual, strand = line.split()[:6]
            if strand == "+":
                ori = 1
            elif strand == "-":
                ori = -1
            else:
                raise ValueError(
                    "Error when parsing strand "
                    "orientation: {}".format(strand)
                )
            if int(qual) > 0:
                bed_bin = [query, -2, int(start), int(end), ori]
                try:
                    new_scaffolds[chrom].append(bed_bin)
                except KeyError:
                    new_scaffolds[chrom] = [bed_bin]
    return new_scaffolds
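A small usage sketch for the parser above; the file name and fragment names are invented for illustration:

# Two BED records on scaffold "scaf1", one forward and one reverse.
with open("example.bed", "w") as handle:
    handle.write("scaf1\t0\t100\tfragA\t60\t+\n")
    handle.write("scaf1\t100\t250\tfragB\t60\t-\n")

scaffolds = parse_bed("example.bed")
print(scaffolds)
# {'scaf1': [['fragA', -2, 0, 100, 1], ['fragB', -2, 100, 250, -1]]}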
java
public void marshall(UpdateTrailRequest updateTrailRequest, ProtocolMarshaller protocolMarshaller) { if (updateTrailRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(updateTrailRequest.getName(), NAME_BINDING); protocolMarshaller.marshall(updateTrailRequest.getS3BucketName(), S3BUCKETNAME_BINDING); protocolMarshaller.marshall(updateTrailRequest.getS3KeyPrefix(), S3KEYPREFIX_BINDING); protocolMarshaller.marshall(updateTrailRequest.getSnsTopicName(), SNSTOPICNAME_BINDING); protocolMarshaller.marshall(updateTrailRequest.getIncludeGlobalServiceEvents(), INCLUDEGLOBALSERVICEEVENTS_BINDING); protocolMarshaller.marshall(updateTrailRequest.getIsMultiRegionTrail(), ISMULTIREGIONTRAIL_BINDING); protocolMarshaller.marshall(updateTrailRequest.getEnableLogFileValidation(), ENABLELOGFILEVALIDATION_BINDING); protocolMarshaller.marshall(updateTrailRequest.getCloudWatchLogsLogGroupArn(), CLOUDWATCHLOGSLOGGROUPARN_BINDING); protocolMarshaller.marshall(updateTrailRequest.getCloudWatchLogsRoleArn(), CLOUDWATCHLOGSROLEARN_BINDING); protocolMarshaller.marshall(updateTrailRequest.getKmsKeyId(), KMSKEYID_BINDING); protocolMarshaller.marshall(updateTrailRequest.getIsOrganizationTrail(), ISORGANIZATIONTRAIL_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
@SneakyThrows public static Resource prepareClasspathResourceIfNeeded(final Resource resource, final boolean isDirectory, final String containsName) { LOGGER.trace("Preparing possible classpath resource [{}]", resource); if (resource == null) { LOGGER.debug("No resource defined to prepare. Returning null"); return null; } if (org.springframework.util.ResourceUtils.isFileURL(resource.getURL())) { return resource; } val url = org.springframework.util.ResourceUtils.extractArchiveURL(resource.getURL()); val file = org.springframework.util.ResourceUtils.getFile(url); val casDirectory = new File(FileUtils.getTempDirectory(), "cas"); val destination = new File(casDirectory, resource.getFilename()); if (isDirectory) { LOGGER.trace("Creating resource directory [{}]", destination); FileUtils.forceMkdir(destination); FileUtils.cleanDirectory(destination); } else if (destination.exists()) { LOGGER.trace("Deleting resource directory [{}]", destination); FileUtils.forceDelete(destination); } LOGGER.trace("Processing file [{}]", file); try (val jFile = new JarFile(file)) { val e = jFile.entries(); while (e.hasMoreElements()) { val entry = e.nextElement(); val name = entry.getName(); LOGGER.trace("Comparing [{}] against [{}] and pattern [{}]", name, resource.getFilename(), containsName); if (name.contains(resource.getFilename()) && RegexUtils.find(containsName, name)) { try (val stream = jFile.getInputStream(entry)) { var copyDestination = destination; if (isDirectory) { val entryFileName = new File(name); copyDestination = new File(destination, entryFileName.getName()); } LOGGER.trace("Copying resource entry [{}] to [{}]", name, copyDestination); try (val writer = Files.newBufferedWriter(copyDestination.toPath(), StandardCharsets.UTF_8)) { IOUtils.copy(stream, writer, StandardCharsets.UTF_8); } } } } } return new FileSystemResource(destination); }
java
public SpatialReferenceSystem queryForOrganizationCoordsysId( String organization, long organizationCoordsysId) throws SQLException { SpatialReferenceSystem srs = null; QueryBuilder<SpatialReferenceSystem, Long> qb = queryBuilder(); qb.where().like(SpatialReferenceSystem.COLUMN_ORGANIZATION, organization); qb.where().eq(SpatialReferenceSystem.COLUMN_ORGANIZATION_COORDSYS_ID, organizationCoordsysId); PreparedQuery<SpatialReferenceSystem> preparedQuery = qb.prepare(); List<SpatialReferenceSystem> results = query(preparedQuery); if (!results.isEmpty()) { if (results.size() > 1) { throw new SQLException("More than one " + SpatialReferenceSystem.class.getSimpleName() + " returned for Organization: " + organization + ", Organization Coordsys Id: " + organizationCoordsysId); } srs = results.get(0); } return srs; }
python
def _query_filter(search, urlkwargs, definitions):
    """Ingest query filter in query."""
    filters, urlkwargs = _create_filter_dsl(urlkwargs, definitions)

    for filter_ in filters:
        search = search.filter(filter_)

    return (search, urlkwargs)
java
public <T0, T1> DataSource<Tuple2<T0, T1>> types(Class<T0> type0, Class<T1> type1) { TupleTypeInfo<Tuple2<T0, T1>> types = TupleTypeInfo.getBasicTupleTypeInfo(type0, type1); CsvInputFormat<Tuple2<T0, T1>> inputFormat = new CsvInputFormat<Tuple2<T0, T1>>(path); configureInputFormat(inputFormat, type0, type1); return new DataSource<Tuple2<T0, T1>>(executionContext, inputFormat, types); }
java
public static int unsignedUnion2by2( final short[] set1, final int offset1, final int length1, final short[] set2, final int offset2, final int length2, final short[] buffer) { if (0 == length2) { System.arraycopy(set1, offset1, buffer, 0, length1); return length1; } if (0 == length1) { System.arraycopy(set2, offset2, buffer, 0, length2); return length2; } int pos = 0; int k1 = offset1, k2 = offset2; short s1 = set1[k1]; short s2 = set2[k2]; while (true) { int v1 = toIntUnsigned(s1); int v2 = toIntUnsigned(s2); if (v1 < v2) { buffer[pos++] = s1; ++k1; if (k1 >= length1 + offset1) { System.arraycopy(set2, k2, buffer, pos, length2 - k2 + offset2); return pos + length2 - k2 + offset2; } s1 = set1[k1]; } else if (v1 == v2) { buffer[pos++] = s1; ++k1; ++k2; if (k1 >= length1 + offset1) { System.arraycopy(set2, k2, buffer, pos, length2 - k2 + offset2); return pos + length2 - k2 + offset2; } if (k2 >= length2 + offset2) { System.arraycopy(set1, k1, buffer, pos, length1 - k1 + offset1); return pos + length1 - k1 + offset1; } s1 = set1[k1]; s2 = set2[k2]; } else {// if (set1[k1]>set2[k2]) buffer[pos++] = s2; ++k2; if (k2 >= length2 + offset2) { System.arraycopy(set1, k1, buffer, pos, length1 - k1 + offset1); return pos + length1 - k1 + offset1; } s2 = set2[k2]; } } // return pos; }
java
public void marshall(UnsubscribeFromDatasetRequest unsubscribeFromDatasetRequest, ProtocolMarshaller protocolMarshaller) { if (unsubscribeFromDatasetRequest == null) { throw new SdkClientException("Invalid argument passed to marshall(...)"); } try { protocolMarshaller.marshall(unsubscribeFromDatasetRequest.getIdentityPoolId(), IDENTITYPOOLID_BINDING); protocolMarshaller.marshall(unsubscribeFromDatasetRequest.getIdentityId(), IDENTITYID_BINDING); protocolMarshaller.marshall(unsubscribeFromDatasetRequest.getDatasetName(), DATASETNAME_BINDING); protocolMarshaller.marshall(unsubscribeFromDatasetRequest.getDeviceId(), DEVICEID_BINDING); } catch (Exception e) { throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e); } }
java
public static String getValueFromStaticMapping(String mapping, String key) {
    Map<String, String> m = Splitter.on(";")
            .omitEmptyStrings()
            .trimResults()
            .withKeyValueSeparator("=")
            .split(mapping);
    return m.get(key);
}
java
public static void main(final String[] args) {
    for (int i = 0; i < 10; i++) {
        perfTestEncode(i);
        perfTestDecode(i);
    }
}
java
public static <K,V> Level0MapOperator<Map<K,V>,K,V> on(final Map<K,V> target) {
    return onMap(target);
}
python
def iter_multichunks(iterable, chunksizes, bordermode=None): """ CommandLine: python -m utool.util_iter --test-iter_multichunks Example: >>> # ENABLE_DOCTEST >>> from utool.util_iter import * # NOQA >>> import utool as ut >>> iterable = list(range(20)) >>> chunksizes = (3, 2, 3) >>> bordermode = 'cycle' >>> genresult = iter_multichunks(iterable, chunksizes, bordermode) >>> multichunks = list(genresult) >>> depthprofile = ut.depth_profile(multichunks) >>> assert depthprofile[1:] == chunksizes, 'did not generate chunks correctly' >>> result = ut.repr4(list(map(str, multichunks)), nobr=True) >>> print(result) '[[[0, 1, 2], [3, 4, 5]], [[6, 7, 8], [9, 10, 11]], [[12, 13, 14], [15, 16, 17]]]', '[[[18, 19, 0], [1, 2, 3]], [[4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15]]]', Example1: >>> # ENABLE_DOCTEST >>> from utool.util_iter import * # NOQA >>> import utool as ut >>> iterable = list(range(7)) >>> # when chunksizes is len == 1, then equlivalent to ichunks >>> chunksizes = (3,) >>> bordermode = 'cycle' >>> genresult = iter_multichunks(iterable, chunksizes, bordermode) >>> multichunks = list(genresult) >>> depthprofile = ut.depth_profile(multichunks) >>> assert depthprofile[1:] == chunksizes, 'did not generate chunks correctly' >>> result = str(multichunks) >>> print(result) [[0, 1, 2], [3, 4, 5], [6, 0, 1]] """ chunksize = reduce(operator.mul, chunksizes) for chunk in ichunks(iterable, chunksize, bordermode=bordermode): reshaped_chunk = chunk for d in chunksizes[1:][::-1]: reshaped_chunk = list(ichunks(reshaped_chunk, d)) yield reshaped_chunk
python
def build(self):
    """
    Build the simulation components from the model.

    @return: A runnable simulation object
    @rtype: lems.sim.sim.Simulation
    """
    self.sim = Simulation()

    for component_id in self.model.targets:
        if component_id not in self.model.components:
            raise SimBuildError("Unable to find target component '{0}'",
                                component_id)
        component = self.model.fat_components[component_id]
        runnable = self.build_runnable(component)
        self.sim.add_runnable(runnable)

    return self.sim
java
public static String serializeKeys(Set<String> foreignKeys) {
    if (null == foreignKeys || foreignKeys.isEmpty()) {
        return null;
    }
    StringBuilder sb = new StringBuilder();
    for (String key : foreignKeys) {
        if (sb.length() > 0) {
            sb.append(Constants.FOREIGN_KEY_SEPARATOR);
        }
        sb.append(key);
    }
    return sb.toString();
}
python
def remove_legend(self):
    """ Removes legend actor """
    if hasattr(self, 'legend'):
        self.remove_actor(self.legend, reset_camera=False)
        self._render()
python
def cluster(self, dist_type='cosine', run_clustering=True, dendro=True,
            views=['N_row_sum', 'N_row_var'], linkage_type='average',
            sim_mat=False, filter_sim=0.1, calc_cat_pval=False,
            run_enrichr=None, enrichrgram=None):
    '''
    The main function performs hierarchical clustering, optionally generates
    filtered views (e.g. row-filtered views), and generates the
    ``visualization_json``.
    '''
    initialize_net.viz(self)

    make_clust_fun.make_clust(self, dist_type=dist_type,
                              run_clustering=run_clustering,
                              dendro=dendro,
                              requested_views=views,
                              linkage_type=linkage_type,
                              sim_mat=sim_mat,
                              filter_sim=filter_sim,
                              calc_cat_pval=calc_cat_pval,
                              run_enrichr=run_enrichr,
                              enrichrgram=enrichrgram)
python
def save_as_pdf_pages(plots, filename=None, path=None, verbose=True, **kwargs): """ Save multiple :class:`ggplot` objects to a PDF file, one per page. Parameters ---------- plots : collection or generator of :class:`ggplot` Plot objects to write to file. `plots` may be either a collection such as a :py:class:`list` or :py:class:`set`: >>> base_plot = ggplot(…) >>> plots = [base_plot + ggtitle('%d of 3' % i) for i in range(1, 3)] >>> save_as_pdf_pages(plots) or, a generator that yields :class:`ggplot` objects: >>> def myplots(): >>> for i in range(1, 3): >>> yield ggplot(…) + ggtitle('%d of 3' % i) >>> save_as_pdf_pages(myplots()) filename : :py:class:`str`, optional File name to write the plot to. If not specified, a name like “plotnine-save-<hash>.pdf” is used. path : :py:class:`str`, optional Path to save plot to (if you just want to set path and not filename). verbose : :py:class:`bool` If ``True``, print the saving information. kwargs : :py:class:`dict` Additional arguments to pass to :py:meth:`matplotlib.figure.Figure.savefig`. Notes ----- Using pandas' :meth:`~pandas.DataFrame.groupby` methods, tidy data can be “faceted” across pages: >>> from plotnine.data import mtcars >>> def facet_pages(column) >>> base_plot = [ >>> aes(x='wt', y='mpg', label='name'), >>> geom_text(), >>> ] >>> for label, group_data in mtcars.groupby(column): >>> yield ggplot(group_data) + base_plot + ggtitle(label) >>> save_as_pdf_pages(facet_pages('cyl')) Unlike :meth:`ggplot.save`, :meth:`save_as_pdf_pages` does not process arguments for `height` or `width`. To set the figure size, add :class:`~plotnine.themes.themeable.figure_size` to the theme for some or all of the objects in `plots`: >>> plot = ggplot(…) >>> # The following are equivalent >>> plot.save('filename.pdf', height=6, width=8) >>> save_as_pdf_pages([plot + theme(figure_size=(8, 6))]) """ from itertools import chain from matplotlib.backends.backend_pdf import PdfPages # as in ggplot.save() fig_kwargs = {'bbox_inches': 'tight'} fig_kwargs.update(kwargs) figure = [None] # If plots is already an iterator, this is a no-op; otherwise convert a # list, etc. to an iterator plots = iter(plots) peek = [] # filename, depends on the object if filename is None: # Take the first element from the iterator, store it, and use it to # generate a file name peek = [next(plots)] filename = peek[0]._save_filename('pdf') if path: filename = os.path.join(path, filename) if verbose: warn('Filename: {}'.format(filename), PlotnineWarning) with PdfPages(filename) as pdf: # Re-add the first element to the iterator, if it was removed for plot in chain(peek, plots): try: fig = figure[0] = plot.draw() # as in ggplot.save() facecolor = fig.get_facecolor() edgecolor = fig.get_edgecolor() if edgecolor: fig_kwargs['facecolor'] = facecolor if edgecolor: fig_kwargs['edgecolor'] = edgecolor fig_kwargs['frameon'] = True # Save as a page in the PDF file pdf.savefig(figure[0], **fig_kwargs) except AttributeError as err: msg = 'non-ggplot object of %s: %s' % (type(plot), plot) raise TypeError(msg) from err except Exception: raise finally: # Close the figure whether or not there was an exception, to # conserve memory when plotting a large number of pages figure[0] and plt.close(figure[0])
java
public ServiceFuture<List<EventTypeInner>> listEventTypesAsync(String topicTypeName, final ServiceCallback<List<EventTypeInner>> serviceCallback) {
    return ServiceFuture.fromResponse(listEventTypesWithServiceResponseAsync(topicTypeName), serviceCallback);
}
python
def _read_openephys(openephys_file): """Read the channel labels and their respective files from the 'Continuous_Data.openephys' file Parameters ---------- openephys_file : Path path to Continuous_Data.openephys inside the open-ephys folder Returns ------- int sampling frequency list of dict list of channels containing the label, the filename and the gain """ root = ElementTree.parse(openephys_file).getroot() channels = [] for recording in root: s_freq = float(recording.attrib['samplerate']) for processor in recording: for channel in processor: channels.append(channel.attrib) return s_freq, channels
python
def _parse_extra_args(self, api, extra_args_raw): """ Parses extra arguments into a map keyed on particular data types. """ extra_args = {} def invalid(msg, extra_arg_raw): print('Invalid --extra-arg:%s: %s' % (msg, extra_arg_raw), file=sys.stderr) sys.exit(1) for extra_arg_raw in extra_args_raw: try: extra_arg = json.loads(extra_arg_raw) except ValueError as e: invalid(str(e), extra_arg_raw) # Validate extra_arg JSON blob if 'match' not in extra_arg: invalid('No match key', extra_arg_raw) elif (not isinstance(extra_arg['match'], list) or len(extra_arg['match']) != 2): invalid('match key is not a list of two strings', extra_arg_raw) elif (not isinstance(extra_arg['match'][0], six.text_type) or not isinstance(extra_arg['match'][1], six.text_type)): print(type(extra_arg['match'][0])) invalid('match values are not strings', extra_arg_raw) elif 'arg_name' not in extra_arg: invalid('No arg_name key', extra_arg_raw) elif not isinstance(extra_arg['arg_name'], six.text_type): invalid('arg_name is not a string', extra_arg_raw) elif 'arg_type' not in extra_arg: invalid('No arg_type key', extra_arg_raw) elif not isinstance(extra_arg['arg_type'], six.text_type): invalid('arg_type is not a string', extra_arg_raw) elif ('arg_docstring' in extra_arg and not isinstance(extra_arg['arg_docstring'], six.text_type)): invalid('arg_docstring is not a string', extra_arg_raw) attr_key, attr_val = extra_arg['match'][0], extra_arg['match'][1] extra_args.setdefault(attr_key, {})[attr_val] = \ (extra_arg['arg_name'], extra_arg['arg_type'], extra_arg.get('arg_docstring')) # Extra arguments, keyed on data type objects. extra_args_for_types = {} # Locate data types that contain extra arguments for namespace in api.namespaces.values(): for route in namespace.routes: extra_parameters = [] if is_user_defined_type(route.arg_data_type): for attr_key in route.attrs: if attr_key not in extra_args: continue attr_val = route.attrs[attr_key] if attr_val in extra_args[attr_key]: extra_parameters.append(extra_args[attr_key][attr_val]) if len(extra_parameters) > 0: extra_args_for_types[route.arg_data_type] = extra_parameters return extra_args_for_types
python
def throttle(self):
    """Uses time.monotonic() (or time.sleep() if not available) to limit to
    the desired rate. Should be called once per iteration of action which is
    to be throttled. Returns None unless a custom wait_cmd was specified in
    the constructor in which case its return value is used if a wait was
    required.
    """
    iterations = self.__iterations
    timestamp = monotonic()
    outdated_threshold = timestamp - self.__interval
    with self.__lock:
        # remove any iterations older than interval
        try:
            while iterations[0] < outdated_threshold:
                iterations.popleft()
        except IndexError:
            pass
        # apply throttling if rate would be exceeded
        if len(iterations) <= self.__max_iterations:
            iterations.append(timestamp)
            retval = None
        else:
            # wait until oldest sample is too old
            delay = max(0, iterations[0] + self.__interval - timestamp)
            # only notify user about longer delays
            if delay > 1:
                logger.warning('Send throttling delay (interval=%d, max_iterations=%d): %.2fs',
                               self.__interval, self.__max_iterations, delay)
            retval = self.__wait_cmd(delay)
            # log actual addition time
            iterations.append(monotonic())
    return retval
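A condensed, standalone sketch of the same sliding-window idea; the class and attribute names here are illustrative, not the original's:

import time
from collections import deque

class SlidingWindowThrottle:
    """Allow at most max_iterations calls per interval seconds."""

    def __init__(self, max_iterations, interval):
        self.max_iterations = max_iterations
        self.interval = interval
        self.calls = deque()

    def throttle(self):
        now = time.monotonic()
        # drop timestamps that have fallen out of the window
        while self.calls and self.calls[0] < now - self.interval:
            self.calls.popleft()
        if len(self.calls) >= self.max_iterations:
            # sleep until the oldest call leaves the window
            time.sleep(max(0.0, self.calls[0] + self.interval - now))
        self.calls.append(time.monotonic())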
python
def _remaining_points(hands):
    '''
    :param list hands: hands for which to compute the remaining points
    :return: a list indicating the amount of points remaining in each of the
        input hands
    '''
    points = []
    for hand in hands:
        points.append(sum(d.first + d.second for d in hand))
    return points
python
def expand_config(dct, separator='.', skip_to=0,
                  key_func=lambda key: key.lower(),
                  key_parts_filter=lambda key_parts: True,
                  value_func=lambda value: value):
    """
    Expand a dictionary recursively by splitting keys along the separator.

    :param dct: a non-recursive dictionary
    :param separator: a separator character for splitting dictionary keys
    :param skip_to: index to start splitting keys on; can be used to skip over
        a key prefix
    :param key_func: a key mapping function
    :param key_parts_filter: a filter function for excluding keys
    :param value_func: a value mapping func
    """
    config = {}
    for key, value in dct.items():
        key_separator = separator(key) if callable(separator) else separator
        key_parts = key.split(key_separator)
        if not key_parts_filter(key_parts):
            continue
        key_config = config
        # skip prefix
        for key_part in key_parts[skip_to:-1]:
            key_config = key_config.setdefault(key_func(key_part), dict())
        key_config[key_func(key_parts[-1])] = value_func(value)
    return config
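A quick usage example for expand_config, assuming '.'-separated keys:

flat = {"db.host": "localhost", "db.port": 5432, "debug": True}
print(expand_config(flat))
# {'db': {'host': 'localhost', 'port': 5432}, 'debug': True}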
python
def dafgh():
    """
    Return (get) the handle of the DAF currently being searched.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dafgh_c.html

    :return: Handle for current DAF.
    :rtype: int
    """
    outvalue = ctypes.c_int()
    libspice.dafgh_c(ctypes.byref(outvalue))
    return outvalue.value
java
private void writeWBS(Task mpxj) { if (mpxj.getUniqueID().intValue() != 0) { WBSType xml = m_factory.createWBSType(); m_project.getWBS().add(xml); String code = mpxj.getWBS(); code = code == null || code.length() == 0 ? DEFAULT_WBS_CODE : code; Task parentTask = mpxj.getParentTask(); Integer parentObjectID = parentTask == null ? null : parentTask.getUniqueID(); xml.setCode(code); xml.setGUID(DatatypeConverter.printUUID(mpxj.getGUID())); xml.setName(mpxj.getName()); xml.setObjectId(mpxj.getUniqueID()); xml.setParentObjectId(parentObjectID); xml.setProjectObjectId(PROJECT_OBJECT_ID); xml.setSequenceNumber(Integer.valueOf(m_wbsSequence++)); xml.setStatus("Active"); } writeChildTasks(mpxj); }
python
def limits(self, low, high): """ Convenience function for determining appropriate limits in the API. If the (usually logged-in) client has the ``apihighlimits`` right, it will return *high*; otherwise it will return *low*. It's generally a good idea to use the highest limit possible; this reduces the amount of HTTP requests and therefore overhead. Read the API documentation for details on the limits for the function you are using. :param low: value to return if client does not have ``apihighlimits`` :param high: value to return if client has ``apihighlimits`` :returns: *low* or *high* """ if self._high_limits is None: result = self.call({'action': 'query', 'meta': 'userinfo', 'uiprop': 'rights'}) self._high_limits = 'apihighlimits' in \ result['query']['userinfo']['rights'] if self._high_limits: return high else: return low
java
private List<String> getDomainsFromUrl(URL url) { String host = url.getHost(); String[] paths = new String[] {}; if (url.getPath() != null) { paths = url.getPath().split("/"); } List<String> domains = new ArrayList<String>(paths.length + 1); StringBuilder relative = new StringBuilder().append("http://").append(host).append("/"); domains.add(relative.toString()); for (String path : paths) { if (path.length() > 0) { relative.append(path).append("/"); domains.add(relative.toString()); } } return domains; }
java
public void copyFrom(FileItem file) throws IOException, InterruptedException { if(channel==null) { try { file.write(writing(new File(remote))); } catch (IOException e) { throw e; } catch (Exception e) { throw new IOException(e); } } else { try (InputStream i = file.getInputStream(); OutputStream o = write()) { org.apache.commons.io.IOUtils.copy(i,o); } } }
python
def get_computers(self, filterTerm=None, domain=None): """ Return hosts from the database. """ cur = self.conn.cursor() # if we're returning a single host by ID if self.is_computer_valid(filterTerm): cur.execute("SELECT * FROM computers WHERE id=? LIMIT 1", [filterTerm]) # if we're filtering by domain controllers elif filterTerm == 'dc': if domain: cur.execute("SELECT * FROM computers WHERE dc=1 AND LOWER(domain)=LOWER(?)", [domain]) else: cur.execute("SELECT * FROM computers WHERE dc=1") # if we're filtering by ip/hostname elif filterTerm and filterTerm != "": cur.execute("SELECT * FROM computers WHERE ip LIKE ? OR LOWER(hostname) LIKE LOWER(?)", ['%{}%'.format(filterTerm), '%{}%'.format(filterTerm)]) # otherwise return all computers else: cur.execute("SELECT * FROM computers") results = cur.fetchall() cur.close() return results
java
@BetaApi(
    "The surface for long-running operations is not stable yet and may change in the future.")
public final OperationFuture<Empty, Struct> restoreAgentAsync(RestoreAgentRequest request) {
    return restoreAgentOperationCallable().futureCall(request);
}
java
@Nonnull
public JSVar var (@Nonnull @Nonempty final String sName, final boolean bInitValue) throws JSNameAlreadyExistsException {
    return var (sName, JSExpr.lit (bInitValue));
}
java
public ServiceFuture<ApplicationSecurityGroupInner> beginCreateOrUpdateAsync(String resourceGroupName, String applicationSecurityGroupName, ApplicationSecurityGroupInner parameters, final ServiceCallback<ApplicationSecurityGroupInner> serviceCallback) { return ServiceFuture.fromResponse(beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, applicationSecurityGroupName, parameters), serviceCallback); }
python
def c_Prada(z, m, h=h, Om_M=Om_M, Om_L=Om_L): """Concentration from c(M) relation published in Prada et al. (2012). Parameters ---------- z : float or array_like Redshift(s) of halos. m : float or array_like Mass(es) of halos (m200 definition), in units of solar masses. h : float, optional Hubble parameter. Default is from Planck13. Om_M : float, optional Matter density parameter. Default is from Planck13. Om_L : float, optional Cosmological constant density parameter. Default is from Planck13. Returns ---------- ndarray Concentration values (c200) for halos. Notes ---------- This c(M) relation is somewhat controversial, due to its upturn in concentration for high masses (normally we expect concentration to decrease with increasing mass). See the reference below for discussion. References ---------- Calculation based on results of N-body simulations presented in: F. Prada, A.A. Klypin, A.J. Cuesta, J.E. Betancort-Rijo, and J. Primack, "Halo concentrations in the standard Lambda cold dark matter cosmology," Monthly Notices of the Royal Astronomical Society, Volume 423, Issue 4, pp. 3018-3030, 2012. """ z, m = _check_inputs(z, m) # EQ 13 x = (1. / (1. + z)) * (Om_L / Om_M)**(1. / 3.) # EQ 12 intEQ12 = np.zeros(len(x)) # integral for i in range(len(x)): # v is integration variable temp = integrate.quad(lambda v: (v / (1 + v**3.))**(1.5), 0, x[i]) intEQ12[i] = temp[0] Da = 2.5 * ((Om_M / Om_L)**(1. / 3.)) * (np.sqrt(1. + x**3.) / (x**(1.5))) * intEQ12 # EQ 23 y = (1.e+12) / (h * m) sigma = Da * (16.9 * y**0.41) / (1. + (1.102 * y**0.2) + (6.22 * y**0.333)) # EQ 21 & 22 (constants) c0 = 3.681 c1 = 5.033 alpha = 6.948 x0 = 0.424 s0 = 1.047 # sigma_0^-1 s1 = 1.646 # sigma_1^-1 beta = 7.386 x1 = 0.526 # EQ 19 & 20 cmin = c0 + (c1 - c0) * ((1. / np.pi) * np.arctan(alpha * (x - x0)) + 0.5) smin = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (x - x1)) + 0.5) # EQ 18 cmin1393 = c0 + (c1 - c0) * ((1. / np.pi) * np.arctan(alpha * (1.393 - x0)) + 0.5) smin1393 = s0 + (s1 - s0) * ((1. / np.pi) * np.arctan(beta * (1.393 - x1)) + 0.5) B0 = cmin / cmin1393 B1 = smin / smin1393 # EQ 15 sigma_prime = B1 * sigma # EQ 17 A = 2.881 b = 1.257 c = 1.022 d = 0.06 # EQ 16 Cs = A * ((sigma_prime / b)**c + 1.) * np.exp(d / (sigma_prime**2.)) # EQ 14 concentration = B0 * Cs return concentration
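For reference, the final assembly coded above as EQ 14 to 16, written out in equation form (symbols follow the variable names in the code):

\sigma' = B_1(x)\,\sigma(M, x), \qquad
\mathcal{C}(\sigma') = A \left[ \left( \frac{\sigma'}{b} \right)^{c} + 1 \right] \exp\!\left( \frac{d}{\sigma'^{2}} \right), \qquad
c_{200} = B_0(x)\,\mathcal{C}(\sigma'),

with A = 2.881, b = 1.257, c = 1.022 and d = 0.06.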
java
public Object getField(Object instance, String fieldname, boolean isStatic) throws IllegalAccessException { FieldReaderWriter fieldReaderWriter = locateField(fieldname); if (isStatic && !fieldReaderWriter.isStatic()) { throw new IncompatibleClassChangeError("Expected static field " + fieldReaderWriter.theField.getDeclaringTypeName() + "." + fieldReaderWriter.theField.getName()); } else if (!isStatic && fieldReaderWriter.isStatic()) { throw new IncompatibleClassChangeError("Expected non-static field " + fieldReaderWriter.theField.getDeclaringTypeName() + "." + fieldReaderWriter.theField.getName()); } Object o = null; if (fieldReaderWriter.isStatic()) { o = fieldReaderWriter.getStaticFieldValue(getClazz(), null); } else { o = fieldReaderWriter.getValue(instance, null); } return o; }
python
def parseCCUSysVar(self, data):
    """Helper to parse type of system variables of CCU"""
    if data['type'] == 'LOGIC':
        return data['name'], data['value'] == 'true'
    elif data['type'] == 'NUMBER':
        return data['name'], float(data['value'])
    elif data['type'] == 'LIST':
        return data['name'], int(data['value'])
    else:
        return data['name'], data['value']
python
def _set_adj_3way_state(self, v, load=False): """ Setter method for adj_3way_state, mapped from YANG variable /adj_neighbor_entries_state/adj_neighbor/adj_3way_state (isis-dcm-3way-adj-state) If this variable is read-only (config: false) in the source YANG file, then _set_adj_3way_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_adj_3way_state() directly. YANG Description: Adjacency 3 Way Hand Shaking State """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'adj-3way-state-init': {'value': 1}, u'adj-3way-state-invalid': {'value': 4}, u'adj-3way-state-down': {'value': 2}, u'adj-3way-state-accept': {'value': 3}, u'adj-3way-state-up': {'value': 0}},), is_leaf=True, yang_name="adj-3way-state", rest_name="adj-3way-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-dcm-3way-adj-state', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """adj_3way_state must be of a type compatible with isis-dcm-3way-adj-state""", 'defined-type': "brocade-isis-operational:isis-dcm-3way-adj-state", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'adj-3way-state-init': {'value': 1}, u'adj-3way-state-invalid': {'value': 4}, u'adj-3way-state-down': {'value': 2}, u'adj-3way-state-accept': {'value': 3}, u'adj-3way-state-up': {'value': 0}},), is_leaf=True, yang_name="adj-3way-state", rest_name="adj-3way-state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-dcm-3way-adj-state', is_config=False)""", }) self.__adj_3way_state = t if hasattr(self, '_set'): self._set()
python
def handle_form_submit(self): """Handle form submission """ protect.CheckAuthenticator(self.request) logger.info("Handle ResultsInterpration Submit") # Save the results interpretation res = self.request.form.get("ResultsInterpretationDepts", []) self.context.setResultsInterpretationDepts(res) self.add_status_message(_("Changes Saved"), level="info") # reindex the object after save to update all catalog metadata self.context.reindexObject() # notify object edited event event.notify(ObjectEditedEvent(self.context))
python
def drawSquiggle(self, p1, p2, breadth = 2): """Draw a squiggly line from p1 to p2. """ p1 = Point(p1) p2 = Point(p2) S = p2 - p1 # vector start - end rad = abs(S) # distance of points cnt = 4 * int(round(rad / (4 * breadth), 0)) # always take full phases if cnt < 4: raise ValueError("points too close") mb = rad / cnt # revised breadth matrix = TOOLS._hor_matrix(p1, p2) # normalize line to x-axis i_mat = ~matrix # get original position k = 2.4142135623765633 # y of drawCurve helper point points = [] # stores edges for i in range (1, cnt): if i % 4 == 1: # point "above" connection p = Point(i, -k) * mb elif i % 4 == 3: # point "below" connection p = Point(i, k) * mb else: # else on connection line p = Point(i, 0) * mb points.append(p * i_mat) points = [p1] + points + [p2] cnt = len(points) i = 0 while i + 2 < cnt: self.drawCurve(points[i], points[i+1], points[i+2]) i += 2 return p2
python
def reset(self, data, size):
    """
    Set new contents for frame
    """
    return lib.zframe_reset(self._as_parameter_, data, size)
python
def chunks(arr, size):
    """Splits a list into chunks

    :param arr: list to split
    :type arr: :class:`list`
    :param size: number of elements in each chunk
    :type size: :class:`int`
    :return: generator object
    :rtype: :class:`generator`
    """
    for i in _range(0, len(arr), size):
        yield arr[i:i + size]
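A quick usage example (here `_range` is presumably the module's Python 2/3 compatibility alias for range):

print(list(chunks([1, 2, 3, 4, 5], 2)))
# [[1, 2], [3, 4], [5]]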
python
def untrace_modules(self, modules): """ Untraces given modules. :param modules: Modules to untrace. :type modules: list :return: Method success. :rtype: bool """ for module in modules: foundations.trace.untrace_module(module) self.__model__refresh_attributes() return True
python
def DecompressMessageList(cls, packed_message_list):
    """Decompress the message data from packed_message_list.

    Args:
      packed_message_list: A PackedMessageList rdfvalue with some data in it.

    Returns:
      a MessageList rdfvalue.

    Raises:
      DecodingError: If decompression fails.
    """
    compression = packed_message_list.compression
    if compression == rdf_flows.PackedMessageList.CompressionType.UNCOMPRESSED:
        data = packed_message_list.message_list
    elif (compression ==
          rdf_flows.PackedMessageList.CompressionType.ZCOMPRESSION):
        try:
            data = zlib.decompress(packed_message_list.message_list)
        except zlib.error as e:
            raise DecodingError("Failed to decompress: %s" % e)
    else:
        raise DecodingError("Compression scheme not supported")

    try:
        result = rdf_flows.MessageList.FromSerializedString(data)
    except rdfvalue.DecodeError:
        raise DecodingError("RDFValue parsing failed.")

    return result
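The ZCOMPRESSION branch is a plain zlib round trip; a self-contained sketch of that step, without the rdfvalue types:

import zlib

payload = b"serialized MessageList bytes"
packed = zlib.compress(payload)        # what the sending side would store

try:
    data = zlib.decompress(packed)     # the decompression step above
except zlib.error as e:
    raise ValueError("Failed to decompress: %s" % e)

assert data == payload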
python
def get_dsdl_signature_source_definition(self): """ Returns normalized DSDL definition text. Please refer to the specification for details about normalized DSDL definitions. """ txt = StringIO() txt.write(self.full_name + '\n') def adjoin(attrs): return txt.write('\n'.join(x.get_normalized_definition() for x in attrs) + '\n') if self.kind == CompoundType.KIND_SERVICE: if self.request_union: txt.write('\n@union\n') adjoin(self.request_fields) txt.write('\n---\n') if self.response_union: txt.write('\n@union\n') adjoin(self.response_fields) elif self.kind == CompoundType.KIND_MESSAGE: if self.union: txt.write('\n@union\n') adjoin(self.fields) else: error('Compound type of unknown kind [%s]', self.kind) return txt.getvalue().strip().replace('\n\n\n', '\n').replace('\n\n', '\n')
python
def main(_): """Run the sample attack""" # Images for inception classifier are normalized to be in [-1, 1] interval, # eps is a difference between pixels so it should be in [0, 2] interval. # Renormalizing epsilon from [0, 255] to [0, 2]. eps = 2.0 * FLAGS.max_epsilon / 255.0 alpha = 2.0 * FLAGS.iter_alpha / 255.0 num_iter = FLAGS.num_iter batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3] nb_classes = 1001 tf.logging.set_verbosity(tf.logging.INFO) all_images_taget_class = load_target_class(FLAGS.input_dir) with tf.Graph().as_default(): # Prepare graph x_input = tf.placeholder(tf.float32, shape=batch_shape) x_max = tf.clip_by_value(x_input + eps, -1.0, 1.0) x_min = tf.clip_by_value(x_input - eps, -1.0, 1.0) with slim.arg_scope(inception.inception_v3_arg_scope()): inception.inception_v3( x_input, num_classes=nb_classes, is_training=False) x_adv = x_input target_class_input = tf.placeholder(tf.int32, shape=[FLAGS.batch_size]) one_hot_target_class = tf.one_hot(target_class_input, nb_classes) for _ in range(num_iter): with slim.arg_scope(inception.inception_v3_arg_scope()): logits, end_points = inception.inception_v3( x_adv, num_classes=nb_classes, is_training=False, reuse=True) cross_entropy = tf.losses.softmax_cross_entropy(one_hot_target_class, logits, label_smoothing=0.1, weights=1.0) cross_entropy += tf.losses.softmax_cross_entropy(one_hot_target_class, end_points['AuxLogits'], label_smoothing=0.1, weights=0.4) x_next = x_adv - alpha * tf.sign(tf.gradients(cross_entropy, x_adv)[0]) x_next = tf.clip_by_value(x_next, x_min, x_max) x_adv = x_next # Run computation saver = tf.train.Saver(slim.get_model_variables()) session_creator = tf.train.ChiefSessionCreator( scaffold=tf.train.Scaffold(saver=saver), checkpoint_filename_with_path=FLAGS.checkpoint_path, master=FLAGS.master) with tf.train.MonitoredSession(session_creator=session_creator) as sess: for filenames, images in load_images(FLAGS.input_dir, batch_shape): target_class_for_batch = ( [all_images_taget_class[n] for n in filenames] + [0] * (FLAGS.batch_size - len(filenames))) adv_images = sess.run(x_adv, feed_dict={ x_input: images, target_class_input: target_class_for_batch }) save_images(adv_images, filenames, FLAGS.output_dir)
java
protected FieldMetadata createParameterizedFieldMetadataFrom( Type type ) { FieldMetadata parameterizedTypeFieldMetadata = null; if (type.isSimpleType()) { SimpleType simpleType = (SimpleType)type; parameterizedTypeFieldMetadata = FieldMetadata.parametrizedType(JavaMetadataUtil.getName(simpleType.getName())); } // TODO also process QualifiedType return parameterizedTypeFieldMetadata; }
python
def load_validation_plugin(name=None): """Find and load the chosen validation plugin. Args: name (string): the name of the entry_point, as advertised in the setup.py of the providing package. Returns: an uninstantiated subclass of ``bigchaindb.validation.AbstractValidationRules`` """ if not name: return BaseValidationRules # TODO: This will return the first plugin with group `bigchaindb.validation` # and name `name` in the active WorkingSet. # We should probably support Requirements specs in the config, e.g. # validation_plugin: 'my-plugin-package==0.0.1;default' plugin = None for entry_point in iter_entry_points('bigchaindb.validation', name): plugin = entry_point.load() # No matching entry_point found if not plugin: raise ResolutionError( 'No plugin found in group `bigchaindb.validation` with name `{}`'. format(name)) # Is this strictness desireable? # It will probably reduce developer headaches in the wild. if not issubclass(plugin, (BaseValidationRules,)): raise TypeError('object of type "{}" does not implement `bigchaindb.' 'validation.BaseValidationRules`'.format(type(plugin))) return plugin
java
@Override
public <A extends Annotation> A getAnnotation(Class<A> annotationClass) {
    return method.getAnnotation(annotationClass);
}
java
public int getOntologyTermDistance(OntologyTerm ontologyTerm1, OntologyTerm ontologyTerm2) { String nodePath1 = getOntologyTermNodePath(ontologyTerm1); String nodePath2 = getOntologyTermNodePath(ontologyTerm2); if (StringUtils.isEmpty(nodePath1)) { throw new MolgenisDataAccessException( "The nodePath cannot be null : " + ontologyTerm1.toString()); } if (StringUtils.isEmpty(nodePath2)) { throw new MolgenisDataAccessException( "The nodePath cannot be null : " + ontologyTerm2.toString()); } return calculateNodePathDistance(nodePath1, nodePath2); }
python
def parse_hh_mm(self):
    """Parses raw time

    :return: Time parsed
    """
    split_count = self.raw.count(":")
    if split_count == 1:  # hh:mm
        return datetime.strptime(self.raw, "%H:%M").time()
    return datetime.strptime(self.raw, "%M").time()
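The two branches amount to the following strptime calls, shown here directly:

from datetime import datetime

print(datetime.strptime("14:30", "%H:%M").time())  # 14:30:00 (raw contains one ':')
print(datetime.strptime("45", "%M").time())        # 00:45:00 (raw is minutes only)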
java
public static Pair<INDArray, INDArray> mergeLabels(@NonNull INDArray[][] labelsToMerge, INDArray[][] labelMasksToMerge, int inOutIdx) { Pair<INDArray[], INDArray[]> p = selectColumnFromMDSData(labelsToMerge, labelMasksToMerge, inOutIdx); return mergeLabels(p.getFirst(), p.getSecond()); }
java
public static Calendar getDateCeil(Date date) {
    Calendar cal = new GregorianCalendar();
    cal.setTime(date);
    return new GregorianCalendar(cal.get(Calendar.YEAR), cal.get(Calendar.MONTH), cal.get(Calendar.DAY_OF_MONTH));
}
python
def getresponse(self): """Wait for and return a HTTP response. The return value will be a :class:`HttpMessage`. When this method returns only the response header has been read. The response body can be read using :meth:`~gruvi.Stream.read` and similar methods on the message :attr:`~HttpMessage.body`. Note that if you use persistent connections (the default), it is required that you read the entire body of each response. If you don't then deadlocks may occur. """ if self._error: raise compat.saved_exc(self._error) elif self._transport is None: raise HttpError('not connected') message = self._queue.get(timeout=self._timeout) if isinstance(message, Exception): raise compat.saved_exc(message) return message
python
def query_relative(self, query, event_time=None, relative_duration_before=None, relative_duration_after=None):
    """Perform the query and calculate the time range based on the relative values."""
    assert event_time is None or isinstance(event_time, datetime.datetime)
    assert relative_duration_before is None or isinstance(relative_duration_before, str)
    assert relative_duration_after is None or isinstance(relative_duration_after, str)

    if event_time is None:
        # use now as the default
        event_time = datetime.datetime.now()

    # use preconfigured defaults
    if relative_duration_before is None:
        relative_duration_before = self.relative_duration_before
    if relative_duration_after is None:
        relative_duration_after = self.relative_duration_after

    time_start = event_time - create_timedelta(relative_duration_before)
    time_end = event_time + create_timedelta(relative_duration_after)

    return self.query_with_time(query, time_start, time_end)
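A small illustration of the window arithmetic, substituting datetime.timedelta for the module's create_timedelta helper (an assumption made just for this sketch):

import datetime

event_time = datetime.datetime(2024, 1, 1, 12, 0, 0)
before = datetime.timedelta(minutes=15)
after = datetime.timedelta(minutes=5)

time_start = event_time - before   # 2024-01-01 11:45:00
time_end = event_time + after      # 2024-01-01 12:05:00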
java
private Map<String, String[]> findAllFiles(Collection<String> pathsToScan, String[] includesPattern, Collection<String> excludes) { Map<String, String[]> pathToIncludedFilesMap = new HashMap<>(); pathsToScan.stream().forEach(scanFolder -> { String[] includedFiles = getDirectoryContent(new File(scanFolder).getPath(), includesPattern, excludes.toArray(new String[excludes.size()]), false, false); pathToIncludedFilesMap.put(new File(scanFolder).getAbsolutePath(), includedFiles); }); return pathToIncludedFilesMap; }
java
private List<ResolveTask> collectProcessingTopics(final Collection<FileInfo> fis, final KeyScope rootScope, final Document doc) { final List<ResolveTask> res = new ArrayList<>(); final FileInfo input = job.getFileInfo(fi -> fi.isInput).iterator().next(); res.add(new ResolveTask(rootScope, input, null)); // Collect topics from map and rewrite topicrefs for duplicates walkMap(doc.getDocumentElement(), rootScope, res); // Collect topics not in map and map itself for (final FileInfo f: fis) { if (!usage.containsKey(f.uri)) { res.add(processTopic(f, rootScope, f.isResourceOnly)); } } final List<ResolveTask> deduped = removeDuplicateResolveTargets(res); if (fileInfoFilter != null) { return adjustResourceRenames(deduped.stream() .filter(rs -> fileInfoFilter.test(rs.in)) .collect(Collectors.toList())); } else { return adjustResourceRenames(deduped); } }
python
def forward_iter(self, X, training=False, device='cpu'): """Yield outputs of module forward calls on each batch of data. The storage device of the yielded tensors is determined by the ``device`` parameter. Parameters ---------- X : input data, compatible with skorch.dataset.Dataset By default, you should be able to pass: * numpy arrays * torch tensors * pandas DataFrame or Series * scipy sparse CSR matrices * a dictionary of the former three * a list/tuple of the former three * a Dataset If this doesn't work with your data, you have to pass a ``Dataset`` that can deal with the data. training : bool (default=False) Whether to set the module to train mode or not. device : string (default='cpu') The device to store each inference result on. This defaults to CPU memory since there is genereally more memory available there. For performance reasons this might be changed to a specific CUDA device, e.g. 'cuda:0'. Yields ------ yp : torch tensor Result from a forward call on an individual batch. """ dataset = self.get_dataset(X) iterator = self.get_iterator(dataset, training=training) for data in iterator: Xi = unpack_data(data)[0] yp = self.evaluation_step(Xi, training=training) if isinstance(yp, tuple): yield tuple(n.to(device) for n in yp) else: yield yp.to(device)
java
private ValueNumber[] popInputValues(int numWordsConsumed) { ValueNumberFrame frame = getFrame(); ValueNumber[] inputValueList = allocateValueNumberArray(numWordsConsumed); // Pop off the input operands. try { frame.getTopStackWords(inputValueList); while (numWordsConsumed-- > 0) { frame.popValue(); } } catch (DataflowAnalysisException e) { throw new InvalidBytecodeException("Error getting input operands", e); } return inputValueList; }
java
@Override public List<SampleRowKeysResponse> sampleRowKeys(SampleRowKeysRequest request) { if (shouldOverrideAppProfile(request.getAppProfileId())) { request = request.toBuilder().setAppProfileId(clientDefaultAppProfileId).build(); } return createStreamingListener(request, sampleRowKeysAsync, request.getTableName()) .getBlockingResult(); }
java
private static int _parseEndOfLine (@Nonnull final String sHeaderPart, final int nEnd) { int nIndex = nEnd; for (;;) { final int nOffset = sHeaderPart.indexOf ('\r', nIndex); if (nOffset == -1 || nOffset + 1 >= sHeaderPart.length ()) throw new IllegalStateException ("Expected headers to be terminated by an empty line."); if (sHeaderPart.charAt (nOffset + 1) == '\n') return nOffset; nIndex = nOffset + 1; } }
java
@Override
public DescribeAgentsResult describeAgents(DescribeAgentsRequest request) {
    request = beforeClientExecution(request);
    return executeDescribeAgents(request);
}
python
def _resolve_placeholder(placeholder, original): """Resolve a placeholder to the given original object. :param placeholder: The placeholder to resolve, in place. :type placeholder: dict :param original: The object that the placeholder represents. :type original: dict """ new = copy.deepcopy(original) # The name remains the same. new["name"] = placeholder["name"] new["full_name"] = placeholder["full_name"] # Record where the placeholder originally came from. new["original_path"] = original["full_name"] # The source lines for this placeholder do not exist in this file. # The keys might not exist if original is a resolved placeholder. new.pop("from_line_no", None) new.pop("to_line_no", None) # Resolve the children stack = list(new.get("children", ())) while stack: child = stack.pop() # Relocate the child to the new location assert child["full_name"].startswith(original["full_name"]) suffix = child["full_name"][len(original["full_name"]) :] child["full_name"] = new["full_name"] + suffix # The source lines for this placeholder do not exist in this file. # The keys might not exist if original is a resolved placeholder. child.pop("from_line_no", None) child.pop("to_line_no", None) # Resolve the remaining children stack.extend(child.get("children", ())) placeholder.clear() placeholder.update(new)
python
def get_views(self, path, year=None, month=None, day=None, hour=None):
    """ Get the number of views for a Telegraph article

    :param path: Path to the Telegraph page
    :param year: Required if month is passed. If passed, the number of
        page views for the requested year will be returned
    :param month: Required if day is passed. If passed, the number of
        page views for the requested month will be returned
    :param day: Required if hour is passed. If passed, the number of
        page views for the requested day will be returned
    :param hour: If passed, the number of page views for the requested
        hour will be returned
    """
    return self._telegraph.method('getViews', path=path, values={
        'year': year,
        'month': month,
        'day': day,
        'hour': hour
    })
java
@BetaApi( "The surface for long-running operations is not stable yet and may change in the future.") public final OperationFuture<Instance, OperationMetadata> createInstanceAsync( LocationName parent, String instanceId, Instance instance) { CreateInstanceRequest request = CreateInstanceRequest.newBuilder() .setParent(parent == null ? null : parent.toString()) .setInstanceId(instanceId) .setInstance(instance) .build(); return createInstanceAsync(request); }
java
public long skip(long n) throws IOException {
    synchronized (lock) {
        ensureOpen();
        long avail = count - pos;

        if (n > avail) {
            n = avail;
        }
        if (n < 0) {
            return 0;
        }
        pos += n;
        return n;
    }
}
java
@Override
public double getPopulationVariance() {
    Variance populationVariance = new Variance(_getSecondMoment());
    populationVariance.setBiasCorrected(false);
    return populationVariance.getResult();
}
python
def load(self, value):
    """
    enforce env > value when loading from file
    """
    self.reset(
        value,
        validator=self.__dict__.get('validator'),
        env=self.__dict__.get('env'),
    )
python
def lastmod(self, category):
    """Return the last modification of the entry."""
    lastitems = EntryModel.objects.published().order_by('-modification_date').filter(
        categories=category).only('modification_date')
    return lastitems[0].modification_date
python
def getarchive(project, user, format=None): """Generates and returns a download package (or 403 if one is already in the process of being prepared)""" if os.path.isfile(Project.path(project, user) + '.download'): #make sure we don't start two compression processes at the same time return withheaders(flask.make_response('Another compression is already running',403),headers={'allow_origin': settings.ALLOW_ORIGIN}) else: if not format: data = flask.request.values if 'format' in data: format = data['format'] else: format = 'zip' #default #validation, security contentencoding = None if format == 'zip': contenttype = 'application/zip' command = "/usr/bin/zip -r" #TODO: do not hard-code path! if os.path.isfile(Project.path(project, user) + "output/" + project + ".tar.gz"): os.unlink(Project.path(project, user) + "output/" + project + ".tar.gz") if os.path.isfile(Project.path(project, user) + "output/" + project + ".tar.bz2"): os.unlink(Project.path(project, user) + "output/" + project + ".tar.bz2") elif format == 'tar.gz': contenttype = 'application/x-tar' contentencoding = 'gzip' command = "/bin/tar -czf" if os.path.isfile(Project.path(project, user) + "output/" + project + ".zip"): os.unlink(Project.path(project, user) + "output/" + project + ".zip") if os.path.isfile(Project.path(project, user) + "output/" + project + ".tar.bz2"): os.unlink(Project.path(project, user) + "output/" + project + ".tar.bz2") elif format == 'tar.bz2': contenttype = 'application/x-bzip2' command = "/bin/tar -cjf" if os.path.isfile(Project.path(project, user) + "output/" + project + ".tar.gz"): os.unlink(Project.path(project, user) + "output/" + project + ".tar.gz") if os.path.isfile(Project.path(project, user) + "output/" + project + ".zip"): os.unlink(Project.path(project, user) + "output/" + project + ".zip") else: return withheaders(flask.make_response('Invalid archive format',403) ,headers={'allow_origin': settings.ALLOW_ORIGIN})#TODO: message won't show path = Project.path(project, user) + "output/" + project + "." + format if not os.path.isfile(path): printlog("Building download archive in " + format + " format") cmd = command + ' ' + project + '.' + format + ' *' printdebug(cmd) printdebug(Project.path(project, user)+'output/') process = subprocess.Popen(cmd, cwd=Project.path(project, user)+'output/', shell=True) if not process: return withheaders(flask.make_response("Unable to make download package",500),headers={'allow_origin': settings.ALLOW_ORIGIN}) else: pid = process.pid f = open(Project.path(project, user) + '.download','w') f.write(str(pid)) f.close() os.waitpid(pid, 0) #wait for process to finish os.unlink(Project.path(project, user) + '.download') extraheaders = {'allow_origin': settings.ALLOW_ORIGIN } if contentencoding: extraheaders['Content-Encoding'] = contentencoding return withheaders(flask.Response( getbinarydata(path) ), contenttype, extraheaders )
java
public SpannableStringBuilder replace(final int start, final int end, CharSequence tb, int tbstart, int tbend) { checkRange("replace", start, end); int filtercount = mFilters.length; for (int i = 0; i < filtercount; i++) { CharSequence repl = mFilters[i].filter(tb, tbstart, tbend, this, start, end); if (repl != null) { tb = repl; tbstart = 0; tbend = repl.length(); } } final int origLen = end - start; final int newLen = tbend - tbstart; if (origLen == 0 && newLen == 0 && !hasNonExclusiveExclusiveSpanAt(tb, tbstart)) { // This is a no-op iif there are no spans in tb that would be added (with a 0-length) // Early exit so that the text watchers do not get notified return this; } TextWatcher[] textWatchers = getSpans(start, start + origLen, TextWatcher.class); sendBeforeTextChanged(textWatchers, start, origLen, newLen); // Try to keep the cursor / selection at the same relative position during // a text replacement. If replaced or replacement text length is zero, this // is already taken care of. boolean adjustSelection = origLen != 0 && newLen != 0; int selectionStart = 0; int selectionEnd = 0; if (adjustSelection) { selectionStart = Selection.getSelectionStart(this); selectionEnd = Selection.getSelectionEnd(this); } change(start, end, tb, tbstart, tbend); if (adjustSelection) { if (selectionStart > start && selectionStart < end) { final int offset = (selectionStart - start) * newLen / origLen; selectionStart = start + offset; setSpan(false, Selection.SELECTION_START, selectionStart, selectionStart, Spanned.SPAN_POINT_POINT); } if (selectionEnd > start && selectionEnd < end) { final int offset = (selectionEnd - start) * newLen / origLen; selectionEnd = start + offset; setSpan(false, Selection.SELECTION_END, selectionEnd, selectionEnd, Spanned.SPAN_POINT_POINT); } } sendTextChanged(textWatchers, start, origLen, newLen); sendAfterTextChanged(textWatchers); // Span watchers need to be called after text watchers, which may update the layout sendToSpanWatchers(start, end, newLen - origLen); return this; }
java
protected List fetch() { EntityMetadata metadata = getEntityMetadata(); Client client = persistenceDelegeator.getClient(metadata); List results = isRelational(metadata) ? recursivelyPopulateEntities(metadata, client) : populateEntities( metadata, client); return results; }
java
public void getAllMapNames(Callback<List<SimpleName>> callback) throws NullPointerException {
    gw2API.getAllMapNames(GuildWars2.lang.getValue()).enqueue(callback);
}
java
public static void printHelp(PrintStream stream) { stream.println(); stream.println("Voldemort Admin Tool Async-Job Commands"); stream.println("---------------------------------------"); stream.println("list Get async job list from nodes."); stream.println("stop Stop async jobs on one node."); stream.println(); stream.println("To get more information on each command,"); stream.println("please try \'help async-job <command-name>\'."); stream.println(); }
python
def _do_close(self):
    """
    Tear down this object, after we've agreed to close with the server.
    """
    AMQP_LOGGER.debug('Closed channel #%d' % self.channel_id)
    self.is_open = False
    del self.connection.channels[self.channel_id]
    self.channel_id = self.connection = None
    self.callbacks = {}
java
@Pure public static boolean intersectsSolidSphereOrientedBox( double sphereCenterx, double sphereCentery, double sphereCenterz, double sphereRadius, double boxCenterx, double boxCentery, double boxCenterz, double boxAxis1x, double boxAxis1y, double boxAxis1z, double boxAxis2x, double boxAxis2y, double boxAxis2z, double boxAxis3x, double boxAxis3y, double boxAxis3z, double boxExtentAxis1, double boxExtentAxis2, double boxExtentAxis3) { // Find points on OBB closest and farest to sphere center Point3f closest = new Point3f(); Point3f farest = new Point3f(); AbstractOrientedBox3F.computeClosestFarestOBBPoints( sphereCenterx, sphereCentery, sphereCenterz, boxCenterx, boxCentery, boxCenterz, boxAxis1x, boxAxis1y, boxAxis1z, boxAxis2x, boxAxis2y, boxAxis2z, boxAxis3x, boxAxis3y, boxAxis3z, boxExtentAxis1, boxExtentAxis2, boxExtentAxis3, closest, farest); // Sphere and OBB intersect if the (squared) distance from sphere // center to point p is less than the (squared) sphere radius double squaredRadius = sphereRadius * sphereRadius; return (FunctionalPoint3D.distanceSquaredPointPoint( sphereCenterx, sphereCentery, sphereCenterz, closest.getX(), closest.getY(), closest.getZ()) < squaredRadius); }
python
def _add_index(self, index): """ Adds an index to the table. :param index: The index to add :type index: Index :rtype: Table """ index_name = index.get_name() index_name = self._normalize_identifier(index_name) replaced_implicit_indexes = [] for name, implicit_index in self._implicit_indexes.items(): if implicit_index.is_fullfilled_by(index) and name in self._indexes: replaced_implicit_indexes.append(name) already_exists = ( index_name in self._indexes and index_name not in replaced_implicit_indexes or self._primary_key_name is not False and index.is_primary() ) if already_exists: raise IndexAlreadyExists(index_name, self._name) for name in replaced_implicit_indexes: del self._indexes[name] del self._implicit_indexes[name] if index.is_primary(): self._primary_key_name = index_name self._indexes[index_name] = index return self
java
@Override
public CreateAccountResult createAccount(CreateAccountRequest request) {
    request = beforeClientExecution(request);
    return executeCreateAccount(request);
}
python
def _process_input(self, data, events):
    """
    _process_input will be notified when there is data ready on the
    serial connection to be read. It will read and process the data
    into an API Frame and then either resolve a frame future, or push
    the frame into the queue of frames needing to be processed.
    """
    frame = APIFrame(escaped=self._escaped)

    byte = self.serial.read()

    if byte != APIFrame.START_BYTE:
        return

    # Save all following bytes, if they are not empty
    if len(byte) == 1:
        frame.fill(byte)

    while frame.remaining_bytes() > 0:
        byte = self.serial.read()

        if len(byte) == 1:
            frame.fill(byte)

    try:
        # Try to parse and return result
        frame.parse()

        # Ignore empty frames
        if len(frame.data) == 0:
            return

        if self._frame_future is not None:
            self._frame_future.set_result(frame)
            self._frame_future = None
        else:
            self._frame_queue.append(frame)
    except ValueError:
        return
python
async def get_friendly_name(self) -> Text:
    """
    Let's use the first name of the user as the friendly name. In some
    cases the user object is incomplete, and in those cases the full
    user is fetched.
    """
    if 'first_name' not in self._user:
        user = await self._get_full_user()
    else:
        user = self._user

    return user.get('first_name')
python
def _reverse(viewname, args=None, kwargs=None, request=None, format=None, **extra):
    """
    Same as `django.core.urlresolvers.reverse`, but optionally takes a
    request and returns a fully qualified URL, using the request to get
    the base URL.
    """
    if format is not None:
        kwargs = kwargs or {}
        kwargs['format'] = format
    url = django_reverse(viewname, args=args, kwargs=kwargs, **extra)
    if request:
        return request.build_absolute_uri(url)
    return url
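A minimal usage sketch for the helper above; the view name, kwargs, and request object are hypothetical and not taken from the source, and a Django URLconf is assumed to be configured:

# Hypothetical call; with a request the URL is absolute, otherwise relative.
url = _reverse('user-detail', kwargs={'pk': 42}, request=request, format='json')
# e.g. 'https://api.example.com/users/42.json' when a request is passed,
# or '/users/42.json' when it is not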
python
def dirty(field, ttl=None):
    "decorator to cache the result of a function until a field changes"
    if ttl is not None:
        raise NotImplementedError('pg.dirty ttl feature')

    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *args, **kwargs):
            # warning: not reentrant
            d = self.dirty_cache[field] if field in self.dirty_cache else self.dirty_cache.setdefault(field, {})
            return d[f.__name__] if f.__name__ in d else d.setdefault(f.__name__, f(self, *args, **kwargs))
        return wrapper
    return decorator
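A usage sketch for the dirty decorator above, assuming the instance provides the dirty_cache dict the wrapper relies on; the class, field, and method names are hypothetical, and invalidation is done by hand here only to illustrate the caching behaviour:

import functools

class Doc:
    def __init__(self):
        # field name -> {method name: cached result}, as expected by dirty()
        self.dirty_cache = {}
        self.body = 'hello world'

    @dirty('body')
    def word_count(self):
        return len(self.body.split())

doc = Doc()
doc.word_count()                    # computed once and cached under 'body'
doc.word_count()                    # served from dirty_cache
doc.dirty_cache.pop('body', None)   # simulate the 'body' field changing
doc.word_count()                    # recomputed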
java
public List executeNativeQuery(String jsonClause, EntityMetadata entityMetadata) {
    List entities = new ArrayList();
    String[] tempArray = jsonClause.split("\\.");
    String tempClause = tempArray[tempArray.length - 1];

    if (tempClause.contains("findOne(") || tempClause.contains("findAndModify(")) {
        DBObject obj = (BasicDBObject) executeScript(jsonClause);
        populateEntity(entityMetadata, entities, obj);
        return entities;
    } else if (tempClause.contains("find(") || jsonClause.contains("aggregate(")) {
        jsonClause = jsonClause.concat(".toArray()");
        BasicDBList list = (BasicDBList) executeScript(jsonClause);
        for (Object obj : list) {
            populateEntity(entityMetadata, entities, (DBObject) obj);
        }
        return entities;
    } else if (tempClause.contains("count(") || tempClause.contains("dataSize(")
            || tempClause.contains("storageSize(") || tempClause.contains("totalIndexSize(")
            || tempClause.contains("totalSize(")) {
        Long count = ((Double) executeScript(jsonClause)).longValue();
        entities.add(count);
        return entities;
    } else if (tempClause.contains("distinct(")) {
        BasicDBList list = (BasicDBList) executeScript(jsonClause);
        for (Object obj : list) {
            entities.add(obj);
        }
        return entities;
    } else if (jsonClause.contains("mapReduce(")) {
        final MapReduceCommand command = parseMapReduceCommand(jsonClause);
        final MapReduceOutput output = mongoDb.getCollection(command.getInput()).mapReduce(command);

        final BasicDBList list = new BasicDBList();
        for (final DBObject item : output.results()) {
            list.add(item);
        }
        return list;
    } else {
        BasicDBList list = (BasicDBList) executeScript(jsonClause);
        for (Object obj : list) {
            entities.add(obj);
        }
        return entities;
    }
}
python
def nPr(n, r):
    """
    Calculates nPr.

    Args:
        n (int): total number of items.
        r (int): items to permute

    Returns:
        nPr.
    """
    f = math.factorial
    return int(f(n) / f(n - r))
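A quick worked check of the formula above, assuming nPr and the math import are in scope: nPr(5, 2) = 5! / (5 - 2)! = 120 / 6 = 20.

import math

assert nPr(5, 2) == 20   # 120 / 6
assert nPr(4, 4) == 24   # 24 / 1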
python
def complete_offer(self, offer_id, complete_dict):
    """
    Completes an offer

    :param complete_dict: the complete dict with the template id
    :param offer_id: the offer id
    :return: Response
    """
    return self._create_put_request(
        resource=OFFERS,
        billomat_id=offer_id,
        command=COMPLETE,
        send_data=complete_dict
    )
python
def delete(self, force_drop=False, *args, **kwargs):
    """
    Deletes this row. Drops the tenant's schema if the attribute
    auto_drop_schema is set to True.
    """
    if connection.schema_name not in (self.schema_name, get_public_schema_name()):
        raise Exception("Can't delete tenant outside its own schema or "
                        "the public schema. Current schema is %s."
                        % connection.schema_name)

    if schema_exists(self.schema_name) and (self.auto_drop_schema or force_drop):
        cursor = connection.cursor()
        cursor.execute('DROP SCHEMA IF EXISTS %s CASCADE' % self.schema_name)

    return super(TenantMixin, self).delete(*args, **kwargs)
python
def _get_desired_pkg(name, desired):
    '''
    Helper function that retrieves and nicely formats the desired pkg (and
    version if specified) so that helpful information can be printed in the
    comment for the state.
    '''
    if not desired[name] or desired[name].startswith(('<', '>', '=')):
        oper = ''
    else:
        oper = '='
    return '{0}{1}{2}'.format(name, oper,
                              '' if not desired[name] else desired[name])
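A small illustration of the strings the helper above produces; the package names and versions are made up for the example:

desired = {'vim': '7.4', 'curl': None, 'nano': '>=2.0'}
_get_desired_pkg('vim', desired)    # 'vim=7.4'   (bare version gets '=' added)
_get_desired_pkg('curl', desired)   # 'curl'      (no version requested)
_get_desired_pkg('nano', desired)   # 'nano>=2.0' (operator already present)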
java
public static void writeFullBeanXml(Object object, OutputStream outputStream) {
    XMLEncoder encoder = XmlEncoders.createVerbose(outputStream);
    encoder.setExceptionListener(new ExceptionListener() {
        @Override
        public void exceptionThrown(Exception e) {
            throw new XmlException("Could not encode object", e);
        }
    });
    encoder.writeObject(object);
    encoder.flush();
    encoder.close();
}
python
def docx_text_from_xml(xml: str, config: TextProcessingConfig) -> str:
    """
    Converts an XML tree of a DOCX file to string contents.

    Args:
        xml: raw XML text
        config: :class:`TextProcessingConfig` control object

    Returns:
        contents as a string
    """
    root = ElementTree.fromstring(xml)
    return docx_text_from_xml_node(root, 0, config)
java
private void checkCallConventions(NodeTraversal t, Node n) {
    SubclassRelationship relationship =
        compiler.getCodingConvention().getClassesDefinedByCall(n);
    TypedScope scope = t.getTypedScope();
    if (relationship != null) {
        ObjectType superClass = TypeValidator.getInstanceOfCtor(
            scope.lookupQualifiedName(QualifiedName.of(relationship.superclassName)));
        ObjectType subClass = TypeValidator.getInstanceOfCtor(
            scope.lookupQualifiedName(QualifiedName.of(relationship.subclassName)));
        if (relationship.type == SubclassType.INHERITS
            && superClass != null
            && !superClass.isEmptyType()
            && subClass != null
            && !subClass.isEmptyType()) {
            validator.expectSuperType(n, superClass, subClass);
        }
    }
}
python
def add_missing(self, distribution, requirement):
    """
    Add a missing *requirement* for the given *distribution*.

    :type distribution: :class:`distutils2.database.InstalledDistribution`
                        or :class:`distutils2.database.EggInfoDistribution`
    :type requirement: ``str``
    """
    logger.debug('%s missing %r', distribution, requirement)
    self.missing.setdefault(distribution, []).append(requirement)
java
@Nullable
@Override
public final ImageFormat determineFormat(byte[] headerBytes, int headerSize) {
    Preconditions.checkNotNull(headerBytes);

    if (WebpSupportStatus.isWebpHeader(headerBytes, 0, headerSize)) {
        return getWebpFormat(headerBytes, headerSize);
    }

    if (isJpegHeader(headerBytes, headerSize)) {
        return DefaultImageFormats.JPEG;
    }

    if (isPngHeader(headerBytes, headerSize)) {
        return DefaultImageFormats.PNG;
    }

    if (isGifHeader(headerBytes, headerSize)) {
        return DefaultImageFormats.GIF;
    }

    if (isBmpHeader(headerBytes, headerSize)) {
        return DefaultImageFormats.BMP;
    }

    if (isIcoHeader(headerBytes, headerSize)) {
        return DefaultImageFormats.ICO;
    }

    if (isHeifHeader(headerBytes, headerSize)) {
        return DefaultImageFormats.HEIF;
    }

    return ImageFormat.UNKNOWN;
}
java
public static int lastIndexOf(final CharSequence seq, final int searchChar, final int startPos) {
    if (isEmpty(seq)) {
        return INDEX_NOT_FOUND;
    }
    return CharSequenceUtils.lastIndexOf(seq, searchChar, startPos);
}
python
def _connect(self):
    """Connect to the server."""
    assert get_thread_ident() == self.ioloop_thread_id
    if self._stream:
        self._logger.warn('Disconnecting existing connection to {0!r} '
                          'to create a new connection'
                          .format(self._bindaddr))
        self._disconnect()
        yield self._disconnected.until_set()
    stream = None
    try:
        host, port = self._bindaddr
        stream = self._stream = yield self._tcp_client.connect(
            host, port, max_buffer_size=self.MAX_MSG_SIZE)
        stream.set_close_callback(partial(self._stream_closed_callback,
                                          stream))
        # our message packets are small, don't delay sending them.
        stream.set_nodelay(True)
        stream.max_write_buffer_size = self.MAX_WRITE_BUFFER_SIZE
        self._logger.debug('Connected to {0} with client addr {1}'
                           .format(self.bind_address_string,
                                   address_to_string(stream.socket.getsockname())))
        if self._connect_failures >= 5:
            self._logger.warn("Reconnected to {0}"
                              .format(self.bind_address_string))
        self._connect_failures = 0
    except Exception as e:
        if self._connect_failures % 5 == 0:
            # warn on every fifth failure
            # TODO (NM 2015-03-04) This can get a bit verbose, and typically we have
            # other mechanisms for tracking failed connections. Consider doing some
            # kind of exponential backoff starting at 5 times the reconnect time up to
            # once per 5 minutes
            self._logger.debug("Failed to connect to {0!r}: {1}"
                               .format(self._bindaddr, e))
        self._connect_failures += 1
        stream = None
        yield gen.moment  # TODO some kind of error rate limiting?
        if self._stream:
            # Can't use _disconnect() and wait on self._disconnected, since
            # exception may have been raised before _stream_closed_callback
            # was attached to the iostream.
            self._logger.debug('stream was set even though connecting failed')
            self._stream.close()
            self._disconnected.set()

    if stream:
        self._disconnected.clear()
        self._connected.set()
        self.last_connect_time = self.ioloop.time()
        try:
            self.notify_connected(True)
        except Exception:
            self._logger.exception("Notify connect failed. Disconnecting.")
            self._disconnect()
python
def make_uninstall(parser):
    """
    Remove Ceph packages from remote hosts.
    """
    parser.add_argument(
        'host',
        metavar='HOST',
        nargs='+',
        help='hosts to uninstall Ceph from',
    )
    parser.set_defaults(
        func=uninstall,
    )
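A sketch of how this parser hook might be wired into an argparse subcommand; the program and subcommand names are hypothetical, and the uninstall callable referenced by set_defaults is assumed to be importable from the same module:

import argparse

parser = argparse.ArgumentParser(prog='ceph-deploy')
subparsers = parser.add_subparsers()
uninstall_parser = subparsers.add_parser(
    'uninstall', help='Remove Ceph packages from remote hosts.')
make_uninstall(uninstall_parser)

args = parser.parse_args(['uninstall', 'node1', 'node2'])
# args.host == ['node1', 'node2'] and args.func is the uninstall callable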
java
public static Response execute(int maxBuffer, String... command) {
    if (command.length == 0) {
        throw new IllegalArgumentException("Command must be provided.");
    }

    String[] commandAndArgs = command.length == 1 && command[0].contains(" ")
            ? Util.split(command[0], " ")
            : command;

    try {
        Process process = Runtime.getRuntime().exec(commandAndArgs);
        OutputReader stdOutReader = new OutputReader(process.getInputStream(), maxBuffer);
        OutputReader stdErrReader = new OutputReader(process.getErrorStream(), maxBuffer);
        Thread t1 = new Thread(stdOutReader);
        t1.start();
        Thread t2 = new Thread(stdErrReader);
        t2.start();
        int code = process.waitFor();
        t1.join();
        t2.join();
        String out = stdOutReader.getOutput();
        String err = stdErrReader.getOutput();
        return new Response(out, err, code);
    } catch (InterruptedException ie) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted");
    } catch (IOException e) {
        throw new RuntimeException(e);
    }
}