language — stringclasses, 2 values (python, java)
func_code_string — stringlengths, 63 to 466k
python
def get_staff_url(self):
    """
    Return the Admin URL for the current view.
    By default, it uses the :func:`get_staff_object` function to base the URL on.
    """
    object = self.get_staff_object()
    if object is not None:
        # View is likely using SingleObjectMixin
        return reverse(admin_urlname(object._meta, 'change'), args=(object.pk,))
    model = _get_view_model(self)
    if model is not None:
        # View is likely using MultipleObjectMixin (e.g. ListView)
        # Note: use model._meta here; object is None on this branch.
        return reverse(admin_urlname(model._meta, 'changelist'))
    return None
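The snippet relies on a module-level `_get_view_model` helper that is not shown. A minimal sketch of what such a helper might look like, assuming standard Django class-based-view attributes (the body below is illustrative, not the library's actual implementation):

def _get_view_model(view):
    # Resolve the model a class-based view operates on (illustrative only).
    model = getattr(view, 'model', None)
    if model is not None:
        return model
    queryset = getattr(view, 'queryset', None)
    if queryset is not None:
        return queryset.model
    return None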
java
public Properties getSystemProperties() throws Exception {
    final String[] address = { CORE_SERVICE, PLATFORM_MBEAN, "type", "runtime" };
    final ModelNode op = createReadAttributeRequest(true, "system-properties", Address.root().add(address));
    final ModelNode results = execute(op);
    if (isSuccess(results)) {
        // extract the DMR representation into a java Properties object
        final Properties sysprops = new Properties();
        final ModelNode node = getResults(results);
        final List<Property> propertyList = node.asPropertyList();
        for (Property property : propertyList) {
            final String name = property.getName();
            final ModelNode value = property.getValue();
            if (name != null) {
                sysprops.put(name, value != null ? value.asString() : "");
            }
        }
        return sysprops;
    } else {
        throw new FailureException(results, "Failed to get system properties");
    }
}
java
public void registerGraph(long graphId, @NonNull SameDiff graph, ExecutorConfiguration configuration) {
    val g = graph.asFlatGraph(graphId, configuration);
    val v = blockingStub.registerGraph(g);
    if (v.status() != 0)
        throw new ND4JIllegalStateException("registerGraph() gRPC call failed");
}
python
def _set_windows(self, ticks, bars):
    """ Set the tick and bar window sizes (be aware of the defaults). """
    self.tick_window = ticks
    self.bar_window = bars
python
def get_user_data_triplet(username, password):
    """Print out user data triplet: username, password verifier, salt."""
    context = SRPContext(username, password)
    username, password_verifier, salt = context.get_user_data_triplet(base64=True)
    click.secho('Username: %s' % username)
    click.secho('Password verifier: %s' % password_verifier)
    click.secho('Salt: %s' % salt)
python
def compile(stream_spec, cmd='ffmpeg', overwrite_output=False):
    """Build command-line for invoking ffmpeg.

    The :meth:`run` function uses this to build the command line arguments
    and should work in most cases, but calling this function directly is
    useful for debugging or if you need to invoke ffmpeg manually for
    whatever reason.

    This is the same as calling :meth:`get_args` except that it also
    includes the ``ffmpeg`` command as the first argument.
    """
    if isinstance(cmd, basestring):
        cmd = [cmd]
    elif type(cmd) != list:
        cmd = list(cmd)
    return cmd + get_args(stream_spec, overwrite_output=overwrite_output)
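For context, a usage sketch with the ffmpeg-python graph API (the exact argument order in the result can vary between versions):

import ffmpeg  # ffmpeg-python

stream = ffmpeg.input('in.mp4').output('out.mp4')
print(ffmpeg.compile(stream, overwrite_output=True))
# e.g. ['ffmpeg', '-i', 'in.mp4', 'out.mp4', '-y']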
java
private static List<ProtoFile> findRelateProtoFiles(File file, Set<String> dependencyNames) throws IOException {
    LinkedList<ProtoFile> protoFiles = new LinkedList<ProtoFile>();
    ProtoFile protoFile = ProtoSchemaParser.parse(file);
    protoFiles.addFirst(protoFile);
    String parent = file.getParent();
    // parse dependencies, to find all proto files pulled in by import statements
    List<String> dependencies = protoFile.getDependencies();
    if (dependencies != null && !dependencies.isEmpty()) {
        for (String fn : dependencies) {
            if (dependencyNames.contains(fn)) {
                continue;
            }
            File dependencyFile = new File(parent, fn);
            protoFiles.addAll(findRelateProtoFiles(dependencyFile, dependencyNames));
        }
    }
    List<String> publicDependencies = protoFile.getPublicDependencies();
    if (publicDependencies != null && !publicDependencies.isEmpty()) {
        for (String fn : publicDependencies) {
            if (dependencyNames.contains(fn)) {
                continue;
            }
            File dependencyFile = new File(parent, fn);
            protoFiles.addAll(findRelateProtoFiles(dependencyFile, dependencyNames));
        }
    }
    return protoFiles;
}
java
public void removeNotificationHandler(String serviceName, NotificationHandler handler) {
    ServiceInstanceUtils.validateServiceName(serviceName);
    if (handler == null) {
        throw new ServiceException(ErrorCode.SERVICE_DIRECTORY_NULL_ARGUMENT_ERROR,
                ErrorCode.SERVICE_DIRECTORY_NULL_ARGUMENT_ERROR.getMessageTemplate(),
                "NotificationHandler");
    }
    synchronized (notificationHandlers) {
        if (notificationHandlers.containsKey(serviceName)) {
            List<NotificationHandler> list = notificationHandlers.get(serviceName);
            if (list.contains(handler)) {
                list.remove(handler);
            }
            if (list.isEmpty()) {
                notificationHandlers.remove(serviceName);
            }
        }
    }
}
java
protected boolean containsFlag(final char flag, final String flags) {
    if (flags == null)
        return false;
    return flags.indexOf(flag) >= 0;
}
java
public ElementWithOptions setOptions(LinkedHashMap<String, String> options) {
    this.optionGroups.clear();
    for (Map.Entry<String, String> entry : options.entrySet()) {
        this.addOption(entry.getKey(), entry.getValue());
    }
    return this;
}
python
def get_interface_detail_output_interface_interface_name(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name = ET.SubElement(interface, "interface-name")
    interface_name.text = kwargs.pop('interface_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
java
@Override
public CommerceOrder findByG_C_Last(long groupId, long commerceAccountId,
        OrderByComparator<CommerceOrder> orderByComparator)
    throws NoSuchOrderException {

    CommerceOrder commerceOrder = fetchByG_C_Last(groupId, commerceAccountId,
            orderByComparator);

    if (commerceOrder != null) {
        return commerceOrder;
    }

    StringBundler msg = new StringBundler(6);

    msg.append(_NO_SUCH_ENTITY_WITH_KEY);

    msg.append("groupId=");
    msg.append(groupId);

    msg.append(", commerceAccountId=");
    msg.append(commerceAccountId);

    msg.append("}");

    throw new NoSuchOrderException(msg.toString());
}
java
public static <T extends Enum<T>> T valueOf(final Class<T> enumType, final String name) {
    return valueOf(enumType, name, null);
}
python
def save(self, data=None, shape=None, dtype=None, returnoffset=False, photometric=None, planarconfig=None, extrasamples=None, tile=None, contiguous=True, align=16, truncate=False, compress=0, rowsperstrip=None, predictor=False, colormap=None, description=None, datetime=None, resolution=None, subfiletype=0, software='tifffile.py', metadata={}, ijmetadata=None, extratags=()): """Write numpy array and tags to TIFF file. The data shape's last dimensions are assumed to be image depth, height (length), width, and samples. If a colormap is provided, the data's dtype must be uint8 or uint16 and the data values are indices into the last dimension of the colormap. If 'shape' and 'dtype' are specified, an empty array is saved. This option cannot be used with compression or multiple tiles. Image data are written uncompressed in one strip per plane by default. Dimensions larger than 2 to 4 (depending on photometric mode, planar configuration, and SGI mode) are flattened and saved as separate pages. The SampleFormat and BitsPerSample tags are derived from the data type. Parameters ---------- data : numpy.ndarray or None Input image array. shape : tuple or None Shape of the empty array to save. Used only if 'data' is None. dtype : numpy.dtype or None Data-type of the empty array to save. Used only if 'data' is None. returnoffset : bool If True and the image data in the file is memory-mappable, return the offset and number of bytes of the image data in the file. photometric : {'MINISBLACK', 'MINISWHITE', 'RGB', 'PALETTE', 'CFA'} The color space of the image data. By default, this setting is inferred from the data shape and the value of colormap. For CFA images, DNG tags must be specified in 'extratags'. planarconfig : {'CONTIG', 'SEPARATE'} Specifies if samples are stored interleaved or in separate planes. By default, this setting is inferred from the data shape. If this parameter is set, extra samples are used to store grayscale images. 'CONTIG': last dimension contains samples. 'SEPARATE': third last dimension contains samples. extrasamples : tuple of {'UNSPECIFIED', 'ASSOCALPHA', 'UNASSALPHA'} Defines the interpretation of extra components in pixels. 'UNSPECIFIED': no transparency information (default). 'ASSOCALPHA': single, true transparency with pre-multiplied color. 'UNASSALPHA': independent transparency masks. tile : tuple of int The shape (depth, length, width) of image tiles to write. If None (default), image data are written in strips. The tile length and width must be a multiple of 16. If the tile depth is provided, the SGI ImageDepth and TileDepth tags are used to save volume data. Unless a single tile is used, tiles cannot be used to write contiguous files. Few software can read the SGI format, e.g. MeVisLab. contiguous : bool If True (default) and the data and parameters are compatible with previous ones, if any, the image data are stored contiguously after the previous one. In that case, 'photometric', 'planarconfig', 'rowsperstrip', are ignored. Metadata such as 'description', 'metadata', 'datetime', and 'extratags' are written to the first page of a contiguous series only. align : int Byte boundary on which to align the image data in the file. Default 16. Use mmap.ALLOCATIONGRANULARITY for memory-mapped data. Following contiguous writes are not aligned. truncate : bool If True, only write the first page including shape metadata if possible (uncompressed, contiguous, not tiled). Other TIFF readers will only be able to read part of the data. 
compress : int or str or (str, int) If 0 (default), data are written uncompressed. If 0-9, the level of ADOBE_DEFLATE compression. If a str, one of TIFF.COMPRESSION, e.g. 'LZMA' or 'ZSTD'. If a tuple, first item is one of TIFF.COMPRESSION and second item is compression level. Compression cannot be used to write contiguous files. rowsperstrip : int The number of rows per strip. By default strips will be ~64 KB if compression is enabled, else rowsperstrip is set to the image length. Bilevel images are always stored in one strip per plane. predictor : bool If True, apply horizontal differencing or floating-point predictor before compression. colormap : numpy.ndarray RGB color values for the corresponding data value. Must be of shape (3, 2**(data.itemsize*8)) and dtype uint16. description : str The subject of the image. Must be 7-bit ASCII. Cannot be used with the ImageJ format. Saved with the first page only. datetime : datetime, str, or bool Date and time of image creation in '%Y:%m:%d %H:%M:%S' format or datetime object. Else if True, the current date and time is used. Saved with the first page only. resolution : (float, float[, str]) or ((int, int), (int, int)[, str]) X and Y resolutions in pixels per resolution unit as float or rational numbers. A third, optional parameter specifies the resolution unit, which must be None (default for ImageJ), 'INCH' (default), or 'CENTIMETER'. subfiletype : int Bitfield to indicate the kind of data. Set bit 0 if the image is a reduced-resolution version of another image. Set bit 1 if the image is part of a multi-page image. Set bit 2 if the image is transparency mask for another image (photometric must be MASK, SamplesPerPixel and BitsPerSample must be 1). software : str Name of the software used to create the file. Must be 7-bit ASCII. Saved with the first page only. metadata : dict Additional metadata to be saved along with shape information in JSON or ImageJ formats in an ImageDescription tag. If None, do not write a second ImageDescription tag. Strings must be 7-bit ASCII. Saved with the first page only. ijmetadata : dict Additional metadata to be saved in application specific IJMetadata and IJMetadataByteCounts tags. Refer to the imagej_metadata_tag function for valid keys and values. Saved with the first page only. extratags : sequence of tuples Additional tags as [(code, dtype, count, value, writeonce)]. code : int The TIFF tag Id. dtype : str Data type of items in 'value' in Python struct format. One of B, s, H, I, 2I, b, h, i, 2i, f, d, Q, or q. count : int Number of data values. Not used for string or byte string values. value : sequence 'Count' values compatible with 'dtype'. Byte strings must contain count values of dtype packed as binary data. writeonce : bool If True, the tag is written to the first page only. """ # TODO: refactor this function fh = self._fh byteorder = self._byteorder if data is None: if compress: raise ValueError('cannot save compressed empty file') datashape = shape datadtype = numpy.dtype(dtype).newbyteorder(byteorder) datadtypechar = datadtype.char else: data = numpy.asarray(data, byteorder+data.dtype.char, 'C') if data.size == 0: raise ValueError('cannot save empty array') datashape = data.shape datadtype = data.dtype datadtypechar = data.dtype.char returnoffset = returnoffset and datadtype.isnative bilevel = datadtypechar == '?' 
if bilevel: index = -1 if datashape[-1] > 1 else -2 datasize = product(datashape[:index]) if datashape[index] % 8: datasize *= datashape[index] // 8 + 1 else: datasize *= datashape[index] // 8 else: datasize = product(datashape) * datadtype.itemsize # just append contiguous data if possible self._truncate = bool(truncate) if self._datashape: if (not contiguous or self._datashape[1:] != datashape or self._datadtype != datadtype or (compress and self._tags) or tile or not numpy.array_equal(colormap, self._colormap)): # incompatible shape, dtype, compression mode, or colormap self._write_remaining_pages() self._write_image_description() self._truncate = False self._descriptionoffset = 0 self._descriptionlenoffset = 0 self._datashape = None self._colormap = None if self._imagej: raise ValueError( 'ImageJ does not support non-contiguous data') else: # consecutive mode self._datashape = (self._datashape[0] + 1,) + datashape if not compress: # write contiguous data, write IFDs/tags later offset = fh.tell() if data is None: fh.write_empty(datasize) else: fh.write_array(data) if returnoffset: return offset, datasize return None input_shape = datashape tagnoformat = self._tagnoformat valueformat = self._valueformat offsetformat = self._offsetformat offsetsize = self._offsetsize tagsize = self._tagsize MINISBLACK = TIFF.PHOTOMETRIC.MINISBLACK MINISWHITE = TIFF.PHOTOMETRIC.MINISWHITE RGB = TIFF.PHOTOMETRIC.RGB CFA = TIFF.PHOTOMETRIC.CFA PALETTE = TIFF.PHOTOMETRIC.PALETTE CONTIG = TIFF.PLANARCONFIG.CONTIG SEPARATE = TIFF.PLANARCONFIG.SEPARATE # parse input if photometric is not None: photometric = enumarg(TIFF.PHOTOMETRIC, photometric) if planarconfig: planarconfig = enumarg(TIFF.PLANARCONFIG, planarconfig) if extrasamples is None: extrasamples_ = None else: extrasamples_ = tuple(enumarg(TIFF.EXTRASAMPLE, es) for es in sequence(extrasamples)) if not compress: compress = False compresstag = 1 # TODO: support predictors without compression predictor = False predictortag = 1 else: if isinstance(compress, (tuple, list)): compress, compresslevel = compress elif isinstance(compress, int): compress, compresslevel = 'ADOBE_DEFLATE', int(compress) if not 0 <= compresslevel <= 9: raise ValueError('invalid compression level %s' % compress) else: compresslevel = None compress = compress.upper() compresstag = enumarg(TIFF.COMPRESSION, compress) if predictor: if datadtype.kind in 'iu': predictortag = 2 predictor = TIFF.PREDICTORS[2] elif datadtype.kind == 'f': predictortag = 3 predictor = TIFF.PREDICTORS[3] else: raise ValueError('cannot apply predictor to %s' % datadtype) # prepare ImageJ format if self._imagej: # if predictor or compress: # warnings.warn( # 'ImageJ cannot handle predictors or compression') if description: warnings.warn('not writing description to ImageJ file') description = None volume = False if datadtypechar not in 'BHhf': raise ValueError( 'ImageJ does not support data type %s' % datadtypechar) ijrgb = photometric == RGB if photometric else None if datadtypechar not in 'B': ijrgb = False ijshape = imagej_shape(datashape, ijrgb) if ijshape[-1] in (3, 4): photometric = RGB if datadtypechar not in 'B': raise ValueError('ImageJ does not support data type %s ' 'for RGB' % datadtypechar) elif photometric is None: photometric = MINISBLACK planarconfig = None if planarconfig == SEPARATE: raise ValueError('ImageJ does not support planar images') planarconfig = CONTIG if ijrgb else None # define compress function if compress: compressor = TIFF.COMPESSORS[compresstag] if predictor: def compress(data, 
level=compresslevel): data = predictor(data, axis=-2) return compressor(data, level) else: def compress(data, level=compresslevel): return compressor(data, level) # verify colormap and indices if colormap is not None: if datadtypechar not in 'BH': raise ValueError('invalid data dtype for palette mode') colormap = numpy.asarray(colormap, dtype=byteorder+'H') if colormap.shape != (3, 2**(datadtype.itemsize * 8)): raise ValueError('invalid color map shape') self._colormap = colormap # verify tile shape if tile: tile = tuple(int(i) for i in tile[:3]) volume = len(tile) == 3 if (len(tile) < 2 or tile[-1] % 16 or tile[-2] % 16 or any(i < 1 for i in tile)): raise ValueError('invalid tile shape') else: tile = () volume = False # normalize data shape to 5D or 6D, depending on volume: # (pages, planar_samples, [depth,] height, width, contig_samples) datashape = reshape_nd(datashape, 3 if photometric == RGB else 2) shape = datashape ndim = len(datashape) samplesperpixel = 1 extrasamples = 0 if volume and ndim < 3: volume = False if colormap is not None: photometric = PALETTE planarconfig = None if photometric is None: photometric = MINISBLACK if bilevel: photometric = MINISWHITE elif planarconfig == CONTIG: if ndim > 2 and shape[-1] in (3, 4): photometric = RGB elif planarconfig == SEPARATE: if volume and ndim > 3 and shape[-4] in (3, 4): photometric = RGB elif ndim > 2 and shape[-3] in (3, 4): photometric = RGB elif ndim > 2 and shape[-1] in (3, 4): photometric = RGB elif self._imagej: photometric = MINISBLACK elif volume and ndim > 3 and shape[-4] in (3, 4): photometric = RGB elif ndim > 2 and shape[-3] in (3, 4): photometric = RGB if planarconfig and len(shape) <= (3 if volume else 2): planarconfig = None if photometric not in (0, 1, 3, 4): photometric = MINISBLACK if photometric == RGB: if len(shape) < 3: raise ValueError('not a RGB(A) image') if len(shape) < 4: volume = False if planarconfig is None: if shape[-1] in (3, 4): planarconfig = CONTIG elif shape[-4 if volume else -3] in (3, 4): planarconfig = SEPARATE elif shape[-1] > shape[-4 if volume else -3]: planarconfig = SEPARATE else: planarconfig = CONTIG if planarconfig == CONTIG: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] else: datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) samplesperpixel = datashape[1] if samplesperpixel > 3: extrasamples = samplesperpixel - 3 elif photometric == CFA: if len(shape) != 2: raise ValueError('invalid CFA image') volume = False planarconfig = None datashape = (-1, 1) + shape[-2:] + (1,) if 50706 not in (et[0] for et in extratags): raise ValueError('must specify DNG tags for CFA image') elif planarconfig and len(shape) > (3 if volume else 2): if planarconfig == CONTIG: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] else: datashape = (-1,) + shape[(-4 if volume else -3):] + (1,) samplesperpixel = datashape[1] extrasamples = samplesperpixel - 1 else: planarconfig = None while len(shape) > 2 and shape[-1] == 1: shape = shape[:-1] # remove trailing 1s if len(shape) < 3: volume = False if extrasamples_ is None: datashape = (-1, 1) + shape[(-3 if volume else -2):] + (1,) else: datashape = (-1, 1) + shape[(-4 if volume else -3):] samplesperpixel = datashape[-1] extrasamples = samplesperpixel - 1 if subfiletype & 0b100: # FILETYPE_MASK if not (bilevel and samplesperpixel == 1 and photometric in (0, 1, 4)): raise ValueError('invalid SubfileType MASK') photometric = TIFF.PHOTOMETRIC.MASK # normalize shape to 6D assert 
len(datashape) in (5, 6) if len(datashape) == 5: datashape = datashape[:2] + (1,) + datashape[2:] if datashape[0] == -1: s0 = product(input_shape) // product(datashape[1:]) datashape = (s0,) + datashape[1:] shape = datashape if data is not None: data = data.reshape(shape) if tile and not volume: tile = (1, tile[-2], tile[-1]) if photometric == PALETTE: if (samplesperpixel != 1 or extrasamples or shape[1] != 1 or shape[-1] != 1): raise ValueError('invalid data shape for palette mode') if photometric == RGB and samplesperpixel == 2: raise ValueError('not a RGB image (samplesperpixel=2)') if bilevel: if compresstag not in (1, 32773): raise ValueError('cannot compress bilevel image') if tile: raise ValueError('cannot save tiled bilevel image') if photometric not in (0, 1, 4): raise ValueError('cannot save bilevel image as %s' % str(photometric)) datashape = list(datashape) if datashape[-2] % 8: datashape[-2] = datashape[-2] // 8 + 1 else: datashape[-2] = datashape[-2] // 8 datashape = tuple(datashape) assert datasize == product(datashape) if data is not None: data = numpy.packbits(data, axis=-2) assert datashape[-2] == data.shape[-2] bytestr = bytes if sys.version[0] == '2' else ( lambda x: bytes(x, 'ascii') if isinstance(x, str) else x) tags = [] # list of (code, ifdentry, ifdvalue, writeonce) strip_or_tile = 'Tile' if tile else 'Strip' tagbytecounts = TIFF.TAG_NAMES[strip_or_tile + 'ByteCounts'] tagoffsets = TIFF.TAG_NAMES[strip_or_tile + 'Offsets'] self._tagoffsets = tagoffsets def pack(fmt, *val): return struct.pack(byteorder+fmt, *val) def addtag(code, dtype, count, value, writeonce=False): # Compute ifdentry & ifdvalue bytes from code, dtype, count, value # Append (code, ifdentry, ifdvalue, writeonce) to tags list code = int(TIFF.TAG_NAMES.get(code, code)) try: tifftype = TIFF.DATA_DTYPES[dtype] except KeyError: raise ValueError('unknown dtype %s' % dtype) rawcount = count if dtype == 's': # strings value = bytestr(value) + b'\0' count = rawcount = len(value) rawcount = value.find(b'\0\0') if rawcount < 0: rawcount = count else: rawcount += 1 # length of string without buffer value = (value,) elif isinstance(value, bytes): # packed binary data dtsize = struct.calcsize(dtype) if len(value) % dtsize: raise ValueError('invalid packed binary data') count = len(value) // dtsize if len(dtype) > 1: count *= int(dtype[:-1]) dtype = dtype[-1] ifdentry = [pack('HH', code, tifftype), pack(offsetformat, rawcount)] ifdvalue = None if struct.calcsize(dtype) * count <= offsetsize: # value(s) can be written directly if isinstance(value, bytes): ifdentry.append(pack(valueformat, value)) elif count == 1: if isinstance(value, (tuple, list, numpy.ndarray)): value = value[0] ifdentry.append(pack(valueformat, pack(dtype, value))) else: ifdentry.append(pack(valueformat, pack(str(count)+dtype, *value))) else: # use offset to value(s) ifdentry.append(pack(offsetformat, 0)) if isinstance(value, bytes): ifdvalue = value elif isinstance(value, numpy.ndarray): assert value.size == count assert value.dtype.char == dtype ifdvalue = value.tostring() elif isinstance(value, (tuple, list)): ifdvalue = pack(str(count)+dtype, *value) else: ifdvalue = pack(dtype, value) tags.append((code, b''.join(ifdentry), ifdvalue, writeonce)) def rational(arg, max_denominator=1000000): """"Return nominator and denominator from float or two integers.""" from fractions import Fraction # delayed import try: f = Fraction.from_float(arg) except TypeError: f = Fraction(arg[0], arg[1]) f = f.limit_denominator(max_denominator) return 
f.numerator, f.denominator if description: # user provided description addtag('ImageDescription', 's', 0, description, writeonce=True) # write shape and metadata to ImageDescription self._metadata = {} if not metadata else metadata.copy() if self._imagej: description = imagej_description( input_shape, shape[-1] in (3, 4), self._colormap is not None, **self._metadata) elif metadata or metadata == {}: if self._truncate: self._metadata.update(truncated=True) description = json_description(input_shape, **self._metadata) # elif metadata is None and self._truncate: # raise ValueError('cannot truncate without writing metadata') else: description = None if description: # add 64 bytes buffer # the image description might be updated later with the final shape description = str2bytes(description, 'ascii') description += b'\0' * 64 self._descriptionlen = len(description) addtag('ImageDescription', 's', 0, description, writeonce=True) if software: addtag('Software', 's', 0, software, writeonce=True) if datetime: if isinstance(datetime, str): if len(datetime) != 19 or datetime[16] != ':': raise ValueError('invalid datetime string') else: try: datetime = datetime.strftime('%Y:%m:%d %H:%M:%S') except AttributeError: datetime = self._now().strftime('%Y:%m:%d %H:%M:%S') addtag('DateTime', 's', 0, datetime, writeonce=True) addtag('Compression', 'H', 1, compresstag) if predictor: addtag('Predictor', 'H', 1, predictortag) addtag('ImageWidth', 'I', 1, shape[-2]) addtag('ImageLength', 'I', 1, shape[-3]) if tile: addtag('TileWidth', 'I', 1, tile[-1]) addtag('TileLength', 'I', 1, tile[-2]) if tile[0] > 1: addtag('ImageDepth', 'I', 1, shape[-4]) addtag('TileDepth', 'I', 1, tile[0]) addtag('NewSubfileType', 'I', 1, subfiletype) if not bilevel: sampleformat = {'u': 1, 'i': 2, 'f': 3, 'c': 6}[datadtype.kind] addtag('SampleFormat', 'H', samplesperpixel, (sampleformat,) * samplesperpixel) addtag('PhotometricInterpretation', 'H', 1, photometric.value) if colormap is not None: addtag('ColorMap', 'H', colormap.size, colormap) addtag('SamplesPerPixel', 'H', 1, samplesperpixel) if bilevel: pass elif planarconfig and samplesperpixel > 1: addtag('PlanarConfiguration', 'H', 1, planarconfig.value) addtag('BitsPerSample', 'H', samplesperpixel, (datadtype.itemsize * 8,) * samplesperpixel) else: addtag('BitsPerSample', 'H', 1, datadtype.itemsize * 8) if extrasamples: if extrasamples_ is not None: if extrasamples != len(extrasamples_): raise ValueError('wrong number of extrasamples specified') addtag('ExtraSamples', 'H', extrasamples, extrasamples_) elif photometric == RGB and extrasamples == 1: # Unassociated alpha channel addtag('ExtraSamples', 'H', 1, 2) else: # Unspecified alpha channel addtag('ExtraSamples', 'H', extrasamples, (0,) * extrasamples) if resolution is not None: addtag('XResolution', '2I', 1, rational(resolution[0])) addtag('YResolution', '2I', 1, rational(resolution[1])) if len(resolution) > 2: unit = resolution[2] unit = 1 if unit is None else enumarg(TIFF.RESUNIT, unit) elif self._imagej: unit = 1 else: unit = 2 addtag('ResolutionUnit', 'H', 1, unit) elif not self._imagej: addtag('XResolution', '2I', 1, (1, 1)) addtag('YResolution', '2I', 1, (1, 1)) addtag('ResolutionUnit', 'H', 1, 1) if ijmetadata: for t in imagej_metadata_tag(ijmetadata, byteorder): addtag(*t) contiguous = not compress if tile: # one chunk per tile per plane tiles = ((shape[2] + tile[0] - 1) // tile[0], (shape[3] + tile[1] - 1) // tile[1], (shape[4] + tile[2] - 1) // tile[2]) numtiles = product(tiles) * shape[1] databytecounts = [ product(tile) 
* shape[-1] * datadtype.itemsize] * numtiles addtag(tagbytecounts, offsetformat, numtiles, databytecounts) addtag(tagoffsets, offsetformat, numtiles, [0] * numtiles) contiguous = contiguous and product(tiles) == 1 if not contiguous: # allocate tile buffer chunk = numpy.empty(tile + (shape[-1],), dtype=datadtype) elif contiguous and (bilevel or rowsperstrip is None): # one strip per plane if bilevel: databytecounts = [product(datashape[2:])] * shape[1] else: databytecounts = [ product(datashape[2:]) * datadtype.itemsize] * shape[1] addtag(tagbytecounts, offsetformat, shape[1], databytecounts) addtag(tagoffsets, offsetformat, shape[1], [0] * shape[1]) addtag('RowsPerStrip', 'I', 1, shape[-3]) else: # use rowsperstrip rowsize = product(shape[-2:]) * datadtype.itemsize if rowsperstrip is None: # compress ~64 KB chunks by default rowsperstrip = 65536 // rowsize if compress else shape[-3] if rowsperstrip < 1: rowsperstrip = 1 elif rowsperstrip > shape[-3]: rowsperstrip = shape[-3] addtag('RowsPerStrip', 'I', 1, rowsperstrip) numstrips1 = (shape[-3] + rowsperstrip - 1) // rowsperstrip numstrips = numstrips1 * shape[1] if compress: databytecounts = [0] * numstrips else: # TODO: save bilevel data with rowsperstrip stripsize = rowsperstrip * rowsize databytecounts = [stripsize] * numstrips stripsize -= rowsize * (numstrips1 * rowsperstrip - shape[-3]) for i in range(numstrips1-1, numstrips, numstrips1): databytecounts[i] = stripsize addtag(tagbytecounts, offsetformat, numstrips, databytecounts) addtag(tagoffsets, offsetformat, numstrips, [0] * numstrips) if data is None and not contiguous: raise ValueError('cannot write non-contiguous empty file') # add extra tags from user for t in extratags: addtag(*t) # TODO: check TIFFReadDirectoryCheckOrder warning in files containing # multiple tags of same code # the entries in an IFD must be sorted in ascending order by tag code tags = sorted(tags, key=lambda x: x[0]) fhpos = fh.tell() if not (self._bigtiff or self._imagej) and fhpos + datasize > 2**32-1: raise ValueError('data too large for standard TIFF file') # if not compressed or multi-tiled, write the first IFD and then # all data contiguously; else, write all IFDs and data interleaved for pageindex in range(1 if contiguous else shape[0]): ifdpos = fhpos if ifdpos % 2: # location of IFD must begin on a word boundary fh.write(b'\0') ifdpos += 1 # update pointer at ifdoffset fh.seek(self._ifdoffset) fh.write(pack(offsetformat, ifdpos)) fh.seek(ifdpos) # create IFD in memory if pageindex < 2: ifd = io.BytesIO() ifd.write(pack(tagnoformat, len(tags))) tagoffset = ifd.tell() ifd.write(b''.join(t[1] for t in tags)) ifdoffset = ifd.tell() ifd.write(pack(offsetformat, 0)) # offset to next IFD # write tag values and patch offsets in ifdentries for tagindex, tag in enumerate(tags): offset = tagoffset + tagindex * tagsize + offsetsize + 4 code = tag[0] value = tag[2] if value: pos = ifd.tell() if pos % 2: # tag value is expected to begin on word boundary ifd.write(b'\0') pos += 1 ifd.seek(offset) ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) ifd.write(value) if code == tagoffsets: dataoffsetsoffset = offset, pos elif code == tagbytecounts: databytecountsoffset = offset, pos elif code == 270 and value.endswith(b'\0\0\0\0'): # image description buffer self._descriptionoffset = ifdpos + pos self._descriptionlenoffset = ( ifdpos + tagoffset + tagindex*tagsize + 4) elif code == tagoffsets: dataoffsetsoffset = offset, None elif code == tagbytecounts: databytecountsoffset = offset, None ifdsize = ifd.tell() 
if ifdsize % 2: ifd.write(b'\0') ifdsize += 1 # write IFD later when strip/tile bytecounts and offsets are known fh.seek(ifdsize, 1) # write image data dataoffset = fh.tell() skip = align - dataoffset % align fh.seek(skip, 1) dataoffset += skip if contiguous: if data is None: fh.write_empty(datasize) else: fh.write_array(data) elif tile: if data is None: fh.write_empty(numtiles * databytecounts[0]) else: stripindex = 0 for plane in data[pageindex]: for tz in range(tiles[0]): for ty in range(tiles[1]): for tx in range(tiles[2]): c0 = min(tile[0], shape[2] - tz*tile[0]) c1 = min(tile[1], shape[3] - ty*tile[1]) c2 = min(tile[2], shape[4] - tx*tile[2]) chunk[c0:, c1:, c2:] = 0 chunk[:c0, :c1, :c2] = plane[ tz*tile[0]:tz*tile[0]+c0, ty*tile[1]:ty*tile[1]+c1, tx*tile[2]:tx*tile[2]+c2] if compress: t = compress(chunk) fh.write(t) databytecounts[stripindex] = len(t) stripindex += 1 else: fh.write_array(chunk) # fh.flush() elif compress: # write one strip per rowsperstrip assert data.shape[2] == 1 # not handling depth numstrips = (shape[-3] + rowsperstrip - 1) // rowsperstrip stripindex = 0 for plane in data[pageindex]: for i in range(numstrips): strip = plane[0, i*rowsperstrip: (i+1)*rowsperstrip] strip = compress(strip) fh.write(strip) databytecounts[stripindex] = len(strip) stripindex += 1 else: fh.write_array(data[pageindex]) # update strip/tile offsets offset, pos = dataoffsetsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) offset = dataoffset for size in databytecounts: ifd.write(pack(offsetformat, offset)) offset += size else: ifd.write(pack(offsetformat, dataoffset)) if compress: # update strip/tile bytecounts offset, pos = databytecountsoffset ifd.seek(offset) if pos: ifd.write(pack(offsetformat, ifdpos + pos)) ifd.seek(pos) for size in databytecounts: ifd.write(pack(offsetformat, size)) else: ifd.write(pack(offsetformat, databytecounts[0])) fhpos = fh.tell() fh.seek(ifdpos) fh.write(iogetbuffer(ifd)) fh.flush() fh.seek(fhpos) self._ifdoffset = ifdpos + ifdoffset # remove tags that should be written only once if pageindex == 0: tags = [tag for tag in tags if not tag[-1]] self._shape = shape self._datashape = (1,) + input_shape self._datadtype = datadtype self._dataoffset = dataoffset self._databytecounts = databytecounts if contiguous: # write remaining IFDs/tags later self._tags = tags # return offset and size of image data if returnoffset: return dataoffset, sum(databytecounts) return None
python
def login_required(f):
    """Decorator function to check if user is logged in.

    :raises: :class:`FMBaseError` if not logged in
    """
    @wraps(f)
    def check_login(cls, *args, **kwargs):
        if not cls.logged_in:
            raise FMBaseError('Please login to use this method')
        return f(cls, *args, **kwargs)
    return check_login
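A usage sketch with a hypothetical client class, showing when the decorator raises:

class Client:
    logged_in = False  # hypothetical attribute checked by the decorator

    @login_required
    def fetch(self, path):
        return 'data from %s' % path

client = Client()
client.fetch('/files')   # raises FMBaseError
client.logged_in = True
client.fetch('/files')   # returns 'data from /files'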
java
public void reset() {
    Arrays.fill(mean, 0.);
    Arrays.fill(nmea, 0.);
    if (elements != null) {
        for (int i = 0; i < elements.length; i++) {
            Arrays.fill(elements[i], 0.);
        }
    } else {
        elements = new double[mean.length][mean.length];
    }
    wsum = 0.;
}
python
def _openssl_key_iv(passphrase, salt):
    """
    Returns a (key, iv) tuple that can be used in AES symmetric encryption
    from a *passphrase* (a byte or unicode string) and *salt* (a byte array).
    """
    def _openssl_kdf(req):
        if hasattr(passphrase, 'encode'):
            passwd = passphrase.encode('ascii', 'ignore')
        else:
            passwd = passphrase
        prev = b''
        while req > 0:
            digest = hashes.Hash(hashes.MD5(), backend=default_backend())
            digest.update(prev + passwd + salt)
            prev = digest.finalize()
            req -= IV_BLOCK_SIZE
            yield prev
    assert passphrase is not None
    assert salt is not None
    # AES key: 32 bytes, IV: 16 bytes
    mat = b''.join([x for x in _openssl_kdf(32 + IV_BLOCK_SIZE)])
    return mat[0:32], mat[32:32 + IV_BLOCK_SIZE]
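The generator above implements OpenSSL's legacy EVP_BytesToKey derivation with MD5. A self-contained equivalent using only hashlib, handy for checking the output (assumes IV_BLOCK_SIZE is 16, the MD5 digest size):

import hashlib

def evp_bytes_to_key(passwd, salt, key_len=32, iv_len=16):
    # Repeatedly hash previous-digest + password + salt until enough
    # material is produced, then split it into key and IV.
    material, prev = b'', b''
    while len(material) < key_len + iv_len:
        prev = hashlib.md5(prev + passwd + salt).digest()
        material += prev
    return material[:key_len], material[key_len:key_len + iv_len]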
python
def get_objective_hierarchy_design_session(self):
    """Gets the session for designing objective hierarchies.

    return: (osid.learning.ObjectiveHierarchyDesignSession) - an
            ObjectiveHierarchyDesignSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_objective_hierarchy_design() is false
    compliance: optional - This method must be implemented if
            supports_objective_hierarchy_design() is true.
    """
    if not self.supports_objective_hierarchy_design():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        session = sessions.ObjectiveHierarchyDesignSession(runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
java
public Type glb(Type t, Type s) {
    if (s == null)
        return t;
    else if (t.isPrimitive() || s.isPrimitive())
        return syms.errType;
    else if (isSubtypeNoCapture(t, s))
        return t;
    else if (isSubtypeNoCapture(s, t))
        return s;

    List<Type> closure = union(closure(t), closure(s));
    return glbFlattened(closure, t);
}
python
def check_bounds(self, addr):
    """
    Check whether the given address is within the array bounds.
    """
    def is_boolean_array(arr):
        return hasattr(arr, 'dtype') and arr.dtype == bool

    def check_axis(x, size):
        if isinstance(x, (int, long, numpy.integer)):
            lower = upper = x
        elif isinstance(x, slice):
            lower = x.start or 0
            upper = min(x.stop or size - 1, size - 1)  # slices are allowed to go past the bounds
        elif isinstance(x, collections.Sized):
            if is_boolean_array(x):
                lower = 0
                upper = x.size - 1
            else:
                if len(x) == 0:
                    raise ValueError("Empty address component (address was %s)" % str(addr))
                if hasattr(x, "min"):
                    lower = x.min()
                else:
                    lower = min(x)
                if hasattr(x, "max"):
                    upper = x.max()
                else:
                    upper = max(x)
        else:
            raise TypeError("Invalid array address: %s (element of type %s)" % (str(addr), type(x)))
        if (lower < -size) or (upper >= size):
            raise IndexError("Index out of bounds")

    full_addr = self._full_address(addr)
    if isinstance(addr, numpy.ndarray) and addr.dtype == bool:
        if len(addr.shape) > len(self._shape):
            raise IndexError("Too many indices for array")
        for xmax, size in zip(addr.shape, self._shape):
            upper = xmax - 1
            if upper >= size:
                raise IndexError("Index out of bounds")
    else:
        for i, size in zip(full_addr, self._shape):
            check_axis(i, size)
python
def is_cep(numero, estrito=False):
    """A convenience version for use in conditional tests. Simply returns
    True or False according to whether the argument validates.

    :param bool estrito: Defaults to ``False``, meaning only the digits of
        the number are considered: any characters forming the mask are
        stripped before validation. If ``True``, the number is validated
        strictly, exactly as given.
    """
    try:
        cep(digitos(numero) if not estrito else numero)
        return True
    except NumeroCEPError:
        pass
    return False
python
def split_seconds(seconds):
    """Split seconds into [day, hour, minute, second, ms]

    `divisors: 1, 24, 60, 60, 1000`
    `units: day, hour, minute, second, ms`

    >>> split_seconds(6666666)
    [77, 3, 51, 6, 0]
    """
    ms = seconds * 1000
    divisors = (1, 24, 60, 60, 1000)
    quotient, result = ms, []
    for divisor in divisors[::-1]:
        quotient, remainder = divmod(quotient, divisor)
        result.append(quotient if divisor == 1 else remainder)
    return result[::-1]
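An inverse helper (illustrative, not part of the original API) makes the unit arithmetic easy to verify:

def join_seconds(parts):
    # Rebuild total milliseconds from [day, hour, minute, second, ms].
    day, hour, minute, second, ms = parts
    return (((day * 24 + hour) * 60 + minute) * 60 + second) * 1000 + ms

assert join_seconds(split_seconds(6666666)) == 6666666 * 1000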
java
@Override
public R visitStartElement(StartElementTree node, P p) {
    return defaultAction(node, p);
}
python
def load(self, filenames=None, goto=None, word='',
         editorwindow=None, processevents=True, start_column=None,
         set_focus=True, add_where='end'):
    """
    Load a text file
    editorwindow: load in this editorwindow (useful when clicking on
    outline explorer with multiple editor windows)
    processevents: determines if processEvents() should be called at
    the end of this method (set to False to prevent keyboard events from
    creeping through to the editor during debugging)
    """
    # Switch to editor before trying to load a file
    try:
        self.switch_to_plugin()
    except AttributeError:
        pass
    editor0 = self.get_current_editor()
    if editor0 is not None:
        position0 = editor0.get_position('cursor')
        filename0 = self.get_current_filename()
    else:
        position0, filename0 = None, None
    if not filenames:
        # Recent files action
        action = self.sender()
        if isinstance(action, QAction):
            filenames = from_qvariant(action.data(), to_text_string)
    if not filenames:
        basedir = getcwd_or_home()
        if self.edit_filetypes is None:
            self.edit_filetypes = get_edit_filetypes()
        if self.edit_filters is None:
            self.edit_filters = get_edit_filters()
        c_fname = self.get_current_filename()
        if c_fname is not None and c_fname != self.TEMPFILE_PATH:
            basedir = osp.dirname(c_fname)
        self.redirect_stdio.emit(False)
        parent_widget = self.get_current_editorstack()
        if filename0 is not None:
            selectedfilter = get_filter(self.edit_filetypes,
                                        osp.splitext(filename0)[1])
        else:
            selectedfilter = ''
        if not running_under_pytest():
            filenames, _sf = getopenfilenames(
                parent_widget, _("Open file"), basedir,
                self.edit_filters, selectedfilter=selectedfilter,
                options=QFileDialog.HideNameFilterDetails)
        else:
            # Use a Qt (i.e. scriptable) dialog for pytest
            dialog = QFileDialog(parent_widget, _("Open file"),
                                 options=QFileDialog.DontUseNativeDialog)
            if dialog.exec_():
                filenames = dialog.selectedFiles()
        self.redirect_stdio.emit(True)
        if filenames:
            filenames = [osp.normpath(fname) for fname in filenames]
        else:
            return
    focus_widget = QApplication.focusWidget()
    if self.editorwindows and not self.dockwidget.isVisible():
        # We override the editorwindow variable to force a focus on
        # the editor window instead of the hidden editor dockwidget.
        # See PR #5742.
        if editorwindow not in self.editorwindows:
            editorwindow = self.editorwindows[0]
        editorwindow.setFocus()
        editorwindow.raise_()
    elif (self.dockwidget and not self.ismaximized
            and not self.dockwidget.isAncestorOf(focus_widget)
            and not isinstance(focus_widget, CodeEditor)):
        self.dockwidget.setVisible(True)
        self.dockwidget.setFocus()
        self.dockwidget.raise_()

    def _convert(fname):
        fname = osp.abspath(encoding.to_unicode_from_fs(fname))
        if os.name == 'nt' and len(fname) >= 2 and fname[1] == ':':
            fname = fname[0].upper() + fname[1:]
        return fname

    if hasattr(filenames, 'replaceInStrings'):
        # This is a QStringList instance (PyQt API #1), converting to list:
        filenames = list(filenames)
    if not isinstance(filenames, list):
        filenames = [_convert(filenames)]
    else:
        filenames = [_convert(fname) for fname in list(filenames)]
    if isinstance(goto, int):
        goto = [goto]
    elif goto is not None and len(goto) != len(filenames):
        goto = None

    for index, filename in enumerate(filenames):
        # -- Do not open an already opened file
        focus = set_focus and index == 0
        current_editor = self.set_current_filename(filename,
                                                   editorwindow,
                                                   focus=focus)
        if current_editor is None:
            # -- Not a valid filename:
            if not osp.isfile(filename):
                continue
            # --
            current_es = self.get_current_editorstack(editorwindow)
            # Creating the editor widget in the first editorstack
            # (the one that can't be destroyed), then cloning this
            # editor widget in all other editorstacks:
            finfo = self.editorstacks[0].load(
                filename, set_current=False, add_where=add_where)
            finfo.path = self.main.get_spyder_pythonpath()
            self._clone_file_everywhere(finfo)
            current_editor = current_es.set_current_filename(filename,
                                                             focus=focus)
            current_editor.debugger.load_breakpoints()
            current_editor.set_bookmarks(load_bookmarks(filename))
            self.register_widget_shortcuts(current_editor)
            current_es.analyze_script()
            self.__add_recent_file(filename)
        if goto is not None:  # 'word' is assumed to be None as well
            current_editor.go_to_line(goto[index], word=word,
                                      start_column=start_column)
            position = current_editor.get_position('cursor')
            self.cursor_moved(filename0, position0, filename, position)
        current_editor.clearFocus()
        current_editor.setFocus()
        current_editor.window().raise_()
        if processevents:
            QApplication.processEvents()
        else:
            # processevents is false only when calling from debugging
            current_editor.sig_debug_stop.emit(goto[index])
            current_sw = self.main.ipyconsole.get_current_shellwidget()
            current_sw.sig_prompt_ready.connect(
                current_editor.sig_debug_stop[()].emit)
java
@VisibleForTesting
static Iterable<SSTableReader> filterOldSSTables(List<SSTableReader> sstables,
                                                 long maxSSTableAge, long now) {
    if (maxSSTableAge == 0)
        return sstables;
    final long cutoff = now - maxSSTableAge;
    return Iterables.filter(sstables, new Predicate<SSTableReader>() {
        @Override
        public boolean apply(SSTableReader sstable) {
            return sstable.getMaxTimestamp() >= cutoff;
        }
    });
}
java
public static Date addDays(Date d, int days) {
    Calendar cal = Calendar.getInstance();
    cal.setTime(d);
    cal.add(Calendar.DAY_OF_YEAR, days);
    return cal.getTime();
}
python
def db_url_config(cls, url, engine=None):
    """Pulled from DJ-Database-URL, parse an arbitrary Database URL.
    Support currently exists for PostgreSQL, PostGIS, MySQL, Oracle and SQLite.

    SQLite connects to file based databases. The same URL format is used,
    omitting the hostname, and using the "file" portion as the filename of
    the database. This has the effect of four slashes being present for an
    absolute file path:

    >>> from environ import Env
    >>> Env.db_url_config('sqlite:////full/path/to/your/file.sqlite')
    {'ENGINE': 'django.db.backends.sqlite3', 'HOST': '', 'NAME': '/full/path/to/your/file.sqlite', 'PASSWORD': '', 'PORT': '', 'USER': ''}
    >>> Env.db_url_config('postgres://uf07k1i6d8ia0v:wegauwhgeuioweg@ec2-107-21-253-135.compute-1.amazonaws.com:5431/d8r82722r2kuvn')
    {'ENGINE': 'django.db.backends.postgresql', 'HOST': 'ec2-107-21-253-135.compute-1.amazonaws.com', 'NAME': 'd8r82722r2kuvn', 'PASSWORD': 'wegauwhgeuioweg', 'PORT': 5431, 'USER': 'uf07k1i6d8ia0v'}
    """
    if not isinstance(url, cls.URL_CLASS):
        if url == 'sqlite://:memory:':
            # this is a special case, because if we pass this URL into
            # urlparse, urlparse will choke trying to interpret "memory"
            # as a port number
            return {
                'ENGINE': cls.DB_SCHEMES['sqlite'],
                'NAME': ':memory:'
            }  # note: no other settings are required for sqlite
        url = urlparse(url)

    config = {}

    # Remove query strings.
    path = url.path[1:]
    path = unquote_plus(path.split('?', 2)[0])

    if url.scheme == 'sqlite':
        if path == '':
            # if we are using sqlite and we have no path, then assume we
            # want an in-memory database (this is the behaviour of sqlalchemy)
            path = ':memory:'
        if url.netloc:
            warnings.warn(
                'SQLite URL contains host component %r, it will be ignored' % url.netloc,
                stacklevel=3)
    if url.scheme == 'ldap':
        path = '{scheme}://{hostname}'.format(scheme=url.scheme, hostname=url.hostname)
        if url.port:
            path += ':{port}'.format(port=url.port)

    # Update with environment configuration.
    config.update({
        'NAME': path or '',
        'USER': _cast_urlstr(url.username) or '',
        'PASSWORD': _cast_urlstr(url.password) or '',
        'HOST': url.hostname or '',
        'PORT': _cast_int(url.port) or '',
    })

    if url.scheme == 'postgres' and path.startswith('/'):
        config['HOST'], config['NAME'] = path.rsplit('/', 1)

    if url.scheme == 'oracle' and path == '':
        config['NAME'] = config['HOST']
        config['HOST'] = ''

    if url.scheme == 'oracle':
        # Django oracle/base.py strips port and fails on non-string value
        if not config['PORT']:
            del(config['PORT'])
        else:
            config['PORT'] = str(config['PORT'])

    if url.query:
        config_options = {}
        for k, v in parse_qs(url.query).items():
            if k.upper() in cls._DB_BASE_OPTIONS:
                config.update({k.upper(): _cast(v[0])})
            else:
                config_options.update({k: _cast_int(v[0])})
        config['OPTIONS'] = config_options

    if engine:
        config['ENGINE'] = engine
    else:
        config['ENGINE'] = url.scheme

    if config['ENGINE'] in Env.DB_SCHEMES:
        config['ENGINE'] = Env.DB_SCHEMES[config['ENGINE']]

    if not config.get('ENGINE', False):
        warnings.warn("Engine not recognized from url: {0}".format(config))
        return {}

    return config
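A further usage sketch (the result assumes the scheme-to-engine table shipped with django-environ):

config = Env.db_url_config('mysql://user:secret@db.example.com:3306/appdb')
# {'NAME': 'appdb', 'USER': 'user', 'PASSWORD': 'secret',
#  'HOST': 'db.example.com', 'PORT': 3306,
#  'ENGINE': 'django.db.backends.mysql'}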
python
def ln_from_etree(self, ln_element, context=''):
    """Parse rs:ln element from an etree, returning a dict of the data.

    Parameters:
        ln_element - etree element <rs:ln>
        context - context string for error reporting
    """
    ln = {}
    # grab all understood attributes into ln dict
    for att in ('hash', 'href', 'length', 'modified', 'path', 'rel', 'pri', 'mime_type'):
        xml_att = self._xml_att_name(att)
        val = ln_element.attrib.get(xml_att, None)
        if (val is not None):
            ln[att] = val
    # now do some checks and conversions...
    # href (MANDATORY)
    if ('href' not in ln):
        raise SitemapParseError("Missing href in <rs:ln> in %s" % (context))
    # rel (MANDATORY)
    if ('rel' not in ln):
        raise SitemapParseError("Missing rel in <rs:ln> in %s" % (context))
    # length in bytes
    if ('length' in ln):
        try:
            ln['length'] = int(ln['length'])
        except ValueError as e:
            raise SitemapParseError("Invalid length attribute value in <rs:ln> for %s" % (context))
    # pri - priority, must be a number between 1 and 999999
    if ('pri' in ln):
        try:
            ln['pri'] = int(ln['pri'])
        except ValueError as e:
            raise SitemapParseError("Invalid pri attribute in <rs:ln> for %s" % (context))
        if (ln['pri'] < 1 or ln['pri'] > 999999):
            raise SitemapParseError("Bad pri attribute value in <rs:ln> for %s" % (context))
    return(ln)
java
public static void changeSign( DMatrix6 a ) {
    a.a1 = -a.a1;
    a.a2 = -a.a2;
    a.a3 = -a.a3;
    a.a4 = -a.a4;
    a.a5 = -a.a5;
    a.a6 = -a.a6;
}
python
def fit(self, X, y):
    """Build an accelerated failure time model.

    Parameters
    ----------
    X : array-like, shape = (n_samples, n_features)
        Data matrix.

    y : structured array, shape = (n_samples,)
        A structured array containing the binary event indicator as first
        field, and time of event or time of censoring as second field.

    Returns
    -------
    self
    """
    X, event, time = check_arrays_survival(X, y)

    weights = ipc_weights(event, time)
    super().fit(X, numpy.log(time), sample_weight=weights)

    return self
python
def clamped(values, output_min=0, output_max=1):
    """
    Returns *values* clamped from *output_min* to *output_max*, i.e. any
    items less than *output_min* will be returned as *output_min* and any
    items larger than *output_max* will be returned as *output_max* (these
    default to 0 and 1 respectively). For example::

        from gpiozero import PWMLED, MCP3008
        from gpiozero.tools import clamped
        from signal import pause

        led = PWMLED(4)
        pot = MCP3008(channel=0)
        led.source = clamped(pot, 0.5, 1.0)
        pause()
    """
    values = _normalize(values)
    if output_min >= output_max:
        raise ValueError('output_min must be smaller than output_max')
    for v in values:
        yield min(max(v, output_min), output_max)
java
private int assertToken(final StreamTokenizer tokeniser, Reader in, final String token)
        throws IOException, ParserException {
    return assertToken(tokeniser, in, token, false, false);
}
python
def cj(job_ids):
    '''Simple implementation where joblist is expected to be a list of
    integers (job ids). The full grammar for this command allows more
    granular control.'''
    if job_ids:
        # validate once, up front: every entry must be an int
        job_id_types = set(map(type, job_ids))
        assert(len(job_id_types) == 1 and job_id_types.pop() is int)
    for job_id in job_ids:
        args = shlex.split('qmod -cj {0}'.format(job_id))
        subprocess.call(args, shell=False)
java
public Project updateProject(String oldProjectName, Project project) throws GreenPepperServerException {
    Project projectUpdated;
    try {
        sessionService.startSession();
        sessionService.beginTransaction();
        projectUpdated = projectDao.update(oldProjectName, project);
        sessionService.commitTransaction();
        log.debug("Updated Project: " + project.getName());
    } catch (Exception ex) {
        sessionService.rollbackTransaction();
        throw handleException(PROJECT_UPDATE_FAILED, ex);
    } finally {
        sessionService.closeSession();
    }
    return projectUpdated;
}
java
public static dbuser[] get(nitro_service service) throws Exception {
    dbuser obj = new dbuser();
    dbuser[] response = (dbuser[]) obj.get_resources(service);
    return response;
}
java
public static String absorbInputStream(InputStream input) throws IOException {
    byte[] bytes = StreamSupport.absorbInputStream(input);
    return new String(bytes);
}
python
def relabel_map(label_image, mapping, key=lambda x, y: x[y]):
    r"""
    Relabel an image using the supplied mapping.

    The ``mapping`` can be any kind of subscriptable object. The respective
    region id is used to access the new value from the ``mapping``. The
    ``key`` keyword parameter can be used to supply another access function.
    The ``key`` function must have the signature key(mapping, region-id) and
    return the new region-id to assign.

    Parameters
    ----------
    label_image : array_like
        A nD label map.
    mapping : dictionary or subscriptable object
        A mapping object.
    key : function
        Can be used to define the key-access to the ``mapping`` object.

    Returns
    -------
    relabel_map : ndarray
        A label map with new region ids.

    Raises
    ------
    ArgumentError
        If a region id is missing in the supplied mapping
    """
    label_image = scipy.array(label_image)

    def _map(x):
        try:
            return key(mapping, x)
        except Exception as e:
            raise ArgumentError('No conversion for region id {} found in the supplied mapping. Error: {}'.format(x, e))

    vmap = scipy.vectorize(_map, otypes=[label_image.dtype])
    return vmap(label_image)
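A small usage sketch with a plain dict and the default key function:

import numpy

label_image = numpy.array([[0, 1], [1, 2]])
relabel_map(label_image, {0: 10, 1: 20, 2: 30})
# array([[10, 20], [20, 30]])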
java
protected int max(int[] v) {
    // note: assumes non-negative values; returns 0 for an empty
    // or all-negative array
    int x = 0;
    for (int i : v) {
        if (i > x)
            x = i;
    }
    return x;
}
java
public Object querySingleResult(String sql, String[] args, int column) {
    return db.querySingleResult(sql, args, column);
}
java
protected final PrcBeginningInventoryLineGfr lazyGetPrcBeginningInventoryLineGfr(
        final Map<String, Object> pAddParam) throws Exception {
    @SuppressWarnings("unchecked")
    PrcBeginningInventoryLineGfr<RS> proc = (PrcBeginningInventoryLineGfr<RS>) this.processorsMap
        .get(PrcBeginningInventoryLineGfr.class.getSimpleName());
    if (proc == null) {
        proc = new PrcBeginningInventoryLineGfr<RS>();
        @SuppressWarnings("unchecked")
        PrcEntityPbCopy<RS, BeginningInventoryLine> procDlg = (PrcEntityPbCopy<RS, BeginningInventoryLine>) this.fctBnEntitiesProcessors
            .lazyGet(pAddParam, PrcEntityPbCopy.class.getSimpleName());
        proc.setPrcAccEntityPbCopy(procDlg);
        // assigning fully initialized object:
        this.processorsMap
            .put(PrcBeginningInventoryLineGfr.class.getSimpleName(), proc);
    }
    return proc;
}
java
public static String findTitle(final Document doc) {
    if (doc == null)
        return null;

    // loop through the child nodes until the title element is found
    final NodeList childNodes = doc.getDocumentElement().getChildNodes();
    for (int i = 0; i < childNodes.getLength(); i++) {
        Node node = childNodes.item(i);

        // check if the node is the title and if its parent is the document root element
        if (node.getNodeName().equals(TOPIC_ROOT_TITLE_NODE_NAME)
                && node.getParentNode().equals(doc.getDocumentElement())) {
            return XMLUtilities.convertNodeToString(node, false);
        }
    }

    return null;
}
java
public ItemRef on(StorageEvent eventType, final OnItemSnapshot onItemSnapshot, final OnError onError) {
    if (eventType == StorageEvent.PUT) {
        this.get(onItemSnapshot, onError);
    }
    Event ev = new Event(eventType, this.table.name, this.primaryKeyValue,
            this.secondaryKeyValue, false, false, pushNotificationsEnabled, onItemSnapshot);
    context.addEvent(ev);
    return this;
}
java
public void removeAtRange(int index, int size) {
    final int end = Math.min(mSize, index + size);
    for (int i = index; i < end; i++) {
        removeAt(i);
    }
}
java
private String buildMessage(String firstMessageLine, int exceptionLine) {
    if (additionalLines.size() == 0) {
        return firstMessageLine;
    }
    StringBuffer message = new StringBuffer();
    if (firstMessageLine != null) {
        message.append(firstMessageLine);
    }
    int linesToProcess = (exceptionLine == -1 ? additionalLines.size() : exceptionLine);
    for (int i = 0; i < linesToProcess; i++) {
        message.append(newLine);
        message.append(additionalLines.get(i));
    }
    return message.toString();
}
java
public void cleanup() {
    keys.cleanup();
    counts.cleanup();
    dictionary.cleanup();
    // Decrement the dictionary memory by the total size of all the elements
    memoryEstimate.decrementDictionaryMemory(SizeOf.SIZE_OF_LONG * numElements);
}
java
public Connector withParameters(java.util.Map<String, String> parameters) {
    setParameters(parameters);
    return this;
}
java
@Override
public void send(final SipMessage msg) {
    final DatagramPacket pkt = new DatagramPacket(toByteBuf(msg), getRemoteAddress());
    channel().writeAndFlush(pkt);
}
java
public void delete(String resourceGroupName, String name, Boolean forceDelete) {
    deleteWithServiceResponseAsync(resourceGroupName, name, forceDelete).toBlocking().last().body();
}
python
def ncores_used(self):
    """
    Returns the number of cores currently in use.
    A core counts as used if a job is running on it.
    """
    return sum(task.manager.num_cores for task in self if task.status == task.S_RUN)
python
def server(self):
    """ Returns :class:`plexapi.myplex.MyPlexResource` with server of current item. """
    server = [s for s in self._server.resources()
              if s.clientIdentifier == self.machineIdentifier]
    if len(server) == 0:
        raise NotFound('Unable to find server with uuid %s' % self.machineIdentifier)
    return server[0]
python
def smoothed(mesh, angle):
    """
    Return a non-watertight version of the mesh which will
    render nicely with smooth shading by disconnecting faces
    at sharp angles to each other.

    Parameters
    ---------
    mesh : trimesh.Trimesh
        Source geometry
    angle : float
        Angle in radians: adjacent faces whose normals differ
        by less than this angle will be smoothed

    Returns
    ---------
    smooth : trimesh.Trimesh
        Geometry with disconnected face patches
    """
    # if the mesh has no adjacent faces return a copy
    if len(mesh.face_adjacency) == 0:
        return mesh.copy()

    # face pairs below angle threshold
    angle_ok = mesh.face_adjacency_angles <= angle
    # subset of face adjacency
    adjacency = mesh.face_adjacency[angle_ok]
    # list of connected groups of faces
    components = connected_components(adjacency,
                                      min_len=1,
                                      nodes=np.arange(len(mesh.faces)))
    # get a submesh as a single appended Trimesh
    smooth = mesh.submesh(components,
                          only_watertight=False,
                          append=True)
    return smooth
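A usage sketch, assuming the trimesh package this helper comes from:

import numpy as np
import trimesh

mesh = trimesh.creation.box()
# disconnect faces meeting at more than 30 degrees so flat regions
# shade smoothly while hard edges stay crisp
smooth = smoothed(mesh, angle=np.radians(30))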
python
def get_vnetwork_vms_input_datacenter(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_vnetwork_vms = ET.Element("get_vnetwork_vms")
    config = get_vnetwork_vms
    input = ET.SubElement(get_vnetwork_vms, "input")
    datacenter = ET.SubElement(input, "datacenter")
    datacenter.text = kwargs.pop('datacenter')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
python
def on_epoch_end(self, epoch, **kwargs:Any)->None:
    "Compare the value monitored to its best and maybe reduce lr."
    current = self.get_monitor_value()
    if current is None:
        return
    if self.operator(current - self.min_delta, self.best):
        self.best, self.wait = current, 0
    else:
        self.wait += 1
        if self.wait > self.patience:
            self.opt.lr *= self.factor
            self.wait = 0
            print(f'Epoch {epoch}: reducing lr to {self.opt.lr}')
java
public T error(CharSequence error) {
    if (view != null && view instanceof TextView) {
        ((TextView) view).setError(error);
    }
    return self();
}
python
def item_enclosure_mime_type(self, item):
    """
    Guess the enclosure's mimetype.
    Note: this method is only called if item_enclosure_url
    has returned something.
    """
    mime_type, encoding = guess_type(self.cached_enclosure_url)
    if mime_type:
        return mime_type
    return 'image/jpeg'
python
def split_input(cls, mapper_spec):
    """Returns a list of shard_count input_spec_shards for input_spec.

    Args:
        mapper_spec: The mapper specification to split from. Must contain
            'blob_keys' parameter with one or more blob keys.

    Returns:
        A list of BlobstoreInputReaders corresponding to the specified shards.
    """
    params = _get_params(mapper_spec)
    blob_keys = params[cls.BLOB_KEYS_PARAM]
    if isinstance(blob_keys, basestring):
        # This is a mechanism to allow multiple blob keys (which do not contain
        # commas) in a single string. It may go away.
        blob_keys = blob_keys.split(",")

    blob_sizes = {}
    for blob_key in blob_keys:
        blob_info = blobstore.BlobInfo.get(blobstore.BlobKey(blob_key))
        blob_sizes[blob_key] = blob_info.size

    shard_count = min(cls._MAX_SHARD_COUNT, mapper_spec.shard_count)
    shards_per_blob = shard_count // len(blob_keys)
    if shards_per_blob == 0:
        shards_per_blob = 1

    chunks = []
    for blob_key, blob_size in blob_sizes.items():
        blob_chunk_size = blob_size // shards_per_blob
        for i in xrange(shards_per_blob - 1):
            chunks.append(BlobstoreLineInputReader.from_json(
                {cls.BLOB_KEY_PARAM: blob_key,
                 cls.INITIAL_POSITION_PARAM: blob_chunk_size * i,
                 cls.END_POSITION_PARAM: blob_chunk_size * (i + 1)}))
        chunks.append(BlobstoreLineInputReader.from_json(
            {cls.BLOB_KEY_PARAM: blob_key,
             cls.INITIAL_POSITION_PARAM: blob_chunk_size * (shards_per_blob - 1),
             cls.END_POSITION_PARAM: blob_size}))

    return chunks
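The byte-range arithmetic in isolation: each blob is cut into shards_per_blob ranges, with the last range absorbing the integer-division remainder (the sizes below are made up):

blob_size = 1000
shards_per_blob = 3
chunk = blob_size // shards_per_blob  # 333

ranges = [(chunk * i, chunk * (i + 1)) for i in range(shards_per_blob - 1)]
ranges.append((chunk * (shards_per_blob - 1), blob_size))
print(ranges)  # [(0, 333), (333, 666), (666, 1000)]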
python
def add_process(self, name, cmd, quiet=False, env=None, cwd=None):
    """
    Add a process to this manager instance. The process will not be started
    until :func:`~honcho.manager.Manager.loop` is called.
    """
    assert name not in self._processes, "process names must be unique"
    proc = self._process_ctor(cmd,
                              name=name,
                              quiet=quiet,
                              colour=next(self._colours),
                              env=env,
                              cwd=cwd)
    self._processes[name] = {}
    self._processes[name]['obj'] = proc

    # Update printer width to accommodate this process name
    self._printer.width = max(self._printer.width, len(name))

    return proc
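A sketch of typical usage, assuming the honcho package is installed; the commands are placeholders:

from honcho.manager import Manager

m = Manager()
m.add_process('web', 'python -m http.server 8000')
m.add_process('worker', 'python worker.py')
m.loop()  # starts both processes and multiplexes their output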
java
public static Path leftShift(Path self, byte[] bytes) throws IOException {
    append(self, bytes);
    return self;
}
java
public ScreenModel doServletCommand(ScreenModel screenParent) {
    String strCommand = this.getProperty(DBParams.COMMAND);
    if (strCommand != null)
        if (this.getTask() != null)
            if (this.getTask().getApplication() != null)
                if (strCommand.equalsIgnoreCase(this.getTask().getApplication().getResources(ResourceConstants.MAIN_RESOURCE, true).getString(MenuConstants.SUBMIT)))
                    this.setProperty(DBParams.COMMAND, MenuConstants.SUBMIT);
    ScreenModel screen = super.doServletCommand(screenParent);
    // Process params from previous screen
    if (MenuConstants.SUBMIT.equalsIgnoreCase(this.getProperty(DBParams.COMMAND))) {
        if (this.getTask().getStatusText(DBConstants.WARNING_MESSAGE) == null) {
            // Normal return = logged in, go to main menu.
            this.free();
            return null;    // This will cause the main menu to display
        } else {
            this.getScreenRecord().getField(UserScreenRecord.NEW_PASSWORD_1).setData(null, DBConstants.DISPLAY, DBConstants.INIT_MOVE);
            this.getScreenRecord().getField(UserScreenRecord.NEW_PASSWORD_2).setData(null, DBConstants.DISPLAY, DBConstants.INIT_MOVE);
        }
    }
    return screen;  // By default, don't do anything
}
python
def chart_range(self):
    """
    Calculates the chart range from start and end. Downloads larger
    datasets (5y and 2y) when necessary, but defaults to 1y for
    performance reasons.
    """
    delta = datetime.datetime.now().year - self.start.year
    # Note: the ranges overlap at delta == 2; the first branch wins.
    if 2 <= delta <= 5:
        return "5y"
    elif 1 <= delta <= 2:
        return "2y"
    elif 0 <= delta < 1:
        return "1y"
    else:
        raise ValueError(
            "Invalid date specified. Must be within past 5 years.")
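The year-delta buckets in isolation (the helper below is a rewrite for illustration, not part of the source):

import datetime

def bucket(start_year):
    delta = datetime.datetime.now().year - start_year
    if 2 <= delta <= 5:      # delta == 2 lands here, not in the "2y" branch
        return "5y"
    elif 1 <= delta <= 2:
        return "2y"
    elif 0 <= delta < 1:
        return "1y"
    raise ValueError("Invalid date specified. Must be within past 5 years.")

print(bucket(datetime.datetime.now().year - 3))  # 5y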
java
public double getEstimate(final byte[] key) {
    if (key == null) { return Double.NaN; }
    checkMethodKeySize(key);
    final double est = maps_[0].getEstimate(key);
    if (est >= 0.0) { return est; }
    // key has been promoted
    final int level = -(int) est;
    final Map map = maps_[level];
    return map.getEstimate(key);
}
python
def incoming_messages(self) -> t.List[t.Tuple[float, bytes]]:
    """Consume the receive buffer and return the messages.

    If new messages are added to the queue while this function is being
    processed, they will not be returned. This ensures that this
    terminates in a timely manner.
    """
    approximate_messages = self._receive_buffer.qsize()
    messages = []
    for _ in range(approximate_messages):
        try:
            messages.append(self._receive_buffer.get_nowait())
        except queue.Empty:
            break
    return messages
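The bounded-drain pattern on its own, using only the standard library; qsize() is a snapshot, so items enqueued afterwards wait for the next call:

import queue

buf = queue.Queue()
for i in range(3):
    buf.put((float(i), b"payload"))

messages = []
for _ in range(buf.qsize()):       # bound the loop by the snapshot
    try:
        messages.append(buf.get_nowait())
    except queue.Empty:
        break                      # another consumer drained it first
print(messages)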
java
public String getNodeData(String nodePath, String formId, String tableName, String dataColName, String idColName) {
    MicroMetaDao microDao = getInnerDao();
    String select = dataColName + "->>'$." + dianNode(nodePath) + "' as dyna_data";
    String sql = "select " + select + " from " + tableName + " where " + idColName + "=?";
    Object[] paramArray = new Object[1];
    paramArray[0] = formId;
    Map retMap = microDao.querySingleObjJoinByCondition(sql, paramArray);
    // The result is always a map
    if (retMap != null) {
        return (String) retMap.get("dyna_data");
    }
    return null;
}
java
public static ShortBuffer allocate(int capacity) {
    if (capacity < 0) {
        throw new IllegalArgumentException();
    }
    ByteBuffer bb = ByteBuffer.allocateDirect(capacity * 2);
    bb.order(ByteOrder.nativeOrder());
    return bb.asShortBuffer();
}
java
public String createPreparedStatementString(ControlBeanContext context, Connection connection,
                                            Method method, Object[] arguments) {

    final boolean callableStatement = setCallableStatement(arguments);
    StringBuilder sqlString = new StringBuilder(getPreparedStatementText(context, method, arguments));

    if (callableStatement) {
        JdbcControl.SQLParameter[] params = (JdbcControl.SQLParameter[]) arguments[0];
        if (params == null) {
            return sqlString.toString();
        }

        sqlString.append(" Params: {");
        for (int i = 0; i < params.length; i++) {
            // Append every value, comma-separated (the original loop skipped
            // the first value and never emitted separators).
            if (i > 0) {
                sqlString.append(", ");
            }
            sqlString.append(params[i].value.toString());
        }
        sqlString.append("}");

    } else if (_batchUpdate) {
        sqlString.append(" Params: batch update.");
    } else {
        sqlString.append(" Params: {");
        boolean first = true;
        for (SqlFragment sf : _children) {
            if (sf.hasParamValue()) {
                Object values[] = sf.getParameterValues(context, method, arguments);
                for (Object value : values) {
                    if (!first) sqlString.append(", ");
                    else first = false;
                    sqlString.append(value);
                }
            }
        }
        sqlString.append("}");
    }
    return sqlString.toString();
}
java
@Override
public HandlerRegistration addClickHandler(final ClickHandler handler) {
    return anchor.addHandler(handler, ClickEvent.getType());
}
python
def simple_tokenize(name):
    """Simple tokenizer function to be used with the normalizers."""
    last_names, first_names = name.split(',')
    last_names = _RE_NAME_TOKEN_SEPARATOR.split(last_names)
    first_names = _RE_NAME_TOKEN_SEPARATOR.split(first_names)

    first_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
                   for n in first_names if n]
    last_names = [NameToken(n) if len(n) > 1 else NameInitial(n)
                  for n in last_names if n]

    return {'lastnames': last_names,
            'nonlastnames': first_names}
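A self-contained rendition of the same split, with stand-in token labels and a guessed separator pattern (the real _RE_NAME_TOKEN_SEPARATOR, NameToken and NameInitial are not shown in the source):

import re

SEP = re.compile(r"[\s.\-]+")  # assumption: split on spaces, dots and hyphens

def tokenize(name):
    last, first = name.split(',')
    label = lambda t: ('token', t) if len(t) > 1 else ('initial', t)
    return {'lastnames': [label(t) for t in SEP.split(last) if t],
            'nonlastnames': [label(t) for t in SEP.split(first) if t]}

print(tokenize("Smith-Jones, J. Robert"))
# {'lastnames': [('token', 'Smith'), ('token', 'Jones')],
#  'nonlastnames': [('initial', 'J'), ('token', 'Robert')]}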
java
private char matchPrefix(String str1, String str2) {
    // here always str1.startsWith(str2): colorless!color
    char rel = IMappingElement.IDK;
    int spacePos1 = str1.indexOf(' ');
    String suffix = str1.substring(str2.length());
    if (-1 < spacePos1 && !suffixes.containsKey(suffix)) { // check suffixes - pole vault=pole vaulter
        if (str2.length() == spacePos1) { // plant part < plant
            rel = IMappingElement.LESS_GENERAL;
        } else { // plant part < plan
            String left = str1.substring(0, spacePos1);
            char secondRel = match(left, str2);
            if (IMappingElement.MORE_GENERAL == secondRel ||
                    IMappingElement.EQUIVALENCE == secondRel) {
                rel = IMappingElement.LESS_GENERAL;
            } else { // ?, <, !
                rel = secondRel;
            }
        }
    } else {
        // spelling: -tree and tree
        if (suffix.startsWith("-")) {
            suffix = suffix.substring(1);
        }
        if (suffix.endsWith("-") || suffix.endsWith(";") ||
                suffix.endsWith(".") || suffix.endsWith(",")) {
            suffix = suffix.substring(0, suffix.length() - 1);
        }
        if (suffixes.containsKey(suffix)) {
            rel = suffixes.get(suffix);
            rel = reverseRelation(rel);
        }
        // another approximation = Gversion4
        // if (rel == MatchManager.LESS_GENERAL || rel == MatchManager.MORE_GENERAL) {
        //     rel = MatchManager.EQUIVALENCE;
        // }
    }
    // filter = Gversion3
    // if (MatchManager.LESS_GENERAL == rel || MatchManager.MORE_GENERAL == rel) {
    //     rel = MatchManager.EQUIVALENCE;
    // }
    return rel;
}
java
public void setCloudWatchLoggingOptionDescriptions(java.util.Collection<CloudWatchLoggingOptionDescription> cloudWatchLoggingOptionDescriptions) {
    if (cloudWatchLoggingOptionDescriptions == null) {
        this.cloudWatchLoggingOptionDescriptions = null;
        return;
    }

    this.cloudWatchLoggingOptionDescriptions = new java.util.ArrayList<CloudWatchLoggingOptionDescription>(cloudWatchLoggingOptionDescriptions);
}
python
def save(self, *args, **kwargs):
    """
    **uid**: :code:`electiontype:{slug}`
    """
    self.uid = 'electiontype:{}'.format(self.slug)
    super(ElectionType, self).save(*args, **kwargs)
java
@SuppressWarnings("unchecked")
public static <E> E createRecord(Class<E> type, Schema schema) {
    // Don't instantiate SpecificRecords or interfaces.
    if (isGeneric(type) && !type.isInterface()) {
        if (GenericData.Record.class.equals(type)) {
            return (E) GenericData.get().newRecord(null, schema);
        }
        return (E) ReflectData.newInstance(type, schema);
    }
    return null;
}
java
public void addAnnotation(final Class<? extends Annotation> clazz) {
    if (field.isAnnotationPresent(clazz)) {
        addAnnotation(clazz, field.getAnnotation(clazz));
    }
}
python
def get_form():
    """
    Return the form to use for commenting.
    """
    global form_class
    from fluent_comments import appsettings
    if form_class is None:
        if appsettings.FLUENT_COMMENTS_FORM_CLASS:
            from django.utils.module_loading import import_string
            form_class = import_string(appsettings.FLUENT_COMMENTS_FORM_CLASS)
        else:
            from fluent_comments.forms import FluentCommentForm
            form_class = FluentCommentForm
    return form_class
python
def get_command_line(self):
    """
    Retrieves the command line with which the program was started.

    @rtype:  str
    @return: Command line string.

    @raise WindowsError: On error an exception is raised.
    """
    (Buffer, MaximumLength) = self.get_command_line_block()
    CommandLine = self.peek_string(Buffer, dwMaxSize=MaximumLength,
                                   fUnicode=True)
    gst = win32.GuessStringType
    if gst.t_default == gst.t_ansi:
        CommandLine = CommandLine.encode('cp1252')
    return CommandLine
python
def enterEvent(self, event):
    """
    Toggles the display for the tracker item.
    """
    item = self.trackerItem()
    if item:
        item.setVisible(True)
python
def PositionBox(position, *args, **kwargs):
    """Delegate the boxing."""
    obj = position.target
    return getattr(obj, 'box_class', Box)(obj, *args, **kwargs)
java
public void start() throws IOException {
    remote_sock.setSoTimeout(iddleTimeout);
    client_sock.setSoTimeout(iddleTimeout);

    log("Starting UDP relay server on " + relayIP + ":" + relayPort);
    log("Remote socket " + remote_sock.getLocalAddress() + ":" +
        remote_sock.getLocalPort());

    pipe_thread1 = new Thread(this, "pipe1");
    pipe_thread2 = new Thread(this, "pipe2");

    lastReadTime = System.currentTimeMillis();

    pipe_thread1.start();
    pipe_thread2.start();
}
python
def sliver_reader(filename_end_mask="*[0-9].mhd",
                  sliver_reference_dir="~/data/medical/orig/sliver07/training/",
                  read_orig=True, read_seg=False):
    """
    Generator for reading sliver data from directory structure.

    :param filename_end_mask: file selection can be controlled with this parameter
    :param sliver_reference_dir: directory with sliver .mhd and .raw files
    :param read_orig: read image data if set to True
    :param read_seg: read segmentation data if set to True
    :return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
    """
    sliver_reference_dir = op.expanduser(sliver_reference_dir)
    orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask)
    ref_fnames = glob.glob(sliver_reference_dir + "*seg" + filename_end_mask)

    orig_fnames.sort()
    ref_fnames.sort()
    for i in range(0, len(orig_fnames)):
        oname = orig_fnames[i]
        rname = ref_fnames[i]
        vs_mm = None
        ref_data = None
        orig_data = None
        if read_orig:
            orig_data, metadata = io3d.datareader.read(oname)
            vs_mm = metadata['voxelsize_mm']
        if read_seg:
            ref_data, metadata = io3d.datareader.read(rname)
            vs_mm = metadata['voxelsize_mm']

        import re
        numeric_label = re.search(r".*g(\d+)", oname).group(1)
        out = (numeric_label, vs_mm, oname, orig_data, rname, ref_data)
        yield out
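Hypothetical usage, assuming the io3d package and a local copy of the sliver07 training data at the default path:

for label, vs_mm, oname, orig, rname, ref in sliver_reader(read_orig=True,
                                                           read_seg=True):
    print(label, vs_mm, orig.shape, ref.shape)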
java
public static void addSpoiler(Message message, String lang, String hint) {
    message.addExtension(new SpoilerElement(lang, hint));
}
python
def generate_gdt(self, fs, gs, fs_size=0xFFFFFFFF, gs_size=0xFFFFFFFF):
    """
    Generate a GlobalDescriptorTable object and populate it using the
    values of the fs and gs registers.

    :param fs: value of the fs segment register
    :param gs: value of the gs segment register
    :param fs_size: size of the fs segment register
    :param gs_size: size of the gs segment register
    :return: gdt a GlobalDescriptorTable object
    """
    A_PRESENT = 0x80
    A_DATA = 0x10
    A_DATA_WRITABLE = 0x2
    A_PRIV_0 = 0x0
    A_DIR_CON_BIT = 0x4

    F_PROT_32 = 0x4

    S_GDT = 0x0
    S_PRIV_0 = 0x0
    GDT_ADDR = 0x4000
    GDT_LIMIT = 0x1000

    normal_entry = self._create_gdt_entry(
        0, 0xFFFFFFFF,
        A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT,
        F_PROT_32)
    stack_entry = self._create_gdt_entry(
        0, 0xFFFFFFFF,
        A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0,
        F_PROT_32)
    fs_entry = self._create_gdt_entry(
        fs, fs_size,
        A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT,
        F_PROT_32)
    gs_entry = self._create_gdt_entry(
        gs, gs_size,
        A_PRESENT | A_DATA | A_DATA_WRITABLE | A_PRIV_0 | A_DIR_CON_BIT,
        F_PROT_32)

    table = normal_entry + stack_entry + fs_entry + gs_entry
    gdt = (GDT_ADDR << 16 | GDT_LIMIT)

    selector = self._create_selector(1, S_GDT | S_PRIV_0)
    cs = selector
    ds = selector
    es = selector
    selector = self._create_selector(2, S_GDT | S_PRIV_0)
    ss = selector
    selector = self._create_selector(3, S_GDT | S_PRIV_0)
    fs = selector
    selector = self._create_selector(4, S_GDT | S_PRIV_0)
    gs = selector

    global_descriptor_table = GlobalDescriptorTable(GDT_ADDR, GDT_LIMIT, table,
                                                    gdt, cs, ds, es, ss, fs, gs)
    return global_descriptor_table
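The source does not show _create_gdt_entry, but an x86 segment descriptor is an 8-byte packed structure; the sketch below is a guess at what it does, following the standard Intel layout:

import struct

def create_gdt_entry(base, limit, access, flags):
    # Pack an 8-byte x86 descriptor: limit and base are split across the
    # structure, and the flags nibble shares a byte with limit[16:20].
    return struct.pack(
        "<HHBBBB",
        limit & 0xFFFF,                                # limit[0:16]
        base & 0xFFFF,                                 # base[0:16]
        (base >> 16) & 0xFF,                           # base[16:24]
        access,                                        # present/type/privilege bits
        ((flags & 0xF) << 4) | ((limit >> 16) & 0xF),  # flags + limit[16:20]
        (base >> 24) & 0xFF,                           # base[24:32]
    )

entry = create_gdt_entry(0, 0xFFFFF, 0x80 | 0x10 | 0x2, 0x4)
print(entry.hex())  # ffff000000924f00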
python
def console_map_ascii_codes_to_font(
    firstAsciiCode: int, nbCodes: int, fontCharX: int, fontCharY: int
) -> None:
    """Remap a contiguous set of codes to a contiguous set of tiles.

    Both the tile-set and character codes must be contiguous to use this
    function.  If this is not the case you may want to use
    :any:`console_map_ascii_code_to_font`.

    Args:
        firstAsciiCode (int): The starting character code.
        nbCodes (int): The length of the contiguous set.
        fontCharX (int): The starting X tile coordinate on the loaded tileset.
                         0 is the leftmost tile.
        fontCharY (int): The starting Y tile coordinate on the loaded tileset.
                         0 is the topmost tile.
    """
    lib.TCOD_console_map_ascii_codes_to_font(
        _int(firstAsciiCode), nbCodes, fontCharX, fontCharY
    )
python
def plot_confusion_matrix(cm, title="Confusion Matrix"):
    """Plots a confusion matrix for each subject
    """
    import matplotlib.pyplot as plt
    import math
    import numpy as np  # added: the body uses np.arange below

    plt.figure()
    subjects = len(cm)
    root_subjects = math.sqrt(subjects)
    cols = math.ceil(root_subjects)
    rows = math.ceil(subjects / cols)
    classes = cm[0].shape[0]
    for subject in range(subjects):
        plt.subplot(rows, cols, subject + 1)
        plt.imshow(cm[subject], interpolation='nearest', cmap=plt.cm.bone)
        plt.xticks(np.arange(classes), range(1, classes + 1))
        plt.yticks(np.arange(classes), range(1, classes + 1))
        cbar = plt.colorbar(ticks=[0.0, 1.0], shrink=0.6)
        cbar.set_clim(0.0, 1.0)
        plt.xlabel("Predicted")
        plt.ylabel("True label")
        plt.title("{0:d}".format(subject + 1))
    plt.suptitle(title)
    plt.tight_layout()
    plt.show()
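An example call with synthetic data (assumes numpy, and a matplotlib old enough to still provide cbar.set_clim; six subjects, four classes each):

import numpy as np

rng = np.random.default_rng(0)
cms = [rng.random((4, 4)) for _ in range(6)]
plot_confusion_matrix(cms, title="Synthetic Confusion Matrices")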
java
@Trivial
public static String doValidate(String value, ValueType valueType) {
    String vMsg = null;

    switch (valueType) {
        case VT_CLASS_RESOURCE:
            if (value.contains("\\")) {
                vMsg = "ANNO_UTIL_UNEXPECTED_BACKSLASH";
            } else if (!value.endsWith(".class")) {
                vMsg = "ANNO_UTIL_EXPECTED_CLASS";
            }
            break;

        case VT_CLASS_REFERENCE:
            if (value.contains("\\")) {
                vMsg = "ANNO_UTIL_UNEXPECTED_BACKSLASH";
            } else if (value.endsWith(".class")) {
                vMsg = "ANNO_UTIL_UNEXPECTED_CLASS";
            }
            break;

        // These three types share identical validation rules.
        case VT_CLASS_NAME:
        case VT_FIELD_NAME:
        case VT_METHOD_NAME:
            if (value.contains("\\")) {
                vMsg = "ANNO_UTIL_UNEXPECTED_BACKSLASH";
            } else if (value.contains("/")) {
                vMsg = "ANNO_UTIL_UNEXPECTED_FORWARD_SLASH";
            } else if (value.endsWith(".class")) {
                vMsg = "ANNO_UTIL_UNEXPECTED_CLASS";
            }
            break;

        case VT_OTHER:
            break;

        default:
            vMsg = "ANNO_UTIL_UNRECOGNIZED_TYPE";
            break;
    }

    return vMsg;
}
java
@Override
public CPDefinitionLocalization[] findByCPDefinitionId_PrevAndNext(
        long cpDefinitionLocalizationId, long CPDefinitionId,
        OrderByComparator<CPDefinitionLocalization> orderByComparator)
    throws NoSuchCPDefinitionLocalizationException {

    CPDefinitionLocalization cpDefinitionLocalization =
        findByPrimaryKey(cpDefinitionLocalizationId);

    Session session = null;

    try {
        session = openSession();

        CPDefinitionLocalization[] array = new CPDefinitionLocalizationImpl[3];

        array[0] = getByCPDefinitionId_PrevAndNext(session,
                cpDefinitionLocalization, CPDefinitionId, orderByComparator,
                true);

        array[1] = cpDefinitionLocalization;

        array[2] = getByCPDefinitionId_PrevAndNext(session,
                cpDefinitionLocalization, CPDefinitionId, orderByComparator,
                false);

        return array;
    }
    catch (Exception e) {
        throw processException(e);
    }
    finally {
        closeSession(session);
    }
}
java
public Matrix3d rotation(double angle, Vector3fc axis) {
    return rotation(angle, axis.x(), axis.y(), axis.z());
}
python
def autocomplete(request, app_label=None, model=None):
    """returns ``\\n`` delimited strings in the form <tag>||(#)

    GET params are ``q``, ``limit``, ``counts``. ``q`` is what the user has
    typed, ``limit`` defaults to 10, and ``counts`` can be "model", "all" or,
    if absent, will default to "all" - i.e. a site-wide count.
    """
    # get the relevant model if applicable
    if app_label and model:
        try:
            model = ContentType.objects.get(app_label=app_label, model=model)
        except:
            raise Http404
    else:
        model = None

    if not request.GET.has_key("q"):
        raise Http404
    else:
        q = request.GET["q"]

    # counts can be 'all', 'model' or 'None'
    counts = request.GET.get("counts", "all")
    limit = request.GET.get("limit", 10)

    if model:
        tags = Tag.objects.filter(
            items__content_type=model,
            name__istartswith=q
        ).distinct()[:limit]
    else:
        tags = Tag.objects.filter(
            name__istartswith=q
        ).distinct()[:limit]

    if counts == "all":
        l = sorted(list(tags),
                   lambda x, y: cmp(y.items.all().count(),
                                    x.items.all().count()))
        tag_list = "\n".join(['%s||(%s)' % (tag.name, tag.items.all().count())
                              for tag in l if tag])
    elif counts == "model":
        if model:
            l = sorted(list(tags),
                       lambda x, y: cmp(y.items.filter(content_type=model).count(),
                                        x.items.filter(content_type=model).count()))
            tag_list = "\n".join(
                ["%s||(%s)" % (tag.name,
                               tag.items.filter(content_type=model).count())
                 for tag in l if tag])
        else:
            raise Exception(
                'You asked for a model with GET but did not pass one to the url')
    else:
        tag_list = "\n".join([tag.name for tag in tags if tag])

    return HttpResponse(tag_list)
java
public static String findFirstWord(String buffer) {
    if (buffer.indexOf(SPACE_CHAR) < 0)
        return buffer;
    else {
        buffer = Parser.trim(buffer);
        int index = buffer.indexOf(SPACE_CHAR);
        if (index > 0)
            return buffer.substring(0, index);
        else
            return buffer;
    }
}
python
def qnormal(mu, sigma, q, random_state):
    '''
    mu: float or array_like of floats
    sigma: float or array_like of floats
    q: sample step
    random_state: an object of numpy.random.RandomState
    '''
    return np.round(normal(mu, sigma, random_state) / q) * q
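The quantization step in isolation: samples are snapped to the nearest multiple of q.

import numpy as np

samples = np.array([0.34, 1.21, -0.87, 2.05])  # pretend normal draws
q = 0.5
print(np.round(samples / q) * q)  # [ 0.5  1.  -1.   2. ]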
python
def filter_needs(needs, filter_string="", filter_parts=True,
                 merge_part_with_parent=True):
    """
    Filters given needs based on a given filter string.
    Returns all needs which pass the given filter.

    :param merge_part_with_parent: If True, need_parts inherit options from their parent need
    :param filter_parts: If True, need_parts also get filtered
    :param filter_string: string which gets evaluated against each need
    :param needs: list of needs which shall be filtered
    :return: list of needs passing the filter
    """
    if filter_string is None or filter_string == "":
        return needs

    found_needs = []
    for filter_need in needs:
        try:
            if filter_single_need(filter_need, filter_string):
                found_needs.append(filter_need)
        except Exception as e:
            logger.warning("Filter {0} not valid: Error: {1}".format(filter_string, e))

    return found_needs
python
def parse_keyring(self, namespace=None):
    """Find settings from keyring."""
    results = {}
    if not keyring:
        return results

    if not namespace:
        namespace = self.prog

    for option in self._options:
        secret = keyring.get_password(namespace, option.name)
        if secret:
            results[option.dest] = option.type(secret)

    return results
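A minimal sketch of the lookup it performs, assuming the keyring package is installed; the service and option names are placeholders:

import keyring

keyring.set_password("myprog", "api_token", "s3cr3t")
secret = keyring.get_password("myprog", "api_token")
if secret:
    print("found:", secret)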
python
def invoke(self, ctx):
    """Given a context, this invokes the attached callback (if it exists)
    in the right way.
    """
    _maybe_show_deprecated_notice(self)
    if self.callback is not None:
        return ctx.invoke(self.callback, **ctx.params)
python
def load_from_output_metadata(output_metadata):
    """Set Impact Function based on an output of an analysis's metadata.

    If possible, we will try to use layers already in the legend and avoid
    recreating new ones, so that, for instance, their style is kept.

    :param output_metadata: Metadata from an output layer.
    :type output_metadata: OutputLayerMetadata

    :returns: Impact Function based on the metadata.
    :rtype: ImpactFunction
    """
    impact_function = ImpactFunction()
    provenance = output_metadata['provenance_data']

    # Set exposure layer
    exposure_path = get_provenance(provenance, provenance_exposure_layer)
    if exposure_path:
        impact_function.exposure = load_layer_from_registry(exposure_path)
        set_provenance(
            provenance, provenance_exposure_layer_id,
            impact_function.exposure.id())

    # Set hazard layer
    hazard_path = get_provenance(provenance, provenance_hazard_layer)
    if hazard_path:
        impact_function.hazard = load_layer_from_registry(hazard_path)
        set_provenance(
            provenance, provenance_hazard_layer_id,
            impact_function.hazard.id())

    # Set aggregation layer
    aggregation_path = get_provenance(
        provenance, provenance_aggregation_layer)
    if aggregation_path:
        impact_function.aggregation = (
            load_layer_from_registry(aggregation_path))
        set_provenance(
            provenance, provenance_aggregation_layer_id,
            impact_function.aggregation.id())

    # Requested extent
    requested_extent = get_provenance(
        provenance, provenance_requested_extent)
    if requested_extent:
        impact_function.requested_extent = wkt_to_rectangle(requested_extent)

    # Analysis extent
    analysis_extent = get_provenance(provenance, provenance_analysis_extent)
    if analysis_extent:
        impact_function._analysis_extent = QgsGeometry.fromWkt(analysis_extent)

    # Data store
    data_store_uri = get_provenance(provenance, provenance_data_store_uri)
    if data_store_uri:
        impact_function.datastore = Folder(data_store_uri)

    # Name
    name = get_provenance(provenance, provenance_impact_function_name)
    impact_function._name = name

    # Title
    title = get_provenance(provenance, provenance_impact_function_title)
    impact_function._title = title

    # Start date time
    start_datetime = get_provenance(provenance, provenance_start_datetime)
    impact_function._start_datetime = start_datetime

    # End date time
    end_datetime = get_provenance(provenance, provenance_end_datetime)
    impact_function._end_datetime = end_datetime

    # Duration
    duration = get_provenance(provenance, provenance_duration)
    impact_function._duration = duration

    # Earthquake function
    earthquake_function = get_provenance(
        provenance, provenance_earthquake_function)
    impact_function._earthquake_function = earthquake_function

    # Use rounding
    impact_function.use_rounding = get_provenance(
        provenance, provenance_use_rounding)

    # Debug mode
    debug_mode = get_provenance(provenance, provenance_debug_mode)
    impact_function.debug_mode = debug_mode

    # Output layers

    # exposure_summary
    exposure_summary_path = get_provenance(
        provenance, provenance_layer_exposure_summary)
    if exposure_summary_path:
        impact_function._exposure_summary = load_layer_from_registry(
            exposure_summary_path)
        set_provenance(
            provenance, provenance_layer_exposure_summary_id,
            impact_function._exposure_summary.id())

    # aggregate_hazard_impacted
    aggregate_hazard_impacted_path = get_provenance(
        provenance, provenance_layer_aggregate_hazard_impacted)
    if aggregate_hazard_impacted_path:
        impact_function._aggregate_hazard_impacted = (
            load_layer_from_registry(aggregate_hazard_impacted_path))
        set_provenance(
            provenance, provenance_layer_aggregate_hazard_impacted_id,
            impact_function._aggregate_hazard_impacted.id())

    # aggregation_summary
    aggregation_summary_path = get_provenance(
        provenance, provenance_layer_aggregation_summary)
    if aggregation_summary_path:
        impact_function._aggregation_summary = load_layer_from_registry(
            aggregation_summary_path)
        set_provenance(
            provenance, provenance_layer_aggregation_summary_id,
            impact_function._aggregation_summary.id())

    # analysis_impacted
    analysis_impacted_path = get_provenance(
        provenance, provenance_layer_analysis_impacted)
    if analysis_impacted_path:
        impact_function._analysis_impacted = load_layer_from_registry(
            analysis_impacted_path)
        set_provenance(
            provenance, provenance_layer_analysis_impacted_id,
            impact_function._analysis_impacted.id())

    # exposure_summary_table
    exposure_summary_table_path = get_provenance(
        provenance, provenance_layer_exposure_summary_table)
    if exposure_summary_table_path:
        impact_function._exposure_summary_table = load_layer_from_registry(
            exposure_summary_table_path)
        set_provenance(
            provenance, provenance_layer_exposure_summary_table_id,
            impact_function._exposure_summary_table.id())

    # profiling, skipped in debug mode
    if not impact_function.debug_mode:
        profiling_path = get_provenance(provenance, provenance_layer_profiling)
        if profiling_path:
            impact_function._profiling_table = load_layer_from_registry(
                profiling_path)

    impact_function._output_layer_expected = \
        impact_function._compute_output_layer_expected()

    # crs
    crs = get_provenance(provenance, provenance_crs)
    if crs and not aggregation_path:
        impact_function._crs = QgsCoordinateReferenceSystem(crs)
    if aggregation_path:
        impact_function._crs = impact_function.aggregation.crs()

    # Set provenance data
    impact_function._provenance = provenance
    impact_function._provenance_ready = True

    return impact_function
python
def genty_repeat(count):
    """
    To use in conjunction with a TestClass wrapped with @genty.
    Runs the wrapped test 'count' times:

        @genty_repeat(count)
        def test_some_function(self):
            ...

    Can also wrap a test already decorated with @genty_dataset:

        @genty_repeat(3)
        @genty_dataset(True, False)
        def test_some_other_function(self, bool_value):
            ...

    This will run 6 tests in total, 3 each of the True and False cases.

    :param count: The number of times to run the test.
    :type count: `int`
    """
    if count < 0:
        raise ValueError(
            "Really? Can't have {0} iterations. Please pick a value >= 0."
            .format(count)
        )

    def wrap(test_method):
        test_method.genty_repeat_count = count
        return test_method
    return wrap
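The decorator itself only tags the function; the @genty class decorator reads the attribute later. A quick check against the code above:

@genty_repeat(3)
def test_something(self):
    pass

print(test_something.genty_repeat_count)  # 3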
java
private void initialize() {
    this.setCursor(new java.awt.Cursor(java.awt.Cursor.WAIT_CURSOR));
    this.setContentPane(getJPanel());
    if (Model.getSingleton().getOptionsParam().getViewParam().getWmUiHandlingOption() == 0) {
        this.setSize(282, 118);
    }
    this.setDefaultCloseOperation(javax.swing.WindowConstants.DO_NOTHING_ON_CLOSE);
    this.setResizable(false);
}
java
private CompletableFuture<Void> executeConditionally(Function<Duration, CompletableFuture<Long>> indexOperation,
                                                     Duration timeout) {
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return UPDATE_RETRY
            .runAsync(() -> executeConditionallyOnce(indexOperation, timer), this.executor)
            .exceptionally(this::handleIndexOperationException)
            .thenAccept(Callbacks::doNothing);
}
python
def rename(self, node):
    """
    Renames given Node associated path.

    :param node: Node.
    :type node: ProjectNode or DirectoryNode or FileNode
    :return: Method success.
    :rtype: bool
    """
    source = node.path
    base_name, state = QInputDialog.getText(self, "Rename", "Enter your new name:",
                                            text=os.path.basename(source))
    if not state:
        return False

    base_name = foundations.strings.to_string(base_name)
    if base_name == os.path.basename(source):
        return False

    parent_directory = os.path.dirname(source)
    target = os.path.join(parent_directory, base_name)

    if self.__script_editor.model.is_authoring_node(node):
        if not foundations.common.path_exists(source):
            LOGGER.info("{0} | Renaming '{1}' untitled file to '{2}'!".format(
                self.__class__.__name__, source, target))
            self.__set_authoring_nodes(source, target)
            return True

    if base_name not in os.listdir(parent_directory):
        if node.family == "File":
            LOGGER.info("{0} | Renaming '{1}' file to '{2}'!".format(
                self.__class__.__name__, source, target))
            self.__rename_file(source, target)
        elif node.family == "Directory":
            LOGGER.info("{0} | Renaming '{1}' directory to '{2}'!".format(
                self.__class__.__name__, source, target))
            self.__rename_directory(source, target)
        elif node.family == "Project":
            LOGGER.info("{0} | Renaming '{1}' project to '{2}'!".format(
                self.__class__.__name__, source, target))
            self.__rename_project(source, target)
    else:
        self.__raise_file_system_exception(base_name, parent_directory)

    return True
java
public static Function<Flux<Long>, Publisher<?>> instant() {
    return iterations -> iterations
        .flatMap(iteration -> Mono
            .just(0L)
            .doOnSubscribe(logDelay(Duration.ZERO)), 1);
}
python
def log(self, logfile=None):
    """Log the ASCII traceback into a file object."""
    if logfile is None:
        logfile = sys.stderr
    tb = self.plaintext.encode('utf-8', 'replace').rstrip() + '\n'
    logfile.write(tb)
java
@Override
public <T> List<T> dynamicQuery(DynamicQuery dynamicQuery) {
    return cpDefinitionInventoryPersistence.findWithDynamicQuery(dynamicQuery);
}
java
public EdgeIteratorState getOtherContinue(double prevLat, double prevLon, double prevOrientation) {
    int tmpSign;
    for (EdgeIteratorState edge : allowedOutgoingEdges) {
        GHPoint point = InstructionsHelper.getPointForOrientationCalculation(edge, nodeAccess);
        tmpSign = InstructionsHelper.calculateSign(prevLat, prevLon,
                point.getLat(), point.getLon(), prevOrientation);
        if (Math.abs(tmpSign) <= 1) {
            return edge;
        }
    }
    return null;
}