language: stringclasses (2 values)
func_code_string: stringlengths (63 – 466k)
java
@GetMapping(path = {"/otp/qrgen"})
public void generate(final HttpServletResponse response, final HttpServletRequest request) throws Exception {
    response.setContentType("image/png");
    val key = request.getParameter("key");
    QRUtils.generateQRCode(response.getOutputStream(), key, QRUtils.WIDTH_LARGE, QRUtils.WIDTH_LARGE);
}
python
def _encode_time(mtime: float):
    """Encode an mtime float as a 32-bit FAT time."""
    dt = arrow.get(mtime)
    dt = dt.to("local")
    date_val = ((dt.year - 1980) << 9) | (dt.month << 5) | dt.day
    secs = dt.second + dt.microsecond / 10**6
    time_val = (dt.hour << 11) | (dt.minute << 5) | math.floor(secs / 2)
    return (date_val << 16) | time_val
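The bit layout (date in the high 16 bits, time in the low 16, seconds at 2-second resolution) can be sanity-checked by unpacking the result with plain bit operations. A minimal sketch, assuming arrow and math are importable so _encode_time runs:

# Decode a 32-bit FAT value back into its fields (inverse of _encode_time).
packed = _encode_time(1_600_000_000.0)
date_val, time_val = packed >> 16, packed & 0xFFFF
year = 1980 + (date_val >> 9)
month = (date_val >> 5) & 0x0F
day = date_val & 0x1F
hour = time_val >> 11
minute = (time_val >> 5) & 0x3F
second = (time_val & 0x1F) * 2  # FAT stores seconds with 2-second resolution
print(year, month, day, hour, minute, second)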
python
def list_projects(root, backend=os.listdir):
    """List projects at `root`

    Arguments:
        root (str): Absolute path to the `be` root directory,
            typically the current working directory.

    """
    projects = list()
    for project in sorted(backend(root)):
        abspath = os.path.join(root, project)
        if not isproject(abspath):
            continue
        projects.append(project)
    return projects
java
public Boolean deleteEntityPost(DeleteEntityRequest request) throws ApiException {
    ApiResponse<Boolean> resp = deleteEntityPostWithHttpInfo(request);
    return resp.getData();
}
java
public static Descriptor getDescriptor(Class<?> cls) throws IOException {
    String idl = ProtobufIDLGenerator.getIDL(cls);
    ProtoFile file = ProtoParser.parse(ProtobufIDLProxy.DEFAULT_FILE_NAME, idl);

    FileDescriptorProtoPOJO fileDescriptorProto = new FileDescriptorProtoPOJO();
    fileDescriptorProto.name = ProtobufIDLProxy.DEFAULT_FILE_NAME;
    fileDescriptorProto.pkg = file.packageName();
    fileDescriptorProto.dependencies = file.dependencies();
    fileDescriptorProto.publicDependency = convertList(file.publicDependencies());
    fileDescriptorProto.weakDependency = null; // XXX

    fileDescriptorProto.messageTypes = new ArrayList<DescriptorProtoPOJO>();
    fileDescriptorProto.enumTypes = new ArrayList<EnumDescriptorProtoPOJO>();
    fileDescriptorProto.services = new ArrayList<ServiceDescriptorProtoPOJO>();

    Set<String> messageSet = new HashSet<String>();
    Set<String> enumSet = new HashSet<String>();

    List<TypeElement> typeElements = file.typeElements();
    if (typeElements != null) {
        for (TypeElement typeElement : typeElements) {
            if (typeElement instanceof MessageElement) {
                messageSet.add(typeElement.name());
            } else if (typeElement instanceof EnumElement) {
                enumSet.add(typeElement.name());
            }
        }

        for (TypeElement typeElement : typeElements) {
            if (typeElement instanceof MessageElement) {
                fileDescriptorProto.messageTypes.add(getDescritorProtoPOJO(fileDescriptorProto,
                        (MessageElement) typeElement, messageSet, enumSet));
            } else if (typeElement instanceof EnumElement) {
                fileDescriptorProto.enumTypes.add(getDescritorProtoPOJO(fileDescriptorProto,
                        (EnumElement) typeElement, messageSet, enumSet));
            }
        }
    }

    FileDescriptorProto fileproto;
    try {
        byte[] bs = descriptorCodec.encode(fileDescriptorProto);
        fileproto = FileDescriptorProto.parseFrom(bs);
    } catch (InvalidProtocolBufferException e) {
        throw new IOException("Failed to parse protocol buffer descriptor for generated code.", e);
    }

    FileDescriptor fileDescriptor;
    try {
        fileDescriptor = FileDescriptor.buildFrom(fileproto,
                new com.google.protobuf.Descriptors.FileDescriptor[] {});
    } catch (DescriptorValidationException e) {
        throw new IOException(e.getMessage(), e);
    }

    return fileDescriptor.getMessageTypes().get(0);
}
python
def draw_axes(self):
    """
    Removes all existing series and re-draws the axes.

    :return: None
    """
    self.canvas.delete('all')
    rect = 50, 50, self.w - 50, self.h - 50
    self.canvas.create_rectangle(rect, outline="black")
    for x in self.frange(0, self.x_max - self.x_min + 1, self.x_tick):
        value = Decimal(self.x_min + x)
        if self.x_min <= value <= self.x_max:
            x_step = (self.px_x * x) / self.x_tick
            coord = 50 + x_step, self.h - 50, 50 + x_step, self.h - 45
            self.canvas.create_line(coord, fill="black")
            coord = 50 + x_step, self.h - 40
            label = round(Decimal(self.x_min + x), 1)
            self.canvas.create_text(coord, fill="black", text=label)
    for y in self.frange(0, self.y_max - self.y_min + 1, self.y_tick):
        value = Decimal(self.y_max - y)
        if self.y_min <= value <= self.y_max:
            y_step = (self.px_y * y) / self.y_tick
            coord = 45, 50 + y_step, 50, 50 + y_step
            self.canvas.create_line(coord, fill="black")
            coord = 35, 50 + y_step
            label = round(value, 1)
            self.canvas.create_text(coord, fill="black", text=label)
java
public java.rmi.Remote getPort(Class serviceEndpointInterface) throws javax.xml.rpc.ServiceException {
    try {
        if (com.google.api.ads.adwords.axis.v201809.mcm.ManagedCustomerServiceInterface.class.isAssignableFrom(serviceEndpointInterface)) {
            com.google.api.ads.adwords.axis.v201809.mcm.ManagedCustomerServiceSoapBindingStub _stub =
                    new com.google.api.ads.adwords.axis.v201809.mcm.ManagedCustomerServiceSoapBindingStub(
                            new java.net.URL(ManagedCustomerServiceInterfacePort_address), this);
            _stub.setPortName(getManagedCustomerServiceInterfacePortWSDDServiceName());
            return _stub;
        }
    } catch (java.lang.Throwable t) {
        throw new javax.xml.rpc.ServiceException(t);
    }
    throw new javax.xml.rpc.ServiceException("There is no stub implementation for the interface: "
            + (serviceEndpointInterface == null ? "null" : serviceEndpointInterface.getName()));
}
python
def not_as_alias_handler(names_list):
    """Returns a list of names ignoring any aliases."""
    list_ = list()
    for alias in names_list:
        list_.append(alias.name)
    return list_
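Here names_list is a list of ast.alias nodes, so the function is easy to exercise with the standard ast module; a self-contained check, given the function above:

import ast

tree = ast.parse("import numpy as np, os")
print(not_as_alias_handler(tree.body[0].names))  # ['numpy', 'os'] -- aliases dropped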
python
def register_pb_devices(num_pbs: int = 100):
    """Register PB devices.

    Note(BMo): Ideally we do not want to register any devices here.
        There does not seem to be a way to create a device server with no
        registered devices in Tango. This is (probably) because Tango
        devices must have been registered before the server starts ...
    """
    tango_db = Database()
    LOG.info("Registering PB devices:")
    dev_info = DbDevInfo()
    # pylint: disable=protected-access
    dev_info._class = 'ProcessingBlockDevice'
    dev_info.server = 'processing_block_ds/1'

    for index in range(num_pbs):
        dev_info.name = 'sip_sdp/pb/{:05d}'.format(index)
        LOG.info("\t%s", dev_info.name)
        tango_db.add_device(dev_info)
java
public Connection<?> completeConnection(OAuth2ConnectionFactory<?> connectionFactory, NativeWebRequest request) {
    if (connectionFactory.supportsStateParameter()) {
        verifyStateParameter(request);
    }

    String code = request.getParameter("code");
    try {
        AccessGrant accessGrant = connectionFactory.getOAuthOperations().exchangeForAccess(code, callbackUrl(request), null);
        return connectionFactory.createConnection(accessGrant);
    } catch (HttpClientErrorException e) {
        logger.warn("HttpClientErrorException while completing connection: " + e.getMessage());
        logger.warn("      Response body: " + e.getResponseBodyAsString());
        throw e;
    }
}
java
public Object deserialize(String data) {
    if ((data == null) || (data.length() == 0)) {
        return null;
    }

    ObjectInputStream ois = null;
    ByteArrayInputStream bis = null;
    try {
        bis = new ByteArrayInputStream(Base64.decodeBase64(data.getBytes()));
        ois = new ObjectInputStream(bis);
        return ois.readObject();
    } catch (ClassNotFoundException e) {
        LOGGER.error("Can't deserialize data from Base64", e);
        throw new IllegalArgumentException(e);
    } catch (IOException e) {
        LOGGER.error("Can't deserialize data from Base64", e);
        throw new IllegalArgumentException(e);
    } catch (Exception e) {
        LOGGER.error("Can't deserialize data from Base64", e);
        throw new IllegalArgumentException(e);
    } finally {
        try {
            if (ois != null) {
                ois.close();
            }
        } catch (Exception e) {
            LOGGER.error("Can't close ObjectInputStream used to deserialize data from Base64", e);
        }
    }
}
python
def find_ds_mapping(data_source, es_major_version):
    """
    Find the mapping given a perceval data source

    :param data_source: name of the perceval data source
    :param es_major_version: string with the major version for Elasticsearch
    :return: a dict with the mappings (raw and enriched)
    """
    mappings = {"raw": None, "enriched": None}

    # Backend connectors
    connectors = get_connectors()

    try:
        raw_klass = connectors[data_source][1]
        enrich_klass = connectors[data_source][2]
    except KeyError:
        print("Data source not found", data_source)
        sys.exit(1)

    # Mapping for raw index
    backend = raw_klass(None)
    if backend:
        mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items'])
        mappings['raw'] = [mapping, find_general_mappings(es_major_version)]

    # Mapping for enriched index
    backend = enrich_klass(None)
    if backend:
        mapping = json.loads(backend.mapping.get_elastic_mappings(es_major_version)['items'])
        mappings['enriched'] = [mapping, find_general_mappings(es_major_version)]

    return mappings
java
public boolean add(AbstractRslNode node) {
    if (_specifications == null) {
        _specifications = new LinkedList();
    }
    return _specifications.add(node);
}
java
@Override
public final void requestUpdate(final Transaction transaction) throws ProtocolException, TransactionException, SevereMessageStoreException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "requestUpdate", transaction);

    PersistentTransaction mstran = (PersistentTransaction) transaction;
    cmdRequestUpdate(mstran);

    Task task = new UpdateTask(this);
    mstran.addWork(task);

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "requestUpdate");
}
java
public void newName2ndData(String fieldName, String newFieldName, Object newFieldValue) throws Exception {
    this.addFieldValue(newFieldName, newFieldValue); // convert the long timestamp into a Date value
    // ignore the old field name
    if (!fieldName.equals(newFieldName))
        this.addIgnoreFieldMapping(fieldName);
}
python
def run(self, switch_queue):
    """
    Every controller must provide a ``run`` method as its startup entry point.
    """
    self.switch_queue = switch_queue
    self.quit = False

    Thread(target=self._watchdog_queue).start()
    Thread(target=self._watchdog_time).start()
python
def parse_binary_descriptor(bindata, sensor_log=None):
    """Convert a binary streamer descriptor into a string descriptor.

    Binary streamer descriptors are 14-byte binary structures that encode all
    information needed to create a streamer.  They are used to communicate
    that information to an embedded device in an efficient format.  This
    function exists to turn such a compressed streamer description back
    into an understandable string.

    Args:
        bindata (bytes): The binary streamer descriptor that we want
            to understand.
        sensor_log (SensorLog): Optional sensor_log to add this streamer to
            an underlying data store.

    Returns:
        DataStreamer: A DataStreamer object representing the streamer.
            You can get a useful human readable string by calling str()
            on the return value.
    """
    if len(bindata) != 14:
        raise ArgumentError("Invalid length of binary data in streamer descriptor",
                            length=len(bindata), expected=14, data=bindata)

    dest_tile, stream_id, trigger, format_code, type_code = struct.unpack("<8sHBBBx", bindata)

    dest_id = SlotIdentifier.FromEncoded(dest_tile)
    selector = DataStreamSelector.FromEncoded(stream_id)

    format_name = DataStreamer.KnownFormatCodes.get(format_code)
    type_name = DataStreamer.KnownTypeCodes.get(type_code)

    if format_name is None:
        raise ArgumentError("Unknown format code", code=format_code,
                            known_codes=DataStreamer.KnownFormatCodes)
    if type_name is None:
        raise ArgumentError("Unknown type code", code=type_code,
                            known_codes=DataStreamer.KnownTypeCodes)

    with_other = None
    if trigger & (1 << 7):
        auto = False
        with_other = trigger & ((1 << 7) - 1)
    elif trigger == 0:
        auto = False
    elif trigger == 1:
        auto = True
    else:
        raise ArgumentError("Unknown trigger type for streamer", trigger_code=trigger)

    return DataStreamer(selector, dest_id, format_name, auto, type_name,
                        with_other=with_other, sensor_log=sensor_log)
java
private void insertDefaultDifferentialListServers() {
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://m.picn.de/f/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://m1.picn.de/f/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://m2.picn.de/f/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://download10.onlinetvrecorder.com/mediathekview/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://mediathekview.jankal.me/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://verteiler1.mediathekview.de/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://verteiler2.mediathekview.de/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
    listeFilmlistenUrls_diff.add(new DatenFilmlisteUrl("http://verteiler3.mediathekview.de/Filmliste-diff.xz", DatenFilmlisteUrl.SERVER_ART_DIFF));
}
python
def set_flagged(self, *, start_date=None, due_date=None):
    """ Sets this message as flagged

    :param start_date: the start datetime of the followUp
    :param due_date: the due datetime of the followUp
    """
    self.__status = Flag.Flagged
    start_date = start_date or dt.datetime.now()
    due_date = due_date or dt.datetime.now()
    if start_date.tzinfo is None:
        start_date = self.protocol.timezone.localize(start_date)
    if due_date.tzinfo is None:
        due_date = self.protocol.timezone.localize(due_date)
    self.__start = start_date
    self.__due_date = due_date
    self._track_changes()
python
def simulate_one(fw, name, size):
    """ Simulate a random sequence with name and size """
    from random import choice
    seq = Seq(''.join(choice('ACGT') for _ in xrange(size)))
    s = SeqRecord(seq, id=name, description="Fake sequence")
    SeqIO.write([s], fw, "fasta")
python
def _proxy(self):
    """
    Generate an instance context for the instance; the context is capable of
    performing various actions. All instance actions are proxied to the context.

    :returns: FieldContext for this FieldInstance
    :rtype: twilio.rest.autopilot.v1.assistant.task.field.FieldContext
    """
    if self._context is None:
        self._context = FieldContext(
            self._version,
            assistant_sid=self._solution['assistant_sid'],
            task_sid=self._solution['task_sid'],
            sid=self._solution['sid'],
        )
    return self._context
python
def get_nameserver_detail_input_rbridge_id(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_nameserver_detail = ET.Element("get_nameserver_detail")
    config = get_nameserver_detail
    input = ET.SubElement(get_nameserver_detail, "input")
    rbridge_id = ET.SubElement(input, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
java
public Object remove(Object key) {
    Object retVal = null;

    if (null == key) {
        throw new IllegalArgumentException("key must not be null");
    }

    if (this.containsKey(key)) {
        retVal = super.remove(key);
        for (int i = 0; i < this.order.size(); i++) {
            Object obj = this.order.get(i);
            if (obj.equals(key)) {
                this.order.remove(i);
                break;
            }
        }
    }
    return retVal;
}
python
def default_parser() -> argparse.ArgumentParser:
    """Create a parser for CLI arguments and options."""
    parser = argparse.ArgumentParser(
        prog=CONSOLE_SCRIPT,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    build_parser(parser)
    return parser
python
def to_sif(graph, output_path):
    """
    Generates Simple Interaction Format output file from provided graph.

    The SIF specification is described
    `here <http://wiki.cytoscape.org/Cytoscape_User_Manual/Network_Formats>`_.

    :func:`.to_sif` will generate a .sif file describing the network, and a few
    .eda and .noa files containing edge and node attributes, respectively. These
    are equivalent to tab-delimited tables, and can be imported as such in
    Cytoscape 3.0.

    Parameters
    ----------
    graph : networkx.Graph
        The Graph to be exported to SIF.
    output_path : str
        Full path, including filename (without suffix).
        e.g. using "./graphFolder/graphFile" will result in a SIF file at
        ./graphFolder/graphFile.sif, and corresponding .eda and .noa files.
    """
    warnings.warn("Removed in 0.8. Use write_csv instead.", DeprecationWarning)

    graph = _strip_list_attributes(graph)

    if output_path[-4:] == ".sif":
        output_path = output_path[:-4]

    if nx.number_of_nodes(graph) == 0:
        # write an empty file (for a non-existent graph)
        f = open(output_path + ".sif", "w")
        f.write("")
        f.close()
    else:
        # write node files
        nodes = graph.nodes(data=True)
        for node in nodes:
            node_name = node[0]
            node_attribs = node[1]
            for key, value in node_attribs.iteritems():
                # generate a node attribute file for each node attribute
                if node == nodes[0]:
                    # first node, overwrite file
                    with open(output_path + "_" + key + ".noa", "w") as f:
                        f.write(unicode(key) + '\n')
                        f.write(unicode(node_name).replace(" ", "_") + " = " + unicode(value) + "\n")
                else:
                    # not first, append file
                    with open(output_path + "_" + key + ".noa", "a") as f:
                        f.write(unicode(node_name).replace(" ", "_") + " = " + unicode(value) + "\n")

        if nx.number_of_edges(graph) == 0:
            # write an empty graph to a .sif file (just its nodes)
            for node in nodes:
                node_name = node[0]
                if node == nodes[0]:
                    # first node, overwrite file
                    with open(output_path + ".sif", "w") as f:
                        f.write(unicode(node_name).replace(" ", "_") + "\n")
                else:
                    # not first, append file
                    with open(output_path + ".sif", "a") as f:
                        f.write(unicode(node_name).replace(" ", "_") + "\n")
        else:
            # write the graph to a .sif file as well as other edge attribute files
            if graph.is_multigraph():
                # then the NetworkX graph supports multiple interaction
                # types just like the .sif format
                edges = graph.edges(data=True, keys=True)
                edge_attribs = set()
                for edge in edges:
                    for key in edge[3].iterkeys():
                        edge_attribs.add(key)

                # create edge attribute files
                for attrib in edge_attribs:
                    str_attrib = unicode(attrib)
                    with open(output_path + '_' + str_attrib + ".eda", "w") as f:
                        f.write(unicode(attrib) + "\n")

                # add data to eda files and write sif file
                with open(output_path + '.sif', 'w') as f:
                    for edge in edges:
                        node1 = unicode(edge[0]).replace(" ", "_")
                        node2 = unicode(edge[1]).replace(" ", "_")
                        intr_type = unicode(edge[2]).replace(" ", "_")
                        sif_line = node1 + ' ' + intr_type + ' ' + node2 + '\n'
                        f.write(sif_line)
                        for attrib, value in edge[3].iteritems():
                            eda_line = (node1 + ' (' + intr_type + ') ' + node2 +
                                        ' = ' + unicode(value) + '\n')
                            with open(output_path + '_' + unicode(attrib) + '.eda', 'a') as g:
                                g.write(eda_line)
            else:
                # then we support only one interaction type 'rel'
                edges = graph.edges(data=True)
                edge_attribs = set()
                for edge in edges:
                    for key in edge[2].iterkeys():
                        edge_attribs.add(key)

                # create edge attribute files
                for attrib in edge_attribs:
                    str_attrib = unicode(attrib)
                    with open(output_path + '_' + str_attrib + ".eda", "w") as f:
                        f.write(unicode(attrib) + "\n")

                # add data to eda files and write sif file
                with open(output_path + '.sif', 'w') as f:
                    for edge in edges:
                        node1 = unicode(edge[0]).replace(" ", "_")
                        node2 = unicode(edge[1]).replace(" ", "_")
                        intr_type = 'rel'
                        sif_line = node1 + ' ' + intr_type + ' ' + node2 + '\n'
                        f.write(sif_line)
                        for attrib, value in edge[2].iteritems():
                            eda_line = (node1 + ' (' + intr_type + ') ' + node2 +
                                        ' = ' + unicode(value) + '\n')
                            with open(output_path + '_' + unicode(attrib) + '.eda', 'a') as g:
                                g.write(eda_line)
java
protected String calculatePropertyName(String propertyName) {
    String lastAlias = getLastAlias();
    if (lastAlias != null) {
        return lastAlias + '.' + propertyName;
    }
    return propertyName;
}
python
def get_classes(module_label, classnames):
    """ Imports a set of classes from a given module.

    Usage::

        get_classes('forum.models', ['Forum', 'ForumReadTrack', ])

    """
    app_label = module_label.split('.')[0]
    app_module_path = _get_app_module_path(module_label)
    if not app_module_path:
        raise AppNotFoundError('No app found matching \'{}\''.format(module_label))

    # Determines the full module path by appending the module label
    # to the base package path of the considered application.
    module_path = app_module_path
    if '.' in app_module_path:
        base_package = app_module_path.rsplit('.' + app_label, 1)[0]
        module_path = '{}.{}'.format(base_package, module_label)

    # Try to import this module from the related app that is specified
    # in the Django settings.
    local_imported_module = _import_module(module_path, classnames)

    # If the module we tried to import is not located inside the machina
    # vanilla apps, try to import it from the corresponding machina app.
    machina_imported_module = None
    if not app_module_path.startswith('machina.apps'):
        machina_imported_module = _import_module(
            '{}.{}'.format('machina.apps', module_label), classnames,
        )

    if local_imported_module is None and machina_imported_module is None:
        raise AppNotFoundError('Error importing \'{}\''.format(module_path))

    # Any local module is prioritized over the corresponding machina module
    imported_modules = [
        m for m in (local_imported_module, machina_imported_module) if m is not None
    ]
    return _pick_up_classes(imported_modules, classnames)
java
public PublicIPPrefixInner updateTags(String resourceGroupName, String publicIpPrefixName) {
    return updateTagsWithServiceResponseAsync(resourceGroupName, publicIpPrefixName).toBlocking().last().body();
}
java
@Nullable
public final Folder findTargetFolder(@NotEmpty final String generatorName, @NotEmpty final String artifactName) {
    Contract.requireArgNotEmpty("generatorName", generatorName);
    Contract.requireArgNotEmpty("artifactName", artifactName);
    if (parent == null) {
        throw new IllegalStateException("Parent for generators is not set");
    }
    try {
        return parent.findTargetFolder(generatorName, artifactName);
    } catch (final ProjectNameNotDefinedException ex) {
        throw new RuntimeException("Couldn't determine target folder for generator '" + generatorName + "' and artifact '" + artifactName + "'", ex);
    } catch (final ArtifactNotFoundException ex) {
        throw new RuntimeException("Couldn't determine target folder for generator '" + generatorName + "' and artifact '" + artifactName + "'", ex);
    } catch (final FolderNameNotDefinedException ex) {
        throw new RuntimeException("Couldn't determine target folder for generator '" + generatorName + "' and artifact '" + artifactName + "'", ex);
    } catch (final GeneratorNotFoundException ex) {
        throw new RuntimeException("Couldn't determine target folder for generator '" + generatorName + "' and artifact '" + artifactName + "'", ex);
    } catch (final ProjectNotFoundException ex) {
        throw new RuntimeException("Couldn't determine target folder for generator '" + generatorName + "' and artifact '" + artifactName + "'", ex);
    } catch (final FolderNotFoundException ex) {
        throw new RuntimeException("Couldn't determine target folder for generator '" + generatorName + "' and artifact '" + artifactName + "'", ex);
    }
}
java
public static Reader uriReader(URI uri) throws IOException {
    return new BufferedReader(new InputStreamReader(uri.toURL().openStream(), StandardCharsets.UTF_8));
}
java
public int delete(DatabaseConnection databaseConnection, T data, ObjectCache objectCache) throws SQLException {
    if (mappedDelete == null) {
        mappedDelete = MappedDelete.build(dao, tableInfo);
    }
    int result = mappedDelete.delete(databaseConnection, data, objectCache);
    if (dao != null && !localIsInBatchMode.get()) {
        dao.notifyChanges();
    }
    return result;
}
java
@Override
public int getInt(final int index) {
    final int i = this.lowerBoundary + index;
    checkIndex(i);
    checkIndex(i + 3);
    return (this.buffer[i] & 0xff) << 24
            | (this.buffer[i + 1] & 0xff) << 16
            | (this.buffer[i + 2] & 0xff) << 8
            | (this.buffer[i + 3] & 0xff) << 0;
}
java
synchronized void line(String line, long arrivalTime) {
    if (count.incrementAndGet() % logCountFrequency == 0)
        log.info("count=" + count.get() + ",buffer size=" + lines.size());

    NmeaMessage nmea;
    try {
        nmea = NmeaUtil.parseNmea(line);
    } catch (NmeaMessageParseException e) {
        listener.invalidNmea(line, arrivalTime, e.getMessage());
        return;
    }

    // if is multi line message then don't report to listener till last
    // message in sequence has been received.
    if (!nmea.isSingleSentence()) {
        Optional<List<NmeaMessage>> messages = nmeaBuffer.add(nmea);
        if (messages.isPresent()) {
            Optional<NmeaMessage> joined = AisNmeaBuffer.concatenateMessages(messages.get());
            if (joined.isPresent()) {
                if (joined.get().getUnixTimeMillis() != null)
                    listener.message(joined.get().toLine(), joined.get().getUnixTimeMillis());
                else
                    listener.message(joined.get().toLine(), arrivalTime);
            }
            // TODO else report error, might need to change signature of
            // listener to handle problem with multi-line message
        }
        return;
    }

    if (nmea.getUnixTimeMillis() != null) {
        listener.message(line, nmea.getUnixTimeMillis());
        return;
    }

    if (!matchWithTimestampLine) {
        listener.message(line, arrivalTime);
        return;
    }

    if (!NmeaUtil.isValid(line))
        return;

    addLine(line, arrivalTime);
    log.debug("buffer lines=" + lines.size());

    Integer earliestTimestampLineIndex = getEarliestTimestampLineIndex(lines);
    Set<Integer> removeThese;
    if (earliestTimestampLineIndex != null) {
        removeThese = matchWithClosestAisMessageIfBufferLargeEnough(arrivalTime, earliestTimestampLineIndex);
    } else
        removeThese = findExpiredIndexesBeforeIndex(lastIndex());

    TreeSet<Integer> orderedIndexes = Sets.newTreeSet(removeThese);
    for (int index : orderedIndexes.descendingSet()) {
        removeLineWithIndex(index);
    }
}
python
def nowrange(self, col, timeframe):
    """
    Set the main dataframe with rows within a date range from now.

    ex: ds.nowrange("Date", "3D") for a 3-day range.
    Units are: S, H, D, W, M, Y
    """
    df = self._nowrange(col, timeframe)
    if df is None:
        self.err("Can not select range data from now")
        return
    self.df = df
python
def to_astropy_table(llwtable, apytable, copy=False, columns=None,
                     use_numpy_dtypes=False, rename=None):
    """Convert a :class:`~ligo.lw.table.Table` to an `~astropy.table.Table`

    This method is designed as an internal method to be attached to
    :class:`~ligo.lw.table.Table` objects as `__astropy_table__`.

    Parameters
    ----------
    llwtable : :class:`~ligo.lw.table.Table`
        the LIGO_LW table to convert from

    apytable : `type`
        `astropy.table.Table` class or subclass

    copy : `bool`, optional
        if `True` copy the input data, otherwise return a reference,
        default: `False`

    columns : `list` of `str`, optional
        the columns to populate, if not given, all columns present in the
        table are mapped

    use_numpy_dtypes : `bool`, optional
        force the use of numpy dtypes for the columns

    rename : `dict`, optional
        dict of ('old name', 'new name') pairs to rename columns
        from the original LIGO_LW table

    Returns
    -------
    table : `EventTable`
        a view of the original data
    """
    # set default keywords
    if rename is None:
        rename = {}
    if columns is None:
        columns = llwtable.columnnames

    # extract columns from LIGO_LW table as astropy.table.Column
    data = []
    for colname in columns:
        arr = _get_column(llwtable, colname)

        # transform to astropy.table.Column
        copythis = isinstance(arr, numpy.ndarray)
        data.append(to_astropy_column(arr, apytable.Column, copy=copythis,
                                      use_numpy_dtype=use_numpy_dtypes,
                                      name=rename.get(colname, colname)))

    # build table and return
    return apytable(data, copy=False, meta={'tablename': str(llwtable.Name)})
java
public MPSRGLength createMPSRGLengthFromString(EDataType eDataType, String initialValue) {
    MPSRGLength result = MPSRGLength.get(initialValue);
    if (result == null)
        throw new IllegalArgumentException("The value '" + initialValue + "' is not a valid enumerator of '" + eDataType.getName() + "'");
    return result;
}
java
@Override
public void set(int row, int col, double value) {
    if (col < 0 || col >= numCols || row < 0 || row >= numRows) {
        throw new IllegalArgumentException("Specified element is out of bounds: (" + row + " , " + col + ")");
    }
    data[row * numCols + col] = value;
}
python
def log_stats(self):
    """Output the stats to the LOGGER."""
    if not self.stats.get('counts'):
        if self.consumers:
            LOGGER.info('Did not receive any stats data from children')
        return

    if self.poll_data['processes']:
        LOGGER.warning('%i process(es) did not respond with stats: %r',
                       len(self.poll_data['processes']),
                       self.poll_data['processes'])

    if self.stats['counts']['processes'] > 1:
        LOGGER.info('%i consumers processed %i messages with %i errors',
                    self.stats['counts']['processes'],
                    self.stats['counts']['processed'],
                    self.stats['counts']['failed'])

    for key in self.stats['consumers'].keys():
        LOGGER.info('%i %s %s processed %i messages with %i errors',
                    self.stats['consumers'][key]['processes'], key,
                    self.consumer_keyword(self.stats['consumers'][key]),
                    self.stats['consumers'][key]['processed'],
                    self.stats['consumers'][key]['failed'])
java
@Deprecated
public static BoundingBox fromCoordinates(
        @FloatRange(from = MIN_LONGITUDE, to = GeoJsonConstants.MAX_LONGITUDE) double west,
        @FloatRange(from = MIN_LATITUDE, to = GeoJsonConstants.MAX_LATITUDE) double south,
        @FloatRange(from = MIN_LONGITUDE, to = GeoJsonConstants.MAX_LONGITUDE) double east,
        @FloatRange(from = MIN_LATITUDE, to = GeoJsonConstants.MAX_LATITUDE) double north) {
    return fromLngLats(west, south, east, north);
}
java
@Override
public int compareTo(NodeInfo o) {
    if (o == null) {
        return 1;
    } else {
        return this.getHost().compareTo(o.getHost());
    }
}
python
def plot(self, axis=None, truncate_mode=None, p=0, vary_line_width=True,
         cmap='viridis', colorbar=True):
    """Plot a dendrogram of the single linkage tree.

    Parameters
    ----------
    truncate_mode : str, optional
        The dendrogram can be hard to read when the original
        observation matrix from which the linkage is derived is
        large. Truncation is used to condense the dendrogram.
        There are several modes:

        ``None/'none'``
            No truncation is performed (Default).

        ``'lastp'``
            The last p non-singleton formed in the linkage are the only
            non-leaf nodes in the linkage; they correspond to rows
            Z[n-p-2:end] in Z. All other non-singleton clusters are
            contracted into leaf nodes.

        ``'level'/'mtica'``
            No more than p levels of the dendrogram tree are displayed.
            This corresponds to Mathematica(TM) behavior.

    p : int, optional
        The ``p`` parameter for ``truncate_mode``.

    vary_line_width : boolean, optional
        Draw downward branches of the dendrogram with line thickness that
        varies depending on the size of the cluster.

    cmap : string or matplotlib colormap, optional
        The matplotlib colormap to use to color the cluster bars.
        A value of 'none' will result in black bars. (default 'viridis')

    colorbar : boolean, optional
        Whether to draw a matplotlib colorbar displaying the range
        of cluster sizes as per the colormap. (default True)

    Returns
    -------
    axis : matplotlib axis
        The axis on which the dendrogram plot has been rendered.
    """
    dendrogram_data = dendrogram(self._linkage, p=p, truncate_mode=truncate_mode, no_plot=True)
    X = dendrogram_data['icoord']
    Y = dendrogram_data['dcoord']

    try:
        import matplotlib.pyplot as plt
    except ImportError:
        raise ImportError('You must install the matplotlib library to plot the single linkage tree.')

    if axis is None:
        axis = plt.gca()

    if vary_line_width:
        linewidths = [(_line_width(y[0], self._linkage), _line_width(y[1], self._linkage))
                      for y in Y]
    else:
        linewidths = [(1.0, 1.0)] * len(Y)

    if cmap != 'none':
        color_array = np.log2(np.array(linewidths).flatten())
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(0, color_array.max()))
        sm.set_array(color_array)

    for x, y, lw in zip(X, Y, linewidths):
        left_x = x[:2]
        right_x = x[2:]
        left_y = y[:2]
        right_y = y[2:]
        horizontal_x = x[1:3]
        horizontal_y = y[1:3]

        if cmap != 'none':
            axis.plot(left_x, left_y, color=sm.to_rgba(np.log2(lw[0])),
                      linewidth=np.log2(1 + lw[0]),
                      solid_joinstyle='miter', solid_capstyle='butt')
            axis.plot(right_x, right_y, color=sm.to_rgba(np.log2(lw[1])),
                      linewidth=np.log2(1 + lw[1]),
                      solid_joinstyle='miter', solid_capstyle='butt')
        else:
            axis.plot(left_x, left_y, color='k',
                      linewidth=np.log2(1 + lw[0]),
                      solid_joinstyle='miter', solid_capstyle='butt')
            axis.plot(right_x, right_y, color='k',
                      linewidth=np.log2(1 + lw[1]),
                      solid_joinstyle='miter', solid_capstyle='butt')

        axis.plot(horizontal_x, horizontal_y, color='k', linewidth=1.0,
                  solid_joinstyle='miter', solid_capstyle='butt')

    if colorbar:
        cb = plt.colorbar(sm)
        cb.ax.set_ylabel('log(Number of points)')

    axis.set_xticks([])
    for side in ('right', 'top', 'bottom'):
        axis.spines[side].set_visible(False)
    axis.set_ylabel('distance')

    return axis
java
public void reset() {
    // reset all offsets
    this.numRecords = 0;
    this.currentSortIndexOffset = 0;
    this.currentDataBufferOffset = 0;
    this.sortIndexBytes = 0;

    // return all memory
    returnToSegmentPool();

    // grab first buffers
    this.currentSortIndexSegment = nextMemorySegment();
    this.sortIndex.add(this.currentSortIndexSegment);
    this.recordCollector.reset();
}
java
private static Map<String, String> convertAdHocMonomersIntoSMILES(Map<String, String> monomersList) throws HELM1FormatException, ChemistryException {
    Map<String, String> convert = new HashMap<String, String>();
    try {
        for (Map.Entry<String, String> element : monomersList.entrySet()) {
            Monomer m;
            m = MonomerFactory.getInstance().getMonomerStore().getMonomer(element.getValue().toString(), element.getKey().toString());
            String smiles = m.getCanSMILES();
            AbstractChemistryManipulator manipulator = Chemistry.getInstance().getManipulator();
            String canSmiles = manipulator.canonicalize(smiles);
            convert.put(element.getKey().toString(), canSmiles);
        }
        return convert;
    } catch (MonomerLoadingException | CTKException e) {
        e.printStackTrace();
        throw new HELM1FormatException("SMILES for Monomer can not be found: ");
    }
}
java
public void setSubEntries(List<CmsClientSitemapEntry> children, I_CmsSitemapController controller) {
    m_childrenLoadedInitially = true;
    m_subEntries.clear();
    if (children != null) {
        m_subEntries.addAll(children);
        for (CmsClientSitemapEntry child : children) {
            child.updateSitePath(CmsStringUtil.joinPaths(m_sitePath, child.getName()), controller);
        }
    }
}
java
protected Future<?> callOnMessages(final List<? extends Message> messages) throws IllegalStateException {
    if (isClosed())
        throw new IllegalStateException("Socket is closed");
    if (messages.isEmpty())
        throw new IllegalArgumentException("messages may not be empty");
    return listenerManager.enqueueEvent(
        new ConcurrentListenerManager.Event<SocketListener>() {
            @Override
            public Runnable createCall(final SocketListener listener) {
                return new Runnable() {
                    @Override
                    public void run() {
                        listener.onMessages(AbstractSocket.this, messages);
                    }
                };
            }
        }
    );
}
java
public long getTotalDomLoadTime(final String intervalName, final TimeUnit unit) {
    return unit.transformMillis(totalDomLoadTime.getValueAsLong(intervalName));
}
java
@Bean
public Config hazelcastConfig() {
    MapConfig mapConfig = new MapConfig("spring-boot-admin-event-store")
            .setInMemoryFormat(InMemoryFormat.OBJECT)
            .setBackupCount(1)
            .setEvictionPolicy(EvictionPolicy.NONE)
            .setMergePolicyConfig(new MergePolicyConfig(PutIfAbsentMapMergePolicy.class.getName(), 100));
    return new Config().setProperty("hazelcast.jmx", "true").addMapConfig(mapConfig);
}
python
def mapped(args):
    """
    %prog mapped sam/bamfile

    Given an input sam/bam file, output a sam/bam file containing only the
    mapped reads. Optionally, extract the unmapped reads into a separate file.
    """
    import pysam
    from jcvi.apps.grid import Jobs

    p = OptionParser(mapped.__doc__)
    p.set_sam_options(extra=False)

    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(p.print_help())

    samfile, = args

    view_opts = []
    oext, mopts = (".sam", ["-S"]) if samfile.endswith(".sam") else (".bam", [])

    flag, ext = ("-b", ".bam") if opts.bam else ("-h", ".sam")
    mopts.append(flag)

    if opts.uniq:
        mopts.append("-q1")
        ext = ".uniq{0}".format(ext)

    if opts.unmapped:
        uopts = [x for x in mopts]
        uoutfile = samfile.replace(oext, ".unmapped{0}".format(ext))
        uopts.extend(["-f4", samfile, "-o{0}".format(uoutfile)])
        view_opts.append(uopts)

    outfile = samfile.replace(oext, ".mapped{0}".format(ext))
    mopts.extend(["-F4", samfile, "-o{0}".format(outfile)])
    view_opts.append(mopts)

    for vo in view_opts:
        logging.debug('samtools view {0}'.format(" ".join(vo)))

    jobs = Jobs(pysam.view, [(z for z in x) for x in view_opts])
    jobs.run()
java
@Override
public EList<JvmParameterizedTypeReference> getImplements() {
    if (implements_ == null) {
        implements_ = new EObjectContainmentEList<JvmParameterizedTypeReference>(
                JvmParameterizedTypeReference.class, this, SarlPackage.SARL_SKILL__IMPLEMENTS);
    }
    return implements_;
}
java
protected synchronized Class loadClass(String classname, boolean resolve) throws ClassNotFoundException {
    // 'sync' is needed - otherwise 2 threads can load the same class
    // twice, resulting in LinkageError: duplicated class definition.
    // findLoadedClass avoids that, but without sync it won't work.
    Class theClass = findLoadedClass(classname);
    if (theClass != null) {
        return theClass;
    }
    if (isParentFirst(classname)) {
        try {
            theClass = findBaseClass(classname);
            log("Class " + classname + " loaded from parent loader " + "(parentFirst)", Project.MSG_DEBUG);
        } catch (ClassNotFoundException cnfe) {
            theClass = findClass(classname);
            log("Class " + classname + " loaded from ant loader " + "(parentFirst)", Project.MSG_DEBUG);
        }
    } else {
        try {
            theClass = findClass(classname);
            log("Class " + classname + " loaded from ant loader", Project.MSG_DEBUG);
        } catch (ClassNotFoundException cnfe) {
            if (ignoreBase) {
                throw cnfe;
            }
            theClass = findBaseClass(classname);
            log("Class " + classname + " loaded from parent loader", Project.MSG_DEBUG);
        }
    }
    if (resolve) {
        resolveClass(theClass);
    }
    return theClass;
}
python
def as_dict(self):
    """
    Return a Listing object as Dictionary
    :return: dict
    """
    return {
        'search_type': self.search_type,
        'agent_id': self.agent_id,
        'id': self.id,
        'price': self.price,
        'price_change': self.price_change,
        'viewings': self.upcoming_viewings,
        'facilities': self.facilities,
        'overviews': self.overviews,
        'formalised_address': self.formalised_address,
        'address_line_1': self.address_line_1,
        'county': self.county,
        'listing_image': self.images,
        'listing_hires_image': self.hires_images,
        'agent': self.agent,
        'agent_url': self.agent_url,
        'contact_number': self.contact_number,
        'daft_link': self.daft_link,
        'shortcode': self.shortcode,
        'date_insert_update': self.date_insert_update,
        'views': self.views,
        'description': self.description,
        'dwelling_type': self.dwelling_type,
        'posted_since': self.posted_since,
        'num_bedrooms': self.bedrooms,
        'num_bathrooms': self.bathrooms,
        'city_center_distance': self.city_center_distance,
        'transport_routes': self.transport_routes,
        'latitude': self.latitude,
        'longitude': self.longitude,
        'ber_code': self.ber_code,
        'commercial_area_size': self.commercial_area_size
    }
java
private ClassLoader getDelegationClassLoader(String docletClassName) {
    ClassLoader ctxCL = Thread.currentThread().getContextClassLoader();
    ClassLoader sysCL = ClassLoader.getSystemClassLoader();
    if (sysCL == null)
        return ctxCL;
    if (ctxCL == null)
        return sysCL;

    // Condition 1.
    try {
        sysCL.loadClass(docletClassName);
        try {
            ctxCL.loadClass(docletClassName);
        } catch (ClassNotFoundException e) {
            return sysCL;
        }
    } catch (ClassNotFoundException e) {
    }

    // Condition 2.
    try {
        if (getClass() == sysCL.loadClass(getClass().getName())) {
            try {
                if (getClass() != ctxCL.loadClass(getClass().getName()))
                    return sysCL;
            } catch (ClassNotFoundException e) {
                return sysCL;
            }
        }
    } catch (ClassNotFoundException e) {
    }

    return ctxCL;
}
java
public static DynamicDataSet createDynamicDataSet(
        File homeDir,
        int initialCapacity,
        int segmentFileSizeMB,
        SegmentFactory segmentFactory) throws Exception {
    int batchSize = StoreParams.BATCH_SIZE_DEFAULT;
    int numSyncBatches = StoreParams.NUM_SYNC_BATCHES_DEFAULT;
    double hashLoadFactor = StoreParams.HASH_LOAD_FACTOR_DEFAULT;
    double segmentCompactFactor = StoreParams.SEGMENT_COMPACT_FACTOR_DEFAULT;

    return createDynamicDataSet(
            homeDir,
            initialCapacity,
            batchSize,
            numSyncBatches,
            segmentFileSizeMB,
            segmentFactory,
            segmentCompactFactor,
            hashLoadFactor);
}
java
public static void cut(Image srcImage, ImageOutputStream destImageStream, Rectangle rectangle) throws IORuntimeException {
    writeJpg(cut(srcImage, rectangle), destImageStream);
}
java
private static int get16(byte[] b, int off) {
    return Byte.toUnsignedInt(b[off]) | (Byte.toUnsignedInt(b[off + 1]) << 8);
}
python
def parse_image_json(text):
    """
    parses response output of AWS describe commands and returns the first
    (and only) item in the array

    :param text: describe output
    :return: image json
    """
    image_details = json.loads(text)
    if image_details.get('Images') is not None:
        try:
            image_details = image_details.get('Images')[0]
        except IndexError:
            image_details = None
    return image_details
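A quick sketch of the expected behavior on a canned describe-images payload (the AMI id is hypothetical; the function above and a json import are assumed to be in scope):

sample = '{"Images": [{"ImageId": "ami-0abc1234", "State": "available"}]}'
print(parse_image_json(sample)["ImageId"])   # ami-0abc1234
print(parse_image_json('{"Images": []}'))    # None -- empty Images array hits the IndexError path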
python
def choose_username(metadata, config):
    """
    Choose the database username to use.

    Because databases should not be shared between services, database
    usernames should be the same as the service that uses them.
    """
    if config.username is not None:
        # we allow -- but do not encourage -- database username configuration
        return config.username

    if config.read_only:
        # by convention, we provision a read-only username for every service
        return f"{metadata.name}_ro"

    return metadata.name
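Since only .name, .username and .read_only are touched, the convention is easy to exercise with stand-ins; a sketch where SimpleNamespace replaces the real metadata/config objects:

from types import SimpleNamespace

metadata = SimpleNamespace(name="orders")
print(choose_username(metadata, SimpleNamespace(username=None, read_only=False)))      # orders
print(choose_username(metadata, SimpleNamespace(username=None, read_only=True)))       # orders_ro
print(choose_username(metadata, SimpleNamespace(username="custom", read_only=False)))  # custom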
python
def upload_server_cert(self, cert_name, cert_body, private_key, cert_chain=None, path=None):
    """
    Uploads a server certificate entity for the AWS Account.
    The server certificate entity includes a public key certificate,
    a private key, and an optional certificate chain, which should
    all be PEM-encoded.

    :type cert_name: string
    :param cert_name: The name for the server certificate.
        Do not include the path in this value.

    :type cert_body: string
    :param cert_body: The contents of the public key certificate
        in PEM-encoded format.

    :type private_key: string
    :param private_key: The contents of the private key in
        PEM-encoded format.

    :type cert_chain: string
    :param cert_chain: The contents of the certificate chain. This
        is typically a concatenation of the PEM-encoded public key
        certificates of the chain.

    :type path: string
    :param path: The path for the server certificate.
    """
    params = {'ServerCertificateName': cert_name,
              'CertificateBody': cert_body,
              'PrivateKey': private_key}
    if cert_chain:
        params['CertificateChain'] = cert_chain
    if path:
        params['Path'] = path
    return self.get_response('UploadServerCertificate', params, verb='POST')
python
def packet_get_samples_per_frame(data, fs):
    """Gets the number of samples per frame from an Opus packet"""
    data_pointer = ctypes.c_char_p(data)

    result = _packet_get_nb_frames(data_pointer, ctypes.c_int(fs))
    if result < 0:
        raise OpusError(result)
    return result
python
def warning(title="", text="", width=DEFAULT_WIDTH, height=DEFAULT_HEIGHT, timeout=None):
    """
    Display a simple warning

    :param text: text inside the window
    :type text: str
    :param title: title of the window
    :type title: str
    :param width: window width
    :type width: int
    :param height: window height
    :type height: int
    :param timeout: close the window after n seconds
    :type timeout: int
    """
    return _simple_dialog(Gtk.MessageType.WARNING, text, title, width, height, timeout)
python
def abstractclass(cls):
    """abstractclass - class decorator.

    Makes sure the class is abstract and cannot be used on its own.

    @abstractclass
    class A(object):
        def __init__(self, *args, **kwargs):
            # logic
            pass

    class B(A):
        pass

    a = A()  # results in an AssertionError
    b = B()  # works fine
    """
    setattr(cls, "_ISNEVER", cls.__bases__[0].__name__)
    origInit = cls.__dict__["__init__"]

    def wrapInit(self, *args, **kwargs):
        # when the class is instantiated we can check for bases
        # we don't want it to be the base class
        try:
            assert self.__class__.__bases__[-1].__name__ != self._ISNEVER
            origInit(self, *args, **kwargs)
        except AssertionError:
            raise TypeError("Use of abstract base class")

    # replace the original __init__
    setattr(wrapInit, "__doc__", getattr(origInit, "__doc__"))
    setattr(origInit, "__doc__", "")
    setattr(cls, "__init__", wrapInit)
    return cls
java
private boolean isWindowLimitExceeded(FrameData dataFrame) {
    if (streamWindowUpdateWriteLimit - dataFrame.getPayloadLength() < 0
            || muxLink.getWorkQ().getConnectionWriteLimit() - dataFrame.getPayloadLength() < 0) {
        // would exceed window update limit
        String s = "Cannot write Data Frame because it would exceed the stream window update limit."
                + "streamWindowUpdateWriteLimit: " + streamWindowUpdateWriteLimit
                + "\nstreamWindowUpdateWriteInitialSize: " + streamWindowUpdateWriteInitialSize
                + "\nconnection window size: " + muxLink.getWorkQ().getConnectionWriteLimit()
                + "\nframe size: " + dataFrame.getPayloadLength();
        if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
            Tr.debug(tc, s);
        }
        return true;
    }
    return false;
}
python
def _advance_to_next_stage(self, config_ids, losses):
    """
    SuccessiveHalving simply continues the best based on the current loss.
    """
    ranks = np.argsort(np.argsort(losses))
    return ranks < self.num_configs[self.stage]
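The double argsort computes each configuration's rank (0 = best loss), so comparing against the stage's budget keeps the top performers; a standalone illustration with numpy, using a toy losses array and a hypothetical budget of 2:

import numpy as np

losses = np.array([0.9, 0.1, 0.5, 0.3])
ranks = np.argsort(np.argsort(losses))  # array([3, 0, 2, 1])
print(ranks < 2)  # [False  True False  True] -> advance the two best configs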
java
public Ast parse(String expression) {
    Ast ast = new Ast();
    if (expression == null || expression.length() == 0)
        return ast;

    SeekableStringReader sr = new SeekableStringReader(expression);
    if (sr.peek() == '#')
        sr.readUntil('\n'); // skip comment line

    try {
        ast.root = parseExpr(sr);
        sr.skipWhitespace();
        if (sr.hasMore())
            throw new ParseException("garbage at end of expression");
        return ast;
    } catch (ParseException x) {
        String faultLocation = extractFaultLocation(sr);
        throw new ParseException(x.getMessage() + " (at position " + sr.bookmark() + "; '" + faultLocation + "')", x);
    }
}
java
public void marshall(ListImagesRequest listImagesRequest, ProtocolMarshaller protocolMarshaller) {
    if (listImagesRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        protocolMarshaller.marshall(listImagesRequest.getRegistryId(), REGISTRYID_BINDING);
        protocolMarshaller.marshall(listImagesRequest.getRepositoryName(), REPOSITORYNAME_BINDING);
        protocolMarshaller.marshall(listImagesRequest.getNextToken(), NEXTTOKEN_BINDING);
        protocolMarshaller.marshall(listImagesRequest.getMaxResults(), MAXRESULTS_BINDING);
        protocolMarshaller.marshall(listImagesRequest.getFilter(), FILTER_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
python
def getList(self):
    """
    Query the sensitive-word list.

    @return code: return code, 200 means success.
    @return word: the sensitive word content.
    @return errorMessage: the error message.
    """
    desc = {
        "name": "ListWordfilterReslut",
        "desc": "result returned by listWordfilter",
        "fields": [{
            "name": "code",
            "type": "Integer",
            "desc": "return code, 200 means success."
        }, {
            "name": "word",
            "type": "String",
            "desc": "the sensitive word content."
        }, {
            "name": "errorMessage",
            "type": "String",
            "desc": "the error message."
        }]
    }

    r = self.call_api(
        method=('API', 'POST', 'application/x-www-form-urlencoded'),
        action='/wordfilter/list.json',
        params={})
    return Response(r, desc)
java
public static byte[] uncompress(byte[] input) throws IOException {
    byte[] result = new byte[Snappy.uncompressedLength(input)];
    Snappy.uncompress(input, 0, input.length, result, 0);
    return result;
}
java
private void readTableData(List<SynchroTable> tables, InputStream is) throws IOException {
    for (SynchroTable table : tables) {
        if (REQUIRED_TABLES.contains(table.getName())) {
            readTable(is, table);
        }
    }
}
java
public void updateBackupElements(Map<String, CmsContainerElementData> updateElements) {
    ArrayList<CmsContainerPageElementPanel> updatedList = new ArrayList<CmsContainerPageElementPanel>();
    String containerId = m_groupContainer.getContainerId();
    for (CmsContainerPageElementPanel element : m_backUpElements) {
        if (updateElements.containsKey(element.getId())
                && CmsStringUtil.isNotEmptyOrWhitespaceOnly(
                        updateElements.get(element.getId()).getContents().get(containerId))) {
            CmsContainerElementData elementData = updateElements.get(element.getId());
            try {
                CmsContainerPageElementPanel replacer = m_controller.getContainerpageUtil().createElement(
                        elementData, m_groupContainer, false);
                if (element.getInheritanceInfo() != null) {
                    // in case of inheritance container editing, keep the inheritance info
                    replacer.setInheritanceInfo(element.getInheritanceInfo());
                }
                updatedList.add(replacer);
            } catch (Exception e) {
                // in this case keep the old version
                updatedList.add(element);
            }
        } else {
            updatedList.add(element);
        }
    }
    m_backUpElements = updatedList;
}
python
def get_clean_content(self):
    """Implementation of the clean() method."""
    fill_chars = {'BLANK_TEMPLATE': ' ', 'ECHO_TEMPLATE': '0'}
    for match in self.pattern.finditer(self.html_content):
        start, end = match.start(), match.end()
        tag = _get_tag(match)
        if tag == 'ECHO':
            self._write_content(start)
            self._index = start
            self._state = 'ECHO_TEMPLATE'
        elif tag == 'START':
            if self._index != start:
                self._write_content(start)
            self._index = start
            self._state = 'BLANK_TEMPLATE'
        elif tag == 'END':
            if self._state not in ('BLANK_TEMPLATE', 'ECHO_TEMPLATE'):
                # We got a closing tag but none was open. We decide to carry
                # on as it may be the case that it was because of a closing
                # dictionary in javascript like: var dict = {foo:{}}.
                # See the note on the clean() function for more details.
                continue
            fill_char = fill_chars[self._state]
            fill = fill_char * (end - self._index)
            if self._state == 'BLANK_TEMPLATE':
                self._pending.append(fill)
                self._pending_has_blank = True
            else:
                assert not self._pending
                self._output.write(fill)
            self._index = end
            self._state = 'HTML'
        elif tag == 'SPACES':
            self._pending.append(match.group('spaces'))
            self._index = end
        elif tag == 'NEWLINE':
            if self._state == 'HTML':
                if self._index != start or not self._pending_has_blank:
                    self._write_content(start)
                self._output.write(match.group('newline'))
            elif self._state == 'BLANK_TEMPLATE':
                # We discard the content of this template and whatever is in
                # self._pending.
                self._output.write(match.group('newline'))
            elif self._state == 'ECHO_TEMPLATE':
                assert False, 'Echo tags should be in just one line.'
            self._index = end
            self._reset_pending()
    assert self._state == 'HTML', 'Tag was not closed'
    if self._index != len(self.html_content) or not self._pending_has_blank:
        self._write_content()
    return self._output.getvalue()
java
public static File getResourceAsFile(String resource) {
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    try {
        return new File(URLDecoder.decode(cl.getResource(resource).getFile(), "UTF-8"));
    } catch (UnsupportedEncodingException uee) {
        return null;
    }
}
java
public static <P extends ICallbackPredicate> CallbackOption<P> of(P predicate) {
    return new CallbackOption<>(checkNotNull(predicate), Priority.NORMAL);
}
java
public Object doRemoteCommand(String strCommand, Map<String, Object> properties) {
    if (SET_DEFAULT_COMMAND.equalsIgnoreCase(strCommand))
        if (properties != null)
            if (properties.get(DBConstants.SYSTEM_NAME) != null) {
                if (this.getTask() != null)
                    if (this.getTask().getApplication() instanceof BaseApplication)
                        if (((BaseApplication) this.getTask().getApplication()).getEnvironment() != null) {
                            String systemName = properties.get(DBConstants.SYSTEM_NAME).toString();
                            Environment env = ((BaseApplication) this.getTask().getApplication()).getEnvironment();
                            properties.put(DBConstants.SYSTEM_NAME, "base");
                            BaseApplication app = new MainApplication(env, properties, null);
                            try {
                                Task task = new AutoTask(app, null, properties);
                                RecordOwner recordOwner = new BaseProcess(task, null, properties);
                                Menus menus = new Menus(recordOwner);
                                menus.setKeyArea(Menus.CODE_KEY);
                                menus.getField(Menus.CODE).setString(ResourceConstants.DEFAULT_RESOURCE);
                                if (menus.seek(null))
                                    menus.edit();
                                else
                                    menus.addNew();
                                menus.getField(Menus.CODE).setString(ResourceConstants.DEFAULT_RESOURCE);
                                ((PropertiesField) menus.getField(Menus.PARAMS)).setProperty(DBConstants.SYSTEM_NAME, systemName);
                                if (menus.getEditMode() == DBConstants.EDIT_ADD)
                                    menus.add();
                                else
                                    menus.set();
                            } catch (DBException e) {
                                e.printStackTrace();
                            } finally {
                                app.free();
                            }
                            env.getDefaultApplication().getSystemRecordOwner().setProperty(DBConstants.DEFAULT_SYSTEM_NAME, systemName);
                            return Boolean.TRUE; // Success
                        }
            }
    return super.doRemoteCommand(strCommand, properties);
}
java
public void send(RoxPayload payload) {
    try {
        if (isStarted()) {
            ByteArrayOutputStream baos = new ByteArrayOutputStream();
            new JsonSerializer().serializePayload(new OutputStreamWriter(baos), payload, false);
            socket.emit("payload", new String(baos.toByteArray()));
        } else {
            LOGGER.warn("Minirox is not available to send the test results");
        }
    } catch (Exception e) {
        LOGGER.warn("Unable to send the result to MINI ROX. Cause: {}", e.getMessage());
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Exception: ", e);
        }
    }
}
java
public static String getFormattedName(final String orgText) {
    return trimAndNullIfEmpty(orgText) == null ? SPUIDefinitions.SPACE : orgText;
}
python
def interpret(self, infile):
    """ Process a file of rest and return list of dicts """
    data = []
    for record in self.generate_records(infile):
        data.append(record)
    return data
python
def namedb_get_name_at(cur, name, block_number, include_expired=False):
    """
    Get the sequence of states that a name record was in at a particular
    block height.  There can be more than one if the name changed during
    the block.

    Returns only unexpired names by default.  Can return expired names
    with include_expired=True.

    Returns None if this name does not exist at this block height.
    """
    if not include_expired:
        # don't return anything if this name is expired.
        # however, we don't care if the name hasn't been created as of this
        # block_number either, since we might return its preorder
        # (hence only_registered=False)
        name_rec = namedb_get_name(cur, name, block_number, include_expired=False,
                                   include_history=False, only_registered=False)
        if name_rec is None:
            # expired at this block.
            return None

    history_rows = namedb_get_record_states_at(cur, name, block_number)
    if len(history_rows) == 0:
        # doesn't exist
        return None
    else:
        return history_rows
java
@Override
public void glVertexAttribPointer(int arrayId, int size, int type, boolean normalize, int byteStride, Buffer nioBuffer) {
    VertexAttribArrayState data = vertexAttribArrayState[arrayId];
    // HtmlPlatform.log.info("glVertexAttribPointer Data size: " + nioBuffer.remaining());
    useNioBuffer |= 1 << arrayId;
    data.nioBuffer = nioBuffer;
    data.nioBufferPosition = nioBuffer.position();
    data.nioBufferLimit = nioBuffer.limit();
    data.size = size;
    data.type = type;
    data.normalize = normalize;
    data.stride = byteStride == 0 ? size * getTypeSize(type) : byteStride;
}
python
def from_timestamp(timestamp, tz_offset):
    """Converts a timestamp + tz_offset into an aware datetime instance."""
    utc_dt = datetime.fromtimestamp(timestamp, utc)
    try:
        local_dt = utc_dt.astimezone(tzoffset(tz_offset))
        return local_dt
    except ValueError:
        return utc_dt
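The function relies on helpers named utc and tzoffset (dateutil/pytz-style); the same conversion can be expressed with the standard library only. A minimal equivalent sketch, with hypothetical inputs:

from datetime import datetime, timedelta, timezone

ts, offset_seconds = 1_600_000_000, 3600
aware = datetime.fromtimestamp(ts, timezone.utc).astimezone(
    timezone(timedelta(seconds=offset_seconds)))
print(aware.isoformat())  # 2020-09-13T13:26:40+01:00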
java
public Optional<T> tryFind(Predicate<T> decision) {
    return Iterables.tryFind(result(), decision);
}
python
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    response = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    mute_state = response['CurrentMute']
    return True if int(mute_state) else False
python
def from_plugin_classname(plugin_classname, exclude_lines_regex=None, **kwargs):
    """Initializes a plugin class, given a classname and kwargs.

    :type plugin_classname: str
    :param plugin_classname: subclass of BasePlugin.

    :type exclude_lines_regex: str|None
    :param exclude_lines_regex: optional regex for ignored lines.
    """
    klass = globals()[plugin_classname]

    # Make sure the instance is a BasePlugin type, before creating it.
    if not issubclass(klass, BasePlugin):
        raise TypeError

    try:
        instance = klass(exclude_lines_regex=exclude_lines_regex, **kwargs)
    except TypeError:
        log.warning('Unable to initialize plugin!')
        raise

    return instance
java
@XmlElementDecl(namespace = "http://www.opengis.net/citygml/bridge/2.0", name = "_GenericApplicationPropertyOfBridgeInstallation")
public JAXBElement<Object> create_GenericApplicationPropertyOfBridgeInstallation(Object value) {
    return new JAXBElement<Object>(__GenericApplicationPropertyOfBridgeInstallation_QNAME, Object.class, null, value);
}
java
@SafeVarargs
public static Double[] box(final double... a) {
    if (a == null) {
        return null;
    }
    return box(a, 0, a.length);
}
java
@SuppressWarnings("unchecked") @Override public void eSet(int featureID, Object newValue) { switch (featureID) { case AfplibPackage.PPORG__RG_LENGTH: setRGLength((Integer)newValue); return; case AfplibPackage.PPORG__OBJ_TYPE: setObjType((Integer)newValue); return; case AfplibPackage.PPORG__PROC_FLGS: setProcFlgs((Integer)newValue); return; case AfplibPackage.PPORG__XOCA_OSET: setXocaOset((Integer)newValue); return; case AfplibPackage.PPORG__YOCA_OSET: setYocaOset((Integer)newValue); return; case AfplibPackage.PPORG__TRIPLETS: getTriplets().clear(); getTriplets().addAll((Collection<? extends Triplet>)newValue); return; } super.eSet(featureID, newValue); }
python
def untokenize(words):
    """
    Untokenizing a text undoes the tokenizing operation, restoring
    punctuation and spaces to the places that people expect them to be.
    Ideally, `untokenize(tokenize(text))` should be identical to `text`,
    except for line breaks.
    """
    text = ' '.join(words)
    step1 = text.replace("`` ", '"').replace(" ''", '"').replace('. . .', '...')
    step2 = step1.replace(" ( ", " (").replace(" ) ", ") ")
    step3 = re.sub(r' ([.,:;?!%]+)([ \'"`])', r"\1\2", step2)
    step4 = re.sub(r' ([.,:;?!%]+)$', r"\1", step3)
    step5 = step4.replace(" '", "'").replace(" n't", "n't").replace(
        "can not", "cannot")
    step6 = step5.replace(" ` ", " '")
    return step6.strip()
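A quick round-trip check on a Penn-Treebank-style token list, assuming re is imported so untokenize runs:

print(untokenize(["She", "said", ",", "``", "do", "n't", "stop", "''", "!"]))
# She said, "don't stop"!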
python
def process_forever(self, timeout=0.2): """Run an infinite loop, processing data from connections. This method repeatedly calls process_once. :param timeout: Parameter to pass to :meth:`irc.client.Reactor.process_once` :type timeout: :class:`float` """ # This loop should specifically *not* be mutex-locked. # Otherwise no other thread would ever be able to change # the shared state of a Reactor object running this function. log.debug("process_forever(timeout=%s)", timeout) self._looping.set() while self._looping.is_set(): self.process_once(timeout)
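The loop above is a plain stop-flag pattern; a self-contained model of it (hypothetical Worker class, not the irc API) looks like this:

import threading
import time

class Worker(object):
    def __init__(self):
        self._looping = threading.Event()

    def process_once(self, timeout):
        time.sleep(timeout)  # stand-in for one unit of real work

    def process_forever(self, timeout=0.2):
        self._looping.set()
        while self._looping.is_set():
            self.process_once(timeout)

    def stop(self):
        self._looping.clear()

w = Worker()
t = threading.Thread(target=w.process_forever)
t.start()
time.sleep(0.5)  # let it spin a few iterations before clearing the flag
w.stop()
t.join()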
python
def policy_map_clss_priority_mapping_table_imprt_cee(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") policy_map = ET.SubElement(config, "policy-map", xmlns="urn:brocade.com:mgmt:brocade-policer") po_name_key = ET.SubElement(policy_map, "po-name") po_name_key.text = kwargs.pop('po_name') clss = ET.SubElement(policy_map, "class") cl_name_key = ET.SubElement(clss, "cl-name") cl_name_key.text = kwargs.pop('cl_name') priority_mapping_table = ET.SubElement(clss, "priority-mapping-table") imprt = ET.SubElement(priority_mapping_table, "import") cee = ET.SubElement(imprt, "cee") cee.text = kwargs.pop('cee') callback = kwargs.pop('callback', self._callback) return callback(config)
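The function above is mostly ElementTree plumbing; the stdlib sketch below shows the same nested-SubElement idiom with illustrative tag names (the Brocade namespace is omitted for brevity).

import xml.etree.ElementTree as ET

def build_config(po_name, cl_name, cee):
    # build <config><policy-map>...<class>...<priority-mapping-table><import><cee>
    config = ET.Element("config")
    policy_map = ET.SubElement(config, "policy-map")
    ET.SubElement(policy_map, "po-name").text = po_name
    clss = ET.SubElement(policy_map, "class")
    ET.SubElement(clss, "cl-name").text = cl_name
    imprt = ET.SubElement(ET.SubElement(clss, "priority-mapping-table"), "import")
    ET.SubElement(imprt, "cee").text = cee
    return config

print(ET.tostring(build_config("pmap1", "class1", "default")).decode())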
python
def print_colored_columns(printer, rows, padding=2):
    """Like `columnise`, but with colored rows.

    Args:
        printer (`colorize.Printer`): Printer object.
        rows (list of tuple): Rows to columnise and print.
        padding (int): Number of spaces between columns.

    Note: The last entry in each row is the row color, or None for no
    coloring.
    """
    rows_ = [x[:-1] for x in rows]
    colors = [x[-1] for x in rows]
    for col, line in zip(colors, columnise(rows_, padding=padding)):
        printer(line, col)
python
def lag(expr, offset, default=None, sort=None, ascending=True): """ Get value in the row ``offset`` rows prior to the current row. :param offset: the offset value :param default: default value for the function, when there are no rows satisfying the offset :param expr: expression for calculation :param sort: name of the sort column :param ascending: whether to sort in ascending order :return: calculated column """ return _shift_op(expr, Lag, offset, default=default, sort=sort, ascending=ascending)
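For intuition, a plain-Python model of what lag() computes (not the library implementation): each row takes the value `offset` rows earlier in sorted order, falling back to `default`.

def lag_list(values, offset=1, default=None):
    # values are assumed already sorted by the sort column
    return [values[i - offset] if i - offset >= 0 else default
            for i in range(len(values))]

print(lag_list([10, 20, 30], offset=1, default=0))  # [0, 10, 20]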
python
def get_filtered_values_by_selector(self, selector, regex=None, group=1):
    """Return the text content of @selector.

    Filter text content by @regex and @group. Returns a list when more
    than one node matches, the single value when exactly one does, and
    raises IndexError when nothing matches.
    """
    # evaluate get_text_from_node only once per node
    data = [
        text
        for text in (
            self.get_text_from_node(node, regex, group)
            for node in self.get_nodes_by_selector(selector)
        )
        if text
    ]
    return data if len(data) > 1 else data[0]
python
def types(**typefuncs):
    """
    Decorate a function that takes strings to one that takes typed values.

    The decorator's arguments are functions to perform type conversion.
    The positional and keyword arguments will be mapped to the positional
    and keyword arguments of the decorated function.  This allows
    web-based service functions, which by design always are passed string
    arguments, to be declared as functions taking typed arguments
    instead, eliminating the overhead of having to perform type
    conversions manually.

    If type conversion fails for any argument, the wrapped function will
    return a dict describing the exception that was raised.
    """
    def wrap(f):
        @functools.wraps(f)
        def typed_func(*pargs, **kwargs):
            # Analyze the incoming arguments so we know how to apply the
            # type-conversion functions in `typefuncs`.
            argspec = inspect.getargspec(f)

            # The `args` property contains the list of named arguments passed to
            # f. Construct a dict mapping from these names to the values that
            # were passed.
            #
            # It is possible that `args` contains names that are not represented
            # in `pargs`, if some of the arguments are passed as keyword
            # arguments. In this case, the relative shortness of `pargs` will
            # cause the call to zip() to truncate the `args` list, and the
            # keyword-style passed arguments will simply be present in `kwargs`.
            pargs_dict = {name: value for (name, value) in zip(argspec.args, pargs)}

            # Begin converting arguments according to the functions given in
            # `typefuncs`. If a given name does not appear in `typefuncs`,
            # simply leave it unchanged. If a name appears in `typefuncs` that
            # does not appear in the argument list, this is considered an error.
            try:
                for name, func in typefuncs.iteritems():
                    if name in pargs_dict:
                        pargs_dict[name] = func(pargs_dict[name])
                    elif name in kwargs:
                        kwargs[name] = func(kwargs[name])
                    else:
                        http_status(400, "Unknown Argument Name")
                        content_type("application/json")
                        return {"error": "'%s' was registered for type conversion but did not appear in the arguments list" % (name)}
            except ValueError as e:
                http_status(400, "Input Value Conversion Failed")
                content_type("application/json")
                return {"error": str(e)}

            # Unroll `pargs` into a list of arguments that are in the correct
            # order.
            pargs = []
            for name in argspec.args:
                try:
                    pargs.append(pargs_dict[name])
                except KeyError:
                    break

            # Call the wrapped function using the converted arguments.
            return f(*pargs, **kwargs)

        typed_func.typefuncs = typefuncs
        return typed_func

    return wrap
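Hypothetical usage, under Python 2 (which the iteritems() call implies); http_status and content_type are assumed to come from the surrounding web framework, so only the happy path is exercised here.

@types(x=int, y=float)
def add(x, y=0.0):
    return {'sum': x + y}

print(add('2', y='3.5'))  # {'sum': 5.5}
# add('two') would take the ValueError branch and return the error dict,
# but that path needs the framework's http_status/content_type helpers.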
java
public static AztecCode encode(byte[] data, int minECCPercent, int userSpecifiedLayers) {
  // High-level encode
  BitArray bits = new HighLevelEncoder(data).encode();

  // stuff bits and choose symbol size
  int eccBits = bits.getSize() * minECCPercent / 100 + 11;
  int totalSizeBits = bits.getSize() + eccBits;
  boolean compact;
  int layers;
  int totalBitsInLayer;
  int wordSize;
  BitArray stuffedBits;
  if (userSpecifiedLayers != DEFAULT_AZTEC_LAYERS) {
    compact = userSpecifiedLayers < 0;
    layers = Math.abs(userSpecifiedLayers);
    if (layers > (compact ? MAX_NB_BITS_COMPACT : MAX_NB_BITS)) {
      throw new IllegalArgumentException(
          String.format("Illegal value %s for layers", userSpecifiedLayers));
    }
    totalBitsInLayer = totalBitsInLayer(layers, compact);
    wordSize = WORD_SIZE[layers];
    int usableBitsInLayers = totalBitsInLayer - (totalBitsInLayer % wordSize);
    stuffedBits = stuffBits(bits, wordSize);
    if (stuffedBits.getSize() + eccBits > usableBitsInLayers) {
      throw new IllegalArgumentException("Data too large for user specified layer");
    }
    if (compact && stuffedBits.getSize() > wordSize * 64) {
      // Compact format only allows 64 data words, though C4 can hold more words than that
      throw new IllegalArgumentException("Data too large for user specified layer");
    }
  } else {
    wordSize = 0;
    stuffedBits = null;
    // We look at the possible table sizes in the order Compact1, Compact2, Compact3,
    // Compact4, Normal4,... Normal(i) for i < 4 isn't typically used since Compact(i+1)
    // is the same size, but has more data.
    for (int i = 0; ; i++) {
      if (i > MAX_NB_BITS) {
        throw new IllegalArgumentException("Data too large for an Aztec code");
      }
      compact = i <= 3;
      layers = compact ? i + 1 : i;
      totalBitsInLayer = totalBitsInLayer(layers, compact);
      if (totalSizeBits > totalBitsInLayer) {
        continue;
      }
      // [Re]stuff the bits if this is the first opportunity, or if the
      // wordSize has changed
      if (stuffedBits == null || wordSize != WORD_SIZE[layers]) {
        wordSize = WORD_SIZE[layers];
        stuffedBits = stuffBits(bits, wordSize);
      }
      int usableBitsInLayers = totalBitsInLayer - (totalBitsInLayer % wordSize);
      if (compact && stuffedBits.getSize() > wordSize * 64) {
        // Compact format only allows 64 data words, though C4 can hold more words than that
        continue;
      }
      if (stuffedBits.getSize() + eccBits <= usableBitsInLayers) {
        break;
      }
    }
  }
  BitArray messageBits = generateCheckWords(stuffedBits, totalBitsInLayer, wordSize);

  // generate mode message
  int messageSizeInWords = stuffedBits.getSize() / wordSize;
  BitArray modeMessage = generateModeMessage(compact, layers, messageSizeInWords);

  // allocate symbol
  int baseMatrixSize = (compact ? 11 : 14) + layers * 4; // not including alignment lines
  int[] alignmentMap = new int[baseMatrixSize];
  int matrixSize;
  if (compact) {
    // no alignment marks in compact mode, alignmentMap is a no-op
    matrixSize = baseMatrixSize;
    for (int i = 0; i < alignmentMap.length; i++) {
      alignmentMap[i] = i;
    }
  } else {
    matrixSize = baseMatrixSize + 1 + 2 * ((baseMatrixSize / 2 - 1) / 15);
    int origCenter = baseMatrixSize / 2;
    int center = matrixSize / 2;
    for (int i = 0; i < origCenter; i++) {
      int newOffset = i + i / 15;
      alignmentMap[origCenter - i - 1] = center - newOffset - 1;
      alignmentMap[origCenter + i] = center + newOffset + 1;
    }
  }
  BitMatrix matrix = new BitMatrix(matrixSize);

  // draw data bits
  for (int i = 0, rowOffset = 0; i < layers; i++) {
    int rowSize = (layers - i) * 4 + (compact ? 9 : 12);
    for (int j = 0; j < rowSize; j++) {
      int columnOffset = j * 2;
      for (int k = 0; k < 2; k++) {
        if (messageBits.get(rowOffset + columnOffset + k)) {
          matrix.set(alignmentMap[i * 2 + k], alignmentMap[i * 2 + j]);
        }
        if (messageBits.get(rowOffset + rowSize * 2 + columnOffset + k)) {
          matrix.set(alignmentMap[i * 2 + j], alignmentMap[baseMatrixSize - 1 - i * 2 - k]);
        }
        if (messageBits.get(rowOffset + rowSize * 4 + columnOffset + k)) {
          matrix.set(alignmentMap[baseMatrixSize - 1 - i * 2 - k], alignmentMap[baseMatrixSize - 1 - i * 2 - j]);
        }
        if (messageBits.get(rowOffset + rowSize * 6 + columnOffset + k)) {
          matrix.set(alignmentMap[baseMatrixSize - 1 - i * 2 - j], alignmentMap[i * 2 + k]);
        }
      }
    }
    rowOffset += rowSize * 8;
  }

  // draw mode message
  drawModeMessage(matrix, compact, matrixSize, modeMessage);

  // draw alignment marks
  if (compact) {
    drawBullsEye(matrix, matrixSize / 2, 5);
  } else {
    drawBullsEye(matrix, matrixSize / 2, 7);
    for (int i = 0, j = 0; i < baseMatrixSize / 2 - 1; i += 15, j += 16) {
      for (int k = (matrixSize / 2) & 1; k < matrixSize; k += 2) {
        matrix.set(matrixSize / 2 - j, k);
        matrix.set(matrixSize / 2 + j, k);
        matrix.set(k, matrixSize / 2 - j);
        matrix.set(k, matrixSize / 2 + j);
      }
    }
  }

  AztecCode aztec = new AztecCode();
  aztec.setCompact(compact);
  aztec.setSize(matrixSize);
  aztec.setLayers(layers);
  aztec.setCodeWords(messageSizeInWords);
  aztec.setMatrix(matrix);
  return aztec;
}
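A worked example of the ECC sizing arithmetic at the top of encode(), in Python for brevity: with 100 data bits and a 33% minimum ECC setting, the symbol must hold at least 144 bits before a layer count is chosen.

bits_size = 100
min_ecc_percent = 33
ecc_bits = bits_size * min_ecc_percent // 100 + 11  # 33 + 11 = 44
total_size_bits = bits_size + ecc_bits
print(ecc_bits, total_size_bits)  # 44 144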
java
public List<SpecNode> getAllSpecNodes() { final ArrayList<SpecNode> retValue = new ArrayList<SpecNode>(); // Add all the levels retValue.addAll(levels); // Add all the topics for (final Entry<Integer, List<ITopicNode>> topicEntry : topics.entrySet()) { for (final ITopicNode topic : topicEntry.getValue()) { if (topic instanceof SpecNode) { retValue.add((SpecNode) topic); } } } return retValue; }
python
def get_blockchain_data(self):
    """
    Finalize tree and return byte array to issue on blockchain

    :return: merkle root of the finalized tree, as bytes
    """
    self.tree.make_tree()
    merkle_root = self.tree.get_merkle_root()
    return h2b(ensure_string(merkle_root))
python
def _get_choices(self):
    """Lazily build and cache the choices tuple from the constants of
    ``choices_class``, overriding the standard field method."""
    if not self._choices:
        self._choices = tuple(
            (x.name, getattr(x, 'verbose_name', x.name) or x.name)
            for x in self.choices_class.constants()
        )
    return self._choices
java
private void closeStream(InputStream stream) { if (stream != null) { try { stream.close(); } catch (Exception ex) { LOG.error(ex.getLocalizedMessage(), ex); } } }
java
public EntryRecord getRecord(Object key, byte[] serializedKey) throws IOException { int segment = (key.hashCode() & Integer.MAX_VALUE) % segments.length; lock.readLock().lock(); try { return IndexNode.applyOnLeaf(segments[segment], serializedKey, segments[segment].rootReadLock(), IndexNode.ReadOperation.GET_RECORD); } finally { lock.readLock().unlock(); } }
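The segment pick above masks hashCode() to a non-negative value before the modulo; the same trick in Python (hash() of str is process-salted, but the bucketing logic is identical):

def segment_for(key, num_segments):
    # mask to 31 bits, mirroring `hashCode() & Integer.MAX_VALUE`
    return (hash(key) & 0x7FFFFFFF) % num_segments

print(segment_for('user:42', 16))  # some bucket in 0..15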
python
def parse_term(s): """Parse single s-expr term from bytes.""" size, s = s.split(b':', 1) size = int(size) return s[:size], s[size:]
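Usage sketch: the input is netstring-style '<len>:<payload>' framing, and the function peels one term off the front, returning (payload, rest).

term, rest = parse_term(b'5:hello3:foo')
print(term, rest)  # b'hello' b'3:foo'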
java
public ASTMethod getMethod(ExecutableElement executableElement) { ImmutableList<ASTParameter> parameters = getParameters(executableElement.getParameters()); ASTAccessModifier modifier = buildAccessModifier(executableElement); ImmutableSet<ASTType> throwsTypes = buildASTElementTypes(executableElement.getThrownTypes()); return new ASTElementMethod(executableElement, astTypeBuilderVisitor, parameters, modifier, getAnnotations(executableElement), throwsTypes); }