language: stringclasses, 2 values
func_code_string: stringlengths, 63 to 466k
java
private void readObject(final ObjectInputStream in) throws IOException, ClassNotFoundException {
    in.defaultReadObject();
    final Calendar definingCalendar = Calendar.getInstance(timeZone, locale);
    init(definingCalendar);
}
java
public synchronized void submitRunTaskCommand(long jobId, int taskId, JobConfig jobConfig,
    Object taskArgs, long workerId) {
  RunTaskCommand.Builder runTaskCommand = RunTaskCommand.newBuilder();
  runTaskCommand.setJobId(jobId);
  runTaskCommand.setTaskId(taskId);
  try {
    runTaskCommand.setJobConfig(ByteString.copyFrom(SerializationUtils.serialize(jobConfig)));
    if (taskArgs != null) {
      runTaskCommand.setTaskArgs(ByteString.copyFrom(SerializationUtils.serialize(taskArgs)));
    }
  } catch (IOException e) {
    // TODO(yupeng) better exception handling
    LOG.info("Failed to serialize the run task command:" + e);
    return;
  }
  JobCommand.Builder command = JobCommand.newBuilder();
  command.setRunTaskCommand(runTaskCommand);
  if (!mWorkerIdToPendingCommands.containsKey(workerId)) {
    mWorkerIdToPendingCommands.put(workerId, Lists.<JobCommand>newArrayList());
  }
  mWorkerIdToPendingCommands.get(workerId).add(command.build());
}
python
def _set_raw_raise(self, sep):
    """Set the output raw exception section

    :param sep: the separator of current style
    """
    raw = ''
    if self.dst.style['out'] == 'numpydoc':
        if 'raise' not in self.dst.numpydoc.get_excluded_sections():
            raw += '\n'
            if 'raise' in self.dst.numpydoc.get_mandatory_sections() or \
                    (self.docs['out']['raises'] and
                     'raise' in self.dst.numpydoc.get_optional_sections()):
                spaces = ' ' * 4
                with_space = lambda s: '\n'.join(
                    [self.docs['out']['spaces'] + spaces + l.lstrip() if i > 0 else l
                     for i, l in enumerate(s.splitlines())])
                raw += self.dst.numpydoc.get_key_section_header('raise', self.docs['out']['spaces'])
                if len(self.docs['out']['raises']):
                    for p in self.docs['out']['raises']:
                        raw += self.docs['out']['spaces'] + p[0] + '\n'
                        raw += self.docs['out']['spaces'] + spaces + with_space(p[1]).strip() + '\n'
                raw += '\n'
    elif self.dst.style['out'] == 'google':
        if 'raise' not in self.dst.googledoc.get_excluded_sections():
            raw += '\n'
            if 'raise' in self.dst.googledoc.get_mandatory_sections() or \
                    (self.docs['out']['raises'] and
                     'raise' in self.dst.googledoc.get_optional_sections()):
                spaces = ' ' * 2
                with_space = lambda s: '\n'.join(
                    [self.docs['out']['spaces'] + spaces + l.lstrip() if i > 0 else l
                     for i, l in enumerate(s.splitlines())])
                raw += self.dst.googledoc.get_key_section_header('raise', self.docs['out']['spaces'])
                if len(self.docs['out']['raises']):
                    for p in self.docs['out']['raises']:
                        raw += self.docs['out']['spaces'] + spaces
                        if p[0] is not None:
                            raw += p[0] + sep
                        if p[1]:
                            raw += p[1].strip()
                        raw += '\n'
                raw += '\n'
    elif self.dst.style['out'] == 'groups':
        pass
    else:
        with_space = lambda s: '\n'.join(
            [self.docs['out']['spaces'] + l if i > 0 else l
             for i, l in enumerate(s.splitlines())])
        if len(self.docs['out']['raises']):
            if not self.docs['out']['params'] and not self.docs['out']['return']:
                raw += '\n'
            for p in self.docs['out']['raises']:
                raw += self.docs['out']['spaces'] + self.dst.get_key('raise', 'out') + ' '
                if p[0] is not None:
                    raw += p[0] + sep
                if p[1]:
                    raw += with_space(p[1]).strip()
                raw += '\n'
        raw += '\n'
    return raw
java
@Override
public synchronized boolean truncate() throws TTException {
    mDatabase.close();
    if (mEnv.getDatabaseNames().contains(NAME)) {
        mEnv.removeDatabase(null, NAME);
    }
    mEnv.close();
    return IOUtils.recursiveDelete(mFile);
}
python
def prettyln(text, fill='-', align='^', prefix='[ ', suffix=' ]', length=69):
    """Wrap `text` in a pretty line with maximum length."""
    text = '{prefix}{0}{suffix}'.format(text, prefix=prefix, suffix=suffix)
    print(
        "{0:{fill}{align}{length}}".format(
            text, fill=fill, align=align, length=length
        )
    )
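A quick usage sketch of the defaults; the 9-character bracketed text centers in a 69-character line:

# Usage sketch for prettyln with its default fill and width.
prettyln('hello')
# prints: '-'*30 + '[ hello ]' + '-'*30  (69 characters total)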
python
def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args):
    """
    :type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration
    :type terminal_controller: fake_switches.terminal.TerminalController
    :type logger: logging.Logger
    :type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase
    """
    self.switch_configuration = switch_configuration
    self.terminal_controller = terminal_controller
    self.logger = logger
    self.piping_processor = piping_processor
    self.sub_processor = None
    self.continuing_to = None
    self.is_done = False
    self.replace_input = False
    self.awaiting_keystroke = False
python
def inner_contains(self, time_point):
    """
    Returns ``True`` if this interval contains the given time point,
    excluding its extrema (begin and end).

    :param time_point: the time point to test
    :type  time_point: :class:`~aeneas.exacttiming.TimeValue`
    :rtype: bool
    """
    if not isinstance(time_point, TimeValue):
        raise TypeError(u"time_point is not an instance of TimeValue")
    return (self.begin < time_point) and (time_point < self.end)
java
protected Object handleException(Exception ex, Method method, Object[] params) throws Exception { throw ex; }
java
private String getSha1FromRegistryPackageUrl(String registryPackageUrl, boolean isScopeDep,
        String versionOfPackage, RegistryType registryType, String npmAccessToken) {
    String uriScopeDep = registryPackageUrl;
    if (isScopeDep) {
        try {
            uriScopeDep = registryPackageUrl.replace(BomFile.DUMMY_PARAMETER_SCOPE_PACKAGE, URL_SLASH);
        } catch (Exception e) {
            logger.warn("Failed creating uri of {}", registryPackageUrl);
            return Constants.EMPTY_STRING;
        }
    }
    String responseFromRegistry = null;
    try {
        Client client = Client.create();
        ClientResponse response;
        WebResource resource;
        resource = client.resource(uriScopeDep);
        if (StringUtils.isEmptyOrNull(npmAccessToken)) {
            response = resource.accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
            logger.debug("npm.accessToken is not defined");
        } else {
            logger.debug("npm.accessToken is defined");
            if (registryType == RegistryType.VISUAL_STUDIO) {
                String userCredentials = BEARER + Constants.COLON + npmAccessToken;
                String basicAuth = BASIC + Constants.WHITESPACE
                        + new String(Base64.getEncoder().encode(userCredentials.getBytes()));
                response = resource.accept(MediaType.APPLICATION_JSON)
                        .header("Authorization", basicAuth).get(ClientResponse.class);
            } else {
                // Bearer authorization
                String userCredentials = BEARER + Constants.WHITESPACE + npmAccessToken;
                response = resource.accept(MediaType.APPLICATION_JSON)
                        .header("Authorization", userCredentials).get(ClientResponse.class);
            }
        }
        if (response.getStatus() >= 200 && response.getStatus() < 300) {
            responseFromRegistry = response.getEntity(String.class);
        } else {
            logger.debug("Got {} status code from registry using the url {}.",
                    response.getStatus(), uriScopeDep);
        }
    } catch (Exception e) {
        logger.warn("Could not reach the registry using the URL: {}. Got an error: {}",
                registryPackageUrl, e.getMessage());
        return Constants.EMPTY_STRING;
    }
    if (responseFromRegistry == null) {
        return Constants.EMPTY_STRING;
    }
    JSONObject jsonRegistry = new JSONObject(responseFromRegistry);
    String shasum;
    if (isScopeDep) {
        shasum = jsonRegistry.getJSONObject(VERSIONS).getJSONObject(versionOfPackage)
                .getJSONObject(DIST).getString(SHASUM);
    } else {
        shasum = jsonRegistry.getJSONObject(DIST).getString(SHASUM);
    }
    return shasum;
}
java
public static ManifestVersion get(final Class<?> clazz) {
    final String manifestUrl = ClassExtensions.getManifestUrl(clazz);
    try {
        return of(manifestUrl != null ? new URL(manifestUrl) : null);
    } catch (final MalformedURLException ignore) {
        return of(null);
    }
}
java
public String[] arrayDefaultString(final Object[] target, final Object defaultValue) {
    if (target == null) {
        return null;
    }
    final String[] result = new String[target.length];
    for (int i = 0; i < target.length; i++) {
        result[i] = defaultString(target[i], defaultValue);
    }
    return result;
}
python
def has_flag(compiler, flagname):
    """Return a boolean indicating whether a flag name is supported on
    the specified compiler.
    """
    with TemporaryDirectory() as tmpdir, \
            stdchannel_redirected(sys.stderr, os.devnull), \
            stdchannel_redirected(sys.stdout, os.devnull):
        f = tempfile.mktemp(suffix='.cpp', dir=tmpdir)
        with open(f, 'w') as fh:
            fh.write('int main (int argc, char **argv) { return 0; }')
        try:
            compiler.compile([f], extra_postargs=[flagname], output_dir=tmpdir)
        except setuptools.distutils.errors.CompileError:
            return False
        return True
python
def get(self, name, *default):
    # type: (str, Any) -> Any
    """ Get context value with the given name and optional default.

    Args:
        name (str):
            The name of the context value.
        *default (Any):
            If given and the key doesn't exist, this will be returned
            instead. If it's not given and the context value does not
            exist, `AttributeError` will be raised.

    Returns:
        The requested context value. If the value does not exist it will
        return `default` if given or raise `AttributeError`.

    Raises:
        AttributeError: If the value does not exist and `default` was not
            given.
    """
    curr = self.values
    for part in name.split('.'):
        if part in curr:
            curr = curr[part]
        elif default:
            return default[0]
        else:
            fmt = "Context value '{}' does not exist:\n{}"
            raise AttributeError(fmt.format(
                name, util.yaml_dump(self.values)
            ))
    return curr
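The dotted-name traversal above can be sketched standalone (a minimal illustration with a hypothetical values dict, not the class's real context object):

# Minimal sketch of the dotted-path lookup; `values` is hypothetical.
values = {'db': {'host': 'localhost', 'port': 5432}}
curr = values
for part in 'db.port'.split('.'):
    curr = curr[part]
print(curr)  # 5432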
java
public int getContentLength() {
    try {
        getInputStream();
    } catch (Exception e) {
        return -1;
    }
    int l = contentLength;
    if (l < 0) {
        try {
            l = Integer.parseInt(properties.findValue("content-length"));
            setContentLength(l);
        } catch (Exception e) {
        }
    }
    return l;
}
python
def matchfirst(self, event):
    '''
    Return first match for this event

    :param event: an input event
    '''
    # 1. matches(self.index[ind], event)
    # 2. matches(self.any, event)
    # 3. self.matches
    if self.depth < len(event.indices):
        ind = event.indices[self.depth]
        if ind in self.index:
            m = self.index[ind].matchfirst(event)
            if m is not None:
                return m
    if hasattr(self, 'any'):
        m = self.any.matchfirst(event)
        if m is not None:
            return m
    if self._use_dict:
        for o, m in self.matchers_dict:
            if m is None or m.judge(event):
                return o
    else:
        for o, m in self.matchers_list:
            if m is None or m.judge(event):
                return o
python
def process(self):
    '''Run all tag processors.'''
    for tag_proc in self.tag_procs:
        before_count = self.entry_count
        self.run_tag_processor(tag_proc)
        after_count = self.entry_count
        if self.verbose:
            print('Inserted %d entries for "%s" tag processor' % (
                after_count - before_count, tag_proc), file=sys.stderr)
    if self.verbose:
        print('Inserted %d entries overall' % self.entry_count,
              file=sys.stderr)
python
def give_zip_element_z_and_names(element_name):
    '''Create two indexes that, given the name of the element/specie,
    give the atomic number.'''
    # import numpy as np
    global z_bismuth
    z_bismuth = 83
    global z_for_elem
    z_for_elem = []
    global index_stable
    index_stable = []
    i_for_stable = 1
    i_for_unstable = 0
    for i in range(z_bismuth):
        z_for_elem.append(int(i + 1))
        # the only elements below bismuth with no stable isotopes are Tc and Pm
        if i + 1 == 43 or i + 1 == 61:
            index_stable.append(i_for_unstable)
        else:
            index_stable.append(i_for_stable)

    # Name -> atomic number table; replaces the original elif chain over the
    # same symbols ('Neutron' maps to 0, and unknown names stay 0, as before).
    symbols = ['H', 'He', 'Li', 'Be', 'B', 'C', 'N', 'O', 'F', 'Ne',
               'Na', 'Mg', 'Al', 'Si', 'P', 'S', 'Cl', 'Ar', 'K', 'Ca',
               'Sc', 'Ti', 'V', 'Cr', 'Mn', 'Fe', 'Co', 'Ni', 'Cu', 'Zn',
               'Ga', 'Ge', 'As', 'Se', 'Br', 'Kr', 'Rb', 'Sr', 'Y', 'Zr',
               'Nb', 'Mo', 'Tc', 'Ru', 'Rh', 'Pd', 'Ag', 'Cd', 'In', 'Sn',
               'Sb', 'Te', 'I', 'Xe', 'Cs', 'Ba', 'La', 'Ce', 'Pr', 'Nd',
               'Pm', 'Sm', 'Eu', 'Gd', 'Tb', 'Dy', 'Ho', 'Er', 'Tm', 'Yb',
               'Lu', 'Hf', 'Ta', 'W', 'Re', 'Os', 'Ir', 'Pt', 'Au', 'Hg',
               'Tl', 'Pb', 'Bi', 'Po', 'At']
    z_of_name = {'Neutron': 0}
    z_of_name.update((sym, z) for z, sym in enumerate(symbols, start=1))

    dummy_index = np.zeros(len(element_name))
    for i in range(len(element_name)):
        dummy_index[i] = z_of_name.get(element_name[i], 0)
    # if spe[0] == 'N 1':
    #     znum_int[0] = 0

    # here the index to connect name and atomic numbers.
    global index_z_for_elements
    index_z_for_elements = {}
    for a, b in zip(element_name, dummy_index):
        index_z_for_elements[a] = b
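A hedged usage sketch (assumes numpy is imported at module level as np, as the commented import suggests):

give_zip_element_z_and_names(['H', 'He', 'Fe'])
print(index_z_for_elements['Fe'])  # 26.0 -- values come from an np.zeros float array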
java
public ServiceFuture<DeletedCertificateBundle> getDeletedCertificateAsync(String vaultBaseUrl,
        String certificateName, final ServiceCallback<DeletedCertificateBundle> serviceCallback) {
    return ServiceFuture.fromResponse(
            getDeletedCertificateWithServiceResponseAsync(vaultBaseUrl, certificateName), serviceCallback);
}
python
def from_dict(cls, d, ignore=()):
    """Create an instance from a serialized version of cls

    Args:
        d(dict): Endpoints of cls to set
        ignore(tuple): Keys to ignore

    Returns:
        Instance of this class
    """
    filtered = {}
    for k, v in d.items():
        if k == "typeid":
            assert v == cls.typeid, \
                "Dict has typeid %s but %s has typeid %s" % \
                (v, cls, cls.typeid)
        elif k not in ignore:
            filtered[k] = v
    try:
        inst = cls(**filtered)
    except TypeError as e:
        raise TypeError("%s raised error: %s" % (cls.typeid, str(e)))
    return inst
python
def are_worth_chaining(left_converter, right_converter) -> bool:
    """
    Utility method to check if it makes sense to chain these two converters.
    Returns True if it brings value to chain the first converter with the
    second converter. To bring value,

    * the second converter's input should not be a parent class of the first
      converter's input (in that case, it is always more interesting to use
      the second converter directly for any potential input)
    * the second converter's output should not be a parent class of the first
      converter's input or output. Otherwise the chain does not even make any
      progress :)
    * the first converter has to allow chaining (with converter.can_chain=True)

    :param left_converter:
    :param right_converter:
    :return:
    """
    if not left_converter.can_chain:
        return False
    elif not is_any_type(left_converter.to_type) and is_any_type(right_converter.to_type):
        # we gain the capability to generate any type. So it is interesting.
        return True
    elif issubclass(left_converter.from_type, right_converter.to_type) \
            or issubclass(left_converter.to_type, right_converter.to_type) \
            or issubclass(left_converter.from_type, right_converter.from_type):
        # Not interesting: the outcome of the chain would be no better than
        # one of the converters alone.
        return False
    # Note: we don't say that chaining a generic converter with a converter is useless.
    # Indeed it might unlock some capabilities for the user (new file extensions, etc.)
    # that would not be available with the generic parser targetting to_type alone.
    # For example parsing object A from its constructor then converting A to B might
    # sometimes be interesting, rather than parsing B from its constructor.
    else:
        # interesting
        return True
python
def delete(names, yes):
    """
    Delete a training job.
    """
    failures = False

    for name in names:
        try:
            experiment = ExperimentClient().get(normalize_job_name(name))
        except FloydException:
            experiment = ExperimentClient().get(name)

        if not experiment:
            failures = True
            continue

        if not yes and not click.confirm("Delete Job: {}?".format(experiment.name),
                                         abort=False, default=False):
            floyd_logger.info("Job {}: Skipped.".format(experiment.name))
            continue

        if not ExperimentClient().delete(experiment.id):
            failures = True
        else:
            floyd_logger.info("Job %s Deleted", experiment.name)

    if failures:
        sys.exit(1)
python
def Sun_Mishima(m, D, rhol, rhog, mul, kl, Hvap, sigma, q=None, Te=None):
    r'''Calculates heat transfer coefficient for film boiling of saturated
    fluid in any orientation of flow. Correlation is as shown in [1]_, and
    also reviewed in [2]_ and [3]_.

    Either the heat flux or excess temperature is required for the
    calculation of heat transfer coefficient. Uses liquid-only Reynolds
    number, Weber number, and Boiling number. Weber number is defined in
    terms of the velocity if all fluid were liquid.

    .. math::
        h_{tp} = \frac{6 Re_{lo}^{1.05} Bg^{0.54}}
            {We_l^{0.191}(\rho_l/\rho_g)^{0.142}}\frac{k_l}{D}

        Re_{lo} = \frac{G_{tp}D}{\mu_l}

    Parameters
    ----------
    m : float
        Mass flow rate [kg/s]
    D : float
        Diameter of the tube [m]
    rhol : float
        Density of the liquid [kg/m^3]
    rhog : float
        Density of the gas [kg/m^3]
    mul : float
        Viscosity of liquid [Pa*s]
    kl : float
        Thermal conductivity of liquid [W/m/K]
    Hvap : float
        Heat of vaporization of liquid [J/kg]
    sigma : float
        Surface tension of liquid [N/m]
    q : float, optional
        Heat flux to wall [W/m^2]
    Te : float, optional
        Excess temperature of wall, [K]

    Returns
    -------
    h : float
        Heat transfer coefficient [W/m^2/K]

    Notes
    -----
    [1]_ has been reviewed. [1]_ used 2501 data points to derive the results,
    covering hydraulic diameters from 0.21 to 6.05 mm and 11 different fluids.

    Examples
    --------
    >>> Sun_Mishima(m=1, D=0.3, rhol=567., rhog=18.09, kl=0.086, mul=156E-6,
    ... sigma=0.02, Hvap=9E5, Te=10)
    507.6709168372167

    References
    ----------
    .. [1] Sun, Licheng, and Kaichiro Mishima. "An Evaluation of Prediction
       Methods for Saturated Flow Boiling Heat Transfer in Mini-Channels."
       International Journal of Heat and Mass Transfer 52, no. 23-24
       (November 2009): 5323-29. doi:10.1016/j.ijheatmasstransfer.2009.06.041.
    .. [2] Fang, Xiande, Zhanru Zhou, and Dingkun Li. "Review of Correlations
       of Flow Boiling Heat Transfer Coefficients for Carbon Dioxide."
       International Journal of Refrigeration 36, no. 8 (December 2013):
       2017-39. doi:10.1016/j.ijrefrig.2013.05.015.
    '''
    G = m/(pi/4*D**2)
    V = G/rhol
    Relo = G*D/mul
    We = Weber(V=V, L=D, rho=rhol, sigma=sigma)
    if q:
        Bg = Boiling(G=G, q=q, Hvap=Hvap)
        return 6*Relo**1.05*Bg**0.54/(We**0.191*(rhol/rhog)**0.142)*kl/D
    elif Te:
        A = 6*Relo**1.05/(We**0.191*(rhol/rhog)**0.142)*kl/D
        return A**(50/23.)*Te**(27/23.)/(G**(27/23.)*Hvap**(27/23.))
    else:
        raise Exception('Either q or Te is needed for this correlation')
java
public void delete_class_attribute_property(Database database, String name, String attname,
        String propname) throws DevFailed {
    String[] array = new String[1];
    array[0] = propname;
    delete_class_attribute_property(database, name, attname, array);
}
python
def assert_valid(self, instance, value=None):
    """Checks if valid, including HasProperty instances pass validation"""
    valid = super(Instance, self).assert_valid(instance, value)
    if not valid:
        return False
    if value is None:
        value = instance._get(self.name)
    if isinstance(value, HasProperties):
        value.validate()
    return True
python
def List(device, device_path):
    """Yields a directory listing, one formatted line per file.

    Args:
        device: Device to operate on.
        device_path: Directory to list.
    """
    files = device.List(device_path)
    files.sort(key=lambda x: x.filename)
    maxname = max(len(f.filename) for f in files)
    maxsize = max(len(str(f.size)) for f in files)
    for f in files:
        mode = (
            ('d' if stat.S_ISDIR(f.mode) else '-') +
            ('r' if f.mode & stat.S_IRUSR else '-') +
            ('w' if f.mode & stat.S_IWUSR else '-') +
            ('x' if f.mode & stat.S_IXUSR else '-') +
            ('r' if f.mode & stat.S_IRGRP else '-') +
            ('w' if f.mode & stat.S_IWGRP else '-') +
            ('x' if f.mode & stat.S_IXGRP else '-') +
            ('r' if f.mode & stat.S_IROTH else '-') +
            ('w' if f.mode & stat.S_IWOTH else '-') +
            ('x' if f.mode & stat.S_IXOTH else '-'))
        t = time.gmtime(f.mtime)
        yield '%s %*d %04d-%02d-%02d %02d:%02d:%02d %-*s\n' % (
            mode, maxsize, f.size,
            t.tm_year, t.tm_mon, t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec,
            maxname, f.filename)
java
public void setResolved(String v) {
    if (Coordination_Type.featOkTst && ((Coordination_Type) jcasType).casFeat_resolved == null)
        jcasType.jcas.throwFeatMissing("resolved", "de.julielab.jules.types.Coordination");
    jcasType.ll_cas.ll_setStringValue(addr, ((Coordination_Type) jcasType).casFeatCode_resolved, v);
}
python
def _create_initstate_and_embeddings(self):
    """Create the initial state for the cell and the data embeddings."""
    self._init_state = self.cell.zero_state(self.batch_size, tf.float32)
    embedding = tf.get_variable(
        "embedding", [self.vocab_size, self.num_hidden])
    inputs = tf.nn.embedding_lookup(embedding, self.input_data)
    self.inputs = tf.nn.dropout(inputs, self.dropout)
python
def cli(env):
    """Prints various bits of information about an account."""
    manager = AccountManager(env.client)
    summary = manager.get_summary()
    env.fout(get_snapshot_table(summary))
python
def visit_slot(self, slot_name: str, slot: SlotDefinition) -> None:
    """ Add a slot definition per slot

    @param slot_name:
    @param slot:
    @return:
    """
    # Note: We use the raw name in OWL and add a subProperty arc
    slot_uri = self.prop_uri(slot.name)

    # Parent slots
    if slot.is_a:
        self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(slot.is_a)))
    for mixin in slot.mixins:
        self.graph.add((slot_uri, RDFS.subPropertyOf, self.prop_uri(mixin)))

    # Slot range
    if not slot.range or slot.range in builtin_names:
        self.graph.add((slot_uri, RDF.type,
                        OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty))
        self.graph.add((slot_uri, RDFS.range, URIRef(builtin_uri(slot.range, expand=True))))
    elif slot.range in self.schema.types:
        self.graph.add((slot_uri, RDF.type,
                        OWL.DatatypeProperty if slot.object_property else OWL.AnnotationProperty))
        self.graph.add((slot_uri, RDFS.range, self.type_uri(slot.range)))
    else:
        self.graph.add((slot_uri, RDF.type,
                        OWL.ObjectProperty if slot.object_property else OWL.AnnotationProperty))
        self.graph.add((slot_uri, RDFS.range, self.class_uri(slot.range)))

    # Slot domain
    if slot.domain:
        self.graph.add((slot_uri, RDFS.domain, self.class_uri(slot.domain)))

    # Annotations
    self.graph.add((slot_uri, RDFS.label, Literal(slot.name)))
    if slot.description:
        self.graph.add((slot_uri, OBO.IAO_0000115, Literal(slot.description)))
java
public void resume(String resourceGroupName, String automationAccountName, UUID jobId) { resumeWithServiceResponseAsync(resourceGroupName, automationAccountName, jobId).toBlocking().single().body(); }
java
public static LinkedHashMap<String, ArrayList<Row>> parseSalt(VisualizerInput input,
        boolean showSpanAnnos, boolean showTokenAnnos, List<String> annotationNames,
        Set<String> mediaLayer, boolean replaceValueWithMediaIcon,
        long startTokenIndex, long endTokenIndex,
        PDFController pdfController, STextualDS text) {

    SDocumentGraph graph = input.getDocument().getDocumentGraph();

    // only look at annotations which were defined by the user
    LinkedHashMap<String, ArrayList<Row>> rowsByAnnotation = new LinkedHashMap<>();

    for (String anno : annotationNames) {
        rowsByAnnotation.put(anno, new ArrayList<Row>());
    }

    AtomicInteger eventCounter = new AtomicInteger();

    PDFPageHelper pageNumberHelper = new PDFPageHelper(input);

    if (showSpanAnnos) {
        for (SSpan span : graph.getSpans()) {
            if (text == null || text == CommonHelper.getTextualDSForNode(span, graph)) {
                addAnnotationsForNode(span, graph, startTokenIndex, endTokenIndex,
                        pdfController, pageNumberHelper, eventCounter, rowsByAnnotation,
                        true, mediaLayer, replaceValueWithMediaIcon);
            }
        } // end for each span
    }

    if (showTokenAnnos) {
        for (SToken tok : graph.getTokens()) {
            if (text == null || text == CommonHelper.getTextualDSForNode(tok, graph)) {
                addAnnotationsForNode(tok, graph, startTokenIndex, endTokenIndex,
                        pdfController, pageNumberHelper, eventCounter, rowsByAnnotation,
                        false, mediaLayer, replaceValueWithMediaIcon);
            }
        }
    }

    // 2. merge rows when possible
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        mergeAllRowsIfPossible(e.getValue());
    }

    // 3. sort events on one row by left token index
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        for (Row r : e.getValue()) {
            sortEventsByTokenIndex(r);
        }
    }

    // 4. split up events if they cover islands
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        for (Row r : e.getValue()) {
            splitRowsOnIslands(r, graph, text, startTokenIndex, endTokenIndex);
        }
    }

    // 5. split up events if they have gaps
    for (Map.Entry<String, ArrayList<Row>> e : rowsByAnnotation.entrySet()) {
        for (Row r : e.getValue()) {
            splitRowsOnGaps(r, graph, startTokenIndex, endTokenIndex);
        }
    }

    return rowsByAnnotation;
}
java
public void encode(DerOutputStream out) throws IOException {
    DerOutputStream tmp = new DerOutputStream();
    policyIdentifier.encode(tmp);
    if (!policyQualifiers.isEmpty()) {
        DerOutputStream tmp2 = new DerOutputStream();
        for (PolicyQualifierInfo pq : policyQualifiers) {
            tmp2.write(pq.getEncoded());
        }
        tmp.write(DerValue.tag_Sequence, tmp2);
    }
    out.write(DerValue.tag_Sequence, tmp);
}
python
def retryable(retryer=retry_ex, times=3, cap=120000):
    """
    A decorator to make a function retry.

    By default the retry occurs when an exception is thrown, but this may be
    changed by modifying the ``retryer`` argument. See also
    :py:func:`retry_ex` and :py:func:`retry_bool`. By default
    :py:func:`retry_ex` is used as the retry function.

    Note that the decorator must be called even if not given keyword
    arguments.

    :param function retryer: A function to handle retries
    :param int times: Number of times to retry on initial failure
    :param int cap: Maximum wait time in milliseconds

    :Example:

    ::

        @retryable()
        def can_fail():
            ....

        @retryable(retryer=retry_bool, times=10)
        def can_fail_bool():
            ....
    """
    def _retryable(func):
        @f.wraps(func)
        def wrapper(*args, **kwargs):
            return retryer(lambda: func(*args, **kwargs), times, cap)
        return wrapper
    return _retryable
python
def do_grep(self, params):
    """
\x1b[1mNAME\x1b[0m
        grep - Prints znodes with a value matching the given text

\x1b[1mSYNOPSIS\x1b[0m
        grep [path] <content> [show_matches]

\x1b[1mOPTIONS\x1b[0m
        * path: the path (default: cwd)
        * show_matches: show the content that matched (default: false)

\x1b[1mEXAMPLES\x1b[0m
        > grep / unbound true
        /passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin
        /copy/passwd: unbound:x:992:991:Unbound DNS resolver:/etc/unbound:/sbin/nologin

    """
    self.grep(params.path, params.content, 0, params.show_matches)
java
public static final Long date2utc(Date date) {
    // use null for a null date
    if (date == null)
        return null;
    long time = date.getTime();
    // remove the timezone offset
    time -= timezoneOffsetMillis(date);
    return time;
}
java
public BigMoney plus(Iterable<? extends BigMoneyProvider> moniesToAdd) {
    BigDecimal total = amount;
    for (BigMoneyProvider moneyProvider : moniesToAdd) {
        BigMoney money = checkCurrencyEqual(moneyProvider);
        total = total.add(money.amount);
    }
    return with(total);
}
java
@Override
public List<AnalyzedTokenReadings> tag(List<String> sentenceTokens, boolean ignoreCase)
        throws IOException {
    List<AnalyzedTokenReadings> tokens = super.tag(sentenceTokens, ignoreCase);
    for (int i = 0; i < tokens.size(); i++) {
        AnalyzedTokenReadings reading = tokens.get(i);
        if (reading != null && reading.getToken() != null
                && reading.getToken().contains("ss") && !reading.isTagged()) {
            AnalyzedTokenReadings replacementReading = lookup(reading.getToken().replace("ss", "ß"));
            if (replacementReading != null) {
                for (AnalyzedToken at : replacementReading.getReadings()) {
                    reading.addReading(new AnalyzedToken(reading.getToken(), at.getPOSTag(), at.getLemma()));
                }
            }
        }
    }
    return tokens;
}
java
public void add(String typeName, PropertySerializer<?> serializer, Class<? extends PropertyEditorBase<?>> editorClass) { if (get(typeName) == null) { register(new PropertyType(typeName, serializer, editorClass)); } }
java
public static CcgUnaryRule parseFrom(String line) {
    String[] chunks = new CsvParser(CsvParser.DEFAULT_SEPARATOR, CsvParser.DEFAULT_QUOTE,
            CsvParser.NULL_ESCAPE).parseLine(line.trim());
    Preconditions.checkArgument(chunks.length >= 1, "Illegal unary rule string: %s", line);

    String[] syntacticParts = chunks[0].split(" ");
    Preconditions.checkArgument(syntacticParts.length == 2, "Illegal unary rule string: %s", line);
    HeadedSyntacticCategory inputSyntax = HeadedSyntacticCategory.parseFrom(syntacticParts[0]);
    HeadedSyntacticCategory returnSyntax = HeadedSyntacticCategory.parseFrom(syntacticParts[1]);

    // Ensure that the return syntactic type is in canonical form.
    HeadedSyntacticCategory returnCanonical = returnSyntax.getCanonicalForm();
    int[] originalToCanonical = returnSyntax.unifyVariables(returnSyntax.getUniqueVariables(),
            returnCanonical, new int[0]);

    int[] inputVars = inputSyntax.getUniqueVariables();
    int[] inputRelabeling = new int[inputVars.length];
    int[] returnOriginalVars = returnSyntax.getUniqueVariables();
    int nextUnassignedVar = Ints.max(returnCanonical.getUniqueVariables()) + 1;
    for (int i = 0; i < inputVars.length; i++) {
        int index = Ints.indexOf(returnOriginalVars, inputVars[i]);
        if (index != -1) {
            inputRelabeling[i] = originalToCanonical[index];
        } else {
            inputRelabeling[i] = nextUnassignedVar;
            nextUnassignedVar++;
        }
    }
    HeadedSyntacticCategory relabeledInput = inputSyntax.relabelVariables(inputVars, inputRelabeling);

    Expression2 logicalForm = null;
    if (chunks.length >= 2 && chunks[1].trim().length() > 0) {
        logicalForm = ExpressionParser.expression2().parse(chunks[1]);
    }

    if (chunks.length >= 3) {
        throw new UnsupportedOperationException(
                "Using unfilled dependencies with unary CCG rules is not yet implemented");
        /*
         * String[] newDeps = chunks[4].split(" ");
         * Preconditions.checkArgument(newDeps.length == 3);
         * long subjectNum = Long.parseLong(newDeps[0].substring(1));
         * long argNum = Long.parseLong(newDeps[1]);
         * long objectNum = Long.parseLong(newDeps[2].substring(1));
         * unfilledDeps = new long[1];
         *
         * unfilledDeps[0] = CcgParser.marshalUnfilledDependency(objectNum, argNum, subjectNum, 0, 0);
         */
    }
    return new CcgUnaryRule(relabeledInput, returnCanonical, logicalForm);
}
python
def similarity1DdiffShapedArrays(arr1, arr2, normalize=False):
    """
    Compare two strictly monotonically increasing 1d arrays of the same or
    different size; return a similarity index (0 = identical).
    """
    # assign longer and shorter here, because jit cannot do it
    if len(arr1) < len(arr2):
        arr1, arr2 = arr2, arr1
    if not len(arr2):
        out = sum(arr1)
    else:
        out = _calc(arr1, arr2)
    if normalize:
        if not len(arr2):
            mn = arr1[0]
            mx = arr1[-1]
        else:
            mn = min(arr1[0], arr2[0])
            mx = max(arr1[-1], arr2[-1])
        out = out / (mx - mn)
    return out
java
private boolean intersects(Point point, int fudge, QuadCurve2D.Float c) {
    if (!c.intersects(point.x - fudge, point.y - fudge, fudge << 1, fudge << 1))
        return false;
    if (c.getFlatness() < fudge)
        return true;
    QuadCurve2D.Float f1 = new QuadCurve2D.Float(), f2 = new QuadCurve2D.Float();
    c.subdivide(f1, f2);
    return intersects(point, fudge, f1) || intersects(point, fudge, f2);
}
java
public void attachUI(final WComponent ui) {
    if (backing == null || backing instanceof WComponent) {
        backing = ui;
    } else if (backing instanceof InterceptorComponent) {
        ((InterceptorComponent) backing).attachUI(ui);
    } else {
        throw new IllegalStateException(
                "Unable to attachUI. Unknown type of WebComponent encountered. "
                        + backing.getClass().getName());
    }
}
python
async def replication(self, *, dc=None):
    """Checks status of ACL replication

    Parameters:
        dc (str): Specify datacenter that will be used.
            Defaults to the agent's local datacenter.
    Returns:
        Object: Replication information

    Returns the status of the ACL replication process in the datacenter.
    This is intended to be used by operators, or by automation checking
    the health of ACL replication.

    By default, the datacenter of the agent is queried; however, the dc
    can be provided using the "dc" parameter. It returns a body like this::

        {
            "Enabled": True,
            "Running": True,
            "SourceDatacenter": "dc1",
            "ReplicatedIndex": 1976,
            "LastSuccess": datetime(2016, 8, 5, 6, 28, 58, tzinfo=tzutc()),
            "LastError": datetime(2016, 8, 5, 6, 28, 58, tzinfo=tzutc())
        }

    **Enabled** reports whether ACL replication is enabled for the
    datacenter.

    **Running** reports whether the ACL replication process is running.
    The process may take approximately 60 seconds to begin running after
    a leader election occurs.

    **SourceDatacenter** is the authoritative ACL datacenter that ACLs
    are being replicated from, and will match the acl_datacenter
    configuration.

    **ReplicatedIndex** is the last index that was successfully
    replicated. You can compare this to the Index meta returned by the
    items() endpoint to determine if the replication process has gotten
    all available ACLs. Note that replication runs as a background
    process approximately every 30 seconds, and that local updates are
    rate limited to 100 updates/second, so it may take several minutes
    to perform the initial sync of a large set of ACLs. After the
    initial sync, replica lag should be on the order of about 30 seconds.

    **LastSuccess** is the UTC time of the last successful sync
    operation. Note that since ACL replication is done with a blocking
    query, this may not update for up to 5 minutes if there have been no
    ACL changes to replicate. A zero value of "0001-01-01T00:00:00Z"
    will be present if no sync has been successful.

    **LastError** is the UTC time of the last error encountered during a
    sync operation. If this time is later than LastSuccess, you can
    assume the replication process is not in a good state. A zero value
    of "0001-01-01T00:00:00Z" will be present if no sync has resulted in
    an error.
    """
    params = {"dc": dc}
    response = await self._api.get("/v1/acl/replication", params=params)
    return response.body
python
def _get_aws_variables(self):
    """
    Returns the AWS specific environment variables that should be available
    in the Lambda runtime. They are prefixed with "AWS_*".

    :return dict: Name and value of AWS environment variable
    """
    result = {
        # Variable that says this function is running in Local Lambda
        "AWS_SAM_LOCAL": "true",

        # Function configuration
        "AWS_LAMBDA_FUNCTION_MEMORY_SIZE": str(self.memory),
        "AWS_LAMBDA_FUNCTION_TIMEOUT": str(self.timeout),
        "AWS_LAMBDA_FUNCTION_HANDLER": str(self._function["handler"]),

        # AWS Credentials - Use the input credentials or use the defaults
        "AWS_REGION": self.aws_creds.get("region", self._DEFAULT_AWS_CREDS["region"]),
        "AWS_DEFAULT_REGION": self.aws_creds.get("region", self._DEFAULT_AWS_CREDS["region"]),
        "AWS_ACCESS_KEY_ID": self.aws_creds.get("key", self._DEFAULT_AWS_CREDS["key"]),
        "AWS_SECRET_ACCESS_KEY": self.aws_creds.get("secret", self._DEFAULT_AWS_CREDS["secret"])

        # Additional variables we don't fill in
        # "AWS_ACCOUNT_ID="
        # "AWS_LAMBDA_EVENT_BODY=",
        # "AWS_LAMBDA_FUNCTION_NAME=",
        # "AWS_LAMBDA_FUNCTION_VERSION=",
    }

    # Session Token should be added **only** if the input creds have a token
    # and the value is not empty.
    if self.aws_creds.get("sessiontoken"):
        result["AWS_SESSION_TOKEN"] = self.aws_creds.get("sessiontoken")

    return result
java
@SuppressWarnings("unchecked") public <T> ProxyHandler<T> proxify(final Class<T> clazz) { return (ProxyHandler<T>) proxify(new Class[] { clazz }); }
java
private ControllerErrorHandler getContextErrorHandler(XmlWebApplicationContext context) {
    ControllerErrorHandler errorHandler = null;
    String[] names = context.getBeanNamesForType(ControllerErrorHandler.class);
    for (int i = 0; errorHandler == null && i < names.length; i++) {
        errorHandler = (ControllerErrorHandler) context.getBean(names[i]);
        Class<?> userClass = ClassUtils.getUserClass(errorHandler);
        if (userClass.isAnnotationPresent(Ignored.class)) {
            logger.debug("Ignored controllerErrorHandler: " + errorHandler);
            errorHandler = null;
            continue;
        }
    }
    return errorHandler;
}
java
@Override public UpdateContainerInstancesStateResult updateContainerInstancesState(UpdateContainerInstancesStateRequest request) { request = beforeClientExecution(request); return executeUpdateContainerInstancesState(request); }
java
protected BigDecimal valueOrNull(final Double source) { if (source != null) { return BigDecimal.valueOf(source); } return null; }
python
def _parseAttrVal(self, attrStr):
    """
    Returns a tuple of (attr, values), where values is a list; multiple
    values are returned to handle multi-value attributes.
    """
    m = self.SPLIT_ATTR_RE.match(attrStr)
    if m is None:
        raise GFF3Exception(
            "can't parse attribute/value: '" + attrStr + "'",
            self.fileName, self.lineNumber)
    name = urllib.unquote(m.group(1))
    val = m.group(2)
    # Split by comma to separate, then unquote.
    # Commas in values must be url encoded.
    return name, [urllib.unquote(v) for v in val.split(',')]
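The same rule can be sketched standalone; the exact SPLIT_ATTR_RE pattern is not shown above, so the r'([^=]+)=(.*)' pattern here is an assumption, and urllib.parse.unquote is the Python 3 spelling of urllib.unquote:

# Standalone sketch of GFF3 attribute parsing; the regex pattern is assumed.
import re
from urllib.parse import unquote

attr_str = 'Note=first%2C%20part,second'
m = re.match(r'([^=]+)=(.*)', attr_str)
name, val = unquote(m.group(1)), m.group(2)
print(name, [unquote(v) for v in val.split(',')])  # Note ['first, part', 'second']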
python
def all(self):
    """Execute the query; get all results as a list of lists."""
    query, inputs = self._toedn()
    return self.db.q(query, inputs=inputs, limit=self._limit,
                     offset=self._offset, history=self._history)
python
def get_config_file():
    # type: () -> AnyStr
    """Get model configuration file name from argv"""
    parser = argparse.ArgumentParser(description="Read configuration file.")
    parser.add_argument('-ini', help="Full path of configuration file")
    args = parser.parse_args()
    ini_file = args.ini
    if not FileClass.is_file_exists(ini_file):
        print("Usage: -ini <full path to the configuration file.>")
        exit(-1)
    return ini_file
java
public static Object toPrimitiveArray(Object[] array) {
    Class primitiveType;
    if (array.length > 0) {
        // Translated from the original Chinese log message: the array was most likely
        // constructed via new Object[length], so array.getClass().getComponentType()
        // returns Object, which is not what we want; we use the actual element type
        // instead. Risk: if the element types are inconsistent, a ClassCastException
        // may be thrown later.
        LOG.debug("array element type inferred from the first element; "
                + "a mixed-type array may cause a ClassCastException later");
        primitiveType = Reflection.getPrimitiveType(array[0].getClass());
    } else {
        primitiveType = Reflection.getPrimitiveType(array.getClass().getComponentType());
    }
    Object primitiveArray = Array.newInstance(primitiveType, array.length);
    for (int i = 0; i < array.length; i++) {
        Array.set(primitiveArray, i, array[i]);
    }
    return primitiveArray;
}
java
public void verifyMessageSize(int maxMessageSize) {
    Iterator<MessageAndOffset> shallowIter = internalIterator(true);
    while (shallowIter.hasNext()) {
        MessageAndOffset messageAndOffset = shallowIter.next();
        int payloadSize = messageAndOffset.message.payloadSize();
        if (payloadSize > maxMessageSize) {
            throw new MessageSizeTooLargeException("payload size of " + payloadSize
                    + " larger than " + maxMessageSize);
        }
    }
}
python
async def wait_until_ready(
    self, timeout: Optional[float] = None, no_raise: bool = False
) -> bool:
    """
    Waits for the underlying node to become ready.

    If no_raise is set, returns False when a timeout occurs instead of
    propagating TimeoutError. A timeout of None means to wait indefinitely.
    """
    if self.node.ready.is_set():
        return True
    try:
        return await self.node.wait_until_ready(timeout=timeout)
    except asyncio.TimeoutError:
        if no_raise:
            return False
        else:
            raise
java
public static ProjectReader getProjectReader(String name) throws MPXJException {
    int index = name.lastIndexOf('.');
    if (index == -1) {
        throw new IllegalArgumentException("Filename has no extension: " + name);
    }

    String extension = name.substring(index + 1).toUpperCase();

    Class<? extends ProjectReader> fileClass = READER_MAP.get(extension);
    if (fileClass == null) {
        throw new IllegalArgumentException("Cannot read files of type: " + extension);
    }

    try {
        ProjectReader file = fileClass.newInstance();
        return (file);
    } catch (Exception ex) {
        throw new MPXJException("Failed to load project reader", ex);
    }
}
java
public static Whitelist relaxed() {
    return new Whitelist()
            .addTags(
                    "a", "b", "blockquote", "br", "caption", "cite", "code", "col",
                    "colgroup", "dd", "div", "dl", "dt", "em", "h1", "h2", "h3", "h4", "h5", "h6",
                    "i", "img", "li", "ol", "p", "pre", "q", "small", "span", "strike", "strong",
                    "sub", "sup", "table", "tbody", "td", "tfoot", "th", "thead", "tr", "u", "ul")
            .addAttributes("a", "href", "title")
            .addAttributes("blockquote", "cite")
            .addAttributes("col", "span", "width")
            .addAttributes("colgroup", "span", "width")
            .addAttributes("img", "align", "alt", "height", "src", "title", "width")
            .addAttributes("ol", "start", "type")
            .addAttributes("q", "cite")
            .addAttributes("table", "summary", "width")
            .addAttributes("td", "abbr", "axis", "colspan", "rowspan", "width")
            .addAttributes("th", "abbr", "axis", "colspan", "rowspan", "scope", "width")
            .addAttributes("ul", "type")
            .addProtocols("a", "href", "ftp", "http", "https", "mailto")
            .addProtocols("blockquote", "cite", "http", "https")
            .addProtocols("cite", "cite", "http", "https")
            .addProtocols("img", "src", "http", "https")
            .addProtocols("q", "cite", "http", "https");
}
java
public LocalDateTime withMinute(int minute) { LocalTime newTime = time.withMinute(minute); return with(date, newTime); }
java
public static SwapFile createTempFile(String prefix, String suffix, File directory) throws IOException { throw new IOException("Not applicable. Call get(File, String) method instead"); }
java
protected static Event.Level formatLevel(Level level) {
    if (level.isGreaterOrEqual(Level.ERROR)) {
        return Event.Level.ERROR;
    } else if (level.isGreaterOrEqual(Level.WARN)) {
        return Event.Level.WARNING;
    } else if (level.isGreaterOrEqual(Level.INFO)) {
        return Event.Level.INFO;
    } else if (level.isGreaterOrEqual(Level.ALL)) {
        return Event.Level.DEBUG;
    } else {
        return null;
    }
}
java
CompletableFuture<Void> resetIndexes(SessionId sessionId) {
    RaftSessionState sessionState = sessions.get(sessionId.id());
    if (sessionState == null) {
        return Futures.exceptionalFuture(new IllegalArgumentException("Unknown session: " + sessionId));
    }

    CompletableFuture<Void> future = new CompletableFuture<>();

    KeepAliveRequest request = KeepAliveRequest.builder()
            .withSessionIds(new long[]{sessionId.id()})
            .withCommandSequences(new long[]{sessionState.getCommandResponse()})
            .withEventIndexes(new long[]{sessionState.getEventIndex()})
            .build();

    connection.keepAlive(request).whenComplete((response, error) -> {
        if (error == null) {
            if (response.status() == RaftResponse.Status.OK) {
                future.complete(null);
            } else {
                future.completeExceptionally(response.error().createException());
            }
        } else {
            future.completeExceptionally(error);
        }
    });
    return future;
}
python
def out_of_date(original, derived):
    """
    Returns True if `derived` is out-of-date (or missing) with respect to
    `original`; both are full file paths.
    """
    return (not os.path.exists(derived) or
            os.stat(derived).st_mtime < os.stat(original).st_mtime)
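Typical use is a make-style staleness check (the file names here are hypothetical):

# Rebuild the derived file only when the source is newer or the target is missing.
if out_of_date('input.rst', 'output.html'):
    print('output.html is missing or older than input.rst; rebuild it')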
python
def parse_address(text: str) -> Tuple[str, int]:
    '''Parse PASV address.'''
    match = re.search(
        r'\('
        r'(\d{1,3})\s*,'
        r'\s*(\d{1,3})\s*,'
        r'\s*(\d{1,3})\s*,'
        r'\s*(\d{1,3})\s*,'
        r'\s*(\d{1,3})\s*,'
        r'\s*(\d{1,3})\s*'
        r'\)',
        text)

    if match:
        return (
            '{0}.{1}.{2}.{3}'.format(int(match.group(1)),
                                     int(match.group(2)),
                                     int(match.group(3)),
                                     int(match.group(4))
                                     ),
            int(match.group(5)) << 8 | int(match.group(6))
        )
    else:
        raise ValueError('No address found')
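Usage sketch with a typical FTP 227 reply; the port combines the last two octets as high and low bytes (19*256 + 136 = 5000):

host, port = parse_address('227 Entering Passive Mode (192,168,1,2,19,136).')
assert (host, port) == ('192.168.1.2', 5000)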
python
def _create_union_types_specification(schema_graph, graphql_types, hidden_classes, base_name):
    """Return a function that gives the types in the union type rooted at base_name."""
    # When edges point to vertices of type base_name, and base_name is both non-abstract and
    # has subclasses, we need to represent the edge endpoint type with a union type based on
    # base_name and its subclasses. This function calculates what types that union should include.
    def types_spec():
        """Return a list of GraphQL types that this class' corresponding union type includes."""
        return [
            graphql_types[x]
            for x in sorted(list(schema_graph.get_subclass_set(base_name)))
            if x not in hidden_classes
        ]

    return types_spec
java
public OvhReceiver serviceName_users_login_receivers_POST(String serviceName, String login,
        Boolean autoUpdate, String csvUrl, String description, String documentId, Long slotId)
        throws IOException {
    String qPath = "/sms/{serviceName}/users/{login}/receivers";
    StringBuilder sb = path(qPath, serviceName, login);
    HashMap<String, Object> o = new HashMap<String, Object>();
    addBody(o, "autoUpdate", autoUpdate);
    addBody(o, "csvUrl", csvUrl);
    addBody(o, "description", description);
    addBody(o, "documentId", documentId);
    addBody(o, "slotId", slotId);
    String resp = exec(qPath, "POST", sb.toString(), o);
    return convertTo(resp, OvhReceiver.class);
}
java
public static Hashtable<String, String[]> parseQueryString(String s) {
    String valArray[] = null;

    if (s == null) {
        throw new IllegalArgumentException();
    }

    Hashtable<String, String[]> ht = new Hashtable<String, String[]>();
    StringBuilder sb = new StringBuilder();
    StringTokenizer st = new StringTokenizer(s, "&");
    while (st.hasMoreTokens()) {
        String pair = st.nextToken();
        int pos = pair.indexOf('=');
        if (pos == -1) {
            // XXX should give more detail about the illegal argument
            throw new IllegalArgumentException();
        }
        String key = parseName(pair.substring(0, pos), sb);
        String val = parseName(pair.substring(pos + 1, pair.length()), sb);
        if (ht.containsKey(key)) {
            String oldVals[] = ht.get(key);
            valArray = new String[oldVals.length + 1];
            for (int i = 0; i < oldVals.length; i++) {
                valArray[i] = oldVals[i];
            }
            valArray[oldVals.length] = val;
        } else {
            valArray = new String[1];
            valArray[0] = val;
        }
        ht.put(key, valArray);
    }
    return ht;
}
java
private ArrayList<IIOMetadata> readExif(IIOMetadataNode app1EXIFNode) {
    // Set up input skipping EXIF ID 6-byte sequence.
    byte[] app1Params = (byte[]) app1EXIFNode.getUserObject();
    MemoryCacheImageInputStream app1EXIFInput = new MemoryCacheImageInputStream(
            new ByteArrayInputStream(app1Params, 6, app1Params.length - 6));

    // only the tiff reader knows how to interpret the exif metadata
    ImageReader tiffReader = null;
    Iterator<ImageReader> readers = ImageIO.getImageReadersByFormatName("tiff");
    while (readers.hasNext()) {
        tiffReader = readers.next();
        if (tiffReader.getClass().getName().startsWith("com.sun.media")) {
            // Break on finding the core provider.
            break;
        }
    }
    if (tiffReader == null) {
        throw new RuntimeException("Cannot find core TIFF reader!");
    }

    ArrayList<IIOMetadata> out = new ArrayList<IIOMetadata>(1);
    tiffReader.setInput(app1EXIFInput);
    IIOMetadata tiffMetadata = null;
    try {
        tiffMetadata = tiffReader.getImageMetadata(0);
        TIFFImageReadParam rParam = (TIFFImageReadParam) tiffReader.getDefaultReadParam();
        rParam.setTIFFDecompressor(null);
    } catch (IOException e) {
        e.printStackTrace();
    }
    tiffReader.dispose();
    out.add(0, tiffMetadata);
    return out;
}
python
def runExperiment6(dirName):
    """
    This runs the experiment in the section "Simulations with Combined
    Sequences", an example stream containing a mixture of temporal and
    sensorimotor sequences.
    """
    # Results are put into a pkl file which can be used to generate the plots.
    # dirName is the absolute path where the pkl file will be placed.
    resultsFilename = os.path.join(dirName, "combined_results.pkl")
    results = runExperiment(
        {
            "numSequences": 50,
            "seqLength": 10,
            "numObjects": 50,
            "numFeatures": 500,
            "trialNum": 8,
            "numLocations": 100,
            "settlingTime": 1,
            "figure": "6",
            "numRepetitions": 30,
            "basalPredictedSegmentDecrement": 0.001,
            "stripStats": False,
        }
    )

    # Pickle results for plotting and possible later debugging
    with open(resultsFilename, "wb") as f:
        cPickle.dump(results, f)
java
private void init(Symtab syms, boolean definitive) {
    if (classes != null) return;

    if (definitive) {
        Assert.check(packages == null || packages == syms.packages);
        packages = syms.packages;
        Assert.check(classes == null || classes == syms.classes);
        classes = syms.classes;
    } else {
        packages = new HashMap<Name, PackageSymbol>();
        classes = new HashMap<Name, ClassSymbol>();
    }

    packages.put(names.empty, syms.rootPackage);
    syms.rootPackage.completer = thisCompleter;
    syms.unnamedPackage.completer = thisCompleter;
}
java
@Override
public long position() throws IOException {
    if (inflater == null && deflater == null) {
        return 0;
    }
    if (inflater != null) {
        return inflater.getBytesWritten();
    } else {
        return deflater.getBytesRead();
    }
}
java
@Override
public List<List<LogTrace<LogEntry>>> parse(InputStream inputStream, ParsingMode parsingMode)
        throws ParameterException, ParserException {
    try {
        inputStream.available();
    } catch (IOException e) {
        throw new ParameterException("Unable to read input file: " + e.getMessage());
    }
    Collection<XLog> logs = null;
    XParser parser = ParserFileFormat.XES.getParser();
    try {
        logs = parser.parse(inputStream);
    } catch (Exception e) {
        throw new ParserException("Exception while parsing with OpenXES: " + e.getMessage());
    }
    if (logs == null)
        throw new ParserException("No suitable parser could have been found!");

    parsedLogFiles = new ArrayList<>(logs.size());
    Set<List<String>> activitySequencesSet = new HashSet<>();
    Set<LogTrace<LogEntry>> traceSet = new HashSet<>();
    for (XLog log : logs) {
        activitySequencesSet.clear();
        traceSet.clear();
        Class<?> logEntryClass = null;
        List<LogTrace<LogEntry>> logTraces = new ArrayList<>();
        if (containsDataUsageExtension(log)) {
            logEntryClass = DULogEntry.class;
        } else {
            logEntryClass = LogEntry.class;
        }
        for (XTrace trace : log) {
            Integer traceID = null;
            // Extract trace ID
            for (Map.Entry<String, XAttribute> attribute : trace.getAttributes().entrySet()) {
                String key = attribute.getKey();
                String value = attribute.getValue().toString();
                if (key.equals("concept:name")) {
                    try {
                        traceID = Integer.parseInt(value);
                    } catch (NumberFormatException e) {
                        // if NAN, take the hash
                        traceID = value.hashCode();
                    }
                    if (traceID < 0) {
                        traceID *= Integer.signum(traceID);
                    }
                }
            }
            if (traceID == null)
                throw new ParserException("Cannot extract case-id");
            // Build new log trace
            LogTrace<LogEntry> logTrace = new LogTrace<>(traceID);
            // Check for similar instances
            Collection<Long> similarInstances = getSimilarInstances(trace);
            if (similarInstances != null) {
                logTrace.setSimilarInstances(similarInstances);
            }
            for (XEvent event : trace) {
                // Add events to log trace
                logTrace.addEntry(buildLogEntry(event, logEntryClass));
            }
            switch (parsingMode) {
                case DISTINCT_ACTIVITY_SEQUENCES:
                    if (!activitySequencesSet.add(logTrace.getActivities()))
                        break;
                    logTrace.reduceToActivities();
                    // intentional fall-through to COMPLETE
                    // case DISTINCT_TRACES:
                    //     if (!traceSet.add(logTrace))
                    //         break;
                case COMPLETE:
                    logTraces.add(logTrace);
            }
        }
        parsedLogFiles.add(logTraces);
        summaries.add(new LogSummary<>(logTraces));
    }
    return parsedLogFiles;
}
java
private List<InetAddress> findAddresses() throws HarvestException {
    // Stores found addresses
    List<InetAddress> found = new ArrayList<InetAddress>(3);

    // Retrieve list of available network interfaces
    Enumeration<NetworkInterface> interfaces = getNetworkInterfaces();
    while (interfaces.hasMoreElements()) {
        NetworkInterface iface = interfaces.nextElement();

        // Evaluate network interface
        if (!useNetworkInterface(iface)) {
            continue;
        }

        // Retrieve list of available addresses from the network interface
        Enumeration<InetAddress> addresses = iface.getInetAddresses();
        while (addresses.hasMoreElements()) {
            InetAddress address = addresses.nextElement();

            // loopback addresses are discarded
            if (address.isLoopbackAddress()) {
                continue;
            }

            // Ignore IPv6 addresses for now
            if (address instanceof Inet4Address) {
                found.add(address);
            }
        }
    }
    return found;
}
python
def AddKeyByPath(self, key_path, registry_key):
    """Adds a Windows Registry key for a specific key path.

    Args:
        key_path (str): Windows Registry key path to add the key.
        registry_key (WinRegistryKey): Windows Registry key.

    Raises:
        KeyError: if the subkey already exists.
        ValueError: if the Windows Registry key cannot be added.
    """
    if not key_path.startswith(definitions.KEY_PATH_SEPARATOR):
        raise ValueError('Key path does not start with: {0:s}'.format(
            definitions.KEY_PATH_SEPARATOR))

    if not self._root_key:
        self._root_key = FakeWinRegistryKey(self._key_path_prefix)

    path_segments = key_paths.SplitKeyPath(key_path)
    parent_key = self._root_key
    for path_segment in path_segments:
        try:
            subkey = FakeWinRegistryKey(path_segment)
            parent_key.AddSubkey(subkey)
        except KeyError:
            subkey = parent_key.GetSubkeyByName(path_segment)

        parent_key = subkey

    parent_key.AddSubkey(registry_key)
java
static boolean load(String path) {
    if (loadDat(path)) return true;
    // Load from the text file and try to generate the double-array trie (dat)
    StringDictionary dictionary = new StringDictionary("=");
    if (!dictionary.load(path)) return false;
    TreeMap<String, Pinyin[]> map = new TreeMap<String, Pinyin[]>();
    for (Map.Entry<String, String> entry : dictionary.entrySet()) {
        String[] args = entry.getValue().split(",");
        Pinyin[] pinyinValue = new Pinyin[args.length];
        for (int i = 0; i < pinyinValue.length; ++i) {
            try {
                Pinyin pinyin = Pinyin.valueOf(args[i]);
                pinyinValue[i] = pinyin;
            } catch (IllegalArgumentException e) {
                logger.severe("Failed to read pinyin dictionary " + path
                        + "; the problem is in [" + entry + "], exception: " + e);
                return false;
            }
        }
        map.put(entry.getKey(), pinyinValue);
    }
    trie.build(map);
    logger.info("Caching double-array trie for " + path);
    saveDat(path, trie, map.entrySet());
    return true;
}
java
public static FieldMarshaller getFieldMarshaller(Field field) {
    if (_marshallers == null) {
        // multiple threads may attempt to create the stock marshallers, but they'll just do
        // extra work and _marshallers will only ever contain a fully populated table
        _marshallers = createMarshallers();
    }

    // if necessary (we're running in a sandbox), look for custom field accessors
    if (useFieldAccessors()) {
        Method reader = null, writer = null;
        try {
            reader = field.getDeclaringClass().getMethod(
                    getReaderMethodName(field.getName()), READER_ARGS);
        } catch (NoSuchMethodException nsme) {
            // no problem
        }
        try {
            writer = field.getDeclaringClass().getMethod(
                    getWriterMethodName(field.getName()), WRITER_ARGS);
        } catch (NoSuchMethodException nsme) {
            // no problem
        }
        if (reader != null && writer != null) {
            return new MethodFieldMarshaller(reader, writer);
        }
        if ((reader == null && writer != null) || (writer == null && reader != null)) {
            log.warning("Class contains one but not both custom field reader and writer",
                    "class", field.getDeclaringClass().getName(), "field", field.getName(),
                    "reader", reader, "writer", writer);
            // fall through to using reflection on the fields...
        }
    }

    Class<?> ftype = field.getType();

    // use the intern marshaller for pooled strings
    if (ftype == String.class && field.isAnnotationPresent(Intern.class)) {
        return _internMarshaller;
    }

    // if we have an exact match, use that
    FieldMarshaller fm = _marshallers.get(ftype);
    if (fm == null) {
        Class<?> collClass = Streamer.getCollectionClass(ftype);
        if (collClass != null && !collClass.equals(ftype)) {
            log.warning("Specific field types are discouraged "
                    + "for Iterables/Collections and Maps. The implementation type may not be "
                    + "recreated on the other side.", "class", field.getDeclaringClass(),
                    "field", field.getName(), "type", ftype, "shouldBe", collClass);
            fm = _marshallers.get(collClass);
        }
        // otherwise if the class is a pure interface or streamable,
        // use the streamable marshaller
        if (fm == null && (ftype.isInterface() || Streamer.isStreamable(ftype))) {
            fm = _marshallers.get(Streamable.class);
        }
    }
    return fm;
}
java
private void setNewObjectMapper() {
    mapper = new ObjectMapper();
    // The two options are independent: null-value handling only affects
    // serialization, unknown-property handling only affects deserialization,
    // so neither check should be nested inside the other.
    if (!serializeNullValues) {
        mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
    }
    if (ignoreUnknownProperties) {
        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
    }
}
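The resulting behavior, sketched with plain Jackson calls (the JSON and field names are illustrative, and the usual com.fasterxml.jackson.databind imports are assumed):

ObjectMapper mapper = new ObjectMapper();
mapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);             // drop null fields on write
mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false); // ignore unknown fields on read
// {"name":"a","extra":1} now deserializes into a POJO that only declares "name",
// and a POJO whose other fields are null serializes as {"name":"a"}.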
java
@Override public void run() { getReport().println( Messages.get().container(Messages.RPT_REBUILD_SEARCH_INDEXES_BEGIN_0), I_CmsReport.FORMAT_HEADLINE); try { Map<String, Object> params = new HashMap<String, Object>(); params.put(I_CmsEventListener.KEY_REPORT, getReport()); if (m_indexNames != null) { params.put(I_CmsEventListener.KEY_INDEX_NAMES, CmsStringUtil.collectionAsString(m_indexNames, ",")); } OpenCms.fireCmsEvent(I_CmsEventListener.EVENT_REBUILD_SEARCHINDEXES, params); getReport().println( Messages.get().container(Messages.RPT_REBUILD_SEARCH_INDEXES_END_0), I_CmsReport.FORMAT_HEADLINE); } catch (Throwable exc) { getReport().println( org.opencms.search.Messages.get().container(org.opencms.search.Messages.RPT_SEARCH_INDEXING_FAILED_0), I_CmsReport.FORMAT_WARNING); getReport().println(exc); m_error = exc; } }
python
def add_view_no_menu(self, baseview, endpoint=None, static_folder=None):
    """
        Add your views without creating a menu.

    :param baseview: A BaseView type class instantiated.
    :param endpoint: Optional custom endpoint to use when registering the blueprint.
    :param static_folder: Optional static folder to use when registering the blueprint.
    """
    baseview = self._check_and_init(baseview)
    log.info(LOGMSG_INF_FAB_ADD_VIEW.format(baseview.__class__.__name__, ""))

    if not self._view_exists(baseview):
        baseview.appbuilder = self
        self.baseviews.append(baseview)
        self._process_inner_views()
        if self.app:
            self.register_blueprint(
                baseview, endpoint=endpoint, static_folder=static_folder
            )
            self._add_permission(baseview)
    else:
        log.warning(LOGMSG_WAR_FAB_VIEW_EXISTS.format(baseview.__class__.__name__))
    return baseview
java
@FFDCIgnore(JAXBException.class) public static Object parseRaDeploymentDescriptor(com.ibm.wsspi.adaptable.module.Entry ddEntry) throws JAXBException, SAXException, ParserConfigurationException, UnableToAdaptException { Object connector = null; try { connector = parseResourceAdapterXml(ddEntry.adapt(InputStream.class), ddEntry.getName(), false); } catch (JAXBException jax) { if (isVersion10ResourceAdapter(ddEntry.adapt(InputStream.class))) { if (ra10Context == null) { ra10Context = JAXBContext.newInstance(Ra10Connector.class); } connector = parseResourceAdapterXml(ddEntry.adapt(InputStream.class), ddEntry.getName(), true); } else { throw jax; } } return connector; }
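Note that the method adapts a fresh InputStream for each parse attempt because the stream consumed by a failed parse cannot be rewound. The retry shape with plain JAXB looks roughly like this; Connector, Ra10Connector, and the two stream parameters are stand-ins, not the real signatures:

Object parseWithFallback(InputStream firstStream, InputStream secondStream) throws JAXBException {
    try {
        return JAXBContext.newInstance(Connector.class).createUnmarshaller().unmarshal(firstStream);
    } catch (JAXBException jax) {
        // In the real method the fallback only runs when the descriptor is
        // detected as the legacy 1.0 format; otherwise jax is rethrown.
        return JAXBContext.newInstance(Ra10Connector.class).createUnmarshaller().unmarshal(secondStream);
    }
}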
java
public void becomeCloneOf(ManagedObject clone) { if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.entry(tc, "becomeCloneOf", "Clone="+clone); PersistableRawData object = (PersistableRawData)clone; _data = object._data; if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled()) SibTr.exit(tc, "becomeCloneOf"); }
java
public static EntityType getAndCheckEntityType(EntityDataModel entityDataModel, String typeName) { return checkIsEntityType(getAndCheckType(entityDataModel, typeName)); }
python
def _missing_imageinfo(self):
    """Returns a list of image filenames that are missing imageinfo."""
    if 'image' not in self.data:
        return
    missing = []
    for img in self.data['image']:
        if 'url' not in img:
            missing.append(img['file'])
    return list(set(missing))
java
public IWord findFirstWordByLabel(String label) { for (IWord word : this) { if (label.equals(word.getLabel())) { return word; } } return null; }
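Hypothetical usage of the linear scan above; the label value "nr" and the sentence variable are illustrative only:

IWord word = sentence.findFirstWordByLabel("nr"); // first word tagged "nr", if any
if (word != null)
{
    System.out.println(word.getValue());
}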
java
public ArrayList<PayloadType.Audio> getAudioPayloadTypesList() { ArrayList<PayloadType.Audio> result = new ArrayList<>(); Iterator<JinglePayloadType> jinglePtsIter = getJinglePayloadTypes(); while (jinglePtsIter.hasNext()) { JinglePayloadType jpt = jinglePtsIter.next(); if (jpt instanceof JinglePayloadType.Audio) { JinglePayloadType.Audio jpta = (JinglePayloadType.Audio) jpt; result.add((PayloadType.Audio) jpta.getPayloadType()); } } return result; }
java
public static MySQLBinaryProtocolValue getBinaryProtocolValue(final MySQLColumnType columnType) {
    Preconditions.checkArgument(BINARY_PROTOCOL_VALUES.containsKey(columnType),
            "Cannot find MySQL type '%s' in column type when processing binary protocol value", columnType);
    return BINARY_PROTOCOL_VALUES.get(columnType);
}
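The lookup above fails fast with a descriptive message instead of returning null. The same guard shape with Guava on an arbitrary map, for reference (names and values are made up):

Map<String, Integer> registry = ImmutableMap.of("known", 1);
Preconditions.checkArgument(registry.containsKey("unknown"),
        "Cannot find key '%s'", "unknown");
// throws IllegalArgumentException: Cannot find key 'unknown'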
python
def build_stack_docs(root_project_dir, skippedNames=None):
    """Build stack Sphinx documentation (main entrypoint).

    Parameters
    ----------
    root_project_dir : `str`
        Path to the root directory of the main documentation project. This
        is the directory containing the ``conf.py`` file.
    skippedNames : `list`, optional
        Optional list of packages to skip while creating symlinks.
    """
    logger = logging.getLogger(__name__)

    # Create the directory where module content is symlinked
    # NOTE: this path is hard-wired in for pipelines.lsst.io, but could be
    # refactored as a configuration.
    root_modules_dir = os.path.join(root_project_dir, 'modules')
    if os.path.isdir(root_modules_dir):
        logger.info('Deleting any existing modules/ symlinks')
        remove_existing_links(root_modules_dir)
    else:
        logger.info('Creating modules/ dir at {0}'.format(root_modules_dir))
        os.makedirs(root_modules_dir)

    # Create directory for package content
    root_packages_dir = os.path.join(root_project_dir, 'packages')
    if os.path.isdir(root_packages_dir):
        # Clear out existing module links
        logger.info('Deleting any existing packages/ symlinks')
        remove_existing_links(root_packages_dir)
    else:
        logger.info('Creating packages/ dir at {0}'.format(root_packages_dir))
        os.makedirs(root_packages_dir)

    # Ensure _static directory exists (but do not delete any existing
    # directory contents)
    root_static_dir = os.path.join(root_project_dir, '_static')
    if os.path.isdir(root_static_dir):
        # Clear out existing directory links
        logger.info('Deleting any existing _static/ symlinks')
        remove_existing_links(root_static_dir)
    else:
        logger.info('Creating _static/ at {0}'.format(root_static_dir))
        os.makedirs(root_static_dir)

    # Find package setup by EUPS
    packages = discover_setup_packages()

    # Get packages explicitly required in the table file to filter out
    # implicit dependencies later.
    table_path = find_table_file(root_project_dir)
    with open(table_path) as fp:
        table_data = fp.read()
    listed_packages = list_packages_in_eups_table(table_data)

    # Link to documentation directories of packages from the root project
    for package_name, package_info in packages.items():
        if package_name not in listed_packages:
            logger.debug(
                'Filtering %s from build since it is not explicitly '
                'required by the %s table file.',
                package_name, table_path)
            continue
        try:
            package_docs = find_package_docs(
                package_info['dir'],
                skippedNames=skippedNames)
        except NoPackageDocs as e:
            logger.debug(
                'Skipping {0} doc linking. {1}'.format(package_name, str(e)))
            continue

        link_directories(root_modules_dir, package_docs.module_dirs)
        link_directories(root_packages_dir, package_docs.package_dirs)
        link_directories(root_static_dir, package_docs.static_dirs)

    # Trigger the Sphinx build
    return_code = run_sphinx(root_project_dir)

    return return_code
java
@SuppressWarnings("unchecked")
protected Map<String, Object> handleGetSettingsData(Set<String> settingSet) {
    if (settingSet == null || settingSet.isEmpty()) {
        throw new NotEnoughFieldsSpecifiedException("At least one settings field needs to be passed in as an argument.");
    }
    Map<String, String> uriVariables = restUriVariablesFactory.getUriVariablesForSettings(settingSet);
    String url = restUrlFactory.assembleUrlForSettings();

    Map<String, Object> response = this.performGetRequest(url, Map.class, uriVariables);
    return response;
}
java
public static String getStatusText(int nHttpStatusCode) {
    // Integer.valueOf uses the boxed-value cache instead of the deprecated
    // Integer constructor, and a single get() replaces the containsKey/get pair.
    String statusText = mapStatusCodes.get(Integer.valueOf(nHttpStatusCode));
    return statusText == null ? "" : statusText;
}
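Hypothetical population and lookups; the reason phrases shown are the standard HTTP ones, but how mapStatusCodes is actually filled is not shown in this snippet:

mapStatusCodes.put(Integer.valueOf(404), "Not Found");
mapStatusCodes.put(Integer.valueOf(500), "Internal Server Error");
getStatusText(404); // "Not Found"
getStatusText(418); // "" for unmapped codes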
java
public static String readInputStreamBufferedAsString(final InputStream in, final String charset) throws IOException { BufferedReader reader = null; try { reader = new BufferedReader(new UnicodeReader(in, charset)); StringBuilder result = new StringBuilder(); char[] cbuf = new char[2048]; int read; while ((read = reader.read(cbuf)) > 0) result.append(cbuf, 0, read); return result.toString(); } finally { closeIgnoringException(reader); } }
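Hypothetical caller in a method that may throw IOException; the class and resource name are placeholders. The method already closes the reader (and thus the stream) via closeIgnoringException, so the try-with-resources here is just defensive:

try (InputStream in = MyClass.class.getResourceAsStream("/config.xml")) {
    String text = readInputStreamBufferedAsString(in, "UTF-8");
}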
java
public static String encode(Object o) { Type type = typeOf(o); return type.encode(o); }
python
def randomWalkFunction(requestContext, name, step=60): """ Short Alias: randomWalk() Returns a random walk starting at 0. This is great for testing when there is no real data in whisper. Example:: &target=randomWalk("The.time.series") This would create a series named "The.time.series" that contains points where x(t) == x(t-1)+random()-0.5, and x(0) == 0. Accepts an optional second argument as step parameter (default step is 60 sec). """ delta = timedelta(seconds=step) when = requestContext["startTime"] values = [] current = 0 while when < requestContext["endTime"]: values.append(current) current += random.random() - 0.5 when += delta return [TimeSeries( name, int(epoch(requestContext["startTime"])), int(epoch(requestContext["endTime"])), step, values)]
java
WComponent getChildAt(final int index) { ComponentModel model = getComponentModel(); return model.getChildren().get(index); }
python
def queueMessage(self, sender, target, value, consequence=None):
    """
    Queue a persistent outgoing message.

    @param sender: A description of the shared item that is the sender of
        the message.
    @type sender: L{xmantissa.sharing.Identifier}

    @param target: A description of the shared item that is the target of
        the message.
    @type target: L{xmantissa.sharing.Identifier}

    @param value: The message payload to queue for delivery.

    @param consequence: an item stored in the same database as this
        L{MessageQueue} implementing L{IDeliveryConsequence}.
    """
    self.messageCounter += 1
    _QueuedMessage.create(store=self.store,
                          sender=sender,
                          target=target,
                          value=value,
                          messageID=self.messageCounter,
                          consequence=consequence)
    self._scheduleMePlease()
java
protected void destroyConnection(ThriftConnectionHandle<T> conn) { postDestroyConnection(conn); try { conn.internalClose(); } catch (ThriftConnectionPoolException e) { logger.error("Error in attempting to close connection", e); } }
java
@Override public void extinguish() { logger.info(">>> {} shutting down ...", NAME); try { if (server != null) { server.stop(); } } catch (Exception e) { logger.error("stop failed", e); System.exit(100); // NOSONAR } logger.info("done"); }
java
public Vector4d set(Vector4fc v) { return set(v.x(), v.y(), v.z(), v.w()); }
python
def _loopreport(self): ''' Loop over the report progress ''' while 1: eventlet.sleep(0.2) ac2popenlist = {} for action in self.session._actions: for popen in action._popenlist: if popen.poll() is None: lst = ac2popenlist.setdefault(action.activity, []) lst.append(popen) if not action._popenlist and action in self._actionmayfinish: super(RetoxReporter, self).logaction_finish(action) self._actionmayfinish.remove(action) self.screen.draw_next_frame(repeat=False)
java
public double det() { int m = lu.nrows(); int n = lu.ncols(); if (m != n) throw new IllegalArgumentException(String.format("Matrix is not square: %d x %d", m, n)); double d = (double) pivsign; for (int j = 0; j < n; j++) { d *= lu.get(j, j); } return d; }
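For an LU factorization with partial pivoting, PA = LU with unit-diagonal L, so det(A) = sign(P) · ∏ u_ii, which is exactly what the loop accumulates. A worked 2×2 check, as comments:

// A = [[4, 3],
//      [6, 3]]          cofactor expansion: det(A) = 4*3 - 3*6 = -6
// Partial pivoting swaps the two rows once, so pivsign = -1 and
// U = [[6, 3],
//      [0, 1]]          det(A) = pivsign * (6 * 1) = -6, matching.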
python
def is_callable_tag(tag): """ Determine whether :tag: is a valid callable string tag. String is assumed to be valid callable if it starts with '{{' and ends with '}}'. :param tag: String name of tag. """ return (isinstance(tag, six.string_types) and tag.strip().startswith('{{') and tag.strip().endswith('}}'))
java
static void uninitialize() { // get the appcontext that we've stored data in AppContext ctx = AppContext.getAppContext(); // get the pcl stored in app context PropertyChangeListener pcl = (PropertyChangeListener) ctx.get("SeaGlassStyle.defaults.pcl"); // if the pcl exists, uninstall it from the UIDefaults tables if (pcl != null) { UIManager.getDefaults().removePropertyChangeListener(pcl); UIManager.getLookAndFeelDefaults().removePropertyChangeListener(pcl); } // clear out the compiled defaults ctx.put("SeaGlassStyle.defaults", null); }
java
private static void fillValue2Node(List<MetricInfo> componentMetrics, Map<String, TopologyNode> nodes) {
    String NODE_DIM = MetricDef.EMMITTED_NUM;
    List<String> FILTER = Arrays.asList(MetricDef.EMMITTED_NUM, MetricDef.SEND_TPS, MetricDef.RECV_TPS);
    for (MetricInfo info : componentMetrics) {
        if (info == null) continue;
        for (Map.Entry<String, Map<Integer, MetricSnapshot>> metric : info.get_metrics().entrySet()) {
            String name = metric.getKey();
            String[] split_name = name.split("@");
            String metricName = UIMetricUtils.extractMetricName(split_name);
            String compName = UIMetricUtils.extractComponentName(split_name);

            TopologyNode node = nodes.get(compName);
            if (node != null && FILTER.contains(metricName)) {
                for (Map.Entry<Integer, MetricSnapshot> winData : metric.getValue().entrySet()) {
                    node.putMapValue(metricName, winData.getKey(),
                            UIMetricUtils.getMetricValue(winData.getValue()));
                }
            }

            if (metricName == null || !metricName.equals(NODE_DIM)) {
                continue;
            }

            // get the one-minute (60-second) window metric
            MetricSnapshot snapshot = metric.getValue().get(AsmWindow.M1_WINDOW);

            if (node != null) {
                node.setValue(snapshot.get_longValue());
                nodes.get(compName).setTitle("Emitted: " + UIMetricUtils.getMetricValue(snapshot));
            }
        }
    }
}
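The keys of get_metrics() evidently encode both a component name and a metric name joined by "@"; the exact segment order is hidden inside UIMetricUtils, so the sketch below only assumes the general shape of such a key:

// Illustrative key only; the real layout is defined by UIMetricUtils.
String key = "SplitSentence@EmittedNum";
String[] segments = key.split("@");
// UIMetricUtils.extractComponentName(segments) and extractMetricName(segments)
// pick the appropriate segments; the positions here are assumptions.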