language | func_code_string
---|---
java | public String[] sequencesToTexts(Integer[][] sequences) {
Integer oovTokenIndex = wordIndex.get(outOfVocabularyToken);
ArrayList<String> texts = new ArrayList<>();
for (Integer[] sequence: sequences) {
ArrayList<String> wordVector = new ArrayList<>();
for (Integer index: sequence) {
if (indexWord.containsKey(index)) {
    String word = indexWord.get(index);
    if (numWords != null && index >= numWords) {
        // index is beyond the configured vocabulary size: substitute the
        // out-of-vocabulary token if one is configured, otherwise skip it.
        if (oovTokenIndex != null) {
            wordVector.add(indexWord.get(oovTokenIndex));
        }
    } else {
        wordVector.add(word);
    }
} else if (oovTokenIndex != null) {
    wordVector.add(indexWord.get(oovTokenIndex));
}
}
StringBuilder builder = new StringBuilder();
for (String word: wordVector) {
builder.append(word + split);
}
String text = builder.toString();
texts.add(text);
}
return texts.toArray(new String[texts.size()]);
} |
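For contrast, a minimal Python sketch of the same index-to-text decoding (the `index_word`, `num_words`, and `oov_index` names are hypothetical stand-ins, not the library's API):

    def sequences_to_texts(sequences, index_word, num_words=None, oov_index=None, split=" "):
        # In-vocabulary indices map to their words; indices at or beyond
        # num_words fall back to the OOV token when one is configured.
        texts = []
        for seq in sequences:
            words = []
            for idx in seq:
                if idx in index_word:
                    if num_words is not None and idx >= num_words:
                        if oov_index is not None:
                            words.append(index_word[oov_index])
                    else:
                        words.append(index_word[idx])
                elif oov_index is not None:
                    words.append(index_word[oov_index])
            texts.append(split.join(words))
        return texts

    print(sequences_to_texts([[1, 2, 9]], {1: "hello", 2: "world", 3: "<oov>"},
                             num_words=5, oov_index=3))
    # ['hello world <oov>']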
python | def encipher(self,string,keep_punct=False):
"""Encipher string using Atbash cipher.
Example::
ciphertext = Atbash().encipher(plaintext)
:param string: The string to encipher.
:param keep_punct: if true, punctuation and spacing are retained. If false, it is all removed. Default is False.
:returns: The enciphered string.
"""
if not keep_punct: string = self.remove_punctuation(string)
ret = ''
for c in string.upper():
if c.isalpha(): ret += self.key[self.a2i(c)]
else: ret += c
return ret |
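The method above relies on an external `key` and helpers; a self-contained sketch of the cipher itself, since Atbash is just the reversed alphabet:

    import string

    def atbash(text, keep_punct=False):
        # Atbash maps A<->Z, B<->Y, ...: the key is the alphabet reversed.
        key = string.ascii_uppercase[::-1]
        if not keep_punct:
            text = "".join(c for c in text if c.isalpha())
        return "".join(key[ord(c) - ord("A")] if c.isalpha() else c
                       for c in text.upper())

    print(atbash("Attack at dawn"))  # ZGGZXPZGWZDM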
java | @Override
public GetClassifierResult getClassifier(GetClassifierRequest request) {
request = beforeClientExecution(request);
return executeGetClassifier(request);
} |
python | def stop(self):
'''
Stop the fuzzing session
'''
self.logger.info('Stopping client fuzzer')
self._target_control_thread.stop()
self.target.signal_mutated()
super(ClientFuzzer, self).stop() |
java | public void setArduinoEnabled(boolean enable) {
Buffer buffer = new Buffer();
buffer.writeByte(enable ? 1 : 0);
sendMessage(BeanMessageID.CC_POWER_ARDUINO, buffer);
} |
java | protected boolean isSymbolicLink(File file) throws IOException {
if (IS_WINDOWS) {
return false;
}
File canonical = file;
if (file.getParent() != null) {
canonical = new File(file.getParentFile().getCanonicalFile(), file.getName());
}
return !canonical.getCanonicalFile().equals(canonical.getAbsoluteFile());
} |
java | private CellFormatResult getErrorCellValue(final Cell cell, final Locale locale, final boolean isStartDate1904) {
final CellFormatResult result = new CellFormatResult();
result.setCellType(FormatCellType.Error);
final ErrorCell errorCell = (ErrorCell) cell;
final int errorCode = errorCell.getErrorCode();
result.setValue(errorCode);
if(isErrorCellAsEmpty()) {
result.setText("");
} else {
/*
 * For the error code values, see the POI classes:
 * - org.apache.poi.ss.usermodel.FormulaError
 * - org.apache.poi.ss.usermodel.ErrorConstants
 */
switch(errorCode) {
    case 7:
        // division by zero
        result.setText("#DIV/0!");
        break;
    case 42:
        // no value is available to the function or formula
        result.setText("#N/A");
        break;
    case 29:
        // the formula refers to a name that does not exist
        result.setText("#NAME?");
        break;
    case 0:
        // an invalid intersection operator or an invalid cell reference is used
        result.setText("#NULL!");
        break;
    case 36:
        // a numeric value in the formula or function is invalid
        result.setText("#NUM!");
        break;
    case 23:
        // the formula refers to a cell that does not exist
        result.setText("#REF!");
        break;
    case 15:
        // the value cannot be converted to the expected data type
        result.setText("#VALUE!");
        break;
default:
result.setText("");
break;
}
}
return result;
} |
python | def deserialize_dtype(d):
"""
Deserializes a JSONified :obj:`numpy.dtype`.
Args:
d (:obj:`dict`): A dictionary representation of a :obj:`dtype` object.
Returns:
A :obj:`dtype` object.
"""
if isinstance(d['descr'], six.string_types):
return np.dtype(d['descr'])
descr = []
for col in d['descr']:
col_descr = []
for c in col:
if isinstance(c, six.string_types):
col_descr.append(str(c))
elif type(c) is list:
col_descr.append(tuple(c))
else:
col_descr.append(c)
descr.append(tuple(col_descr))
return np.dtype(descr) |
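A usage sketch of the round-trip this undoes (assuming the matching serializer stores `dtype.descr` under the `'descr'` key, as the code implies): JSON turns the `(name, format)` tuples into lists, and the function converts them back.

    import numpy as np

    # A structured dtype's descr is a list of (name, format) tuples ...
    dt = np.dtype([("x", "<f8"), ("label", "<U10")])
    descr_as_json = [list(c) for c in dt.descr]   # what JSON would hold
    # ... and rebuilding just needs the lists turned back into tuples.
    rebuilt = np.dtype([tuple(c) for c in descr_as_json])
    print(rebuilt == dt)  # True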
python | def plainica(x, reducedim=0.99, backend=None, random_state=None):
""" Source decomposition with ICA.
Apply ICA to the data x, with optional PCA dimensionality reduction.
Parameters
----------
x : array, shape (n_trials, n_channels, n_samples) or (n_channels, n_samples)
data set
reducedim : {int, float, 'no_pca'}, optional
A number less than 1 is interpreted as the fraction of variance that should remain in the data. All
components that describe in total less than `1-reducedim` of the variance are removed by the PCA step.
An integer number of 1 or greater is interpreted as the number of components to keep after applying the PCA.
If set to 'no_pca' the PCA step is skipped.
backend : dict-like, optional
Specify backend to use. When set to None the backend configured in config.backend is used.
Returns
-------
result : ResultICA
Source decomposition
"""
x = atleast_3d(x)
t, m, l = np.shape(x)
if backend is None:
backend = scotbackend
# pre-transform the data with PCA
if reducedim == 'no_pca':
c = np.eye(m)
d = np.eye(m)
xpca = x
else:
c, d, xpca = backend['pca'](x, reducedim)
# run ICA on the (PCA-transformed) data to estimate the source decomposition
mx, ux = backend['ica'](cat_trials(xpca), random_state=random_state)
# correct the (un)mixing matrix estimates
mx = mx.dot(d)
ux = c.dot(ux)
class Result:
unmixing = ux
mixing = mx
return Result |
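A shape sketch of how the PCA and ICA matrices compose above (random placeholders, not a real decomposition): with `m` channels reduced to `k` components, `mx.dot(d)` and `c.dot(ux)` lift the ICA (un)mixing back to full channel space.

    import numpy as np

    m, k = 10, 4
    c = np.random.randn(m, k)   # PCA unmixing: channels -> components
    d = np.random.randn(k, m)   # PCA mixing: components -> channels
    mx = np.random.randn(k, k)  # ICA mixing in PCA space
    ux = np.random.randn(k, k)  # ICA unmixing in PCA space

    mixing = mx.dot(d)          # (k, m): sources -> channels
    unmixing = c.dot(ux)        # (m, k): channels -> sources
    print(mixing.shape, unmixing.shape)  # (4, 10) (10, 4)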
java | public ClassFile getClassFile(String name) throws IOException {
if (name.indexOf('.') > 0) {
int i = name.lastIndexOf('.');
String pathname = name.replace('.', File.separatorChar) + ".class";
if (baseFileName.equals(pathname) ||
baseFileName.equals(pathname.substring(0, i) + "$" +
pathname.substring(i+1, pathname.length()))) {
return readClassFile(path);
}
} else {
if (baseFileName.equals(name.replace('/', File.separatorChar) + ".class")) {
return readClassFile(path);
}
}
return null;
} |
java | protected void modified(String id, Map<String, Object> properties) {
if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled()) {
Tr.event(this, tc, "modified " + id, properties);
}
config = properties;
myProps = null;
} |
python | def hexbin(x, y, color="purple", **kwargs):
"""Seaborn-compatible hexbin plot.
See also: http://seaborn.pydata.org/tutorial/axis_grids.html#mapping-custom-functions-onto-the-grid
"""
if HAS_SEABORN:
cmap = sns.light_palette(color, as_cmap=True)
else:
cmap = "Purples"
plt.hexbin(x, y, cmap=cmap, **kwargs) |
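As the linked tutorial suggests, this function is meant to be mapped onto a grid; a usage sketch, assuming the `hexbin` above is in scope and seaborn/matplotlib are installed:

    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns

    # gridsize is forwarded to plt.hexbin through **kwargs.
    rng = np.random.default_rng(0)
    df = pd.DataFrame(rng.normal(size=(500, 2)), columns=["x", "y"])
    g = sns.PairGrid(df)
    g.map(hexbin, gridsize=15)
    plt.show()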
java | public DataLakeAnalyticsAccountInner update(String resourceGroupName, String name, DataLakeAnalyticsAccountInner parameters) {
return updateWithServiceResponseAsync(resourceGroupName, name, parameters).toBlocking().last().body();
} |
java | public synchronized I_CmsLruCacheObject remove(I_CmsLruCacheObject theCacheObject) {
if (!isCached(theCacheObject)) {
// theCacheObject is null or not inside the cache
return null;
}
// set the list pointers correct
boolean nextNull = (theCacheObject.getNextLruObject() == null);
boolean prevNull = (theCacheObject.getPreviousLruObject() == null);
if (prevNull && nextNull) {
m_listHead = null;
m_listTail = null;
} else if (nextNull) {
// remove the object from the head pos.
I_CmsLruCacheObject newHead = theCacheObject.getPreviousLruObject();
newHead.setNextLruObject(null);
m_listHead = newHead;
} else if (prevNull) {
// remove the object from the tail pos.
I_CmsLruCacheObject newTail = theCacheObject.getNextLruObject();
newTail.setPreviousLruObject(null);
m_listTail = newTail;
} else {
// remove the object from within the list
theCacheObject.getPreviousLruObject().setNextLruObject(theCacheObject.getNextLruObject());
theCacheObject.getNextLruObject().setPreviousLruObject(theCacheObject.getPreviousLruObject());
}
// update cache stats. and notify the cached object
decreaseCache(theCacheObject);
return theCacheObject;
} |
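The four branches are the standard doubly-linked-list unlink; the same logic in a compact Python sketch (note the list above is oriented so that a null `next` means the head end):

    class Node:
        def __init__(self, value):
            self.value, self.prev, self.next = value, None, None

    def unlink(node, lst):
        # lst is {'head': ..., 'tail': ...}; mirrors the four cases above.
        if node.prev is None and node.next is None:   # only element
            lst['head'] = lst['tail'] = None
        elif node.next is None:                        # head end
            node.prev.next = None
            lst['head'] = node.prev
        elif node.prev is None:                        # tail end
            node.next.prev = None
            lst['tail'] = node.next
        else:                                          # interior node
            node.prev.next = node.next
            node.next.prev = node.prev
        return node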
java | private void mergeEngineeringObjectWithReferencedModel(Field field, EngineeringObjectModelWrapper model) {
AdvancedModelWrapper result = performMerge(loadReferencedModel(model, field), model);
if (result != null) {
model = result.toEngineeringObject();
}
} |
python | def from_numpy_arrays(freq_data, noise_data, length, delta_f, low_freq_cutoff):
"""Interpolate n PSD (as two 1-dimensional arrays of frequency and data)
to the desired length, delta_f and low frequency cutoff.
Parameters
----------
freq_data : array
Array of frequencies.
noise_data : array
PSD values corresponding to frequencies in freq_data.
length : int
Length of the frequency series in samples.
delta_f : float
Frequency resolution of the frequency series in Hertz.
low_freq_cutoff : float
Frequencies below this value are set to zero.
Returns
-------
psd : FrequencySeries
The generated frequency series.
"""
# Only include points above the low frequency cutoff
if freq_data[0] > low_freq_cutoff:
raise ValueError('Lowest frequency in input data '
' is higher than requested low-frequency cutoff ' + str(low_freq_cutoff))
kmin = int(low_freq_cutoff / delta_f)
flow = kmin * delta_f
data_start = (0 if freq_data[0]==low_freq_cutoff else numpy.searchsorted(freq_data, flow) - 1)
# If the cutoff is exactly in the file, start there
if freq_data[data_start+1] == low_freq_cutoff:
data_start += 1
freq_data = freq_data[data_start:]
noise_data = noise_data[data_start:]
flog = numpy.log(freq_data)
slog = numpy.log(noise_data)
psd_interp = scipy.interpolate.interp1d(flog, slog)
psd = numpy.zeros(length, dtype=numpy.float64)
vals = numpy.log(numpy.arange(kmin, length) * delta_f)
psd[kmin:] = numpy.exp(psd_interp(vals))
return FrequencySeries(psd, delta_f=delta_f) |
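The heart of this routine is interpolating in log-log space, which keeps power-law noise curves straight under interpolation; a stripped-down sketch with synthetic data:

    import numpy as np
    import scipy.interpolate

    freq = np.array([10., 20., 40., 80., 160.])
    psd_vals = 1e-40 * (freq / 10.) ** -4          # synthetic f^-4 noise
    interp = scipy.interpolate.interp1d(np.log(freq), np.log(psd_vals))

    delta_f, length, f_low = 1.0, 150, 10.0
    kmin = int(f_low / delta_f)
    psd = np.zeros(length)
    psd[kmin:] = np.exp(interp(np.log(np.arange(kmin, length) * delta_f)))
    print(psd[kmin], psd[-1])   # endpoints of the interpolated curve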
python | def fast_inhomo_bloch_equations(Ep, epsilonp, detuning_knob, T, gamma,
omega_level, rm, xi, theta,
unfolding, inhomogeneity, matrix_form=False,
file_name=None, return_code=False):
r"""Return a fast function that returns the numeric right-hand sides of \
inhomogeneous Bloch equations.
We test a basic two-level system.
>>> import numpy as np
>>> from scipy.constants import physical_constants
>>> from sympy import Matrix, symbols
>>> from fast.electric_field import electric_field_amplitude_top
>>> from fast.electric_field import PlaneWave
>>> from fast.symbolic import (define_laser_variables,
... polarization_vector)
>>> from fast.atomic_structure import Atom
>>> from fast.bloch import phase_transformation
>>> Ne = 2
>>> Nl = 1
>>> a0 = physical_constants["Bohr radius"][0]
>>> rm = [np.array([[0, 0], [a0, 0]]),
... np.array([[0, 0], [0, 0]]),
... np.array([[0, 0], [0, 0]])]
>>> xi = np.array([[[0, 1], [1, 0]]])
>>> omega_level = [0, 2.4e15]
>>> gamma21 = 2*np.pi*6e6
>>> gamma = np.array([[0, -gamma21], [gamma21, 0]])
>>> theta = phase_transformation(Ne, Nl, rm, xi)
We define symbolic variables to be used as token arguments.
>>> Ep, omega_laser = define_laser_variables(Nl)
>>> laser = PlaneWave(0, 0, 0, 0)
>>> epsilonp = [laser.epsilonp]
>>> k = [laser.k]
>>> detuning_knob = [symbols("delta1", real=True)]
A map to unfold the density matrix.
>>> unfolding = Unfolding(Ne, True, True, True)
We define the Doppler broadening.
>>> shape = [9]
>>> stds = [[-4, 4]]
>>> T = 273.15+20
>>> mass = Atom("Rb", 87).mass
>>> aux = (shape, stds, T, mass, detuning_knob, k,
... omega_level, xi, theta, unfolding, ["z", "x", "y"],
... True)
>>> doppler_effect = DopplerBroadening(*aux)
>>> doppler_effect.domain
[array([-669.86784872, -502.40088654, -334.93392436, -167.46696218,
0. , 167.46696218, 334.93392436, 502.40088654,
669.86784872])]
We obtain a function to calculate the Bloch equations.
>>> T_symb = symbols("T", positive=True)
>>> aux = (Ep, epsilonp, detuning_knob, T_symb, gamma,
... omega_level, rm, xi, theta, unfolding, doppler_effect,
... True)
>>> bloch_equations = fast_inhomo_bloch_equations(*aux)
We calculate an example.
>>> detuning_knobs = [0]
>>> Eps = electric_field_amplitude_top(0, 1e-3, 1, "SI")
>>> Eps *= np.exp(1j*np.pi)
>>> Eps = [Eps]
>>> A, b = bloch_equations(Eps, detuning_knobs, T)
>>> print A[:, 2, 1]*1e-6/2/np.pi
[ 853.49268666 640.12094531 426.74705849 213.37341005 0.
-213.37317167 -426.74610495 -640.11879984 -853.49125636]
>>> print b*1e-6
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]]
"""
if not unfolding.lower_triangular:
mes = "It is very inefficient to solve using all components of the "
mes += "density matrix. Better set lower_triangular=True in Unfolding."
raise NotImplementedError(mes)
if matrix_form and (not unfolding.real) and (unfolding.lower_triangular):
mes = "It is not possible to express the equations in matrix form "
mes += "for complex lower triangular components only."
raise ValueError(mes)
Nl = len(Ep)
Nrho = unfolding.Nrho
# We determine which arguments are constants.
if True:
try:
Ep = np.array([complex(Ep[l]) for l in range(Nl)])
variable_Ep = False
except:
variable_Ep = True
try:
epsilonp = [np.array([complex(epsilonp[l][i]) for i in range(3)])
for l in range(Nl)]
variable_epsilonp = False
except:
variable_epsilonp = True
try:
detuning_knob = np.array([float(detuning_knob[l])
for l in range(Nl)])
variable_detuning_knob = False
except:
variable_detuning_knob = True
try:
T = float(T)
variable_T = False
except:
variable_T = True
# We obtain code for the homogeneous terms.
if True:
if file_name is not None:
file_name_bloch = file_name+"_bloch"
else:
file_name_bloch = file_name
aux = (Ep, epsilonp, detuning_knob, gamma, omega_level, rm, xi, theta,
unfolding, matrix_form, file_name_bloch, True)
bloch_equations = fast_bloch_equations(*aux)
code = bloch_equations+"\n\n"
# We establish the arguments of the output function.
if True:
code += "def inhomo_bloch_equations("
code_args = ""
if not matrix_form: code_args += "rho, "
if variable_Ep: code_args += "Ep, "
if variable_epsilonp: code_args += "epsilonp, "
if variable_detuning_knob: code_args += "detuning_knob, "
code += code_args
if variable_T: code += "T, "
code += "inhomogeneity=inhomogeneity, "
code += "bloch_equations=bloch_equations):\n"
code += ' r"""A fast calculation of inhomogeneous '
code += 'Bloch equations."""\n'
# We initialize the output and auxiliaries.
if True:
# We introduce the factor that multiplies all terms.
sha = str(inhomogeneity.shape)[1:-1]+" "
if matrix_form:
code += " A = np.zeros(("+sha+str(Nrho)+", "+str(Nrho)
if not unfolding.real:
code += "), complex)\n\n"
else:
code += "))\n\n"
if unfolding.normalized:
code += " b = np.zeros(("+sha+str(Nrho)
if not unfolding.real:
code += "), complex)\n\n"
else:
code += "))\n\n"
else:
code += " rhs = np.zeros(("+sha+str(Nrho)
if not unfolding.real:
code += "), complex)\n\n"
else:
code += "))\n\n"
# We calculate the equations for each ensemble.
if True:
if variable_T: code += " inhomogeneity.reset(T)\n"
if code_args[-2:] == ", ": code_args = code_args[:-2]
code += " homogeneous = bloch_equations("+code_args+")\n\n"
code += " terms = inhomogeneity.terms\n"
code += " shape = inhomogeneity.shape\n"
code += " domain = inhomogeneity.domain\n"
shape = inhomogeneity.shape
dimension = len(shape)
if dimension == 1:
code += " for i in range(shape[0]):\n"
code += " result = terms(domain[0][i], detuning_knob)\n"
if matrix_form:
if unfolding.normalized:
code += " A[i] = homogeneous[0]+result[0]\n"
code += " b[i] = homogeneous[1]+result[1]\n"
else:
code += " A[i] = homogeneous+result\n"
else:
code += " rhs[i] = homogeneous+result\n"
# We finish the code.
if True:
# code = rabi_code + "\n\n" + code
if matrix_form:
if unfolding.normalized:
code += " return A, b\n"
else:
code += " return A\n"
else:
code += " return rhs\n"
# We write the code to file if provided, and execute it.
if True:
if file_name is not None:
f = file(file_name+".py", "w")
f.write(code)
f.close()
inhomo_bloch_equations = code
if not return_code:
exec inhomo_bloch_equations
return inhomo_bloch_equations |
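The function above is Python 2 (`exec code` as a statement, `file(...)`, `print` doctests). Its generate-source-then-exec pattern looks like this in miniature, in Python 3 syntax:

    # Build a function's source as a string, then exec it into a namespace.
    n_terms = 3
    code = "def rhs(rho):\n"
    code += "    out = 0.0\n"
    for i in range(n_terms):
        code += "    out += {}*rho\n".format(i + 1)
    code += "    return out\n"

    namespace = {}
    exec(code, namespace)       # compiles the source and binds rhs
    rhs = namespace["rhs"]
    print(rhs(2.0))             # 12.0  (= (1+2+3)*2.0)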
java | public static BackendUser from(ValidCredentials credentials){
BackendUser beu = new BackendUser();
beu.initFrom(credentials);
return beu;
} |
java | public static final Date parseExtendedAttributeDate(String value)
{
Date result = null;
if (value != null)
{
try
{
result = DATE_FORMAT.get().parse(value);
}
catch (ParseException ex)
{
// ignore exceptions
}
}
return (result);
} |
java | public Observable<ServiceResponse<ResourceHealthMetadataInner>> getBySiteWithServiceResponseAsync(String resourceGroupName, String name) {
if (resourceGroupName == null) {
throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
}
if (name == null) {
throw new IllegalArgumentException("Parameter name is required and cannot be null.");
}
if (this.client.subscriptionId() == null) {
throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
}
if (this.client.apiVersion() == null) {
throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
}
return service.getBySite(resourceGroupName, name, this.client.subscriptionId(), this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
.flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<ResourceHealthMetadataInner>>>() {
@Override
public Observable<ServiceResponse<ResourceHealthMetadataInner>> call(Response<ResponseBody> response) {
try {
ServiceResponse<ResourceHealthMetadataInner> clientResponse = getBySiteDelegate(response);
return Observable.just(clientResponse);
} catch (Throwable t) {
return Observable.error(t);
}
}
});
} |
python | def _dt_array_cmp(cls, op):
"""
Wrap comparison operations to convert datetime-like to datetime64
"""
opname = '__{name}__'.format(name=op.__name__)
nat_result = opname == '__ne__'
def wrapper(self, other):
if isinstance(other, (ABCDataFrame, ABCSeries, ABCIndexClass)):
return NotImplemented
other = lib.item_from_zerodim(other)
if isinstance(other, (datetime, np.datetime64, str)):
if isinstance(other, (datetime, np.datetime64)):
# GH#18435 strings get a pass from tzawareness compat
self._assert_tzawareness_compat(other)
try:
other = _to_M8(other, tz=self.tz)
except ValueError:
# string that cannot be parsed to Timestamp
return ops.invalid_comparison(self, other, op)
result = op(self.asi8, other.view('i8'))
if isna(other):
result.fill(nat_result)
elif lib.is_scalar(other) or np.ndim(other) == 0:
return ops.invalid_comparison(self, other, op)
elif len(other) != len(self):
raise ValueError("Lengths must match")
else:
if isinstance(other, list):
try:
other = type(self)._from_sequence(other)
except ValueError:
other = np.array(other, dtype=np.object_)
elif not isinstance(other, (np.ndarray, ABCIndexClass, ABCSeries,
DatetimeArray)):
# Following Timestamp convention, __eq__ is all-False
# and __ne__ is all True, others raise TypeError.
return ops.invalid_comparison(self, other, op)
if is_object_dtype(other):
# We have to use _comp_method_OBJECT_ARRAY instead of numpy
# comparison otherwise it would fail to raise when
# comparing tz-aware and tz-naive
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op,
self.astype(object),
other)
o_mask = isna(other)
elif not (is_datetime64_dtype(other) or
is_datetime64tz_dtype(other)):
# e.g. is_timedelta64_dtype(other)
return ops.invalid_comparison(self, other, op)
else:
self._assert_tzawareness_compat(other)
if isinstance(other, (ABCIndexClass, ABCSeries)):
other = other.array
if (is_datetime64_dtype(other) and
not is_datetime64_ns_dtype(other) or
not hasattr(other, 'asi8')):
# e.g. other.dtype == 'datetime64[s]'
# or an object-dtype ndarray
other = type(self)._from_sequence(other)
result = op(self.view('i8'), other.view('i8'))
o_mask = other._isnan
result = com.values_from_object(result)
if o_mask.any():
result[o_mask] = nat_result
if self._hasnans:
result[self._isnan] = nat_result
return result
return compat.set_function_name(wrapper, opname, cls) |
java | @Deprecated
public static URL buildUrl(String host, int port, String path, Map<String, String> parameters) throws MalformedURLException {
return buildUrl("http", port, path, parameters);
} |
python | def post_handler_err(self, function_arn, invocation_id, handler_err):
"""
Post the error message from executing the function handler for :code:`function_arn`
with specified :code:`invocation_id`
:param function_arn: Arn of the Lambda function which has the handler error message.
:type function_arn: string
:param invocation_id: Invocation ID of the work that is being requested
:type invocation_id: string
:param handler_err: the error message caught from handler
:type handler_err: string
"""
url = self._get_work_url(function_arn)
runtime_logger.info('Posting handler error for invocation id [{}] to {}'.format(invocation_id, url))
payload = json.dumps({
"errorMessage": handler_err,
}).encode('utf-8')
request = Request(url, payload)
request.add_header(HEADER_INVOCATION_ID, invocation_id)
request.add_header(HEADER_FUNCTION_ERR_TYPE, "Handled")
request.add_header(HEADER_AUTH_TOKEN, self.auth_token)
urlopen(request)
runtime_logger.info('Posted handler error for invocation id [{}]'.format(invocation_id)) |
python | def _compute_quads(self, element, data, mapping):
"""
Computes the node quad glyph data.
"""
quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
quad_data = dict(data['scatter_1'])
quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
for node in element._sankey['nodes']:
quad_data['x0'].append(node['x0'])
quad_data['y0'].append(node['y0'])
quad_data['x1'].append(node['x1'])
quad_data['y1'].append(node['y1'])
data['scatter_1'].update(quad_data)
data['quad_1'] = data['scatter_1']
mapping['quad_1'] = quad_mapping |
java | private int staticEncodingLength(GenericPropertyInfo info) {
TypeDesc type = info.getStorageType();
TypeDesc primType = type.toPrimitiveType();
if (primType == null) {
if (info.isLob()) {
// Lob locator is stored as a long.
return 8;
}
} else {
if (info.isNullable()) {
// Type is a primitive wrapper.
switch (primType.getTypeCode()) {
case TypeDesc.BYTE_CODE:
return ~1;
case TypeDesc.BOOLEAN_CODE:
return 1;
case TypeDesc.SHORT_CODE:
case TypeDesc.CHAR_CODE:
return ~1;
case TypeDesc.INT_CODE:
return ~1;
case TypeDesc.FLOAT_CODE:
return 4;
case TypeDesc.LONG_CODE:
return ~1;
case TypeDesc.DOUBLE_CODE:
return 8;
}
} else {
// Type is primitive or a required primitive wrapper.
switch (type.getTypeCode()) {
case TypeDesc.BYTE_CODE:
case TypeDesc.BOOLEAN_CODE:
return 1;
case TypeDesc.SHORT_CODE:
case TypeDesc.CHAR_CODE:
return 2;
case TypeDesc.INT_CODE:
case TypeDesc.FLOAT_CODE:
return 4;
case TypeDesc.LONG_CODE:
case TypeDesc.DOUBLE_CODE:
return 8;
}
}
}
return ~0;
} |
python | def rotvec(v1, angle, iaxis):
"""
Transform a vector to a new coordinate system rotated by angle
radians about axis iaxis. This transformation rotates v1 by
angle radians about the specified axis.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/rotvec_c.html
:param v1: Vector whose coordinate system is to be rotated.
:type v1: 3-Element Array of floats
:param angle: Angle of rotation (radians).
:type angle: float
:param iaxis: Axis of rotation X=1, Y=2, Z=3.
:type iaxis: int
:return: the vector expressed in the new coordinate system.
:rtype: 3-Element Array of floats
"""
v1 = stypes.toDoubleVector(v1)
angle = ctypes.c_double(angle)
iaxis = ctypes.c_int(iaxis)
vout = stypes.emptyDoubleVector(3)
libspice.rotvec_c(v1, angle, iaxis, vout)
return stypes.cVectorToPython(vout) |
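A pure-NumPy cross-check of what the SPICE call computes: rotating the coordinate frame by `angle` is the same as applying the inverse (transposed) rotation to the vector.

    import numpy as np

    def rotvec_np(v1, angle, iaxis):
        # Frame rotation about axis iaxis (1-based): for the z-axis,
        # x' = c*x + s*y, y' = -s*x + c*y, z' = z.
        c, s = np.cos(angle), np.sin(angle)
        i = iaxis - 1
        j, k = (i + 1) % 3, (i + 2) % 3
        R = np.eye(3)
        R[j, j], R[j, k] = c, s
        R[k, j], R[k, k] = -s, c
        return R @ np.asarray(v1, dtype=float)

    print(rotvec_np([1.0, 0.0, 0.0], np.pi / 2, 3))  # ~[0, -1, 0]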
java | protected BlockStmt returnField(String fieldName) {
BlockStmt body = new BlockStmt();
body.addStatement(new ReturnStmt(fieldName));
return body;
} |
python | def _flatten_mesh(self, Xs, term):
"""flatten the mesh and distribute into a feature matrix"""
n = Xs[0].size
if self.terms[term].istensor:
terms = self.terms[term]
else:
terms = [self.terms[term]]
X = np.zeros((n, self.statistics_['m_features']))
for term_, x in zip(terms, Xs):
X[:, term_.feature] = x.ravel()
return X |
python | def detect_cid_in_current_path(i):
"""
Input: {
(path) - path, otherwise current directory
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
repo_uoa - repo UOA
repo_uid - repo UID
repo_alias - repo alias
(module_uoa) - module UOA
(module_uid) - module UID
(module_alias) - module alias
(data_uoa) - data UOA
(data_uid) - data UID
(data_alias) - data alias
}
"""
p=i.get('path','')
if p=='': p=os.getcwd()
p=os.path.normpath(p)
dirs=[]
p1=''
pr='*'
found=False
while pr!='':
p1=os.path.join(p, cfg['repo_file'])
if os.path.isfile(p1):
found=True
break
p2=os.path.split(p)
p=p2[0]
pr=p2[1]
dirs.append(pr)
if not found:
return {'return':16, 'error':'repository is not detected in the current path'}
# Find info about repo (prepared as return dict)
r=find_repo_by_path({'path':p})
if r['return']>0: return r
# Check info about module
ld=len(dirs)
if ld>0:
m=dirs[ld-1]
rx=find_path_to_entry({'path':p, 'data_uoa':m})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['module_uoa']=rx['data_uoa']
r['module_uid']=rx['data_uid']
r['module_alias']=rx['data_alias']
# Check info about data
if ld>1:
d=dirs[ld-2]
rx=find_path_to_entry({'path':os.path.join(p,m), 'data_uoa':d})
if rx['return']>0 and rx['return']!=16: return rx
elif rx['return']==0:
r['data_uoa']=rx['data_uoa']
r['data_uid']=rx['data_uid']
r['data_alias']=rx['data_alias']
return r |
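The loop at the top is a generic walk-up-until-marker-file search; in isolation (the marker filename here is a hypothetical stand-in for `cfg['repo_file']`):

    import os

    def find_marker_upwards(start, marker=".ckr.json"):
        # Walk from start toward the filesystem root, remembering the
        # directory names passed on the way, until marker is found.
        path = os.path.normpath(start)
        passed = []
        while True:
            if os.path.isfile(os.path.join(path, marker)):
                return path, passed
            parent, tail = os.path.split(path)
            if not tail:        # reached the root without finding it
                return None, passed
            path, passed = parent, passed + [tail]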
java | public static <Type extends Message>
RpcCallback<Message> generalizeCallback(
final RpcCallback<Type> originalCallback,
final Class<Type> originalClass,
final Type defaultInstance) {
return new RpcCallback<Message>() {
public void run(final Message parameter) {
Type typedParameter;
try {
typedParameter = originalClass.cast(parameter);
} catch (ClassCastException ignored) {
typedParameter = copyAsType(defaultInstance, parameter);
}
originalCallback.run(typedParameter);
}
};
} |
java | @SuppressWarnings("unchecked")
@Override
public EList<String> getTelephoneNumbers() {
return (EList<String>) eGet(Ifc4Package.Literals.IFC_TELECOM_ADDRESS__TELEPHONE_NUMBERS, true);
} |
python | async def _process_2auth_form(self, html: str) -> (str, str):
"""
Parsing two-factor authorization page and filling the code
:param html: html page
:return: url and html from redirected page
"""
# Parse page
p = TwoFactorCodePageParser()
p.feed(html)
p.close()
# Prepare request data
form_url = p.url
form_data = dict(p.inputs)
form_data['remember'] = 0
if p.message:
raise VkAuthError('invalid_data', p.message, form_url, form_data)
form_data['code'] = await self.enter_confirmation_code()
# Send request
url, html = await self.driver.post_text(form_url, form_data)
return url, html |
python | def sample_path(alpha, A, pobs, T=None):
""" Sample the hidden pathway S from the conditional distribution P ( S | Parameters, Observations )
Parameters
----------
alpha : ndarray((T,N), dtype = float), optional, default = None
alpha[t,i] is the ith forward coefficient of time t.
A : ndarray((N,N), dtype = float)
transition matrix of the hidden states
pobs : ndarray((T,N), dtype = float)
pobs[t,i] is the observation probability for observation at time t given hidden state i
T : int
number of time steps
Returns
-------
S : numpy.array shape (T)
    hidden state trajectory sampled from P(S | Parameters, Observations)
"""
if __impl__ == __IMPL_PYTHON__:
return ip.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)
elif __impl__ == __IMPL_C__:
return ic.sample_path(alpha, A, pobs, T=T, dtype=config.dtype)
else:
raise RuntimeError('Nonexisting implementation selected: '+str(__impl__)) |
python | def _remove(self, obj):
"""Python 2.4 compatibility."""
for idx, item in enumerate(self._queue):
if item == obj:
del self._queue[idx]
break |
java | public static <R> Stream<R> zip(final char[] a, final char[] b, final char[] c, final char valueForNoneA, final char valueForNoneB,
final char valueForNoneC, final CharTriFunction<R> zipFunction) {
return zip(CharIteratorEx.of(a), CharIteratorEx.of(b), CharIteratorEx.of(c), valueForNoneA, valueForNoneB, valueForNoneC, zipFunction);
} |
java | public CharSequence process(CharSequence source) {
CharSequence target = source;
for (TextProcessor processor : processors) {
target = processor.process(target);
}
return target;
} |
java | public static List<Element> getDescriptorElements(InputStream xmlDescriptorIn)
throws IOException {
List<Element> elements = new ArrayList<>();
org.w3c.dom.Document xmlDescriptorDOM = createDOM(xmlDescriptorIn);
XPath xPath = XPathFactory.newInstance().newXPath();
NodeList allElements;
try {
XPathExpression exp = xPath.compile("//*");
allElements = (NodeList) exp.evaluate(xmlDescriptorDOM.getDocumentElement(), XPathConstants.NODESET);
} catch (XPathExpressionException e) {
throw new IllegalStateException("The hard coded XPath expression should always be valid!");
}
for (int i = 0; i < allElements.getLength(); i++) {
if (allElements.item(i) instanceof Element) {
Element customElement = (Element) allElements.item(i);
elements.add(customElement);
}
}
return elements;
} |
python | def convert(self, value, view):
"""Check that the value is a string and matches the pattern.
"""
if isinstance(value, BASESTRING):
if self.pattern and not self.regex.match(value):
self.fail(
u"must match the pattern {0}".format(self.pattern),
view
)
return value
else:
self.fail(u'must be a string', view, True) |
java | public Observable<ServiceResponse<Page<PublicIPAddressInner>>> listVirtualMachineScaleSetVMPublicIPAddressesSinglePageAsync(final String resourceGroupName, final String virtualMachineScaleSetName, final String virtualmachineIndex, final String networkInterfaceName, final String ipConfigurationName) {
if (resourceGroupName == null) {
throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
}
if (virtualMachineScaleSetName == null) {
throw new IllegalArgumentException("Parameter virtualMachineScaleSetName is required and cannot be null.");
}
if (virtualmachineIndex == null) {
throw new IllegalArgumentException("Parameter virtualmachineIndex is required and cannot be null.");
}
if (networkInterfaceName == null) {
throw new IllegalArgumentException("Parameter networkInterfaceName is required and cannot be null.");
}
if (ipConfigurationName == null) {
throw new IllegalArgumentException("Parameter ipConfigurationName is required and cannot be null.");
}
if (this.client.subscriptionId() == null) {
throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
}
final String apiVersion = "2017-03-30";
return service.listVirtualMachineScaleSetVMPublicIPAddresses(resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, ipConfigurationName, this.client.subscriptionId(), apiVersion, this.client.acceptLanguage(), this.client.userAgent())
.flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<Page<PublicIPAddressInner>>>>() {
@Override
public Observable<ServiceResponse<Page<PublicIPAddressInner>>> call(Response<ResponseBody> response) {
try {
ServiceResponse<PageImpl<PublicIPAddressInner>> result = listVirtualMachineScaleSetVMPublicIPAddressesDelegate(response);
return Observable.just(new ServiceResponse<Page<PublicIPAddressInner>>(result.body(), result.response()));
} catch (Throwable t) {
return Observable.error(t);
}
}
});
} |
python | def NewFromJSON(data):
"""
Create a new SharedFile instance from a JSON dict.
Args:
data (dict): JSON dictionary representing a SharedFile.
Returns:
A SharedFile instance.
"""
return SharedFile(
sharekey=data.get('sharekey', None),
name=data.get('name', None),
user=User.NewFromJSON(data.get('user', None)),
title=data.get('title', None),
description=data.get('description', None),
posted_at=data.get('posted_at', None),
permalink=data.get('permalink', None),
width=data.get('width', None),
height=data.get('height', None),
views=data.get('views', 0),
likes=data.get('likes', 0),
saves=data.get('saves', 0),
comments=data.get('comments', None),
nsfw=data.get('nsfw', False),
image_url=data.get('image_url', None),
source_url=data.get('source_url', None),
saved=data.get('saved', False),
liked=data.get('liked', False),
) |
java | public boolean setBooleanValue(boolean value) throws IOException, SaneException {
ControlOptionResult result = writeOption(SaneWord.forInt(value ? 1 : 0));
Preconditions.checkState(result.getType() == OptionValueType.BOOLEAN);
return SaneWord.fromBytes(result.getValue()).integerValue() != 0;
} |
java | protected static void reportError(ParsingContext context, String message, Object... parameter) {
final File file = context.getCurrentFile();
final int offset = context.getMatcher().start() + context.getStartIndex();
final int lineno = context.getLineNo();
Throwable cause = null;
for (final Object param : parameter) {
if (param instanceof Throwable) {
cause = (Throwable) param;
break;
}
}
final Object[] args = new Object[parameter.length + 3];
args[0] = file;
args[1] = lineno;
args[2] = offset;
System.arraycopy(parameter, 0, args, 3, parameter.length);
final String msg = MessageFormat.format(message, args);
if (cause != null) {
throw new ParsingException(msg, file, lineno, Throwables.getRootCause(cause));
}
throw new ParsingException(msg, file, lineno);
} |
java | private void cancelNameResolverBackoff() {
syncContext.throwIfNotInThisSynchronizationContext();
if (scheduledNameResolverRefresh != null) {
scheduledNameResolverRefresh.cancel();
scheduledNameResolverRefresh = null;
nameResolverBackoffPolicy = null;
}
} |
java | private boolean importConfigurations() {
// Create a set of files in the inventory
Set<File> files = inventory.entries().stream().map(v->v.getPath()).collect(Collectors.toSet());
List<File> entriesToImport = Arrays.asList(baseDir.listFiles(f->
f.isFile()
&& !f.equals(catalog) // Exclude the master catalog (should it have the same extension as entries)
&& f.getName().endsWith(CONFIG_EXT)
&& !files.contains(f)) // Exclude files already in the inventory
);
entriesToImport.forEach(f->{
try {
inventory.add(InventoryEntry.create(Configuration.read(f).copyWithIdentifier(inventory.nextIdentifier()), newConfigurationFile()));
f.delete();
} catch (IOException e) {
if (logger.isLoggable(Level.FINE)) {
logger.log(Level.FINE, "Failed to read: " + f, e);
}
}
});
return !entriesToImport.isEmpty();
} |
java | public Map<String, Metamodel> getMetamodelMap()
{
if (metamodelMap == null)
{
metamodelMap = new HashMap<String, Metamodel>();
}
return metamodelMap;
} |
python | def join_all(domain, *parts):
"""
Join all url components.
Example::
>>> join_all("https://www.apple.com", "iphone")
https://www.apple.com/iphone
:param domain: Domain parts, example: https://www.python.org
:param parts: Other parts, example: "/doc", "/py27"
:return: url
"""
l = list()
if domain.endswith("/"):
domain = domain[:-1]
l.append(domain)
for part in parts:
for i in part.split("/"):
if i.strip():
l.append(i)
url = "/".join(l)
return url |
java | protected void record( final AnnotationTypeMemberDeclaration annotationTypeMember,
final Node parentNode ) throws Exception {
final Node memberNode = parentNode.addNode(ClassFileSequencerLexicon.ANNOTATION_TYPE_MEMBER,
ClassFileSequencerLexicon.ANNOTATION_TYPE_MEMBER);
memberNode.setProperty(ClassFileSequencerLexicon.NAME, annotationTypeMember.getName().getFullyQualifiedName());
{ // modifiers
final int modifiers = annotationTypeMember.getModifiers();
memberNode.setProperty(ClassFileSequencerLexicon.ABSTRACT, (modifiers & Modifier.ABSTRACT) != 0);
memberNode.setProperty(ClassFileSequencerLexicon.VISIBILITY, getVisibility(modifiers));
}
{ // javadocs
final Javadoc javadoc = annotationTypeMember.getJavadoc();
if (javadoc != null) {
record(javadoc, memberNode);
}
}
{ // annotations
@SuppressWarnings( "unchecked" )
final List<IExtendedModifier> modifiers = annotationTypeMember.modifiers();
recordAnnotations(modifiers, memberNode);
}
{ // type
final Type type = annotationTypeMember.getType();
record(type, ClassFileSequencerLexicon.TYPE, memberNode);
}
{ // default expression
final Expression expression = annotationTypeMember.getDefault();
if (expression != null) {
recordExpression(expression, ClassFileSequencerLexicon.DEFAULT, memberNode);
}
}
} |
java | public RegexPatternSet withRegexPatternStrings(String... regexPatternStrings) {
if (this.regexPatternStrings == null) {
setRegexPatternStrings(new java.util.ArrayList<String>(regexPatternStrings.length));
}
for (String ele : regexPatternStrings) {
this.regexPatternStrings.add(ele);
}
return this;
} |
java | public com.google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KMapEstimationResultOrBuilder
getKMapEstimationResultOrBuilder() {
if (resultCase_ == 7) {
return (com.google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KMapEstimationResult) result_;
}
return com.google.privacy.dlp.v2.AnalyzeDataSourceRiskDetails.KMapEstimationResult
.getDefaultInstance();
} |
java | private void generateScenes(final Metadata m, final Element e) {
final Element scenesElement = new Element("scenes", NS);
for (final Scene scene : m.getScenes()) {
final Element sceneElement = new Element("scene", NS);
addNotNullElement(sceneElement, "sceneTitle", scene.getTitle());
addNotNullElement(sceneElement, "sceneDescription", scene.getDescription());
addNotNullElement(sceneElement, "sceneStartTime", scene.getStartTime());
addNotNullElement(sceneElement, "sceneEndTime", scene.getEndTime());
if (!sceneElement.getChildren().isEmpty()) {
scenesElement.addContent(sceneElement);
}
}
if (!scenesElement.getChildren().isEmpty()) {
e.addContent(scenesElement);
}
} |
java | public String getMessageAndLocation() {
StringBuilder sbuffer = new StringBuilder();
String message = super.getMessage();
if (null != message) {
sbuffer.append(message);
}
if (null != locator) {
String systemID = locator.getSystemId();
int line = locator.getLineNumber();
int column = locator.getColumnNumber();
if (null != systemID) {
sbuffer.append("; SystemID: ");
sbuffer.append(systemID);
}
if (0 != line) {
sbuffer.append("; Line#: ");
sbuffer.append(line);
}
if (0 != column) {
sbuffer.append("; Column#: ");
sbuffer.append(column);
}
}
return sbuffer.toString();
} |
java | public void drawItemStack(ItemStack itemStack, int x, int y, String label)
{
drawItemStack(itemStack, x, y, label, null, true);
} |
java | @Override
@SuppressWarnings("unchecked")
public void start() {
if (isStopped()) return;
logger().log("Inbound stream listening: for '" + reader.name() + "'");
try {
reader.openFor(this);
} catch (Exception e) {
reader.close();
e.printStackTrace();
throw new IllegalStateException(e.getMessage(), e);
}
cancellable = this.stage().scheduler().schedule(selfAs(Scheduled.class), null, 1000, probeInterval);
} |
python | def _determine_scaling_policies(scaling_policies, scaling_policies_from_pillar):
'''
helper method for present. ensure that scaling_policies are set
'''
pillar_scaling_policies = copy.deepcopy(
__salt__['config.option'](scaling_policies_from_pillar, {})
)
if not scaling_policies and pillar_scaling_policies:
scaling_policies = pillar_scaling_policies
return scaling_policies |
java | public static dnssrvrec[] get_filtered(nitro_service service, String filter) throws Exception{
dnssrvrec obj = new dnssrvrec();
options option = new options();
option.set_filter(filter);
dnssrvrec[] response = (dnssrvrec[]) obj.getfiltered(service, option);
return response;
} |
java | public JSONObject getFullPresence()
{
JSONObject game = getGameJson(this.game);
return new JSONObject()
.put("afk", idle)
.put("since", System.currentTimeMillis())
.put("game", game == null ? JSONObject.NULL : game)
.put("status", getStatus().getKey());
} |
java | public AbucoinsTicker getAbucoinsTicker(String productID) throws IOException {
AbucoinsTicker abucoinsTicker = abucoins.getTicker(productID);
return abucoinsTicker;
} |
python | def move(self, x, y):
"""Changes the overlay's position relative to the IFramebuffer.
in x of type int
in y of type int
"""
if not isinstance(x, baseinteger):
raise TypeError("x can only be an instance of type baseinteger")
if not isinstance(y, baseinteger):
raise TypeError("y can only be an instance of type baseinteger")
self._call("move",
in_p=[x, y]) |
python | def plotloc(data, circleinds=[], crossinds=[], edgeinds=[], url_path=None, fileroot=None,
tools="hover,tap,pan,box_select,wheel_zoom,reset", plot_width=450, plot_height=400):
""" Make a light-weight loc figure """
fields = ['l1', 'm1', 'sizes', 'colors', 'snrs', 'key']
if not circleinds: circleinds = range(len(data['snrs']))
# set ranges
datalen = len(data['dm'])
inds = circleinds + crossinds + edgeinds
l1 = [data['l1'][i] for i in inds]
l1_min = min(l1)
l1_max = max(l1)
m1 = [data['m1'][i] for i in inds]
m1_min = min(m1)
m1_max = max(m1)
source = ColumnDataSource(data = dict({(key, tuple([value[i] for i in circleinds if i not in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc = Figure(plot_width=plot_width, plot_height=plot_height, toolbar_location="left", x_axis_label='l1 (rad)', y_axis_label='m1 (rad)',
x_range=(l1_min, l1_max), y_range=(m1_min,m1_max), tools=tools, output_backend='webgl')
loc.circle('l1', 'm1', size='sizes', line_color=None, fill_color='colors', fill_alpha=0.2, source=source)
if crossinds:
sourceneg = ColumnDataSource(data = dict({(key, tuple([value[i] for i in crossinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.cross('l1', 'm1', size='sizes', line_color='colors', line_alpha=0.3, source=sourceneg)
if edgeinds:
sourceedge = ColumnDataSource(data = dict({(key, tuple([value[i] for i in edgeinds]))
for (key, value) in data.iteritems() if key in fields}))
loc.circle('l1', 'm1', size='sizes', line_color='colors', fill_color='colors', source=sourceedge, line_alpha=0.5, fill_alpha=0.2)
hover = loc.select(dict(type=HoverTool))
hover.tooltips = OrderedDict([('SNR', '@snrs'), ('key', '@key')])
if url_path and fileroot:
url = '{}/cands_{}_@key.png'.format(url_path, fileroot)  # '@key' is substituted per-point by bokeh's OpenURL (assumed template)
taptool = loc.select(type=TapTool)
taptool.callback = OpenURL(url=url)
return loc |
python | def inicap_string(string,
cleanse=False):
"""
Convert the first letter of each word to capital letter without
touching the rest of the word.
@param string: a string.
@param cleanse: ``True`` to split the string on any run of whitespace,
    collapsing consecutive separators; ``False`` to split on single
    space characters only and keep every other character as-is.
@return: a string for which the first letter of each word has been
capitalized without modifying the case of the rest of the word.
"""
return string and ' '.join(word[0].upper() + word[1:]
for word in (string.split() if cleanse else string.split(' '))) |
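For example (note that with `cleanse=False` a run of two spaces produces an empty 'word' and an `IndexError` on `word[0]`, so `cleanse=True` is the safer mode for messy input):

    print(inicap_string("hello world-wide web"))
    # Hello World-wide Web
    print(inicap_string("hello   world", cleanse=True))
    # Hello World   (runs of whitespace collapse to one space)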
python | def _makemasks(self, index=None, level=0):
"""
Internal function for generating masks for selecting values based on multi-index values.
As all other multi-index functions will call this function, basic type-checking is also
performed at this stage.
"""
if index is None:
index = self.index
try:
dims = len(array(index).shape)
if dims == 1:
index = array(index, ndmin=2).T
except:
raise TypeError('A multi-index must be convertible to a numpy ndarray')
try:
index = index[:, level]
except:
raise ValueError("Levels must be indices into individual elements of the index")
lenIdx = index.shape[0]
nlevels = index.shape[1]
combs = product(*[unique(index.T[i, :]) for i in range(nlevels)])
combs = array([l for l in combs])
masks = array([[array_equal(index[i], c) for i in range(lenIdx)] for c in combs])
return zip(*[(masks[x], combs[x]) for x in range(len(masks)) if masks[x].any()]) |
python | def ReadCompletedRequests(self, session_id, timestamp=None, limit=None):
"""Fetches all the requests with a status message queued for them."""
subject = session_id.Add("state")
requests = {}
status = {}
for predicate, serialized, _ in self.ResolvePrefix(
subject, [self.FLOW_REQUEST_PREFIX, self.FLOW_STATUS_PREFIX],
limit=limit,
timestamp=timestamp):
parts = predicate.split(":", 3)
request_id = parts[2]
if parts[1] == "status":
status[request_id] = serialized
else:
requests[request_id] = serialized
for request_id, serialized in sorted(iteritems(requests)):
if request_id in status:
yield (rdf_flow_runner.RequestState.FromSerializedString(serialized),
rdf_flows.GrrMessage.FromSerializedString(status[request_id])) |
java | @Override
public boolean releasePeerLease(String recoveryIdentity) throws Exception {
if (tc.isEntryEnabled())
Tr.entry(tc, "releasePeerLease", new Object[] { recoveryIdentity, this });
// Release the lock - if it is not null!
FileLock fLock = null;
FileChannel fChannel = null;
if (_peerLeaseLock != null) {
String recIdentity = _peerLeaseLock.getRecoveryIdentity();
if (recoveryIdentity.equals(recIdentity)) {
fLock = _peerLeaseLock.getFileLock();
if (fLock != null) {
fLock.release();
}
// Close the channel
fChannel = _peerLeaseLock.getFileChannel();
if (fChannel != null)
fChannel.close();
_peerLeaseLock = null;
} else {
if (tc.isDebugEnabled())
Tr.debug(tc, "The locks identity which was " + recIdentity + " did not match the requested identity which was " + recoveryIdentity);
}
} else {
if (tc.isDebugEnabled())
Tr.debug(tc, "The lease lock was unexpectedly null");
}
if (tc.isEntryEnabled())
Tr.exit(tc, "releasePeerLease");
return true;
} |
java | public IntIntSortedVector getSum(IntIntVector other) {
IntIntSortedVector sum = new IntIntSortedVector(this);
sum.add(other);
return sum;
} |
java | public static UTF8JsonGenerator newJsonGenerator(OutputStream out, byte[] buf)
{
return newJsonGenerator(out, buf, 0, false, new IOContext(
DEFAULT_JSON_FACTORY._getBufferRecycler(), out, false));
} |
python | def emit(self, record):
"""Prints a record out to some streams.
If FLAGS.logtostderr is set, it will print to sys.stderr ONLY.
If FLAGS.alsologtostderr is set, it will print to sys.stderr.
If FLAGS.logtostderr is not set, it will log to the stream
associated with the current thread.
Args:
record: logging.LogRecord, the record to emit.
"""
# People occasionally call logging functions at import time before
# our flags may have even been defined yet, let alone even parsed, as we
# rely on the C++ side to define some flags for us and app init to
# deal with parsing. Match the C++ library behavior of notify and emit
# such messages to stderr. It encourages people to clean-up and does
# not hide the message.
level = record.levelno
if not FLAGS.is_parsed(): # Also implies "before flag has been defined".
global _warn_preinit_stderr
if _warn_preinit_stderr:
sys.stderr.write(
'WARNING: Logging before flag parsing goes to stderr.\n')
_warn_preinit_stderr = False
self._log_to_stderr(record)
elif FLAGS['logtostderr'].value:
self._log_to_stderr(record)
else:
super(PythonHandler, self).emit(record)
stderr_threshold = converter.string_to_standard(
FLAGS['stderrthreshold'].value)
if ((FLAGS['alsologtostderr'].value or level >= stderr_threshold) and
self.stream != sys.stderr):
self._log_to_stderr(record)
# Die when the record is created from ABSLLogger and level is FATAL.
if _is_absl_fatal_record(record):
self.flush() # Flush the log before dying.
# In threaded python, sys.exit() from a non-main thread only
# exits the thread in question.
os.abort() |
java | private String nodeTabs() {
StringBuilder builder = new StringBuilder();
builder.append("<div class='tabs'>");
builder.append("<ul>");
builder
.append("<li class='tab' type='browsers'><a title='test slots' href='#'>Browsers</a></li>");
builder
.append("<li class='tab' type='config'><a title='node configuration' href='#'>Configuration</a></li>");
builder.append("</ul>");
builder.append("</div>");
return builder.toString();
} |
java | @XmlElementDecl(namespace = "http://schema.intuit.com/finance/v3", name = "TaxCode", substitutionHeadNamespace = "http://schema.intuit.com/finance/v3", substitutionHeadName = "IntuitObject")
public JAXBElement<TaxCode> createTaxCode(TaxCode value) {
return new JAXBElement<TaxCode>(_TaxCode_QNAME, TaxCode.class, null, value);
} |
java | public static List<String> splitEqually(String _text, int _len) {
if (_text == null) {
return null;
}
List<String> ret = new ArrayList<String>((_text.length() + _len - 1) / _len);
for (int start = 0; start < _text.length(); start += _len) {
ret.add(_text.substring(start, Math.min(_text.length(), start + _len)));
}
return ret;
} |
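The same fixed-width chunking in Python, for comparison (a sketch, not part of the library above):

    def split_equally(text, n):
        # Slice into chunks of length n; the last chunk may be shorter.
        if text is None:
            return None
        return [text[i:i + n] for i in range(0, len(text), n)]

    print(split_equally("abcdefgh", 3))  # ['abc', 'def', 'gh']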
java | public int lookupIndex(short value) {
for (int i=0; i<elements.length; i++) {
if (elements[i] == value) {
return i;
}
}
return -1;
} |
python | def expect(self, c):
"""
If the current position doesn't hold the specified c character then it raises an
exception, otherwise it skips the specified character (moves the current position forward).
"""
if self.peek() != c:
self.error('Expected "%c"' % (c,))
self.skip_char() |
python | def set_parallel_value_for_key(self, key, value):
"""
Set a globally available key and value that can be accessed
from all the pabot processes.
"""
if self._remotelib:
self._remotelib.run_keyword('set_parallel_value_for_key',
[key, value], {})
else:
_PabotLib.set_parallel_value_for_key(self, key, value) |
python | def set_aliases_and_defaults(self, aliases_config=None,
default_properties=None):
"""
Set the alias config and defaults to use. Typically used when
switching to a collection with a different schema.
Args:
aliases_config:
An alias dict to use. Defaults to None, which means the default
aliases defined in "aliases.json" is used. See constructor
for format.
default_properties:
List of property names (strings) to use by default, if no
properties are given to the 'properties' argument of
query().
"""
if aliases_config is None:
with open(os.path.join(os.path.dirname(__file__),
"aliases.json")) as f:
d = json.load(f)
self.aliases = d.get("aliases", {})
self.default_criteria = d.get("defaults", {})
else:
self.aliases = aliases_config.get("aliases", {})
self.default_criteria = aliases_config.get("defaults", {})
# set default properties
if default_properties is None:
self._default_props, self._default_prop_dict = None, None
else:
self._default_props, self._default_prop_dict = \
self._parse_properties(default_properties) |
java | protected FieldDescriptor[] buildFieldsForSelect(ClassDescriptor cld)
{
DescriptorRepository repository = cld.getRepository();
Set fields = new ListOrderedSet(); // keep the order of the fields
// add Standard Fields
// MBAIRD: if the object being queried on has multiple classes mapped to the table,
// then we will get all the fields that are a unique set across all those classes so if we need to
// we can materialize an extent
FieldDescriptor fds[] = repository.getFieldDescriptorsForMultiMappedTable(cld);
for (int i = 0; i < fds.length; i++)
{
fields.add(fds[i]);
}
// add inherited Fields. This is important when querying for a class having a super-reference
fds = cld.getFieldDescriptor(true);
for (int i = 0; i < fds.length; i++)
{
fields.add(fds[i]);
}
// add Fields of joined subclasses
Class[] multiJoinedClasses = repository.getSubClassesMultipleJoinedTables(cld, true);
for (int c = 0; c < multiJoinedClasses.length; c++)
{
ClassDescriptor subCld = repository.getDescriptorFor(multiJoinedClasses[c]);
fds = subCld.getFieldDescriptions();
for (int i = 0; i < fds.length; i++)
{
fields.add(fds[i]);
}
}
FieldDescriptor[] result = new FieldDescriptor[fields.size()];
fields.toArray(result);
return result;
} |
java | private String getValue(final List<String> attributes,final List<Class<?>> classes,String value,final String mappedFieldName,final Class<?> configuredClass,final Class<?> targetClass){
String regex = getValue(value,mappedFieldName);
String mappedClassName = configuredClass.getSimpleName();
String targetClassName = targetClass.getSimpleName();
/* IF ATTRIBUTES AND CLASSES ARE EMPTY */
if( attributes.isEmpty() && classes.isEmpty() ){
String targetFieldName = fieldName(targetClass,regex);
if(!isNull(targetFieldName))
return targetFieldName;
Error.mapping(mappedFieldName, mappedClassName, regex, targetClassName);
}
/* IF ATTRIBUTES IS EMPTY AND CLASSES NOT */
if( attributes.isEmpty() && !classes.isEmpty() ){
if(classes.contains(targetClass)){
String targetFieldName = fieldName(targetClass,regex);
if(!isNull(targetFieldName))
return targetFieldName;
}
Error.mapping(mappedFieldName, mappedClassName, regex, targetClassName);
}
/* IF ATTRIBUTES AND CLASSES ARE VALUED AND THEY HAVE THE SAME LENGTH */
if( !attributes.isEmpty() && !classes.isEmpty() )
if(attributes.size()==classes.size())
if(classes.contains(targetClass)){
// get the attribute from attributes, positioned at the same index of targetClass in classes
String targetClassValue = attributes.get(classes.indexOf(targetClass));
regex = getValue(targetClassValue,mappedFieldName);
String targetFieldName = fieldName(targetClass,regex);
if(!isNull(targetFieldName))
return targetFieldName;
Error.mapping(mappedFieldName, mappedClassName, regex, targetClassName);
}else
Error.mapping(mappedFieldName, mappedClassName, targetClassName);
else
Error.mapping(mappedFieldName, mappedClassName);
/* IF ATTRIBUTES IS FULL AND CLASSES IS EMPTY */
if( !attributes.isEmpty() && classes.isEmpty() )
for (String str : attributes){
regex = getValue(str,mappedFieldName);
// if exist the target field in targetClass
String targetFieldName = fieldName(targetClass,regex);
if(!isNull(targetFieldName))
//returns the corresponding name
return targetFieldName;
}
Error.mapping(mappedFieldName, configuredClass,targetClass);
return "this return is never used";
} |
python | def connect(self, coro):
"""
The coroutine `coro` is connected to the signal. The coroutine must
return a true value, unless it wants to be disconnected from the
signal.
.. note::
This is different from the return value convention with
:attr:`AdHocSignal.STRONG` and :attr:`AdHocSignal.WEAK`.
:meth:`connect` returns a token which can be used with
:meth:`disconnect` to disconnect the coroutine.
"""
self.logger.debug("connecting %r", coro)
return self._connect(coro) |
python | def calc_max_flexural_wavelength(self):
"""
Returns the approximate maximum flexural wavelength
This is important when padding of the grid is required: in Flexure (this
code), grids are padded out to one maximum flexural wavelength, but in any
case, the flexural wavelength is a good characteristic distance for any
truncation limit
"""
if np.isscalar(self.D):
Dmax = self.D
else:
Dmax = self.D.max()
# This is an approximation if there is fill that evolves with iterations
# (e.g., water), but should be good enough that this won't do much to it
alpha = (4*Dmax/(self.drho*self.g))**.25 # 2D flexural parameter
self.maxFlexuralWavelength = 2*np.pi*alpha
self.maxFlexuralWavelength_ncells_x = int(np.ceil(self.maxFlexuralWavelength / self.dx))
self.maxFlexuralWavelength_ncells_y = int(np.ceil(self.maxFlexuralWavelength / self.dy)) |
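A worked example with illustrative numbers (the values are assumptions, not from the library): for D = 1.6e23 N·m, Δρ = 3300 kg/m³ and g = 9.81 m/s², the flexural parameter comes out near 67 km and the maximum flexural wavelength near 420 km.

    import numpy as np

    D = 1.6e23        # flexural rigidity [N m] -- illustrative value
    drho = 3300.0     # density contrast [kg/m^3]
    g = 9.81          # gravity [m/s^2]

    alpha = (4 * D / (drho * g)) ** 0.25         # 2D flexural parameter
    print(alpha / 1e3, 2 * np.pi * alpha / 1e3)  # ~66.7 km, ~419 km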
java | @Deprecated
public void update(String projectKey, WikiPageDetail detail) throws RedmineException {
String urlSafeTitle = WikiPageDetail.getUrlSafeString(detail.getTitle());
transport.updateChildEntry(Project.class, projectKey, detail, urlSafeTitle);
} |
python | def discover_OP_information(OP_uri):
"""
Discovers information about the provided OpenID Provider.
:param OP_uri: The base URI of the Provider information is requested for.
:type OP_uri: str
:returns: The contents of the Provider metadata document.
:rtype: dict
.. versionadded:: 1.0
"""
_, content = httplib2.Http().request(
'%s/.well-known/openid-configuration' % OP_uri)
return _json_loads(content) |
java | public void forEach(final CounterConsumer consumer)
{
int counterId = 0;
final AtomicBuffer metaDataBuffer = this.metaDataBuffer;
final AtomicBuffer valuesBuffer = this.valuesBuffer;
for (int i = 0, capacity = metaDataBuffer.capacity(); i < capacity; i += METADATA_LENGTH)
{
final int recordStatus = metaDataBuffer.getIntVolatile(i);
if (RECORD_ALLOCATED == recordStatus)
{
consumer.accept(
valuesBuffer.getLongVolatile(counterOffset(counterId)), counterId, labelValue(metaDataBuffer, i));
}
else if (RECORD_UNUSED == recordStatus)
{
break;
}
counterId++;
}
} |
python | def NetFxSdkDir(self):
"""
Microsoft .NET Framework SDK directory.
"""
for ver in self.NetFxSdkVersion:
loc = os.path.join(self.ri.netfx_sdk, ver)
sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder')
if sdkdir:
break
return sdkdir or '' |
java | public static void release(ClassLoader classLoader) {
// JULI's log manager looks at the current classLoader so there is no
// need to use the passed in classLoader, the default implementation
// does not so calling reset in that case will break things
if (!LogManager.getLogManager().getClass().getName().equals(
"java.util.logging.LogManager")) {
LogManager.getLogManager().reset();
}
} |
python | def clean(self, value, initial=None):
"""
Most part of this method is a copy of
django.forms.MultiValueField.clean, with the exception of initial
value handling (this need for correct processing FileField's).
All original comments saved.
"""
if initial is None:
initial = [None for x in range(0, len(value))]
else:
if not isinstance(initial, list):
initial = self.widget.decompress(initial)
clean_data = []
errors = []
if not value or isinstance(value, (list, tuple)):
if (not value or not [v for v in value if
v not in self.empty_values]) \
and (not initial or not [v for v in initial if
v not in self.empty_values]):
if self.required:
raise ValidationError(self.error_messages['required'],
code='required')
else:
raise ValidationError(self.error_messages['invalid'],
code='invalid')
for i, field in enumerate(self.fields):
try:
field_value = value[i]
except IndexError:
field_value = None
try:
field_initial = initial[i]
except IndexError:
field_initial = None
if field_value in self.empty_values and \
field_initial in self.empty_values:
if self.require_all_fields:
# Raise a 'required' error if the MultiValueField is
# required and any field is empty.
if self.required:
raise ValidationError(self.error_messages['required'],
code='required')
elif field.required:
# Otherwise, add an 'incomplete' error to the list of
# collected errors and skip field cleaning, if a required
# field is empty.
if field.error_messages['incomplete'] not in errors:
errors.append(field.error_messages['incomplete'])
continue
try:
clean_data.append(field.clean(field_value, field_initial))
except ValidationError as e:
# Collect all validation errors in a single list, which we'll
# raise at the end of clean(), rather than raising a single
# exception for the first error we encounter. Skip duplicates.
errors.extend(m for m in e.error_list if m not in errors)
if errors:
raise ValidationError(errors)
out = self.compress(clean_data)
self.validate(out)
self.run_validators(out)
return out |
python | def actor(fn=None, *, actor_class=Actor, actor_name=None, queue_name="default", priority=0, broker=None, **options):
"""Declare an actor.
Examples:
>>> import dramatiq
>>> @dramatiq.actor
... def add(x, y):
... print(x + y)
...
>>> add
Actor(<function add at 0x106c6d488>, queue_name='default', actor_name='add')
>>> add(1, 2)
3
>>> add.send(1, 2)
Message(
queue_name='default',
actor_name='add',
args=(1, 2), kwargs={}, options={},
message_id='e0d27b45-7900-41da-bb97-553b8a081206',
message_timestamp=1497862448685)
Parameters:
fn(callable): The function to wrap.
actor_class(type): Type created by the decorator. Defaults to
:class:`Actor` but can be any callable as long as it returns an
actor and takes the same arguments as the :class:`Actor` class.
actor_name(str): The name of the actor.
queue_name(str): The name of the queue to use.
priority(int): The actor's global priority. If two tasks have
been pulled on a worker concurrently and one has a higher
priority than the other then it will be processed first.
Lower numbers represent higher priorities.
broker(Broker): The broker to use with this actor.
**options(dict): Arbitrary options that vary with the set of
middleware that you use. See ``get_broker().actor_options``.
Returns:
Actor: The decorated function.
"""
def decorator(fn):
nonlocal actor_name, broker
actor_name = actor_name or fn.__name__
if not _queue_name_re.fullmatch(queue_name):
raise ValueError(
"Queue names must start with a letter or an underscore followed "
"by any number of letters, digits, dashes or underscores."
)
broker = broker or get_broker()
invalid_options = set(options) - broker.actor_options
if invalid_options:
invalid_options_list = ", ".join(invalid_options)
raise ValueError((
"The following actor options are undefined: %s. "
"Did you forget to add a middleware to your Broker?"
) % invalid_options_list)
return actor_class(
fn, actor_name=actor_name, queue_name=queue_name,
priority=priority, broker=broker, options=options,
)
if fn is None:
return decorator
return decorator(fn) |
java | @Override
protected void initialize() throws Exception {
super.initialize();
// sanity check: the tail has to update either the workset or the solution set
if (!isWorksetUpdate && !isSolutionSetUpdate) {
throw new RuntimeException("The iteration tail doesn't update the workset or the solution set.");
}
// set the last output collector of this task to reflect the iteration tail state update:
// a) workset update,
// b) solution set update, or
// c) merged workset and solution set update
Collector<OT> outputCollector = null;
if (isWorksetUpdate) {
outputCollector = createWorksetUpdateOutputCollector();
// we need the WorksetUpdateOutputCollector separately to count the collected elements
if (isWorksetIteration) {
worksetUpdateOutputCollector = (WorksetUpdateOutputCollector<OT>) outputCollector;
}
}
if (isSolutionSetUpdate) {
if (isWorksetIteration) {
outputCollector = createSolutionSetUpdateOutputCollector(outputCollector);
}
// Bulk iteration with termination criterion
else {
outputCollector = new Collector<OT>() {
@Override
public void collect(OT record) {}
@Override
public void close() {}
};
}
if (!isWorksetUpdate) {
solutionSetUpdateBarrier = SolutionSetUpdateBarrierBroker.instance().get(brokerKey());
}
}
setLastOutputCollector(outputCollector);
} |
python | def get_genres(self):
"""
Grab genre URLs from iTunes Podcast preview
"""
page = r.get(ITUNES_GENRES_URL)
tree = html.fromstring(page.content)
elements = tree.xpath("//a[@class='top-level-genre']")
return [e.attrib['href'] for e in elements] |
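This snippet presupposes module-level names that are not shown; a plausible reconstruction (an assumption, since the surrounding module isn't included):

import requests as r    # `r.get(...)` suggests the requests library
from lxml import html   # `html.fromstring(...)` suggests lxml.html

ITUNES_GENRES_URL = 'https://example.com/itunes/genres'  # hypothetical placeholder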
python | def send_measurement(self, measurement):
"""
Posts the provided Measurement object's data to the Station API.
:param measurement: the *pyowm.stationsapi30.measurement.Measurement*
object to be posted
:type measurement: *pyowm.stationsapi30.measurement.Measurement* instance
:returns: `None` if creation is successful; raises an exception otherwise
"""
assert measurement is not None
assert measurement.station_id is not None
status, _ = self.http_client.post(
MEASUREMENTS_URI,
params={'appid': self.API_key},
data=[self._structure_dict(measurement)],
headers={'Content-Type': 'application/json'}) |
java | public void processResources(List<Row> rows)
{
for (Row row : rows)
{
Resource resource = m_project.addResource();
processFields(m_resourceFields, row, resource);
resource.setResourceCalendar(getResourceCalendar(row.getInteger("clndr_id")));
// Even though we're not filling in a rate, filling in a time unit can still be useful
// so that we know what rate time unit was originally used in Primavera.
TimeUnit timeUnit = TIME_UNIT_MAP.get(row.getString("cost_qty_type"));
resource.setStandardRateUnits(timeUnit);
resource.setOvertimeRateUnits(timeUnit);
// Add User Defined Fields
populateUserDefinedFieldValues("RSRC", FieldTypeClass.RESOURCE, resource, resource.getUniqueID());
m_eventManager.fireResourceReadEvent(resource);
}
} |
python | def tryCommit(self, prepare: Prepare):
"""
Try to commit if the Prepare message is ready to be passed into the
commit phase.
"""
rv, reason = self.canCommit(prepare)
if rv:
self.doCommit(prepare)
else:
self.logger.debug("{} cannot send COMMIT since {}".format(self, reason)) |
java | private boolean updateTintColor(int[] state) {
final int trackColor = mTrackStateList.getColorForState(state, mTrackColor);
final int scrubberColor = mScrubberStateList.getColorForState(state, mScrubberColor);
final int thumbColor = mThumbStateList.getColorForState(state, mThumbColor);
if (trackColor != mTrackColor || scrubberColor != mScrubberColor || thumbColor != mThumbColor) {
mTrackColor = trackColor;
mScrubberColor = scrubberColor;
mThumbColor = thumbColor;
updateCurColor();
invalidateSelf();
return true;
}
return false;
} |
python | def dead_chips(self):
"""Generate the coordinates of all dead chips.
Yields
------
(x, y)
The coordinate of a dead chip.
"""
for x in range(self.width):
for y in range(self.height):
if (x, y) not in self:
yield (x, y) |
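A usage sketch, assuming the generator above is defined at module level so it can be attached to a stand-in class (the Machine class below is illustrative, not the original):

class Machine:
    dead_chips = dead_chips  # attach the generator above as a method

    def __init__(self, width, height, dead):
        self.width, self.height = width, height
        self._alive = {(x, y) for x in range(width)
                       for y in range(height)} - set(dead)

    def __contains__(self, xy):
        return xy in self._alive

m = Machine(2, 2, dead=[(1, 1)])
print(list(m.dead_chips()))  # [(1, 1)]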
python | def string_avg(strings, binary=True):
"""
Takes a list of strings of equal length and returns a string containing
the most common value from each index in the string.
Optional argument: binary - a boolean indicating whether or not to treat
strings as binary numbers (fill in leading zeros if lengths differ).
"""
if binary: # Assume this is a binary number and fill leading zeros
strings = deepcopy(strings)
longest = len(max(strings, key=len))
for i in range(len(strings)):
while len(strings[i]) < longest:
split_string = strings[i].split("b")
strings[i] = "0b0" + split_string[1]
avg = ""
for i in range(len(strings[0])):
opts = []
for s in strings:
opts.append(s[i])
avg += max(set(opts), key=opts.count)
return avg |
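A quick usage sketch (note the snippet assumes `from copy import deepcopy` is in scope):

print(string_avg(['0b101', '0b1', '0b111']))            # '0b101' after zero-padding '0b1'
print(string_avg(['abc', 'abd', 'bbd'], binary=False))  # 'abd'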
python | def update_model(self, tfi):
"""Update the model for the given tfi
:param tfi: taskfile info
:type tfi: :class:`TaskFileInfo`
:returns: None
:rtype: None
:raises: None
"""
if tfi.task.department.assetflag:
browser = self.assetbrws
else:
browser = self.shotbrws
if tfi.version == 1: # add descriptor
parent = browser.selected_indexes(2)[0]
ddata = treemodel.ListItemData([tfi.descriptor])
ditem = treemodel.TreeItem(ddata)
browser.model.insertRow(0, ditem, parent)
self.set_level(browser, 3, [tfi.descriptor]) |
java | public JComponent learn() {
double[][] data = dataset[datasetIndex].toArray(new double[dataset[datasetIndex].size()][]);
String[] names = dataset[datasetIndex].toArray(new String[dataset[datasetIndex].size()]);
if (names[0] == null) {
names = null;
}
int[] label = dataset[datasetIndex].toArray(new int[dataset[datasetIndex].size()]);
// Math here is presumably smile.math.Math, whose static array helpers
// (min, unique, colMin, colMax) are used below, not java.lang.Math
int min = Math.min(label);
for (int i = 0; i < label.length; i++) {
label[i] -= min;
}
long clock = System.currentTimeMillis();
FLD lda = new FLD(data, label, Math.unique(label).length > 3 ? 3 : 2);
System.out.format("Learn LDA from %d samples in %dms\n", data.length, System.currentTimeMillis()-clock);
double[][] y = lda.project(data);
PlotCanvas plot = new PlotCanvas(Math.colMin(y), Math.colMax(y));
if (names != null) {
plot.points(y, names);
} else if (dataset[datasetIndex].responseAttribute() != null) {
int[] labels = dataset[datasetIndex].toArray(new int[dataset[datasetIndex].size()]);
for (int i = 0; i < y.length; i++) {
plot.point(pointLegend, Palette.COLORS[labels[i]], y[i]);
}
} else {
plot.points(y, pointLegend);
}
plot.setTitle("Linear Discriminant Analysis");
return plot;
} |
python | def get_residue_annotations(self, start_resnum, end_resnum=None):
"""Retrieve letter annotations for a residue or a range of residues
Args:
start_resnum (int): Residue number
end_resnum (int): Optional residue number, specify if a range is desired
Returns:
dict: Letter annotations for this residue or residues
"""
if not end_resnum:
end_resnum = start_resnum
# Create a new SeqFeature
f = SeqFeature(FeatureLocation(start_resnum - 1, end_resnum))
# Get sequence properties
return f.extract(self).letter_annotations |
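A hedged usage sketch with Biopython primitives; it assumes a recent Biopython in which SeqFeature.extract accepts a SeqRecord, and binds the function onto a stand-in record because the original containing class isn't shown:

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.SeqFeature import SeqFeature, FeatureLocation

rec = SeqRecord(Seq('MKT'), letter_annotations={'phred_quality': [40, 38, 30]})
rec.get_residue_annotations = get_residue_annotations.__get__(rec)
print(rec.get_residue_annotations(2, 3))  # {'phred_quality': [38, 30]}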
python | def check(*args, **kwds):
"""Check wrapper, a factory for FontBakeryCheck
Requires all arguments of FontBakeryCheck but not `checkfunc`
which is passed via the decorator syntax.
"""
def wrapper(checkfunc):
return wraps(checkfunc)(FontBakeryCheck(checkfunc, *args, **kwds))
return wrapper |
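A usage sketch, assuming FontBakeryCheck takes the check id as its first positional argument (the id and body below are purely illustrative):

@check('com.example/check/demo')  # hypothetical check id
def com_example_check_demo(font):
    """An illustrative check body."""
    return True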
python | def p_expr_EQ_expr(p):
""" expr : expr EQ expr
"""
p[0] = make_binary(p.lineno(2), 'EQ', p[1], p[3], lambda x, y: x == y) |
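For context, a hypothetical reconstruction of the make_binary helper this PLY rule presupposes (the real one lives elsewhere in the grammar module):

def make_binary(lineno, op, left, right, func):
    # Fold the expression when both operands are plain constants;
    # otherwise build a generic binary AST node.
    if isinstance(left, (int, float)) and isinstance(right, (int, float)):
        return func(left, right)
    return ('binary', op, left, right, lineno)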
java | @Override
public final Iterable<T> children(final T root) {
checkNotNull(root);
return new FluentIterable<T>() {
@Override
public Iterator<T> iterator() {
return new AbstractIterator<T>() {
boolean doneLeft;
boolean doneRight;
@Override
protected T computeNext() {
if (!doneLeft) {
doneLeft = true;
Optional<T> left = leftChild(root);
if (left.isPresent()) {
return left.get();
}
}
if (!doneRight) {
doneRight = true;
Optional<T> right = rightChild(root);
if (right.isPresent()) {
return right.get();
}
}
return endOfData();
}
};
}
};
} |
java | public static <S1, I1, T1, S2, I2, T2, SP2, TP2> Mapping<S1, S2> rawCopy(TSTraversalMethod method,
TransitionSystem<S1, ? super I1, T1> in,
int limit,
Collection<? extends I1> inputs,
MutableAutomaton<S2, I2, T2, ? super SP2, ? super TP2> out,
Function<? super I1, ? extends I2> inputsMapping,
Function<? super S1, ? extends SP2> spMapping,
Function<? super T1, ? extends TP2> tpMapping) {
return rawCopy(method,
in,
limit,
inputs,
out,
inputsMapping,
spMapping,
tpMapping,
x -> true,
TransitionPredicates.alwaysTrue());
} |
python | def get_tl(self):
"""Returns the top left border of the cell"""
cell_above_left = CellBorders(self.cell_attributes,
*self.cell.get_above_left_key_rect())
return cell_above_left.get_r() |