language | func_code_string |
---|---|
java | public static File createTempFile(String prefix, String suffix, File dir, boolean isReCreat) throws IORuntimeException {
int exceptionsCount = 0;
while (true) {
try {
File file = File.createTempFile(prefix, suffix, dir).getCanonicalFile();
if (isReCreat) {
file.delete();
file.createNewFile();
}
return file;
} catch (IOException ioex) { // fixes java.io.WinNTFileSystem.createFileExclusively access denied
if (++exceptionsCount >= 50) {
throw new IORuntimeException(ioex);
}
}
}
} |
java | @Override
public void onGestureBegin(TransformGestureDetector detector) {
FLog.v(TAG, "onGestureBegin");
mPreviousTransform.set(mActiveTransform);
onTransformBegin();
// We only received a touch down event so far, and so we don't know yet in which direction a
// future move event will follow. Therefore, if we can't scroll in all directions, we have to
// assume the worst case where the user tries to scroll out of edge, which would cause
// transformation to be corrected.
mWasTransformCorrected = !canScrollInAllDirection();
} |
java | public void marshall(CreateRule createRule, ProtocolMarshaller protocolMarshaller) {
if (createRule == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(createRule.getInterval(), INTERVAL_BINDING);
protocolMarshaller.marshall(createRule.getIntervalUnit(), INTERVALUNIT_BINDING);
protocolMarshaller.marshall(createRule.getTimes(), TIMES_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public LocalDateTime minusDays(long days) {
    // negating Long.MIN_VALUE overflows, so subtract it as two additions instead
    return (days == Long.MIN_VALUE ? plusDays(Long.MAX_VALUE).plusDays(1) : plusDays(-days));
} |
python | def candidate(cls):
"""The ``Candidate``."""
return relationship(
"Candidate",
backref=backref(
camel_to_under(cls.__name__) + "s",
cascade="all, delete-orphan",
cascade_backrefs=False,
),
cascade_backrefs=False,
) |
python | def link_bytecode(cls, attr_dict: Dict[str, str]) -> Type["LinkableContract"]:
"""
Return a cloned contract factory with the deployment / runtime bytecode linked.
:param attr_dict: Dict[`ContractType`: `Address`] for all deployment and runtime link references.
"""
if not cls.unlinked_references and not cls.linked_references:
raise BytecodeLinkingError("Contract factory has no linkable bytecode.")
if not cls.needs_bytecode_linking:
raise BytecodeLinkingError(
"Bytecode for this contract factory does not require bytecode linking."
)
cls.validate_attr_dict(attr_dict)
bytecode = apply_all_link_refs(cls.bytecode, cls.unlinked_references, attr_dict)
runtime = apply_all_link_refs(
cls.bytecode_runtime, cls.linked_references, attr_dict
)
linked_class = cls.factory(
cls.web3, bytecode_runtime=runtime, bytecode=bytecode
)
if linked_class.needs_bytecode_linking:
raise BytecodeLinkingError(
"Expected class to be fully linked, but class still needs bytecode linking."
)
return linked_class |
java | public void exportAndBind() throws RemoteException, AlreadyBoundException,
InterruptedException {
Registry registry =
LocateRegistry
.createRegistry(arguments.getRegistryPortNumber());
registry.rebind(RMI_BINDING_NAME, this);
Thread.sleep(2000);
logger.info("RmiJournalReceiver is ready - journal directory is '"
+ arguments.getDirectoryPath().getAbsolutePath() + "'");
} |
python | def urls_old(self, protocol=Resource.Protocol.http):
'''
Iterate through all resources registered with this router
and create a list endpoint and a detail endpoint for each one.
Uses the router name as the prefix and the endpoint name the resource was registered with to assemble the url pattern.
Uses the constructor-passed url method or class for generating urls
'''
url_patterns = []
for endpoint, resource_class in self._registry.items():
setattr(resource_class, 'api_name', self.name)
setattr(resource_class, 'resource_name', endpoint)
# append any nested resources the resource may have
nested = []
for route in resource_class.nested_routes('/%s/%s/' % (self.name, endpoint)):
route = route._replace(handler=resource_class.wrap_handler(route.handler, protocol))
nested.append(route)
url_patterns.extend(nested)
# append resource as list
url_patterns.append(Route(
path='/%s/%s/' % (self.name, endpoint),
handler=resource_class.as_list(protocol),
methods=resource_class.route_methods(),
name='{}_{}_list'.format(self.name, endpoint).replace('/', '_')
))
# append resource as detail
url_patterns.append(Route(
path='/%s/%s/%s/' % (self.name, endpoint, resource_class.route_param('pk')),
handler=resource_class.as_detail(protocol),
methods=resource_class.route_methods(),
name='{}_{}_detail'.format(self.name, endpoint).replace('/', '_')
))
return url_patterns |
java | public static <A extends Number & Comparable<?>> NumberExpression<Double> asin(Expression<A> num) {
return Expressions.numberOperation(Double.class, Ops.MathOps.ASIN, num);
} |
java | public static ClassLoader getClassLoader(final Class<?> c)
{
if (System.getSecurityManager() == null)
return c.getClassLoader();
return AccessController.doPrivileged(new PrivilegedAction<ClassLoader>()
{
public ClassLoader run()
{
return c.getClassLoader();
}
});
} |
python | def print_table(column_names: IterableOfStrings,
rows: IterableOfTuples,
column_alignments: Optional[IterableOfStrings] = None,
primary_column_idx: int = 0,
) -> None:
"""
Prints a table of information to the console. Automatically determines if the
console is wide enough, and if not, displays the information in list form.
:param column_names: The heading labels
:param rows: A list of lists
:param column_alignments: An optional list of strings, using either '<' or '>'
to specify left or right alignment respectively
:param primary_column_idx: Used when displaying information in list form,
to determine which label should be the top-most one. Defaults to the first
label in ``column_names``.
"""
header_template = ''
row_template = ''
table_width = 0
type_formatters = {int: 'd', float: 'f', str: 's'}
types = [type_formatters.get(type(x), 'r') for x in rows[0]]
alignments = {int: '>', float: '>'}
column_alignments = (column_alignments or
[alignments.get(type(x), '<') for x in rows[0]])
def get_column_width(idx):
header_length = len(column_names[idx])
content_length = max(len(str(row[idx])) for row in rows)
return (content_length if content_length > header_length
else header_length)
for i in range(0, len(column_names)):
col_width = get_column_width(i)
header_col_template = f'{{:{col_width}}}'
col_template = f'{{:{column_alignments[i]}{col_width}{types[i]}}}'
if i == 0:
header_template += header_col_template
row_template += col_template
table_width += col_width
else:
header_template += ' ' + header_col_template
row_template += ' ' + col_template
table_width += 2 + col_width
# check if we can format the table horizontally
if table_width < get_terminal_width():
click.echo(header_template.format(*column_names))
click.echo('-' * table_width)
for row in rows:
try:
click.echo(row_template.format(*row))
except TypeError as e:
raise TypeError(f'{e}: {row!r}')
# otherwise format it vertically
else:
        max_label_width = max(len(label) for label in column_names)
non_primary_columns = [(i, col) for i, col in enumerate(column_names)
if i != primary_column_idx]
for row in rows:
type_ = types[primary_column_idx]
row_template = f'{{:>{max_label_width}s}}: {{:{type_}}}'
click.echo(row_template.format(column_names[primary_column_idx],
row[primary_column_idx]))
for i, label in non_primary_columns:
row_template = f'{{:>{max_label_width}s}}: {{:{types[i]}}}'
click.echo(row_template.format(label, row[i]))
click.echo() |
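A minimal usage sketch for `print_table` (hypothetical data; assumes the function and its `click`/terminal helpers are importable from the module above):

```python
# Column types (str, int, float) drive per-column formatting and alignment.
print_table(
    column_names=['name', 'count', 'ratio'],
    rows=[('alpha', 10, 0.50), ('beta', 2, 0.25)],
    column_alignments=['<', '>', '>'],
)
```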
java | public <R> Future<R> map(final Function<? super T, R> success, final Function<Throwable, R> failure) {
return Future.of(future.thenApply(success)
.exceptionally(failure));
} |
java | public static final <VertexKey extends Comparable<VertexKey>, VertexValue, Message, EdgeValue>
VertexCentricIteration<VertexKey, VertexValue, Message, EdgeValue> withValuedEdges(
DataSet<Tuple3<VertexKey, VertexKey, EdgeValue>> edgesWithValue,
VertexUpdateFunction<VertexKey, VertexValue, Message> uf,
MessagingFunction<VertexKey, VertexValue, Message, EdgeValue> mf,
int maximumNumberOfIterations)
{
return new VertexCentricIteration<VertexKey, VertexValue, Message, EdgeValue>(uf, mf, edgesWithValue, maximumNumberOfIterations, true);
} |
python | def set_serializer(self, serializer_name, compression=None):
"""
Configure the serializer to use for communication with the server.
The serializer specified must be valid and in the
:py:data:`.g_serializer_drivers` map.
:param str serializer_name: The name of the serializer to use.
:param str compression: The name of a compression library to use.
"""
self.serializer = Serializer(serializer_name, charset='UTF-8', compression=compression)
self.logger.debug('using serializer: ' + serializer_name) |
python | def getXlogStatus(self):
"""Returns Transaction Logging or Recovery Status.
@return: Dictionary of status items.
"""
inRecovery = None
if self.checkVersion('9.0'):
inRecovery = self._simpleQuery("SELECT pg_is_in_recovery();")
cur = self._conn.cursor()
if inRecovery:
cols = ['pg_last_xlog_receive_location()',
'pg_last_xlog_replay_location()',]
headers = ['xlog_receive_location',
'xlog_replay_location',]
if self.checkVersion('9.1'):
cols.extend(['pg_last_xact_replay_timestamp()',
'pg_is_xlog_replay_paused()',])
headers.extend(['xact_replay_timestamp',
'xlog_replay_paused',])
cur.execute("""SELECT %s;""" % ','.join(cols))
headers = ('xlog_receive_location', 'xlog_replay_location')
else:
cur.execute("""SELECT
pg_current_xlog_location(),
pg_xlogfile_name(pg_current_xlog_location());""")
headers = ('xlog_location', 'xlog_filename')
row = cur.fetchone()
info_dict = dict(zip(headers, row))
if inRecovery is not None:
info_dict['in_recovery'] = inRecovery
return info_dict |
java | public List<JSTypeExpression> getImplementedInterfaces() {
if (info == null || info.implementedInterfaces == null) {
return ImmutableList.of();
}
return Collections.unmodifiableList(info.implementedInterfaces);
} |
java | private List<List<String>> processAcceptLanguage(String acceptLanguage) {
StringTokenizer languageTokenizer = new StringTokenizer(acceptLanguage, ",");
TreeMap<Double, List<String>> map = new TreeMap<Double, List<String>>(Collections.reverseOrder());
List<String> list;
while (languageTokenizer.hasMoreTokens()) {
String language = languageTokenizer.nextToken().trim();
if (language.length() == 0) {
continue;
}
int semicolonIndex = language.indexOf(';');
Double qValue = Double.valueOf(1);
if (semicolonIndex > -1) {
int qIndex = language.indexOf("q=");
String qValueStr = language.substring(qIndex + 2);
try {
qValue = Double.valueOf(qValueStr.trim());
} catch (NumberFormatException nfe) {
FFDCFilter.processException(nfe,
"EncodingUtils.processAcceptLanguage", "215");
}
language = language.substring(0, semicolonIndex);
}
if (language.length() > 0) {
if ((qValue.doubleValue() > 0) && (language.charAt(0) != '*')) {
list = map.get(qValue);
if (null == list) {
list = new ArrayList<String>(1);
}
list.add(language);
map.put(qValue, list);
}
}
}
List<List<String>> rc = null;
if (!map.isEmpty()) {
rc = new ArrayList<List<String>>(map.values());
}
return rc;
} |
python | def scale_means(self, hs_dims=None, prune=False):
"""Return list of column and row scaled means for this slice.
If a row/col doesn't have numerical values, return None for the
corresponding dimension. If a slice only has 1D, return only the column
scaled mean (as numpy array). If both row and col scaled means are
present, return them as two numpy arrays inside of a list.
"""
scale_means = self._cube.scale_means(hs_dims, prune)
if self.ca_as_0th:
# If slice is used as 0th CA, then we need to observe the 1st dimension,
# because the 0th dimension is CA items, which is only used for slicing
# (and thus doesn't have numerical values, and also doesn't constitute any
# dimension of the actual crosstabs that will be created in this case).
scale_means = scale_means[0][-1]
if scale_means is None:
return [None]
return [scale_means[self._index]]
return scale_means[self._index] |
java | private void checkBuffer() {
// Doc count limit
if (docBuffer.size() >= bufferDocLimit) {
log.debug("=== Buffer check: Doc limit reached '{}'", docBuffer.size());
submitBuffer(false);
return;
}
// Size limit
if (bufferSize > bufferSizeLimit) {
log.debug("=== Buffer check: Size exceeded '{}'", bufferSize);
submitBuffer(false);
return;
}
// Time limit
long age = ((new Date().getTime()) - bufferOldest) / 1000;
if (age > bufferTimeLimit) {
log.debug("=== Buffer check: Age exceeded '{}s'", age);
submitBuffer(false);
return;
}
} |
java | public JsonObject putAndEncrypt(String name, List<?> value, String providerName) {
addValueEncryptionInfo(name, providerName, true);
return put(name, JsonArray.from(value));
} |
python | def update_extent_from_rectangle(self):
"""Update extent value in GUI based from the QgsMapTool rectangle.
.. note:: Delegates to update_extent()
"""
self.show()
self.canvas.unsetMapTool(self.rectangle_map_tool)
self.canvas.setMapTool(self.pan_tool)
rectangle = self.rectangle_map_tool.rectangle()
if rectangle:
self.bounding_box_group.setTitle(
self.tr('Bounding box from rectangle'))
extent = rectangle_geo_array(rectangle, self.iface.mapCanvas())
self.update_extent(extent) |
python | def load_package(package_dir, package=None, exclude=None, default_section=_DEFAULT_SECTION):
"""
从目录中载入配置文件
:param package_dir:
:param package:
:param exclude:
:param default_section:
:return:
"""
init_py = '__init__.py'
py_ext = '.py'
files = os.listdir(package_dir)
if init_py in files:
files = [f for f in files if f != init_py]
if package:
files.insert(0, package)
def init_package(item):
if str(item).endswith(py_ext):
item = item[:-3]
if package:
item = '{package}.{item}'.format(package=package, item=item)
elif _is_conf(item):
item = '{package_dir}/{item}'.format(package_dir=package_dir, item=item)
else:
item = package
return str(item)
logger.debug(files)
files = [init_package(f) for f in files]
if exclude:
files = [f for f in files if f not in exclude]
settings = load(files, default_section)
return merge(settings) |
python | def add_devs_custom_views(custom_view_name, dev_list, auth, url):
"""
function takes a list of devIDs from devices discovered in the HPE IMC platform and issues a
RESTFUL call to add the list of devices to a specific custom views from HPE IMC.
:param custom_view_name: str of the target custom view name
:param dev_list: list containing the devID of all devices to be contained in this custom view.
:param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
:param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
:return: str of creation results ( "view " + name + "created successfully"
:rtype: str
>>> from pyhpeimc.auth import *
>>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
"""
    view_id = get_custom_views(auth, url, name=custom_view_name)
    if view_id is None:
        print("View " + custom_view_name + " doesn't exist")
        return view_id
    view_id = view_id[0]['symbolId']
add_devs_custom_views_url = '/imcrs/plat/res/view/custom/' + str(view_id)
device_list = []
for dev in dev_list:
new_dev = {"id": dev}
device_list.append(new_dev)
payload = '''{"device" : ''' + json.dumps(device_list) + '''}'''
print(payload)
    f_url = url + add_devs_custom_views_url
    try:
        response = requests.put(f_url, data=payload, auth=auth, headers=HEADERS)
        if response.status_code == 204:
            print('View ' + custom_view_name + ' : Devices Successfully Added')
            return response.status_code
    except requests.exceptions.RequestException as error:
        return "Error:\n" + str(error) + ' add_devs_custom_views: An Error has occurred' |
python | def validate_regexp(ctx, param, value):
""" Validate and compile regular expression. """
if value:
        try:
            value = re.compile(value)
        except re.error:
            raise click.BadParameter('invalid regular expression.')
return value |
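A validator with this `(ctx, param, value)` signature is typically wired up as a `click` option callback; a hedged sketch (the option name is illustrative):

```python
import click

@click.command()
@click.option('--pattern', callback=validate_regexp,
              help='Regular expression to filter results by.')
def search(pattern):
    # The callback has already compiled the pattern (or left it as None).
    click.echo(pattern.pattern if pattern else 'no pattern given')
```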
python | def parse(self):
"""
    Reads all lines from the current data source and yields FileResult objects
"""
if self.data is None:
raise ValueError('No input data provided, unable to parse')
for line in self.data:
parts = line.strip().split()
try:
path = parts[0]
code = parts[1]
path, line, char = path.split(':')[:3]
if not re.match(POSITION, line):
continue
if not re.match(POSITION, char):
continue
if not re.match(ERROR_CODE, code):
continue
if not re.match(FILEPATH, path):
continue
# For parts mismatch
except IndexError:
continue
# For unpack mismatch
except ValueError:
continue
yield path, code, line, char, ' '.join(parts[2:]) |
python | def csv_to_transactions(handle, source_encoding="latin1",
date_format="%d-%m-%Y", thousand_sep=".", decimal_sep=","):
"""
Parses CSV data from stream and returns ``Transactions``.
    Args:
        handle: A stream of CSV data to read from.
        source_encoding: The encoding that will be used to decode strings
            to UTF-8.
        date_format: The format of dates in each row.
        thousand_sep: The thousand separator in money amounts.
        decimal_sep: The decimal separator in money amounts.
Returns:
A ``Transactions`` object.
"""
trans = Transactions()
rows = csv.reader(handle, delimiter=";", quotechar="\"")
for index, row in enumerate(rows):
trans.append(Parse.csv_row_to_transaction(index, row))
return trans |
java | private void obtainStyledAttributes(@Nullable final AttributeSet attributeSet,
@AttrRes final int defaultStyle,
@StyleRes final int defaultStyleResource) {
TypedArray typedArray = getContext()
.obtainStyledAttributes(attributeSet, R.styleable.ColorPalettePreference,
defaultStyle, defaultStyleResource);
try {
obtainColorPalette(typedArray);
obtainDialogPreviewSize(typedArray);
obtainDialogPreviewShape(typedArray);
obtainDialogPreviewBorderWidth(typedArray);
obtainDialogPreviewBorderColor(typedArray);
obtainDialogPreviewBackground(typedArray);
obtainNumberOfColumns(typedArray);
} finally {
typedArray.recycle();
}
} |
java | public static synchronized <T> T mock(Class<T> type) {
return DefaultMockCreator.mock(type, false, false, null, null, (Method[]) null);
} |
java | public static OutputStream quoteOutputStream(final OutputStream out
) throws IOException {
return new OutputStream() {
private byte[] data = new byte[1];
@Override
public void write(byte[] data, int off, int len) throws IOException {
quoteHtmlChars(out, data, off, len);
}
@Override
public void write(int b) throws IOException {
data[0] = (byte) b;
quoteHtmlChars(out, data, 0, 1);
}
@Override
public void flush() throws IOException {
out.flush();
}
@Override
public void close() throws IOException {
out.close();
}
};
} |
java | public DescribeAgentVersionsResult withAgentVersions(AgentVersion... agentVersions) {
if (this.agentVersions == null) {
setAgentVersions(new com.amazonaws.internal.SdkInternalList<AgentVersion>(agentVersions.length));
}
for (AgentVersion ele : agentVersions) {
this.agentVersions.add(ele);
}
return this;
} |
java | public List<Discussion> getCommitDiscussions(Object projectIdOrPath, Integer commitId) throws GitLabApiException {
Pager<Discussion> pager = getCommitDiscussionsPager(projectIdOrPath, commitId, getDefaultPerPage());
return (pager.all());
} |
python | def chugid_and_umask(runas, umask, group=None):
'''
    Helper method for subprocess.Popen to initialise uid/gid and umask
for the new process.
'''
set_runas = False
set_grp = False
current_user = getpass.getuser()
if runas and runas != current_user:
set_runas = True
runas_user = runas
else:
runas_user = current_user
current_grp = grp.getgrgid(pwd.getpwnam(getpass.getuser()).pw_gid).gr_name
if group and group != current_grp:
set_grp = True
runas_grp = group
else:
runas_grp = current_grp
if set_runas or set_grp:
chugid(runas_user, runas_grp)
if umask is not None:
os.umask(umask) |
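A helper like this is meant to be handed to `subprocess.Popen` as a `preexec_fn` so it runs in the child before exec; a hedged sketch (user name and umask are illustrative):

```python
import functools
import subprocess

# Run `ls` as 'someuser' with a restrictive umask (POSIX only; the user must exist).
subprocess.Popen(
    ['ls'],
    preexec_fn=functools.partial(chugid_and_umask, 'someuser', 0o077),
)
```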
java | public static Document create(@NonNull String text, @NonNull Map<AttributeType, ?> attributes) {
return DocumentFactory.getInstance().create(text, Hermes.defaultLanguage(), attributes);
} |
python | def get_direct_message(self):
""" :reference: https://developer.twitter.com/en/docs/direct-messages/sending-and-receiving/api-reference/get-message
:allowed_param:'id', 'full_text'
"""
return bind_api(
api=self,
path='/direct_messages/show/{id}.json',
payload_type='direct_message',
allowed_param=['id', 'full_text'],
require_auth=True
) |
java | public static <T> T[] removeEle(T[] array, T element) throws IllegalArgumentException {
return remove(array, indexOf(array, element));
} |
java | private File writeVersionInfoTemplateToTempFile() throws MojoExecutionException
{
try
{
            final File versionInfoSrc = File.createTempFile( "msbuild-maven-plugin_" + MOJO_NAME, null );
            try ( InputStream is = getClass().getResourceAsStream( DEFAULT_VERSION_INFO_TEMPLATE );
                  FileOutputStream dest = new FileOutputStream( versionInfoSrc ) )
            {
                byte[] buffer = new byte[1024];
                int read;
                while ( ( read = is.read( buffer ) ) != -1 )
                {
                    dest.write( buffer, 0, read );
                }
            }
return versionInfoSrc;
}
catch ( IOException ioe )
{
String msg = "Failed to create temporary version file";
getLog().error( msg, ioe );
throw new MojoExecutionException( msg, ioe );
}
} |
java | public static String getCurrentAbsolutPathWithoutDotAndSlash()
{
final File currentAbsolutPath = new File(".");
return currentAbsolutPath.getAbsolutePath().substring(0,
currentAbsolutPath.getAbsolutePath().length() - 2);
} |
python | def run(self, packets):
"""Run automatically.
Positional arguments:
* packets -- list<dict>, list of packet dicts to be reassembled
"""
for packet in packets:
frag_check(packet, protocol=self.protocol)
info = Info(packet)
self.reassembly(info)
self._newflg = True |
python | def strip_context_items(self, a_string):
"""Strip PaloAlto-specific output.
PaloAlto will also put a configuration context:
[edit]
This method removes those lines.
"""
strings_to_strip = [r"\[edit.*\]"]
response_list = a_string.split(self.RESPONSE_RETURN)
last_line = response_list[-1]
for pattern in strings_to_strip:
if re.search(pattern, last_line):
return self.RESPONSE_RETURN.join(response_list[:-1])
return a_string |
java | @NonNull
private List<String> mapObsoleteElements(List<String> names) {
List<String> elementsToRemove = new ArrayList<>(names.size());
for (String name : names) {
if (name.startsWith("android")) continue;
elementsToRemove.add(name);
}
return elementsToRemove;
} |
java | public static String preprocess(String text)
{
return text.replaceAll("\\p{P}", " ").replaceAll("\\s+", " ").toLowerCase(Locale.getDefault());
} |
python | def get_handler():
"""Return the handler as a named tuple.
The named tuple attributes are 'host', 'port', 'signum'.
Return None when no handler has been registered.
"""
host, port, signum = _pdbhandler._registered()
if signum:
return Handler(host if host else DFLT_ADDRESS[0].encode(),
port if port else DFLT_ADDRESS[1], signum) |
python | def parse_file_type(file_type):
'''
:param file_type: file type string 'description (*.file_extension1;*.file_extension2)' as required by file filter in create_file_dialog
:return: (description, file extensions) tuple
'''
valid_file_filter = r'^([\w ]+)\((\*(?:\.(?:\w+|\*))*(?:;\*\.\w+)*)\)$'
match = re.search(valid_file_filter, file_type)
if match:
return match.group(1).rstrip(), match.group(2)
else:
raise ValueError('{0} is not a valid file filter'.format(file_type)) |
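A sketch of the expected round trip through the regular expression above:

```python
description, extensions = parse_file_type('Image Files (*.png;*.jpg)')
# description == 'Image Files', extensions == '*.png;*.jpg'
```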
python | def reindex(self, indexers=None, method=None, tolerance=None, copy=True,
**indexers_kwargs):
"""Conform this object onto a new set of indexes, filling in
missing values with NaN.
Parameters
----------
    indexers : dict, optional
Dictionary with keys given by dimension names and values given by
arrays of coordinates tick labels. Any mis-matched coordinate
values will be filled in with NaN, and any mis-matched dimension
names will simply be ignored.
One of indexers or indexers_kwargs must be provided.
method : {None, 'nearest', 'pad'/'ffill', 'backfill'/'bfill'}, optional
Method to use for filling index values in ``indexers`` not found in
this dataset:
* None (default): don't fill gaps
* pad / ffill: propagate last valid index value forward
* backfill / bfill: propagate next valid index value backward
* nearest: use nearest valid index value (requires pandas>=0.16)
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Requires pandas>=0.17.
copy : bool, optional
If ``copy=True``, data in the return value is always copied. If
``copy=False`` and reindexing is unnecessary, or can be performed
with only slice operations, then the output may share memory with
the input. In either case, a new xarray object is always returned.
**indexers_kwarg : {dim: indexer, ...}, optional
Keyword arguments in the same form as ``indexers``.
One of indexers or indexers_kwargs must be provided.
Returns
-------
reindexed : Dataset
Another dataset, with this dataset's data but replaced coordinates.
See Also
--------
Dataset.reindex_like
align
pandas.Index.get_indexer
"""
indexers = utils.either_dict_or_kwargs(indexers, indexers_kwargs,
'reindex')
bad_dims = [d for d in indexers if d not in self.dims]
if bad_dims:
raise ValueError('invalid reindex dimensions: %s' % bad_dims)
variables, indexes = alignment.reindex_variables(
self.variables, self.sizes, self.indexes, indexers, method,
tolerance, copy=copy)
coord_names = set(self._coord_names)
coord_names.update(indexers)
return self._replace_with_new_dims(
variables, coord_names, indexes=indexes) |
python | def ctype(self):
"""Returns the name of the c_type from iso_c_binding to use when declaring
the output parameter for interaction with python ctypes.
"""
if self.dtype == "logical":
return "C_BOOL"
elif self.dtype == "complex":
#We don't actually know what the precision of the complex numbers is because
#it is defined by the developer when they construct the number with CMPLX()
#We just return double to be safe; it is a widening conversion, so there
#shouldn't be any issues.
return "C_DOUBLE_COMPLEX"
elif self.dtype == "character":
return "C_CHAR"
elif self.dtype in ["integer", "real"]:
if self.kind is None:
if self.dtype == "integer":
return "C_INT"
else:
return "C_FLOAT"
if self._kind_module is None and self.kind is not None:
self.dependency()
if self._kind_module is None and self.kind is not None:
raise ValueError("Can't find the c-type for {}".format(self.definition()))
elif self._kind_module is not None:
#We look up the parameter in the kind module to find out its
#precision etc.
import re
default = self._kind_module.members[self.kind].default
vals = default.split("(")[1].replace(")", "")
ints = list(map(int, re.split(",\s*", vals)))
if self.dtype == "integer" and len(ints) == 1:
if ints[0] <= 15:
return "C_SHORT"
elif ints[0] <= 31:
return "C_INT"
elif ints[0] <= 63:
return "C_LONG"
elif self.dtype == "real" and len(ints) == 2:
if ints[0] <= 24 and ints[1] < 127:
return "C_FLOAT"
elif ints[0] <= 53 and ints[1] < 1023:
return "C_DOUBLE" |
java | protected void skipPad() throws IOException {
if (bytesRead > 0) {
int extra = (int) (bytesRead % TarConstants.DATA_BLOCK);
if (extra > 0) {
long bs = 0;
while (bs < TarConstants.DATA_BLOCK - extra) {
long res = skip(TarConstants.DATA_BLOCK - extra - bs);
bs += res;
}
}
}
} |
python | def dump(self):
"""
Dump the database using the postgres custom format
"""
dumpfile = self.args.dumpfile
if not dumpfile:
db, env = self.get_db_args_env()
dumpfile = fileutils.timestamp_filename(
'omero-database-%s' % db['name'], 'pgdump')
log.info('Dumping database to %s', dumpfile)
if not self.args.dry_run:
self.pgdump('-Fc', '-f', dumpfile) |
java | public int getBlastWordSize() {
if (param.containsKey(WORD_SIZE)) {
return Integer.parseInt(getAlignmentOption(WORD_SIZE));
}
// return default word size value
try {
BlastProgramEnum programType = getBlastProgram();
switch (programType) {
case blastn:
return 11;
case megablast:
return 28;
case blastp:
case blastx:
case tblastn:
case tblastx:
return 3;
default:
throw new UnsupportedOperationException("Blast program " + programType.name() + " is not supported.");
}
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException("Blast program " + getBlastProgram() + " is not supported.", e);
}
} |
python | def make_element(builder, tag, content):
"""Make an element with this tag and text content"""
builder.start(tag, {})
builder.data(content) # Must be UTF-8 encoded
builder.end(tag) |
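A small self-contained sketch using the standard-library `TreeBuilder`:

```python
from xml.etree.ElementTree import TreeBuilder, tostring

builder = TreeBuilder()
builder.start('root', {})
make_element(builder, 'greeting', 'hello')
builder.end('root')
print(tostring(builder.close()))  # b'<root><greeting>hello</greeting></root>'
```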
java | public ProxySettings setServer(URL url)
{
if (url == null)
{
return this;
}
try
{
return setServer(url.toURI());
}
catch (URISyntaxException e)
{
throw new IllegalArgumentException(e);
}
} |
java | @Override
public void eUnset(int featureID) {
switch (featureID) {
case AfplibPackage.MMORG__OV_LID:
setOVLid(OV_LID_EDEFAULT);
return;
case AfplibPackage.MMORG__FLAGS:
setFlags(FLAGS_EDEFAULT);
return;
case AfplibPackage.MMORG__OV_LNAME:
setOVLname(OV_LNAME_EDEFAULT);
return;
}
super.eUnset(featureID);
} |
java | public synchronized void addProjection( String externalNodeKey,
String projectedNodeKey,
String alias,
SessionCache systemSession) {
Projection projection = new Projection(externalNodeKey, projectedNodeKey, alias);
storeProjection(projection, systemSession);
Snapshot current = this.snapshot.get();
Snapshot updated = current.withProjection(projection);
this.snapshot.compareAndSet(current, updated);
} |
python | def list_by_group(self, id_ugroup):
"""Search Administrative Permission by Group User by identifier.
:param id_ugroup: Identifier of the Group User. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'perms': [{'ugrupo': < ugrupo_id >, 'permission': { 'function' < function >, 'id': < id > },
'id': < id >, 'escrita': < escrita >,
'leitura': < leitura >}, ... ] }
:raise InvalidParameterError: Group User is null and invalid.
:raise UGrupoNotFoundError: Group User not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if id_ugroup is None:
raise InvalidParameterError(
u'The identifier of Group User is invalid or was not informed.')
url = 'aperms/group/' + str(id_ugroup) + '/'
code, xml = self.submit(None, 'GET', url)
return self.response(code, xml) |
python | def t_quotedvar_DOLLAR_OPEN_CURLY_BRACES(t):
r'\$\{'
if re.match(r'[A-Za-z_]', peek(t.lexer)):
t.lexer.begin('varname')
else:
t.lexer.begin('php')
return t |
python | def chmod_r(root: str, permission: int) -> None:
"""
Recursive ``chmod``.
Args:
root: directory to walk down
    permission: e.g. ``stat.S_IWUSR``
"""
os.chmod(root, permission)
for dirpath, dirnames, filenames in os.walk(root):
for d in dirnames:
os.chmod(os.path.join(dirpath, d), permission)
for f in filenames:
os.chmod(os.path.join(dirpath, f), permission) |
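For example (a usage sketch; the path is illustrative):

```python
import stat

# Recursively set everything under /tmp/mydir to owner read/write/execute.
chmod_r('/tmp/mydir', stat.S_IRWXU)
```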
java | public void fireIndexedPropertyChange(String propertyName, int index, Object oldValue, Object newValue) {
if (oldValue == null || newValue == null || !oldValue.equals(newValue)) {
firePropertyChange(new IndexedPropertyChangeEvent(source, propertyName, oldValue, newValue, index));
}
} |
python | def verify_jwt_in_request():
"""
Ensure that the requester has a valid access token. This does not check the
    freshness of the access token. Raises an appropriate exception if there
    is no token or if the token is invalid.
"""
if request.method not in config.exempt_methods:
jwt_data = _decode_jwt_from_request(request_type='access')
ctx_stack.top.jwt = jwt_data
verify_token_claims(jwt_data)
_load_user(jwt_data[config.identity_claim_key]) |
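In flask-jwt-extended this helper is what `@jwt_required` calls under the hood; a hedged sketch of invoking it directly inside a view (app and route names are illustrative):

```python
from flask import Flask, jsonify

app = Flask(__name__)

@app.route('/protected')
def protected():
    verify_jwt_in_request()  # raises if the access token is missing or invalid
    return jsonify(status='ok')
```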
python | def add_note(self, content):
"""Add a note to the project.
.. warning:: Requires Todoist premium.
:param content: The note content.
:type content: str
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> project.add_note('Remember to update to the latest version.')
"""
args = {
'project_id': self.id,
'content': content
}
_perform_command(self.owner, 'note_add', args) |
java | public static File[] toFiles(final URL[] urls) throws UncheckedIOException {
if (N.isNullOrEmpty(urls)) {
return new File[0];
}
final File[] files = new File[urls.length];
for (int i = 0; i < urls.length; i++) {
files[i] = toFile(urls[i]);
}
return files;
} |
python | def select_from_fv_by_seeds(fv, seeds, unique_cls):
"""
    Tool for simple feature functions: select features from a feature array by seeds.
    :param fv: ndarray with linearized features. Its shape is MxN, where M is the number of image pixels and N is the number
    of features
    :param seeds: ndarray with seeds. Does not need to be linear.
    :param unique_cls: list of seed classes to use. Like [1, 2]
:return: fv_selection, seeds_selection - selection from feature vector and selection from seeds
"""
logger.debug("seeds" + str(seeds))
# fvlin = fv.reshape(-1, int(fv.size/seeds.size))
expected_shape = [seeds.size, int(fv.size/seeds.size)]
if fv.shape[0] != expected_shape[0] or fv.shape[1] != expected_shape[1]:
raise AssertionError("Wrong shape of input feature vector array fv")
# sd = seeds.reshape(-1, 1)
selection = np.in1d(seeds, unique_cls)
fv_selection = fv[selection]
seeds_selection = seeds.flatten()[selection]
# sd = sd[]
return fv_selection, seeds_selection |
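A minimal sketch with hypothetical data: four pixels with two features each, and a 2x2 seed image where labels 1 and 2 mark training pixels:

```python
import numpy as np

fv = np.array([[0.1, 1.0], [0.2, 2.0], [0.3, 3.0], [0.4, 4.0]])
seeds = np.array([[0, 1], [2, 0]])
fv_sel, seeds_sel = select_from_fv_by_seeds(fv, seeds, unique_cls=[1, 2])
# fv_sel -> rows 1 and 2 of fv; seeds_sel -> array([1, 2])
```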
java | @Override
public void returnConnection(Connection conn) {
try {
if (conn instanceof MyConnectionInvocationHandler) {
DbcHelper.returnConnection(((MyConnectionInvocationHandler) conn).target);
} else {
DbcHelper.returnConnection(conn);
}
} catch (SQLException e) {
throw new DaoException(e);
}
} |
java | @Override
public List<CPDefinitionLink> findByCPD_T(long CPDefinitionId, String type,
int start, int end,
OrderByComparator<CPDefinitionLink> orderByComparator) {
return findByCPD_T(CPDefinitionId, type, start, end, orderByComparator,
true);
} |
python | def _assemble_regulate_activity(self, stmt):
"""Example: p(HGNC:MAP2K1) => act(p(HGNC:MAPK1))"""
act_obj = deepcopy(stmt.obj)
act_obj.activity = stmt._get_activity_condition()
# We set is_active to True here since the polarity is encoded
# in the edge (decreases/increases)
act_obj.activity.is_active = True
activates = isinstance(stmt, Activation)
relation = get_causal_edge(stmt, activates)
self._add_nodes_edges(stmt.subj, act_obj, relation, stmt.evidence) |
java | public void marshall(GetCoreDefinitionVersionRequest getCoreDefinitionVersionRequest, ProtocolMarshaller protocolMarshaller) {
if (getCoreDefinitionVersionRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(getCoreDefinitionVersionRequest.getCoreDefinitionId(), COREDEFINITIONID_BINDING);
protocolMarshaller.marshall(getCoreDefinitionVersionRequest.getCoreDefinitionVersionId(), COREDEFINITIONVERSIONID_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public static TransactionException hasNotAllowed(Thing thing, Attribute attribute) {
return create(HAS_INVALID.getMessage(thing.type().label(), attribute.type().label()));
} |
java | public void setResources(java.util.Collection<Resource> resources) {
if (resources == null) {
this.resources = null;
return;
}
this.resources = new java.util.ArrayList<Resource>(resources);
} |
python | def evaluate_expression(dbg, frame, expression, is_exec):
'''returns the result of the evaluated expression
@param is_exec: determines if we should do an exec or an eval
'''
if frame is None:
return
# Not using frame.f_globals because of https://sourceforge.net/tracker2/?func=detail&aid=2541355&group_id=85796&atid=577329
# (Names not resolved in generator expression in method)
# See message: http://mail.python.org/pipermail/python-list/2009-January/526522.html
updated_globals = {}
updated_globals.update(frame.f_globals)
updated_globals.update(frame.f_locals) # locals later because it has precedence over the actual globals
try:
expression = str(expression.replace('@LINE@', '\n'))
if is_exec:
try:
# try to make it an eval (if it is an eval we can print it, otherwise we'll exec it and
# it will have whatever the user actually did)
compiled = compile(expression, '<string>', 'eval')
except:
Exec(expression, updated_globals, frame.f_locals)
pydevd_save_locals.save_locals(frame)
else:
result = eval(compiled, updated_globals, frame.f_locals)
if result is not None: # Only print if it's not None (as python does)
sys.stdout.write('%s\n' % (result,))
return
else:
return eval_in_context(expression, updated_globals, frame.f_locals)
finally:
# Should not be kept alive if an exception happens and this frame is kept in the stack.
del updated_globals
del frame |
java | public PeriodDuration normalizedStandardDays() {
long totalSecs = period.getDays() * SECONDS_PER_DAY + duration.getSeconds();
int splitDays = Math.toIntExact(totalSecs / SECONDS_PER_DAY);
long splitSecs = totalSecs % SECONDS_PER_DAY;
if (splitDays == period.getDays() && splitSecs == duration.getSeconds()) {
return this;
}
return PeriodDuration.of(period.withDays(splitDays), duration.withSeconds(splitSecs));
} |
python | def create_config(kwargs=None, call=None):
'''
Creates a Linode Configuration Profile.
name
The name of the VM to create the config for.
linode_id
The ID of the Linode to create the configuration for.
root_disk_id
The Root Disk ID to be used for this config.
swap_disk_id
The Swap Disk ID to be used for this config.
data_disk_id
The Data Disk ID to be used for this config.
.. versionadded:: 2016.3.0
kernel_id
The ID of the kernel to use for this configuration profile.
'''
if call == 'action':
raise SaltCloudSystemExit(
'The create_config function must be called with -f or --function.'
)
if kwargs is None:
kwargs = {}
name = kwargs.get('name', None)
linode_id = kwargs.get('linode_id', None)
root_disk_id = kwargs.get('root_disk_id', None)
swap_disk_id = kwargs.get('swap_disk_id', None)
data_disk_id = kwargs.get('data_disk_id', None)
kernel_id = kwargs.get('kernel_id', None)
if kernel_id is None:
# 138 appears to always be the latest 64-bit kernel for Linux
kernel_id = 138
required_params = [name, linode_id, root_disk_id, swap_disk_id]
for item in required_params:
if item is None:
raise SaltCloudSystemExit(
'The create_config functions requires a \'name\', \'linode_id\', '
'\'root_disk_id\', and \'swap_disk_id\'.'
)
disklist = '{0},{1}'.format(root_disk_id, swap_disk_id)
if data_disk_id is not None:
disklist = '{0},{1},{2}'.format(root_disk_id, swap_disk_id, data_disk_id)
config_args = {'LinodeID': linode_id,
'KernelID': kernel_id,
'Label': name,
'DiskList': disklist
}
result = _query('linode', 'config.create', args=config_args)
return _clean_data(result) |
python | def joinMeiUyir(mei_char, uyir_char):
"""
    This function joins a mei character and an uyir character, and returns the
    compound uyirmei unicode character.
Inputs:
mei_char : It must be unicode tamil mei char.
uyir_char : It must be unicode tamil uyir char.
Written By : Arulalan.T
Date : 22.09.2014
"""
if not mei_char: return uyir_char
if not uyir_char: return mei_char
if not isinstance(mei_char, PYTHON3 and str or unicode):
raise ValueError(u"Passed input mei character '%s' must be unicode, not just string" % mei_char)
if not isinstance(uyir_char, PYTHON3 and str or unicode) and uyir_char != None:
raise ValueError(u"Passed input uyir character '%s' must be unicode, not just string" % uyir_char)
if mei_char not in grantha_mei_letters:
raise ValueError(u"Passed input character '%s' is not a tamil mei character" % mei_char)
if uyir_char not in uyir_letters:
raise ValueError(u"Passed input character '%s' is not a tamil uyir character" % uyir_char)
if uyir_char:
uyiridx = uyir_letters.index(uyir_char)
else:
return mei_char
meiidx = grantha_mei_letters.index(mei_char)
# calculate uyirmei index
uyirmeiidx = meiidx*12 + uyiridx
return grantha_uyirmei_letters[uyirmeiidx] |
python | def Dropout(x, params, rate=0.0, mode='train', rng=None, **kwargs):
"""Layer construction function for a dropout layer with given rate."""
del params, kwargs
if rng is None:
msg = ('Dropout layer requires apply_fun to be called with a rng keyword '
'argument. That is, instead of `Dropout(params, inputs)`, call '
'it like `Dropout(params, inputs, rng=key)`.')
raise ValueError(msg)
if rate >= 1.0:
raise ValueError('Dropout rate (%f) must be lower than 1.' % rate)
if mode == 'train' and rate > 0.0:
keep = backend.random.bernoulli(rng, 1.0 - rate, x.shape)
return np.where(keep, x / (1.0 - rate), 0)
else:
return x |
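The division by `1.0 - rate` is the usual inverted-dropout scaling, which keeps the expected activation unchanged at train time; a plain NumPy illustration of that identity (not the trax backend API):

```python
import numpy as np

rng = np.random.default_rng(0)
x = np.ones((1000, 100))
rate = 0.2
keep = rng.random(x.shape) >= rate         # keep mask ~ Bernoulli(1 - rate)
y = np.where(keep, x / (1.0 - rate), 0.0)  # inverted dropout
print(y.mean())                            # ~1.0, matching the mean of x
```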
python | def to_er7(self, encoding_chars=None, trailing_children=False):
"""
Return the ER7-encoded string
:type encoding_chars: ``dict``
:param encoding_chars: a dictionary containing the encoding chars or None to use the default
(see :func:`get_default_encoding <hl7apy.get_default_encoding_chars>`)
:type trailing_children: ``bool``
:param trailing_children: if ``True``, trailing children will be added even if their value is None
:return: the ER7-encoded string
>>> msh_9 = Field("MSH_9")
>>> msh_9.value = "ADT^A01^ADT_A01"
>>> print(msh_9.to_er7())
ADT^A01^ADT_A01
"""
if encoding_chars is None:
encoding_chars = self.encoding_chars
if self.is_named('MSH_1'):
try:
return self.msh_1_1.children[0].value.value
except IndexError:
return self.msh_1_1.children[0].value
elif self.is_named('MSH_2'):
try:
return self.msh_2_1.children[0].value.value
except IndexError:
return self.msh_2_1.children[0].value
return super(Field, self).to_er7(encoding_chars, trailing_children) |
python | def repo(
state, host, name, baseurl,
present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
'''
Add/remove/update yum repositories.
+ name: filename for the repo (in ``/etc/yum/repos.d/``)
+ baseurl: the baseurl of the repo
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
    + gpgcheck: whether to set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
'''
# Description defaults to name
description = description or name
filename = '/etc/yum.repos.d/{0}.repo'.format(name)
# If we don't want the repo, just remove any existing file
if not present:
yield files.file(state, host, filename, present=False)
return
# Build the repo file from string
repo_lines = [
'[{0}]'.format(name),
'name={0}'.format(description),
'baseurl={0}'.format(baseurl),
'enabled={0}'.format(1 if enabled else 0),
'gpgcheck={0}'.format(1 if gpgcheck else 0),
]
if gpgkey:
repo_lines.append('gpgkey={0}'.format(gpgkey))
repo_lines.append('')
repo = '\n'.join(repo_lines)
repo = StringIO(repo)
# Ensure this is the file on the server
yield files.put(state, host, repo, filename) |
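In a pyinfra deploy file an operation like this is called without the `state`/`host` arguments (the framework injects them); a hedged sketch with illustrative values:

```python
# deploy.py (illustrative repo name and URL)
repo(
    name='example-repo',
    baseurl='https://repo.example.com/el7/x86_64/',
    description='Example repository',
    gpgcheck=False,
)
```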
java | public void splitField(String sourceField, String targetField, String splitString, String index) {
TransformationStep step = new TransformationStep();
step.setTargetField(targetField);
step.setSourceFields(sourceField);
step.setOperationParameter(TransformationConstants.SPLIT_PARAM, splitString);
step.setOperationParameter(TransformationConstants.INDEX_PARAM, index);
step.setOperationName("split");
steps.add(step);
} |
java | public static Resource getOrCreateChild(Resource resource, String relPath, String primaryTypes)
throws RepositoryException {
Resource child = null;
if (resource != null) {
ResourceResolver resolver = resource.getResourceResolver();
String path = resource.getPath();
while (relPath.startsWith("/")) {
relPath = relPath.substring(1);
}
if (StringUtils.isNotBlank(relPath)) {
path += "/" + relPath;
}
child = getOrCreateResource(resolver, path, primaryTypes);
}
return child;
} |
python | def set_artist(self, artist):
"""Sets song's artist
:param artist: artist
"""
self._set_attr(TPE1(encoding=3, text=artist.decode('utf-8'))) |
python | def on_train_begin(self, **kwargs: Any) -> None:
"Prepare MLflow experiment and log params"
self.client = mlflow.tracking.MlflowClient(self.uri)
exp = self.client.get_experiment_by_name(self.exp_name)
self.exp_id = self.client.create_experiment(self.exp_name) if exp is None else exp.experiment_id
run = self.client.create_run(experiment_id=self.exp_id)
self.run = run.info.run_uuid
for k,v in self.params.items():
self.client.log_param(run_id=self.run, key=k, value=v) |
python | def matchTypes(accept_types, have_types):
"""Given the result of parsing an Accept: header, and the
available MIME types, return the acceptable types with their
quality markdowns.
For example:
>>> acceptable = parseAcceptHeader('text/html, text/plain; q=0.5')
>>> matchTypes(acceptable, ['text/plain', 'text/html', 'image/jpeg'])
[('text/html', 1.0), ('text/plain', 0.5)]
Type signature: ([(str, str, float)], [str]) -> [(str, float)]
"""
if not accept_types:
# Accept all of them
default = 1
else:
default = 0
match_main = {}
match_sub = {}
for (main, sub, q) in accept_types:
if main == '*':
default = max(default, q)
continue
elif sub == '*':
match_main[main] = max(match_main.get(main, 0), q)
else:
match_sub[(main, sub)] = max(match_sub.get((main, sub), 0), q)
accepted_list = []
order_maintainer = 0
for mtype in have_types:
main, sub = mtype.split('/')
if (main, sub) in match_sub:
q = match_sub[(main, sub)]
else:
q = match_main.get(main, default)
if q:
accepted_list.append((1 - q, order_maintainer, q, mtype))
order_maintainer += 1
accepted_list.sort()
return [(mtype, q) for (_, _, q, mtype) in accepted_list] |
java | private ListenableFuture<ZkWorker> addWorker(final Worker worker)
{
log.info("Worker[%s] reportin' for duty!", worker.getHost());
try {
cancelWorkerCleanup(worker.getHost());
final String workerStatusPath = JOINER.join(indexerZkConfig.getStatusPath(), worker.getHost());
final PathChildrenCache statusCache = workerStatusPathChildrenCacheFactory.make(cf, workerStatusPath);
final SettableFuture<ZkWorker> retVal = SettableFuture.create();
final ZkWorker zkWorker = new ZkWorker(
worker,
statusCache,
jsonMapper
);
// Add status listener to the watcher for status changes
zkWorker.addListener(
new PathChildrenCacheListener()
{
@Override
public void childEvent(CuratorFramework client, PathChildrenCacheEvent event)
{
final String taskId;
final RemoteTaskRunnerWorkItem taskRunnerWorkItem;
synchronized (statusLock) {
try {
switch (event.getType()) {
case CHILD_ADDED:
case CHILD_UPDATED:
taskId = ZKPaths.getNodeFromPath(event.getData().getPath());
final TaskAnnouncement announcement = jsonMapper.readValue(
event.getData().getData(), TaskAnnouncement.class
);
log.info(
"Worker[%s] wrote %s status for task [%s] on [%s]",
zkWorker.getWorker().getHost(),
announcement.getTaskStatus().getStatusCode(),
taskId,
announcement.getTaskLocation()
);
// Synchronizing state with ZK
statusLock.notifyAll();
final RemoteTaskRunnerWorkItem tmp;
if ((tmp = runningTasks.get(taskId)) != null) {
taskRunnerWorkItem = tmp;
} else {
final RemoteTaskRunnerWorkItem newTaskRunnerWorkItem = new RemoteTaskRunnerWorkItem(
taskId,
announcement.getTaskType(),
zkWorker.getWorker(),
TaskLocation.unknown(),
announcement.getTaskDataSource()
);
final RemoteTaskRunnerWorkItem existingItem = runningTasks.putIfAbsent(
taskId,
newTaskRunnerWorkItem
);
if (existingItem == null) {
log.warn(
"Worker[%s] announced a status for a task I didn't know about, adding to runningTasks: %s",
zkWorker.getWorker().getHost(),
taskId
);
taskRunnerWorkItem = newTaskRunnerWorkItem;
} else {
taskRunnerWorkItem = existingItem;
}
}
if (!announcement.getTaskLocation().equals(taskRunnerWorkItem.getLocation())) {
taskRunnerWorkItem.setLocation(announcement.getTaskLocation());
TaskRunnerUtils.notifyLocationChanged(listeners, taskId, announcement.getTaskLocation());
}
if (announcement.getTaskStatus().isComplete()) {
taskComplete(taskRunnerWorkItem, zkWorker, announcement.getTaskStatus());
runPendingTasks();
}
break;
case CHILD_REMOVED:
taskId = ZKPaths.getNodeFromPath(event.getData().getPath());
taskRunnerWorkItem = runningTasks.remove(taskId);
if (taskRunnerWorkItem != null) {
log.info("Task[%s] just disappeared!", taskId);
taskRunnerWorkItem.setResult(TaskStatus.failure(taskId));
TaskRunnerUtils.notifyStatusChanged(listeners, taskId, TaskStatus.failure(taskId));
} else {
log.info("Task[%s] went bye bye.", taskId);
}
break;
case INITIALIZED:
if (zkWorkers.putIfAbsent(worker.getHost(), zkWorker) == null) {
retVal.set(zkWorker);
} else {
final String message = StringUtils.format(
"WTF?! Tried to add already-existing worker[%s]",
worker.getHost()
);
log.makeAlert(message)
.addData("workerHost", worker.getHost())
.addData("workerIp", worker.getIp())
.emit();
retVal.setException(new IllegalStateException(message));
}
runPendingTasks();
break;
case CONNECTION_SUSPENDED:
case CONNECTION_RECONNECTED:
case CONNECTION_LOST:
// do nothing
}
}
catch (Exception e) {
log.makeAlert(e, "Failed to handle new worker status")
.addData("worker", zkWorker.getWorker().getHost())
.addData("znode", event.getData().getPath())
.emit();
}
}
}
}
);
zkWorker.start();
return retVal;
}
catch (Exception e) {
throw new RuntimeException(e);
}
} |
java | public String getColumnClassName(int column) throws SQLException {
checkColumn(column);
Type type = resultMetaData.columnTypes[--column];
return type.getJDBCClassName();
} |
java | public void resetColumnLabel(final String schema) {
Map<String, Integer> labelAndIndexMap = new HashMap<>(1, 1);
labelAndIndexMap.put(schema, 1);
resetLabelAndIndexMap(labelAndIndexMap);
} |
java | public void set(Versioned<E> element) {
if(element == null)
throw new NullPointerException("cannot set a null element");
if(_lastCall != LastCall.NEXT && _lastCall != LastCall.PREVIOUS)
throw new IllegalStateException("neither next() nor previous() has been called");
_stack.setById(_lastId, element);
afterSet(element.getValue());
} |
java | public Stats getCollectionStats(String collectionId, Date date) throws FlickrException {
return getStats(METHOD_GET_COLLECTION_STATS, "collection_id", collectionId, date);
} |
python | def check(source,
filename='<string>',
report_level=docutils.utils.Reporter.INFO_LEVEL,
ignore=None,
debug=False):
"""Yield errors.
Use lower report_level for noisier error output.
Each yielded error is a tuple of the form:
(line_number, message)
Line numbers are indexed at 1 and are with respect to the full RST file.
Each code block is checked asynchronously in a subprocess.
Note that this function mutates state by calling the ``docutils``
``register_*()`` functions.
"""
# Do this at call time rather than import time to avoid unnecessarily
# mutating state.
register_code_directive()
ignore_sphinx()
ignore = ignore or {}
try:
ignore.setdefault('languages', []).extend(
find_ignored_languages(source)
)
except Error as error:
yield (error.line_number, '{}'.format(error))
writer = CheckWriter(source, filename, ignore=ignore)
string_io = io.StringIO()
# This is a hack to avoid false positive from docutils (#23). docutils
# mistakes BOMs for actual visible letters. This results in the "underline
# too short" warning firing.
source = strip_byte_order_mark(source)
try:
docutils.core.publish_string(
source, writer=writer,
source_path=filename,
settings_overrides={'halt_level': report_level,
'report_level': report_level,
'warning_stream': string_io})
except docutils.utils.SystemMessage:
pass
except AttributeError:
# Sphinx will sometimes throw an exception trying to access
# "self.state.document.settings.env". Ignore this for now until we
# figure out a better approach.
if debug:
raise
for checker in writer.checkers:
for error in checker():
yield error
rst_errors = string_io.getvalue().strip()
if rst_errors:
for message in rst_errors.splitlines():
try:
ignore_regex = ignore.get('messages', '')
if ignore_regex and re.search(ignore_regex, message):
continue
yield parse_gcc_style_error_message(message,
filename=filename,
has_column=False)
except ValueError:
continue |
java | public static String encodeAsXMLName(String s) {
StringBuilder sb = new StringBuilder("_");
for (byte b : s.getBytes(Charset.forName("UTF-8"))) {
sb.append(Integer.toHexString((b >>> 4) & 0xF));
sb.append(Integer.toHexString(b & 0xF));
}
return sb.toString();
} |
java | public boolean isAnyPermissionPermanentlyDenied() {
boolean hasPermanentlyDeniedAnyPermission = false;
for (PermissionDeniedResponse deniedResponse : deniedPermissionResponses) {
if (deniedResponse.isPermanentlyDenied()) {
hasPermanentlyDeniedAnyPermission = true;
break;
}
}
return hasPermanentlyDeniedAnyPermission;
} |
python | def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError(
'Columns containing complex values can be stored '
'but cannot'
' be indexed when using table format. Either use '
'fixed format, set index=False, or do not include '
'the columns containing complex values to '
'data_columns when initializing the table.')
v.create_index(**kw) |
java | public static String getNode(Long nodeId) {
        // Build the path from the nodeId
return MessageFormat.format(ArbitrateConstants.NODE_NID_FORMAT, String.valueOf(nodeId));
} |
python | def linguist_field_names(self):
"""
Returns linguist field names (example: "title" and "title_fr").
"""
return list(self.model._linguist.fields) + list(
utils.get_language_fields(self.model._linguist.fields)
) |
java | protected String handleAddException(IOException e, ItemData item) throws RepositoryException,
InvalidItemStateException
{
StringBuilder message = new StringBuilder("[");
message.append(containerName).append("] ADD ").append(item.isNode() ? "NODE. " : "PROPERTY. ");
String errMessage = e.getMessage();
String itemInfo =
item.getQPath().getAsString() + ", ID: " + item.getIdentifier() + ", ParentID: " + item.getParentIdentifier()
+ (errMessage != null ? ". Cause >>>> " + errMessage : "");
// try detect integrity violation
RepositoryException ownException = null;
try
{
NodeData parent = (NodeData)conn.getItemData(item.getParentIdentifier());
if (parent != null)
{
// have a parent
try
{
ItemData me = conn.getItemData(item.getIdentifier());
if (me != null)
{
// item already exists
message.append("Item already exists in storage: ").append(itemInfo);
ownException = new ItemExistsException(message.toString(), e);
throw ownException;
}
me =
conn.getItemData(parent, new QPathEntry(item.getQPath().getName(), item.getQPath().getIndex()),
ItemType.getItemType(item));
if (me != null)
{
message.append("Item already exists in storage: ").append(itemInfo);
ownException = new ItemExistsException(message.toString(), e);
throw ownException;
}
}
catch (Exception ep)
{
// item not found or other things but error of item reading
if (ownException != null)
throw ownException;
}
message.append("Error of item add. ").append(itemInfo);
ownException = new RepositoryException(message.toString(), e);
throw ownException;
}
}
catch (Exception ep)
{
// no parent or error access it
if (ownException != null)
throw ownException;
}
message.append("Error of item add. ").append(itemInfo);
throw new JCRInvalidItemStateException(message.toString(), item.getIdentifier(), ItemState.ADDED, e);
} |
python | def getLinearityFunction(expTimes, imgs, mxIntensity=65535, min_ascent=0.001,
):
'''
returns offset, ascent
of image(expTime) = offset + ascent*expTime
'''
# TODO: calculate [min_ascent] from noise function
# instead of having it as variable
ascent, offset, error = linRegressUsingMasked2dArrays(
expTimes, imgs, imgs > mxIntensity)
ascent[np.isnan(ascent)] = 0
    # remove low-frequency noise:
if min_ascent > 0:
i = ascent < min_ascent
offset[i] += (0.5 * (np.min(expTimes) + np.max(expTimes))) * ascent[i]
ascent[i] = 0
return offset, ascent, error |
java | public static String requireNotEmpty(final String str) throws NullPointerException, IllegalArgumentException {
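        // calling str.length() below throws the declared NullPointerException when str is null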
if (str.length() == 0) {
throw new IllegalArgumentException();
}
return str;
} |
java | @Override
public byte[] readBuffer(final ChannelBuffer buffer) {
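        // read exactly getLength() bytes from the channel buffer into a fresh array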
int len = getLength();
byte[] matched = new byte[len];
buffer.readBytes(matched);
return matched;
} |
python | def _cluster(bam_file, ma_file, out_dir, reference, annotation_file=None):
"""
    Run the seqcluster "cluster" subcommand directly from python
"""
seqcluster = op.join(get_bcbio_bin(), "seqcluster")
# cl = ["cluster", "-o", out_dir, "-m", ma_file, "-a", bam_file, "-r", reference]
if annotation_file:
annotation_file = "-g " + annotation_file
else:
annotation_file = ""
if not file_exists(op.join(out_dir, "counts.tsv")):
cmd = ("{seqcluster} cluster -o {out_dir} -m {ma_file} -a {bam_file} -r {reference} {annotation_file}")
do.run(cmd.format(**locals()), "Running seqcluster.")
counts = op.join(out_dir, "counts.tsv")
stats = op.join(out_dir, "read_stats.tsv")
json = op.join(out_dir, "seqcluster.json")
return {'out_dir': out_dir, 'count_file': counts, 'stat_file': stats, 'json': json} |
python | def writeChunk(self, stream, filename, chunkIdx=None):
"""
Streams an uploaded chunk to a file.
:param stream: the binary stream that contains the file.
:param filename: the name of the file.
:param chunkIdx: optional chunk index (for writing to a tmp dir)
:return: no of bytes written or -1 if there was an error.
"""
    import io
    outputFileName = filename if chunkIdx is None else filename + '.' + str(chunkIdx)
    outputDir = self._uploadDir if chunkIdx is None else self._tmpDir
    chunkFilePath = os.path.join(outputDir, outputFileName)
    if os.path.exists(chunkFilePath) and os.path.isfile(chunkFilePath):
        logger.error('Uploaded file already exists: ' + chunkFilePath)
        return -1
    # open exclusively ('x') so a concurrent write of the same chunk fails fast,
    # and use a context manager so the handle is closed even if a read/write fails
    with open(chunkFilePath, 'xb') as chunkFile:
        count = 0
        while True:
            chunk = stream.read(io.DEFAULT_BUFFER_SIZE)
            if not chunk:
                break
            chunkFile.write(chunk)
            count += len(chunk)
    return count |
python | def make_motif34lib():
'''
This function generates the motif34lib.mat library required for all
other motif computations. Not to be called externally.
'''
from scipy import io
import os
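    # enumerate all directed 3-node and 4-node subgraphs, keep the weakly
    # connected ones, and record canonical labels so isomorphic subgraphs
    # share a motif ID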
def motif3generate():
n = 0
M = np.zeros((54, 6), dtype=bool) # isomorphs
        # canonical labels (predecessors of IDs)
CL = np.zeros((54, 6), dtype=np.uint8)
cl = np.zeros((6,), dtype=np.uint8)
for i in range(2**6): # loop through all subgraphs
            m = '{0:b}'.format(i).zfill(6)  # zero-pad the bitstring to 6 edge slots
G = np.array(((0, m[2], m[4]), (m[0], 0, m[5]),
(m[1], m[3], 0)), dtype=int)
ko = np.sum(G, axis=1)
ki = np.sum(G, axis=0)
if np.all(ko + ki): # if subgraph weakly connected
u = np.array((ko, ki)).T
cl.flat = u[np.lexsort((ki, ko))]
CL[n, :] = cl # assign motif label to isomorph
M[n, :] = np.array((G.T.flat[1:4], G.T.flat[5:8])).flat
n += 1
# convert CLs into motif IDs
_, ID = np.unique(
CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
ID += 1
        # convert IDs into Sporns & Kötter classification
id_mika = (1, 3, 4, 6, 7, 8, 11)
id_olaf = (-3, -6, -1, -11, -4, -7, -8)
for mika, olaf in zip(id_mika, id_olaf):
ID[ID == mika] = olaf
ID = np.abs(ID)
ix = np.argsort(ID)
ID = ID[ix] # sort IDs
M = M[ix, :] # sort isomorphs
N = np.squeeze(np.sum(M, axis=1)) # number of edges
Mn = np.array(np.sum(np.tile(np.power(10, np.arange(5, -1, -1)),
(M.shape[0], 1)) * M, axis=1), dtype=np.uint32)
return M, Mn, ID, N
def motif4generate():
n = 0
M = np.zeros((3834, 12), dtype=bool) # isomorphs
CL = np.zeros((3834, 16), dtype=np.uint8) # canonical labels
cl = np.zeros((16,), dtype=np.uint8)
for i in range(2**12): # loop through all subgraphs
            m = '{0:b}'.format(i).zfill(12)  # zero-pad the bitstring to 12 edge slots
G = np.array(((0, m[3], m[6], m[9]), (m[0], 0, m[7], m[10]),
(m[1], m[4], 0, m[11]), (m[2], m[5], m[8], 0)), dtype=int)
Gs = G + G.T
v = Gs[0, :]
for j in range(2):
v = np.any(Gs[v != 0, :], axis=0) + v
if np.all(v): # if subgraph weakly connected
G2 = np.dot(G, G) != 0
ko = np.sum(G, axis=1)
ki = np.sum(G, axis=0)
ko2 = np.sum(G2, axis=1)
ki2 = np.sum(G2, axis=0)
u = np.array((ki, ko, ki2, ko2)).T
cl.flat = u[np.lexsort((ko2, ki2, ko, ki))]
CL[n, :] = cl # assign motif label to isomorph
M[n, :] = np.array((G.T.flat[1:5], G.T.flat[6:10],
G.T.flat[11:15])).flat
n += 1
# convert CLs into motif IDs
_, ID = np.unique(
CL.view(CL.dtype.descr * CL.shape[1]), return_inverse=True)
ID += 1
ix = np.argsort(ID)
ID = ID[ix] # sort IDs
M = M[ix, :] # sort isomorphs
N = np.sum(M, axis=1) # number of edges
Mn = np.array(np.sum(np.tile(np.power(10, np.arange(11, -1, -1)),
(M.shape[0], 1)) * M, axis=1), dtype=np.uint64)
return M, Mn, ID, N
    dirname = os.path.dirname(__file__)  # avoid shadowing the builtin dir()
    fname = os.path.join(dirname, motiflib)
if os.path.exists(fname):
print("motif34lib already exists")
return
m3, m3n, id3, n3 = motif3generate()
m4, m4n, id4, n4 = motif4generate()
io.savemat(fname, mdict={'m3': m3, 'm3n': m3n, 'id3': id3, 'n3': n3,
'm4': m4, 'm4n': m4n, 'id4': id4, 'n4': n4}) |
java | public void start(Xid xid, int flags) throws XAException
{
if (trace)
log.tracef("start(%s, %s)", xid, flags);
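      // TMNOFLAGS starts a brand-new transaction branch: reject it if a transaction
      // is already in progress, and reject join/resume flags if none has started yet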
if (currentXid != null && flags == XAResource.TMNOFLAGS)
{
throw new LocalXAException(bundle.tryingStartNewTxWhenOldNotComplete(
currentXid, xid, flags), XAException.XAER_PROTO);
}
if (currentXid == null && flags != XAResource.TMNOFLAGS)
{
throw new LocalXAException(bundle.tryingStartNewTxWithWrongFlags(xid, flags), XAException.XAER_PROTO);
}
if (currentXid == null)
{
try
{
cl.getManagedConnection().getLocalTransaction().begin();
}
catch (ResourceException re)
{
throw new LocalXAException(bundle.errorTryingStartLocalTx(), XAException.XAER_RMERR, re);
}
catch (Throwable t)
{
throw new LocalXAException(bundle.throwableTryingStartLocalTx(), XAException.XAER_RMERR, t);
}
currentXid = xid;
}
} |
java | public void continueIfExecutionDoesNotAffectNextOperation(Callback<PvmExecutionImpl, Void> dispatching,
Callback<PvmExecutionImpl, Void> continuation,
PvmExecutionImpl execution) {
String lastActivityId = execution.getActivityId();
String lastActivityInstanceId = getActivityInstanceId(execution);
dispatching.callback(execution);
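    // the dispatch may have replaced the execution; continue with the replacement if so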
execution = execution.getReplacedBy() != null ? execution.getReplacedBy() : execution;
String currentActivityInstanceId = getActivityInstanceId(execution);
String currentActivityId = execution.getActivityId();
    // if the execution was canceled or changed during the dispatch, we should not execute
    // the next operation, since another atomic operation was executed during the dispatching
if (!execution.isCanceled() && isOnSameActivity(lastActivityInstanceId, lastActivityId, currentActivityInstanceId, currentActivityId)) {
continuation.callback(execution);
}
} |
python | def _getPublicSignupInfo(siteStore):
"""
Get information about public web-based signup mechanisms.
@param siteStore: a store with some signups installed on it (as indicated
by _SignupTracker instances).
@return: a generator which yields 2-tuples of (prompt, url) where 'prompt'
is unicode briefly describing the signup mechanism (e.g. "Sign Up"), and
'url' is a (unicode) local URL linking to a page where an anonymous user
can access it.
"""
# Note the underscore; this _should_ be a public API but it is currently an
# unfortunate hack; there should be a different powerup interface that
# requires prompt and prefixURL attributes rather than _SignupTracker.
# -glyph
for tr in siteStore.query(_SignupTracker):
si = tr.signupItem
p = getattr(si, 'prompt', None)
u = getattr(si, 'prefixURL', None)
if p is not None and u is not None:
yield (p, u'/'+u) |
java | private boolean prepareListData() {
Set<String> headers = new HashSet<>();
listDataHeader = new ArrayList<String>();
        // category -> list of sample titles
listDataChild = new HashMap<String, List<String>>();
if (sampleFactory==null || additionActivitybasedSamples==null) {
//getActivity().getSupportFragmentManager().popBackStack();
return false;
}
        // had this throw an NPE once after device rotation and a back button press.
for (int a = 0; a < sampleFactory.count(); a++) {
final BaseSampleFragment f = sampleFactory.getSample(a);
titleSampleMap.put(f.getSampleTitle(), f);
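            // group samples by the last package segment of their class name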
String clz = f.getClass().getCanonicalName();
String[] bits=clz.split("\\.");
String group = bits[bits.length-2];
group=capitialize(group);
headers.add(group);
if (!listDataChild.containsKey(group)) {
listDataChild.put(group, new ArrayList<String>());
}
listDataChild.get(group).add(f.getSampleTitle());
}
if (!additionActivitybasedSamples.isEmpty()) {
listDataHeader.add("Activities");
listDataChild.put("Activities", new ArrayList<String>());
for (int a = 0; a < additionActivitybasedSamples.size(); a++) {
listDataChild.get("Activities").add(additionActivitybasedSamples.get(a).getActivityTitle());
titleSampleMap.put(additionActivitybasedSamples.get(a).getActivityTitle(), additionActivitybasedSamples.get(a));
}
}
listDataHeader.addAll(headers);
return true;
} |