language
stringclasses 2
values | func_code_string
stringlengths 63
466k
|
---|---|
def add_var(self, var):
    """Add a variable set to the model.

    Assigns the set's index range within the model's flat variable
    vector (``var.i1`` .. ``var.iN``) and appends it to ``self.vars``.
    Duplicate names are rejected with an error log and no mutation.
    """
    # Reject duplicates by name without building an intermediate list.
    if any(existing.name == var.name for existing in self.vars):
        logger.error("Variable set named '%s' already exists." % var.name)
        return
    var.i1 = self.var_N
    var.iN = self.var_N + var.N - 1
    self.vars.append(var)
def _operate(self, other, operation, inplace=True):
    """
    Gives the CanonicalDistribution operation (product or divide) with
    the other factor.

    The product of two canonical factors over the same scope X is simply:
    C(K1, h1, g1) * C(K2, h2, g2) = C(K1+K2, h1+h2, g1+g2)

    The division of canonical forms is defined analogously:
    C(K1, h1, g1) / C(K2, h2, g2) = C(K1-K2, h1-h2, g1-g2)

    When we have two canonical factors over different scopes X and Y,
    we simply extend the scope of both to make their scopes match and
    then perform the operation of the above equation. The extension of
    the scope is performed by simply adding zero entries to both the K
    matrices and the h vectors.

    Parameters
    ----------
    other: CanonicalFactor
        The CanonicalDistribution to be multiplied.

    operation: String
        'product' for multiplication operation and
        'divide' for division operation.

    Returns
    -------
    CanonicalDistribution or None:
        if inplace=True (default) returns None
        if inplace=False returns a new CanonicalDistribution instance.

    Example
    -------
    >>> import numpy as np
    >>> from pgmpy.factors.continuous import CanonicalDistribution
    >>> phi1 = CanonicalDistribution(['x1', 'x2', 'x3'],
                                     np.array([[1, -1, 0], [-1, 4, -2], [0, -2, 4]]),
                                     np.array([[1], [4], [-1]]), -2)
    >>> phi2 = CanonicalDistribution(['x1', 'x2'], np.array([[3, -2], [-2, 4]]),
                                     np.array([[5], [-1]]), 1)
    >>> phi3 = phi1 * phi2
    >>> phi3.K
    array([[ 4., -3.,  0.],
           [-3.,  8., -2.],
           [ 0., -2.,  4.]])
    >>> phi3.h
    array([ 6.,  3., -1.])
    >>> phi3.g
    -1
    >>> phi4 = phi1 / phi2
    >>> phi4.K
    array([[-2.,  1.,  0.],
           [ 1.,  0., -2.],
           [ 0., -2.,  4.]])
    >>> phi4.h
    array([-4.,  5., -1.])
    >>> phi4.g
    -3
    """
    if not isinstance(other, CanonicalDistribution):
        raise TypeError(
            "CanonicalDistribution object can only be multiplied or divided "
            "with an another CanonicalDistribution object. Got {other_type}, "
            "expected CanonicalDistribution.".format(other_type=type(other)))

    # Operate on self directly, or on a copy when inplace=False.
    phi = self if inplace else self.copy()

    # Union of both scopes; self's variables keep their order and come first.
    all_vars = self.variables + [var for var in other.variables if var not in self.variables]
    no_of_var = len(all_vars)

    # Positions of each factor's own variables inside the extended scope.
    self_var_index = [all_vars.index(var) for var in self.variables]
    other_var_index = [all_vars.index(var) for var in other.variables]

    def _extend_K_scope(K, index):
        # Embed K into a no_of_var x no_of_var zero matrix at the rows and
        # columns given by `index` (zero entries elsewhere).
        ext_K = np.zeros([no_of_var, no_of_var])
        ext_K[np.ix_(index, index)] = K
        return ext_K

    def _extend_h_scope(h, index):
        # Embed h into a no_of_var x 1 zero column vector at `index`.
        ext_h = np.zeros(no_of_var).reshape(no_of_var, 1)
        ext_h[index] = h
        return ext_h

    phi.variables = all_vars

    if operation == 'product':
        phi.K = _extend_K_scope(self.K, self_var_index) + _extend_K_scope(other.K, other_var_index)
        phi.h = _extend_h_scope(self.h, self_var_index) + _extend_h_scope(other.h, other_var_index)
        phi.g = self.g + other.g
    else:
        phi.K = _extend_K_scope(self.K, self_var_index) - _extend_K_scope(other.K, other_var_index)
        phi.h = _extend_h_scope(self.h, self_var_index) - _extend_h_scope(other.h, other_var_index)
        phi.g = self.g - other.g

    if not inplace:
        return phi
def update_counts(self, current):
    """Increment this instance's counters for each item in ``current``.

    Args:
        current: iterable (e.g. a dict of current counts) whose items each
            bump ``self.counts[item]`` by one; unseen items start at 1.
    """
    for item in current:
        # dict.get with a default replaces the try/except KeyError dance.
        self.counts[item] = self.counts.get(item, 0) + 1
/**
 * Fills the form widgets from the given image attribute map, skipping
 * empty/whitespace-only values. Link-path values that start with the VFS
 * prefix have that prefix stripped before being set.
 *
 * NOTE(review): imageInfo and initialFill are currently unused here —
 * confirm whether they are required by the interface or can be dropped.
 */
public void fillContent(CmsImageInfoBean imageInfo, CmsJSONMap imageAttributes, boolean initialFill) {
    for (Entry<Attribute, I_CmsFormWidget> entry : m_fields.entrySet()) {
        String val = imageAttributes.getString(entry.getKey().name());
        if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(val)) {
            if ((entry.getKey() == Attribute.linkPath) && val.startsWith(CmsCoreProvider.get().getVfsPrefix())) {
                // Strip the VFS prefix so the widget shows a relative path.
                entry.getValue().setFormValueAsString(val.substring(CmsCoreProvider.get().getVfsPrefix().length()));
            } else {
                entry.getValue().setFormValueAsString(val);
            }
        }
    }
}
java | private static void validate(RepresentationModel<?> resource, HalFormsAffordanceModel model) {
String affordanceUri = model.getURI();
String selfLinkUri = resource.getRequiredLink(IanaLinkRelations.SELF.value()).expand().getHref();
if (!affordanceUri.equals(selfLinkUri)) {
throw new IllegalStateException("Affordance's URI " + affordanceUri + " doesn't match self link " + selfLinkUri
+ " as expected in HAL-FORMS");
}
} |
def create_response(self, request, image, content_type):
    """Build the HTTP response carrying the rendered image.

    Override in subclasses to customize the response (headers, streaming,
    caching); by default the image bytes are wrapped in a plain
    ``HttpResponse`` with the given content type.
    """
    response = HttpResponse(content=image, content_type=content_type)
    return response
java | @Override
public final E fromString(final Map<String, Object> pAddParam,
final String pStrVal) throws Exception {
if (pStrVal == null || "".equals(pStrVal)) {
return null;
}
return Enum.valueOf(this.enumClass, pStrVal);
} |
def fill_constants_types(module_name, elements):
    """Recursively record the Python type of each constant intrinsic.

    Parameters
    ----------
    module_name : tuple of str
        Dotted-module path (as components) currently being processed.
    elements : dict
        Maps an attribute name to either a sub-dict (submodule) or an
        intrinsic description object.
    """
    import importlib

    for elem, intrinsic in elements.items():
        if isinstance(intrinsic, dict):  # Submodule case: recurse deeper.
            fill_constants_types(module_name + (elem,), intrinsic)
        elif isinstance(intrinsic, ConstantIntr):
            # use introspection to get the Python constants types.
            # importlib.import_module returns the *named* (sub)module,
            # whereas bare __import__("a.b") returns the top-level package
            # "a", which would make getattr look up the constant on the
            # wrong module for dotted paths.
            cst = getattr(importlib.import_module(".".join(module_name)), elem)
            intrinsic.signature = type(cst)
def aggregate(name):
    """Perform an aggregation request against the named cube.

    The query-string parameters (aggregates, drilldown, cut, order, page,
    pagesize) are forwarded to the cube's aggregate call. The result is
    returned as CSV when ``format=csv`` is requested, JSON otherwise.
    """
    cube = get_cube(name)
    params = request.args
    result = cube.aggregate(
        aggregates=params.get('aggregates'),
        drilldowns=params.get('drilldown'),
        cuts=params.get('cut'),
        order=params.get('order'),
        page=params.get('page'),
        page_size=params.get('pagesize'),
    )
    result['status'] = 'ok'
    wants_csv = params.get('format', '').lower() == 'csv'
    if wants_csv:
        return create_csv_response(result['cells'])
    return jsonify(result)
def get_image_info_by_image_name(self, image, exact_tag=True):
    """
    using `docker images`, provide information about an image

    :param image: ImageName, name of image
    :param exact_tag: bool, if false then return info for all images of the
                      given name regardless what their tag is
    :return: list of dicts
    """
    logger.info("getting info about provided image specified by name '%s'", image)
    logger.debug("image_name = '%s'", image)

    # returns list of
    # {u'Created': 1414577076,
    #  u'Id': u'3ab9a7ed8a169ab89b09fb3e12a14a390d3c662703b65b4541c0c7bde0ee97eb',
    #  u'ParentId': u'a79ad4dac406fcf85b9c7315fe08de5b620c1f7a12f45c8185c843f4b4a49c4e',
    #  u'RepoTags': [u'buildroot-fedora:latest'],
    #  u'Size': 0,
    #  u'VirtualSize': 856564160}
    images = self.d.images(name=image.to_str(tag=False))
    if exact_tag:
        # tag is specified, we are looking for the exact image
        for found_image in images:
            if image.to_str(explicit_tag=True) in found_image['RepoTags']:
                logger.debug("image '%s' found", image)
                return [found_image]
        # No image carried the requested tag; report an empty result.
        images = []  # image not found

    logger.debug("%d matching images found", len(images))
    return images
java | public void addClientObserver (ClientObserver observer)
{
_clobservers.add(observer);
if (observer instanceof DetailedClientObserver) {
_dclobservers.add((DetailedClientObserver)observer);
}
} |
java | public static ClusterException create(Type type, String message) {
switch (type) {
case METASTORE:
return new MetaStoreException(message);
default:
throw new IllegalArgumentException("Invalid exception type");
}
} |
python | def _combine_lines(self, lines):
"""
Combines a list of JSON objects into one JSON object.
"""
lines = filter(None, map(lambda x: x.strip(), lines))
return '[' + ','.join(lines) + ']' |
def get_email_context(self, **kwargs):
    '''
    Build the context dictionary passed to email templates.

    Classes that inherit from this mixin may override this method so that
    additional object-specific context is provided to the email template;
    it should return a dictionary. By default, only general financial
    context variables are added, and kwargs are passed through directly.

    Note also that it is in general not a good idea for security reasons
    to pass model instances in the context here, since these methods can
    be accessed by logged in users who use the SendEmailView. So, in the
    default models of this app, the values of fields and properties are
    passed directly instead.
    '''
    context = dict(kwargs)
    financial_context = {
        'currencyCode': getConstant('general__currencyCode'),
        'currencySymbol': getConstant('general__currencySymbol'),
        'businessName': getConstant('contact__businessName'),
        'site_url': getConstant('email__linkProtocol') + '://' + Site.objects.get_current().domain,
    }
    context.update(financial_context)
    return context
/**
 * Builds a string from the given values, drawing separators from the
 * supplied iterator.
 *
 * NOTE(review): exact interleaving semantics are delegated to
 * {@code InterposeStrings} — confirm how separator exhaustion is handled.
 */
public static <T, V> String interpose(T[] values, Iterator<V> separators) {
    return new InterposeStrings<T, V>().apply(new ArrayIterator<>(values), separators);
}
def build_fncall(
    ctx,
    fndoc,
    argdocs=(),
    kwargdocs=(),
    hug_sole_arg=False,
    trailing_comment=None,
):
    """Builds a doc that looks like a function call,
    from docs that represent the function, arguments
    and keyword arguments.

    If ``hug_sole_arg`` is True, and the represented
    functional call is done with a single non-keyword
    argument, the function call parentheses will hug
    the sole argument doc without newlines and indentation
    in break mode. This makes a difference in calls
    like this::

        > hug_sole_arg = False
        frozenset(
            [
                1,
                2,
                3,
                4,
                5
            ]
        )
        > hug_sole_arg = True
        frozenset([
            1,
            2,
            3,
            4,
            5,
        ])

    If ``trailing_comment`` is provided, the text is
    rendered as a comment after the last argument and
    before the closing parenthesis. This will force
    the function call to be broken to multiple lines.
    """
    if callable(fndoc):
        # Allow passing the function object itself; render its identifier.
        fndoc = general_identifier(fndoc)

    # A trailing comment forces multi-line ("broken") layout below.
    has_comment = bool(trailing_comment)

    argdocs = list(argdocs)
    kwargdocs = list(kwargdocs)

    kwargdocs = [
        # Propagate any comments to the kwarg doc.
        (
            comment_doc(
                concat([
                    keyword_arg(binding),
                    ASSIGN_OP,
                    doc.doc
                ]),
                doc.annotation.value
            )
            if is_commented(doc)
            else concat([
                keyword_arg(binding),
                ASSIGN_OP,
                doc
            ])
        )
        for binding, doc in kwargdocs
    ]

    if not (argdocs or kwargdocs):
        # No arguments at all: render "fn()".
        return concat([
            fndoc,
            LPAREN,
            RPAREN,
        ])

    if (
        hug_sole_arg and
        not kwargdocs and
        len(argdocs) == 1 and
        not is_commented(argdocs[0])
    ):
        # Single plain positional argument: hug it inside the parens.
        return group(
            concat([
                fndoc,
                LPAREN,
                argdocs[0],
                RPAREN
            ])
        )

    allarg_docs = [*argdocs, *kwargdocs]

    if trailing_comment:
        allarg_docs.append(commentdoc(trailing_comment))

    parts = []
    for idx, doc in enumerate(allarg_docs):
        last = idx == len(allarg_docs) - 1

        if is_commented(doc):
            has_comment = True
            comment_str = doc.annotation.value
            doc = doc.doc
        else:
            comment_str = None

        # Every argument but the last is followed by a comma.
        part = concat([doc, NIL if last else COMMA])

        if comment_str:
            # Flat: keep the comment on the same line after the argument.
            # Broken: put the comment on its own line before the argument.
            part = group(
                flat_choice(
                    when_flat=concat([
                        part,
                        ' ',
                        commentdoc(comment_str)
                    ]),
                    when_broken=concat([
                        commentdoc(comment_str),
                        HARDLINE,
                        part,
                    ]),
                )
            )

        if not last:
            part = concat([part, HARDLINE if has_comment else LINE])

        parts.append(part)

    # Comments force the call to always break; otherwise the group decides.
    outer = (
        always_break
        if has_comment
        else group
    )

    return outer(
        concat([
            fndoc,
            LPAREN,
            nest(
                ctx.indent,
                concat([
                    SOFTLINE,
                    concat(parts),
                ])
            ),
            SOFTLINE,
            RPAREN
        ])
    )
java | public static String toTimePrecision(final TimeUnit t) {
switch (t) {
case HOURS:
return "h";
case MINUTES:
return "m";
case SECONDS:
return "s";
case MILLISECONDS:
return "ms";
case MICROSECONDS:
return "u";
case NANOSECONDS:
return "n";
default:
EnumSet<TimeUnit> allowedTimeunits = EnumSet.of(
TimeUnit.HOURS,
TimeUnit.MINUTES,
TimeUnit.SECONDS,
TimeUnit.MILLISECONDS,
TimeUnit.MICROSECONDS,
TimeUnit.NANOSECONDS);
throw new IllegalArgumentException("time precision must be one of:" + allowedTimeunits);
}
} |
/**
 * Parser entry for the PredicatedRuleCall grammar rule.
 *
 * NOTE(review): this appears to be ANTLR-generated code (grammar-position
 * comments, pushFollow/before/after hooks) — prefer regenerating from the
 * grammar over hand-editing.
 */
public final void rulePredicatedRuleCall() throws RecognitionException {
    int stackSize = keepStackSize();
    try {
        // InternalXtext.g:883:2: ( ( ( rule__PredicatedRuleCall__Group__0 ) ) )
        // InternalXtext.g:884:2: ( ( rule__PredicatedRuleCall__Group__0 ) )
        {
            // InternalXtext.g:884:2: ( ( rule__PredicatedRuleCall__Group__0 ) )
            // InternalXtext.g:885:3: ( rule__PredicatedRuleCall__Group__0 )
            {
                before(grammarAccess.getPredicatedRuleCallAccess().getGroup());
                // InternalXtext.g:886:3: ( rule__PredicatedRuleCall__Group__0 )
                // InternalXtext.g:886:4: rule__PredicatedRuleCall__Group__0
                {
                    pushFollow(FollowSets000.FOLLOW_2);
                    rule__PredicatedRuleCall__Group__0();
                    state._fsp--;
                }
                after(grammarAccess.getPredicatedRuleCallAccess().getGroup());
            }
        }
    }
    catch (RecognitionException re) {
        reportError(re);
        recover(input,re);
    }
    finally {
        restoreStackSize(stackSize);
    }
    return ;
}
/**
 * Resolves the preview provider instance for each resource type.
 * Provider instances are created reflectively from the type's configured
 * class name and cached per class name, so types sharing a provider class
 * share one instance. Instantiation failures are logged and the type is
 * simply omitted from the result.
 */
private Map<I_CmsResourceType, I_CmsPreviewProvider> getPreviewProviderForTypes(List<I_CmsResourceType> types) {
    // Cache: provider class name -> shared provider instance.
    Map<String, I_CmsPreviewProvider> previewProviderMap = new HashMap<String, I_CmsPreviewProvider>();
    Map<I_CmsResourceType, I_CmsPreviewProvider> typeProviderMapping = new HashMap<I_CmsResourceType, I_CmsPreviewProvider>();
    for (I_CmsResourceType type : types) {
        String providerClass = type.getGalleryPreviewProvider();
        if (CmsStringUtil.isNotEmptyOrWhitespaceOnly(providerClass)) {
            providerClass = providerClass.trim();
            try {
                if (previewProviderMap.containsKey(providerClass)) {
                    typeProviderMapping.put(type, previewProviderMap.get(providerClass));
                } else {
                    // First use of this provider class: instantiate and cache.
                    I_CmsPreviewProvider previewProvider = (I_CmsPreviewProvider)Class.forName(
                        providerClass).newInstance();
                    previewProviderMap.put(providerClass, previewProvider);
                    typeProviderMapping.put(type, previewProvider);
                }
            } catch (Exception e) {
                logError(
                    new CmsException(
                        Messages.get().container(
                            Messages.ERR_INSTANCING_PREVIEW_PROVIDER_2,
                            providerClass,
                            type.getTypeName()),
                        e));
            }
        }
    }
    return typeProviderMapping;
}
java | public static TStatus toTStatus(Exception e) {
if (e instanceof HiveSQLException) {
return ((HiveSQLException)e).toTStatus();
}
TStatus tStatus = new TStatus(TStatusCode.ERROR_STATUS);
tStatus.setErrorMessage(e.getMessage());
tStatus.setInfoMessages(toString(e));
return tStatus;
} |
def master_key_from_seed(seed):
    """ Generates a master key from a provided seed.

    Args:
        seed (bytes or str): a string of bytes or a hex string

    Returns:
        HDPrivateKey: the master private key.

    Raises:
        ValueError: if the derived key integer is zero or not below the
            curve order (an invalid private key).
    """
    seed_bytes = get_bytes(seed)
    # HMAC-SHA512 of the seed, keyed with the fixed string "Bitcoin seed";
    # the left half becomes the key, the right half the chain code.
    digest = hmac.new(b"Bitcoin seed", seed_bytes, hashlib.sha512).digest()
    key_bytes, chain_code = digest[:32], digest[32:]
    key_int = int.from_bytes(key_bytes, 'big')
    if key_int == 0 or key_int >= bitcoin_curve.n:
        raise ValueError("Bad seed, resulting in invalid key!")
    return HDPrivateKey(key=key_int, chain_code=chain_code, index=0, depth=0)
/**
 * Flags every attached child's ViewHolder as updated and invalid, then
 * forwards the invalidation to the recycler for the views it holds.
 */
void markKnownViewsInvalid() {
    final int childCount = getChildCount();
    for (int i = 0; i < childCount; i++) {
        final ViewHolder holder = getChildViewHolderInt(getChildAt(i));
        if (holder != null) {
            holder.addFlags(ViewHolder.FLAG_UPDATE | ViewHolder.FLAG_INVALID);
        }
    }
    mRecycler.markKnownViewsInvalid();
}
/**
 * Creates an expression computing the sign of the given numeric expression
 * via the SIGN math operation.
 *
 * @param num numeric expression
 * @return an Integer-typed sign expression
 */
public static <A extends Number & Comparable<?>> NumberExpression<Integer> sign(Expression<A> num) {
    return Expressions.numberOperation(Integer.class, Ops.MathOps.SIGN, num);
}
/**
 * Fetches the historic version of the model identified by {@code id} as of
 * the given timestamp. Responds 204 No Content when the (possibly
 * post-processed) entity is empty, otherwise 200 with the entity.
 */
public Response fetchHistoryAsOf(@PathParam("id") URI_ID id,
    @PathParam("asof") final Timestamp asOf) throws Exception {
    final MODEL_ID mId = tryConvertId(id);
    matchedFetchHistoryAsOf(mId, asOf);
    final Query<MODEL> query = server.find(modelType);
    defaultFindOrderBy(query);
    Object entity = executeTx(t -> {
        // Apply default config, the as-of restriction and any URI query
        // parameters inside a single transaction before executing.
        configDefaultQuery(query);
        configFetchHistoryAsOfQuery(query, mId, asOf);
        applyUriQuery(query, false);
        MODEL model = query.asOf(asOf).setId(mId).findOne();
        return processFetchedHistoryAsOfModel(mId, model, asOf);
    });
    if (isEmptyEntity(entity)) {
        return Response.noContent().build();
    }
    return Response.ok(entity).build();
}
/**
 * Reads and interns the string stored at the given constant pool index
 * (and sub-field), or returns null when the resolved offset is 0.
 *
 * @throws ClassfileFormatException if the constant pool entry is malformed
 * @throws IOException on read failure
 */
private String getConstantPoolString(final int cpIdx, final int subFieldIdx)
        throws ClassfileFormatException, IOException {
    final int constantPoolStringOffset = getConstantPoolStringOffset(cpIdx, subFieldIdx);
    return constantPoolStringOffset == 0 ? null
            : intern(inputStreamOrByteBuffer.readString(constantPoolStringOffset,
                    /* replaceSlashWithDot = */ false, /* stripLSemicolon = */ false));
}
def _process_model_dict(self, d):
    """
    Remove redundant items from a model's configuration dict.

    Parameters
    ----------
    d : dict
        Modified in place.

    Returns
    -------
    dict
        Modified `d`.
    """
    # Keys that are always removed from the per-model dict.
    redundant_keys = (
        'model_type',
        'sample_size',
        'probability_mode',
        'choice_mode',
        'choosers_fit_filters',
        'choosers_predict_filters',
        'alts_fit_filters',
        'alts_predict_filters',
        'interaction_predict_filters',
        'estimation_sample_size',
        'prediction_sample_size',
        'choice_column',
    )
    for key in redundant_keys:
        del d[key]

    # The expression is only redundant when it matches the default.
    if d['model_expression'] == self.default_model_expr:
        del d['model_expression']

    d["name"] = yamlio.to_scalar_safe(d["name"])
    return d
def installed(package, version):
    """
    Check if the package meets the required version.

    The version specifier consists of an optional comparator (one of =, ==, >,
    <, >=, <=) and an arbitrarily long version number separated by dots. The
    should be as you would expect, e.g. for an installed version '0.1.2' of
    package 'foo':

    >>> installed('foo', '==0.1.2')
    True
    >>> installed('foo', '<0.1')
    False
    >>> installed('foo', '>= 0.0.4')
    True

    If ``pkg-config`` not on path, raises ``EnvironmentError``.
    """
    if not exists(package):
        return False

    number, comparator = _split_version_specifier(version)
    modversion = _query(package, '--modversion')

    try:
        comparison = _compare_versions(modversion, number)
    except ValueError:
        msg = "{0} is not a correct version specifier".format(version)
        raise ValueError(msg)

    # Map each comparator to its check; an unknown comparator falls
    # through and yields None, matching the original if-chain.
    checks = {
        '': lambda r: r == 0,
        '=': lambda r: r == 0,
        '==': lambda r: r == 0,
        '>': lambda r: r > 0,
        '>=': lambda r: r >= 0,
        '<': lambda r: r < 0,
        '<=': lambda r: r <= 0,
    }
    check = checks.get(comparator)
    if check is not None:
        return check(comparison)
/**
 * Processes the sorted checksum files, dispatching each to a class-,
 * coverage- or method-level check based on its extension, accumulating all
 * seen class names into {@code allClasses} and delegating the computation
 * of {@code affectedClasses} to the individual checks.
 *
 * NOTE(review): precise includeAll/includeAffected semantics live in the
 * check classes — confirm against their definitions.
 */
private static void includeAffected(Set<String> allClasses, Set<String> affectedClasses, List<File> sortedFiles) {
    Storer storer = Config.createStorer();
    Hasher hasher = Config.createHasher();
    // In debug mode, use the instrumented name check variant.
    NameBasedCheck classCheck = Config.DEBUG_MODE_V != Config.DebugMode.NONE ?
            new DebugNameCheck(storer, hasher, DependencyAnalyzer.CLASS_EXT) :
            new NameBasedCheck(storer, hasher, DependencyAnalyzer.CLASS_EXT);
    NameBasedCheck covCheck = new NameBasedCheck(storer, hasher, DependencyAnalyzer.COV_EXT);
    MethodCheck methodCheck = new MethodCheck(storer, hasher);
    String prevClassName = null;
    for (File file : sortedFiles) {
        String fileName = file.getName();
        String dirName = file.getParent();
        String className = null;
        if (file.isDirectory()) {
            continue;
        }
        // Dispatch by extension: coverage, class, or (default) method data.
        if (fileName.endsWith(DependencyAnalyzer.COV_EXT)) {
            className = covCheck.includeAll(fileName, dirName);
        } else if (fileName.endsWith(DependencyAnalyzer.CLASS_EXT)) {
            className = classCheck.includeAll(fileName, dirName);
        } else {
            className = methodCheck.includeAll(fileName, dirName);
        }
        // Reset after some time to free space.
        if (prevClassName != null && className != null && !prevClassName.equals(className)) {
            methodCheck.includeAffected(affectedClasses);
            methodCheck = new MethodCheck(Config.createStorer(), Config.createHasher());
        }
        if (className != null) {
            allClasses.add(className);
            prevClassName = className;
        }
    }
    // Flush the remaining checks into the affected set.
    classCheck.includeAffected(affectedClasses);
    covCheck.includeAffected(affectedClasses);
    methodCheck.includeAffected(affectedClasses);
}
def get_all_clusters(resource_root, view=None):
    """
    Get all clusters

    @param resource_root: The root Resource object.
    @param view: optional view name forwarded as a query parameter.
    @return: A list of ApiCluster objects.
    """
    # Conditional expression replaces the old `x and y or z` idiom.
    params = dict(view=view) if view else None
    return call(resource_root.get, CLUSTERS_PATH, ApiCluster, True, params=params)
java | public static AbstractFile getFileByAbsolutePath(final SecurityContext securityContext, final String absolutePath) {
try {
return StructrApp.getInstance(securityContext).nodeQuery(AbstractFile.class).and(StructrApp.key(AbstractFile.class, "path"), absolutePath).getFirst();
} catch (FrameworkException ex) {
ex.printStackTrace();
logger.warn("File not found: {}", absolutePath);
}
return null;
} |
def wrap_existing_process(self, pid, stdout_read_fd, stderr_read_fd, port=None):
    """Do syncing, etc. for an already-running process.

    This returns after the process has ended and syncing is done.
    Captures ctrl-c's, signals, etc.

    NOTE(review): `port` is currently unused in this body — confirm whether
    it can be removed from the signature or is consumed elsewhere.
    """
    # Tee the wrapped process's stdout/stderr into our own stream set.
    stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
    stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
    stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
    self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
    self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)

    self.proc = Process(pid)
    self._run.pid = pid
    logger.info("wrapping existing process %i" % pid)

    try:
        self.init_run()
    except LaunchError as e:
        # Launch failed: report every way we can, then bail out.
        logger.exception("catostrophic launch error")
        wandb.termerror(str(e))
        util.sentry_exc(e)
        self._socket.launch_error()
        return

    if io_wrap.SIGWINCH_HANDLER is not None:
        # SIGWINCH_HANDLER (maybe) gets set in self.init_run()
        io_wrap.SIGWINCH_HANDLER.add_fd(stdout_read_fd)
        io_wrap.SIGWINCH_HANDLER.add_fd(stderr_read_fd)

    # Signal the main process that we're all hooked up
    logger.info("informing user process we are ready to proceed")
    self._socket.ready()

    self._sync_etc(headless=True)
def clear(self):
    """Delete every document in this collection and return the result."""
    collection = self.ds.connection(COLLECTION_MANAGED_PROCESS)
    result = collection.delete_many(filter={})
    return result
def delete_chat_photo(
    self,
    chat_id: Union[int, str]
) -> bool:
    """Use this method to delete a chat photo.

    Photos can't be changed for private chats.
    You must be an administrator in the chat for this to work and must have
    the appropriate admin rights.

    Note:
        In regular groups (non-supergroups), this method will only work if
        the "All Members Are Admins" setting is off.

    Args:
        chat_id (``int`` | ``str``):
            Unique identifier (int) or username (str) of the target chat.

    Returns:
        True on success.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
        ``ValueError`` if a chat_id belongs to user.
    """
    peer = self.resolve_peer(chat_id)
    empty_photo = types.InputChatPhotoEmpty()

    # Build the request appropriate to the peer kind, then send it.
    if isinstance(peer, types.InputPeerChat):
        request = functions.messages.EditChatPhoto(
            chat_id=peer.chat_id,
            photo=empty_photo
        )
    elif isinstance(peer, types.InputPeerChannel):
        request = functions.channels.EditPhoto(
            channel=peer,
            photo=empty_photo
        )
    else:
        raise ValueError("The chat_id \"{}\" belongs to a user".format(chat_id))

    self.send(request)
    return True
/**
 * "Modify Instance Attribute" EC2 action: gathers the annotated inputs into
 * the typed input builders (common/custom/EBS/IAM/instance) and hands them
 * to the query API executor. Any thrown exception is converted into the
 * standard failure result map.
 */
@Action(name = "Modify Instance Attribute",
        outputs = {
                @Output(Outputs.RETURN_CODE),
                @Output(Outputs.RETURN_RESULT),
                @Output(Outputs.EXCEPTION)
        },
        responses = {
                @Response(text = Outputs.SUCCESS, field = Outputs.RETURN_CODE, value = Outputs.SUCCESS_RETURN_CODE,
                        matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.RESOLVED),
                @Response(text = Outputs.FAILURE, field = Outputs.RETURN_CODE, value = Outputs.FAILURE_RETURN_CODE,
                        matchType = MatchType.COMPARE_EQUAL, responseType = ResponseType.ERROR)
        }
)
public Map<String, String> execute(@Param(value = ENDPOINT, required = true) String endpoint,
                                   @Param(value = IDENTITY, required = true) String identity,
                                   @Param(value = CREDENTIAL, required = true, encrypted = true) String credential,
                                   @Param(value = PROXY_HOST) String proxyHost,
                                   @Param(value = PROXY_PORT) String proxyPort,
                                   @Param(value = PROXY_USERNAME) String proxyUsername,
                                   @Param(value = PROXY_PASSWORD, encrypted = true) String proxyPassword,
                                   @Param(value = HEADERS) String headers,
                                   @Param(value = QUERY_PARAMS) String queryParams,
                                   @Param(value = VERSION) String version,
                                   @Param(value = DELIMITER) String delimiter,
                                   @Param(value = ATTRIBUTE) String attribute,
                                   @Param(value = ATTRIBUTE_VALUE) String attributeValue,
                                   @Param(value = BLOCK_DEVICE_MAPPING_DEVICE_NAMES_STRING) String blockDeviceMappingDeviceNamesString,
                                   @Param(value = BLOCK_DEVICE_MAPPING_VIRTUAL_NAMES_STRING) String blockDeviceMappingVirtualNamesString,
                                   @Param(value = DELETE_ON_TERMINATIONS_STRING) String deleteOnTerminationsString,
                                   @Param(value = VOLUME_IDS_STRING) String volumeIdsString,
                                   @Param(value = NO_DEVICES_STRING) String noDevicesString,
                                   @Param(value = LOWER_CASE_DISABLE_API_TERMINATION) String disableApiTermination,
                                   @Param(value = EBS_OPTIMIZED) String ebsOptimized,
                                   @Param(value = ENA_SUPPORT) String enaSupport,
                                   @Param(value = SECURITY_GROUP_IDS_STRING) String securityGroupIdsString,
                                   @Param(value = INSTANCE_ID, required = true) String instanceId,
                                   @Param(value = LOWER_CASE_INSTANCE_INITIATED_SHUTDOWN_BEHAVIOR) String instanceInitiatedShutdownBehavior,
                                   @Param(value = INSTANCE_TYPE) String instanceType,
                                   @Param(value = LOWER_CASE_KERNEL) String kernel,
                                   @Param(value = LOWER_CASE_RAMDISK) String ramdisk,
                                   @Param(value = SOURCE_DESTINATION_CHECK) String sourceDestinationCheck,
                                   @Param(value = SRIOV_NET_SUPPORT) String sriovNetSupport,
                                   @Param(value = LOWER_CASE_USER_DATA) String userData) {
    try {
        // Fall back to the default API version when none was supplied.
        version = getDefaultStringInput(version, INSTANCES_DEFAULT_API_VERSION);
        final CommonInputs commonInputs = new CommonInputs.Builder()
                .withEndpoint(endpoint, EC2_API, EMPTY)
                .withIdentity(identity)
                .withCredential(credential)
                .withProxyHost(proxyHost)
                .withProxyPort(proxyPort)
                .withProxyUsername(proxyUsername)
                .withProxyPassword(proxyPassword)
                .withHeaders(headers)
                .withQueryParams(queryParams)
                .withVersion(version)
                .withDelimiter(delimiter)
                .withAction(MODIFY_INSTANCE_ATTRIBUTE)
                .withApiService(EC2_API)
                .withRequestUri(EMPTY)
                .withRequestPayload(EMPTY)
                .withHttpClientMethod(HTTP_CLIENT_METHOD_GET)
                .build();
        final CustomInputs customInputs = new CustomInputs.Builder()
                .withInstanceId(instanceId)
                .withInstanceType(instanceType)
                .build();
        EbsInputs ebsInputs = new EbsInputs.Builder()
                .withEbsOptimized(ebsOptimized)
                .withBlockDeviceMappingDeviceNamesString(blockDeviceMappingDeviceNamesString)
                .withBlockDeviceMappingVirtualNamesString(blockDeviceMappingVirtualNamesString)
                .withDeleteOnTerminationsString(deleteOnTerminationsString)
                .withVolumeIdsString(volumeIdsString)
                .withNoDevicesString(noDevicesString)
                .build();
        IamInputs iamInputs = new IamInputs.Builder().withSecurityGroupIdsString(securityGroupIdsString).build();
        InstanceInputs instanceInputs = new InstanceInputs.Builder()
                .withAttribute(attribute)
                .withAttributeValue(attributeValue)
                .withDisableApiTermination(disableApiTermination)
                .withEnaSupport(enaSupport)
                .withInstanceInitiatedShutdownBehavior(instanceInitiatedShutdownBehavior)
                .withKernel(kernel)
                .withRamdisk(ramdisk)
                .withSourceDestinationCheck(sourceDestinationCheck)
                .withSriovNetSupport(sriovNetSupport)
                .withUserData(userData)
                .build();
        return new QueryApiExecutor().execute(commonInputs, customInputs, ebsInputs, iamInputs, instanceInputs);
    } catch (Exception e) {
        // Convert any failure into the standard result map.
        return ExceptionProcessor.getExceptionResult(e);
    }
}
/**
 * Returns a LoaderPipe wrapping the pipe registered under the given name,
 * bound to the support-library fragment and the shared loader-id map.
 *
 * NOTE(review): if no pipe was registered under {@code name},
 * {@code pipes.get(name)} returns null and the adapter is built around a
 * null pipe — confirm callers always register first.
 */
public static LoaderPipe getPipe(String name, android.support.v4.app.Fragment fragment, Context applicationContext) {
    Pipe pipe = pipes.get(name);
    LoaderAdapter adapter = new LoaderAdapter(fragment, applicationContext, pipe, name);
    adapter.setLoaderIds(loaderIdsForNamed);
    return adapter;
}
/**
 * Recomputes the cached highest alert by scanning the current alert list,
 * holding the list's monitor for the duration, and clears the
 * recalculation flag when done.
 */
private void calculateHighestAlert() {
    synchronized (alerts) {
        highestAlert = null;
        for (Alert alert : alerts) {
            if (isHighestAlert(alert)) {
                highestAlert = alert;
            }
        }
        // Recalculation complete; reset the dirty flag.
        calculateHighestAlert = false;
    }
}
def _free(self, ptr):
    """
    Handler for any libc `free` SimProcedure call. If the heap has faithful
    support for `free`, it ought to be implemented in a `free` function (as
    opposed to the `_free` function).

    :param ptr: the location in memory to be freed
    :raises NotImplementedError: always, in this base implementation
    """
    # Subclasses must override; the message names the missing method and
    # the concrete class to ease debugging.
    raise NotImplementedError("%s not implemented for %s" % (self._free.__func__.__name__,
                                                             self.__class__.__name__))
def from_bytes(cls, bitstream, decode_payload=True):
    r'''
    Parse the given packet and update properties accordingly

    >>> data_hex = ('c033d3c10000000745c0005835400000'
    ...             'ff06094a254d38204d45d1a30016f597'
    ...             'a1c3c7406718bf1b50180ff0793f0000'
    ...             'b555e59ff5ba6aad33d875c600fd8c1f'
    ...             'c5268078f365ee199179fbd09d09d690'
    ...             '193622a6b70bcbc7bf5f20dda4258801')
    >>> data = data_hex.decode('hex')
    >>> message = DataPacket.from_bytes(data)
    >>> message.echo_nonce_request
    False
    >>> message.nonce
    '3\xd3\xc1'
    >>> message.source_map_version
    >>> message.destination_map_version
    >>> message.lsb
    ... # doctest: +ELLIPSIS
    [True, True, True, False, False, ..., False, False, False, False]
    >>> message.instance_id
    >>> bytes(message.payload)
    ... # doctest: +ELLIPSIS
    'E\xc0\x00X5@\x00\x00\xff\x06\tJ%M8...\xdd\xa4%\x88\x01'
    '''
    packet = cls()

    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)

    # Read the flags
    (nonce_present,
     lsb_enabled,
     packet.echo_nonce_request,
     map_version_present,
     instance_id_present) = bitstream.readlist('5*bool')

    # Skip over reserved bits
    bitstream.read(3)

    # Parse nonce or map versions (the 24-bit field is shared).
    if nonce_present:
        # Nonce: yes, versions: no
        packet.nonce = bitstream.read('bytes:3')
        packet.source_map_version = None
        packet.destination_map_version = None
    elif map_version_present:
        # Nonce: no, versions: yes
        packet.nonce = None
        (packet.source_map_version,
         packet.destination_map_version) = bitstream.readlist('2*uint:12')
    else:
        # Nonce: no, versions: no
        packet.nonce = None
        packet.source_map_version = None
        packet.destination_map_version = None

        # Skip over the nonce/map-version bits
        bitstream.read(24)

    # Parse instance-id; its presence shortens the LSB field.
    if instance_id_present:
        packet.instance_id = bitstream.read('uint:24')

        # 8 bits remaining for LSB
        lsb_bits = 8
    else:
        # 32 bits remaining for LSB
        lsb_bits = 32

    # Parse LSBs
    if lsb_enabled:
        packet.lsb = bitstream.readlist('%d*bool' % lsb_bits)

        # Reverse for readability: least significant locator-bit first
        packet.lsb.reverse()
    else:
        # Skip over the LSBs
        bitstream.read(lsb_bits)

    # The rest of the packet is payload
    remaining = bitstream[bitstream.pos:]

    # Parse IP packet: dispatch on the IP version nibble, falling back to
    # raw bytes for anything unrecognized.
    if len(remaining):
        ip_version = remaining.peek('uint:4')
        if ip_version == 4:
            packet.payload = IPv4Packet.from_bytes(remaining, decode_payload=decode_payload)
        elif ip_version == 6:
            packet.payload = IPv6Packet.from_bytes(remaining, decode_payload=decode_payload)
        else:
            packet.payload = remaining.bytes

    # Verify that the properties make sense
    packet.sanitize()

    return packet
def get_symbol_at_address(self, address):
    """
    Tries to find the closest matching symbol for the given address.

    @type  address: int
    @param address: Memory address to query.

    @rtype: None or tuple( str, int, int )
    @return: Returns a tuple consisting of:
        - Name
        - Address
        - Size (in bytes)
        Returns C{None} if no symbol could be matched.
    """
    # Any module may have symbols pointing anywhere in memory, so there's
    # no easy way to optimize this. I guess we're stuck with brute force.
    found = None
    for (SymbolName, SymbolAddress, SymbolSize) in self.iter_symbols():
        # Symbols above the target address can never match.
        if SymbolAddress > address:
            continue

        # An exact match wins immediately.
        if SymbolAddress == address:
            found = (SymbolName, SymbolAddress, SymbolSize)
            break

        if SymbolAddress < address:
            # Keep the current candidate if it is closer to the target
            # address than this symbol; otherwise adopt this one.
            if found and (address - found[1]) < (address - SymbolAddress):
                continue
            else:
                found = (SymbolName, SymbolAddress, SymbolSize)
    return found
def parse_email(self):
    """Email address parsing is done in several stages.

    First the name of the email user is determined.
    Then it looks for a '@' as a delimiter between the name and the site.
    Lastly the email site is matched.
    Each part's string is stored, combined and returned.

    :raises PartpyError: when the name, '@' separator or site is missing.
    """
    email = []

    # Match from current char until a non lower cased alpha
    name = self.match_string_pattern(spat.alphal)
    if not name:
        raise PartpyError(self, 'Expected a valid name')
    email.append(name)  # Store the name
    self.eat_string(name)  # Eat the name

    nextchar = self.get_char()
    if not nextchar == '@':
        raise PartpyError(self, 'Expecting @, found: ' + nextchar)
    email.append(nextchar)
    self.eat_length(1)  # Eat the '@' symbol

    # Use string pattern matching to match all lower cased alphas or '.'s.
    site = self.match_string_pattern(spat.alphal + '.')
    if not site:
        raise PartpyError(self, 'Expecting a site, found: ' + site)
    email.append(site)
    self.eat_string(site)  # Eat the site

    return ''.join(email)
def copy(self, name=None, prefix=None):
    """A copy of this :class:`Config` container.

    If ``prefix`` is given, it prefixes all non
    :ref:`global settings <setting-section-global-server-settings>`
    with it. Used when multiple applications are loaded.
    """
    cls = self.__class__
    clone = cls.__new__(cls)
    clone.__dict__.update(self.__dict__)
    if prefix:
        clone.prefix = prefix
        # Rebuild the settings map with prefixed copies of each setting.
        old_settings = clone.settings
        clone.settings = {}
        for original in old_settings.values():
            copied = original.copy(name, prefix)
            clone.settings[copied.name] = copied
    clone.params = clone.params.copy()
    return clone
/**
 * Creates a constant delay of the given amount.
 *
 * @deprecated use the {@code Duration}-based overload instead; this
 *             variant merely converts and delegates.
 */
@Deprecated
public static Delay constant(int delay, TimeUnit timeUnit) {
    LettuceAssert.notNull(timeUnit, "TimeUnit must not be null");
    return constant(Duration.ofNanos(timeUnit.toNanos(delay)));
}
python | def _contains_value(self, value):
"""Helper function for __contains__ to check a single value is contained within the interval"""
g = operator.gt if self._lower is self.OPEN else operator.ge
l = operator.lt if self._upper is self.OPEN else operator.le
return g(value, self.lower_value) and l(value, self._upper_value) |
def change_default(
    kls,
    key,
    new_default,
    new_converter=None,
    new_reference_value=None,
):
    """Return a new configman Option that is a copy of an existing one,
    but carrying a different default value.

    Optionally also swaps in a new from-string converter and a new
    reference value.
    """
    option_copy = kls.get_required_config()[key].copy()
    option_copy.default = new_default
    if new_converter:
        option_copy.from_string_converter = new_converter
    if new_reference_value:
        option_copy.reference_value_from = new_reference_value
    return option_copy
/**
 * Trains a linear classifier semi-supervised: weights are learned from the
 * labelled data plus biased data corrected by the given confusion matrix.
 */
public Classifier<L, F> trainClassifierSemiSup(GeneralDataset<L, F> data, GeneralDataset<L, F> biasedData, double[][] confusionMatrix, double[] initial) {
    double[][] weights = trainWeightsSemiSup(data, biasedData, confusionMatrix, initial);
    LinearClassifier<L, F> classifier = new LinearClassifier<L, F>(weights, data.featureIndex(), data.labelIndex());
    return classifier;
}
/**
 * Binds this editor to its target; only DBMetaColumnNode is supported.
 */
public void setEditorTarget (PropertyEditorTarget target)
{
    if (target instanceof DBMetaColumnNode)
    {
        super.setEditorTarget(target);
        // Populate the text field from the node's column-name attribute.
        this.tfColumnName.setText((String)target.getAttribute(DBMetaColumnNode.ATT_COLUMN_NAME));
    }
    else
    {
        throw new UnsupportedOperationException("This editor can only edit DBMetaColumnNode objects");
    }
}
java | public void run() throws IllegalArgumentException {
List<List<ResultEntry>> groups = null;
for (int i = 0; i < this.repeatedRuns; ++i) {
this.solver.getResult().clearResults();
this.solver.run();
Result result = this.solver.getResult();
if (groups == null) {
// create empty groups
groups = new ArrayList<List<ResultEntry>>(result.getResultEntries().size());
// allocate space in every group
for (int j = 0; j < result.getResultEntries().size(); ++j) {
groups.add(new ArrayList<ResultEntry>(this.repeatedRuns));
}
}
if (result.getResultEntries().size() != groups.size())
throw new IllegalArgumentException(String.format(
"Wrapped solver must return result with identical number of elements each run, " +
"given %d in run %d, expected %d",
result.getResultEntries().size(), i, groups.size()));
for (int j = 0; j < result.getResultEntries().size(); j++) {
ResultEntry resultEntry = result.getResultEntries().get(j);
groups.get(j).add(resultEntry);
}
}
int middleIndex = this.repeatedRuns / 2;
for (List<ResultEntry> group : groups) {
Collections.sort(group, new ResultEntryFitnessComparator());
if (this.logger.isDebugEnabled()) {
this.logger.debug("Sorted result entries, dump:");
for (int i = 0; i < group.size(); i++) {
ResultEntry resultEntry = group.get(i);
this.logger.debug(i + ". " + resultEntry.getBestFitness());
}
}
this.getResult().addEntry(group.get(middleIndex));
}
if (this.file != null) {
try {
PrintStream printStream = new PrintStream(file);
for (List<ResultEntry> group : groups) {
printStream.printf("Sorted entries for %s/%s\n", group.get(0).getAlgorithm(), group.get(0).getProblem());
for (int i = 0; i < group.size(); i++) {
printStream.printf("%03d. %.5f\n", i, group.get(i).getBestFitness());
}
}
} catch (FileNotFoundException e) {
e.printStackTrace();
}
}
} |
def remove_enclosure(self, left_char, right_char):
    """
    Remove enclosure pair from set of enclosures.

    :param str left_char: left character of enclosure pair - e.g. "("
    :param str right_char: right character of enclosure pair - e.g. ")"
    """
    assert len(left_char) == 1, \
        "Parameter left_char must be character not string"
    assert len(right_char) == 1, \
        "Parameter right_char must be character not string"
    # discard() is a no-op when the pair is absent, matching the old
    # "check then remove" behaviour.
    self._enclosure.discard((left_char, right_char))
    self._after_tld_chars = self._get_after_tld_chars()
/**
 * Returns the in-order predecessor of the given entry, or null if none.
 */
static <K,V> TreeMapEntry<K,V> predecessor(TreeMapEntry<K,V> t) {
    if (t == null)
        return null;
    else if (t.left != null) {
        // Predecessor is the right-most node of the left subtree.
        TreeMapEntry<K,V> p = t.left;
        while (p.right != null)
            p = p.right;
        return p;
    } else {
        // No left subtree: climb until we arrive from a right child.
        TreeMapEntry<K,V> p = t.parent;
        TreeMapEntry<K,V> ch = t;
        while (p != null && ch == p.left) {
            ch = p;
            p = p.parent;
        }
        return p;
    }
}
// Fetches the value stored under (collection, key) for the given channel/tx
// by round-tripping a GET_STATE message through the chaincode support service.
ByteString getState(String channelId, String txId, String collection, String key) {
    return invokeChaincodeSupport(newGetStateEventMessage(channelId, txId, collection, key));
}
/**
 * Counts the non-soft-deleted rows of the table mapped by {@code clazz},
 * logging the generated SQL and slow executions.
 * NOTE(review): queryForObject can return null for an empty result, which
 * would NPE on unboxing — confirm SELECT COUNT always yields one row here.
 */
private int getTotal(Class<?> clazz) {
    StringBuilder sql = new StringBuilder();
    sql.append(SQLUtils.getSelectCountSQL(clazz));
    sql.append(SQLUtils.autoSetSoftDeleted("", clazz));
    log(sql);
    long start = System.currentTimeMillis();
    int rows = jdbcTemplate.queryForObject(sql.toString(), Integer.class);
    long cost = System.currentTimeMillis() - start;
    logSlow(cost, sql.toString(), null);
    return rows;
}
def new_encoded_stream(args, stream):
    '''Return a stream writer.'''
    # Plain pass-through unless ASCII-only printing was requested.
    if not args.ascii_print:
        return stream
    return wpull.util.ASCIIStreamWriter(stream)
/**
 * Reads the project time format stored in the given field, defaulting to
 * twelve-hour format when the field is out of range or empty.
 */
public ProjectTimeFormat getTimeFormat(int field)
{
    ProjectTimeFormat result;
    if ((field < m_fields.length) && (m_fields[field].length() != 0))
    {
        result = ProjectTimeFormat.getInstance(Integer.parseInt(m_fields[field]));
    }
    else
    {
        result = ProjectTimeFormat.TWELVE_HOUR;
    }
    return (result);
}
def replace(self, s, data, attrs=None):
    """
    Replace the attributes of the plotter data in a string

    %(replace_note)s

    Parameters
    ----------
    s: str
        String where the replacements shall be made
    data: InteractiveBase
        Data object from which to use the coordinates and insert the
        coordinate and attribute informations
    attrs: dict
        Meta attributes that shall be used for replacements. If None, it
        will be gained from `data.attrs`

    Returns
    -------
    str
        `s` with inserted informations"""
    # insert labels
    s = s.format(**self.rc['labels'])
    # replace attributes
    attrs = attrs or data.attrs
    if hasattr(getattr(data, 'psy', None), 'arr_name'):
        # copy before mutating so the caller's dict stays untouched
        attrs = attrs.copy()
        attrs['arr_name'] = data.psy.arr_name
    s = safe_modulo(s, attrs)
    # replace datetime.datetime like time informations
    if isinstance(data, InteractiveList):
        data = data[0]
    tname = self.any_decoder.get_tname(
        next(self.plotter.iter_base_variables), data.coords)
    if tname is not None and tname in data.coords:
        time = data.coords[tname]
        # only scalar (0-d) time coordinates can be strftime'd
        if not time.values.ndim:
            try:  # assume a valid datetime.datetime instance
                s = pd.to_datetime(str(time.values[()])).strftime(s)
            except ValueError:
                pass
    if six.PY2:
        return s.decode('utf-8')
    return s
java | public static boolean isString( Object obj ) {
if( (obj instanceof String)
|| (obj instanceof Character)
|| (obj != null && (obj.getClass() == Character.TYPE || String.class.isAssignableFrom( obj.getClass() ))) ){
return true;
}
return false;
} |
def _setup_chassis(self):
    """
    Sets up the router with the corresponding chassis
    (create slots and insert default adapters).
    """
    self._create_slots(2)
    # Slot 0 gets the chassis' integrated adapter; slot 1 is left empty
    # here — presumably filled elsewhere/user-configurable (verify).
    self._slots[0] = self.integrated_adapters[self._chassis]()
/**
 * Retrieves a single Azure affinity group by id; the model's name is used
 * for both the id and name of the returned AffinityGroup.
 */
@Nonnull
@Override
public AffinityGroup get(@Nonnull String affinityGroupId) throws InternalException, CloudException {
    if(affinityGroupId == null || affinityGroupId.isEmpty())
        throw new InternalException("Please specify the id for the affinity group you want to retrieve.");
    AzureMethod method = new AzureMethod(this.provider);
    final AffinityGroupModel affinityGroupModel = method.get(AffinityGroupModel.class, String.format(RESOURCE_AFFINITYGROUP, affinityGroupId));
    //TODO see if name is enough to be used as an id
    return AffinityGroup.getInstance(affinityGroupModel.getName(),affinityGroupModel.getName(),affinityGroupModel.getDescription(), affinityGroupModel.getLocation(), null);
}
def launch_gateway(conf=None, popen_kwargs=None):
    """
    launch jvm gateway

    :param conf: spark configuration passed to spark-submit
    :param popen_kwargs: Dictionary of kwargs to pass to Popen when spawning
        the py4j JVM. This is a developer feature intended for use in
        customizing how pyspark interacts with the py4j JVM (e.g., capturing
        stdout/stderr).
    :return: a connected JavaGateway; ``gateway.proc`` holds the Popen
        handle of the spawned JVM (or None when reusing an existing one)
    """
    if "PYSPARK_GATEWAY_PORT" in os.environ:
        gateway_port = int(os.environ["PYSPARK_GATEWAY_PORT"])
        gateway_secret = os.environ["PYSPARK_GATEWAY_SECRET"]
        # Process already exists
        proc = None
    else:
        SPARK_HOME = _find_spark_home()
        # Launch the Py4j gateway using Spark's run command so that we pick up the
        # proper classpath and settings from spark-env.sh
        on_windows = platform.system() == "Windows"
        script = "./bin/spark-submit.cmd" if on_windows else "./bin/spark-submit"
        command = [os.path.join(SPARK_HOME, script)]
        if conf:
            for k, v in conf.getAll():
                command += ['--conf', '%s=%s' % (k, v)]
        submit_args = os.environ.get("PYSPARK_SUBMIT_ARGS", "pyspark-shell")
        if os.environ.get("SPARK_TESTING"):
            submit_args = ' '.join([
                "--conf spark.ui.enabled=false",
                submit_args
            ])
        command = command + shlex.split(submit_args)

        # Create a temporary directory where the gateway server should write the connection
        # information.
        conn_info_dir = tempfile.mkdtemp()
        try:
            fd, conn_info_file = tempfile.mkstemp(dir=conn_info_dir)
            os.close(fd)
            os.unlink(conn_info_file)

            env = dict(os.environ)
            env["_PYSPARK_DRIVER_CONN_INFO_PATH"] = conn_info_file

            # Launch the Java gateway.
            popen_kwargs = {} if popen_kwargs is None else popen_kwargs
            # We open a pipe to stdin so that the Java gateway can die when the pipe is broken
            popen_kwargs['stdin'] = PIPE
            # We always set the necessary environment variables.
            popen_kwargs['env'] = env
            if not on_windows:
                # Don't send ctrl-c / SIGINT to the Java gateway:
                def preexec_func():
                    signal.signal(signal.SIGINT, signal.SIG_IGN)
                popen_kwargs['preexec_fn'] = preexec_func
                proc = Popen(command, **popen_kwargs)
            else:
                # preexec_fn not supported on Windows
                proc = Popen(command, **popen_kwargs)

            # Wait for the file to appear, or for the process to exit, whichever happens first.
            # NOTE(review): proc.poll() returns 0 (falsy) on a *clean* exit, so
            # this loop only stops early on a non-zero exit — confirm intended.
            while not proc.poll() and not os.path.isfile(conn_info_file):
                time.sleep(0.1)

            if not os.path.isfile(conn_info_file):
                raise Exception("Java gateway process exited before sending its port number")

            with open(conn_info_file, "rb") as info:
                gateway_port = read_int(info)
                gateway_secret = UTF8Deserializer().loads(info)
        finally:
            shutil.rmtree(conn_info_dir)

        # In Windows, ensure the Java child processes do not linger after Python has exited.
        # In UNIX-based systems, the child process can kill itself on broken pipe (i.e. when
        # the parent process' stdin sends an EOF). In Windows, however, this is not possible
        # because java.lang.Process reads directly from the parent process' stdin, contending
        # with any opportunity to read an EOF from the parent. Note that this is only best
        # effort and will not take effect if the python process is violently terminated.
        if on_windows:
            # In Windows, the child process here is "spark-submit.cmd", not the JVM itself
            # (because the UNIX "exec" command is not available). This means we cannot simply
            # call proc.kill(), which kills only the "spark-submit.cmd" process but not the
            # JVMs. Instead, we use "taskkill" with the tree-kill option "/t" to terminate all
            # child processes in the tree (http://technet.microsoft.com/en-us/library/bb491009.aspx)
            def killChild():
                Popen(["cmd", "/c", "taskkill", "/f", "/t", "/pid", str(proc.pid)])
            atexit.register(killChild)

    # Connect to the gateway
    gateway = JavaGateway(
        gateway_parameters=GatewayParameters(port=gateway_port, auth_token=gateway_secret,
                                             auto_convert=True))
    # Store a reference to the Popen object for use by the caller (e.g., in reading stdout/stderr)
    gateway.proc = proc
    # Import the classes used by PySpark
    java_import(gateway.jvm, "org.apache.spark.SparkConf")
    java_import(gateway.jvm, "org.apache.spark.api.java.*")
    java_import(gateway.jvm, "org.apache.spark.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.ml.python.*")
    java_import(gateway.jvm, "org.apache.spark.mllib.api.python.*")
    # TODO(davies): move into sql
    java_import(gateway.jvm, "org.apache.spark.sql.*")
    java_import(gateway.jvm, "org.apache.spark.sql.api.python.*")
    java_import(gateway.jvm, "org.apache.spark.sql.hive.*")
    java_import(gateway.jvm, "scala.Tuple2")

    return gateway
/**
 * Returns the factor by which coordinates must be scaled so the container's
 * average bond length equals {@code bondLength}; 1 when the current average
 * is zero or NaN (nothing sensible to scale).
 */
public static double getScaleFactor(IAtomContainer container, double bondLength) {
    double currentAverageBondLength = getBondLengthAverage(container);
    if (currentAverageBondLength == 0 || Double.isNaN(currentAverageBondLength)) return 1;
    return bondLength / currentAverageBondLength;
}
def generate_value_label(self, byteorder, encoding):
    """
    Generate the binary (Stata value-label table) form of this label.

    Parameters
    ----------
    byteorder : str
        Byte order of the output
    encoding : str
        File encoding

    Returns
    -------
    value_label : bytes
        Bytes containing the formatted value label
    """
    self._encoding = encoding
    bio = BytesIO()
    null_string = '\x00'
    null_byte = b'\x00'

    # len
    bio.write(struct.pack(byteorder + 'i', self.len))

    # labname — truncated to 32 chars and padded to 33 bytes
    labname = self._encode(_pad_bytes(self.labname[:32], 33))
    bio.write(labname)

    # padding - 3 bytes
    for i in range(3):
        bio.write(struct.pack('c', null_byte))

    # value_label_table
    # n - int32
    bio.write(struct.pack(byteorder + 'i', self.n))

    # textlen - int32
    bio.write(struct.pack(byteorder + 'i', self.text_len))

    # off - int32 array (n elements)
    for offset in self.off:
        bio.write(struct.pack(byteorder + 'i', offset))

    # val - int32 array (n elements)
    for value in self.val:
        bio.write(struct.pack(byteorder + 'i', value))

    # txt - Text labels, null terminated
    for text in self.txt:
        bio.write(self._encode(text + null_string))

    bio.seek(0)
    return bio.read()
/**
 * Returns the high-order part of {@code d} by masking off the low-order
 * mantissa bits (MASK_30BITS), as used in split double-double arithmetic.
 * Values within the subnormal range are returned unchanged.
 */
private static double doubleHighPart(double d) {
    if (d > -Precision.SAFE_MIN && d < Precision.SAFE_MIN){
        return d; // These are un-normalised - don't try to convert
    }
    long xl = Double.doubleToLongBits(d);
    xl = xl & MASK_30BITS; // Drop low order bits
    return Double.longBitsToDouble(xl);
}
/**
 * Appends the given instances to this request's instance list (created
 * lazily on first use) and returns {@code this} for call chaining.
 */
public DeregisterInstancesFromLoadBalancerRequest withInstances(Instance... instances) {
    if (this.instances == null) {
        setInstances(new com.amazonaws.internal.SdkInternalList<Instance>(instances.length));
    }
    for (Instance ele : instances) {
        this.instances.add(ele);
    }
    return this;
}
def assign(self, attrs):
    """Merge new attributes

    Each key/value pair in *attrs* becomes an attribute on this object.
    """
    for attr_name, attr_value in attrs.items():
        setattr(self, attr_name, attr_value)
def list_external_tools_courses(self, course_id, include_parents=None, search_term=None, selectable=None):
    """
    List external tools.

    Returns the paginated list of external tools for the current context.
    See the get request docs for a single tool for a list of properties on an external tool.
    """
    path = {}
    data = {}
    params = {}

    # REQUIRED - PATH - course_id
    """ID"""
    path["course_id"] = course_id

    # OPTIONAL - search_term
    """The partial name of the tools to match and return."""
    if search_term is not None:
        params["search_term"] = search_term

    # OPTIONAL - selectable
    """If true, then only tools that are meant to be selectable are returned"""
    if selectable is not None:
        params["selectable"] = selectable

    # OPTIONAL - include_parents
    """If true, then include tools installed in all accounts above the current context"""
    if include_parents is not None:
        params["include_parents"] = include_parents

    self.logger.debug("GET /api/v1/courses/{course_id}/external_tools with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("GET", "/api/v1/courses/{course_id}/external_tools".format(**path), data=data, params=params, no_data=True)
def chunks(data, chunk_size):
    """ Yield chunk_size chunks from data.

    Works on any sliceable sequence (list, str, bytes, ...).  The final
    chunk may be shorter when len(data) is not a multiple of chunk_size.
    """
    # FIX: ``range`` works on both Python 2 and 3; ``xrange`` (previously
    # used) raises NameError on Python 3.
    for i in range(0, len(data), chunk_size):
        yield data[i:i + chunk_size]
def get_property(self, key):
    """Returns the value of the network attachment property with the given name.

    If the requested data @a key does not exist, this function will
    succeed and return an empty string in the @a value argument.

    in key of type str
        Name of the property to get.

    return value of type str
        Current property value.

    raises :class:`OleErrorInvalidarg`
        @a name is @c null or empty.
    """
    # NOTE: `basestring` makes this Python-2 only, consistent with the
    # rest of these generated bindings.
    if not isinstance(key, basestring):
        raise TypeError("key can only be an instance of type basestring")
    value = self._call("getProperty",
                       in_p=[key])
    return value
def setup_callbacks(self):
    '''
    Assign attributes for pygit2 callbacks
    '''
    # RemoteCallbacks only exists in pygit2 >= 0.23.2; older versions get
    # self.remotecallbacks = None and cannot disable SSL verification.
    if PYGIT2_VERSION >= _LooseVersion('0.23.2'):
        self.remotecallbacks = pygit2.RemoteCallbacks(
            credentials=self.credentials)
        if not self.ssl_verify:
            # Override the certificate_check function with a lambda that
            # just returns True, thus skipping the cert check.
            self.remotecallbacks.certificate_check = \
                lambda *args, **kwargs: True
    else:
        self.remotecallbacks = None
        if not self.ssl_verify:
            warnings.warn(
                'pygit2 does not support disabling the SSL certificate '
                'check in versions prior to 0.23.2 (installed: {0}). '
                'Fetches for self-signed certificates will fail.'.format(
                    PYGIT2_VERSION
                )
            )
def load_mode(node):
    """Load one observing mode from a plain mapping *node*."""
    obs_mode = ObservingMode()
    obs_mode.__dict__.update(node)
    # Attach the optional hooks (validator, builder, tagger) declared in
    # the node description.
    for loader in (load_mode_validator, load_mode_builder, load_mode_tagger):
        loader(obs_mode, node)
    return obs_mode
def bna_config_cmd_output_session_id(self, **kwargs):
    """Auto Generated Code
    """
    # The returned tree is rooted at <bna_config_cmd>; the intermediate
    # <config> element of the generated original was discarded anyway.
    config = ET.Element("bna_config_cmd")
    output = ET.SubElement(config, "output")
    session_id = ET.SubElement(output, "session-id")
    session_id.text = kwargs.pop('session_id')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
/**
 * Marshals every field of a JobFlowInstancesConfig into the request
 * protocol; a null input is reported as a client error.
 */
public void marshall(JobFlowInstancesConfig jobFlowInstancesConfig, ProtocolMarshaller protocolMarshaller) {
    if (jobFlowInstancesConfig == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(jobFlowInstancesConfig.getMasterInstanceType(), MASTERINSTANCETYPE_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getSlaveInstanceType(), SLAVEINSTANCETYPE_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getInstanceCount(), INSTANCECOUNT_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getInstanceGroups(), INSTANCEGROUPS_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getInstanceFleets(), INSTANCEFLEETS_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getEc2KeyName(), EC2KEYNAME_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getPlacement(), PLACEMENT_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getKeepJobFlowAliveWhenNoSteps(), KEEPJOBFLOWALIVEWHENNOSTEPS_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getTerminationProtected(), TERMINATIONPROTECTED_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getHadoopVersion(), HADOOPVERSION_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getEc2SubnetId(), EC2SUBNETID_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getEc2SubnetIds(), EC2SUBNETIDS_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getEmrManagedMasterSecurityGroup(), EMRMANAGEDMASTERSECURITYGROUP_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getEmrManagedSlaveSecurityGroup(), EMRMANAGEDSLAVESECURITYGROUP_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getServiceAccessSecurityGroup(), SERVICEACCESSSECURITYGROUP_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getAdditionalMasterSecurityGroups(), ADDITIONALMASTERSECURITYGROUPS_BINDING);
        protocolMarshaller.marshall(jobFlowInstancesConfig.getAdditionalSlaveSecurityGroups(), ADDITIONALSLAVESECURITYGROUPS_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
/**
 * Unchecks this element: clicks until it is no longer selected, firing
 * dispatcher before/after hooks and optional GUI action logging.
 */
public void uncheck() {
    getDispatcher().beforeUncheck(this);
    RemoteWebElement e = (RemoteWebElement) getElement();
    // Click repeatedly in case a single click does not deselect.
    while (e.isSelected()) {
        e.click();
    }
    if (Config.getBoolConfigProperty(ConfigProperty.ENABLE_GUI_LOGGING)) {
        logUIAction(UIActions.UNCHECKED);
    }
    getDispatcher().afterUncheck(this);
}
/**
 * Stores {@code value} under {@code key} in the etcd key space via an
 * HTTP PUT with a form payload, returning the parsed response.
 */
public EtcdResponse put(final String key, final String value) throws EtcdException {
    UriComponentsBuilder builder = UriComponentsBuilder.fromUriString(KEYSPACE);
    builder.pathSegment(key);
    MultiValueMap<String, String> payload = new LinkedMultiValueMap<>(1);
    payload.set("value", value);
    return execute(builder, HttpMethod.PUT, payload, EtcdResponse.class);
}
/**
 * Throws when an item named {@code newName} (other than {@code variant})
 * already exists in {@code parent}.  The lookup runs with SYSTEM rights,
 * but the error stays vague when the caller may not discover the item.
 */
static void verifyItemDoesNotAlreadyExist(@Nonnull ItemGroup<?> parent, @Nonnull String newName, @CheckForNull Item variant) throws IllegalArgumentException, Failure {
    Item existing;
    try (ACLContext ctxt = ACL.as(ACL.SYSTEM)) {
        existing = parent.getItem(newName);
    }
    if (existing != null && existing != variant) {
        if (existing.hasPermission(Item.DISCOVER)) {
            String prefix = parent.getFullName();
            throw new IllegalArgumentException((prefix.isEmpty() ? "" : prefix + "/") + newName + " already exists");
        } else {
            // Cannot hide its existence, so at least be as vague as possible.
            throw new Failure("");
        }
    }
}
/**
 * Returns all direct and transitive subclasses of {@code cd}, caching the
 * computed set per qualified class name.
 */
private Collection<ClassDoc> subclasses(ClassDoc cd) {
    Collection<ClassDoc> ret = classToSubclass.get(cd.qualifiedName());
    if (ret == null) {
        ret = new TreeSet<ClassDoc>();
        List<ClassDoc> subs = classtree.subclasses(cd);
        if (subs != null) {
            ret.addAll(subs);
            // Recurse to pick up transitive subclasses.
            for (Iterator<ClassDoc> it = subs.iterator(); it.hasNext();) {
                ret.addAll(subclasses(it.next()));
            }
        }
        addAll(classToSubclass, cd, ret);
    }
    return ret;
}
/**
 * Copies the contents of {@code in} to {@code file} with buffered output.
 * The input stream is NOT closed here — that is the caller's job.
 * NOTE(review): closeQuietly swallows close/flush failures, so a late
 * write error can go unreported.
 */
public static void copy(InputStream in, File file) throws IOException {
    OutputStream out = new BufferedOutputStream(new FileOutputStream(file));
    try {
        IOUtils.copy(in, out);
    }
    finally {
        IOUtils.closeQuietly(out);
    }
}
java | void addSubsystem(final OperationTransformerRegistry registry, final String name, final ModelVersion version) {
final OperationTransformerRegistry profile = registry.getChild(PathAddress.pathAddress(PROFILE));
final OperationTransformerRegistry server = registry.getChild(PathAddress.pathAddress(HOST, SERVER));
final PathAddress address = PathAddress.pathAddress(PathElement.pathElement(ModelDescriptionConstants.SUBSYSTEM, name));
subsystem.mergeSubtree(profile, Collections.singletonMap(address, version));
if(server != null) {
subsystem.mergeSubtree(server, Collections.singletonMap(address, version));
}
} |
/**
 * Merges service-ref and port properties (port values win) and buckets them
 * by configuration prefix into {@code servicePropertiesMap}, keyed by the
 * port QName.
 */
private void prepareProperties(QName portQName, Map<String, String> serviceRefProps, Map<String, String> portProps) throws IOException {
    // Merge the properties form port and service.
    Map<String, String> allProperties = new HashMap<String, String>();
    if (null != serviceRefProps) {
        allProperties.putAll(serviceRefProps);
    }
    if (null != portProps) {
        // Port-level values overwrite service-ref values.
        allProperties.putAll(portProps);
    }
    for (Map.Entry<String, String> entry : servicePidToPropertyPrefixMap.entrySet()) {
        String serviceFactoryPid = entry.getKey();
        String prefix = entry.getValue();
        // Extract the properties according to different property prefix,
        // update the extracted properties by corresponding factory service.
        Map<String, String> extractProps = extract(prefix, allProperties);
        // Put the port QName and the properties into the servicePropertiesMap
        ConfigProperties configProps = new ConfigProperties(serviceFactoryPid, extractProps);
        Set<ConfigProperties> configSet = servicePropertiesMap.get(portQName);
        if (null == configSet) {
            configSet = new HashSet<ConfigProperties>();
            servicePropertiesMap.put(portQName, configSet);
        }
        if (configSet.contains(configProps)) {
            // re-add the config props
            configSet.remove(configProps);
            configSet.add(configProps);
        } else {
            configSet.add(configProps);
        }
    }
}
java | @Override
public boolean containsAll(Collection<?> collection) {
for (Object nextCandidateElement : collection) {
if (!contains(nextCandidateElement)) {
return false;
}
}
return true;
} |
/**
 * Performs a single tap on the given element, using the legacy touch-screen
 * API when available and always recording W3C pointer down/up actions.
 */
public TouchActions singleTap(WebElement onElement) {
    if (touchScreen != null) {
        action.addAction(new SingleTapAction(touchScreen, (Locatable) onElement));
    }
    tick(touchPointer.createPointerDown(0));
    tick(touchPointer.createPointerUp(0));
    return this;
}
def generate(variables, templates_path, main_template):
    """
    Render the main template with Jinja2.

    :Parameters:
        variables : dict
            Template parameters, passed through.
        templates_path : str
            Root directory for transclusions.
        main_template : str
            Contents of the main template.

    Returns the rendered output.
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(templates_path),
        lstrip_blocks=True,
        trim_blocks=True
    )

    def norm_alg_filename(alg_name):
        # Map an algorithm name to its filename fragment; fail loudly on
        # unknown names rather than emitting a broken reference.
        if alg_name in variables['globals']['algorithm_filename_parts']:
            return variables['globals']['algorithm_filename_parts'][alg_name]
        else:
            raise KeyError("{0} not found in globals.algorithm_filename_parts"
                           .format(alg_name))

    env.globals.update(norm_alg_filename=norm_alg_filename)
    template = env.from_string(main_template)
    return template.render(variables) + "\n"
def _add_pos1(token):
    """
    Adds a 'pos1' element to a frog token.
    """
    enriched = token.copy()
    # Major POS tag is everything before the feature parenthesis.
    major_pos = token['pos'].split("(")[0]
    enriched['pos1'] = _POSMAP[major_pos]
    return enriched
def read_group_info(self):
    """Get information about groups directly from the widget."""
    self.groups = []
    # Each tab widget exposes its own info dict via get_info().
    for i in range(self.tabs.count()):
        one_group = self.tabs.widget(i).get_info()
        # one_group['name'] = self.tabs.tabText(i)
        self.groups.append(one_group)
def re_acquire_lock(self, ltime=5):
    '''Re-acquire the lock.

    You must already own the lock; this is best called from
    within a :meth:`lock` block.

    :param int ltime: maximum time (in seconds) to own lock
    :return: the session lock identifier
    :raise rejester.exceptions.EnvironmentError:
      if we didn't already own the lock
    '''
    conn = redis.Redis(connection_pool=self.pool)
    # Atomically extend the expiry, but only if the stored value still
    # equals our session identifier (i.e. we still own the lock).
    script = conn.register_script('''
    if redis.call("get", KEYS[1]) == ARGV[1]
    then
        return redis.call("expire", KEYS[1], ARGV[2])
    else
        return -1
    end
    ''')
    ret = script(keys=[self._lock_name],
                 args=[self._session_lock_identifier, ltime])
    if ret != 1:
        raise EnvironmentError('failed to re-acquire lock')
    # logger.debug('re-acquired lock %s', self._lock_name)
    return self._session_lock_identifier
def _get_df(self):
    """Returns stellar model grid with desired bandpasses and with standard column names

    bands must be iterable, and are parsed according to :func:``get_band``
    """
    grids = {}
    df = pd.DataFrame()
    for bnd in self.bands:
        s, b = self.get_band(bnd, **self.kwargs)
        logging.debug('loading {} band from {}'.format(b, s))
        # Load each source HDF at most once.
        if s not in grids:
            grids[s] = self.get_hdf(s)
        # Copy the shared columns from the first grid encountered.
        if self.common_columns[0] not in df:
            df[list(self.common_columns)] = grids[s][list(self.common_columns)]
        col = grids[s][b]
        n_nan = np.isnan(col).sum()
        if n_nan > 0:
            logging.debug('{} NANs in {} column'.format(n_nan, b))
        df.loc[:, bnd] = col.values  # dunno why it has to be this way; something
        # funny with indexing.
    return df
/**
 * Resolves the shared LineageInfo from the broker, returning absent (and
 * logging a warning) when the broker is null or lineage is not configured.
 */
public static Optional<LineageInfo> getLineageInfo(@Nullable SharedResourcesBroker<GobblinScopeTypes> broker) {
    if (broker == null) {
        log.warn("Null broker. Will not track data lineage");
        return Optional.absent();
    }
    try {
        LineageInfo lineageInfo = broker.getSharedResource(new LineageInfoFactory(), EmptyKey.INSTANCE);
        return Optional.of(lineageInfo);
    } catch (NotConfiguredException e) {
        log.warn("Fail to get LineageInfo instance from broker. Will not track data lineage", e);
        return Optional.absent();
    }
}
/**
 * Returns the largest full end position computed for {@code position} in
 * the current document, or 0 when ignore spans do not apply or the docId
 * does not match.
 *
 * @throws IOException if {@code position} precedes the minimum position
 */
public int getMaxEndPosition(int docId, int position) throws IOException {
    if (ignoreSpans != null && docId == currentDocId) {
        if (position < minimumPosition) {
            throw new IOException(
                "Unexpected position, should be >= " + minimumPosition + "!");
        }
        computeFullEndPositionList(position);
        if (maxFullEndPosition.containsKey(position)) {
            return maxFullEndPosition.get(position);
        } else {
            return 0;
        }
    } else {
        return 0;
    }
}
/**
 * Validates and authenticates a join request under the cluster service
 * lock and, when all guards pass, starts the join for the requester.
 */
private void executeJoinRequest(JoinRequest joinRequest, Connection connection) {
    clusterServiceLock.lock();
    try {
        // Each guard short-circuits the join when it fails.
        if (checkJoinRequest(joinRequest, connection)) {
            return;
        }
        if (!authenticate(joinRequest)) {
            return;
        }
        if (!validateJoinRequest(joinRequest, joinRequest.getAddress())) {
            return;
        }
        startJoinRequest(joinRequest.toMemberInfo());
    } finally {
        clusterServiceLock.unlock();
    }
}
/**
 * Fetches the next page of certificates for the automation account.
 *
 * @param nextPageLink the NextLink from the previous successful call
 * @return an Observable emitting the next page of results
 */
public Observable<Page<CertificateInner>> listByAutomationAccountNextAsync(final String nextPageLink) {
    return listByAutomationAccountNextWithServiceResponseAsync(nextPageLink)
        .map(new Func1<ServiceResponse<Page<CertificateInner>>, Page<CertificateInner>>() {
            @Override
            public Page<CertificateInner> call(ServiceResponse<Page<CertificateInner>> response) {
                return response.body();
            }
        });
}
python | def _deserialize(cls, key, value, fields):
""" Marshal incoming data into Python objects."""
converter = cls._get_converter_for_field(key, None, fields)
return converter.deserialize(value) |
/**
 * Marshals every field of a GameSessionPlacement into the request
 * protocol; a null input is reported as a client error.
 */
public void marshall(GameSessionPlacement gameSessionPlacement, ProtocolMarshaller protocolMarshaller) {
    if (gameSessionPlacement == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(gameSessionPlacement.getPlacementId(), PLACEMENTID_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameSessionQueueName(), GAMESESSIONQUEUENAME_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getStatus(), STATUS_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameProperties(), GAMEPROPERTIES_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getMaximumPlayerSessionCount(), MAXIMUMPLAYERSESSIONCOUNT_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameSessionName(), GAMESESSIONNAME_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameSessionId(), GAMESESSIONID_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameSessionArn(), GAMESESSIONARN_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameSessionRegion(), GAMESESSIONREGION_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getPlayerLatencies(), PLAYERLATENCIES_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getStartTime(), STARTTIME_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getEndTime(), ENDTIME_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getIpAddress(), IPADDRESS_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getPort(), PORT_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getPlacedPlayerSessions(), PLACEDPLAYERSESSIONS_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getGameSessionData(), GAMESESSIONDATA_BINDING);
        protocolMarshaller.marshall(gameSessionPlacement.getMatchmakerData(), MATCHMAKERDATA_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
def get_slot_position(self, container, type='a'):
    """Returns the slot where the analyses from the type and container passed
    in are located within the worksheet.

    :param container: the container in which the analyses are grouped
    :param type: type of the analysis
    :return: the slot position, or None when not found / invalid input
    :rtype: int
    """
    if not container or type not in ALLOWED_ANALYSES_TYPES:
        return None
    uid = api.get_uid(container)
    layout = self.getLayout()
    for pos in layout:
        # Skip slots of another type or bound to a different container.
        if pos['type'] != type or pos['container_uid'] != uid:
            continue
        return to_int(pos['position'])
    return None
/**
 * Deserializes the given message body into an instance of {@code clazz},
 * wrapping empty bodies and parse failures in a ServiceException.
 */
<T> T _deserialize(String body, Class<T> clazz) {
    if(body == null || body.isEmpty()){
        throw new ServiceException(ErrorCode.REMOTE_DIRECTORY_SERVER_ERROR,
            ErrorCode.REMOTE_DIRECTORY_SERVER_ERROR.getMessageTemplate(),
            "the message body is empty");
    }
    try {
        return deserialize(body.getBytes(), clazz);
    } catch (IOException e) {
        throw new ServiceException(ErrorCode.REMOTE_DIRECTORY_SERVER_ERROR, e,
            ErrorCode.REMOTE_DIRECTORY_SERVER_ERROR.getMessageTemplate(),
            "unrecognized message, deserialize failed.");
    }
}
/**
 * Marshals a ReviewPolicy (name and parameters) into the request protocol;
 * a null input is reported as a client error.
 */
public void marshall(ReviewPolicy reviewPolicy, ProtocolMarshaller protocolMarshaller) {
    if (reviewPolicy == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }
    try {
        protocolMarshaller.marshall(reviewPolicy.getPolicyName(), POLICYNAME_BINDING);
        protocolMarshaller.marshall(reviewPolicy.getParameters(), PARAMETERS_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
def order_by_refs(envs):
    """
    Return topologically sorted list of environments.
    I.e. all referenced environments are placed before their references.
    """
    topology = {}
    by_name = {}
    for env in envs:
        topology[env['name']] = set(env['refs'])
        by_name[env['name']] = env
    # toposort_flatten yields names with dependencies first.
    return [by_name[name] for name in toposort_flatten(topology)]
/**
 * Looks up {@code key} in the resource bundle; when the resource is
 * missing, returns the key wrapped in '!' markers (standard i18n fallback
 * that makes untranslated keys visible in the UI).
 */
public static String getString(String key) {
    try {
        return RESOURCE_BUNDLE.getString(key);
    } catch (MissingResourceException e) {
        return '!' + key + '!';
    }
}
/**
 * Persists edits to a node and its parameters from the submitted form
 * groups, derives default download/mbean ports from the node port when
 * unset, attaches the optional zookeeper cluster, then redirects back to
 * the node list (or sets a form error on duplicate configuration).
 */
public void doEdit(@FormGroup("nodeInfo") Group nodeInfo, @FormGroup("nodeParameterInfo") Group nodeParameterInfo,
                   @Param("pageIndex") int pageIndex, @Param("searchKey") String searchKey,
                   @FormField(name = "formNodeError", group = "nodeInfo") CustomErrors err, Navigator nav)
        throws Exception {
    Node node = new Node();
    NodeParameter parameter = new NodeParameter();
    nodeInfo.setProperties(node);
    nodeParameterInfo.setProperties(parameter);
    // Default ports: download = port + 1, mbean = port + 2.
    if (parameter.getDownloadPort() == null || parameter.getDownloadPort() == 0) {
        parameter.setDownloadPort(node.getPort().intValue() + 1);
    }
    if (parameter.getMbeanPort() == null || parameter.getMbeanPort() == 0) {
        parameter.setMbeanPort(node.getPort().intValue() + 2);
    }
    Long autoKeeperclusterId = nodeParameterInfo.getField("autoKeeperclusterId").getLongValue();
    if (autoKeeperclusterId != null && autoKeeperclusterId > 0) {
        AutoKeeperCluster autoKeeperCluster = autoKeeperClusterService.findAutoKeeperClusterById(autoKeeperclusterId);
        parameter.setZkCluster(autoKeeperCluster);
    }
    node.setParameters(parameter);
    try {
        nodeService.modify(node);
    } catch (RepeatConfigureException rce) {
        // Duplicate node definition: surface a form-level error instead.
        err.setMessage("invalidNode");
        return;
    }
    nav.redirectToLocation("nodeList.htm?pageIndex=" + pageIndex + "&searchKey=" + urlEncode(searchKey));
}
// Records a SWRL expression as an effect when the current OWL value is an
// individual. The expression body is a single SameIndividual(x, x) atom built
// over that individual (both atom arguments are the same wrapped individual).
// NOTE(review): the expression is created unconditionally but only used inside
// the isIndividual() branch — confirm the unused case is intentional.
public void addExpression() {
    SWRL expression = getOWLModel().createSWRLExpression(null);
    if (getOWLValueObject().owlValue().isIndividual()) {
        // Wrap the OWL individual so it can participate in a SWRL atom.
        SWRLIndividual swrlIndividual = swrl().wrapIndividual(getOWLValueObject().owlValue().castTo(OWLIndividual.class));
        Atom atom = swrl().createSameIndividualAtom(swrlIndividual, swrlIndividual);
        expression.setBody(swrl().createList(atom));
        asOWLSResult().addEffect(expression);
    }
}
def install_package_command(package_name):
    """Install a python package from pip for the current user.

    Runs ``<python> -m pip install --user <package_name>`` with the same
    interpreter that is executing this code, which avoids guessing between
    ``python`` and ``python3`` per platform.

    :param package_name: name (and optional version spec) of the package
    """
    # Pass an argument list with shell=False so package_name cannot be used
    # for shell injection, and use sys.executable so pip targets the running
    # interpreter's environment.
    cmds = [sys.executable, '-m', 'pip', 'install', '--user', package_name]
    call(cmds)
def _execute_autoprops_on_class(object_type,  # type: Type[T]
                                include=None,  # type: Union[str, Tuple[str]]
                                exclude=None   # type: Union[str, Tuple[str]]
                                ):
    """
    This method will automatically add one getter and one setter for each constructor argument, except for those
    overridden using autoprops_override_decorate(), @getter_override or @setter_override.
    It will add a @contract on top of all setters (generated or overridden, if they don't already have one)

    :param object_type: the class on which to execute.
    :param include: a tuple of explicit attribute names to include (None means all)
    :param exclude: a tuple of explicit attribute names to exclude. In such case, include should be None.
    :return:
    """
    # 0. first check parameters
    validate_include_exclude(include, exclude)

    # 1. Find the __init__ constructor signature and possible pycontracts @contract
    constructor = get_constructor(object_type, allow_inheritance=True)
    s = signature(constructor)

    # option a) pycontracts
    contracts_dict = constructor.__contracts__ if hasattr(constructor, '__contracts__') else {}

    # option b) valid8
    validators_dict = constructor.__validators__ if hasattr(constructor, '__validators__') else {}

    # 2. For each attribute that is not 'self' and is included and not excluded, add the property
    added = []
    for attr_name in s.parameters.keys():
        if is_attr_selected(attr_name, include=include, exclude=exclude):
            added.append(attr_name)

            # pycontract (None when the attribute has no @contract)
            pycontract = contracts_dict.get(attr_name, None)

            # valid8 validators: create copies, because we will modify them (changing the validated function ref)
            if attr_name in validators_dict:
                validators = [copy(v) for v in validators_dict[attr_name]]
            else:
                validators = None

            _add_property(object_type, s.parameters[attr_name], pycontract=pycontract, validators=validators)

    # 3. Finally check that there is no overridden setter or getter that does not correspond to an attribute
    def _is_stale_override(fun):
        # A member is a stale override if it is callable and carries a getter or
        # setter override annotation pointing at an attribute that was NOT
        # generated above. (Fixes the original operator-precedence bug where,
        # due to `and` binding tighter than `or`, the setter branch did not
        # require the member to be callable.)
        if not callable(fun):
            return False
        for annotation in (__GETTER_OVERRIDE_ANNOTATION, __SETTER_OVERRIDE_ANNOTATION):
            if hasattr(fun, annotation) and getattr(fun, annotation) not in added:
                return True
        return False

    extra_overrides = getmembers(object_type, predicate=_is_stale_override)
    if len(extra_overrides) > 0:
        # Note: the original message was missing a space after "signature."
        raise AttributeError("Attribute named '" + extra_overrides[0][0] + "' was not found in constructor "
                             "signature. Therefore its getter/setter can not be overridden by function "
                             + extra_overrides[0][1].__qualname__)
def im_open(self, *, user: str, **kwargs) -> SlackResponse:
    """Opens a direct message channel.

    Args:
        user (str): The user id to open a DM with. e.g. 'W1234567890'
    """
    # Merge the required user id into the remaining keyword arguments and
    # send them as the JSON body of the API call.
    payload = dict(kwargs, user=user)
    return self.api_call("im.open", json=payload)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.