language | func_code_string
---|---|
python | def close_chromium(self):
'''
Close the remote chromium instance.
This command is normally executed as part of the class destructor.
It can be called early without issue, but calling ANY class functions
after the remote chromium instance is shut down will have unknown effects.
Note that if you are rapidly creating and destroying ChromeController instances,
you may need to *explicitly* call this before destruction.
'''
if self.cr_proc:
try:
if sys.platform.startswith('win'):  # 'win' in sys.platform would also match 'darwin'
self.__close_internal_windows()
else:
self.__close_internal_linux()
except Exception as e:
for line in traceback.format_exc().split("\n"):
self.log.error(line)
ACTIVE_PORTS.discard(self.port) |
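A brief usage sketch of the shutdown pattern the docstring describes; the `ChromeController()` construction and `do_work` below are hypothetical stand-ins, not the library's verified API:

```python
controller = ChromeController()  # hypothetical constructor
try:
    do_work(controller)          # hypothetical browser interaction
finally:
    # Close explicitly instead of relying on __del__, as the docstring
    # recommends when rapidly creating and destroying instances.
    controller.close_chromium()
```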
java | public synchronized BloomUpdate getUpdateFlag() {
if (nFlags == 0)
return BloomUpdate.UPDATE_NONE;
else if (nFlags == 1)
return BloomUpdate.UPDATE_ALL;
else if (nFlags == 2)
return BloomUpdate.UPDATE_P2PUBKEY_ONLY;
else
throw new IllegalStateException("Unknown flag combination");
} |
python | def _add_repr(cls, ns=None, attrs=None):
"""
Add a repr method to *cls*.
"""
if attrs is None:
attrs = cls.__attrs_attrs__
cls.__repr__ = _make_repr(attrs, ns)
return cls |
java | public void text(Object o) throws SAXException {
if (o!=null)
output.write(escape(o.toString()));
} |
java | public MockSubnet deleteSubnet(final String subnetId) {
if (subnetId != null && allMockSubnets.containsKey(subnetId)) {
return allMockSubnets.remove(subnetId);
}
return null;
} |
java | @Nullable
public String getAuthorizationUrl() throws UnsupportedEncodingException {
String authorizationCodeRequestUrl =
authorizationCodeFlow.newAuthorizationUrl().setScopes(scopes).build();
if (redirectUri != null) {
authorizationCodeRequestUrl += "&redirect_uri=" + URLEncoder.encode(redirectUri, "UTF-8");
}
return authorizationCodeRequestUrl;
} |
python | def _claim(cls, cdata: Any) -> "Tileset":
"""Return a new Tileset that owns the provided TCOD_Tileset* object."""
self = object.__new__(cls) # type: Tileset
if cdata == ffi.NULL:
raise RuntimeError("Tileset initialized with nullptr.")
self._tileset_p = ffi.gc(cdata, lib.TCOD_tileset_delete)
return self |
python | def read_rows(
self,
table_name,
app_profile_id=None,
rows=None,
filter_=None,
rows_limit=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Streams back the contents of all requested rows in key order, optionally
applying the same Reader filter to each. Depending on their size,
rows and cells may be broken up across multiple responses, but
atomicity of each row will still be preserved. See the
ReadRowsResponse documentation for details.
Example:
>>> from google.cloud import bigtable_v2
>>>
>>> client = bigtable_v2.BigtableClient()
>>>
>>> table_name = client.table_path('[PROJECT]', '[INSTANCE]', '[TABLE]')
>>>
>>> for element in client.read_rows(table_name):
... # process element
... pass
Args:
table_name (str): The unique name of the table from which to read. Values are of the form
``projects/<project>/instances/<instance>/tables/<table>``.
app_profile_id (str): This value specifies routing for replication. If not specified, the
"default" application profile will be used.
rows (Union[dict, ~google.cloud.bigtable_v2.types.RowSet]): The row keys and/or ranges to read. If not specified, reads from all rows.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowSet`
filter_ (Union[dict, ~google.cloud.bigtable_v2.types.RowFilter]): The filter to apply to the contents of the specified row(s). If unset,
reads the entirety of each row.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.bigtable_v2.types.RowFilter`
rows_limit (long): The read will terminate after committing to N rows' worth of results. The
default (zero) is to return all results.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
Iterable[~google.cloud.bigtable_v2.types.ReadRowsResponse].
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "read_rows" not in self._inner_api_calls:
self._inner_api_calls[
"read_rows"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.read_rows,
default_retry=self._method_configs["ReadRows"].retry,
default_timeout=self._method_configs["ReadRows"].timeout,
client_info=self._client_info,
)
request = bigtable_pb2.ReadRowsRequest(
table_name=table_name,
app_profile_id=app_profile_id,
rows=rows,
filter=filter_,
rows_limit=rows_limit,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("table_name", table_name)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["read_rows"](
request, retry=retry, timeout=timeout, metadata=metadata
) |
java | public <T> List<Predicate> byExample(ManagedType<T> mt, Path<T> mtPath, T mtValue, SearchParameters sp, CriteriaBuilder builder) {
List<Predicate> predicates = newArrayList();
for (SingularAttribute<? super T, ?> attr : mt.getSingularAttributes()) {
if (!isPrimaryKey(mt, attr)) {
continue;
}
Object attrValue = jpaUtil.getValue(mtValue, attr);
if (attrValue != null) {
predicates.add(builder.equal(mtPath.get(jpaUtil.attribute(mt, attr)), attrValue));
}
}
return predicates;
} |
python | def makeMany2ManyRelatedManager(formodel, name_relmodel, name_formodel):
'''formodel is the model to which the manager is related.'''
class _Many2ManyRelatedManager(Many2ManyRelatedManager):
pass
_Many2ManyRelatedManager.formodel = formodel
_Many2ManyRelatedManager.name_relmodel = name_relmodel
_Many2ManyRelatedManager.name_formodel = name_formodel
return _Many2ManyRelatedManager |
python | def _check_time_range(time_range, now):
'''
Check time range
'''
if _TIME_SUPPORTED:
_start = dateutil_parser.parse(time_range['start'])
_end = dateutil_parser.parse(time_range['end'])
return bool(_start <= now <= _end)
else:
log.error('Dateutil is required.')
return False |
python | def upsert(self, events):
"""Inserts/updates the given events into MySQL"""
existing = self.get_existing_keys(events)
inserts = [e for e in events if e[self.key] not in existing]
updates = [e for e in events if e[self.key] in existing]
self.insert(inserts)
self.update(updates) |
python | def header_echo(cls, request,
api_key: (Ptypes.header, String('API key'))) -> [
(200, 'Ok', String)]:
'''Echo the header parameter.'''
log.info('Echoing header param, value is: {}'.format(api_key))
for i in range(randint(0, MAX_LOOP_DURATION)):
yield
msg = 'The value sent was: {}'.format(api_key)
Respond(200, msg) |
python | def generic_add(a, b):
"""Simple function to add two numbers"""
logger.info('Called generic_add({}, {})'.format(a, b))
return a + b |
python | def shift_rows(state):
"""
Transformation in the Cipher that processes the State by cyclically shifting
the last three rows of the State by different offsets.
"""
state = state.reshape(4, 4, 8)
return fcat(
state[0][0], state[1][1], state[2][2], state[3][3],
state[1][0], state[2][1], state[3][2], state[0][3],
state[2][0], state[3][1], state[0][2], state[1][3],
state[3][0], state[0][1], state[1][2], state[2][3]
) |
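A self-contained illustration of the ShiftRows offsets using the textbook row-major 4x4 state; the snippet above stores the state in a transposed (4, 4, 8)-bit layout and uses `fcat`, which is defined elsewhere in its module:

```python
import numpy as np

state = np.arange(16).reshape(4, 4)
# Row r is cyclically shifted left by r positions.
shifted = np.array([np.roll(state[r], -r) for r in range(4)])
print(shifted)
# [[ 0  1  2  3]
#  [ 5  6  7  4]
#  [10 11  8  9]
#  [15 12 13 14]]
```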
python | def _get_app_config(self, app_name):
"""
Returns an app config for the given name, not by label.
"""
matches = [app_config for app_config in apps.get_app_configs()
if app_config.name == app_name]
if not matches:
return
return matches[0] |
java | protected List<JCAnnotation> annotationsOpt(Tag kind) {
if (token.kind != MONKEYS_AT) return List.nil(); // optimization
ListBuffer<JCAnnotation> buf = new ListBuffer<>();
int prevmode = mode;
while (token.kind == MONKEYS_AT) {
int pos = token.pos;
nextToken();
buf.append(annotation(pos, kind));
}
lastmode = mode;
mode = prevmode;
List<JCAnnotation> annotations = buf.toList();
return annotations;
} |
python | def fromfilenames(cls, filenames, coltype=LIGOTimeGPS):
"""
Read Cache objects from the files named and concatenate the results into a
single Cache.
"""
cache = cls()
for filename in filenames:
cache.extend(cls.fromfile(open(filename), coltype=coltype))
return cache |
python | def get_facet_serializer(self, *args, **kwargs):
"""
Return the facet serializer instance that should be used for
serializing faceted output.
"""
assert "objects" in kwargs, "`objects` is a required argument to `get_facet_serializer()`"
facet_serializer_class = self.get_facet_serializer_class()
kwargs["context"] = self.get_serializer_context()
kwargs["context"].update({
"objects": kwargs.pop("objects"),
"facet_query_params_text": self.facet_query_params_text,
})
return facet_serializer_class(*args, **kwargs) |
python | def write_hex(fout, buf, offset, width=16):
"""Write the content of 'buf' out in a hexdump style
Args:
fout: file object to write to
buf: the buffer to be pretty printed
offset: the starting offset of the buffer
width: how many bytes should be displayed per row
"""
skipped_zeroes = 0
for i, chunk in enumerate(chunk_iter(buf, width)):
# zero skipping
if chunk == (b"\x00" * width):
skipped_zeroes += 1
continue
elif skipped_zeroes != 0:
fout.write(" -- skipped zeroes: {}\n".format(skipped_zeroes))
skipped_zeroes = 0
# starting address of the current line
fout.write("{:016x} ".format(i * width + offset))
# bytes column
column = " ".join([" ".join(["{:02x}".format(c) for c in subchunk])
for subchunk in chunk_iter(chunk, 8)])
w = width * 2 + (width - 1) + ((width // 8) - 1)
if len(column) != w:
column += " " * (w - len(column))
fout.write(column)
# ASCII character column
fout.write(" |")
for c in chunk:
if c in PRINTABLE_CHARS:
fout.write(chr(c))
else:
fout.write(".")
if len(chunk) < width:
fout.write(" " * (width - len(chunk)))
fout.write("|")
fout.write("\n") |
python | def raise_304(instance):
"""Abort the current request with a 304 (Not Modified) response code.
Clears out the body of the response.
:param instance: Resource instance (used to access the response)
:type instance: :class:`webob.resource.Resource`
:raises: :class:`webob.exceptions.ResponseException` of status 304
.. todo: The following headers MUST be output: Date, ETag and/or
Content-Location, Expires, Cache-Control, Vary. See :rfc:`2616`
section 10.3.5.
"""
instance.response.status = 304
instance.response.body = ''
instance.response.body_raw = None
raise ResponseException(instance.response) |
java | private AuthnStatement buildAuthnStatement(final Object casAssertion,
final RequestAbstractType authnRequest,
final SamlRegisteredServiceServiceProviderMetadataFacade adaptor,
final SamlRegisteredService service,
final String binding,
final MessageContext messageContext,
final HttpServletRequest request) throws SamlException {
val assertion = Assertion.class.cast(casAssertion);
val authenticationMethod = this.authnContextClassRefBuilder.build(assertion, authnRequest, adaptor, service);
var id = request != null ? CommonUtils.safeGetParameter(request, CasProtocolConstants.PARAMETER_TICKET) : StringUtils.EMPTY;
if (StringUtils.isBlank(id)) {
LOGGER.warn("Unable to locate service ticket as the session index; Generating random identifier instead...");
id = '_' + String.valueOf(RandomUtils.nextLong());
}
val statement = newAuthnStatement(authenticationMethod, DateTimeUtils.zonedDateTimeOf(assertion.getAuthenticationDate()), id);
if (assertion.getValidUntilDate() != null) {
val dt = DateTimeUtils.zonedDateTimeOf(assertion.getValidUntilDate());
statement.setSessionNotOnOrAfter(
DateTimeUtils.dateTimeOf(dt.plusSeconds(casProperties.getAuthn().getSamlIdp().getResponse().getSkewAllowance())));
}
val subjectLocality = buildSubjectLocality(assertion, authnRequest, adaptor, binding);
statement.setSubjectLocality(subjectLocality);
return statement;
} |
python | def list_mapped_classes():
"""
Returns all the rdfclasses that have and associated elasticsearch
mapping
Args:
None
"""
cls_dict = {key: value
for key, value in MODULE.rdfclass.__dict__.items()
if not isinstance(value, RdfConfigManager)
and key not in ['properties']
and hasattr(value, 'es_defs')
and value.es_defs.get('kds_esIndex')}
new_dict = {}
# remove items that are appearing as a subclass of a main mapping class
# the intersection of the set of the cls_dict values and a class's
# individual hierarchy will be > 1 if the class is a subclass of another
# class in the list
potential_maps = set([cls_.__name__ for cls_ in cls_dict.values()])
for name, cls_ in cls_dict.items():
parents = set(cls_.hierarchy)
if len(parents.intersection(potential_maps)) <= 1:
new_dict[name] = cls_
return new_dict |
java | public static base_response add(nitro_service client, route6 resource) throws Exception {
route6 addresource = new route6();
addresource.network = resource.network;
addresource.gateway = resource.gateway;
addresource.vlan = resource.vlan;
addresource.weight = resource.weight;
addresource.distance = resource.distance;
addresource.cost = resource.cost;
addresource.advertise = resource.advertise;
addresource.msr = resource.msr;
addresource.monitor = resource.monitor;
addresource.td = resource.td;
return addresource.add_resource(client);
} |
java | private void fixNCNTypes(String[] symbs, int[][] graph) {
for (int v = 0; v < graph.length; v++) {
if ("NCN+".equals(symbs[v])) {
boolean foundCNN = false;
for (int w : graph[v]) {
foundCNN = foundCNN || "CNN+".equals(symbs[w]) || "CIM+".equals(symbs[w]);
}
if (!foundCNN) {
symbs[v] = "NC=N";
}
}
}
} |
java | public static <T extends Com4jObject> T getActiveObject(Class<T> primaryInterface, String clsid ) {
return getActiveObject(primaryInterface,new GUID(clsid));
} |
java | @Pure
public static String[] split(char leftSeparator, char rightSeparator, String str) {
final SplitSeparatorToArrayAlgorithm algo = new SplitSeparatorToArrayAlgorithm();
splitSeparatorAlgorithm(leftSeparator, rightSeparator, str, algo);
return algo.toArray();
} |
java | public static String getClassName(Object obj, boolean isSimple) {
if (null == obj) {
return null;
}
final Class<?> clazz = obj.getClass();
return getClassName(clazz, isSimple);
} |
java | public static DMatrixSparseTriplet uniform(int numRows , int numCols , int nz_total ,
double min , double max , Random rand ) {
// Create a list of all the possible element values
int N = numCols*numRows;
if( N < 0 )
throw new IllegalArgumentException("matrix size is too large");
nz_total = Math.min(N,nz_total);
int selected[] = new int[N];
for (int i = 0; i < N; i++) {
selected[i] = i;
}
for (int i = 0; i < nz_total; i++) {
int s = rand.nextInt(N);
int tmp = selected[s];
selected[s] = selected[i];
selected[i] = tmp;
}
// Create a sparse matrix
DMatrixSparseTriplet ret = new DMatrixSparseTriplet(numRows,numCols,nz_total);
for (int i = 0; i < nz_total; i++) {
int row = selected[i]/numCols;
int col = selected[i]%numCols;
double value = rand.nextDouble()*(max-min)+min;
ret.addItem(row,col, value);
}
return ret;
} |
python | def enable_category(self, category: str) -> None:
"""
Enable an entire category of commands
:param category: the category to enable
"""
for cmd_name in list(self.disabled_commands):
func = self.disabled_commands[cmd_name].command_function
if hasattr(func, HELP_CATEGORY) and getattr(func, HELP_CATEGORY) == category:
self.enable_command(cmd_name) |
java | public DataObject[] getDataObjectArray(){
DataObject[] res = new DataObject[this.dataList.size()];
return this.dataList.toArray(res);
} |
java | @Override
public Component getTableCellEditorComponent(JTable table, Object value,
boolean isSelected, int row, int column) {
// Save the supplied value to the time picker.
setCellEditorValue(value);
// If needed, adjust the minimum row height for the table.
zAdjustTableRowHeightIfNeeded(table);
// This fixes a bug where the time text could "move around" during a table resize event.
timePicker.getComponentTimeTextField().setScrollOffset(0);
// Return the time picker component.
return timePicker;
} |
java | private static void hideMultipleParts(IAtomContainer container, Sgroup sgroup) {
final Set<IBond> crossing = sgroup.getBonds();
final Set<IAtom> atoms = sgroup.getAtoms();
final Set<IAtom> parentAtoms = sgroup.getValue(SgroupKey.CtabParentAtomList);
for (IBond bond : container.bonds()) {
if (parentAtoms.contains(bond.getBegin()) && parentAtoms.contains(bond.getEnd()))
continue;
if (atoms.contains(bond.getBegin()) || atoms.contains(bond.getEnd()))
StandardGenerator.hide(bond);
}
for (IAtom atom : atoms) {
if (!parentAtoms.contains(atom))
StandardGenerator.hide(atom);
}
for (IBond bond : crossing) {
StandardGenerator.unhide(bond);
}
} |
java | public void buildSignature(XMLNode node, Content constructorDocTree) {
constructorDocTree.addContent(writer.getSignature(currentConstructor));
} |
python | def dr( self, r1, r2, cutoff=None ):
"""
Calculate the distance between two fractional coordinates in the cell.
Args:
r1 (np.array): fractional coordinates for position 1.
r2 (np.array): fractional coordinates for position 2.
cutoff (optional:Bool): If set, returns None for distances greater than the cutoff. Default None (unset).
Returns:
(float): the distance between r1 and r2.
"""
delta_r_cartesian = ( r1 - r2 ).dot( self.matrix )
delta_r_squared = sum( delta_r_cartesian**2 )
if cutoff is not None:
cutoff_squared = cutoff ** 2
if delta_r_squared > cutoff_squared:
return None
return math.sqrt( delta_r_squared ) |
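A worked example; `Cell` is a hypothetical host class whose `matrix` attribute holds the 3x3 lattice matrix that `dr` dots the fractional separation into:

```python
import numpy as np

cell = Cell(matrix=np.diag([10.0, 10.0, 10.0]))  # hypothetical cubic cell
r1 = np.array([0.1, 0.1, 0.1])
r2 = np.array([0.2, 0.1, 0.1])
print(cell.dr(r1, r2))              # 1.0: 0.1 fractional units * 10.0
print(cell.dr(r1, r2, cutoff=0.5))  # None: 1.0 exceeds the cutoff
```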
python | def moment_inertia(self):
"""
The analytic inertia tensor of the sphere primitive.
Returns
----------
tensor: (3,3) float, 3D inertia tensor
"""
tensor = inertia.sphere_inertia(mass=self.volume,
radius=self.primitive.radius)
return tensor |
java | private void prettyprint(String xmlLogsFile, FileOutputStream htmlReportFile)
throws Exception {
TransformerFactory tFactory = TransformerFactory.newInstance();
// Fortify Mod: prevent external entity injection
tFactory.setFeature(XMLConstants.FEATURE_SECURE_PROCESSING, true);
DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
// Fortify Mod: prevent external entity injection
factory.setExpandEntityReferences(false);
factory.setNamespaceAware(true);
factory.setXIncludeAware(true);
DocumentBuilder parser = factory.newDocumentBuilder();
Document document = parser.parse(xmlLogsFile);
Transformer transformer = tFactory.newTransformer(new StreamSource(
xsltFileHandler));
transformer.transform(new DOMSource(document), new StreamResult(
htmlReportFile));
} |
java | public static Timer getNamedTotalTimer(String timerName) {
long totalCpuTime = 0;
long totalSystemTime = 0;
int measurements = 0;
int timerCount = 0;
int todoFlags = RECORD_NONE;
Timer previousTimer = null;
for (Map.Entry<Timer, Timer> entry : registeredTimers.entrySet()) {
if (entry.getValue().name.equals(timerName)) {
previousTimer = entry.getValue();
timerCount += 1;
totalCpuTime += previousTimer.totalCpuTime;
totalSystemTime += previousTimer.totalWallTime;
measurements += previousTimer.measurements;
todoFlags |= previousTimer.todoFlags;
}
}
if (timerCount == 1) {
return previousTimer;
} else {
Timer result = new Timer(timerName, todoFlags, 0);
result.totalCpuTime = totalCpuTime;
result.totalWallTime = totalSystemTime;
result.measurements = measurements;
result.threadCount = timerCount;
return result;
}
} |
python | def summarize_tensors(tensor_dict, tag=None):
"""Summarize the tensors.
Args:
tensor_dict: a dictionary of tensors.
tag: name scope of the summary; defaults to tensors/.
"""
if tag is None:
tag = "tensors/"
for t_name in list(tensor_dict):
t = tensor_dict[t_name]
tf.summary.histogram(tag + t_name, t) |
java | public <T extends BioPAXElement> Class<T> getImplClass(Class<T> aModelInterfaceClass)
{
Class<T> implClass = null;
if (aModelInterfaceClass.isInterface()) {
String name = mapClassName(aModelInterfaceClass);
try {
implClass = (Class<T>) Class.forName(name);
} catch (ClassNotFoundException e) {
log.debug(String.format("getImplClass(%s), %s" , aModelInterfaceClass, e));
}
}
return implClass;
} |
java | public List<List<String>> getAllScopes() {
this.checkInitialized();
final ImmutableList.Builder<List<String>> builder = ImmutableList.<List<String>>builder();
final Consumer<Integer> _function = (Integer it) -> {
List<String> _get = this.scopes.get(it);
StringConcatenation _builder = new StringConcatenation();
_builder.append("No scopes are available for index: ");
_builder.append(it);
builder.add(Preconditions.<List<String>>checkNotNull(_get, _builder));
};
this.scopes.keySet().forEach(_function);
return builder.build();
} |
python | def make_gdf_graph(filename, stop=True):
"""Create a graph in simple GDF format, suitable for feeding into Gephi,
or some other graph manipulation and display tool. Setting stop to True
will stop the current trace.
"""
if stop:
stop_trace()
with open(filename, 'w') as f:
f.write(get_gdf()) |
python | def _arm_track_lr_on_stack(self, addr, irsb, function):
"""
At the beginning of the basic block, we check if the first instruction stores the LR register onto the stack.
If it does, we calculate the offset of that store, and record the offset in function.info.
For instance, here is the disassembly of a THUMB mode function:
000007E4 STR.W LR, [SP,#var_4]!
000007E8 MOV R2, R1
000007EA SUB SP, SP, #0xC
000007EC MOVS R1, #0
...
00000800 ADD SP, SP, #0xC
00000802 LDR.W PC, [SP+4+var_4],#4
The very last basic block has a jumpkind of Ijk_Boring, which is because VEX cannot do such complicated analysis
to determine the real jumpkind.
As we can see, instruction 7e4h stores LR at [sp-4], and at the end of this function, instruction 802 loads LR
from [sp], then increments sp by 4. We execute the first instruction, and track the following things:
- if the value from register LR is stored onto the stack.
- the difference between the offset of the LR store on stack, and the SP after the store.
If at the end of the function, the LR is read out from the stack at the exact same stack offset, we will change
the jumpkind of the final IRSB to Ijk_Ret.
This method can be enabled by setting "ret_jumpkind_heuristics", which is an architecture-specific option on
ARM, to True.
:param int addr: Address of the basic block.
:param pyvex.IRSB irsb: The basic block object.
:param Function function: The function instance.
:return: None
"""
if irsb.statements is None:
return
if 'lr_saved_on_stack' in function.info:
return
# if it does, we log it down to the Function object.
lr_offset = self.project.arch.registers['lr'][0]
sp_offset = self.project.arch.sp_offset
initial_sp = 0x7fff0000
initial_lr = 0xabcdef
tmps = {}
# pylint:disable=too-many-nested-blocks
for stmt in irsb.statements:
if isinstance(stmt, pyvex.IRStmt.IMark):
if stmt.addr + stmt.delta != addr:
break
elif isinstance(stmt, pyvex.IRStmt.WrTmp):
data = stmt.data
if isinstance(data, pyvex.IRExpr.Get):
if data.offset == sp_offset:
tmps[stmt.tmp] = initial_sp
elif data.offset == lr_offset:
tmps[stmt.tmp] = initial_lr
elif isinstance(data, pyvex.IRExpr.Binop):
if data.op == 'Iop_Sub32':
arg0, arg1 = data.args
if isinstance(arg0, pyvex.IRExpr.RdTmp) and isinstance(arg1, pyvex.IRExpr.Const):
if arg0.tmp in tmps:
tmps[stmt.tmp] = tmps[arg0.tmp] - arg1.con.value
elif isinstance(stmt, (pyvex.IRStmt.Store, pyvex.IRStmt.StoreG)):
data = stmt.data
storing_lr = False
if isinstance(data, pyvex.IRExpr.RdTmp):
if data.tmp in tmps:
val = tmps[data.tmp]
if val == initial_lr:
# we are storing LR to somewhere
storing_lr = True
if storing_lr:
if isinstance(stmt.addr, pyvex.IRExpr.RdTmp):
if stmt.addr.tmp in tmps:
storing_addr = tmps[stmt.addr.tmp]
function.info['lr_saved_on_stack'] = True
function.info['lr_on_stack_offset'] = storing_addr - initial_sp
break
if 'lr_saved_on_stack' not in function.info:
function.info['lr_saved_on_stack'] = False |
java | private void sendEntireMessage(JsMessage jsMessage, List<DataSlice> messageSlices)
throws UnsupportedEncodingException, MessageCopyFailedException,
IncorrectMessageTypeException, MessageEncodeFailedException {
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(this, tc, "sendEntireMessage",
new Object[] { jsMessage, messageSlices });
int msgLen = 0;
try {
CommsServerByteBuffer buffer = poolManager.allocate();
ConversationState convState = (ConversationState) conversation.getAttachment();
buffer.putShort(convState.getConnectionObjectId());
if (!mainConsumer.getUsingConnectionReceive()) {
buffer.putShort(mainConsumer.getConsumerSessionId());
}
// Put the message into the buffer in whatever way is suitable
if (messageSlices == null) {
msgLen = buffer.putMessage(jsMessage,
convState.getCommsConnection(),
conversation);
} else {
msgLen = buffer.putMessgeWithoutEncode(messageSlices);
}
// Decide on the segment
int seg = JFapChannelConstants.SEG_RECEIVE_SESS_MSG_R;
if (mainConsumer.getUsingConnectionReceive()) {
seg = JFapChannelConstants.SEG_RECEIVE_CONN_MSG_R;
}
int jfapPriority = JFapChannelConstants.getJFAPPriority(jsMessage.getPriority());
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
SibTr.debug(this, tc, "Sending with JFAP priority of " + jfapPriority);
conversation.send(buffer,
seg,
requestNumber,
jfapPriority,
false,
ThrottlingPolicy.BLOCK_THREAD,
null);
mainConsumer.messagesSent++;
} catch (SIException e) {
//No FFDC code needed
//Only FFDC if we haven't received a meTerminated event.
if (!((ConversationState) mainConsumer.getConversation().getAttachment()).hasMETerminated()) {
FFDCFilter.processException(e, CLASS_NAME + ".sendEntireMessage",
CommsConstants.CATSYNCASYNCHREADER_SEND_MSG_01, this);
}
if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled())
SibTr.debug(tc, e.getMessage(), e);
SibTr.error(tc, "COMMUNICATION_ERROR_SICO2015", e);
}
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(this, tc, "sendEntireMessage");
} |
java | public <T> TypeDef createTypeFromTemplate(T model, String[] parameters, String content) {
try (StringWriter writer = new StringWriter()) {
new CodeGeneratorBuilder<T>()
.withContext(context)
.withModel(model)
.withParameters(parameters)
.withWriter(writer)
.withTemplateContent(content)
.build()
.generate();
ByteArrayInputStream bis = new ByteArrayInputStream(writer.toString().getBytes());
return Sources.FROM_INPUTSTEAM_TO_SINGLE_TYPEDEF.apply(bis);
} catch (IOException e) {
return null;
}
} |
python | def get_signer_by_version(digest, ver):
"""Returns a new signer object for a digest and version combination.
Keyword arguments:
digest -- a callable that may be passed to the initializer of any Signer object in this library.
The callable must return a hasher object when called with no arguments.
ver -- the version of the signature. This may be any value convertible to an int.
"""
if int(ver) == 1:
return V1Signer(digest)
elif int(ver) == 2:
return V2Signer(digest)
else:
return None |
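A usage sketch: `hashlib` constructors satisfy the docstring's contract of a callable returning a hasher, and `V1Signer`/`V2Signer` are assumed to come from the same module as `get_signer_by_version`:

```python
import hashlib

signer = get_signer_by_version(hashlib.sha256, "2")      # a V2Signer
assert get_signer_by_version(hashlib.sha256, 3) is None  # unknown version
```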
python | def delete_model(self, meta: dict):
"""Delete the model from GCS."""
bucket = self.connect()
if bucket is None:
raise BackendRequiredError
blob_name = "models/%s/%s.asdf" % (meta["model"], meta["uuid"])
self._log.info(blob_name)
try:
self._log.info("Deleting model ...")
bucket.delete_blob(blob_name)
except NotFound:
self._log.warning("Model %s already deleted.", meta["uuid"]) |
java | protected double receivePoint(LofPoint recievedPoint, LofDataSet dataSet)
{
// Number of training data records currently held
int dataCount = dataSet.getDataIdList().size();
// If the data count is below the minimum, only add the data and treat the LOF score as 0.0
if (dataCount < this.minDataCount)
{
addDataWithoutCalculate(recievedPoint, dataSet);
return 0.0d;
}
double lofScore = 0.0d;
if (this.alwaysUpdateModel || dataSet.getDataIdList().size() < this.maxDataCount
|| (this.receiveCount % this.updateInterval) == 0)
{
// Update the training data model and calculate the LOF score when any of the following holds:
// 1. "Always update the training data model on data arrival" is true
// 2. The training data model holds fewer records than its maximum capacity
// 3. (number of received records) % (model update interval) == 0
lofScore = calculateLofWithUpdate(recievedPoint, dataSet);
}
else
{
// If none of the above conditions holds, only calculate the score.
lofScore = calculateLofWithoutUpdate(recievedPoint, dataSet);
}
return lofScore;
} |
java | public static com.liferay.commerce.model.CommerceOrder updateCommerceOrder(
com.liferay.commerce.model.CommerceOrder commerceOrder) {
return getService().updateCommerceOrder(commerceOrder);
} |
python | def get(self, key, default=None, as_int=False, setter=None):
"""Gets a value from the cache.
:param str|unicode key: The cache key to get value for.
:param default: Value to return if none found in cache.
:param bool as_int: Return 64bit number instead of str.
:param callable setter: Setter callable to automatically set cache
value if not already cached. Required to accept a key and return
a value that will be cached.
:rtype: str|unicode|int
"""
if as_int:
val = uwsgi.cache_num(key, self.name)
else:
val = decode(uwsgi.cache_get(key, self.name))
if val is None:
if setter is None:
return default
val = setter(key)
if val is None:
return default
self.set(key, val)
return val |
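A usage sketch for the miss-handling `setter` callback, assuming `cache` is an instance of the wrapper class above running inside a uWSGI process (the `uwsgi` module is only importable there):

```python
def load_greeting(key):
    # Called only on a cache miss; the return value is stored via set().
    return 'hello for ' + key

value = cache.get('greeting', setter=load_greeting)
hits = cache.get('hit_count', default=0, as_int=True)
```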
java | public void updateCellule( SIBUuid8 newRemoteMEUuid)
{
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.entry(this, tc, "updateCellule", newRemoteMEUuid);
this.remoteMEUuid = newRemoteMEUuid;
if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
SibTr.exit(tc, "updateCellule");
} |
python | def writeNSdict(self, nsdict):
'''Write a namespace dictionary, taking care to not clobber the
standard (or reserved by us) prefixes.
'''
for k,v in nsdict.items():
if (k,v) in _standard_ns: continue
rv = _reserved_ns.get(k)
if rv:
if rv != v:
raise KeyError("Reserved namespace " + str((k,v)) + " used")
continue
if k:
self.dom.setNamespaceAttribute(k, v)
else:
self.dom.setNamespaceAttribute('xmlns', v) |
java | private void onCopyFailure(
KeySetView<IOException, Boolean> innerExceptions,
GoogleJsonError e,
String srcBucketName, String srcObjectName) {
if (errorExtractor.itemNotFound(e)) {
FileNotFoundException fnfe =
GoogleCloudStorageExceptions.getFileNotFoundException(srcBucketName, srcObjectName);
innerExceptions.add((FileNotFoundException) fnfe.initCause(new IOException(e.toString())));
} else {
String srcString = StorageResourceId.createReadableString(srcBucketName, srcObjectName);
innerExceptions.add(new IOException(String.format("Error copying %s:%n%s", srcString, e)));
}
} |
python | def consume_results(self): # pylint: disable=too-many-branches
"""Handle results waiting in waiting_results list.
The referenced item of each check consumes the result and updates its status
:return: None
"""
# All results are in self.waiting_results
# We need to get them first
queue_size = self.waiting_results.qsize()
for _ in range(queue_size):
self.manage_results(self.waiting_results.get())
# Then we consume them
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_CONSUME:
logger.debug("Consuming: %s", chk)
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
# # Raise the log only when the check got consumed!
# # Else the item information are not up-to-date :/
# if self.pushed_conf.log_active_checks and not chk.passive_check:
# item.raise_check_result()
#
for check in dep_checks:
logger.debug("-> raised a dependency check: %s", chk)
self.add(check)
# loop to resolve dependencies
have_resolved_checks = True
while have_resolved_checks:
have_resolved_checks = False
# All 'finished' checks (no more dep) raise checks they depend on
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAITING_ME:
for dependent_checks in chk.depend_on_me:
# Ok, now dependent will no more wait
dependent_checks.depend_on.remove(chk.uuid)
have_resolved_checks = True
# REMOVE OLD DEP CHECK -> zombie
chk.status = ACT_STATUS_ZOMBIE
# Now, reinteger dep checks
for chk in list(self.checks.values()):
if chk.status == ACT_STATUS_WAIT_DEPEND and not chk.depend_on:
item = self.find_item_by_id(chk.ref)
notification_period = None
if getattr(item, 'notification_period', None) is not None:
notification_period = self.timeperiods[item.notification_period]
dep_checks = item.consume_result(chk, notification_period, self.hosts,
self.services, self.timeperiods,
self.macromodulations, self.checkmodulations,
self.businessimpactmodulations,
self.resultmodulations, self.checks,
self.pushed_conf.log_active_checks and
not chk.passive_check)
for check in dep_checks:
self.add(check) |
java | @Override
public GetVocabularyResult getVocabulary(GetVocabularyRequest request) {
request = beforeClientExecution(request);
return executeGetVocabulary(request);
} |
python | def _set_state(self, state):
"""
Transition the SCTP association to a new state.
"""
if state != self._association_state:
self.__log_debug('- %s -> %s', self._association_state, state)
self._association_state = state
if state == self.State.ESTABLISHED:
self.__state = 'connected'
for channel in list(self._data_channels.values()):
if channel.negotiated and channel.readyState != 'open':
channel._setReadyState('open')
asyncio.ensure_future(self._data_channel_flush())
elif state == self.State.CLOSED:
self._t1_cancel()
self._t2_cancel()
self._t3_cancel()
self.__state = 'closed'
# close data channels
for stream_id in list(self._data_channels.keys()):
self._data_channel_closed(stream_id)
# no more events will be emitted, so remove all event listeners
# to facilitate garbage collection.
self.remove_all_listeners() |
java | public static int search(long[] longArray, long value) {
int start = 0;
int end = longArray.length - 1;
int middle = 0;
while(start <= end) {
middle = (start + end) >> 1;
if(value == longArray[middle]) {
return middle;
}
if(value < longArray[middle]) {
end = middle - 1 ;
}
else {
start = middle + 1;
}
}
return -1;
} |
java | public static void solveL( double L[] , double []b , int n )
{
// for( int i = 0; i < n; i++ ) {
// double sum = b[i];
// for( int k=0; k<i; k++ ) {
// sum -= L[i*n+k]* b[k];
// }
// b[i] = sum / L[i*n+i];
// }
for( int i = 0; i < n; i++ ) {
double sum = b[i];
int indexL = i*n;
for( int k=0; k<i; k++ ) {
sum -= L[indexL++]* b[k];
}
b[i] = sum / L[indexL];
}
} |
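A quick cross-check of the same forward-substitution recurrence in Python; solving L x = b for a lower-triangular L should agree with a general solver:

```python
import numpy as np

n = 4
L = np.tril(np.random.rand(n, n)) + np.eye(n)  # well-conditioned lower-triangular
b = np.random.rand(n)
x = b.copy()
for i in range(n):
    x[i] = (x[i] - L[i, :i] @ x[:i]) / L[i, i]  # same recurrence as solveL
assert np.allclose(x, np.linalg.solve(L, b))
```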
python | def _load(self):
"""Fetch metadata from remote. Entries are fetched lazily."""
# This will not immediately fetch any sources (entries). It will lazily
# fetch sources from the server in paginated blocks when this Catalog
# is iterated over. It will fetch specific sources when they are
# accessed in this Catalog via __getitem__.
if self.page_size is None:
# Fetch all source info.
params = {}
else:
# Just fetch the metadata now; fetch source info later in pages.
params = {'page_offset': 0, 'page_size': 0}
http_args = self._get_http_args(params)
response = requests.get(self.info_url, **http_args)
try:
response.raise_for_status()
except requests.HTTPError as err:
six.raise_from(RemoteCatalogError(
"Failed to fetch metadata."), err)
info = msgpack.unpackb(response.content, **unpack_kwargs)
self.metadata = info['metadata']
# The intake server now always provides a length, but the server may be
# running an older version of intake.
self._len = info.get('length')
self._entries.reset()
# If we are paginating (page_size is not None) and the server we are
# working with is new enough to support pagination, info['sources']
# should be empty. If either of those things is not true,
# info['sources'] will contain all the entries and we should cache them
# now.
if info['sources']:
# Signal that we are not paginating, even if we were asked to.
self._page_size = None
self._entries._page_cache.update(
{source['name']: RemoteCatalogEntry(
url=self.url,
getenv=self.getenv,
getshell=self.getshell,
auth=self.auth,
http_args=self.http_args, **source)
for source in info['sources']}) |
python | def _long_string_handler(c, ctx, is_field_name=False):
"""Handles triple-quoted strings. Remains active until a value other than a long string is encountered."""
assert c == _SINGLE_QUOTE
is_clob = ctx.ion_type is IonType.CLOB
max_char = _MAX_CLOB_CHAR if is_clob else _MAX_TEXT_CHAR
assert not (is_clob and is_field_name)
if not is_clob and not is_field_name:
ctx.set_ion_type(IonType.STRING)
assert not ctx.value
ctx.set_unicode(quoted_text=True)
val = ctx.value
if is_field_name:
assert not val
ctx.set_pending_symbol()
val = ctx.pending_symbol
quotes = 0
in_data = True
c, self = yield
here = ctx.immediate_transition(self)
trans = here
while True:
if c == _SINGLE_QUOTE and not _is_escaped(c):
quotes += 1
if quotes == 3:
in_data = not in_data
ctx.set_quoted_text(in_data)
quotes = 0
else:
if in_data:
_validate_long_string_text(c, ctx, max_char)
# Any quotes found in the meantime are part of the data
val.extend(_SINGLE_QUOTES[quotes])
if not _is_escaped_newline(c):
val.append(c)
quotes = 0
else:
if quotes > 0:
assert quotes < 3
if is_field_name or is_clob:
# There are at least two values here, which is illegal for field names or within clobs.
_illegal_character(c, ctx, 'Malformed triple-quoted text: %s' % (val,))
else:
# This string value is followed by a quoted symbol.
if ctx.container.is_delimited:
_illegal_character(c, ctx, 'Delimiter %s not found after value.'
% (_chr(ctx.container.delimiter[0]),))
trans = ctx.event_transition(IonEvent, IonEventType.SCALAR, ctx.ion_type, ctx.value.as_text())
if quotes == 1:
if BufferQueue.is_eof(c):
_illegal_character(c, ctx, "Unexpected EOF.")
# c was read as a single byte. Re-read it as a code point.
ctx.queue.unread(c)
ctx.set_quoted_text(True)
c, _ = yield ctx.immediate_transition(self)
trans = _CompositeTransition(
trans,
ctx,
partial(_quoted_symbol_handler, c, is_field_name=False),
)
else: # quotes == 2
trans = _CompositeTransition(trans, ctx, None, ctx.set_empty_symbol())
elif c not in _WHITESPACE:
if is_clob:
trans = ctx.immediate_transition(_clob_end_handler(c, ctx))
elif c == _SLASH:
if ctx.container.ion_type is IonType.SEXP:
pending = ctx.event_transition(IonEvent, IonEventType.SCALAR,
ctx.ion_type, ctx.value.as_text())
trans = ctx.immediate_transition(_sexp_slash_handler(c, ctx, self, pending))
else:
trans = ctx.immediate_transition(_comment_handler(c, ctx, self))
elif is_field_name:
if c != _COLON:
_illegal_character(c, ctx, 'Illegal character after field name %s.' % (val,))
trans = ctx.immediate_transition(ctx.whence)
else:
trans = ctx.event_transition(IonEvent, IonEventType.SCALAR, ctx.ion_type, ctx.value.as_text())
c, _ = yield trans
ctx.set_self_delimiting(False) # If comments separated long string components, this would have been set.
trans = here |
java | public <S extends Model, R> AnimaQuery<T> where(TypeFunction<S, R> function, Object value) {
String columnName = AnimaUtils.getLambdaColumnName(function);
conditionSQL.append(" AND ").append(columnName).append(" = ?");
paramValues.add(value);
return this;
} |
java | public java.util.List<IpRouteInfo> getIpRoutesInfo() {
if (ipRoutesInfo == null) {
ipRoutesInfo = new com.amazonaws.internal.SdkInternalList<IpRouteInfo>();
}
return ipRoutesInfo;
} |
python | def _no_more_pads(self, element):
"""The callback for GstElement's "no-more-pads" signal.
"""
# Sent when the pads are done adding (i.e., there are no more
# streams in the file). If we haven't gotten at least one
# decodable stream, raise an exception.
if not self._got_a_pad:
self.read_exc = NoStreamError()
self.ready_sem.release() |
java | public static boolean stampVersion(JsonDBConfig dbConfig, File f, String version) {
FileOutputStream fos = null;
OutputStreamWriter osr = null;
BufferedWriter writer = null;
try {
fos = new FileOutputStream(f);
osr = new OutputStreamWriter(fos, dbConfig.getCharset());
writer = new BufferedWriter(osr);
String versionData = dbConfig.getObjectMapper().writeValueAsString(new SchemaVersion(version));
writer.write(versionData);
writer.newLine();
} catch (JsonProcessingException e) {
logger.error("Failed to serialize SchemaVersion to Json string", e);
return false;
} catch (IOException e) {
logger.error("Failed to write SchemaVersion to the new .json file {}", f, e);
return false;
} finally {
try {
if (writer != null) writer.close();
} catch (IOException e) {
logger.error("Failed to close BufferedWriter for new collection file {}", f, e);
}
try {
if (osr != null) osr.close();
} catch (IOException e) {
logger.error("Failed to close OutputStreamWriter for new collection file {}", f, e);
}
try {
if (fos != null) fos.close();
} catch (IOException e) {
logger.error("Failed to close FileOutputStream for new collection file {}", f, e);
}
}
return true;
} |
python | def send(self, frame):
"""
Handle the SEND command: Delivers a message to a queue or topic (default).
"""
dest = frame.headers.get('destination')
if not dest:
raise ProtocolError('Missing destination for SEND command.')
if dest.startswith('/queue/'):
self.engine.queue_manager.send(frame)
else:
self.engine.topic_manager.send(frame) |
python | def get_measurement_metadata(self, fields, ids=None, noneval=nan,
output_format='DataFrame'):
"""
Get the metadata fields of specified measurements (all if None given).
Parameters
----------
fields : str | iterable of str
Names of metadata fields to be returned.
ids : hashable| iterable of hashables | None
Keys of measurements for which metadata will be returned.
If None is given return metadata of all measurements.
noneval : obj
Value returned if applyto is 'data' but no data is available.
output_format : 'DataFrame' | 'dict'
'DataFrame' : return DataFrame,
'dict' : return dictionary.
Returns
-------
Measurement metadata in specified output_format.
"""
fields = to_list(fields)
func = lambda x: x.get_meta_fields(fields)
meta_d = self.apply(func, ids=ids, applyto='measurement',
noneval=noneval, output_format='dict')
if output_format == 'dict':
return meta_d
elif output_format == 'DataFrame':
from pandas import DataFrame as DF
meta_df = DF(meta_d, index=fields)
return meta_df
else:
msg = ("The output_format must be either 'dict' or 'DataFrame'. " +
"Encountered unsupported value %s." % repr(output_format))
raise Exception(msg) |
java | public FaceDetail withEmotions(Emotion... emotions) {
if (this.emotions == null) {
setEmotions(new java.util.ArrayList<Emotion>(emotions.length));
}
for (Emotion ele : emotions) {
this.emotions.add(ele);
}
return this;
} |
python | def send(self, request: Request) -> None:
"""
Dispatches a request. Expects one and only one target handler.
:param request: The request to dispatch
:return: None; raises a ConfigurationException unless exactly one handler factory is registered for the request
"""
handler_factories = self._registry.lookup(request)
if len(handler_factories) != 1:
raise ConfigurationException("Expected exactly one handler factory registered for this request")
handler = handler_factories[0]()
handler.handle(request) |
java | public void decode(AsnInputStream ais) throws ParseException {
try {
long val = ais.readInteger();
if (ais.getTag() == OperationCode._TAG_NATIONAL) {
this.setNationalOperationCode(val);
} else {
this.setPrivateOperationCode(val);
}
} catch (IOException e) {
throw new ParseException(RejectProblem.generalBadlyStructuredCompPortion, "IOException while decoding OperationCode: " + e.getMessage(), e);
} catch (AsnException e) {
throw new ParseException(RejectProblem.generalBadlyStructuredCompPortion, "AsnException while decoding OperationCode: " + e.getMessage(), e);
}
} |
python | def middleware(self, *args, **kwargs):
"""
A decorator that can be used to implement a Middleware plugin to
all of the Blueprints that belongs to this specific Blueprint Group.
In case of nested Blueprint Groups, the same middleware is applied
across each of the Blueprints recursively.
:param args: Optional positional Parameters to be use middleware
:param kwargs: Optional Keyword arg to use with Middleware
:return: Partial function to apply the middleware
"""
kwargs["bp_group"] = True
def register_middleware_for_blueprints(fn):
for blueprint in self.blueprints:
blueprint.middleware(fn, *args, **kwargs)
return register_middleware_for_blueprints |
java | public ServiceFuture<VpnSiteInner> updateTagsAsync(String resourceGroupName, String vpnSiteName, Map<String, String> tags, final ServiceCallback<VpnSiteInner> serviceCallback) {
return ServiceFuture.fromResponse(updateTagsWithServiceResponseAsync(resourceGroupName, vpnSiteName, tags), serviceCallback);
} |
java | @Override
public long dynamicQueryCount(DynamicQuery dynamicQuery,
Projection projection) {
return commerceNotificationAttachmentPersistence.countWithDynamicQuery(dynamicQuery,
projection);
} |
java | private void addTimeZoneOffset(Calendar c, StringBuilder sb) {
int min = (c.get(Calendar.ZONE_OFFSET) + c.get(Calendar.DST_OFFSET)) / 60000;
char op;
if (min < 0) {
op = '-';
min = -min;
}
else op = '+';
int hours = min / 60;
min = min - (hours * 60);
sb.append(op);
toString(sb, hours, 2);
sb.append(':');
toString(sb, min, 2);
} |
java | public static void configureLogback( ServletContext servletContext, File logDirFallback, String vHostName, Logger log ) throws FileNotFoundException, MalformedURLException, IOException {
log.debug("Reconfiguring Logback!");
String systemLogDir = System.getProperty(LOG_DIRECTORY_OVERRIDE, System.getProperty(JBOSS_LOG_DIR));
if (systemLogDir != null) {
systemLogDir += "/" + vHostName;
}
File logDir = FileSystemManager.getWritableDirectoryWithFailovers(systemLogDir,servletContext.getInitParameter(LOG_DIR_INIT_PARAM), logDirFallback.getAbsolutePath());
if(logDir != null) {
log.debug("Resetting logback context.");
URL configFile = servletContext.getResource("/WEB-INF/context-logback.xml");
log.debug("Configuring logback with file, {}", configFile);
LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
try {
JoranConfigurator configurator = new JoranConfigurator();
configurator.setContext(context);
context.stop();
context.reset();
context.putProperty("logDir", logDir.getCanonicalPath());
configurator.doConfigure(configFile);
} catch (JoranException je) {
// StatusPrinter will handle this
} finally {
context.start();
log.debug("Done resetting logback.");
}
StatusPrinter.printInCaseOfErrorsOrWarnings(context);
}
} |
java | @Nonnull
public JSFunction name (@Nonnull @Nonempty final String sName)
{
if (!JSMarshaller.isJSIdentifier (sName))
throw new IllegalArgumentException ("The name '" + sName + "' is not a legal JS identifier!");
m_sName = sName;
return this;
} |
python | def _get_random_id():
""" Get a random (i.e., unique) string identifier"""
symbols = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(symbols) for _ in range(15)) |
python | def save(self, fname=None, link_copy=False,raiseError=False):
""" link_copy: only works in hfd5 format
save space by creating links when identical arrays are found;
it may slow down the saving (3- or 4-fold) but saves space
when saving different dataset together (since it does not duplicate
arrays)
"""
if fname is None:
fname = self.filename
assert fname is not None
save(fname, self, link_copy=link_copy,raiseError=raiseError) |
python | def exists(self, record_key):
'''
a method to determine if a record exists in collection
:param record_key: string with key of record
:return: boolean reporting status
'''
title = '%s.exists' % self.__class__.__name__
# validate inputs
input_fields = {
'record_key': record_key
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct path to file
from os import path
file_path = path.join(self.collection_folder, record_key)
# validate existence of file
if not path.exists(file_path):
return False
return True |
python | def safe_data(self):
"""The data dictionary for this entity.
If this `ModelEntity` points to the dead state, it will
raise `DeadEntityException`.
"""
if self.data is None:
raise DeadEntityException(
"Entity {}:{} is dead - its attributes can no longer be "
"accessed. Use the .previous() method on this object to get "
"a copy of the object at its previous state.".format(
self.entity_type, self.entity_id))
return self.data |
java | public String getGroupName() {
CollapsibleGroup group = getGroup();
return (group == null ? getId() : group.getGroupName());
} |
python | def _hashed_key(self):
""" Returns 16-digit numeric hash of the redis key """
return abs(int(hashlib.md5(
self.key_prefix.encode('utf8')
).hexdigest(), 16)) % (10 ** (
self._size_mod if hasattr(self, '_size_mod') else 5)) |
python | def _keep_alive(self, get_work_response):
"""
Returns true if a worker should stay alive given the latest get_work response.
If worker-keep-alive is not set, this will always return false.
For an assistant, it will always return the value of worker-keep-alive.
Otherwise, it will return true for nonzero n_pending_tasks.
If worker-count-uniques is true, it will also
require that one of the tasks is unique to this worker.
"""
if not self._config.keep_alive:
return False
elif self._assistant:
return True
elif self._config.count_last_scheduled:
return get_work_response.n_pending_last_scheduled > 0
elif self._config.count_uniques:
return get_work_response.n_unique_pending > 0
elif get_work_response.n_pending_tasks == 0:
return False
elif not self._config.max_keep_alive_idle_duration:
return True
elif not self._idle_since:
return True
else:
time_to_shutdown = self._idle_since + self._config.max_keep_alive_idle_duration - datetime.datetime.now()
logger.debug("[%s] %s until shutdown", self._id, time_to_shutdown)
return time_to_shutdown > datetime.timedelta(0) |
java | private String getParameterValue(CmsObject cms, String key) {
if (m_parameters == null) {
m_parameters = getParameterMap(getStringValue(cms));
}
return getParameterValue(cms, m_parameters, key);
} |
java | @Override
public void flush() throws IOException {
List<Exception> flushExceptions = new ArrayList<>(delegates.size());
for (Writer delegate : delegates) {
try {
delegate.flush();
} catch (IOException | RuntimeException flushException) {
flushExceptions.add(flushException);
}
}
if (!flushExceptions.isEmpty()) {
throw mergeExceptions("flushing", flushExceptions);
}
} |
java | public MethodHandle nop() {
if (type().returnType() != void.class) {
throw new InvalidTransformException("must have void return type to nop: " + type());
}
return invoke(Binder
.from(type())
.drop(0, type().parameterCount())
.cast(Object.class)
.constant(null));
} |
python | def metaclass(*metaclasses):
# type: (*type) -> Callable[[type], type]
"""Create the class using all metaclasses.
Args:
metaclasses: A tuple of metaclasses that will be used to generate and
replace a specified class.
Returns:
A decorator that will recreate the class using the specified
metaclasses.
"""
def _inner(cls):
# pragma pylint: disable=unused-variable
metabases = tuple(
collections.OrderedDict( # noqa: F841
(c, None) for c in (metaclasses + (type(cls),))
).keys()
)
# pragma pylint: enable=unused-variable
_Meta = metabases[0]
for base in metabases[1:]:
class _Meta(base, _Meta): # pylint: disable=function-redefined
pass
return six.add_metaclass(_Meta)(cls)
return _inner |
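A usage sketch for the decorator above (it relies on the `six` and `collections` imports of its own module):

```python
class MetaA(type):
    pass

class MetaB(type):
    pass

@metaclass(MetaA, MetaB)
class Widget(object):
    pass

assert isinstance(Widget, MetaA) and isinstance(Widget, MetaB)
```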
java | public int setString( String strField, boolean bDisplayOption, int iMoveMode)
{
NumberField numberField = (NumberField)this.getNextConverter();
int iErrorCode = super.setString(strField, DBConstants.DONT_DISPLAY, iMoveMode);
if (strField.length() == 0)
numberField.displayField(); // Special Case (because we return immediately)
if ((iErrorCode != DBConstants.NORMAL_RETURN) || strField.length() == 0)
return iErrorCode;
double doubleValue = this.getValue();
if (doubleValue != 0)
doubleValue = 1 / doubleValue;
iErrorCode = this.setValue(doubleValue, bDisplayOption, DBConstants.SCREEN_MOVE);
return iErrorCode;
} |
java | private void evalUnit(MessageArg arg) {
// TODO: format unit ranges, e.g. {0;1 unit in:seconds sequence:hour,minute}
if (!arg.resolve()) {
return;
}
initUnitArgsParser();
parseArgs(unitArgsParser);
BigDecimal amount = arg.asBigDecimal();
// TODO: For future refactoring:
// This method closely follows the template compiler's UnitFormatter which
// was written first. It is relatively complex. In the near future, generalize the individual
// typed formatters to be reusable, and rewrite UnitFormatter and MessageFormat to depend on them.
MessageArgsUnitParser opts = unitArgsParser;
Unit inputUnit = opts.inputUnit;
Unit exactUnit = opts.exactUnit;
Unit[] exactUnits = opts.exactUnits;
String compact = opts.compact;
Unit[] sequence = opts.sequence;
// == FALLBACK ==
// If no arguments were set, we don't know the unit type. Format as plain number and bail out.
if (inputUnit == null && exactUnit == null) {
UnitValue value = new UnitValue(amount, null);
NumberFormatter formatter = CLDR_INSTANCE.getNumberFormatter(locale);
formatter.formatUnit(value, buf, unitArgsParser.options());
return;
}
// At least one argument was provided. We will try to infer the others where possible.
UnitConverter converter = CLDR_INSTANCE.getUnitConverter(locale);
UnitFactorSet factorSet = null;
// == INTERPRET ARGUMENTS ==
if (compact != null) {
// First see if compact format matches a direct unit conversion (e.g. temperature, speed)
Unit unit = MessageArgsUnitParser.selectExactUnit(compact, converter);
if (unit != null) {
exactUnit = unit;
} else if (opts.factorSet != null) {
// Compact format might correspond to a factor set (e.g. digital bits, bytes).
factorSet = opts.factorSet;
} else {
factorSet = MessageArgsUnitParser.selectFactorSet(compact, converter);
opts.factorSet = factorSet;
}
} else if (exactUnits != null && exactUnits.length > 0) {
if (opts.factorSet != null) {
factorSet = opts.factorSet;
} else {
UnitCategory category = exactUnits[0].category();
factorSet = converter.getFactorSet(category, exactUnits);
opts.factorSet = factorSet;
}
} else if (sequence != null && sequence.length > 0) {
if (opts.factorSet != null) {
factorSet = opts.factorSet;
} else {
UnitCategory category = sequence[0].category();
factorSet = converter.getFactorSet(category, sequence);
opts.factorSet = factorSet;
}
}
// Make sure we know what the input units are.
if (inputUnit == null) {
inputUnit = MessageArgsUnitParser.inputFromExactUnit(exactUnit, converter);
}
// == CONVERSION ==
UnitValue value = new UnitValue(amount, inputUnit);
// In sequence mode this will get set below.
List<UnitValue> values = null;
if (exactUnit != null) {
// Convert to exact units using the requested unit.
value = converter.convert(value, exactUnit);
} else if (factorSet == null) {
// Convert directly to "best" unit using the default built-in factor sets.
value = converter.convert(value);
} else if (compact != null || exactUnits != null) {
// Use the factor set to build a compact form.
value = converter.convert(value, factorSet);
} else if (sequence != null) {
// Use the factor set to produce a sequence.
values = converter.sequence(value, factorSet);
}
// == FORMATTING ==
NumberFormatter formatter = CLDR_INSTANCE.getNumberFormatter(locale);
if (values == null) {
formatter.formatUnit(value, buf, opts.options());
} else {
formatter.formatUnits(values, buf, opts.options());
}
} |
python | def translate_purposes(f):
"""decorator to translate the purposes field.
translate the values of the purposes field of the API response into
translated values.
"""
@wraps(f)
def wr(r, pc):
tmp = []
for P in r["purposes"]:
try:
tmp.append(POSTCODE_API_TYPEDEFS_PURPOSES[P])
except KeyError:
logger.warning("Warning: {}: "
"cannot translate 'purpose': {}".format(pc, P))
tmp.append(P)
r.update({"purposes": tmp})
return f(r, pc)
return wr |
python | def get_choice_selected_value(self):
"""
Returns the default selection from a choice menu
Throws an error if this is not a choice parameter.
"""
if 'choiceInfo' not in self.dto[self.name]:
raise GPException('not a choice parameter')
choice_info_dto = self.dto[self.name]['choiceInfo']
if 'selectedValue' in choice_info_dto:
return self.dto[self.name]['choiceInfo']['selectedValue']
else:
return None |
python | def ekntab():
"""
Return the number of loaded EK tables.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekntab_c.html
:return: The number of loaded EK tables.
:rtype: int
"""
n = ctypes.c_int(0)
libspice.ekntab_c(ctypes.byref(n))
return n.value |
java | public long getNearestPosition(long recordNumber) {
if (recordNumber < startingRecordNumber || recordNumber > lastRecordNumber)
throw new IndexOutOfBoundsException();
return index.get((int) ((recordNumber - startingRecordNumber) / step));
} |
java | @Override
protected void unserializeFrom(RawDataBuffer in)
{
super.unserializeFrom(in);
consumerId = new IntegerID(in.readInt());
topic = (Topic)DestinationSerializer.unserializeFrom(in);
messageSelector = in.readNullableUTF();
noLocal = in.readBoolean();
name = in.readUTF();
} |
java | public static Pattern convertPerlRegexToPattern(@Nonnull final String regex, @Nonnull final boolean faultTolerant) {
Check.notNull(regex, "regex");
String pattern = regex.trim();
final Matcher matcher = faultTolerant ? PERL_STYLE_TOLERANT.matcher(pattern) : PERL_STYLE.matcher(pattern);
if (!matcher.matches()) {
throw new IllegalArgumentException("The given regular expression '" + pattern
+ "' seems to be not in PERL style or has unsupported modifiers.");
}
pattern = pattern.substring(1);
final int lastIndex = pattern.lastIndexOf('/');
pattern = pattern.substring(0, lastIndex);
final int flags = Flag.convertToBitmask(Flag.parse(matcher.group(1)));
return Pattern.compile(pattern, flags);
} |
python | def uuid_constructor(loader, node):
""""
Construct a uuid.UUID object form a scalar YAML node.
Tests:
>>> yaml.add_constructor("!uuid", uuid_constructor, Loader=yaml.SafeLoader)
>>> yaml.safe_load("{'test': !uuid 'cc3702ca-699a-4aa6-8226-4c938f294d9b'}")
{'test': UUID('cc3702ca-699a-4aa6-8226-4c938f294d9b')}
"""
value = loader.construct_scalar(node)
return uuid.UUID(value) |
java | @NotNull
public DoubleStream mapToDouble(@NotNull final LongToDoubleFunction mapper) {
return new DoubleStream(params, new LongMapToDouble(iterator, mapper));
} |
java | public static Color stringToColor(String str) {
int icol = SVG_COLOR_NAMES.getInt(str.toLowerCase());
if(icol != NO_VALUE) {
return new Color(icol, false);
}
return colorLookupStylesheet.stringToColor(str);
} |
java | @Override
public AssociateServiceActionWithProvisioningArtifactResult associateServiceActionWithProvisioningArtifact(
AssociateServiceActionWithProvisioningArtifactRequest request) {
request = beforeClientExecution(request);
return executeAssociateServiceActionWithProvisioningArtifact(request);
} |
python | def __bind(self):
'''
Bind to the local port
'''
# using ZMQIOLoop since we *might* need zmq in there
install_zmq()
self.io_loop = ZMQDefaultLoop()
self.io_loop.make_current()
for req_channel in self.req_channels:
req_channel.post_fork(self._handle_payload, io_loop=self.io_loop) # TODO: cleaner? Maybe lazily?
try:
self.io_loop.start()
except (KeyboardInterrupt, SystemExit):
# Tornado knows what to do
pass |
python | def get_synset_xml(self,syn_id):
"""
call cdb_syn with synset identifier -> returns the synset xml;
"""
http, resp, content = self.connect()
params = ""
fragment = ""
path = "cdb_syn"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: db_opt: %s" % path )
# output_opt: plain, html, xml
# 'xml' is actually xhtml (with markup), but it is not valid xml!
# 'plain' is actually valid xml (without markup)
output_opt = "plain"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: output_opt: %s" % output_opt )
action = "runQuery"
if self.debug:
printf( "cornettodb/views/query_remote_syn_id: action: %s" % action )
printf( "cornettodb/views/query_remote_syn_id: query: %s" % syn_id )
qdict = {}
qdict[ "action" ] = action
qdict[ "query" ] = syn_id
qdict[ "outtype" ] = output_opt
query = urllib.urlencode( qdict )
db_url_tuple = ( self.scheme, self.host + ':' + str(self.port), path, params, query, fragment )
db_url = urlparse.urlunparse( db_url_tuple )
if self.debug:
printf( "db_url: %s" % db_url )
resp, content = http.request( db_url, "GET" )
if self.debug:
printf( "resp:\n%s" % resp )
# printf( "content:\n%s" % content )
# printf( "content is of type: %s" % type( content ) ) #<type 'str'>
xml_data = eval( content )
return etree.fromstring( xml_data ) |