language (stringclasses, 2 values) | func_code_string (stringlengths, 63–466k)
---|---
java | @Override
public Request<DeleteVpcEndpointsRequest> getDryRunRequest() {
Request<DeleteVpcEndpointsRequest> request = new DeleteVpcEndpointsRequestMarshaller().marshall(this);
request.addParameter("DryRun", Boolean.toString(true));
return request;
} |
java | public void setSubjectAlternativeNames(java.util.Collection<String> subjectAlternativeNames) {
if (subjectAlternativeNames == null) {
this.subjectAlternativeNames = null;
return;
}
this.subjectAlternativeNames = new java.util.ArrayList<String>(subjectAlternativeNames);
} |
java | public void close() throws IOException {
try {
while (processNextResponse(0))
;
file.close();
} catch (SshException ex) {
throw new SshIOException(ex);
} catch (SftpStatusException ex) {
throw new IOException(ex.getMessage());
}
} |
python | def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True):
"""
Scale all the datasets in the scene of the active renderer.
Scaling is performed independently on the X, Y, and Z axes.
A scale of zero is illegal and will be replaced with one.
Parameters
----------
xscale : float, optional
Scaling of the x axis. Must be greater than zero.
yscale : float, optional
Scaling of the y axis. Must be greater than zero.
zscale : float, optional
Scaling of the z axis. Must be greater than zero.
reset_camera : bool, optional
Resets camera so all actors can be seen.
"""
self.renderer.set_scale(xscale, yscale, zscale, reset_camera) |
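A minimal usage sketch for the method above, assuming `plotter` is an instance of the class exposing `set_scale` (the variable name is illustrative):

# Assumed: 'plotter' exposes set_scale() as defined above.
plotter.set_scale(zscale=2.0)  # exaggerate the z axis only
plotter.set_scale(1.0, 1.0, 1.0, reset_camera=False)  # uniform scale, keep camera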
java | public HttpResponse request(HttpRequest smartsheetRequest) throws HttpClientException {
Util.throwIfNull(smartsheetRequest);
if (smartsheetRequest.getUri() == null) {
throw new IllegalArgumentException("A Request URI is required.");
}
int attempt = 0;
long start = System.currentTimeMillis();
HttpRequestBase apacheHttpRequest;
HttpResponse smartsheetResponse;
InputStream bodyStream = null;
if(smartsheetRequest.getEntity() != null && smartsheetRequest.getEntity().getContent() != null) {
bodyStream = smartsheetRequest.getEntity().getContent();
}
// the retry logic will consume the body stream so we make sure it supports mark/reset and mark it
boolean canRetryRequest = bodyStream == null || bodyStream.markSupported();
if (!canRetryRequest) {
try {
// attempt to wrap the body stream in an input stream that does support mark/reset
bodyStream = new ByteArrayInputStream(StreamUtil.readBytesFromStream(bodyStream));
// close the old stream (just to be tidy) and then replace it with a resettable stream
smartsheetRequest.getEntity().getContent().close();
smartsheetRequest.getEntity().setContent(bodyStream);
canRetryRequest = true;
}
catch(IOException ignore) {
}
}
// the retry loop
while(true) {
apacheHttpRequest = createApacheRequest(smartsheetRequest);
// Set HTTP headers
if (smartsheetRequest.getHeaders() != null) {
for (Map.Entry<String, String> header : smartsheetRequest.getHeaders().entrySet()) {
apacheHttpRequest.addHeader(header.getKey(), header.getValue());
}
}
HttpEntitySnapshot requestEntityCopy = null;
HttpEntitySnapshot responseEntityCopy = null;
// Set HTTP entity
final HttpEntity entity = smartsheetRequest.getEntity();
if (apacheHttpRequest instanceof HttpEntityEnclosingRequestBase && entity != null && entity.getContent() != null) {
try {
// we need access to the original request stream so we can log it (in the event of errors and/or tracing)
requestEntityCopy = new HttpEntitySnapshot(entity);
} catch (IOException iox) {
logger.error("failed to make copy of original request entity - {}", iox);
}
InputStreamEntity streamEntity = new InputStreamEntity(entity.getContent(), entity.getContentLength());
streamEntity.setChunked(false); // why? not supported by library?
((HttpEntityEnclosingRequestBase) apacheHttpRequest).setEntity(streamEntity);
}
// mark the body so we can reset on retry
if(canRetryRequest && bodyStream != null) {
bodyStream.mark((int)smartsheetRequest.getEntity().getContentLength());
}
// Make the HTTP request
smartsheetResponse = new HttpResponse();
HttpContext context = new BasicHttpContext();
try {
long startTime = System.currentTimeMillis();
apacheHttpResponse = this.httpClient.execute(apacheHttpRequest, context);
long endTime = System.currentTimeMillis();
// Set request headers to values ACTUALLY SENT (not just created by us), this would include:
// 'Connection', 'Accept-Encoding', etc. However, if a proxy is used, this may be the proxy's CONNECT
// request, hence the test for HTTP method first
Object httpRequest = context.getAttribute("http.request");
if(httpRequest != null && HttpRequestWrapper.class.isAssignableFrom(httpRequest.getClass())) {
HttpRequestWrapper actualRequest = (HttpRequestWrapper)httpRequest;
switch(HttpMethod.valueOf(actualRequest.getMethod())) {
case GET:
case POST:
case PUT:
case DELETE:
apacheHttpRequest.setHeaders(((HttpRequestWrapper)httpRequest).getAllHeaders());
break;
}
}
// Set returned headers
smartsheetResponse.setHeaders(new HashMap<String, String>());
for (Header header : apacheHttpResponse.getAllHeaders()) {
smartsheetResponse.getHeaders().put(header.getName(), header.getValue());
}
smartsheetResponse.setStatus(apacheHttpResponse.getStatusLine().getStatusCode(),
apacheHttpResponse.getStatusLine().toString());
// Set returned entities
if (apacheHttpResponse.getEntity() != null) {
HttpEntity httpEntity = new HttpEntity();
httpEntity.setContentType(apacheHttpResponse.getEntity().getContentType().getValue());
httpEntity.setContentLength(apacheHttpResponse.getEntity().getContentLength());
httpEntity.setContent(apacheHttpResponse.getEntity().getContent());
smartsheetResponse.setEntity(httpEntity);
responseEntityCopy = new HttpEntitySnapshot(httpEntity);
}
long responseTime = endTime - startTime;
logRequest(apacheHttpRequest, requestEntityCopy, smartsheetResponse, responseEntityCopy, responseTime);
if (traces.size() > 0) { // trace-logging of request and response (if so configured)
RequestAndResponseData requestAndResponseData = RequestAndResponseData.of(apacheHttpRequest,
requestEntityCopy, smartsheetResponse, responseEntityCopy, traces);
TRACE_WRITER.println(requestAndResponseData.toString(tracePrettyPrint));
}
if (smartsheetResponse.getStatusCode() == 200) {
// call successful, exit the retry loop
break;
}
// the retry logic might consume the content stream so we make sure it supports mark/reset and mark it
InputStream contentStream = smartsheetResponse.getEntity().getContent();
if (!contentStream.markSupported()) {
// wrap the response stream in an input stream that does support mark/reset
contentStream = new ByteArrayInputStream(StreamUtil.readBytesFromStream(contentStream));
// close the old stream (just to be tidy) and then replace it with a resettable stream
smartsheetResponse.getEntity().getContent().close();
smartsheetResponse.getEntity().setContent(contentStream);
}
try {
contentStream.mark((int) smartsheetResponse.getEntity().getContentLength());
long timeSpent = System.currentTimeMillis() - start;
if (!shouldRetry(++attempt, timeSpent, smartsheetResponse)) {
// should not retry, or retry time exceeded, exit the retry loop
break;
}
} finally {
if(bodyStream != null) {
bodyStream.reset();
}
contentStream.reset();
}
// moving this to finally causes issues because socket is closed (which means response stream is closed)
this.releaseConnection();
} catch (ClientProtocolException e) {
try {
logger.warn("ClientProtocolException " + e.getMessage());
logger.warn("{}", RequestAndResponseData.of(apacheHttpRequest, requestEntityCopy, smartsheetResponse,
responseEntityCopy, REQUEST_RESPONSE_SUMMARY));
// if this is a PUT and was retried by the http client, the body content stream is at the
// end and is a NonRepeatableRequest. If we marked the body content stream prior to execute,
// reset and retry
if (canRetryRequest && e.getCause() instanceof NonRepeatableRequestException) {
if (smartsheetRequest.getEntity() != null) {
smartsheetRequest.getEntity().getContent().reset();
}
continue;
}
} catch (IOException ignore) {
}
throw new HttpClientException("Error occurred.", e);
} catch (NoHttpResponseException e) {
try {
logger.warn("NoHttpResponseException " + e.getMessage());
logger.warn("{}", RequestAndResponseData.of(apacheHttpRequest, requestEntityCopy, smartsheetResponse,
responseEntityCopy, REQUEST_RESPONSE_SUMMARY));
// check to see if the response was empty and this was a POST. All other HTTP methods
// will be automatically retried by the http client.
// (POST is non-idempotent and is not retried automatically, but is safe for us to retry)
if (canRetryRequest && smartsheetRequest.getMethod() == HttpMethod.POST) {
if (smartsheetRequest.getEntity() != null) {
smartsheetRequest.getEntity().getContent().reset();
}
continue;
}
} catch (IOException ignore) {
}
throw new HttpClientException("Error occurred.", e);
} catch (IOException e) {
try {
logger.warn("{}", RequestAndResponseData.of(apacheHttpRequest, requestEntityCopy, smartsheetResponse,
responseEntityCopy, REQUEST_RESPONSE_SUMMARY));
} catch (IOException ignore) {
}
throw new HttpClientException("Error occurred.", e);
}
}
return smartsheetResponse;
} |
python | def set_ext_param(self, ext_key, param_key, val):
'''
Set the provided parameter in a set of extension parameters.
'''
if not self.extensions.get(ext_key):
self.extensions[ext_key] = defaultdict(lambda: None)
self.extensions[ext_key][param_key] = val |
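For context, a minimal self-contained sketch of the guard-then-assign pattern above (the bare `extensions` dict is a hypothetical stand-in for the attribute on the real class):

from collections import defaultdict

extensions = {}
ext_key, param_key, val = 'ext_outcome', 'url', 'https://example.com/outcome'
# create the per-extension mapping on first use; missing params read as None
if not extensions.get(ext_key):
    extensions[ext_key] = defaultdict(lambda: None)
extensions[ext_key][param_key] = val
assert extensions[ext_key]['missing'] is None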
java | public static double[] columnPackedCopy(final double[][] m1) {
final int rowdim = m1.length, coldim = getColumnDimensionality(m1);
final double[] vals = new double[m1.length * coldim];
for(int i = 0; i < rowdim; i++) {
final double[] rowM = m1[i];
// assert rowM.length == coldim : ERR_MATRIX_RAGGED;
for(int j = 0, k = i; j < coldim; j++, k += rowdim) {
vals[k] = rowM[j];
}
}
return vals;
} |
java | public void connect() {
CommandFactory f = new CommandFactory();
f.addTrainerInitCommand();
initMessage = f.next();
super.start();
super.setName("Trainer");
} |
python | def get_default_stats():
"""Returns a :class: `dict` of the default stats structure."""
default_stats = {
"total_count": 0,
"max": 0,
"min": 0,
"value": 0,
"average": 0,
"last_update": None,
}
return {
"totals": default_stats,
"colors": {
"red": default_stats.copy(),
"blue": default_stats.copy(),
"yellow": default_stats.copy(),
"green": default_stats.copy(),
"black": default_stats.copy(),
}
} |
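A usage note on the structure above: each color bucket is an independent (shallow) copy, so mutating one does not leak into another:

stats = get_default_stats()
stats['colors']['red']['total_count'] += 1
assert stats['colors']['blue']['total_count'] == 0  # copies are independent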
python | def create_snmp_manager(self, manager, host, **kwargs):
"""Create an SNMP manager.
:param manager: Name of manager to be created.
:type manager: str
:param host: IP address or DNS name of SNMP server to be used.
:type host: str
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**POST snmp/:manager**
:type \*\*kwargs: optional
:returns: A dictionary describing the created SNMP manager.
:rtype: ResponseDict
"""
data = {"host": host}
data.update(kwargs)
return self._request("POST", "snmp/{0}".format(manager), data) |
python | def _chunkForSend(self, data):
"""
limit the chunks that we send over PB to 128k, since it has a hardwired
string-size limit of 640k.
"""
LIMIT = self.CHUNK_LIMIT
for i in range(0, len(data), LIMIT):
yield data[i:i + LIMIT] |
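The slicing loop above generalizes to any sequence; a self-contained check of the same chunking logic (the 4-byte limit is purely illustrative):

def chunk(data, limit=128 * 1024):
    # yield successive slices of at most `limit` items each
    for i in range(0, len(data), limit):
        yield data[i:i + limit]

assert list(chunk(b'abcdef', limit=4)) == [b'abcd', b'ef']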
python | def process_params(self, params):
'''
Populates the launch data from a dictionary. Only cares about keys in
the LAUNCH_DATA_PARAMETERS list, or that start with 'custom_' or
'ext_'.
'''
for key, val in params.items():
if key in LAUNCH_DATA_PARAMETERS and val != 'None':
if key == 'roles':
if isinstance(val, list):
# If it's already a list, no need to parse
self.roles = list(val)
else:
# If it's a ',' delimited string, split
self.roles = val.split(',')
else:
setattr(self, key, touni(val))
elif key.startswith('custom_'):
self.custom_params[key] = touni(val)
elif key.startswith('ext_'):
self.ext_params[key] = touni(val)
java | void removeExportedKeys(Table toDrop) {
// toDrop.schema may be null because it is not registered
Schema schema = (Schema) schemaMap.get(toDrop.getSchemaName().name);
for (int i = 0; i < schema.tableList.size(); i++) {
Table table = (Table) schema.tableList.get(i);
for (int j = table.constraintList.length - 1; j >= 0; j--) {
Table refTable = table.constraintList[j].getRef();
if (toDrop == refTable) {
table.removeConstraint(j);
}
}
}
} |
java | private void destroyPingExecutor() {
synchronized (pingExecutor) {
if (!pingExecutor.isShutdown()) {
try {
log.debugf("Shutting down WebSocket ping executor");
pingExecutor.shutdown();
if (!pingExecutor.awaitTermination(1, TimeUnit.SECONDS)) {
pingExecutor.shutdownNow();
}
} catch (Throwable t) {
log.warnf("Cannot shut down WebSocket ping executor. Cause=%s", t.toString());
}
}
}
} |
python | def search_ap(self, mode, query):
"""搜索接入点
查看指定接入点的所有配置信息,包括所有监听端口的配置。
Args:
- mode: 搜索模式,可以是domain、ip、host
- query: 搜索文本
Returns:
返回一个tuple对象,其格式为(<result>, <ResponseInfo>)
- result 成功返回搜索结果,失败返回{"error": "<errMsg string>"}
- ResponseInfo 请求的Response信息
"""
url = '{0}/v3/aps/search?{1}={2}'.format(self.host, mode, query)
return self.__get(url) |
java | private static ImmutableMap<String, Annotation> buildAnnotations(Iterable<String> whitelist) {
ImmutableMap.Builder<String, Annotation> annotationsBuilder = ImmutableMap.builder();
annotationsBuilder.putAll(Annotation.recognizedAnnotations);
for (String unrecognizedAnnotation : whitelist) {
if (!unrecognizedAnnotation.isEmpty()
&& !Annotation.recognizedAnnotations.containsKey(unrecognizedAnnotation)) {
annotationsBuilder.put(unrecognizedAnnotation, Annotation.NOT_IMPLEMENTED);
}
}
return annotationsBuilder.build();
} |
java | private static SimpleEntry<byte[], Integer> readAllBytes(final InputStream inputStream, final long fileSizeHint)
throws IOException {
if (fileSizeHint > MAX_BUFFER_SIZE) {
throw new IOException("InputStream is too large to read");
}
final int bufferSize = fileSizeHint < 1L
// If fileSizeHint is unknown, use default buffer size
? DEFAULT_BUFFER_SIZE
// fileSizeHint is just a hint -- limit the max allocated buffer size, so that invalid ZipEntry
// lengths do not become a memory allocation attack vector
: Math.min((int) fileSizeHint, MAX_INITIAL_BUFFER_SIZE);
byte[] buf = new byte[bufferSize];
int bufLength = buf.length;
int totBytesRead = 0;
for (int bytesRead;;) {
// Fill buffer -- may fill more or fewer bytes than buffer size
while ((bytesRead = inputStream.read(buf, totBytesRead, bufLength - totBytesRead)) > 0) {
totBytesRead += bytesRead;
}
if (bytesRead < 0) {
// Reached end of stream
break;
}
// bytesRead == 0 => grow buffer, avoiding overflow
if (bufLength <= MAX_BUFFER_SIZE - bufLength) {
bufLength = bufLength << 1;
} else {
if (bufLength == MAX_BUFFER_SIZE) {
throw new IOException("InputStream too large to read");
}
bufLength = MAX_BUFFER_SIZE;
}
buf = Arrays.copyOf(buf, bufLength);
}
// Return buffer and number of bytes read
return new SimpleEntry<>((bufLength == totBytesRead) ? buf : Arrays.copyOf(buf, totBytesRead),
totBytesRead);
} |
python | def new_filename(original_filename, new_locale):
"""Returns a filename derived from original_filename, using new_locale as the locale"""
orig_file = Path(original_filename)
new_file = orig_file.parent.parent.parent / new_locale / orig_file.parent.name / orig_file.name
return new_file.abspath() |
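The derivation above simply swaps the locale component two directory levels up; an approximate illustration with pathlib (the original appears to use a path library providing `.abspath()`):

from pathlib import PurePosixPath

orig = PurePosixPath('locale/en/LC_MESSAGES/django.po')
new = orig.parent.parent.parent / 'fr' / orig.parent.name / orig.name
assert str(new) == 'locale/fr/LC_MESSAGES/django.po'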
java | public ConversationMessage viewConversationMessage(final String messageId)
throws NotFoundException, GeneralException, UnauthorizedException {
String url = CONVERSATIONS_BASE_URL + CONVERSATION_MESSAGE_PATH;
return messageBirdService.requestByID(url, messageId, ConversationMessage.class);
} |
python | def setColor(self, color):
"""
Sets the current :py:class:`Color` to use.
:param color: The :py:class:`Color` to use.
:rtype: Nothing.
"""
self.color = color
if self.brush and self.brush.doesUseSourceCaching():
self.brush.cacheBrush(color) |
java | public static MySqlConnectionPoolBuilder newBuilder(final String connectionString, final String username) {
final String[] arr = connectionString.split(":");
final String host = arr[0];
final int port = Integer.parseInt(arr[1]);
return newBuilder(host, port, username);
} |
python | def conv(self,field_name,conv_func):
"""When a record is returned by a SELECT, ask conversion of
specified field value with the specified function"""
if field_name not in self.fields:
raise NameError,"Unknown field %s" %field_name
self.conv_func[field_name] = conv_func |
python | def listVars(prefix="", equals="\t= ", **kw):
"""List IRAF variables."""
keylist = getVarList()
if len(keylist) == 0:
print('No IRAF variables defined')
else:
keylist.sort()
for word in keylist:
print("%s%s%s%s" % (prefix, word, equals, envget(word))) |
java | @Override
public TopicSubscriber createDurableSubscriber(Topic topic, String subscriptionName, String messageSelector, boolean noLocal) throws JMSException
{
throw new IllegalStateException("Method not available on this domain.");
} |
python | def homegearCheckInit(self, remote):
"""Check if proxy is still initialized"""
rdict = self.remotes.get(remote)
if not rdict:
return False
if rdict.get('type') != BACKEND_HOMEGEAR:
return False
try:
interface_id = "%s-%s" % (self._interface_id, remote)
return self.proxies[interface_id].clientServerInitialized(interface_id)
except Exception as err:
LOG.debug(
"ServerThread.homegearCheckInit: Exception: %s" % str(err))
return False |
python | def select_product():
"""
binds the frozen context the selected features
should be called only once - calls after the first call have
no effect
"""
global _product_selected
if _product_selected:
# product already selected ... ignore
return
_product_selected = True
from django_productline import context, template
featuremonkey.add_import_guard('django.conf')
featuremonkey.add_import_guard('django.db')
os.environ['DJANGO_SETTINGS_MODULE'] = 'django_productline.settings'
contextfile = os.environ['PRODUCT_CONTEXT_FILENAME']
equationfile = os.environ['PRODUCT_EQUATION_FILENAME']
#bind context and compose features
context.bind_context(contextfile)
get_composer().select_equation(equationfile)
# after composition we are now able to bind composed template settings
template.bind_settings()
featuremonkey.remove_import_guard('django.conf')
featuremonkey.remove_import_guard('django.db')
import django
if compare_version(django.get_version(), '1.7') >= 0:
django.setup()
# force import of settings and urls
# better fail during initialization than on the first request
from django.conf import settings
from django.core.urlresolvers import get_resolver
# eager creation of URLResolver
get_resolver(None)
# make sure overextends tag is registered
from django.template.loader import get_template
from overextends import models |
java | protected void assertArgumentNotNull(String argumentName, Object value) {
if (argumentName == null) {
String msg = "The argument name should not be null: argName=null value=" + value;
throw new IllegalArgumentException(msg);
}
if (value == null) {
String msg = "The value should not be null: argName=" + argumentName;
throw new IllegalArgumentException(msg);
}
} |
java | public AwsSecurityFindingFilters withRecommendationText(StringFilter... recommendationText) {
if (this.recommendationText == null) {
setRecommendationText(new java.util.ArrayList<StringFilter>(recommendationText.length));
}
for (StringFilter ele : recommendationText) {
this.recommendationText.add(ele);
}
return this;
} |
python | def run_filter_radia(job, bams, radia_file, univ_options, radia_options, chrom):
"""
Run filterradia on the RADIA output.
:param dict bams: Dict of bam and bai for tumor DNA-Seq, normal DNA-Seq and tumor RNA-Seq
:param toil.fileStore.FileID radia_file: The vcf from running RADIA
:param dict univ_options: Dict of universal options used by almost all tools
:param dict radia_options: Options specific to RADIA
:param str chrom: Chromosome to process
:return: fsID for the filtered chromosome vcf
:rtype: toil.fileStore.FileID
"""
work_dir = os.getcwd()
input_files = {
'rna.bam': bams['tumor_rna'],
'rna.bam.bai': bams['tumor_rnai'],
'tumor.bam': bams['tumor_dna'],
'tumor.bam.bai': bams['tumor_dnai'],
'normal.bam': bams['normal_dna'],
'normal.bam.bai': bams['normal_dnai'],
'radia.vcf': radia_file,
'genome.fa.tar.gz': radia_options['genome_fasta'],
'genome.fa.fai.tar.gz': radia_options['genome_fai'],
'cosmic_beds': radia_options['cosmic_beds'],
'dbsnp_beds': radia_options['dbsnp_beds'],
'retrogene_beds': radia_options['retrogene_beds'],
'pseudogene_beds': radia_options['pseudogene_beds'],
'gencode_beds': radia_options['gencode_beds']
}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
for key in ('genome.fa', 'genome.fa.fai'):
input_files[key] = untargz(input_files[key + '.tar.gz'], work_dir)
for key in ('cosmic_beds', 'dbsnp_beds', 'retrogene_beds', 'pseudogene_beds', 'gencode_beds'):
input_files[key] = untargz(input_files[key], work_dir)
input_files = {key: docker_path(path) for key, path in input_files.items()}
filterradia_log = ''.join([work_dir, '/radia_filtered_', chrom, '_radia.log'])
parameters = [univ_options['patient'], # shortID
chrom.lstrip('chr'),
input_files['radia.vcf'],
'/data',
'/home/radia/scripts',
'-d', input_files['dbsnp_beds'],
'-r', input_files['retrogene_beds'],
'-p', input_files['pseudogene_beds'],
'-c', input_files['cosmic_beds'],
'-t', input_files['gencode_beds'],
'--noSnpEff',
'--noBlacklist',
'--noTargets',
'--noRnaBlacklist',
'-f', input_files['genome.fa'],
'--log=INFO',
'-g', docker_path(filterradia_log)]
docker_call(tool='filterradia',
tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'],
tool_version=radia_options['version'])
output_file = ''.join([work_dir, '/', chrom, '.vcf'])
os.rename(''.join([work_dir, '/', univ_options['patient'], '_', chrom, '.vcf']), output_file)
output_fsid = job.fileStore.writeGlobalFile(output_file)
export_results(job, output_fsid, output_file, univ_options, subfolder='mutations/radia')
job.fileStore.logToMaster('Ran filter-radia on %s:%s successfully'
% (univ_options['patient'], chrom))
return output_fsid |
java | public final EObject ruleParenthesizedAssignableElement() throws RecognitionException {
EObject current = null;
Token otherlv_0=null;
Token otherlv_2=null;
EObject this_AssignableAlternatives_1 = null;
enterRule();
try {
// InternalXtext.g:2544:2: ( (otherlv_0= '(' this_AssignableAlternatives_1= ruleAssignableAlternatives otherlv_2= ')' ) )
// InternalXtext.g:2545:2: (otherlv_0= '(' this_AssignableAlternatives_1= ruleAssignableAlternatives otherlv_2= ')' )
{
// InternalXtext.g:2545:2: (otherlv_0= '(' this_AssignableAlternatives_1= ruleAssignableAlternatives otherlv_2= ')' )
// InternalXtext.g:2546:3: otherlv_0= '(' this_AssignableAlternatives_1= ruleAssignableAlternatives otherlv_2= ')'
{
otherlv_0=(Token)match(input,15,FollowSets000.FOLLOW_40);
newLeafNode(otherlv_0, grammarAccess.getParenthesizedAssignableElementAccess().getLeftParenthesisKeyword_0());
newCompositeNode(grammarAccess.getParenthesizedAssignableElementAccess().getAssignableAlternativesParserRuleCall_1());
pushFollow(FollowSets000.FOLLOW_37);
this_AssignableAlternatives_1=ruleAssignableAlternatives();
state._fsp--;
current = this_AssignableAlternatives_1;
afterParserOrEnumRuleCall();
otherlv_2=(Token)match(input,16,FollowSets000.FOLLOW_2);
newLeafNode(otherlv_2, grammarAccess.getParenthesizedAssignableElementAccess().getRightParenthesisKeyword_2());
}
}
leaveRule();
}
catch (RecognitionException re) {
recover(input,re);
appendSkippedTokens();
}
finally {
}
return current;
} |
java | @Override
public void doRender(final WComponent component, final WebXmlRenderContext renderContext) {
WMultiDropdown dropdown = (WMultiDropdown) component;
XmlStringBuilder xml = renderContext.getWriter();
String dataKey = dropdown.getListCacheKey();
boolean readOnly = dropdown.isReadOnly();
xml.appendTagOpen("ui:multidropdown");
xml.appendAttribute("id", component.getId());
xml.appendOptionalAttribute("class", component.getHtmlClass());
xml.appendOptionalAttribute("track", component.isTracking(), "true");
xml.appendOptionalAttribute("hidden", dropdown.isHidden(), "true");
if (readOnly) {
xml.appendAttribute("readOnly", "true");
} else {
xml.appendOptionalAttribute("data", dataKey != null && !readOnly, dataKey);
xml.appendOptionalAttribute("disabled", dropdown.isDisabled(), "true");
xml.appendOptionalAttribute("required", dropdown.isMandatory(), "true");
xml.appendOptionalAttribute("submitOnChange", dropdown.isSubmitOnChange(), "true");
xml.appendOptionalAttribute("toolTip", component.getToolTip());
xml.appendOptionalAttribute("accessibleText", component.getAccessibleText());
int min = dropdown.getMinSelect();
int max = dropdown.getMaxSelect();
xml.appendOptionalAttribute("min", min > 0, min);
xml.appendOptionalAttribute("max", max > 0, max);
xml.appendOptionalAttribute("title", I18nUtilities.format(null, InternalMessages.DEFAULT_MULTIDROPDOWN_TIP));
}
xml.appendClose();
// Options
List<?> options = dropdown.getOptions();
boolean renderSelectionsOnly = dropdown.isReadOnly() || dataKey != null;
if (options != null) {
int optionIndex = 0;
List<?> selections = dropdown.getSelected();
for (Object option : options) {
if (option instanceof OptionGroup) {
xml.appendTagOpen("ui:optgroup");
xml.appendAttribute("label", ((OptionGroup) option).getDesc());
xml.appendClose();
for (Object nestedOption : ((OptionGroup) option).getOptions()) {
renderOption(dropdown, nestedOption, optionIndex++, xml, selections,
renderSelectionsOnly);
}
xml.appendEndTag("ui:optgroup");
} else {
renderOption(dropdown, option, optionIndex++, xml, selections,
renderSelectionsOnly);
}
}
}
if (!readOnly) {
DiagnosticRenderUtil.renderDiagnostics(dropdown, renderContext);
}
// End tag
xml.appendEndTag("ui:multidropdown");
} |
python | def addFeatureSet(self):
"""
Adds a new feature set into this repo
"""
self._openRepo()
dataset = self._repo.getDatasetByName(self._args.datasetName)
filePath = self._getFilePath(self._args.filePath,
self._args.relativePath)
name = getNameFromPath(self._args.filePath)
featureSet = sequence_annotations.Gff3DbFeatureSet(
dataset, name)
referenceSetName = self._args.referenceSetName
if referenceSetName is None:
raise exceptions.RepoManagerException(
"A reference set name must be provided")
referenceSet = self._repo.getReferenceSetByName(referenceSetName)
featureSet.setReferenceSet(referenceSet)
ontologyName = self._args.ontologyName
if ontologyName is None:
raise exceptions.RepoManagerException(
"A sequence ontology name must be provided")
ontology = self._repo.getOntologyByName(ontologyName)
self._checkSequenceOntology(ontology)
featureSet.setOntology(ontology)
featureSet.populateFromFile(filePath)
featureSet.setAttributes(json.loads(self._args.attributes))
self._updateRepo(self._repo.insertFeatureSet, featureSet) |
python | def validateIP(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None):
"""Raises ValidationException if value is not an IPv4 or IPv6 address.
Returns the value argument.
* value (str): The value being validated as an IP address.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateIP('127.0.0.1')
'127.0.0.1'
>>> pysv.validateIP('255.255.255.255')
'255.255.255.255'
>>> pysv.validateIP('256.256.256.256')
Traceback (most recent call last):
pysimplevalidate.ValidationException: '256.256.256.256' is not a valid IP address.
>>> pysv.validateIP('1:2:3:4:5:6:7:8')
'1:2:3:4:5:6:7:8'
>>> pysv.validateIP('1::8')
'1::8'
>>> pysv.validateIP('fe80::7:8%eth0')
'fe80::7:8%eth0'
>>> pysv.validateIP('::255.255.255.255')
'::255.255.255.255'
"""
# Validate parameters.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
return value
# Reuse the logic in validateRegex()
try:
try:
# Check if value is an IPv4 address.
if validateRegex(value=value, regex=IPV4_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes):
return value
except ValidationException:
pass # Go on to check if it's an IPv6 address.
# Check if value is an IPv6 address.
if validateRegex(value=value, regex=IPV6_REGEX, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes):
return value
except ValidationException:
_raiseValidationException(_('%r is not a valid IP address.') % (_errstr(value)), excMsg) |
java | public CommandArgs createCommandArgs() throws ParseException {
// In case the last arg was a call-by-name , have that parameter parse a 'no-value' value.
// Can only succeed in certain cases with certain parameters.
if (nextNamedParam.isPresent()) {
// The last parsed arg did indeed end with '-{paramName}' without assigning that parameter a value.
final CliParam param = nextNamedParam.get();
final Object arg = param.noValue();
addArg(param, arg);
}
// Assign default values to any optional params not bound.
resolveUnboundParams();
final List<Object> commandArgs = new ArrayList<>(params.size());
for (CliParam param : params) {
final Object parsedValue = parsedValues.get(param);
if (parsedValue == null && !param.isNullable()) {
// If there is a missing arg value at this point, this is an internal error.
throw new IllegalStateException("Internal Error: Not all params have been parsed! Missing=" + param);
}
commandArgs.add(parsedValue);
}
return new CommandArgsImpl(commandArgs);
} |
java | public static void bitVectorToEdge(final BitVector bv, final long seed, final int numVertices, final int partSize, final int e[]) {
if (numVertices == 0) {
e[0] = e[1] = e[2] = -1;
return;
}
final long[] hash = new long[3];
Hashes.spooky4(bv, seed, hash);
e[0] = (int)((hash[0] & 0x7FFFFFFFFFFFFFFFL) % partSize);
e[1] = (int)(partSize + (hash[1] & 0x7FFFFFFFFFFFFFFFL) % partSize);
e[2] = (int)((partSize << 1) + (hash[2] & 0x7FFFFFFFFFFFFFFFL) % partSize);
} |
java | private String toAlias(String propertyName) {
String result = propertyAliasType.get(propertyName);
return result == null ? propertyName : result;
} |
python | def hmget(self, key, field, *fields, encoding=_NOTSET):
"""Get the values of all the given fields."""
return self.execute(b'HMGET', key, field, *fields, encoding=encoding) |
python | def boost_ranks(job, isoform_expression, merged_mhc_calls, transgene_out, univ_options,
rankboost_options):
"""
Boost the ranks of the predicted peptides:MHC combinations.
:param toil.fileStore.FileID isoform_expression: fsID of rsem isoform expression file
:param dict merged_mhc_calls: Dict of results from merging mhc peptide binding predictions
:param dict transgene_out: Dict of results from running Transgene
:param dict univ_options: Dict of universal options used by almost all tools
:param dict rankboost_options: Options specific to rankboost
:return: Dict of concise and detailed results for mhci and mhcii
output_files:
|- 'mhcii_rankboost_concise_results.tsv': fsID
|- 'mhcii_rankboost_detailed_results.txt': fsID
|- 'mhci_rankboost_concise_results.tsv': fsID
+- 'mhci_rankboost_detailed_results.txt': fsID
:rtype: dict
"""
work_dir = os.getcwd()
input_files = {
'rsem_quant.tsv': isoform_expression,
'mhci_merged_files.tsv': merged_mhc_calls['mhci_merged_files.list'],
'mhcii_merged_files.tsv': merged_mhc_calls['mhcii_merged_files.list'],
'mhci_peptides.faa': transgene_out['transgened_tumor_10_mer_peptides.faa'],
'mhcii_peptides.faa': transgene_out['transgened_tumor_15_mer_peptides.faa']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
output_files = {}
import re
for mhc in ('mhci', 'mhcii'):
ratios = re.sub("'", '', repr(rankboost_options[''.join([mhc, '_args'])]))
parameters = ['--' + mhc,
'--predictions', input_files[''.join([mhc, '_merged_files.tsv'])],
'--expression', input_files['rsem_quant.tsv'],
'--peptides', input_files[''.join([mhc, '_peptides.faa'])],
'--ratios', ratios
]
docker_call(tool='rankboost', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], tool_version=rankboost_options['version'])
mhc_concise = ''.join([work_dir, '/', mhc, '_rankboost_concise_results.tsv'])
mhc_detailed = ''.join([work_dir, '/', mhc, '_rankboost_detailed_results.txt'])
output_files[mhc] = {}
if os.path.exists(mhc_concise):
output_files[os.path.basename(mhc_concise)] = job.fileStore.writeGlobalFile(mhc_concise)
export_results(job, output_files[os.path.basename(mhc_concise)], mhc_concise,
univ_options, subfolder='rankboost')
else:
output_files[os.path.basename(mhc_concise)] = None
if os.path.exists(mhc_detailed):
output_files[os.path.basename(mhc_detailed)] = \
job.fileStore.writeGlobalFile(mhc_detailed)
export_results(job, output_files[os.path.basename(mhc_detailed)], mhc_detailed,
univ_options, subfolder='rankboost')
else:
output_files[os.path.basename(mhc_detailed)] = None
job.fileStore.logToMaster('Ran boost_ranks on %s successfully' % univ_options['patient'])
return output_files |
java | public static Person getWithJAASKey(final JAASSystem _jaasSystem,
final String _jaasKey)
throws EFapsException
{
long personId = 0;
Connection con = null;
try {
con = Context.getConnection();
PreparedStatement stmt = null;
try {
stmt = con.prepareStatement(Person.SQL_JAASKEY);
stmt.setObject(1, _jaasKey);
stmt.setObject(2, _jaasSystem.getId());
final ResultSet rs = stmt.executeQuery();
if (rs.next()) {
personId = rs.getLong(1);
}
rs.close();
} catch (final SQLException e) {
Person.LOG.error("search for person for JAAS system '" + _jaasSystem.getName() + "' with key '"
+ _jaasKey + "' is not possible", e);
throw new EFapsException(Person.class, "getWithJAASKey.SQLException", e, _jaasSystem.getName(),
_jaasKey);
} finally {
try {
if (stmt != null) {
stmt.close();
con.commit();
}
} catch (final SQLException e) {
throw new EFapsException(Person.class, "getWithJAASKey.SQLException", e, _jaasSystem.getName(),
_jaasKey);
}
}
} finally {
try {
if (con != null && !con.isClosed()) {
con.close();
}
} catch (final SQLException e) {
throw new EFapsException(Person.class, "updateLastLogin.SQLException", e);
}
}
return Person.get(personId);
} |
python | def getSCDPURL(self, serviceType, default=None):
"""Returns the SCDP (Service Control Protocol Document) URL for a given service type.
When the device definitions have been loaded with :meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions` this
method returns for a given service type/namespace the associated URL to the SCDP. If the device definitions
have not been loaded a default value can be given which gets returned instead. The SCDP specifies all the
interaction functionality a device provides.
:param serviceType: the service type to look up for
:param default: the default return value in case the service type is not found and device definitions are not
loaded
:type default: str or None
:return: the URL/URI
:rtype: str or None
:raises ValueError: if the device did load device definitions and the service type is not known.
.. seealso::
:meth:`~simpletr64.DeviceTR64.loadDeviceDefinitions`
"""
if serviceType in self.__deviceServiceDefinitions.keys():
return self.__deviceServiceDefinitions[serviceType]["scpdURL"]
# check if definitions have been loaded; if so, don't return the default
if self.__deviceXMLInitialized:
raise ValueError("Device does not support the given serviceType: " + serviceType)
return default |
java | public static ParametricFactorGraph buildSequenceModel(Iterable<String> emissionFeatureLines,
String featureDelimiter) {
// Read in the possible values of each variable.
List<String> words = StringUtils.readColumnFromDelimitedLines(emissionFeatureLines, 0, featureDelimiter);
List<String> labels = StringUtils.readColumnFromDelimitedLines(emissionFeatureLines, 1, featureDelimiter);
List<String> emissionFeatures = StringUtils.readColumnFromDelimitedLines(emissionFeatureLines, 2, featureDelimiter);
// Create dictionaries for each variable's values.
DiscreteVariable wordType = new DiscreteVariable("word", words);
DiscreteVariable labelType = new DiscreteVariable("label", labels);
DiscreteVariable emissionFeatureType = new DiscreteVariable("emissionFeature", emissionFeatures);
// Create a dynamic factor graph with a single plate replicating
// the input/output variables.
ParametricFactorGraphBuilder builder = new ParametricFactorGraphBuilder();
builder.addPlate(PLATE_NAME, new VariableNumMap(Ints.asList(1, 2),
Arrays.asList(INPUT_NAME, OUTPUT_NAME), Arrays.asList(wordType, labelType)), 10000);
String inputPattern = PLATE_NAME + "/?(0)/" + INPUT_NAME;
String outputPattern = PLATE_NAME + "/?(0)/" + OUTPUT_NAME;
String nextOutputPattern = PLATE_NAME + "/?(1)/" + OUTPUT_NAME;
VariableNumMap plateVars = new VariableNumMap(Ints.asList(1, 2),
Arrays.asList(inputPattern, outputPattern), Arrays.asList(wordType, labelType));
// Read in the emission features (for the word/label weights).
VariableNumMap x = plateVars.getVariablesByName(inputPattern);
VariableNumMap y = plateVars.getVariablesByName(outputPattern);
VariableNumMap emissionFeatureVar = VariableNumMap.singleton(0, "emissionFeature", emissionFeatureType);
TableFactor emissionFeatureFactor = TableFactor.fromDelimitedFile(
Arrays.asList(x, y, emissionFeatureVar), emissionFeatureLines,
featureDelimiter, false, SparseTensorBuilder.getFactory())
.cacheWeightPermutations();
System.out.println(emissionFeatureFactor.getVars());
// Add a parametric factor for the word/label weights
DiscreteLogLinearFactor emissionFactor = new DiscreteLogLinearFactor(x.union(y), emissionFeatureVar,
emissionFeatureFactor);
builder.addFactor(WORD_LABEL_FACTOR, emissionFactor,
VariableNamePattern.fromTemplateVariables(plateVars, VariableNumMap.EMPTY));
// Create a factor connecting adjacent labels
VariableNumMap adjacentVars = new VariableNumMap(Ints.asList(0, 1),
Arrays.asList(outputPattern, nextOutputPattern), Arrays.asList(labelType, labelType));
builder.addFactor(TRANSITION_FACTOR, DiscreteLogLinearFactor.createIndicatorFactor(adjacentVars),
VariableNamePattern.fromTemplateVariables(adjacentVars, VariableNumMap.EMPTY));
return builder.build();
} |
java | public static IDLProxyObject createSingle(InputStream is, boolean debug, boolean isUniName) throws IOException {
return createSingle(is, debug, null, isUniName);
} |
java | public void marshall(GetSampledRequestsRequest getSampledRequestsRequest, ProtocolMarshaller protocolMarshaller) {
if (getSampledRequestsRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(getSampledRequestsRequest.getWebAclId(), WEBACLID_BINDING);
protocolMarshaller.marshall(getSampledRequestsRequest.getRuleId(), RULEID_BINDING);
protocolMarshaller.marshall(getSampledRequestsRequest.getTimeWindow(), TIMEWINDOW_BINDING);
protocolMarshaller.marshall(getSampledRequestsRequest.getMaxItems(), MAXITEMS_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
python | def findall(node, filter_=None, stop=None, maxlevel=None, mincount=None, maxcount=None):
"""
Search nodes matching `filter_` but stop at `maxlevel` or `stop`.
Return tuple with matching nodes.
Args:
node: top node, start searching.
Keyword Args:
filter_: function called with every `node` as argument, `node` is returned if `True`.
stop: stop iteration at `node` if `stop` function returns `True` for `node`.
maxlevel (int): maximum descending in the node hierarchy.
mincount (int): minimum number of nodes.
maxcount (int): maximum number of nodes.
Example tree:
>>> from anytree import Node, RenderTree, AsciiStyle
>>> f = Node("f")
>>> b = Node("b", parent=f)
>>> a = Node("a", parent=b)
>>> d = Node("d", parent=b)
>>> c = Node("c", parent=d)
>>> e = Node("e", parent=d)
>>> g = Node("g", parent=f)
>>> i = Node("i", parent=g)
>>> h = Node("h", parent=i)
>>> print(RenderTree(f, style=AsciiStyle()).by_attr())
f
|-- b
| |-- a
| +-- d
| |-- c
| +-- e
+-- g
+-- i
+-- h
>>> findall(f, filter_=lambda node: node.name in ("a", "b"))
(Node('/f/b'), Node('/f/b/a'))
>>> findall(f, filter_=lambda node: d in node.path)
(Node('/f/b/d'), Node('/f/b/d/c'), Node('/f/b/d/e'))
The number of matches can be limited:
>>> findall(f, filter_=lambda node: d in node.path, mincount=4) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting at least 4 elements, but found 3. ... Node('/f/b/d/e'))
>>> findall(f, filter_=lambda node: d in node.path, maxcount=2) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
anytree.search.CountError: Expecting 2 elements at maximum, but found 3. ... Node('/f/b/d/e'))
"""
return _findall(node, filter_=filter_, stop=stop,
maxlevel=maxlevel, mincount=mincount, maxcount=maxcount) |
python | def setup_figure(self, figure):
"""
Makes any desired changes to the figure object
This method will be called once with a figure object
before any plotting has completed. Subclasses that
override this method should make sure that the base
class method is called.
"""
for th in self.themeables.values():
th.setup_figure(figure) |
python | def sync_release_files(self):
""" Purge + download files returning files removed + added """
release_files = []
for release in self.releases.values():
release_files.extend(release)
downloaded_files = set()
deferred_exception = None
for release_file in release_files:
try:
downloaded_file = self.download_file(
release_file["url"], release_file["digests"]["sha256"]
)
if downloaded_file:
downloaded_files.add(
str(downloaded_file.relative_to(self.mirror.homedir))
)
except Exception as e:
logger.exception(
f"Continuing to next file after error downloading: "
f"{release_file['url']}"
)
if not deferred_exception: # keep first exception
deferred_exception = e
if deferred_exception:
raise deferred_exception # raise the exception after trying all files
self.mirror.altered_packages[self.name] = downloaded_files |
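The deferred-exception pattern used above, isolated into a self-contained sketch (try everything, then re-raise only the first failure):

def process_all(items, fn):
    deferred = None
    for item in items:
        try:
            fn(item)
        except Exception as e:
            if deferred is None:  # keep only the first exception
                deferred = e
    if deferred is not None:
        raise deferred  # raised only after every item was attempted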
java | public static void isNotEmpty( Object[] argument,
String name ) {
isNotNull(argument, name);
if (argument.length == 0) {
throw new IllegalArgumentException(CommonI18n.argumentMayNotBeEmpty.text(name));
}
} |
python | def boundaries(self, boundaryEdges=True, featureAngle=65, nonManifoldEdges=True):
"""
Return an ``Actor`` that shows the boundary lines of an input mesh.
:param bool boundaryEdges: Turn on/off the extraction of boundary edges.
:param float featureAngle: Specify the feature angle for extracting feature edges.
:param bool nonManifoldEdges: Turn on/off the extraction of non-manifold edges.
"""
fe = vtk.vtkFeatureEdges()
fe.SetInputData(self.polydata())
fe.SetBoundaryEdges(boundaryEdges)
if featureAngle:
fe.FeatureEdgesOn()
fe.SetFeatureAngle(featureAngle)
else:
fe.FeatureEdgesOff()
fe.SetNonManifoldEdges(nonManifoldEdges)
fe.ColoringOff()
fe.Update()
return Actor(fe.GetOutput(), c="p").lw(5) |
java | public static base_response unset(nitro_service client, snmpmib resource, String[] args) throws Exception{
snmpmib unsetresource = new snmpmib();
return unsetresource.unset_resource(client,args);
} |
java | static ObjectName createObjectName(final String domain, final PathAddress pathAddress) {
return createObjectName(domain, pathAddress, null);
} |
python | def bind_to_constructor(self, cls, constructor):
"""Bind a class to a callable singleton constructor."""
self._check_class(cls)
if constructor is None:
raise InjectorException('Constructor cannot be None, key=%s' % cls)
self._bindings[cls] = _ConstructorBinding(constructor)
logger.debug('Bound %s to a constructor %s', cls, constructor)
return self |
java | public static <T> Collector<T, ?, List<T>> filtering(Predicate<? super T> predicate) {
return filtering(predicate, Collectors.toList());
} |
python | def info(self, request, message, extra_tags='', fail_silently=False):
"""Add a message with the ``INFO`` level."""
add(self.target_name,
request, constants.INFO, message, extra_tags=extra_tags,
fail_silently=fail_silently) |
python | def clone(root, jsontreecls=jsontree, datetimeencoder=_datetimeencoder,
datetimedecoder=_datetimedecoder):
"""Clone an object by first searializing out and then loading it back in.
"""
return json.loads(json.dumps(root, cls=JSONTreeEncoder,
datetimeencoder=datetimeencoder),
cls=JSONTreeDecoder, jsontreecls=jsontreecls,
datetimedecoder=datetimedecoder) |
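The same serialize/then-load round-trip with plain json, to show why this yields a deep, independent copy (the jsontree-specific encoder/decoder hooks additionally preserve datetimes and attribute access):

import json

original = {'a': [1, 2, 3], 'b': {'c': 'x'}}
copy = json.loads(json.dumps(original))
assert copy == original and copy['a'] is not original['a']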
python | def visit(self, module):
"""Walk the given astroid *tree* and transform each encountered node
Only the nodes which have transforms registered will actually
be replaced or changed.
"""
module.body = [self._visit(child) for child in module.body]
return self._transform(module) |
python | def check_arrange_act_spacing(self) -> typing.Generator[AAAError, None, None]:
"""
* When no spaces found, point error at line above act block
* When too many spaces found, point error at 2nd blank line
"""
yield from self.check_block_spacing(
LineType.arrange,
LineType.act,
'AAA03 expected 1 blank line before Act block, found {}',
) |
java | public JobWithJars getPlanWithJars() throws ProgramInvocationException {
if (isUsingProgramEntryPoint()) {
return new JobWithJars(getPlan(), getAllLibraries(), classpaths, userCodeClassLoader);
} else {
throw new ProgramInvocationException("Cannot create a " + JobWithJars.class.getSimpleName() +
" for a program that is using the interactive mode.", getPlan().getJobId());
}
} |
java | @Override
public final String nameForKey(final int mKey) {
checkState(!mPageReadTrx.isClosed(), "Transaction is already closed.");
NodeMetaPageFactory.MetaKey key = new NodeMetaPageFactory.MetaKey(mKey);
NodeMetaPageFactory.MetaValue value = (MetaValue)mPageReadTrx.getMetaBucket().get(key);
return value == null ? null : value.getData();
} |
python | def set_with_conversion(self, variable, value_string):
"""Convert user supplied string to Python type.
Lets user use values such as True, False and integers. All variables can be set
to None, regardless of type. Handle the case where a string is typed by the user
and is not quoted, as a string literal.
"""
self._assert_valid_variable(variable)
try:
v = ast.literal_eval(value_string)
except (ValueError, SyntaxError):
v = value_string
if v is None or v == "none":
self._variables[variable] = None
else:
try:
type_converter = variable_type_map[variable]
value_string = self._validate_variable_type(
value_string, type_converter
)
value = type_converter(value_string)
self._variables[variable] = value
except ValueError:
raise d1_cli.impl.exceptions.InvalidArguments(
"Invalid value for {}: {}".format(variable, value_string)
) |
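The conversion step above in isolation: `ast.literal_eval` parses Python literals, and the fallback treats unquoted text as a plain string:

import ast

def to_python(value_string):
    try:
        return ast.literal_eval(value_string)
    except (ValueError, SyntaxError):
        return value_string  # unquoted text becomes a string literal

assert to_python('True') is True
assert to_python('42') == 42
assert to_python('hello') == 'hello'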
python | def consume(self, f):
"""
Creates a sink which consumes all values for this Recver. *f* is a
callable which takes a single argument. All values sent on this
Recver's Sender will be passed to *f* for processing. Unlike *map*
however consume terminates this chain::
sender, recver = h.pipe
@recver.consume
def _(data):
logging.info(data)
sender.send('Hello') # logs 'Hello'
"""
@self.hub.spawn
def _():
for item in self:
# TODO: think through whether trapping for HALT here is a good
# idea
try:
f(item)
except vanilla.exception.Halt:
self.close()
break |
python | def init_manager(app, db, **kwargs):
"""
Initialise Manager
:param app: Flask app object
:param db: db instance
:param kwargs: Additional keyword arguments to be made available as shell context
"""
manager.app = app
manager.db = db
manager.context = kwargs
manager.add_command('db', MigrateCommand)
manager.add_command('clean', Clean())
manager.add_command('showurls', ShowUrls())
manager.add_command('shell', Shell(make_context=shell_context))
manager.add_command('plainshell', Shell(make_context=shell_context,
use_ipython=False, use_bpython=False))
return manager |
python | def step(self, observations):
""" Sample action from an action space for given state """
log_histogram = self(observations)
actions = self.q_head.sample(log_histogram)
return {
'actions': actions,
'log_histogram': log_histogram
} |
python | def unix_install():
"""
Edits or creates .bashrc, .bash_profile, and .profile files in the users
HOME directory in order to add your current directory (hopefully your
PmagPy directory) and assorted lower directories in the PmagPy/programs
directory to your PATH environment variable. It also adds the PmagPy and
the PmagPy/programs directories to PYTHONPATH.
"""
PmagPyDir = os.path.abspath(".")
COMMAND = """\n
for d in %s/programs/*/ "%s/programs/"; do
case ":$PATH:" in
*":$d:"*) :;; # already there
*) PMAGPATHS="$PMAGPATHS:$d";; # or PATH="$PATH:$new_entry"
esac
done
export PYTHONPATH="$PYTHONPATH:%s:%s/programs/"
export PATH="$PATH:$PMAGPATHS" """ % (PmagPyDir, PmagPyDir, PmagPyDir, PmagPyDir)
frc_path = os.path.join(
os.environ["HOME"], ".bashrc") # not recommended, but hey it freaking works
fbprof_path = os.path.join(os.environ["HOME"], ".bash_profile")
fprof_path = os.path.join(os.environ["HOME"], ".profile")
all_paths = [frc_path, fbprof_path, fprof_path]
for f_path in all_paths:
open_type = 'a'
if not os.path.isfile(f_path):
open_type = 'w+'
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
else:
fin = open(f_path, 'r')
current_f = fin.read()
fin.close()
if COMMAND not in current_f:
fout = open(f_path, open_type)
fout.write(COMMAND)
fout.close()
print("Install complete. Please restart the shell to complete install.\nIf you are seeing strange or non-existent paths in your PATH or PYTHONPATH variable please manually check your .bashrc, .bash_profile, and .profile or attempt to reinstall.") |
python | def list(self, cart_glob=['*.json']):
"""
List all carts
"""
carts = []
for glob in cart_glob:
# Translate cart names into cart file names
if not glob.endswith('.json'):
search_glob = glob + ".json"
else:
search_glob = glob
for cart in juicer.utils.find_pattern(Constants.CART_LOCATION, search_glob):
cart_name = cart.split('/')[-1].replace('.json', '')
carts.append(cart_name)
return carts |
python | def decrypt(data, digest=True):
"""Decrypt provided data."""
alg, _, data = data.rpartition("$")
if not alg:
return data
data = _from_hex_digest(data) if digest else data
try:
return implementations["decryption"][alg](
data, implementations["get_key"]()
)
except KeyError:
raise CryptError("Can not decrypt key for algorithm: %s" % alg) |
java | public Link link(String name, BaseAsset asset, String url,
boolean onMenu, Map<String, Object> attributes) {
Link link = new Link(instance);
link.setAsset(asset);
link.setName(name);
link.setURL(url);
link.setOnMenu(onMenu);
addAttributes(link, attributes);
link.save();
return link;
} |
python | def mi(x, y):
'''
compute and return the mutual information between x and y
inputs:
-------
x, y: iterables of hashable items
output:
-------
mi: float
Notes:
------
if you are trying to mix several symbols together as in mi(x, (y0,y1,...)), try
info[p] = _info.mi(x, info.combine_symbols(y0, y1, ...) )
'''
# dict.values() returns a view object that has to be converted to a list before being
# converted to an array
# the following lines will execute properly in python3, but not python2 because there
# is no zip object
try:
if isinstance(x, zip):
x = list(x)
if isinstance(y, zip):
y = list(y)
except:
pass
probX = symbols_to_prob(x).prob()
probY = symbols_to_prob(y).prob()
probXY = symbols_to_prob(combine_symbols(x, y)).prob()
return entropy(prob=probX) + entropy(prob=probY) - entropy(prob=probXY) |
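The identity being computed above is MI(X;Y) = H(X) + H(Y) - H(X,Y); a self-contained numeric check on a toy pair of variables:

from collections import Counter
from math import log2

def entropy(symbols):
    counts = Counter(symbols)
    n = len(symbols)
    return -sum(c / n * log2(c / n) for c in counts.values())

x = [0, 0, 1, 1]
y = [0, 0, 1, 1]  # y is fully determined by x
mi_xy = entropy(x) + entropy(y) - entropy(list(zip(x, y)))
assert abs(mi_xy - 1.0) < 1e-9  # exactly one bit of shared information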
python | def check(table='filter', chain=None, rule=None, family='ipv4'):
'''
Check for the existence of a rule in the table and chain
This function accepts a rule in a standard nftables command format,
starting with the chain. Trying to force users to adapt to a new
method of creating rules would be irritating at best, and we
already have a parser that can handle it.
CLI Example:
.. code-block:: bash
salt '*' nftables.check filter input \\
rule='tcp dport 22 log accept'
IPv6:
salt '*' nftables.check filter input \\
rule='tcp dport 22 log accept' \\
family=ipv6
'''
ret = {'comment': '',
'result': False}
if not chain:
ret['comment'] = 'Chain needs to be specified'
return ret
if not rule:
ret['comment'] = 'Rule needs to be specified'
return ret
res = check_table(table, family=family)
if not res['result']:
return res
res = check_chain(table, chain, family=family)
if not res['result']:
return res
nft_family = _NFTABLES_FAMILIES[family]
cmd = '{0} --handle --numeric --numeric --numeric list chain {1} {2} {3}'.\
format(_nftables_cmd(), nft_family, table, chain)
search_rule = '{0} #'.format(rule)
out = __salt__['cmd.run'](cmd, python_shell=False).find(search_rule)
if out == -1:
ret['comment'] = 'Rule {0} in chain {1} in table {2} in family {3} does not exist'.\
format(rule, chain, table, family)
else:
ret['comment'] = 'Rule {0} in chain {1} in table {2} in family {3} exists'.\
format(rule, chain, table, family)
ret['result'] = True
return ret |
java | @NonNull
public static Term divide(@NonNull Term left, @NonNull Term right) {
return new BinaryArithmeticTerm(ArithmeticOperator.QUOTIENT, left, right);
} |
python | def save_file(self, srcfile):
"""
Save a (raw) file to the store.
"""
filehash = digest_file(srcfile)
if not os.path.exists(self.object_path(filehash)):
# Copy the file to a temporary location first, then move, to make sure we don't end up with
# truncated contents if the build gets interrupted.
tmppath = self.temporary_object_path(filehash)
copyfile(srcfile, tmppath)
self._move_to_store(tmppath, filehash)
return filehash |
python | def get_git_repositories_activity_metrics(self, project, from_date, aggregation_type, skip, top):
"""GetGitRepositoriesActivityMetrics.
[Preview API] Retrieves git activity metrics for repositories matching specified criteria.
:param str project: Project ID or project name
:param datetime from_date: Date from which, the trends are to be fetched.
:param str aggregation_type: Bucket size on which, trends are to be aggregated.
:param int skip: The number of repositories to ignore.
:param int top: The number of repositories for which activity metrics are to be retrieved.
:rtype: [RepositoryActivityMetrics]
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
query_parameters = {}
if from_date is not None:
query_parameters['fromDate'] = self._serialize.query('from_date', from_date, 'iso-8601')
if aggregation_type is not None:
query_parameters['aggregationType'] = self._serialize.query('aggregation_type', aggregation_type, 'str')
if skip is not None:
query_parameters['$skip'] = self._serialize.query('skip', skip, 'int')
if top is not None:
query_parameters['$top'] = self._serialize.query('top', top, 'int')
response = self._send(http_method='GET',
location_id='df7fbbca-630a-40e3-8aa3-7a3faf66947e',
version='5.0-preview.1',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('[RepositoryActivityMetrics]', self._unwrap_collection(response)) |
java | public ArrayList<OvhProductInformation> cart_cartId_domainTransfer_GET(String cartId, String domain) throws IOException {
String qPath = "/order/cart/{cartId}/domainTransfer";
StringBuilder sb = path(qPath, cartId);
query(sb, "domain", domain);
String resp = execN(qPath, "GET", sb.toString(), null);
return convertTo(resp, t7);
} |
python | def _iplot(self,kind='scatter',data=None,layout=None,filename='',sharing=None,title='',xTitle='',yTitle='',zTitle='',theme=None,colors=None,colorscale=None,fill=False,width=None,
dash='solid',mode='',interpolation='linear',symbol='circle',size=12,barmode='',sortbars=False,bargap=None,bargroupgap=None,bins=None,histnorm='',
histfunc='count',orientation='v',boxpoints=False,annotations=None,keys=False,bestfit=False,
bestfit_colors=None,mean=False,mean_colors=None,categories='',x='',y='',z='',text='',gridcolor=None,
zerolinecolor=None,margin=None,labels=None,values=None,secondary_y='',secondary_y_title='',subplots=False,shape=None,error_x=None,
error_y=None,error_type='data',locations=None,lon=None,lat=None,asFrame=False,asDates=False,asFigure=False,
asImage=False,dimensions=None,asPlot=False,asUrl=False,online=None,**kwargs):
"""
Returns a plotly chart either as inline chart, image of Figure object
Parameters:
-----------
kind : string
Kind of chart
scatter
bar
box
spread
ratio
heatmap
surface
histogram
bubble
bubble3d
scatter3d
scattergeo
ohlc
candle
pie
choropleth
data : Data
Plotly Data Object.
If not entered then the Data object will be automatically
generated from the DataFrame.
layout : Layout
Plotly layout Object
If not entered then the Layout objet will be automatically
generated from the DataFrame.
filename : string
Filename to be saved as in plotly account
sharing : string
Sets the sharing level permission
public - anyone can see this chart
private - only you can see this chart
secret - only people with the link can see the chart
title : string
Chart Title
xTitle : string
X Axis Title
yTitle : string
Y Axis Title
zTitle : string
Z Axis Title
Applicable only for 3d charts
theme : string
Layout Theme
solar
pearl
white
see cufflinks.getThemes() for all
available themes
colors : dict, list or string
{key:color} to specify the color for each column
[colors] to use the colors in the defined order
colorscale : string
Color scale name
If the color name is preceded by a minus (-)
then the scale is reversed
Only valid if 'colors' is null
See cufflinks.colors.scales() for available scales
fill : bool
Filled Traces
width : dict, list or int
int : applies to all traces
list : applies to each trace in the order
specified
dict: {column:value} for each column in
the dataframe
Line width
dash : dict, list or string
string : applies to all traces
list : applies to each trace in the order
specified
dict: {column:value} for each column in
the dataframe
Drawing style of lines
solid
dash
dashdot
dot
mode : dict, list or string
string : applies to all traces
list : applies to each trace in the order
specified
dict: {column:value} for each column in
the dataframe
Plotting mode for scatter trace
lines
markers
lines+markers
lines+text
markers+text
lines+markers+text
interpolation : dict, list, or string
string : applies to all traces
list : applies to each trace in the order
specified
dict: {column:value} for each column in
the dataframe
Positioning of the connecting lines
linear
spline
vhv
hvh
vh
hv
symbol : dict, list or string
string : applies to all traces
list : applies to each trace in the order
specified
dict: {column:value} for each column in
the dataframe
The symbol that is drawn on the plot for each marker
Valid only when mode includes markers
circle
circle-dot
diamond
square
and many more...(see plotly.validators.scatter.marker.SymbolValidator.values)
size : string or int
Size of marker
Valid only when mode includes markers
barmode : string
Mode when displaying bars
group
stack
overlay
* Only valid when kind='bar'
sortbars : bool
Sort bars in descending order
* Only valid when kind='bar'
bargap : float
Sets the gap between bars
[0,1)
* Only valid when kind is 'histogram' or 'bar'
bargroupgap : float
Set the gap between groups
[0,1)
* Only valid when kind is 'histogram' or 'bar'
bins : int or tuple
if int:
Specifies the number of bins
if tuple:
(start, end, size)
start : starting value
end: end value
size: bin size
* Only valid when kind='histogram'
histnorm : string
'' (frequency)
percent
probability
density
probability density
Sets the type of normalization for a histogram trace. By default
the height of each bar displays the frequency of occurrence, i.e.,
the number of times this value was found in the
corresponding bin. If set to 'percent', the height of each bar
displays the percentage of total occurrences found within the
corresponding bin. If set to 'probability', the height of each bar
displays the probability that an event will fall into the
corresponding bin. If set to 'density', the height of each bar is
equal to the number of occurrences in a bin divided by the size of
the bin interval such that summing the area of all bins will yield
the total number of occurrences. If set to 'probability density',
the height of each bar is equal to the probability that an
event will fall into the corresponding bin divided by the size of
the bin interval such that summing the area of all bins will yield
1.
* Only valid when kind='histogram'
histfunc : string
count
sum
avg
min
max
Sets the binning function used for a histogram trace.
* Only valid when kind='histogram'
orientation : string
h
v
Sets the orientation of the bars. If set to 'v', the length of each
bar will run vertically. If set to 'h', the length of each bar will
run horizontally
* Only valid when kind is 'histogram','bar' or 'box'
boxpoints : string
Displays data points in a box plot
outliers
all
suspectedoutliers
False
annotations : dictionary
Dictionary of annotations
{x_point : text}
keys : list of columns
List of columns to chart.
Also can be used for custom sorting.
bestfit : boolean or list
If True then a best fit line will be generated for
all columns.
If list then a best fit line will be generated for
each key on the list.
bestfit_colors : list or dict
{key:color} to specify the color for each column
[colors] to use the colors in the defined order
categories : string
Name of the column that contains the categories
x : string
Name of the column that contains the x axis values
y : string
Name of the column that contains the y axis values
z : string
Name of the column that contains the z axis values
text : string
Name of the column that contains the text values
gridcolor : string
Grid color
zerolinecolor : string
Zero line color
margin : dict or tuple
Dictionary (l,r,b,t) or
Tuple containing the left,
right, bottom and top margins
labels : string
Name of the column that contains the labels.
* Only valid when kind='pie'
values : string
Name of the column that contains the values.
* Only valid when kind='pie'
secondary_y : string or list(string)
Name(s) of the column to be charted on the
right hand side axis
secondary_y_title : string
Title of the secondary axis
subplots : bool
If true then each trace is placed in
subplot layout
shape : (rows,cols)
Tuple indicating the size of rows and columns
If omitted then the layout is automatically set
* Only valid when subplots=True
error_x : int or float or [int or float]
error values for the x axis
error_y : int or float or [int or float]
error values for the y axis
error_type : string
type of error bars
'data'
'constant'
'percent'
'sqrt'
'continuous'
'continuous_percent'
asFrame : bool
If true then the data component of Figure will
be of Pandas form (Series) otherwise they will
be index values
asDates : bool
If true it truncates times from a DatetimeIndex
asFigure : bool
If True returns plotly Figure
asImage : bool
If True it returns an Image (png)
In ONLINE mode:
Image file is saved in the working directory
Accepts:
filename
dimensions
scale
display_image
In OFFLINE mode:
Image file is downloaded (downloads folder) and a
regular plotly chart is displayed in Jupyter
Accepts:
filename
dimensions
dimensions : tuple(int,int)
Dimensions for image / chart
(width,height)
asPlot : bool
If True the chart opens in browser
asUrl : bool
If True the chart url/path is returned. No chart is displayed.
If Online : the URL is returned
If Offline : the local path is returned
online : bool
If True then the chart/image is rendered on the server
even when running in offline mode.
Other Kwargs
============
Line, Scatter
connectgaps : bool
If True, empty values are connected
Pie charts
sort : bool
If True it sorts the labels by value
pull : float [0-1]
Pulls the slices from the centre
hole : float [0-1]
Sets the size of the inner hole
linecolor : string
Sets the color for the contour line of the slices
linewidth : string
Sets the width for the contour line of the slices
textcolor : string
Sets the color for the text in the slices
textposition : string
Sets the position of the legends for each slice
outside
inner
textinfo : string
Sets the information to be displayed on
the legends
label
percent
value
* or any combination of the above using
'+' between each item,
e.g. 'label+percent'
Histogram
linecolor : string
specifies the line color of the histogram
Heatmap and Surface
center_scale : float
Centers the colorscale at a specific value
Automatically sets the (zmin,zmax) values
zmin : float
Defines the minimum range for the z values.
This affects the range for the colorscale
zmax : float
Defines the maximum range for the z values.
This affects the range for the colorscale
Error Bars
error_trace : string
Name of the column for which error should be
plotted. If omitted then errors apply to all
traces.
error_values_minus : int or float or [int or float]
Values corresponding to the span of the error bars
below the trace coordinates
error_color : string
Color for error bars
error_thickness : float
Sets the line thickness of the error bars
error_width : float
Sets the width (in pixels) of the cross-bar at both
ends of the error bars
error_opacity : float [0,1]
Opacity for the error bars
Subplots
horizontal_spacing : float [0,1]
Space between subplot columns.
vertical_spacing : float [0,1]
Space between subplot rows.
subplot_titles : bool
If True, chart titles are plotted
at the top of each subplot
shared_xaxes : bool
Assign shared x axes.
If True, subplots in the same grid column have one common
shared x-axis at the bottom of the grid.
shared_yaxes : bool
Assign shared y axes.
If True, subplots in the same grid row have one common
shared y-axis on the left-hand side of the grid.
Shapes
hline : float, list or dict
Draws a horizontal line at the
indicated y position(s)
Extra parameters can be passed in
the form of a dictionary (see shapes)
vline : float, list or dict
Draws a vertical line at the
indicated x position(s)
Extra parameters can be passed in
the form of a dictionary (see shapes)
hspan : (y0,y1)
Draws a horizontal rectangle at the
indicated (y0,y1) positions.
Extra parameters can be passed in
the form of a dictionary (see shapes)
vspan : (x0,x1)
Draws a vertical rectangle at the
indicated (x0,x1) positions.
Extra parameters can be passed in
the form of a dictionary (see shapes)
shapes : dict or list(dict)
List of dictionaries with the
specifications of a given shape.
See help(cufflinks.tools.get_shape)
for more information
Axis Ranges
xrange : [lower_bound,upper_bound]
Sets the range for the x axis
yrange : [lower_bound,upper_bound]
Sets the range for the y axis
zrange : [lower_bound,upper_bound]
Sets the range for the z axis
Explicit Layout Updates
layout_update : dict
The layout will be modified with all
the explicit values stated in the
dictionary.
It will not apply if layout is passed
as parameter.
Range Selector
rangeselector : dict
Defines a rangeselector object
see help(cf.tools.get_range_selector) for more information
Example:
{'steps':['1y','2 months','5 weeks','ytd','2mtd'],
'axis':'xaxis', 'bgcolor' : ('blue',.3),
'x': 0.2 , 'y' : 0.9}
Range Slider
rangeslider : bool or dict
Defines if a rangeslider is displayed
If bool:
True : Makes it visible
if dict:
Rangeslider object
Example:
{'bgcolor':('blue',.3),'autorange':True}
Annotations
fontcolor : str
Text color for annotations
fontsize : int
Text size for annotations
textangle : int
Text angle
See https://plot.ly/python/reference/#layout-annotations
for a complete list of valid parameters.
Exports
display_image : bool
If True then the image is displayed after being saved
** only valid if asImage=True
scale : integer
Increase the resolution of the image by `scale` amount
Only valid when asImage=True
"""
# Valid Kwargs
valid_kwargs = ['color','opacity','column','columns','labels','text','world_readable','colorbar']
BUBBLE_KWARGS = ['abs_size']
TRACE_KWARGS = ['hoverinfo','connectgaps']
HEATMAP_SURFACE_KWARGS = ['center_scale','zmin','zmax']
PIE_KWARGS=['sort','pull','hole','textposition','textinfo','linecolor','linewidth','textcolor']
OHLC_KWARGS=['up_color','down_color','open','high','low','close','volume','name','decreasing','increasing']
SUBPLOT_KWARGS=['horizontal_spacing', 'vertical_spacing',
'specs', 'insets','start_cell','shared_xaxes','shared_yaxes','subplot_titles','shared_xaxis','shared_yaxis']
GEO_KWARGS=['locationmode','locationsrc','geo','lon','lat']
ERROR_KWARGS=['error_trace','error_values_minus','error_color','error_thickness',
'error_width','error_opacity']
EXPORT_KWARGS=['display_image','scale']
FF_DISTPLOT=["group_labels", "bin_size", "curve_type", "rug_text", "show_hist", "show_curve", "show_rug"]
FF_VIOLIN=["data_header","group_header","show_rug","sort"]
kwargs_list = [tools.__LAYOUT_KWARGS,BUBBLE_KWARGS,TRACE_KWARGS,
OHLC_KWARGS,PIE_KWARGS,HEATMAP_SURFACE_KWARGS,SUBPLOT_KWARGS,GEO_KWARGS,ERROR_KWARGS,EXPORT_KWARGS,
FF_DISTPLOT,FF_VIOLIN]
for _kw_group in kwargs_list:
valid_kwargs.extend(_kw_group)
dict_modifiers_keys = ['line']
dict_modifiers={}
for k in dict_modifiers_keys:
dict_modifiers[k]=kwargs_from_keyword(kwargs,{},k,True)
for key in list(kwargs.keys()):
if key not in valid_kwargs:
raise Exception("Invalid keyword : '{0}'".format(key))
# Setting default values
if not colors:
colors=kwargs['color'] if 'color' in kwargs else colors
if isinstance(colors,str):
colors=[colors]
opacity=kwargs['opacity'] if 'opacity' in kwargs else 0.8
if not dimensions:
dimensions=auth.get_config_file()['dimensions']
# Get values from config theme
if theme is None:
theme = auth.get_config_file()['theme']
theme_config=tools.getTheme(theme)
if colorscale is None:
config_colorscale=auth.get_config_file()['colorscale']
if config_colorscale in ('dflt',None):
colorscale=theme_config['colorscale'] if 'colorscale' in theme_config else 'original'
else:
colorscale=config_colorscale
if width is None:
if kind != 'pie':
width=theme_config['linewidth'] if 'linewidth' in theme_config else 2
if margin is None:
margin=auth.get_config_file().get('margin',None)
# In case column was used instead of keys
if 'column' in kwargs:
keys=[kwargs['column']] if isinstance(kwargs['column'],str) else kwargs['column']
if 'columns' in kwargs:
keys=[kwargs['columns']] if isinstance(kwargs['columns'],str) else kwargs['columns']
kind='line' if kind=='lines' else kind
# Figure generators
def get_marker(marker=None):
# Use None as the default to avoid sharing one mutable dict across calls.
if marker is None:
marker={}
if 'line' in dict_modifiers:
if 'color' not in dict_modifiers['line']:
if 'linecolor' in kwargs:
linecolor=kwargs.get('linecolor')
else:
if 'linecolor' in tools.getTheme(theme=theme):
linecolor=normalize(tools.getTheme(theme=theme)['linecolor'])
else:
linecolor=tools.getLayout(theme=theme)['xaxis']['titlefont']['color']
dict_modifiers['line']['color']=linecolor
dict_modifiers['line']=tools.updateColors(dict_modifiers['line'])
marker['line']=deep_update(marker['line'],dict_modifiers['line'])
return marker
# We assume we are good citizens
validate=True
if not layout:
l_kwargs=dict([(k,kwargs[k]) for k in tools.__LAYOUT_KWARGS if k in kwargs])
if annotations:
ann_kwargs=check_kwargs(kwargs,tools.__ANN_KWARGS,clean_origin=True)
annotations=tools.get_annotations(self.copy(),annotations,kind=kind,theme=theme,**ann_kwargs)
layout=tools.getLayout(kind=kind,theme=theme,xTitle=xTitle,yTitle=yTitle,zTitle=zTitle,title=title,barmode=barmode,
bargap=bargap,bargroupgap=bargroupgap,annotations=annotations,gridcolor=gridcolor,
dimensions=dimensions,
zerolinecolor=zerolinecolor,margin=margin,is3d='3d' in kind,**l_kwargs)
elif isinstance(layout, Layout):
layout = layout.to_plotly_json()
if not data:
if categories and kind not in ('violin',):
data=[]
if 'bar' in kind:
df=self.copy()
df=df.set_index(categories)
fig=df.figure(kind=kind,colors=colors,colorscale=colorscale,fill=fill,width=width,sortbars=sortbars,opacity=opacity,
asDates=asDates,mode=mode,symbol=symbol,size=size,text=text,barmode=barmode,orientation=orientation)
data=fig['data']
else:
_keys=pd.unique(self[categories])
colors=get_colors(colors,colorscale,_keys)
mode='markers' if not mode else mode
for _ in _keys:
__=self[self[categories]==_].copy()
if text:
_text=__[text] if asFrame else __[text].values
_x=__[x] if asFrame else __[x].values
_y=__[y] if asFrame else __[y].values
if z:
_z=__[z] if asFrame else __[z].values
if 'bubble' in kind:
rg=__[size].values
rgo=self[size].values
if not kwargs.get('abs_size',False):
if len(rgo)>1:
_size=[int(100*(float(i)-rgo.min())/(rgo.max()-rgo.min()))+12 for i in rg]
else:
_size=[12] if len(rgo) else []
else:
_size=rgo
else:
_size=size
if '3d' in kind:
_data=Scatter3d(x=_x,y=_y,z=_z,mode=mode,name=_,
marker=dict(color=colors[_],symbol=symbol,size=_size,opacity=opacity,
line=dict(width=width)),textfont=tools.getLayout(theme=theme)['xaxis']['titlefont'])
else:
#see error 168
if type(_x)==pd.np.ndarray:
if '[ns]' in _x.dtype.str:
_x=_x.astype(str)
if type(_y)==pd.np.ndarray:
if '[ns]' in _y.dtype.str:
_y=_y.astype(str)
_data=Scatter(x=_x,y=_y,mode=mode,name=_,
marker=dict(color=colors[_],symbol=symbol,size=_size,opacity=opacity,
line=dict(width=width)),textfont=tools.getLayout(theme=theme)['xaxis']['titlefont'])
if text:
_data.update(text=_text)
data.append(_data)
else:
if kind in ('scatter','spread','ratio','bar','barh','area','line'):
df=self.copy()
if type(df)==pd.core.series.Series:
df=pd.DataFrame({df.name:df})
if x:
df=df.set_index(x)
if y and secondary_y:
if isinstance(secondary_y, str):
df=df[[y, secondary_y]]
else:
df=df[[y] + secondary_y]
elif y:
df=df[y]
if kind=='area':
df=df.transpose().fillna(0).cumsum().transpose()
mode='lines' if not mode else mode
if text:
if not isinstance(text,list):
text=self[text].values
data=df.to_iplot(colors=colors,colorscale=colorscale,kind=kind,interpolation=interpolation,fill=fill,width=width,dash=dash,sortbars=sortbars,keys=keys,
bestfit=bestfit,bestfit_colors=bestfit_colors,mean=mean,mean_colors=mean_colors,asDates=asDates,mode=mode,symbol=symbol,size=size,
text=text,**kwargs)
trace_kw=check_kwargs(kwargs,TRACE_KWARGS)
for trace in data:
trace.update(**trace_kw)
if kind in ('spread','ratio'):
if kind=='spread':
trace=self.apply(lambda x:x[0]-x[1],axis=1)
positive=trace.apply(lambda x:x if x>=0 else pd.np.nan)
negative=trace.apply(lambda x:x if x<0 else pd.np.nan)
trace=pd.DataFrame({'positive':positive,'negative':negative})
trace=trace.to_iplot(colors={'positive':'green','negative':'red'},width=0.5)
else:
trace=self.apply(lambda x:x[0]*1.0/x[1],axis=1).to_iplot(colors=['green'],width=1)
for t in trace:
t.update({'xaxis':'x2','yaxis':'y2','fill':'tozeroy',
'name':kind.capitalize(),'connectgaps':False,'showlegend':False})
data.append(trace[0])
if kind=='spread':
data.append(trace[1])
layout['yaxis'].update({'domain':[.3,1]})
layout['yaxis2']=copy.deepcopy(layout['yaxis'])
layout['xaxis2']=copy.deepcopy(layout['xaxis'])
layout['yaxis2'].update(domain=[0,.25],title=kind.capitalize())
layout['xaxis2'].update(anchor='y2',showticklabels=False)
layout['hovermode']='x'
if 'bar' in kind:
if 'stack' in barmode:
layout['legend'].update(traceorder='normal')
orientation = 'h' if kind=='barh' else orientation
for trace in data:
trace.update(orientation=orientation)
if orientation=='h':
trace['x'],trace['y']=trace['y'],trace['x']
elif kind=='bubble':
mode='markers' if 'markers' not in mode else mode
x=self[x].values.tolist()
y=self[y].values.tolist()
z=size if size else z
rg=self[z].values
if not kwargs.get('abs_size',False):
if len(rg) > 1:
z=[int(100*(float(_)-rg.min())/(rg.max()-rg.min()))+12 for _ in rg]
else:
z=[12] if len(rg) else []
else:
z=rg
text=kwargs['labels'] if 'labels' in kwargs else text
labels=self[text].values.tolist() if text else ''
clrs=colors if colors else get_scales(colorscale)
clrs=[clrs] if not isinstance(clrs,list) else clrs
clrs=[clrs[0]]*len(x) if len(clrs)==1 else clrs
marker=dict(color=clrs,size=z,symbol=symbol,
line=dict(width=width))
trace=Scatter(x=x,y=y,marker=marker,mode='markers',text=labels)
data=[trace]
elif kind in ('box','histogram','hist'):
if isinstance(self,pd.core.series.Series):
df=pd.DataFrame({self.name:self})
else:
df=self.copy()
data=[]
clrs=get_colors(colors,colorscale,df.columns)
if 'hist' in kind:
barmode = 'overlay' if barmode=='' else barmode
layout.update(barmode=barmode)
columns=keys if keys else df.columns
for _ in columns:
if kind=='box':
__=Box(y=df[_].values.tolist(),marker=dict(color=clrs[_]),name=_,
line=dict(width=width),boxpoints=boxpoints)
# 114 - Horizontal Box
__['orientation']=orientation
if orientation=='h':
__['x'],__['y']=__['y'],__['x']
else:
__=dict(x=df[_].values.tolist(),name=_,
marker=dict(color=clrs[_], line=dict(width=width)),
orientation=orientation,
opacity=kwargs['opacity'] if 'opacity' in kwargs else .8, histfunc=histfunc,
histnorm=histnorm)
__['marker']=get_marker(__['marker'])
if orientation=='h':
__['y']=__['x']
del __['x']
__ = Histogram(__)
if bins:
if type(bins) in (tuple,list):
try:
_bins={'start':bins[0],'end':bins[1],'size':bins[2]}
if orientation=='h':
__.update(ybins=_bins,autobiny=False)
else:
__.update(xbins=_bins,autobinx=False)
except:
print("Invalid format for bins generation")
else:
if orientation=='h':
__.update(nbinsy=bins)
else:
__.update(nbinsx=bins)
data.append(__)
elif kind in ('heatmap','surface'):
if x:
x=self[x].values.tolist()
else:
if self.index.__class__.__name__ in ('PeriodIndex','DatetimeIndex'):
x=self.index.format()
else:
x=self.index.values.tolist()
y=self[y].values.tolist() if y else self.columns.values.tolist()
z=self[z].values.tolist() if z else self.values.transpose()
scale=get_scales('rdbu') if not colorscale else get_scales(colorscale)
colorscale=[[float(_)/(len(scale)-1),scale[_]] for _ in range(len(scale))]
center_scale = kwargs.get('center_scale',None)
if is_list(z):
zmin=min(z)
zmax=max(z)
else:
zmin=z.min()
zmax=z.max()
if center_scale is not None:
if center_scale<=zmin+(zmax-zmin)/2:
zmin=center_scale*2-zmax
else:
zmax=center_scale*2-zmin
zmin=kwargs.get('zmin',zmin)
zmax=kwargs.get('zmax',zmax)
if kind=='heatmap':
data=[Heatmap(z=z,x=x,y=y,zmin=zmin,zmax=zmax,colorscale=colorscale)]
else:
data=[Surface(z=z,x=x,y=y,colorscale=colorscale)]
elif kind in ('scatter3d','bubble3d'):
data=[]
keys=self[text].values if text else list(range(len(self)))
colors=get_colors(colors,colorscale,keys,asList=True)
mode='markers' if 'markers' not in mode else mode
df=self.copy()
df['index']=keys
if kind=='bubble3d':
rg=self[size].values
if not kwargs.get('abs_size',False):
size=[int(100*(float(_)-rg.min())/(rg.max()-rg.min()))+12 for _ in rg]
else:
size=rg
else:
size=[size for _ in range(len(keys))]
_data=Scatter3d(x=df[x].values.tolist(),y=df[y].values.tolist(),z=df[z].values.tolist(),mode=mode,text=keys,
marker=dict(color=colors,symbol=symbol,size=size,opacity=.8))
if text:
_data.update(text=keys)
data.append(_data)
elif kind=='pie':
if not labels:
raise CufflinksError('Missing: labels')
if not values:
raise CufflinksError('Missing: values')
labels=self[labels].values.tolist()
values=self[values].values.tolist()
marker=dict(colors=get_colors(colors,colorscale,labels,asList=True))
marker.update(line=dict(color=kwargs.pop('linecolor',None),width=kwargs.pop('linewidth',width)))
pie=dict(values=values,labels=labels,name='',marker=marker)
kw=check_kwargs(kwargs,PIE_KWARGS)
kw['textfont']={'color':kw.pop('textcolor',None)}
pie.update(kw)
data=[]
del layout['xaxis']
del layout['yaxis']
data.append(Pie(pie))
validate=False
elif kind in ('old_candle','old_ohlc'):
d=ta._ohlc_dict(self)
if len(list(d.keys()))!=4:
raise Exception("OHLC type of charts require an Open, High, Low and Close column")
ohlc_kwargs=check_kwargs(kwargs,OHLC_KWARGS)
if kind=='old_candle':
fig=tools.get_candle(self,theme=theme,layout=layout,**ohlc_kwargs)
else:
fig=tools.get_ohlc(self,theme=theme,layout=layout,**ohlc_kwargs)
if bestfit:
df=self.copy()
bf=_to_iplot(self[d['close']],bestfit=True,bestfit_colors=bestfit_colors,asTimestamp=True)
fig['data'].append(bf[1])
data=fig['data']
layout=fig['layout']
elif kind in ('candle','ohlc','candlestick'):
kind='candlestick' if kind=='candle' else kind
kw=check_kwargs(kwargs,OHLC_KWARGS)
d=ta._ohlc_dict(self,validate='ohlc',**kw)
_d=dict(type=kind,
open=self[d['open']].values.tolist(),
high=self[d['high']].values.tolist(),
low=self[d['low']].values.tolist(),
close=self[d['close']].values.tolist())
if isinstance(self.index,pd.core.indexes.datetimes.DatetimeIndex):
_d['x']=self.index.astype('str')
else:
_d['x']=self.index
if 'name' in kw:
_d['name']=kw['name']
showlegend=False
if 'showlegend' in kwargs:
showlegend=kwargs['showlegend']
else:
if 'legend' in kwargs:
if type(kwargs['legend'])==bool:
showlegend=kwargs['legend']
# https://github.com/santosjorge/cufflinks/issues/113
# _d['increasing']=dict(line=dict(color=kw['up_color']) if 'up_color' in kw else dict(),showlegend=showlegend)
# _d['decreasing']=dict(line=dict(color=kw['down_color']) if 'down_color' in kw else dict(),showlegend=showlegend)
_d['increasing']=dict(line=dict(color=kw['up_color']) if 'up_color' in kw else dict())
_d['decreasing']=dict(line=dict(color=kw['down_color']) if 'down_color' in kw else dict())
for k in ('increasing','decreasing'):
if k in kw:
_d[k]=deep_update(_d[k],kw[k])
_d['showlegend']=showlegend
_d['yaxis']='y2'
data=[_d]
elif kind in ('choropleth','scattergeo'):
kw=check_kwargs(kwargs,GEO_KWARGS)
if kind=='choropleth':
if not all([x!=None for x in (locations,z)]):
raise Exception("Choropleth maps require a 'location' and 'z' column names specified")
geo_data={'type':'choropleth','locations':self[locations],'z':self[z],
'colorscale':get_colorscale(colorscale),
'marker':get_marker(dict(line=dict(width=width)))}
elif kind=='scattergeo':
if not all([x!=None for x in (lon,lat)]):
raise Exception("Scattergeo maps require a 'lon' and 'lat' column names specified")
geo_data={'type':'scattergeo','lat':self[lat],'lon':self[lon],
'marker':get_marker(dict(line=dict(width=width),
symbol=symbol,colorscale=get_colorscale(colorscale),
color=self[z] if z else None))}
if 'colorbar' in kwargs:
geo_data['colorbar']=kwargs['colorbar']
geo_data.update(kw)
if text:
geo_data.update(text=self[text])
validate=False
data=[]
data.append(geo_data)
# Figure Factory
elif kind in ('distplot',):
colors=get_colors(colors,colorscale,self.keys(),asList=True)
hist_data=self.transpose().values
kw=check_kwargs(kwargs,FF_DISTPLOT)
group_labels=kw.pop('group_labels',self.columns)
if histnorm:
kw['histnorm']=histnorm
fig=ff.create_distplot(hist_data=hist_data,group_labels=group_labels,
colors=colors,**kw)
data=fig.data
layout=tools.merge_dict(layout,fig.layout)
elif kind in ('violin',):
df=pd.DataFrame(self) if type(self)==pd.core.series.Series else self.copy()
kw=check_kwargs(kwargs,FF_VIOLIN)
kw['rugplot']=kw.pop('show_rug',True)
kw['title']=title
if 'group_header' not in kw:
kw['group_header']=categories if categories else None
categories=kw.get('group_header')
colors=get_colors(colors,colorscale,df[categories].value_counts().values if categories else df.keys(),asList=True)
kw['colors']=colors
if categories:
for _ in range(2,df[categories].value_counts().size+1):
layout['xaxis{0}'.format(_)]=layout['xaxis'].copy()
if categories not in df:
raise CufflinksError('Column "{0}" not found in DataFrame'.format(categories))
elif len(df.columns)==1:
raise CufflinksError('When "categories" are specified, two columns are expected. \n Only one column was found.')
elif len(df.columns)==2:
cols=list(df.columns)
cols.remove(categories)
kw['data_header']=cols[0]
else:
if 'data_header' not in kw:
raise CufflinksError('data_header must be the column name with the desired numeric data for the violin plot.')
else:
if len(df.columns)==1:
kw['data_header']=df.columns[0]
elif len(df.columns)>1:
if 'data_header' not in kw:
raise CufflinksError('data_header must be the column name with the desired numeric data for the violin plot.')
fig=ff.create_violin(df,**kw).to_dict()
data=fig['data']
layout=tools.merge_dict(layout,fig['layout'])
## Sharing Values
if all(['world_readable' in kwargs,sharing is None]):
sharing=kwargs['world_readable']
if isinstance(sharing,bool):
if sharing:
sharing='public'
else:
sharing='private'
if sharing is None:
sharing=auth.get_config_file()['sharing']
if not filename:
if title:
filename=title
else:
filename='Plotly Playground {0}'.format(time.strftime("%Y-%m-%d %H:%M:%S"))
## Figure definition
figure={}
figure['data']=data
figure['layout']=layout
## Check secondary axis
if secondary_y:
figure=tools._set_axis(figure,secondary_y,side='right')
if secondary_y_title:
figure['layout']['yaxis2']['title']=secondary_y_title
## Error Bars
if kind in ('scatter','bar','barh','lines','line'):
if any([error_x,error_y]):
def set_error(axis,**kwargs):
return tools.set_errors(figure,axis=axis,**kwargs)
kw=check_kwargs(kwargs,ERROR_KWARGS)
kw=dict([(k.replace('error_',''),v) for k,v in list(kw.items())])
kw['type']=error_type
if error_x:
kw['values']=error_x
figure=set_error('x',**kw)
if error_y:
kw['values']=error_y
figure=set_error('y',**kw)
## Subplots
if subplots:
fig=tools.strip_figures(figure)
kw=check_kwargs(kwargs,SUBPLOT_KWARGS)
for _ in ['x','y']:
if 'shared_{0}axes'.format(_) not in kw:
kw['shared_{0}axes'.format(_)]=kw.pop('shared_{0}axis'.format(_),False)
if 'subplot_titles' in kwargs:
if kwargs['subplot_titles']==True:
kw['subplot_titles']=[d['name'] for d in data]
else:
kw['subplot_titles']=kwargs['subplot_titles']
figure=tools.subplots(fig,shape,base_layout=layout,theme=theme,**kw)
## Exports
validate = False if 'shapes' in layout else validate
if asFigure:
return Figure(figure)
else:
return iplot(figure,validate=validate,sharing=sharing,filename=filename,
online=online,asImage=asImage,asUrl=asUrl,asPlot=asPlot,
dimensions=dimensions,display_image=kwargs.get('display_image',True)) |
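A typical call goes through the public DataFrame.iplot wrapper that delegates to this method; a small hedged usage sketch (column names and data are illustrative):

import pandas as pd
import cufflinks as cf

cf.go_offline()  # render locally instead of posting to a Plotly account
df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
df.iplot(kind='bar', barmode='stack', title='Demo chart')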
python | def domain(self, default):
"""
Get the domain for this pipeline.
- If an explicit domain was provided at construction time, use it.
- Otherwise, infer a domain from the registered columns.
- If no domain can be inferred, return ``default``.
Parameters
----------
default : zipline.pipeline.Domain
Domain to use if no domain can be inferred from this pipeline by
itself.
Returns
-------
domain : zipline.pipeline.Domain
The domain for the pipeline.
Raises
------
AmbiguousDomain
ValueError
If the terms in ``self`` conflict with self._domain.
"""
# Always compute our inferred domain to ensure that it's compatible
# with our explicit domain.
inferred = infer_domain(self._output_terms)
if inferred is GENERIC and self._domain is GENERIC:
# Both generic. Fall back to default.
return default
elif inferred is GENERIC and self._domain is not GENERIC:
# Use the non-generic domain.
return self._domain
elif inferred is not GENERIC and self._domain is GENERIC:
# Use the non-generic domain.
return inferred
else:
# Both non-generic. They have to match.
if inferred is not self._domain:
raise ValueError(
"Conflicting domains in Pipeline. Inferred {}, but {} was "
"passed at construction.".format(inferred, self._domain)
)
return inferred |
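The four-way resolution above reduces to "prefer the concrete side, and require agreement when both sides are concrete"; a self-contained illustration with a stand-in GENERIC sentinel (not zipline's actual objects):

GENERIC = object()  # stand-in for zipline's GENERIC domain

def resolve(inferred, explicit, default):
    # Mirrors the branch logic in Pipeline.domain above.
    if inferred is GENERIC and explicit is GENERIC:
        return default
    if inferred is GENERIC:
        return explicit
    if explicit is GENERIC:
        return inferred
    if inferred is not explicit:
        raise ValueError('conflicting domains')
    return inferred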
java | public static String escapeXml11(final String text, final XmlEscapeType type, final XmlEscapeLevel level) {
return escapeXml(text, XmlEscapeSymbols.XML11_SYMBOLS, type, level);
} |
python | def insert(self, context, plan):
"""
Include an insert operation in the given plan.
:param execution.Context context:
Current execution context.
:param list plan:
List of :class:`execution.Operation` instances.
"""
op = execution.Insert(self.__comp_name, self.__comp())
if op not in plan and self.available(context) != True:
for dep_stub in self.dependencies():
dep_stub.insert(context, plan)
plan.append(op) |
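The recursion gives a dependency-first ordering: every component's dependencies are planned before the component itself. A toy, self-contained illustration of the same pattern (names hypothetical):

def build_plan(component, deps, plan):
    # deps maps a component name to the names it depends on.
    if component in plan:
        return
    for dep in deps.get(component, ()):
        build_plan(dep, deps, plan)
    plan.append(component)

plan = []
build_plan('app', {'app': ['db', 'cache'], 'db': ['disk']}, plan)
assert plan == ['disk', 'db', 'cache', 'app']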
python | def booted(name, single=False):
'''
Ensure zone is booted
name : string
name of the zone
single : boolean
boot in single-user mode
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': ''}
zones = __salt__['zoneadm.list'](installed=True)
if name in zones:
## zone exists
if zones[name]['state'] == 'running':
## zone is running
ret['result'] = True
ret['comment'] = 'Zone {0} already booted'.format(name)
else:
## try and boot the zone
if not __opts__['test']:
zoneadm_res = __salt__['zoneadm.boot'](name, single)
if __opts__['test'] or zoneadm_res['status']:
ret['result'] = True
ret['changes'][name] = 'booted'
ret['comment'] = 'Zone {0} booted'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to boot {0}'.format(name)
else:
## zone does not exist
ret['comment'] = []
ret['comment'].append(
'The zone {0} is not in the installed or booted state.'.format(name)
)
for zone in zones:
if zones[zone]['uuid'] == name:
ret['comment'].append(
'The zone {0} has a uuid of {1}, please use the zone name instead!'.format(
zone,
name,
)
)
ret['result'] = False
ret['comment'] = "\n".join(ret['comment'])
return ret |
python | def _apply_dvs_infrastructure_traffic_resources(infra_traffic_resources,
resource_dicts):
'''
Applies the values of the resource dictionaries to infra traffic resources,
creating the infra traffic resource if required
(vim.DvsHostInfrastructureTrafficResource)
'''
for res_dict in resource_dicts:
filtered_traffic_resources = \
[r for r in infra_traffic_resources if r.key == res_dict['key']]
if filtered_traffic_resources:
traffic_res = filtered_traffic_resources[0]
else:
traffic_res = vim.DvsHostInfrastructureTrafficResource()
traffic_res.key = res_dict['key']
traffic_res.allocationInfo = \
vim.DvsHostInfrastructureTrafficResourceAllocation()
infra_traffic_resources.append(traffic_res)
if res_dict.get('limit'):
traffic_res.allocationInfo.limit = res_dict['limit']
if res_dict.get('reservation'):
traffic_res.allocationInfo.reservation = res_dict['reservation']
if res_dict.get('num_shares') or res_dict.get('share_level'):
if not traffic_res.allocationInfo.shares:
traffic_res.allocationInfo.shares = vim.SharesInfo()
if res_dict.get('share_level'):
traffic_res.allocationInfo.shares.level = \
vim.SharesLevel(res_dict['share_level'])
if res_dict.get('num_shares'):
#XXX Even though we always set the number of shares if provided,
#the vCenter will ignore it unless the share level is 'custom'.
traffic_res.allocationInfo.shares.shares = res_dict['num_shares'] |
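The resource_dicts argument is a list of plain mappings; a sketch of the expected shape, with key names taken from the lookups above and values purely illustrative:

resource_dicts = [
    {'key': 'vmotion', 'limit': 1000, 'reservation': 500},
    {'key': 'nfs', 'share_level': 'custom', 'num_shares': 150},
]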
python | def minimise_xyz(xyz):
"""Minimise an (x, y, z) coordinate."""
x, y, z = xyz
m = max(min(x, y), min(max(x, y), z))
return (x-m, y-m, z-m) |
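Here m is the median of the three coordinates, so the subtraction maps every equivalent (x, y, z) triple to a canonical form with the median component zeroed; a quick check:

assert minimise_xyz((3, 5, 7)) == (-2, 0, 2)
assert minimise_xyz((1, 1, 1)) == (0, 0, 0)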
java | int writeBytes(OutputStream out)
throws IOException
{
int length = myBuffer.length - myOffset;
byte[] bytes = new byte[length];
System.arraycopy(myBuffer, myOffset, bytes, 0, length);
out.write(bytes);
return length;
} |
java | private void requestSegments(IntSet segments, Map<Address, IntSet> sources, Set<Address> excludedSources) {
if (sources.isEmpty()) {
findSources(segments, sources, excludedSources);
}
for (Map.Entry<Address, IntSet> e : sources.entrySet()) {
addTransfer(e.getKey(), e.getValue());
}
} |
java | public void setPageSize(Rectangle pageSize) {
if(!guessFormat(pageSize, false)) {
this.pageWidth = (int) (pageSize.getWidth() * RtfElement.TWIPS_FACTOR);
this.pageHeight = (int) (pageSize.getHeight() * RtfElement.TWIPS_FACTOR);
this.landscape = pageWidth > pageHeight;
}
} |
python | def rectangles_from_grid(P, black=1):
"""Largest area rectangle in a binary matrix
:param P: matrix
:param black: search for rectangles filled with value black
:returns: area, left, top, right, bottom of optimal rectangle
consisting of all (i,j) with
left <= j < right and top <= i <= bottom
:complexity: linear
"""
rows = len(P)
cols = len(P[0])
t = [0] * cols
best = None
for i in range(rows):
for j in range(cols):
if P[i][j] == black:
t[j] += 1
else:
t[j] = 0
(area, left, height, right) = rectangles_from_histogram(t)
alt = (area, left, i, right, i-height)
if best is None or alt > best:
best = alt
return best |
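Assuming the companion rectangles_from_histogram from the same library is importable, a small usage check (the best all-ones rectangle below has area 4):

grid = [[0, 1, 1],
        [1, 1, 1],
        [1, 1, 0]]
area = rectangles_from_grid(grid)[0]
assert area == 4  # e.g. the 2x2 block of ones in the top-right corner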
java | @SneakyThrows
protected Http signHttpUserPreAuthRequest(final Http request) {
request.signRequest(
duoProperties.getDuoIntegrationKey(),
duoProperties.getDuoSecretKey());
return request;
} |
java | public int compareTo(MutableDouble anotherMutableDouble) {
double thisVal = this.d;
double anotherVal = anotherMutableDouble.d;
return (thisVal < anotherVal ? -1 : (thisVal == anotherVal ? 0 : 1));
} |
python | def from_string(self, value):
"""Convert string to enum value."""
if not isinstance(value, basestring):
raise ValueError('expected string value: ' + str(type(value)))
self.test_value(value)
return value |
java | private void initExpressions()
{
expressions = new LinkedList<>();
for (UriTemplateComponent c : components)
{
if (c instanceof Expression)
{
expressions.add((Expression) c);
}
}
} |
python | def obtain_token(self, config=None):
Obtain the token to access the QX Platform.
Raises:
CredentialsError: when token is invalid or the user has not
accepted the license.
ApiError: when the response from the server couldn't be parsed.
"""
client_application = CLIENT_APPLICATION
if self.config and ("client_application" in self.config):
client_application += ':' + self.config["client_application"]
headers = {'x-qx-client-application': client_application}
if self.token_unique:
try:
response = requests.post(str(self.config.get('url') +
"/users/loginWithToken"),
data={'apiToken': self.token_unique},
verify=self.verify,
headers=headers,
**self.extra_args)
except requests.RequestException as e:
raise ApiError('error during login: %s' % str(e))
elif config and ("email" in config) and ("password" in config):
email = config.get('email', None)
password = config.get('password', None)
credentials = {
'email': email,
'password': password
}
try:
response = requests.post(str(self.config.get('url') +
"/users/login"),
data=credentials,
verify=self.verify,
headers=headers,
**self.extra_args)
except requests.RequestException as e:
raise ApiError('error during login: %s' % str(e))
else:
raise CredentialsError('invalid token')
if response.status_code == 401:
error_message = None
try:
# For 401: ACCEPT_LICENSE_REQUIRED, a detailed message is
# present in the response and passed to the exception.
error_message = response.json()['error']['message']
except:
pass
if error_message:
raise CredentialsError('error during login: %s' % error_message)
else:
raise CredentialsError('invalid token')
try:
response.raise_for_status()
self.data_credentials = response.json()
except (requests.HTTPError, ValueError) as e:
raise ApiError('error during login: %s' % str(e))
if self.get_token() is None:
raise CredentialsError('invalid token') |
java | public String buildStyleClassValue(List/*<String>*/ styleClasses) {
if(styleClasses == null)
return EMPTY;
boolean styleWritten = false;
InternalStringBuilder buf = new InternalStringBuilder();
for(int i = 0; i < styleClasses.size(); i++) {
if(styleWritten)
buf.append(SPACE);
if(styleClasses.get(i) != null) {
buf.append(styleClasses.get(i));
styleWritten = true;
}
}
if(!styleWritten)
return null;
else return buf.toString();
} |
java | private static long parseLong(String s) {
try {
return Long.parseLong(s);
} catch (NumberFormatException nfe) {
log.error("Unable to parse long value '" + s + "'", nfe);
}
return -1;
} |
java | private boolean readHeader() {
boolean correct = true;
ByteOrder byteOrder = ByteOrder.LITTLE_ENDIAN;
int c1 = 0;
int c2 = 0;
try {
c1 = data.readByte().toInt();
c2 = data.readByte().toInt();
} catch (Exception ex) {
validation.addErrorLoc("Header IO Exception", "Header");
}
// read the first two bytes, in order to know the byte ordering
if (c1 == 'I' && c2 == 'I') {
byteOrder = ByteOrder.LITTLE_ENDIAN;
} else if (c1 == 'M' && c2 == 'M') {
byteOrder = ByteOrder.BIG_ENDIAN;
}
else if (byteOrderErrorTolerance > 0 && c1 == 'i' && c2 == 'i') {
validation.addWarning("Byte Order in lower case", "" + c1 + c2, "Header");
byteOrder = ByteOrder.LITTLE_ENDIAN;
} else if (byteOrderErrorTolerance > 0 && c1 == 'm' && c2 == 'm') {
validation.addWarning("Byte Order in lower case", "" + c1 + c2, "Header");
byteOrder = ByteOrder.BIG_ENDIAN;
} else if (byteOrderErrorTolerance > 1) {
validation.addWarning("Non-sense Byte Order. Trying Little Endian.", "" + c1 + c2, "Header");
byteOrder = ByteOrder.LITTLE_ENDIAN;
} else {
validation.addErrorLoc("Invalid Byte Order " + c1 + c2, "Header");
correct = false;
}
if (correct) {
tiffModel.setByteOrder(byteOrder);
data.setByteOrder(byteOrder);
try {
// read magic number
int magic = data.readShort().toInt();
tiffModel.setMagicNumber(magic);
} catch (Exception ex) {
validation.addErrorLoc("Magic number parsing error", "Header");
correct = false;
}
}
return correct;
} |
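A minimal Python analogue of the same header check, for readers who want to experiment outside the Java validator (classic TIFF only; the magic number should be 42):

import struct

def tiff_byte_order(path):
    with open(path, 'rb') as f:
        header = f.read(4)
    if header[:2] == b'II':
        endian = '<'   # little endian
    elif header[:2] == b'MM':
        endian = '>'   # big endian
    else:
        raise ValueError('invalid TIFF byte order')
    (magic,) = struct.unpack(endian + 'H', header[2:4])
    return endian, magic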
java | public void marshall(ListDeploymentGroupsRequest listDeploymentGroupsRequest, ProtocolMarshaller protocolMarshaller) {
if (listDeploymentGroupsRequest == null) {
throw new SdkClientException("Invalid argument passed to marshall(...)");
}
try {
protocolMarshaller.marshall(listDeploymentGroupsRequest.getApplicationName(), APPLICATIONNAME_BINDING);
protocolMarshaller.marshall(listDeploymentGroupsRequest.getNextToken(), NEXTTOKEN_BINDING);
} catch (Exception e) {
throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
}
} |
java | public String description() {
Util.out4.println("DeviceImpl.description() arrived");
//
// Record attribute request in black box
//
blackbox.insert_attr(Attr_Description);
//
// Return data to caller
//
Util.out4.println("Leaving DeviceImpl.description()");
return desc;
} |
java | public boolean isComplete() {
return _group != null && _type != null && _kind != null && _name != null && _version != null;
} |
java | public static AOL getAOL(final MavenProject project, final String architecture, final String os, final Linker linker,
final String aol, final Log log) throws MojoFailureException, MojoExecutionException {
/*
* To support a linker that is not the default linker specified in the aol.properties.
*/
String aol_linker;
if (linker != null && linker.getName() != null)
{
log.debug("linker original name: " + linker.getName());
aol_linker = linker.getName();
}
else
{
log.debug("linker original name not exist ");
aol_linker = getLinkerName(project, architecture, os,linker, log);
}
log.debug("aol_linker: " + aol_linker);
return aol == null ? new AOL(getArchitecture(architecture), getOS(os), aol_linker) : new AOL(aol);
} |
python | def _compute(self, arrays, dates, assets, mask):
"""
For each row in the input, compute a like-shaped array of per-row
ranks.
"""
return masked_rankdata_2d(
arrays[0],
mask,
self.inputs[0].missing_value,
self._method,
self._ascending,
) |
python | def render_table(request,
table,
links=None,
context=None,
template='tri_table/list.html',
blank_on_empty=False,
paginate_by=40, # pragma: no mutate
page=None,
paginator=None,
show_hits=False,
hit_label='Items',
post_bulk_edit=lambda table, queryset, updates: None):
"""
Render a table. This automatically handles pagination, sorting, filtering and bulk operations.
:param request: the request object. This is set on the table object so that it is available for lambda expressions.
:param table: an instance of Table
:param links: a list of instances of Link
:param context: dict of extra context parameters
:param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance.
:param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty
:param show_hits: Display how many items there are total in the paginator.
:param hit_label: Label for the show_hits display.
:return: a string with the rendered HTML table
"""
if not context:
context = {}
if isinstance(table, Namespace):
table = table()
assert isinstance(table, Table), table
table.request = request
should_return, dispatch_result = handle_dispatch(request=request, obj=table)
if should_return:
return dispatch_result
context['bulk_form'] = table.bulk_form
context['query_form'] = table.query_form
context['tri_query_error'] = table.query_error
if table.bulk_form and request.method == 'POST':
if table.bulk_form.is_valid():
queryset = table.bulk_queryset()
updates = {
field.name: field.value
for field in table.bulk_form.fields
if field.value is not None and field.value != '' and field.attr is not None
}
queryset.update(**updates)
post_bulk_edit(table=table, queryset=queryset, updates=updates)
return HttpResponseRedirect(request.META['HTTP_REFERER'])
table.context = table_context(
request,
table=table,
links=links,
paginate_by=paginate_by,
page=page,
extra_context=context,
paginator=paginator,
show_hits=show_hits,
hit_label=hit_label,
)
if not table.data and blank_on_empty:
return ''
if table.query_form and not table.query_form.is_valid():
table.data = None
table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>')
return render_template(request, template, table.context) |
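A hedged sketch of wiring this into a Django view; Table.from_model is part of tri.table's public API, though the model and view names here are assumptions:

from tri_table import Table, render_table

def albums_view(request):
    # Album is a hypothetical Django model for illustration.
    table = Table.from_model(model=Album)
    return render_table(request, table)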
python | def sub_base_uri(self):
""" This will return the sub_base_uri parsed from the base_uri
:return: str of the sub_base_uri
"""
return self.base_uri and self.base_uri.split('://')[-1].split('.')[
0] or self.base_uri |
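Since the property is pure string manipulation, its behaviour is easy to check directly:

base_uri = 'https://api.example.com/v1'
assert (base_uri and base_uri.split('://')[-1].split('.')[0]
        or base_uri) == 'api'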
python | def recover_db(self, src_file):
"""
" Recover DB from xxxxx.backup.json or xxxxx.json.factory to xxxxx.json
" [src_file]: copy from src_file to xxxxx.json
"""
with self.db_mutex:
try:
shutil.copy2(src_file, self.json_db_path)
except IOError as e:
_logger.debug("*** NO: %s file." % src_file)
raise e |
java | @Nullable
public Animator onDisappear(@NonNull final ViewGroup sceneRoot,
@Nullable TransitionValues startValues,
int startVisibility,
@Nullable TransitionValues endValues,
int endVisibility) {
if ((mMode & MODE_OUT) != MODE_OUT) {
return null;
}
View startView = (startValues != null) ? startValues.view : null;
View endView = (endValues != null) ? endValues.view : null;
View overlayView = null;
View viewToKeep = null;
boolean reusingCreatedOverlayView = false;
boolean createOverlayFromStartView = false;
if (endView == null || endView.getParent() == null) {
if (endView != null) {
// endView was removed from its parent - add it to the overlay
overlayView = endView;
} else if (startView != null) {
createOverlayFromStartView = true;
}
} else {
// visibility change
if (endVisibility == View.INVISIBLE) {
viewToKeep = endView;
} else {
// Becoming GONE
if (startView == endView || startView == null) {
viewToKeep = endView;
} else {
createOverlayFromStartView = true;
}
}
}
if (createOverlayFromStartView) {
// endView does not exist. Use startView only under certain
// conditions, because placing a view in an overlay necessitates
// it being removed from its current parent
if (startView.getTag(R.id.overlay_view) != null) {
// we've already created overlay for the start view.
// it means that we are applying two visibility
// transitions for the same view
overlayView = (View) startView.getTag(R.id.overlay_view);
reusingCreatedOverlayView = true;
} else if (startView.getParent() == null) {
// no parent - safe to use
overlayView = startView;
} else if (startView.getParent() instanceof View) {
View startParent = (View) startView.getParent();
TransitionValues startParentValues = getTransitionValues(startParent, true);
TransitionValues endParentValues = getMatchedTransitionValues(startParent,
true);
VisibilityInfo parentVisibilityInfo =
getVisibilityChangeInfo(startParentValues, endParentValues);
if (!parentVisibilityInfo.visibilityChange) {
overlayView = TransitionUtils.copyViewImage(sceneRoot, startView, startParent);
} else if (startParent.getParent() == null) {
int id = startParent.getId();
if (id != View.NO_ID && sceneRoot.findViewById(id) != null
&& mCanRemoveViews) {
// no parent, but its parent is unparented but the parent
// hierarchy has been replaced by a new hierarchy with the same id
// and it is safe to un-parent startView
overlayView = startView;
}
}
}
}
if (overlayView != null && startValues != null) {
// TODO: Need to do this for general case of adding to overlay
final int[] screenLoc = (int[]) startValues.values.get(PROPNAME_SCREEN_LOCATION);
if (!reusingCreatedOverlayView) {
ViewGroupOverlayUtils.addOverlay(sceneRoot, overlayView, screenLoc[0], screenLoc[1]);
}
Animator animator = onDisappear(sceneRoot, overlayView, startValues, endValues);
if (animator == null) {
ViewGroupOverlayUtils.removeOverlay(sceneRoot, overlayView);
} else if (!reusingCreatedOverlayView) {
final View finalOverlayView = overlayView;
final View finalStartView = startView;
finalStartView.setTag(R.id.overlay_view, finalOverlayView);
addListener(new TransitionListenerAdapter() {
@Override
public void onTransitionPause(@NonNull Transition transition) {
ViewGroupOverlayUtils.removeOverlay(sceneRoot, finalOverlayView);
}
@Override
public void onTransitionResume(@NonNull Transition transition) {
if (finalOverlayView.getParent() == null) {
ViewGroupOverlayUtils.addOverlay(sceneRoot, finalOverlayView, screenLoc[0], screenLoc[1]);
}
else {
cancel();
}
}
@Override
public void onTransitionEnd(@NonNull Transition transition) {
finalStartView.setTag(R.id.overlay_view, null);
ViewGroupOverlayUtils.removeOverlay(sceneRoot, finalOverlayView);
transition.removeListener(this);
}
});
}
return animator;
}
if (viewToKeep != null) {
int originalVisibility = -1;
final boolean isForcedVisibility = mForcedStartVisibility != -1 ||
mForcedEndVisibility != -1;
if (!isForcedVisibility) {
originalVisibility = viewToKeep.getVisibility();
ViewUtils.setTransitionVisibility(viewToKeep, View.VISIBLE);
}
Animator animator = onDisappear(sceneRoot, viewToKeep, startValues, endValues);
if (animator != null) {
DisappearListener disappearListener = new DisappearListener(viewToKeep,
endVisibility, isForcedVisibility);
animator.addListener(disappearListener);
AnimatorUtils.addPauseListener(animator, disappearListener);
addListener(disappearListener);
} else if (!isForcedVisibility) {
ViewUtils.setTransitionVisibility(viewToKeep, originalVisibility);
}
return animator;
}
return null;
} |