Dataset columns: language (stringclasses, 2 values) · func_code_string (stringlengths, 63 to 466k)
python
def from_dict(cls, d):
    """
    Returns:
        CompleteDos object from dict representation.
    """
    tdos = Dos.from_dict(d)
    struct = Structure.from_dict(d["structure"])
    pdoss = {}
    for i in range(len(d["pdos"])):
        at = struct[i]
        orb_dos = {}
        for orb_str, odos in d["pdos"][i].items():
            orb = orb_str
            orb_dos[orb] = {Spin(int(k)): v for k, v in odos["densities"].items()}
        pdoss[at] = orb_dos
    return LobsterCompleteDos(struct, tdos, pdoss)
python
def cluster_commit():
    '''
    Commit Cluster Changes

    .. versionchanged:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' riak.cluster_commit
    '''
    ret = {'comment': '', 'success': False}
    cmd = __execute_cmd('riak-admin', 'cluster commit')
    if cmd['retcode'] != 0:
        ret['comment'] = cmd['stdout']
    else:
        ret['comment'] = cmd['stdout']
        ret['success'] = True
    return ret
java
private String readString() throws IOException {
    StringBuffer sb = new StringBuffer();
    int delim = lastChar;
    int l = lineNo;
    int c = colNo;

    readChar();
    while ((-1 != lastChar) && (delim != lastChar)) {
        StringBuffer digitBuffer;

        if (lastChar != '\\') {
            sb.append((char) lastChar);
            readChar();
            continue;
        }

        readChar();
        switch (lastChar) {
            case 'b':  readChar(); sb.append('\b');  continue;
            case 'f':  readChar(); sb.append('\f');  continue;
            case 'n':  readChar(); sb.append('\n');  continue;
            case 'r':  readChar(); sb.append('\r');  continue;
            case 't':  readChar(); sb.append('\t');  continue;
            case '\'': readChar(); sb.append('\''); continue;
            case '"':  readChar(); sb.append('"');   continue;
            case '\\': readChar(); sb.append('\\'); continue;
            case '/':  readChar(); sb.append('/');   continue;

            // hex constant
            // unicode constant
            case 'x':
            case 'u':
                digitBuffer = new StringBuffer();
                int toRead = 2;
                if (lastChar == 'u') toRead = 4;
                for (int i = 0; i < toRead; i++) {
                    readChar();
                    if (!isHexDigit(lastChar)) throw new IOException("non-hex digit " + onLineCol());
                    digitBuffer.append((char) lastChar);
                }
                readChar();
                try {
                    int digitValue = Integer.parseInt(digitBuffer.toString(), 16);
                    sb.append((char) digitValue);
                } catch (NumberFormatException e) {
                    throw new IOException("non-hex digit " + onLineCol());
                }
                break;

            // octal constant
            default:
                if (!isOctalDigit(lastChar)) throw new IOException("non-octal digit " + onLineCol());
                digitBuffer = new StringBuffer();
                digitBuffer.append((char) lastChar);
                for (int i = 0; i < 2; i++) {
                    readChar();
                    if (!isOctalDigit(lastChar)) break;
                    digitBuffer.append((char) lastChar);
                }
                try {
                    int digitValue = Integer.parseInt(digitBuffer.toString(), 8);
                    sb.append((char) digitValue);
                } catch (NumberFormatException e) {
                    throw new IOException("non-octal digit " + onLineCol());
                }
        }
    }

    if (-1 == lastChar) {
        throw new IOException("String not terminated " + onLineCol(l, c));
    }

    readChar();
    return sb.toString();
}
java
@Override
public void put(final String varName, final Object object) {
    runWithContext(new RhinoCallable<Void, RuntimeException>() {
        @Override
        protected Void doCall(Context cx, Scriptable scope) {
            scope.put(varName, scope, object != null ? Context.javaToJS(object, scope) : null);
            return null;
        }
    });
}
python
def save_json_to_file(i):
    """
    Input:  {
              json_file   - file name
              dict        - dict to save
              (sort_keys) - if 'yes', sort keys
              (safe)      - if 'yes', ignore non-JSON values
                            (only for debugging - changes original dict!)
            }

    Output: {
              return  - return code =  0, if successful
                                    >  0, if error
              (error) - error text if return > 0
            }
    """
    fn = i['json_file']

    if i.get('safe', '') == 'yes':
        d = i['dict']
        sd = {}

        # Check main unprintable keys
        for k in d:
            try:
                json.dumps(d[k])
            except Exception as e:
                pass
            else:
                sd[k] = d[k]

        i['dict'] = sd

    r = dumps_json(i)
    if r['return'] > 0:
        return r

    s = r['string'].replace('\r', '') + '\n'

    return save_text_file({'text_file': fn, 'string': s})
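A minimal usage sketch for the dict-in/dict-out convention above; the file name and payload here are hypothetical, and dumps_json/save_text_file are assumed to be the helpers from the same module.

# Hypothetical call, following this module's dict-in/dict-out convention.
r = save_json_to_file({'json_file': 'out.json',
                       'dict': {'a': 1, 'b': [1, 2, 3]},
                       'safe': 'yes'})
if r['return'] > 0:
    print('error:', r.get('error', ''))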
python
def launch(self, args, ctx, grid_dims, block_dims, shared_mem=0):
    """Launch cuda kernel.

    Parameters
    ----------
    args : tuple of NDArray or numbers
        List of arguments for kernel. NDArrays are expected for pointer
        types (e.g. `float*`, `double*`) while numbers are expected for
        non-pointer types (e.g. `int`, `float`).
    ctx : Context
        The context to launch kernel on. Must be GPU context.
    grid_dims : tuple of 3 integers
        Grid dimensions for CUDA kernel.
    block_dims : tuple of 3 integers
        Block dimensions for CUDA kernel.
    shared_mem : integer, optional
        Size of dynamically allocated shared memory. Defaults to 0.
    """
    assert ctx.device_type == 'gpu', "Cuda kernel can only be launched on GPU"
    assert len(grid_dims) == 3, "grid_dims must be a tuple of 3 integers"
    assert len(block_dims) == 3, "block_dims must be a tuple of 3 integers"
    assert len(args) == len(self._dtypes), \
        "CudaKernel(%s) expects %d arguments but got %d"%(
            self._name, len(self._dtypes), len(args))
    void_args = []
    ref_holder = []
    for i, (arg, is_nd, dtype) in enumerate(
            zip(args, self._is_ndarray, self._dtypes)):
        if is_nd:
            assert isinstance(arg, NDArray), \
                "The %d-th argument is expected to be a NDArray but got %s"%(
                    i, type(arg))
            void_args.append(arg.handle)
        else:
            assert isinstance(arg, numeric_types), \
                "The %d-th argument is expected to be a number, but got %s"%(
                    i, type(arg))
            ref_holder.append(np.array(arg, dtype=dtype))
            void_args.append(ref_holder[-1].ctypes.data_as(ctypes.c_void_p))

    check_call(_LIB.MXRtcCudaKernelCall(
        self.handle, ctx.device_id, c_array(ctypes.c_void_p, void_args),
        mx_uint(grid_dims[0]), mx_uint(grid_dims[1]), mx_uint(grid_dims[2]),
        mx_uint(block_dims[0]), mx_uint(block_dims[1]), mx_uint(block_dims[2]),
        mx_uint(shared_mem)))
java
public Vector3d set(int index, DoubleBuffer buffer) {
    MemUtil.INSTANCE.get(this, index, buffer);
    return this;
}
python
def to_str(self, s):
    '''
    In py2 converts a unicode to str (bytes) using utf-8.
    -- in py3 raises an error if it's not str already.
    '''
    if s.__class__ != str:
        if not IS_PY3K:
            s = s.encode('utf-8')
        else:
            raise AssertionError('Expected to have str on Python 3. Found: %s (%s)' % (s, s.__class__))
    return s
java
public OvhOrder dedicated_nasha_new_duration_GET(String duration, OvhNasHAZoneEnum datacenter, OvhNasHAOfferEnum model) throws IOException {
    String qPath = "/order/dedicated/nasha/new/{duration}";
    StringBuilder sb = path(qPath, duration);
    query(sb, "datacenter", datacenter);
    query(sb, "model", model);
    String resp = exec(qPath, "GET", sb.toString(), null);
    return convertTo(resp, OvhOrder.class);
}
java
@Override
public C setScale(final double x, final double y) {
    getAttributes().setScale(x, y);
    return cast();
}
java
static int dayNumToDate(DayOfWeek dow0, int nDays, int weekNum, DayOfWeek dow, int d0, int nDaysInMonth) {
    // if dow is wednesday, then this is the date of the first wednesday
    int firstDateOfGivenDow = 1 + ((7 + dow.getCalendarConstant() - dow0.getCalendarConstant()) % 7);

    int date;
    if (weekNum > 0) {
        date = ((weekNum - 1) * 7) + firstDateOfGivenDow - d0;
    } else {  // count weeks from end of month
        // calculate last day of the given dow
        // since nDays <= 366, this should be > nDays
        int lastDateOfGivenDow = firstDateOfGivenDow + (7 * 54);
        lastDateOfGivenDow -= 7 * ((lastDateOfGivenDow - nDays + 6) / 7);
        date = lastDateOfGivenDow + 7 * (weekNum + 1) - d0;
    }
    return (date <= 0 || date > nDaysInMonth) ? 0 : date;
}
python
def are_equal(self, sp1, sp2):
    """
    True if there is some overlap in composition between the species

    Args:
        sp1: First species. A dict of {specie/element: amt} as per the
            definition in Site and PeriodicSite.
        sp2: Second species. A dict of {specie/element: amt} as per the
            definition in Site and PeriodicSite.

    Returns:
        True if one set of species is a subset of the other.
    """
    set1 = set(sp1.elements)
    set2 = set(sp2.elements)
    return set1.issubset(set2) or set2.issubset(set1)
java
public final long[] evalDayStartEndFor(final Date pDateFor) {
    Calendar cal = Calendar.getInstance(new Locale("en", "US"));
    cal.setTime(pDateFor);
    cal.set(Calendar.HOUR_OF_DAY, 0);
    cal.set(Calendar.MINUTE, 0);
    cal.set(Calendar.SECOND, 0);
    cal.set(Calendar.MILLISECOND, 0);
    long[] result = new long[2];
    result[0] = cal.getTimeInMillis();
    cal.set(Calendar.HOUR_OF_DAY, 23);
    cal.set(Calendar.MINUTE, 59);
    cal.set(Calendar.SECOND, 59);
    cal.set(Calendar.MILLISECOND, 999);
    result[1] = cal.getTimeInMillis();
    return result;
}
python
def _negotiateAssociation(self, endpoint):
    """Make association requests to the server, attempting to
    create a new association.

    @returns: a new association object
    @rtype: L{openid.association.Association}
    """
    # Get our preferred session/association type from the negotiator.
    assoc_type, session_type = self.negotiator.getAllowedType()

    try:
        assoc = self._requestAssociation(endpoint, assoc_type, session_type)
    except ServerError as why:
        supportedTypes = self._extractSupportedAssociationType(
            why, endpoint, assoc_type)
        if supportedTypes is not None:
            assoc_type, session_type = supportedTypes
            # Attempt to create an association from the assoc_type
            # and session_type that the server told us it supported.
            try:
                assoc = self._requestAssociation(
                    endpoint, assoc_type, session_type)
            except ServerError as why:
                # Do not keep trying, since it rejected the
                # association type that it told us to use.
                logging.error(
                    'Server %s refused its suggested association '
                    'type: session_type=%s, assoc_type=%s' % (
                        endpoint.server_url, session_type, assoc_type))
                return None
            else:
                return assoc
    else:
        return assoc
python
def get_transient(self, name):
    '''Restores TransientFile object with given name.
    Should be used when form is submitted with file name and no file'''
    # security checks: basically no folders are allowed
    assert not ('/' in name or '\\' in name or name[0] in '.~')
    transient = TransientFile(self.transient_root, name, self)
    if not os.path.isfile(transient.path):
        raise OSError(errno.ENOENT, 'Transient file has been lost',
                      transient.path)
    return transient
java
public static String pick(String[] values) {
    if (values == null || values.length == 0)
        return "";

    int index = RandomInteger.nextInteger(values.length);
    return values[index];
}
java
private static void internalVerticalBlur(int[] pixels, int[] outCol, int w, int h, int col, int diameter, int[] div) {
    final int lastInByte = w * (h - 1) + col;
    final int radiusTimesW = (diameter >> 1) * w;
    final int diameterMinusOneTimesW = (diameter - 1) * w;
    int a = 0, r = 0, g = 0, b = 0;
    int pixel;
    int outColPos = 0;

    // iterate over absolute positions in `pixels`; `w` is the step width for moving down one row
    for (int i = col - radiusTimesW; i <= lastInByte + radiusTimesW; i += w) {
        final int ii = bound(i, col, lastInByte);
        pixel = pixels[ii];
        r += (pixel >> 16) & 0xFF;
        g += (pixel >> 8) & 0xFF;
        b += pixel & 0xFF;
        a += pixel >>> 24;

        final int outPos = i - radiusTimesW;
        if (outPos >= col) {
            outCol[outColPos] = (div[a] << 24) | (div[r] << 16) | (div[g] << 8) | div[b];
            outColPos++;

            final int j = i - diameterMinusOneTimesW;
            final int jj = bound(j, col, lastInByte);
            pixel = pixels[jj];
            r -= (pixel >> 16) & 0xFF;
            g -= (pixel >> 8) & 0xFF;
            b -= pixel & 0xFF;
            a -= pixel >>> 24;
        }
    }
}
python
def update_exif_GEXIV2(oldfile, newfile):
    """Transfers oldfile's exif to newfile's exif and updates
    the width/height EXIF fields"""
    # Requires gexiv2 and pygobject package in gentoo
    # (USE=introspection)
    try:
        from gi.repository import GExiv2
    except ImportError:
        print("Couldn't import GExiv2")
        print("Are you sure you have GExiv2 installed?")
        print("See this page: http://goo.gl/0bhDGx")
        print("For gentoo, emerge media-libs/gexiv2 with introspection USE flag")
        return False

    # exif of original image
    exif = GExiv2.Metadata(oldfile)
    # exif of resized image
    newExif = GExiv2.Metadata(newfile)
    # Figure out dimensions
    imgresize = Image.open(newfile)

    # save all exif data of original image to resized
    for tag in exif.get_exif_tags():
        newExif[tag] = exif[tag]

    # edit exif data - size
    newExif['Exif.Photo.PixelXDimension'] = str(imgresize.size[0])
    newExif['Exif.Photo.PixelYDimension'] = str(imgresize.size[1])

    # FIXME: Doesn't work with PENTAX JPG
    # Error is: gi._glib.GError: Unsupported data area offset type
    newExif.save_file()
    return True
python
def upload_entities_tsv(namespace, workspace, entities_tsv):
    """Upload entities from a tsv loadfile.

    File-based wrapper for api.upload_entities().
    A loadfile is a tab-separated text file with a header row
    describing entity type and attribute names, followed by rows of
    entities and their attribute values.

    Ex:
        entity:participant_id   age   alive
        participant_23          25    Y
        participant_27          35    N

    Args:
        namespace (str): project to which workspace belongs
        workspace (str): Workspace name
        entities_tsv (file): FireCloud loadfile, see format above
    """
    if isinstance(entities_tsv, string_types):
        with open(entities_tsv, "r") as tsv:
            entity_data = tsv.read()
    elif isinstance(entities_tsv, io.StringIO):
        entity_data = entities_tsv.getvalue()
    else:
        raise ValueError('Unsupported input type.')
    return upload_entities(namespace, workspace, entity_data)
java
public static OverwritePolicy byDateFromTo(String attribute, Calendar start, Calendar end) {
    return new DateRangeOverwritePolicy(attribute, start, end);
}
java
public static <K, V> TreeMap<K, V> treeMapOf(K k1, V v1, K k2, V v2) {
    TreeMap<K, V> treeMap = new TreeMap<>();
    treeMap.put(k1, v1);
    treeMap.put(k2, v2);
    return treeMap;
}
python
def system(session):
    """Run the system test suite."""

    # Sanity check: Only run system tests if the environment variable is set.
    if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
        session.skip('Credentials must be set via environment variable.')

    # Use pre-release gRPC for system tests.
    session.install('--pre', 'grpcio')

    # Install all test dependencies, then install this package into the
    # virtualenv's dist-packages.
    session.install('mock', 'pytest')
    for local_dep in LOCAL_DEPS:
        session.install('-e', local_dep)
    systest_deps = [
        '../bigquery/',
        '../pubsub/',
        '../storage/',
        '../test_utils/',
    ]
    for systest_dep in systest_deps:
        session.install('-e', systest_dep)
    session.install('-e', '.')

    # Run py.test against the system tests.
    session.run('py.test', '-vvv', '-s', 'tests/system', *session.posargs)
java
public static void displayAction(
    CmsResource elementResource,
    I_CmsFormatterBean formatter,
    Map<String, String> settings,
    boolean editable,
    boolean canCreate,
    boolean canDelete,
    String creationSiteMap,
    String postCreateHandler,
    PageContext context,
    ServletRequest request,
    ServletResponse response) {

    CmsContainerElementBean element = new CmsContainerElementBean(
        elementResource.getStructureId(),
        formatter.getJspStructureId(),
        settings,
        false);
    displayAction(
        element,
        formatter,
        editable,
        canCreate,
        canDelete,
        creationSiteMap,
        postCreateHandler,
        context,
        request,
        response);
}
java
private org.apache.hadoop.conf.Configuration loadHadoopConfigFromFlink() {
    org.apache.hadoop.conf.Configuration hadoopConfig = new org.apache.hadoop.conf.Configuration();
    for (String key : flinkConfig.keySet()) {
        for (String prefix : flinkConfigPrefixes) {
            if (key.startsWith(prefix)) {
                String newKey = hadoopConfigPrefix + key.substring(prefix.length());
                String newValue = fixHadoopConfig(key, flinkConfig.getString(key, null));
                hadoopConfig.set(newKey, newValue);

                LOG.debug("Adding Flink config entry for {} as {} to Hadoop config", key, newKey);
            }
        }
    }
    return hadoopConfig;
}
python
def update_braintree_gateway_by_id(cls, braintree_gateway_id, braintree_gateway, **kwargs):
    """Update BraintreeGateway

    Update attributes of BraintreeGateway

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.update_braintree_gateway_by_id(braintree_gateway_id, braintree_gateway, async=True)
    >>> result = thread.get()

    :param async bool
    :param str braintree_gateway_id: ID of braintreeGateway to update. (required)
    :param BraintreeGateway braintree_gateway: Attributes of braintreeGateway to update. (required)
    :return: BraintreeGateway
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        return cls._update_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
    else:
        (data) = cls._update_braintree_gateway_by_id_with_http_info(braintree_gateway_id, braintree_gateway, **kwargs)
        return data
java
@Override
public Attribute getAttribute(Object feature, String name) throws LayerException {
    return convertAttribute(asFeature(feature).getAttribute(name), name);
}
python
def _set_drop_precedence_force(self, v, load=False):
    """
    Setter method for drop_precedence_force, mapped from YANG variable
    /ipv6_acl/ipv6/access_list/extended/seq/drop_precedence_force
    (ip-access-list:drop-prec-uint)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_drop_precedence_force is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_drop_precedence_force() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=long,
                    restriction_dict={'range': ['0..4294967295']},
                    int_size=32),
                restriction_dict={'range': [u'0..2']}),
            is_leaf=True,
            yang_name="drop-precedence-force",
            rest_name="drop-precedence-force",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {
                u'info': u'Force drop precedence',
                u'cli-optional-in-sequence': None,
                u'cli-suppress-no': None}},
            namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list',
            defining_module='brocade-ipv6-access-list',
            yang_type='ip-access-list:drop-prec-uint',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """drop_precedence_force must be of a type compatible with ip-access-list:drop-prec-uint""",
            'defined-type': "ip-access-list:drop-prec-uint",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..2']}), is_leaf=True, yang_name="drop-precedence-force", rest_name="drop-precedence-force", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Force drop precedence', u'cli-optional-in-sequence': None, u'cli-suppress-no': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-access-list', defining_module='brocade-ipv6-access-list', yang_type='ip-access-list:drop-prec-uint', is_config=True)""",
        })

    self.__drop_precedence_force = t
    if hasattr(self, '_set'):
        self._set()
python
def split_tag(section):
    """
    Split the JSDoc tag text (everything following the @) at the first
    whitespace. Returns a tuple of (tagname, body).
    """
    splitval = re.split(r'\s+', section, 1)
    tag, body = len(splitval) > 1 and splitval or (splitval[0], '')
    return tag.strip(), body.strip()
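A quick illustrative call, assuming the function is defined alongside Python's re module:

tag, body = split_tag('param {number} x The x coordinate')
# tag == 'param', body == '{number} x The x coordinate'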
python
def gather(self, iterable):
    """Calls lookup() with gather=True, passing ``iterable``,
    and yields each result.
    """
    for result in self.lookup(iterable, gather=True):
        yield result
python
def attribute(element, attribute, default=None):
    """
    Returns the value of an attribute, or a default if it's not defined

    :param element: The XML Element object
    :type element: etree._Element

    :param attribute: The name of the attribute to evaluate
    :type attribute: basestring

    :param default: The default value to return if the attribute is not defined
    """
    attribute_value = element.get(attribute)
    return attribute_value if attribute_value is not None else default
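A small usage sketch with the standard-library ElementTree (the type hint above suggests lxml, but .get() behaves the same on both):

import xml.etree.ElementTree as etree

el = etree.fromstring('<server port="8080"/>')
print(attribute(el, 'port'))               # '8080'
print(attribute(el, 'host', 'localhost'))  # falls back to the default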
java
public GetOpenIDConnectProviderResult withClientIDList(String... clientIDList) {
    if (this.clientIDList == null) {
        setClientIDList(new com.amazonaws.internal.SdkInternalList<String>(clientIDList.length));
    }
    for (String ele : clientIDList) {
        this.clientIDList.add(ele);
    }
    return this;
}
python
def beginWithoutDiscovery(self, service, anonymous=False):
    """Start OpenID verification without doing OpenID server
    discovery. This method is used internally by Consumer.begin
    after discovery is performed, and exists to provide an
    interface for library users needing to perform their own
    discovery.

    @param service: an OpenID service endpoint descriptor.  This
        object and factories for it are found in the
        L{openid.consumer.discover} module.

    @type service:
        L{OpenIDServiceEndpoint<openid.consumer.discover.OpenIDServiceEndpoint>}

    @returns: an OpenID authentication request object.

    @rtype: L{AuthRequest<openid.consumer.consumer.AuthRequest>}

    @see: openid.consumer.consumer.Consumer.begin
    @see: openid.consumer.discover
    """
    auth_req = self.consumer.begin(service)
    self.session[self._token_key] = auth_req.endpoint

    try:
        auth_req.setAnonymous(anonymous)
    except ValueError as why:
        raise ProtocolError(str(why))

    return auth_req
java
public static synchronized void configure(final String options, final Instrumentation instrumentation) throws Exception {
    if (SigarAgent.instrumentation != null) {
        logger.severe("Duplicate agent setup attempt.");
        return;
    }
    SigarAgent.options = options;
    SigarAgent.instrumentation = instrumentation;
    logger.info("Sigar loader options: " + options);
    final File folder = new File(SigarProvisioner.discoverLocation(options));
    SigarProvisioner.provision(folder);
}
java
protected byte[] getBytesInternal() {
    byte[] cached;
    if (hasNoValueCache() || (cached = valueCache.lowerBytes) == null) {
        valueCache.lowerBytes = cached = getBytesImpl(true);
    }
    return cached;
}
python
def _is_valid_templates_dict(policy_templates_dict, schema=None):
    """
    Is this a valid policy template dictionary

    :param dict policy_templates_dict: Data to be validated
    :param dict schema: Optional, dictionary containing JSON Schema representing policy template
    :return: True, if it is valid.
    :raises ValueError: If the template dictionary doesn't match up with the schema
    """

    if not schema:
        schema = PolicyTemplatesProcessor._read_schema()

    try:
        jsonschema.validate(policy_templates_dict, schema)
    except ValidationError as ex:
        # Stringifying the exception will give us useful error message
        raise ValueError(str(ex))

    return True
python
def scan2(self, tablename, expr_values=None, alias=None, attributes=None,
          consistent=False, select=None, index=None, limit=None,
          return_capacity=None, filter=False, segment=None,
          total_segments=None, exclusive_start_key=None, **kwargs):
    """
    Perform a full-table scan

    For many parameters you will want to reference the DynamoDB API:
    http://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_Scan.html

    Parameters
    ----------
    tablename : str
        Name of the table to scan
    expr_values : dict, optional
        See docs for ExpressionAttributeValues. See also: kwargs
    alias : dict, optional
        See docs for ExpressionAttributeNames
    attributes : str or list, optional
        See docs for ProjectionExpression. If list, it will be joined by
        commas.
    consistent : bool, optional
        Perform a strongly consistent read of the data (default False)
    select : str, optional
        See docs for Select
    index : str, optional
        The name of the index to query
    limit : int, optional
        Maximum number of items to return
    return_capacity : {NONE, INDEXES, TOTAL}, optional
        INDEXES will return the consumed capacity for indexes, TOTAL will
        return the consumed capacity for the table and the indexes.
        (default NONE)
    filter : str, optional
        See docs for FilterExpression
    segment : int, optional
        When doing a parallel scan, the unique thread identifier for this
        scan. If present, total_segments must also be present.
    total_segments : int, optional
        When doing a parallel scan, the total number of threads performing
        the scan.
    exclusive_start_key : dict, optional
        The ExclusiveStartKey to resume a previous query
    **kwargs : dict, optional
        If expr_values is not provided, the kwargs dict will be used as the
        ExpressionAttributeValues (a ':' will be automatically prepended to
        all keys).

    Examples
    --------
    .. code-block:: python

        connection.scan2('mytable', filter='contains(tags, :search)',
                         search='text')
        connection.scan2('mytable', filter='id = :id',
                         expr_values={':id': 'dsa'})
    """
    keywords = {
        'TableName': tablename,
        'ReturnConsumedCapacity': self._default_capacity(return_capacity),
        'ConsistentRead': consistent,
    }
    values = build_expression_values(self.dynamizer, expr_values, kwargs)
    if values:
        keywords['ExpressionAttributeValues'] = values
    if attributes is not None:
        if not isinstance(attributes, six.string_types):
            attributes = ', '.join(attributes)
        keywords['ProjectionExpression'] = attributes
    if index is not None:
        keywords['IndexName'] = index
    if alias:
        keywords['ExpressionAttributeNames'] = alias
    if select:
        keywords['Select'] = select
    if filter:
        keywords['FilterExpression'] = filter
    if segment is not None:
        keywords['Segment'] = segment
    if total_segments is not None:
        keywords['TotalSegments'] = total_segments
    if exclusive_start_key is not None:
        keywords['ExclusiveStartKey'] = \
            self.dynamizer.maybe_encode_keys(exclusive_start_key)
    if not isinstance(limit, Limit):
        limit = Limit(limit)

    if select == COUNT:
        return self._count('scan', limit, keywords)
    else:
        return ResultSet(self, limit, 'scan', **keywords)
java
public static <T> CloseableIterable<T> concat(final CloseableIterable<? extends Iterable<? extends T>> inputs) {
    return wrap(Iterables.concat(inputs), inputs);
}
python
def getInfo(self, CorpNum, MgtKeyType, MgtKey):
    """ Check status information.
        args
            CorpNum : member company registration number
            MgtKeyType : management key type, one of ['SELL', 'BUY', 'TRUSTEE']
            MgtKey : partner management key
        return
            processing result. consist of code and message
        raise
            PopbillException
    """
    if MgtKeyType not in self.__MgtKeyTypes:
        raise PopbillException(-99999999, "The management key type is not valid.")
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "The management key was not entered.")

    return self._httpget('/Taxinvoice/' + MgtKeyType + '/' + MgtKey, CorpNum)
java
@Override
public UpdateUserSecurityProfilesResult updateUserSecurityProfiles(UpdateUserSecurityProfilesRequest request) {
    request = beforeClientExecution(request);
    return executeUpdateUserSecurityProfiles(request);
}
java
public final void mT__136() throws RecognitionException {
    try {
        int _type = T__136;
        int _channel = DEFAULT_TOKEN_CHANNEL;
        // InternalSARL.g:122:8: ( 'try' )
        // InternalSARL.g:122:10: 'try'
        {
            match("try");
        }

        state.type = _type;
        state.channel = _channel;
    } finally {
    }
}
java
static URL lookupResource(String path) {
    for (LocaleDrivenResourceProvider provider : getLocaleDrivenResourceProviders()) {
        try {
            URL url = provider.lookup(path);
            if (url != null) {
                return url;
            }
        } catch (Exception e) {
            LOGGER.log(Level.WARNING, "Failed to look up URL for " + path + " from " + provider, e);
        }
    }
    return null;
}
python
def add_search_engine(self, name, engine):
    '''Adds a search engine with the given name.

    ``engine`` must be the **class** object rather than an instance.
    The class *must* be a subclass of :class:`dossier.web.SearchEngine`,
    which should provide a means of obtaining recommendations given a
    query.

    The ``engine`` must be a class so that its dependencies can be
    injected when the corresponding route is executed by the user.

    If ``engine`` is ``None``, then it removes a possibly existing
    search engine named ``name``.

    :param str name: The name of the search engine. This appears in
                     the list of search engines provided to the user,
                     and is how the search engine is invoked via REST.
    :param engine: A search engine *class*.
    :type engine: `type`
    :rtype: :class:`WebBuilder`
    '''
    if engine is None:
        self.search_engines.pop(name, None)
        return self
    self.search_engines[name] = engine
    return self
java
public void openRead(ReadStreamOld rs) throws IOException {
    closeWrite();

    TempReadStream tempReadStream = new TempReadStream(_head);
    // tempReadStream.setPath(getPath());
    tempReadStream.setFreeWhenDone(true);
    _head = null;
    _tail = null;

    rs.init(tempReadStream);
}
java
public void displayOSInfo(Log log, boolean info) {
    String string =
        "OS Info: Arch: " + Os.OS_ARCH + " Family: " + Os.OS_FAMILY
            + " Name: " + Os.OS_NAME + " Version: " + Os.OS_VERSION;

    if (!info) {
        log.debug(string);
    } else {
        log.info(string);
    }
}
python
async def sort(self, name, start=None, num=None, by=None, get=None,
               desc=False, alpha=False, store=None, groups=None):
    """Sort and return the list, set or sorted set at ``name``.

    :start: and :num: allow for paging through the sorted data

    :by: allows using an external key to weight and sort the items.
        Use an "*" to indicate where in the key the item value is located

    :get: allows for returning items from external keys rather than the
        sorted data itself.  Use an "*" to indicate where in the key
        the item value is located

    :desc: allows for reversing the sort

    :alpha: allows for sorting lexicographically rather than numerically

    :store: allows for storing the result of the sort into the key `store`

    ClusterImpl: A full implementation of the server side sort mechanics
    because many of the options work on multiple keys that can exist
    on multiple servers.
    """
    if (start is None and num is not None) or \
            (start is not None and num is None):
        raise RedisError("RedisError: ``start`` and ``num`` must both be specified")
    try:
        data_type = b(await self.type(name))

        if data_type == b("none"):
            return []
        elif data_type == b("set"):
            data = list(await self.smembers(name))[:]
        elif data_type == b("list"):
            data = await self.lrange(name, 0, -1)
        else:
            raise RedisClusterException("Unable to sort data type : {0}".format(data_type))

        if by is not None:
            # _sort_using_by_arg mutates data so we don't
            # need a return value.
            data = await self._sort_using_by_arg(data, by, alpha)
        elif not alpha:
            data.sort(key=self._strtod_key_func)
        else:
            data.sort()

        if desc:
            data = data[::-1]

        if not (start is None and num is None):
            data = data[start:start + num]

        if get:
            data = await self._retrive_data_from_sort(data, get)

        if store is not None:
            if data_type == b("set"):
                await self.delete(store)
                await self.rpush(store, *data)
            elif data_type == b("list"):
                await self.delete(store)
                await self.rpush(store, *data)
            else:
                raise RedisClusterException("Unable to store sorted data for data type : {0}".format(data_type))

            return len(data)

        if groups:
            if not get or isinstance(get, str) or len(get) < 2:
                raise DataError('when using "groups" the "get" argument '
                                'must be specified and contain at least '
                                'two keys')
            n = len(get)
            return list(zip(*[data[i::n] for i in range(n)]))
        else:
            return data
    except KeyError:
        return []
python
def transform_array(rot_mtx, vec_array):
    '''transform_array( matrix, vector_array ) -> vector_array'''
    return [transform(rot_mtx, vec) for vec in vec_array]
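A self-contained sketch of the same pattern, with a stand-in transform; the real transform(matrix, vector) lives elsewhere in this module.

# Stand-in for the module's transform(matrix, vector): a plain 2x2 multiply.
def transform(m, v):
    return (m[0][0] * v[0] + m[0][1] * v[1],
            m[1][0] * v[0] + m[1][1] * v[1])

rot90 = [[0, -1], [1, 0]]  # 90-degree counter-clockwise rotation
print(transform_array(rot90, [(1, 0), (0, 1)]))  # [(0, 1), (-1, 0)]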
java
public static Pipe newPipe(byte[] data, int offset, int length) throws IOException {
    return newPipe(new ByteArrayInputStream(data, offset, length));
}
java
public XSModel parseString(String schema, String baseURI) {
    return xsLoader.load(new DOMInputImpl(null, null, baseURI, schema, null));
}
java
public boolean contains(Object o) {
    if (o instanceof String) {
        return set.contains(((String) o).toLowerCase());
    } else {
        return set.contains(o);
    }
}
python
def fetch_extra_data(resource):
    """Return a dict with extra data retrieved from CERN OAuth."""
    person_id = resource.get('PersonID', [None])[0]
    identity_class = resource.get('IdentityClass', [None])[0]
    department = resource.get('Department', [None])[0]

    return dict(
        person_id=person_id,
        identity_class=identity_class,
        department=department,
    )
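The resource mapping is list-valued per key, as in this illustrative (made-up) payload:

resource = {'PersonID': ['12345'], 'IdentityClass': ['CERN Registered'],
            'Department': ['IT/CDA']}
print(fetch_extra_data(resource))
# {'person_id': '12345', 'identity_class': 'CERN Registered', 'department': 'IT/CDA'}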
python
def delete_job(job_id):
    """Deletes a job."""
    try:
        current_app.apscheduler.remove_job(job_id)
        return Response(status=204)
    except JobLookupError:
        return jsonify(dict(error_message='Job %s not found' % job_id), status=404)
    except Exception as e:
        return jsonify(dict(error_message=str(e)), status=500)
python
def get_manufacturer_string(self):
    """
    Get the Manufacturer String from the HID device.

    :return: The Manufacturer String
    :rtype: unicode
    """
    self._check_device_status()
    str_p = ffi.new("wchar_t[]", 255)
    rv = hidapi.hid_get_manufacturer_string(self._device, str_p, 255)
    if rv == -1:
        raise IOError("Failed to read manufacturer string from HID "
                      "device: {0}".format(self._get_last_error_string()))
    return ffi.string(str_p)
python
async def src_reload(app, path: str = None):
    """
    prompt each connected browser to reload by sending websocket message.

    :param path: if supplied this must be a path relative to app['static_path'],
        eg. reload of a single file is only supported for static resources.
    :return: number of sources reloaded
    """
    cli_count = len(app[WS])
    if cli_count == 0:
        return 0

    is_html = None
    if path:
        path = str(Path(app['static_url']) / Path(path).relative_to(app['static_path']))
        is_html = mimetypes.guess_type(path)[0] == 'text/html'

    reloads = 0
    aux_logger.debug('prompting source reload for %d clients', cli_count)

    for ws, url in app[WS]:
        if path and is_html and path not in {url, url + '.html', url.rstrip('/') + '/index.html'}:
            aux_logger.debug('skipping reload for client at %s', url)
            continue
        aux_logger.debug('reload client at %s', url)
        data = {
            'command': 'reload',
            'path': path or url,
            'liveCSS': True,
            'liveImg': True,
        }
        try:
            await ws.send_str(json.dumps(data))
        except RuntimeError as e:
            # eg. "RuntimeError: websocket connection is closing"
            aux_logger.error('Error broadcasting change to %s, RuntimeError: %s', path or url, e)
        else:
            reloads += 1

    if reloads:
        s = '' if reloads == 1 else 's'
        aux_logger.info('prompted reload of %s on %d client%s', path or 'page', reloads, s)
    return reloads
python
def _strip_key(dictionary, keyword):
    '''
    look for a certain key within a dictionary and nullify its contents,
    check within nested dictionaries and lists as well. Certain attributes
    such as "generation" will change even when there were no changes made
    to the entity.
    '''
    for key, value in six.iteritems(dictionary):
        if key == keyword:
            dictionary[key] = None
        elif isinstance(value, dict):
            _strip_key(value, keyword)
        elif isinstance(value, list):
            for item in value:
                if isinstance(item, dict):
                    _strip_key(item, keyword)

    return dictionary
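A quick demonstration of the recursive nulling (assumes six is importable, as the function itself does):

doc = {'generation': 3, 'meta': {'generation': 7, 'name': 'x'},
       'items': [{'generation': 1}]}
print(_strip_key(doc, 'generation'))
# {'generation': None, 'meta': {'generation': None, 'name': 'x'},
#  'items': [{'generation': None}]}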
java
private static RandomVariable[] numberListToDoubleArray(List<RandomVariable> listOfNumbers) {
    RandomVariable[] array = new RandomVariable[listOfNumbers.size()];
    for (int i = 0; i < array.length; i++) {
        array[i] = listOfNumbers.get(i);
    }
    return array;
}
java
static void writeIoDevice(IoDevice ioDevice, BufferedWriter writer, int priority) throws IOException {
    writer.write(XML_START_TAG);
    XMLUtils.writeXmlAttribute(VENDOR_ID, ioDevice.getVendorId(), writer);
    XMLUtils.writeXmlAttribute(PRODUCT_ID, ioDevice.getProductId(), writer);
    XMLUtils.writeXmlAttribute(DEVICE_ID, ioDevice.getDeviceId(), writer);

    String name = ioDevice.getName();
    if (name != null) {
        XMLUtils.writeXmlAttribute(NAME, name, writer);
    }

    String vendorName = ioDevice.getVendorName();
    if (vendorName != null) {
        XMLUtils.writeXmlAttribute(VENDOR_NAME, vendorName, writer);
    }

    if (priority != INVALID_PRIORITY) {
        XMLUtils.writeXmlAttribute(PRIORITY, priority, writer);
    }

    writer.write(XMLUtils.ELEMENT_END);
}
python
def solve_equilibrium_point(self, analyzer1, analyzer2, delu_dict={},
                            delu_default=0, units="nanometers"):
    """
    Gives the radial size of two particles where equilibrium is reached
    between both particles. NOTE: the solution here is not the same as
    the solution visualized in the plot because solving for r requires
    that both the total surface area and volume of the particles are
    functions of r.

    Args:
        analyzer1 (SurfaceEnergyPlotter): Analyzer associated with the
            first polymorph
        analyzer2 (SurfaceEnergyPlotter): Analyzer associated with the
            second polymorph
        delu_dict (Dict): Dictionary of the chemical potentials to be set as
            constant. Note the key should be a sympy Symbol object of the
            format: Symbol("delu_el") where el is the name of the element.
        delu_default (float): Default value for all unset chemical potentials
        units (str): Can be nanometers or Angstrom

    Returns:
        Particle radius in nm
    """
    # Set up
    wulff1 = analyzer1.wulff_from_chempot(delu_dict=delu_dict,
                                          delu_default=delu_default,
                                          symprec=self.symprec)
    wulff2 = analyzer2.wulff_from_chempot(delu_dict=delu_dict,
                                          delu_default=delu_default,
                                          symprec=self.symprec)

    # Now calculate r
    delta_gamma = wulff1.weighted_surface_energy - wulff2.weighted_surface_energy
    delta_E = self.bulk_gform(analyzer1.ucell_entry) - \
        self.bulk_gform(analyzer2.ucell_entry)
    r = (-3 * delta_gamma) / delta_E

    return r / 10 if units == "nanometers" else r
java
private static double IndianToJD(int year, int month, int date) {
    int leapMonth, gyear, m;
    double start, jd;

    gyear = year + INDIAN_ERA_START;

    if (isGregorianLeap(gyear)) {
        leapMonth = 31;
        start = gregorianToJD(gyear, 3, 21);
    } else {
        leapMonth = 30;
        start = gregorianToJD(gyear, 3, 22);
    }

    if (month == 1) {
        jd = start + (date - 1);
    } else {
        jd = start + leapMonth;
        m = month - 2;
        m = Math.min(m, 5);
        jd += m * 31;

        if (month >= 8) {
            m = month - 7;
            jd += m * 30;
        }
        jd += date - 1;
    }

    return jd;
}
python
def sequence_to_string(a_list,
                       open_bracket_char='[',
                       close_bracket_char=']',
                       delimiter=", "):
    """a dedicated function that turns a list into a comma delimited string
    of items converted. This method will flatten nested lists."""
    return "%s%s%s" % (
        open_bracket_char,
        delimiter.join(local_to_str(x) for x in a_list),
        close_bracket_char
    )
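For illustration, here is a stand-in local_to_str that recurses into nested lists, treating both functions as living in the same module (the real converter is defined elsewhere):

def local_to_str(x):
    # Recurse so nested lists are flattened into the same notation.
    if isinstance(x, (list, tuple)):
        return sequence_to_string(x)
    return str(x)

print(sequence_to_string([1, 'two', [3, 4]]))  # [1, two, [3, 4]]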
java
private DescribeSubnetsResponseType describeSubnets() {
    DescribeSubnetsResponseType ret = new DescribeSubnetsResponseType();
    ret.setRequestId(UUID.randomUUID().toString());
    SubnetSetType subnetSetType = new SubnetSetType();
    for (Iterator<MockSubnet> mockSubnet = mockSubnetController.describeSubnets()
            .iterator(); mockSubnet.hasNext();) {
        MockSubnet item = mockSubnet.next();
        SubnetType subnetType = new SubnetType();
        if (!DEFAULT_MOCK_PLACEMENT.getAvailabilityZone().equals(currentRegion)) {
            subnetType.setVpcId(currentRegion + "_" + item.getVpcId());
            subnetType.setSubnetId(currentRegion + "_" + item.getSubnetId());
        } else {
            subnetType.setVpcId(item.getVpcId());
            subnetType.setSubnetId(item.getSubnetId());
        }
        subnetType.setState("available");
        subnetType.setCidrBlock(item.getCidrBlock());
        subnetType.setAvailableIpAddressCount(item.getAvailableIpAddressCount());
        subnetType.setAvailabilityZone(currentRegion);
        subnetType.setDefaultForAz(false);
        subnetType.setMapPublicIpOnLaunch(false);
        subnetSetType.getItem().add(subnetType);
    }
    ret.setSubnetSet(subnetSetType);
    return ret;
}
java
private DefaultSubscriptionBase getDefaultSubscriptionBase(final Entity subscriptionBase, final Catalog catalog, final InternalTenantContext context) throws CatalogApiException {
    if (subscriptionBase instanceof DefaultSubscriptionBase) {
        return (DefaultSubscriptionBase) subscriptionBase;
    } else {
        // Safe cast, see above
        return (DefaultSubscriptionBase) dao.getSubscriptionFromId(subscriptionBase.getId(), catalog, context);
    }
}
python
def ext_pillar(minion_id,  # pylint: disable=W0613
               pillar,  # pylint: disable=W0613
               conf):
    '''
    Parse varstack data and return the result
    '''
    vs = varstack.Varstack(config_filename=conf)
    return vs.evaluate(__grains__)
python
def git_clone(prettyname: str, url: str, directory: str,
              branch: str = None, commit: str = None,
              clone_options: List[str] = None,
              run_func: Callable[[List[str]], Any] = None) -> bool:
    """
    Fetches a Git repository, unless we have it already.

    Args:
        prettyname: name to display to user
        url: URL
        directory: destination directory
        branch: repository branch
        commit: repository commit tag
        clone_options: additional options to pass to ``git clone``
        run_func: function to use to call an external command

    Returns:
        did we need to do anything?
    """
    run_func = run_func or subprocess.check_call
    clone_options = clone_options or []  # type: List[str]
    if os.path.isdir(directory):
        log.info("Not re-cloning {} Git repository: using existing source "
                 "in {}".format(prettyname, directory))
        return False
    log.info("Fetching {} source from {} into {}",
             prettyname, url, directory)
    require_executable(GIT)
    gitargs = [GIT, "clone"] + clone_options
    if branch:
        gitargs += ["--branch", branch]
    gitargs += [url, directory]
    run_func(gitargs)
    if commit:
        log.info("Resetting {} local Git repository to commit {}",
                 prettyname, commit)
        run_func([GIT, "-C", directory, "reset", "--hard", commit])
        # Using a Git repository that's not in the working directory:
        # https://stackoverflow.com/questions/1386291/git-git-dir-not-working-as-expected  # noqa
    return True
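An illustrative invocation; the repository URL and paths are hypothetical, and the module-level GIT, log, and require_executable helpers the function references are assumed to exist.

git_clone('Example project',
          url='https://github.com/example/project.git',
          directory='/tmp/project',
          branch='main')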
python
def __print_command_help(self, session, namespace, cmd_name):
    """
    Prints the documentation of the given command

    :param session: Session handler
    :param namespace: Name space of the command
    :param cmd_name: Name of the command
    """
    # Extract documentation
    args, doc = self.__extract_help(self._commands[namespace][cmd_name])

    # Print the command name, and its arguments
    if args:
        session.write_line("- {0} {1}", cmd_name, args)
    else:
        session.write_line("- {0}", cmd_name)

    # Print the documentation line
    session.write_line("\t\t{0}", doc)
python
def create_weights(nodes, dist):
    """Create weights for the Laja method."""
    poly = chaospy.quad.generate_stieltjes(dist, len(nodes)-1, retall=True)[0]
    poly = chaospy.poly.flatten(chaospy.poly.Poly(poly))
    weights_inverse = poly(nodes)
    weights = numpy.linalg.inv(weights_inverse)
    return weights[:, 0]
python
def example_metadata(study_name, draft_name):
    """Example of building a metadata doc"""
    odm = ODM("SYSTEM_NAME", filetype=ODM.FILETYPE_SNAPSHOT)

    study = Study(study_name, project_type=Study.PROJECT)

    # Push study element into odm
    odm << study

    # Create global variables and set them into study.
    study << GlobalVariables(study_name)  # Expected that protocol name will match the Study OID.

    # Create some basic definitions
    bd = BasicDefinitions()

    # Add some measurement units to the basic definitions. This time using the call () syntax:
    bd(
        MeasurementUnit("KG", "Kilograms")(
            Symbol()(TranslatedText("Kilograms"))
        ),
        MeasurementUnit("CM", "Centimeters")(
            Symbol()(TranslatedText("Centimeters"))
        )
    )

    # Add basic definitions to study
    study << bd

    # Now metadata which will contain all our form and field defs eventually
    meta = MetaDataVersion('META1', draft_name)
    study << meta

    # Protocol contains StudyEventRefs
    protocol = Protocol()
    # Add some StudyEventRefs
    protocol << StudyEventRef("FLDR1", 1, True)  # Order 1, Mandatory
    # protocol << StudyEventRef("FLDR2", 2, False) # Order 2, Not Mandatory
    # protocol << StudyEventRef("AE", 3, True)
    meta << protocol

    # Add Study Event Defs with some child FormRefs
    fldr1 = StudyEventDef("FLDR1", "Folder 1", False, StudyEventDef.SCHEDULED)
    fldr1 << FormRef("DM", 1, True)
    fldr1 << FormRef("VS", 2, True)
    meta << fldr1

    meta << StudyEventDef("FLDR2", "Folder 2", False, StudyEventDef.UNSCHEDULED)(
        FormRef("VS", 1, True)
    )

    meta << StudyEventDef("AE", "Adverse Events", False, StudyEventDef.COMMON)(
        FormRef("AE", 1, False)
    )

    dm_form = FormDef("DM", "Demography")
    dm_form << MdsolHelpText("en", "Some help text for Demography")
    dm_form << MdsolViewRestriction('Data Manager')
    dm_form << MdsolEntryRestriction('Batch Upload')
    dm_form << ItemGroupRef("DM_IG1", 1)
    dm_form << ItemGroupRef("DM_IG2", 2)

    # Add to metadata
    meta << dm_form

    # Define item group
    meta << ItemGroupDef("DM_IG1", "DM Item Group 1")(
        MdsolLabelRef("LBL1", 1),
        ItemRef("SEX", 2)(
            MdsolAttribute("Standards", "CDASH", "SEX"),
            MdsolAttribute("Standards", "STDNUMBER", "1120")
        ),
        ItemRef("RACE", 3),
        ItemRef("RACE_OTH", 4),
        ItemRef("DOB", 5),
        ItemRef("AGE", 6)
    )

    # Add the ItemDefs
    meta << ItemDef("SEX", "Gender", DataType.Text, 1,
                    control_type=ControlType.RadioButton)(
        Question()(TranslatedText("Gender at Birth")),
        CodeListRef("CL_SEX")
    )
    meta << ItemDef("RACE", "Race", DataType.Text, 2,
                    control_type=ControlType.RadioButtonVertical)(
        Question()(TranslatedText("Race")),
        CodeListRef("CL_RACE")
    )
    meta << ItemDef("RACE_OTH", "RaceOther", DataType.Text, 20) \
        << Question() << TranslatedText("If Race Other, please specify")

    meta << ItemDef("DOB", "DateOfBirth", DataType.Date, 10,
                    control_type=ControlType.DateTime,
                    date_time_format="dd/mm/yyyy")(
        Question()(TranslatedText("Date of Birth")),
        MdsolHelpText("en", "If month unknown, enter January")
    )

    meta << ItemDef("AGE", "Age in Years", DataType.Integer, 4,
                    significant_digits=3, control_type=ControlType.Text)(
        Question()(TranslatedText("Age in Years")),
        RangeCheck(RangeCheckComparatorType.GreaterThanEqualTo, RangeCheckType.Soft)(
            CheckValue("18")
        ),
        RangeCheck(RangeCheckComparatorType.LessThanEqualTo, RangeCheckType.Soft)(
            CheckValue("65")
        )
    )

    # Add a Label
    meta.add(MdsolLabelDef("LBL1", "Label1")(TranslatedText("Please answer all questions.")))

    # As well as () and << you can use add()
    meta.add(
        CodeList("CL_SEX", "SEX", datatype=DataType.Text)(
            CodeListItem("M").add(
                Decode().add(TranslatedText("Male"))
            ),
            CodeListItem("F").add(
                Decode().add(TranslatedText("Female"))
            ),
        ),
        CodeList("CL_RACE", "RACE", datatype=DataType.Text)(
            CodeListItem("AS")(Decode()(TranslatedText("Asian"))),
            CodeListItem("CA")(Decode()(TranslatedText("White"))),
            CodeListItem("OT")(Decode()(TranslatedText("Other"))),
        )
    )

    meta.add(MdsolEditCheckDef('CHECK1')(
        # Static value required to make this stick, gets ignored but won't load without it
        MdsolCheckStep(form_oid="DM", field_oid="RACE", data_format='CodedValue', static_value="1"),
        MdsolCheckStep(static_value="OT", data_format="$2"),
        MdsolCheckStep(function=StepType.IsEqualTo),
        MdsolCheckStep(form_oid="DM", field_oid="RACE_OTH"),
        MdsolCheckStep(function=StepType.IsEmpty),
        MdsolCheckStep(function=StepType.And),
        MdsolCheckAction(form_oid="DM", field_oid="RACE_OTH",
                         check_action_type=ActionType.OpenQuery,
                         check_string="Race is set as OTHER but not specified. Please correct.",
                         check_options="Site from System,RequiresResponse,RequiresManualClose")
    ),
        MdsolEditCheckDef('CHECK2')
    )

    meta.add(MdsolCustomFunctionDef("CF1", "SELECT 1,2 FROM DataPoints", language="SQ"))
    meta.add(MdsolCustomFunctionDef("CF2", "return true;", language="C#"))

    meta.add(
        # Variable OID required
        MdsolDerivationDef("AGE", form_oid="DM", field_oid="AGE")(
            # Variable OID required to be identified as a data step
            MdsolDerivationStep(form_oid="DM", field_oid="DOB",
                                data_format="StandardValue", variable_oid="DOB"),
            MdsolDerivationStep(function=StepType.Age)
        )
    )

    return odm
java
public static StreamingOutput buildResponseOfDomLR(final IStorage pDatabase,
        final IBackendFactory pStorageFac, final IRevisioning pRevision) {
    final StreamingOutput sOutput = new StreamingOutput() {
        @Override
        public void write(final OutputStream output) throws IOException, WebApplicationException {
            Document document;
            try {
                document = createSurroundingXMLResp();
                final Element resElement = RESTResponseHelper.createResultElement(document);

                List<Element> collections;
                try {
                    collections =
                        RESTResponseHelper.createCollectionElementDBs(pDatabase, document,
                            pStorageFac, pRevision);
                } catch (final TTException exce) {
                    throw new WebApplicationException(exce);
                }
                for (final Element resource : collections) {
                    resElement.appendChild(resource);
                }
                document.appendChild(resElement);
                final DOMSource domSource = new DOMSource(document);
                final StreamResult streamResult = new StreamResult(output);
                final Transformer transformer = TransformerFactory.newInstance().newTransformer();
                transformer.transform(domSource, streamResult);

            } catch (final ParserConfigurationException exce) {
                throw new WebApplicationException(exce);
            } catch (final TransformerConfigurationException exce) {
                throw new WebApplicationException(exce);
            } catch (final TransformerFactoryConfigurationError exce) {
                throw new WebApplicationException(exce);
            } catch (final TransformerException exce) {
                throw new WebApplicationException(exce);
            }
        }
    };
    return sOutput;
}
python
def add(self, fixed=None, random=None, priors=None, family='gaussian',
        link=None, categorical=None, append=True):
    '''Adds one or more terms to the model via an R-like formula syntax.

    Args:
        fixed (str): Optional formula specification of fixed effects.
        random (list): Optional list-based specification of random effects.
        priors (dict): Optional specification of priors for one or more
            terms. A dict where the keys are the names of terms in the
            model, and the values are either instances of class Prior or
            ints, floats, or strings that specify the width of the priors
            on a standardized scale.
        family (str, Family): A specification of the model family
            (analogous to the family object in R). Either a string, or an
            instance of class priors.Family. If a string is passed, a
            family with the corresponding name must be defined in the
            defaults loaded at Model initialization. Valid pre-defined
            families are 'gaussian', 'bernoulli', 'poisson', and 't'.
        link (str): The model link function to use. Can be either a string
            (must be one of the options defined in the current backend;
            typically this will include at least 'identity', 'logit',
            'inverse', and 'log'), or a callable that takes a 1D ndarray
            or theano tensor as the sole argument and returns one with
            the same shape.
        categorical (str, list): The names of any variables to treat as
            categorical. Can be either a single variable name, or a list
            of names. If categorical is None, the data type of the columns
            in the DataFrame will be used to infer handling. In cases
            where numeric columns are to be treated as categoricals (e.g.,
            random factors coded as numerical IDs), explicitly passing
            variable names via this argument is recommended.
        append (bool): If True, terms are appended to the existing model
            rather than replacing any existing terms. This allows
            formula-based specification of the model in stages.
    '''
    data = self.data

    # Primitive values (floats, strs) can be overwritten with Prior objects
    # so we need to make sure to copy first to avoid bad things happening
    # if user is re-using same prior dict in multiple models.
    if priors is None:
        priors = {}
    else:
        priors = deepcopy(priors)

    if not append:
        self.reset()

    # Explicitly convert columns to category if desired--though this
    # can also be done within the formula using C().
    if categorical is not None:
        data = data.copy()
        cats = listify(categorical)
        data[cats] = data[cats].apply(lambda x: x.astype('category'))

    # Custom patsy.missing.NAAction class. Similar to patsy drop/raise
    # defaults, but changes the raised message and logs any dropped rows
    NA_handler = Custom_NA(dropna=self.dropna)

    # screen fixed terms
    if fixed is not None:
        if '~' in fixed:
            clean_fix = re.sub(r'\[.+\]', '', fixed)
            dmatrices(clean_fix, data=data, NA_action=NA_handler)
        else:
            dmatrix(fixed, data=data, NA_action=NA_handler)

    # screen random terms
    if random is not None:
        for term in listify(random):
            for side in term.split('|'):
                dmatrix(side, data=data, NA_action=NA_handler)

    # update the running list of complete cases
    if len(NA_handler.completes):
        self.completes.append(NA_handler.completes)

    # save arguments to pass to _add()
    args = dict(zip(
        ['fixed', 'random', 'priors', 'family', 'link', 'categorical'],
        [fixed, random, priors, family, link, categorical]))
    self.added_terms.append(args)

    self.built = False
java
public Vector position(VesselPosition relativeTo) {
    // TODO longitude wrapping check
    double xMetres = (lon - relativeTo.lon()) * relativeTo.metresPerDegreeLongitude();
    double yMetres = (lat - relativeTo.lat()) * relativeTo.metresPerDegreeLatitude();
    return new Vector(xMetres, yMetres);
}
python
def setSpeed(self, vehID, speed):
    """setSpeed(string, double) -> None

    Sets the speed in m/s for the named vehicle within the last step.
    """
    self._connection._sendDoubleCmd(
        tc.CMD_SET_VEHICLE_VARIABLE, tc.VAR_SPEED, vehID, speed)
java
public OvhOfferTask billingAccount_number_serviceName_convertToLine_POST(String billingAccount, String serviceName, String offer) throws IOException {
    String qPath = "/telephony/{billingAccount}/number/{serviceName}/convertToLine";
    StringBuilder sb = path(qPath, billingAccount, serviceName);
    HashMap<String, Object> o = new HashMap<String, Object>();
    addBody(o, "offer", offer);
    String resp = exec(qPath, "POST", sb.toString(), o);
    return convertTo(resp, OvhOfferTask.class);
}
java
@Override
protected List<List<T>> mate(List<T> parent1, List<T> parent2, int numberOfCrossoverPoints, Random rng) {
    // Use random-access lists for performance.
    List<T> offspring1 = new ArrayList<T>(parent1);
    List<T> offspring2 = new ArrayList<T>(parent2);

    // Apply as many cross-overs as required.
    for (int i = 0; i < numberOfCrossoverPoints; i++) {
        // Cross-over index is always greater than zero and less than
        // the length of the parent so that we always pick a point that
        // will result in a meaningful cross-over.
        int max = Math.min(parent1.size(), parent2.size());
        if (max > 1) { // Don't perform cross-over if there aren't at least 2 elements in each list.
            int crossoverIndex = (1 + rng.nextInt(max - 1));
            for (int j = 0; j < crossoverIndex; j++) {
                T temp = offspring1.get(j);
                offspring1.set(j, offspring2.get(j));
                offspring2.set(j, temp);
            }
        }
    }

    List<List<T>> result = new ArrayList<List<T>>(2);
    result.add(offspring1);
    result.add(offspring2);
    return result;
}
java
@Nonnull
public ApiFuture<DocumentSnapshot> get(@Nonnull DocumentReference documentRef) {
    Preconditions.checkState(isEmpty(), READ_BEFORE_WRITE_ERROR_MSG);

    return ApiFutures.transform(
        firestore.getAll(new DocumentReference[] {documentRef}, /*fieldMask=*/ null, transactionId),
        new ApiFunction<List<DocumentSnapshot>, DocumentSnapshot>() {
            @Override
            public DocumentSnapshot apply(List<DocumentSnapshot> snapshots) {
                return snapshots.isEmpty() ? null : snapshots.get(0);
            }
        });
}
python
def get_path(self, file):
    """Get the full path of the notebook found in the directory
    specified by self.path.
    """
    class_path = inspect.getfile(self.__class__)
    parent_path = os.path.dirname(class_path)
    path = os.path.join(parent_path, self.path, file)
    return os.path.realpath(path)
java
public IRestfulClientFactory getRestfulClientFactory() {
    if (myRestfulClientFactory == null) {
        try {
            myRestfulClientFactory = (IRestfulClientFactory) ReflectionUtil.newInstance(
                Class.forName("ca.uhn.fhir.rest.client.apache.ApacheRestfulClientFactory"),
                FhirContext.class, this);
        } catch (ClassNotFoundException e) {
            throw new ConfigurationException("hapi-fhir-client does not appear to be on the classpath");
        }
    }
    return myRestfulClientFactory;
}
python
def import_class(import_path, name=None):
    """
    Imports and returns class for full class path string.

    Ex. 'foo.bar.Bogus' -> <class 'foo.bar.Bogus'>
    """
    if not name:
        import_path, name = import_path.rsplit('.', 1)
    mod = import_module(import_path)
    try:
        return getattr(mod, name)
    except AttributeError as e:
        raise ImportError(e)
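For instance, resolving a standard-library class works the same as any dotted path, assuming import_module (from importlib) is available in the function's module as the code implies:

OrderedDict = import_class('collections.OrderedDict')
print(OrderedDict)  # <class 'collections.OrderedDict'>
print(import_class('collections', name='deque'))  # explicit name argument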
java
public JdbcMapperBuilder<T> addMapping(final String column, final int index, final int sqlType, Object... properties) {
    return addMapping(new JdbcColumnKey(column, index, sqlType), properties);
}
python
def base_add_isoquant_data(features, quantfeatures, acc_col, quantacc_col,
                           quantfields):
    """Generic function that takes a peptide or protein table and adds
    quant data from ANOTHER such table."""
    quant_map = get_quantmap(quantfeatures, quantacc_col, quantfields)
    for feature in features:
        feat_acc = feature[acc_col]
        outfeat = {k: v for k, v in feature.items()}
        try:
            outfeat.update(quant_map[feat_acc])
        except KeyError:
            outfeat.update({field: 'NA' for field in quantfields})
        yield outfeat
java
public T removeHeader(String name) {
    if (name != null) {
        headers.remove(name.trim());
    }
    return (T) this;
}
java
private String generateXml(String encrypt, String signature, String timestamp, String nonce) {
    String format = "<xml>\n"
            + "<Encrypt><![CDATA[%1$s]]></Encrypt>\n"
            + "<MsgSignature><![CDATA[%2$s]]></MsgSignature>\n"
            + "<TimeStamp>%3$s</TimeStamp>\n"
            + "<Nonce><![CDATA[%4$s]]></Nonce>\n"
            + "</xml>";
    return String.format(format, encrypt, signature, timestamp, nonce);
}
python
def unpack_db_to_component_dfs(self, convert_dates=False):
    """Returns the set of known tables in the adjustments file in DataFrame
    form.

    Parameters
    ----------
    convert_dates : bool, optional
        By default, dates are returned in seconds since EPOCH. If
        convert_dates is True, all ints in date columns will be converted
        to datetimes.

    Returns
    -------
    dfs : dict{str->DataFrame}
        Dictionary which maps table name to the corresponding DataFrame
        version of the table, where all date columns have been coerced back
        from int to datetime.
    """
    return {
        t_name: self.get_df_from_table(t_name, convert_dates)
        for t_name in self._datetime_int_cols
    }
java
public static ByteArrayInputStream toStream(ByteBuffer byteBuffer) {
    if (byteBuffer == null) {
        return new ByteArrayInputStream(new byte[0]);
    }
    return new ByteArrayInputStream(copyBytesFrom(byteBuffer));
}
java
@Override
public void eUnset(int featureID) {
    switch (featureID) {
        case TypesPackage.JVM_ANNOTATION_REFERENCE__ANNOTATION:
            setAnnotation((JvmAnnotationType) null);
            return;
        case TypesPackage.JVM_ANNOTATION_REFERENCE__EXPLICIT_VALUES:
            getExplicitValues().clear();
            return;
    }
    super.eUnset(featureID);
}
java
public ModelBuilder properties(Map<String, ModelProperty> properties) {
    this.properties.putAll(nullToEmptyMap(properties));
    return this;
}
python
def application_2_json(self):
    """
    transform ariane_clip3 Application object to Ariane server JSON obj
    :return: Ariane JSON obj
    """
    LOGGER.debug("Application.application_2_json")
    json_obj = {
        'applicationID': self.id,
        'applicationName': self.name,
        'applicationDescription': self.description,
        'applicationShortName': self.short_name,
        'applicationColorCode': self.color_code,
        'applicationCompanyID': self.company_id,
        'applicationTeamID': self.team_id,
        'applicationOSInstancesID': self.osi_ids
    }
    return json.dumps(json_obj)
python
def update_trigger(self, trigger):
    """
    Updates the trigger record on the Alert API that has the ID of the
    specified Trigger object: the remote record is updated with data from
    the local Trigger object.

    :param trigger: the Trigger with updated data
    :type trigger: `pyowm.alertapi30.trigger.Trigger`
    :return: ``None`` if update is successful, an error otherwise
    """
    assert trigger is not None
    assert isinstance(trigger.id, str), "Value must be a string"
    the_time_period = {
        "start": {
            "expression": "after",
            "amount": trigger.start_after_millis
        },
        "end": {
            "expression": "after",
            "amount": trigger.end_after_millis
        }
    }

    the_conditions = [dict(name=c.weather_param, expression=c.operator, amount=c.amount)
                      for c in trigger.conditions]

    the_area = [a.as_dict() for a in trigger.area]

    status, _ = self.http_client.put(
        NAMED_TRIGGER_URI % trigger.id,
        params={'appid': self.API_key},
        data=dict(time_period=the_time_period, conditions=the_conditions,
                  area=the_area),
        headers={'Content-Type': 'application/json'})
java
protected void initListener(XmlParser.Node node) {
    String className = node.getString("listener-class", false, true);
    Object listener = null;
    try {
        Class listenerClass = getWebApplicationContext().loadClass(className);
        listener = listenerClass.newInstance();
    } catch (Exception e) {
        log.warn("Could not instantiate listener " + className, e);
        return;
    }
    if (!(listener instanceof EventListener)) {
        log.warn("Not an EventListener: " + listener);
        return;
    }
    boolean known = false;
    try {
        getWebApplicationContext().addEventListener((EventListener) listener);
        known = true;
    } catch (Exception e) {
        LogSupport.ignore(log, e);
    }
    try {
        getWebApplicationHandler().addEventListener((EventListener) listener);
        known = true;
    } catch (Exception e) {
        LogSupport.ignore(log, e);
    }
    if (!known)
        log.warn("Unknown: " + listener);
}
java
private boolean hasNonComplexMember(Complex cx) {
    for (PhysicalEntity mem : cx.getComponent()) {
        if (!(mem instanceof Complex) || hasNonComplexMember((Complex) mem)) {
            return true;
        }
    }
    return false;
}
python
def _str_datetime_now(self, x=None):
    """Return datetime string for use with time attributes.

    Handling depends on input:
    'now' - returns datetime for now
    number - assume datetime values, generate string
    other - no change, return same value
    """
    if (x == 'now'):
        # Now, this is what datetime_to_str() with no arg gives
        return(datetime_to_str())
    try:
        # Test for number
        junk = x + 0.0
        return datetime_to_str(x)
    except TypeError:
        # Didn't look like a number, treat as string
        return x
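The `x + 0.0` probe above is a compact duck-typing test: anything numeric survives the addition, everything else raises TypeError. A standalone sketch of the same check, with invented example values:

def looks_numeric(x):
    # Mirrors the `x + 0.0` probe used in _str_datetime_now.
    try:
        x + 0.0
        return True
    except TypeError:
        return False

assert looks_numeric(1234567890.5)
assert not looks_numeric('2021-01-21T00:00:00Z')
assert not looks_numeric(None)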
python
def create(cls, selective: typing.Optional[base.Boolean] = None):
    """
    Create new force reply

    :param selective: use this parameter if you want to force a reply
        from specific users only
    :return: new force reply object with ``selective`` set
    """
    return cls(selective=selective)
python
def _pruned(self, path): """Is a stat tree node pruned? Goes through the list of prune rules to find one that applies. Chronologically newer rules are higher-precedence than older ones. If no rule applies, the stat is not pruned by default.""" if path[0] == '/': path = path[1:] for rule in reversed(self.pruneRules): if isinstance(rule, six.string_types): if fnmatch(path, rule): return True elif rule(path): return True return False
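A standalone version of the rule walk above, assuming plain `str` rules (the original accepts `six.string_types` for Python 2 compatibility); iterating in reverse is what gives newer rules precedence:

from fnmatch import fnmatch

def pruned(path, prune_rules):
    # Rules may be glob strings or callables; first match wins,
    # scanning from the most recently added rule backwards.
    if path.startswith('/'):
        path = path[1:]
    for rule in reversed(prune_rules):
        if isinstance(rule, str):
            if fnmatch(path, rule):
                return True
        elif rule(path):
            return True
    return False

assert pruned('/stats/foo/bar', ['stats/*'])
assert not pruned('other/x', ['stats/*'])
assert pruned('other/x', ['stats/*', lambda p: p.startswith('other')])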
java
@FFDCIgnore(Exception.class)
public boolean isEntityInRealm(String uniqueName) {
    if (isSafRegistry()) {
        try {
            return userRegistry.isValidUser(uniqueName);
        } catch (Exception e) {
            /* Ignore. */
        }
        try {
            return userRegistry.isValidGroup(uniqueName);
        } catch (Exception e) {
            /* Ignore. */
        }
    } else {
        try {
            SearchResult result = userRegistry.getUsers(uniqueName, 1);
            if (result != null && result.getList().size() > 0)
                return true;
        } catch (Exception e) {
            /* Ignore. */
        }
        try {
            SearchResult result = userRegistry.getGroups(uniqueName, 1);
            if (result != null && result.getList().size() > 0)
                return true;
        } catch (Exception e) {
            /* Ignore. */
        }
    }
    return false;
}
java
@SuppressWarnings("unchecked")
@Override
public Class<T> versionClass() {
    if (this.retentionPolicies.size() == 1) {
        return (Class<T>) this.retentionPolicies.get(0).versionClass();
    }
    Class<T> klazz = (Class<T>) this.retentionPolicies.get(0).versionClass();
    for (RetentionPolicy<T> policy : this.retentionPolicies) {
        klazz = commonSuperclass(klazz, (Class<T>) policy.versionClass());
    }
    return klazz;
}
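Since `commonSuperclass` is a helper defined elsewhere, here is a hedged Python sketch of the same fold: walk one class's MRO and take the first entry shared by all the others. Names and data are illustrative only:

def common_superclass(classes):
    # Walk the first class's MRO and return the first entry that
    # appears in every other class's MRO.
    mros = [c.__mro__ for c in classes]
    for candidate in mros[0]:
        if all(candidate in mro for mro in mros[1:]):
            return candidate
    return object

assert common_superclass([bool, int]) is int
assert common_superclass([int, str]) is object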
python
def index_impl(self): """Return {runName: {tagName: {displayName: ..., description: ...}}}.""" if self._db_connection_provider: # Read tags from the database. db = self._db_connection_provider() cursor = db.execute(''' SELECT Tags.tag_name, Tags.display_name, Runs.run_name FROM Tags JOIN Runs ON Tags.run_id = Runs.run_id WHERE Tags.plugin_name = ? ''', (metadata.PLUGIN_NAME,)) result = collections.defaultdict(dict) for row in cursor: tag_name, display_name, run_name = row result[run_name][tag_name] = { 'displayName': display_name, # TODO(chihuahua): Populate the description. Currently, the tags # table does not link with the description table. 'description': '', } return result runs = self._multiplexer.Runs() result = {run: {} for run in runs} mapping = self._multiplexer.PluginRunToTagToContent(metadata.PLUGIN_NAME) for (run, tag_to_content) in six.iteritems(mapping): for (tag, content) in six.iteritems(tag_to_content): content = metadata.parse_plugin_metadata(content) summary_metadata = self._multiplexer.SummaryMetadata(run, tag) result[run][tag] = {'displayName': summary_metadata.display_name, 'description': plugin_util.markdown_to_safe_html( summary_metadata.summary_description)} return result
python
def get_repositories_by_ids(self, repository_ids=None): """Gets a ``RepositoryList`` corresponding to the given ``IdList``. In plenary mode, the returned list contains all of the repositories specified in the ``Id`` list, in the order of the list, including duplicates, or an error results if an ``Id`` in the supplied list is not found or inaccessible. Otherwise, inaccessible ``Repositories`` may be omitted from the list and may present the elements in any order including returning a unique set. arg: repository_ids (osid.id.IdList): the list of ``Ids`` to retrieve return: (osid.repository.RepositoryList) - the returned ``Repository list`` raise: NotFound - an ``Id`` was not found raise: NullArgument - ``repository_ids`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.* """ # Implemented from awsosid template for - # osid.resource.BinLookupSession.get_bins_by_ids_template if not self._can('lookup'): raise PermissionDenied() else: return self._provider_session.get_repositories_by_ids(repository_ids)
python
def _get_path_assembly_mapping_data( self, source_assembly, target_assembly, retries=10 ): """ Get local path to assembly mapping data, downloading if necessary. Parameters ---------- source_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap from target_assembly : {'NCBI36', 'GRCh37', 'GRCh38'} assembly to remap to retries : int number of retries per chromosome to download assembly mapping data Returns ------- str path to <source_assembly>_<target_assembly>.tar.gz References ---------- ..[1] Ensembl, Assembly Information Endpoint, https://rest.ensembl.org/documentation/info/assembly_info ..[2] Ensembl, Assembly Map Endpoint, http://rest.ensembl.org/documentation/info/assembly_map """ if not lineage.create_dir(self._resources_dir): return None chroms = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "21", "22", "X", "Y", "MT", ] assembly_mapping_data = source_assembly + "_" + target_assembly destination = os.path.join( self._resources_dir, assembly_mapping_data + ".tar.gz" ) if not os.path.exists(destination) or not self._all_chroms_in_tar( chroms, destination ): print("Downloading {}".format(os.path.relpath(destination))) try: with tarfile.open(destination, "w:gz") as out_tar: for chrom in chroms: file = chrom + ".json" map_endpoint = ( "/map/human/" + source_assembly + "/" + chrom + "/" + target_assembly + "?" ) # get assembly mapping data response = None retry = 0 while response is None and retry < retries: response = self._ensembl_rest_client.perform_rest_action( map_endpoint ) retry += 1 if response is not None: # open temp file, save json response to file, close temp file with tempfile.NamedTemporaryFile( delete=False, mode="w" ) as f: json.dump(response, f) # add temp file to archive out_tar.add(f.name, arcname=file) # remove temp file os.remove(f.name) except Exception as err: print(err) return None return destination
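The download loop above combines three patterns: bounded retries per chromosome, staging each JSON response in a temp file, and appending the staged files to a gzipped tar. A minimal self-contained sketch of that skeleton, where `fetch` is a hypothetical stand-in for the Ensembl REST call:

import json
import os
import tarfile
import tempfile

def fetch_all_to_tar(fetch, keys, destination, retries=10):
    with tarfile.open(destination, 'w:gz') as out_tar:
        for key in keys:
            response, attempt = None, 0
            while response is None and attempt < retries:
                response = fetch(key)  # returns None on failure
                attempt += 1
            if response is not None:
                # Stage the JSON in a temp file, then archive it.
                with tempfile.NamedTemporaryFile(delete=False, mode='w') as f:
                    json.dump(response, f)
                out_tar.add(f.name, arcname=key + '.json')
                os.remove(f.name)
    return destination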
python
def parse_requirements( requirements_path: str = 'requirements.txt') -> t.List[str]: """Read contents of requirements.txt file and return data from its relevant lines. Only non-empty and non-comment lines are relevant. """ requirements = [] with HERE.joinpath(requirements_path).open() as reqs_file: for requirement in [line.strip() for line in reqs_file.read().splitlines()]: if not requirement or requirement.startswith('#'): continue requirements.append(requirement) return requirements
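A quick behavioral check, assuming it runs in the same module as `parse_requirements` so the function's `HERE` global (a `pathlib.Path`, per the call to `joinpath`) can be pointed at a temporary directory; the requirements content is invented:

import pathlib
import tempfile

HERE = pathlib.Path(tempfile.mkdtemp())
HERE.joinpath('requirements.txt').write_text(
    '# a comment\n\nrequests>=2.0\nnumpy\n')
# Blank and comment lines are dropped; version specifiers are kept.
assert parse_requirements() == ['requests>=2.0', 'numpy']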
python
def get_all_methods(self, entry_point=ALL, protocol=ALL, sort_methods=False):
    """Return a list of all methods in the registry supported by the given
    entry_point / protocol pair"""
    if sort_methods:
        return [
            method for (_, method) in sorted(self._registry.items())
            if method.is_valid_for(entry_point, protocol)
        ]
    # Apply the same entry_point / protocol filter on the unsorted path,
    # so both branches honor the contract stated in the docstring.
    return [
        method for method in self._registry.values()
        if method.is_valid_for(entry_point, protocol)
    ]
java
public ConditionalBranchDescrBuilder<CEDescrBuilder<P, T>> conditionalBranch() {
    ConditionalBranchDescrBuilder<CEDescrBuilder<P, T>> conditionalBranch =
            new ConditionalBranchDescrBuilderImpl<CEDescrBuilder<P, T>>(this);
    ((ConditionalElementDescr) descr).addDescr(conditionalBranch.getDescr());
    return conditionalBranch;
}
python
def noise_from_psd(length, delta_t, psd, seed=None):
    """ Create noise with a given psd.

    Return noise with a given psd. Note that if unique noise is desired
    a unique seed should be provided.

    Parameters
    ----------
    length : int
        The length of noise to generate in samples.
    delta_t : float
        The time step of the noise.
    psd : FrequencySeries
        The noise weighting to color the noise.
    seed : {None, int}
        The seed to generate the noise. If none is given, a random seed
        is drawn.

    Returns
    -------
    noise : TimeSeries
        A TimeSeries containing gaussian noise colored by the given psd.
    """
    noise_ts = TimeSeries(zeros(length), delta_t=delta_t)

    if seed is None:
        seed = numpy.random.randint(2**32)

    randomness = lal.gsl_rng("ranlux", seed)

    N = int(1.0 / delta_t / psd.delta_f)
    n = N // 2 + 1
    stride = N // 2

    if n > len(psd):
        raise ValueError("PSD not compatible with requested delta_t")

    psd = (psd[0:n]).lal()
    psd.data.data[n - 1] = 0

    segment = TimeSeries(zeros(N), delta_t=delta_t).lal()
    length_generated = 0

    SimNoise(segment, 0, psd, randomness)
    while (length_generated < length):
        if (length_generated + stride) < length:
            noise_ts.data[length_generated:length_generated + stride] = segment.data.data[0:stride]
        else:
            noise_ts.data[length_generated:length] = segment.data.data[0:length - length_generated]

        length_generated += stride
        SimNoise(segment, stride, psd, randomness)
    return noise_ts
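The loop at the end is the core trick: the output is tiled with half-overlapping noise segments, copying `stride` samples per iteration and truncating the final chunk. A standalone numpy sketch of that stride-copy pattern, independent of lal:

import numpy

def fill_by_stride(out, next_segment, stride):
    # Copy `stride` samples from each successive segment, clipping
    # the last copy to whatever space remains in `out`.
    generated = 0
    while generated < len(out):
        segment = next_segment()
        take = min(stride, len(out) - generated)
        out[generated:generated + take] = segment[:take]
        generated += stride
    return out

noise = fill_by_stride(numpy.zeros(10), lambda: numpy.random.randn(8), 4)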