java
private Animator preparePulseAnimation() {
    AnimatorSet animation = new AnimatorSet();

    Animator firstBounce = ObjectAnimator.ofFloat(drawable,
            CircularProgressDrawable.CIRCLE_SCALE_PROPERTY,
            drawable.getCircleScale(), 0.88f);
    firstBounce.setDuration(300);
    firstBounce.setInterpolator(new CycleInterpolator(1));

    Animator secondBounce = ObjectAnimator.ofFloat(drawable,
            CircularProgressDrawable.CIRCLE_SCALE_PROPERTY, 0.75f, 0.83f);
    secondBounce.setDuration(300);
    secondBounce.setInterpolator(new CycleInterpolator(1));

    Animator thirdBounce = ObjectAnimator.ofFloat(drawable,
            CircularProgressDrawable.CIRCLE_SCALE_PROPERTY, 0.75f, 0.80f);
    thirdBounce.setDuration(300);
    thirdBounce.setInterpolator(new CycleInterpolator(1));

    animation.playSequentially(firstBounce, secondBounce, thirdBounce);
    return animation;
}
java
public static int validatePortNumber(String portStringValue) {
    final int portNumber;
    final StringBuilder exceptionMessageBuilder = new StringBuilder();
    exceptionMessageBuilder.append("Invalid value '").append(portStringValue)
            .append("' for input '").append(HttpClientInputs.PROXY_PORT)
            .append("'. Valid Values: -1 and integer values greater than 0. ");
    try {
        portNumber = Integer.parseInt(portStringValue);
        if ((portNumber <= 0) && (portNumber != -1)) {
            throw new IllegalArgumentException(exceptionMessageBuilder.toString());
        }
    } catch (NumberFormatException e) {
        throw new IllegalArgumentException(exceptionMessageBuilder.toString(), e);
    }
    return portNumber;
}
python
def read1(self, n):
    """Read up to n bytes with at most one read() system call."""
    # Simplify algorithm (branching) by transforming negative n to large n.
    # (Check for None first; comparing None with < would fail otherwise.)
    if n is None or n < 0:
        n = self.MAX_N

    # Bytes available in read buffer.
    len_readbuffer = len(self._readbuffer) - self._offset

    # Read from file.
    if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
        nbytes = n - len_readbuffer - len(self._unconsumed)
        nbytes = max(nbytes, self.MIN_READ_SIZE)
        nbytes = min(nbytes, self._compress_left)

        data = self._fileobj.read(nbytes)
        self._compress_left -= len(data)

        if data and self._decrypter is not None:
            data = ''.join(map(self._decrypter, data))

        if self._compress_type == ZIP_STORED:
            self._update_crc(data, eof=(self._compress_left == 0))
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0
        else:
            # Prepare deflated bytes for decompression.
            self._unconsumed += data

    # Handle unconsumed data.
    if (len(self._unconsumed) > 0 and n > len_readbuffer
            and self._compress_type == ZIP_DEFLATED):
        data = self._decompressor.decompress(
            self._unconsumed,
            max(n - len_readbuffer, self.MIN_READ_SIZE)
        )

        self._unconsumed = self._decompressor.unconsumed_tail
        eof = len(self._unconsumed) == 0 and self._compress_left == 0
        if eof:
            data += self._decompressor.flush()

        self._update_crc(data, eof=eof)
        self._readbuffer = self._readbuffer[self._offset:] + data
        self._offset = 0

    # Read from buffer.
    data = self._readbuffer[self._offset: self._offset + n]
    self._offset += len(data)
    return data
python
def _determine_base_url(document, page_url):
    """Determine the HTML document's base URL.

    This looks for a ``<base>`` tag in the HTML document. If present, its
    href attribute denotes the base URL of anchor tags in the document. If
    there is no such tag (or if it does not have a valid href attribute),
    the HTML file's URL is used as the base URL.

    :param document: An HTML document representation. The current
        implementation expects the result of ``html5lib.parse()``.
    :param page_url: The URL of the HTML document.
    """
    for base in document.findall(".//base"):
        href = base.get("href")
        if href is not None:
            return href
    return page_url
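A minimal usage sketch for the helper above (assuming html5lib is installed; namespaced elements must be disabled so that the ``.//base`` lookup matches):

import html5lib

html_text = (
    '<html><head><base href="https://example.com/packages/"></head>'
    '<body></body></html>'
)
document = html5lib.parse(html_text, namespaceHTMLElements=False)
# A usable <base href> wins; otherwise the page URL is returned unchanged.
print(_determine_base_url(document, "https://example.com/simple/"))
# -> https://example.com/packages/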
java
public Observable<ServiceResponse<JobResponseInner>> getJobWithServiceResponseAsync(String resourceGroupName, String resourceName, String jobId) {
    if (this.client.subscriptionId() == null) {
        throw new IllegalArgumentException("Parameter this.client.subscriptionId() is required and cannot be null.");
    }
    if (resourceGroupName == null) {
        throw new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.");
    }
    if (resourceName == null) {
        throw new IllegalArgumentException("Parameter resourceName is required and cannot be null.");
    }
    if (jobId == null) {
        throw new IllegalArgumentException("Parameter jobId is required and cannot be null.");
    }
    if (this.client.apiVersion() == null) {
        throw new IllegalArgumentException("Parameter this.client.apiVersion() is required and cannot be null.");
    }
    return service.getJob(this.client.subscriptionId(), resourceGroupName, resourceName, jobId, this.client.apiVersion(), this.client.acceptLanguage(), this.client.userAgent())
        .flatMap(new Func1<Response<ResponseBody>, Observable<ServiceResponse<JobResponseInner>>>() {
            @Override
            public Observable<ServiceResponse<JobResponseInner>> call(Response<ResponseBody> response) {
                try {
                    ServiceResponse<JobResponseInner> clientResponse = getJobDelegate(response);
                    return Observable.just(clientResponse);
                } catch (Throwable t) {
                    return Observable.error(t);
                }
            }
        });
}
java
public void process(final Node aNode) {
    // Only attributes are permitted here.
    Attr attribute = (Attr) aNode;

    // Change the fileName.
    String newFilename = namespaceUriToNewFilenameMap.get(getNamespace(attribute));
    attribute.setValue(newFilename);
}
python
def crypto_box_open_afternm(ciphertext, nonce, k):
    """
    Decrypts and returns the encrypted message ``ciphertext``, using the
    shared key ``k`` and the nonce ``nonce``.

    :param ciphertext: bytes
    :param nonce: bytes
    :param k: bytes
    :rtype: bytes
    """
    if len(nonce) != crypto_box_NONCEBYTES:
        raise exc.ValueError("Invalid nonce")

    if len(k) != crypto_box_BEFORENMBYTES:
        raise exc.ValueError("Invalid shared key")

    padded = (b"\x00" * crypto_box_BOXZEROBYTES) + ciphertext
    plaintext = ffi.new("unsigned char[]", len(padded))

    res = lib.crypto_box_open_afternm(plaintext, padded, len(padded), nonce, k)
    ensure(res == 0, "An error occurred trying to decrypt the message",
           raising=exc.CryptoError)

    return ffi.buffer(plaintext, len(padded))[crypto_box_ZEROBYTES:]
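For orientation, the high-level PyNaCl ``Box`` API wraps this low-level primitive; a quick sketch (assuming PyNaCl is installed):

from nacl.public import PrivateKey, Box

sk_sender, sk_receiver = PrivateKey.generate(), PrivateKey.generate()
# The shared key is derived once per pair ("beforenm"), then reused.
encrypted = Box(sk_sender, sk_receiver.public_key).encrypt(b"attack at dawn")
print(Box(sk_receiver, sk_sender.public_key).decrypt(encrypted))  # b'attack at dawn'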
java
public int parseArgument(Options opt, String[] args, int start)
        throws BadCommandLineException, IOException {
    int consumed = 0;
    final String optionPrefix = "-" + getOptionName() + "-";
    final int optionPrefixLength = optionPrefix.length();

    final String arg = args[start];
    final int equalsPosition = arg.indexOf('=');
    if (arg.startsWith(optionPrefix) && equalsPosition > optionPrefixLength) {
        final String propertyName = arg.substring(optionPrefixLength, equalsPosition);
        final String value = arg.substring(equalsPosition + 1);
        consumed++;
        try {
            BeanUtils.setProperty(this, propertyName, value);
        } catch (Exception ex) {
            ex.printStackTrace();
            throw new BadCommandLineException(
                    "Error setting property [" + propertyName + "], value [" + value + "].");
        }
    }
    return consumed;
}
python
def strip_msa_100(msa, threshold, plot=False):
    """
    Strip out columns of an MSA in which the percentage of gaps is at or
    above the threshold.
    """
    msa = [seq for seq in parse_fasta(msa)]
    # [[#bases, #gaps], [#bases, #gaps], ...]
    columns = [[0, 0] for pos in msa[0][1]]
    for seq in msa:
        for position, base in enumerate(seq[1]):
            if base == '-' or base == '.':
                columns[position][1] += 1
            else:
                columns[position][0] += 1
    # convert to percent gaps
    columns = [float(float(g) / float(g + b) * 100) for b, g in columns]
    for seq in msa:
        stripped = []
        for position, base in enumerate(seq[1]):
            if columns[position] < threshold:
                stripped.append(base)
        yield [seq[0], ''.join(stripped)]
    if plot is not False:
        plot_gaps(plot, columns)
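A small usage sketch (this assumes ``parse_fasta`` accepts an open FASTA handle, as the generator above implies; the filename is a placeholder):

# Keep only columns in which fewer than 50% of sequences have a gap.
with open("alignment.fa") as handle:
    for header, seq in strip_msa_100(handle, threshold=50):
        print(header, seq)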
python
def get_resource(self, resource_id):
    """Gets the ``Resource`` specified by its ``Id``.

    In plenary mode, the exact ``Id`` is found or a ``NotFound`` results.
    Otherwise, the returned ``Resource`` may have a different ``Id`` than
    requested, such as the case where a duplicate ``Id`` was assigned to a
    ``Resource`` and retained for compatibility.

    arg:    resource_id (osid.id.Id): the ``Id`` of the ``Resource`` to retrieve
    return: (osid.resource.Resource) - the returned ``Resource``
    raise:  NotFound - no ``Resource`` found with the given ``Id``
    raise:  NullArgument - ``resource_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resource
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('resource',
                                     collection='Resource',
                                     runtime=self._runtime)
    result = collection.find_one(
        dict({'_id': ObjectId(self._get_id(resource_id, 'resource').get_identifier())},
             **self._view_filter()))
    return objects.Resource(osid_object_map=result,
                            runtime=self._runtime,
                            proxy=self._proxy)
python
def fprob(dfnum, dfden, F):
    """
    Returns the (1-tailed) significance level (p-value) of an F statistic
    given the degrees of freedom for the numerator (dfR - dfF) and the
    degrees of freedom for the denominator (dfF).

    Usage:   fprob(dfnum, dfden, F)   where usually dfnum=dfbn, dfden=dfwn
    """
    p = betai(0.5 * dfden, 0.5 * dfnum, dfden / float(dfden + dfnum * F))
    return p
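As a cross-check, the same right-tail p-value is the survival function of the F distribution; a quick sketch assuming scipy is available:

from scipy import stats

dfnum, dfden, F = 3, 30, 2.92
# Should agree with fprob(dfnum, dfden, F) up to floating-point error.
print(stats.f.sf(F, dfnum, dfden))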
python
def loader(self, file_name, bad_steps=None, **kwargs):
    """Loads data from Bio-Logic .mpr files.

    Args:
        file_name (str): path to .mpr file.
        bad_steps (list of tuples): (c, s) tuples of steps s (in cycle c)
            to skip loading.

    Returns:
        new_tests (list of data objects)
    """
    new_tests = []
    if not os.path.isfile(file_name):
        self.logger.info("Missing file:\n %s" % file_name)
        return None

    filesize = os.path.getsize(file_name)
    hfilesize = humanize_bytes(filesize)
    txt = "Filesize: %i (%s)" % (filesize, hfilesize)
    self.logger.debug(txt)

    # creating temporary file and connection
    temp_dir = tempfile.gettempdir()
    temp_filename = os.path.join(temp_dir, os.path.basename(file_name))
    shutil.copy2(file_name, temp_dir)
    self.logger.debug("tmp file: %s" % temp_filename)
    self.logger.debug("HERE WE LOAD THE DATA")

    data = DataSet()
    fid = FileID(file_name)

    # div parameters and information (probably load this last)
    test_no = 1
    data.test_no = test_no
    data.loaded_from = file_name

    # some overall prms
    data.channel_index = None
    data.channel_number = None
    data.creator = None
    data.item_ID = None
    data.schedule_file_name = None
    data.start_datetime = None
    data.test_ID = None
    data.test_name = None
    data.raw_data_files.append(fid)

    # --------- read raw-data (normal-data) -------------------------
    self.logger.debug("reading raw-data")
    self.mpr_data = None
    self.mpr_log = None
    self.mpr_settings = None

    self._load_mpr_data(temp_filename, bad_steps)
    length_of_test = self.mpr_data.shape[0]
    self.logger.debug(f"length of test: {length_of_test}")

    self.logger.debug("renaming columns")
    self._rename_headers()

    # --------- stats-data (summary-data) -------------------------
    summary_df = self._create_summary_data()

    if summary_df.empty:
        txt = "\nCould not find any summary (stats-file)!"
        txt += " (summary_df.empty = True)"
        txt += "\n -> issue make_summary(use_cellpy_stat_file=False)"
        warnings.warn(txt)

    data.dfsummary = summary_df
    data.dfdata = self.mpr_data
    data.raw_data_files_length.append(length_of_test)

    new_tests.append(data)
    self._clean_up(temp_filename)
    return new_tests
java
public final void commitPersistLock(PersistentTransaction transaction) throws SevereMessageStoreException {
    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.entry(this, tc, "commitPersistLock", transaction);

    AbstractItem item = null;
    boolean hasBecomePersistentlyLocked = false;
    boolean linkHasBecomeReleasable = false;
    synchronized (this) {
        if (ItemLinkState.STATE_PERSISTING_LOCK == _itemLinkState) {
            _assertCorrectTransaction(transaction);
            item = _getAndAssertItem();
            final int strategy = _tuple.getStorageStrategy();
            if ((AbstractItem.STORE_NEVER != strategy) && (AbstractItem.STORE_MAYBE != strategy)) {
                // 272110 - added STORE_MAYBE to ease handling of corrupt persistent representation
                // storable items can be discarded if in persistentLocked state
                linkHasBecomeReleasable = _declareDiscardable();
            }
            _itemLinkState = ItemLinkState.STATE_PERSISTENTLY_LOCKED;
            hasBecomePersistentlyLocked = true;
        } else {
            if (TraceComponent.isAnyTracingEnabled() && tc.isEventEnabled())
                SibTr.event(this, tc, "Invalid Item state: " + _itemLinkState);
            if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
                SibTr.exit(this, tc, "commitPersistLock");
            throw new StateException(_itemLinkState.toString());
        }
        _transactionId = null;
    }

    if (hasBecomePersistentlyLocked) {
        if (linkHasBecomeReleasable) {
            _declareReleasable(item);
        }
    }

    if (TraceComponent.isAnyTracingEnabled() && tc.isEntryEnabled())
        SibTr.exit(this, tc, "commitPersistLock");
}
python
def normalize_mask(mask, is_micro):
    """\
    Normalizes the (user specified) mask.

    :param mask: A mask constant
    :type mask: int or None
    :param bool is_micro: Indicates if the mask is meant to be used for a
            Micro QR Code.
    """
    if mask is None:
        return None
    try:
        mask = int(mask)
    except ValueError:
        raise MaskError('Invalid data mask "{0}". Must be an integer or a '
                        'string which represents an integer value.'.format(mask))
    if is_micro:
        if not 0 <= mask < 4:
            raise MaskError('Invalid data mask "{0}" for Micro QR Code. '
                            'Must be in range 0 .. 3'.format(mask))
    else:
        if not 0 <= mask < 8:
            raise MaskError('Invalid data mask "{0}". '
                            'Must be in range 0 .. 7'.format(mask))
    return mask
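A short usage sketch (``MaskError`` comes from the same module):

print(normalize_mask(None, is_micro=False))  # None -- no mask forced
print(normalize_mask("5", is_micro=False))   # 5
normalize_mask(5, is_micro=True)             # raises MaskError: must be in 0 .. 3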
python
def check_command(self, command):
    """
    Check if command can be called.
    """
    # Use `command -v` to see if command is callable, store exit code
    code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
    # If exit code is not 0, report which command failed and return False,
    # else return True
    if code != 0:
        print("Command is not callable: {0}".format(command))
        return False
    else:
        return True
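A quick usage sketch (``checker`` stands for an instance of the surrounding class); note that ``command -v`` is a POSIX shell builtin, so this check only works where ``os.system`` invokes a POSIX-compatible shell:

checker.check_command("ls")             # True on most POSIX systems
checker.check_command("no-such-tool")   # prints a warning and returns False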
python
def generate_uncertainties(N, dist='Gamma', rseed=None):
    """
    Generates uncertainties for the white noise component in the synthetic
    light curve.

    Parameters
    ----------
    N: positive integer
        Length of the returned uncertainty vector
    dist: {'EMG', 'Gamma'}
        Probability density function (PDF) used to generate the
        uncertainties
    rseed:
        Seed for the random number generator

    Returns
    -------
    s: ndarray
        Vector containing the uncertainties
    expected_s_2: float
        Expectation of the square of s computed analytically
    """
    np.random.seed(rseed)
    # print(dist)
    if dist == 'EMG':  # Exponential modified Gaussian
        # the mean of an EMG rv is mu + 1/(K*sigma)
        # the variance of an EMG rv is sigma**2 + 1/(K*sigma)**2
        K = 1.824328605481941
        sigma = 0.05 * 0.068768312946785953
        mu = 0.05 * 0.87452567616276777
        # IMPORTANT NOTE
        # These parameters were obtained after fitting uncertainties
        # coming from 10,000 light curves of the VVV survey
        expected_s_2 = sigma**2 + mu**2 + 2*K*mu*sigma + 2*K**2*sigma**2
        s = exponnorm.rvs(K, loc=mu, scale=sigma, size=N)
    elif dist == 'Gamma':
        # The mean of a gamma rv is k*sigma
        # The variance of a gamma rv is k*sigma**2
        k = 3.0
        sigma = 0.05 / k  # mean = 0.05, var = 0.05**2/k
        s = gamma.rvs(k, loc=0.0, scale=sigma, size=N)
        expected_s_2 = k * (1 + k) * sigma**2
    return s, expected_s_2
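A quick self-check sketch (assuming numpy is imported as np, as in the module): the empirical mean of s**2 should approach the analytic expectation for large N.

s, expected_s_2 = generate_uncertainties(100000, dist='Gamma', rseed=0)
print(np.mean(s))       # ~0.05 (the fitted mean)
print(np.mean(s**2), expected_s_2)  # these two should nearly coincide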
java
public static String getUniqueId() {
    char[] data = new char[36];
    long l0 = System.currentTimeMillis();
    UUID uuid = UUID.randomUUID();
    long l1 = uuid.getMostSignificantBits();
    long l2 = uuid.getLeastSignificantBits();

    // we don't use Long.toString(long, radix) because we want to treat values as unsigned
    for (int i = 0; i < 9; i++) {
        data[8 - i] = UID_CHARS[(int) (l0 & 31)];
        l0 >>>= 5;
    }
    if (l0 != 0)
        throw new RuntimeException("ERROR");
    data[9] = '-';
    for (int i = 0; i < 13; i++) {
        data[22 - i] = UID_CHARS[(int) (l1 & 31)];
        l1 >>>= 5;
    }
    if (l1 != 0)
        throw new RuntimeException("ERROR");
    for (int i = 0; i < 13; i++) {
        data[35 - i] = UID_CHARS[(int) (l2 & 31)];
        l2 >>>= 5;
    }
    if (l2 != 0)
        throw new RuntimeException("ERROR");
    String v = new String(data);
    return v;
}
java
@Override
public void setObserver(HttpOutputStreamObserver obs) {
    if (TraceComponent.isAnyTracingEnabled() && tc.isDebugEnabled()) {
        Tr.debug(tc, "obs ->" + obs);
    }
    this.obs = obs;
}
python
def _read_csv_with_offset_pyarrow_on_ray(
    fname, num_splits, start, end, kwargs, header
):  # pragma: no cover
    """Use a Ray task to read a chunk of a CSV into a pyarrow Table.

    Note: Ray functions are not detected by codecov (thus pragma: no cover)

    Args:
        fname: The filename of the file to open.
        num_splits: The number of splits (partitions) to separate the
            DataFrame into.
        start: The start byte offset.
        end: The end byte offset.
        kwargs: The kwargs for the pyarrow `read_csv` function.
        header: The header of the file.

    Returns:
        A list containing the split pyarrow Tables and the number of rows of
        the tables as the last element. This is used to determine the total
        length of the DataFrame to build a default Index.
    """
    bio = open(fname, "rb")
    # The header line for the CSV file
    first_line = bio.readline()
    bio.seek(start)
    to_read = header + first_line + bio.read(end - start)
    bio.close()
    table = csv.read_csv(
        BytesIO(to_read), parse_options=csv.ParseOptions(header_rows=1)
    )
    chunksize = get_default_chunksize(table.num_columns, num_splits)
    chunks = [
        pa.Table.from_arrays(table.columns[chunksize * i : chunksize * (i + 1)])
        for i in range(num_splits)
    ]
    return chunks + [table.num_rows]
java
public static Thread consumeProcessOutputStream(Process self, Appendable output) {
    Thread thread = new Thread(new TextDumper(self.getInputStream(), output));
    thread.start();
    return thread;
}
java
public void setResult(R result) {
    // Acquire the lock outside the try block so that a failed lock()
    // does not trigger an unlock() on a lock we never obtained.
    lock.lock();
    try {
        this.result = result;
        notifyHaveResult();
    } finally {
        lock.unlock();
    }
}
python
def register_patches(self):
    """
    Registers the patches.

    :return: Method success.
    :rtype: bool
    """
    if not self.__paths:
        return False

    unregistered_patches = []
    for path in self.paths:
        # Raw strings avoid invalid-escape warnings in the regex patterns.
        for file in foundations.walkers.files_walker(
                path, (r"\.{0}$".format(self.__extension),), (r"\._",)):
            name = foundations.strings.get_splitext_basename(file)
            if not self.register_patch(name, file):
                unregistered_patches.append(name)

    if not unregistered_patches:
        return True
    else:
        raise umbra.exceptions.PatchRegistrationError(
            "{0} | '{1}' patches failed to register!".format(
                self.__class__.__name__, ", ".join(unregistered_patches)))
java
public ScoreNode nextScoreNode() throws IOException {
    while (childHits != null) {
        ScoreNode sn = childHits.nextScoreNode();
        if (sn != null) {
            return sn;
        } else {
            fetchNextChildHits();
        }
    }
    // if we get here there are no more score nodes
    return null;
}
java
void fireEntryLeave(AsteriskQueueEntryImpl entry) {
    synchronized (listeners) {
        for (AsteriskQueueListener listener : listeners) {
            try {
                listener.onEntryLeave(entry);
            } catch (Exception e) {
                logger.warn("Exception in onEntryLeave()", e);
            }
        }
    }
}
python
def parse_pagination(headers):
    """
    Parses headers to create a pagination object.

    :param headers: HTTP Headers
    :type headers: dict
    :return: Navigation object for pagination
    :rtype: _Navigation
    """
    links = {
        link.rel: parse_qs(link.href).get("page", None)
        for link in link_header.parse(headers.get("Link", "")).links
    }
    return _Navigation(
        links.get("previous", [None])[0],
        links.get("next", [None])[0],
        links.get("last", [None])[0],
        links.get("current", [None])[0],
        links.get("first", [None])[0]
    )
python
def get_email_regex(self):
    """
    Return a regex pattern matching valid email addresses. Uses the same
    logic as the django validator, with the following exceptions:

    - Internationalized domain names not supported
    - IP addresses not supported
    - Strips lookbehinds (not supported in javascript regular expressions)
    """
    validator = self.default_validators[0]
    user_regex = validator.user_regex.pattern.replace(r'\Z', '@')
    domain_patterns = ([re.escape(domain) + '$' for domain in
                        validator.domain_whitelist] +
                       [validator.domain_regex.pattern.replace(r'\Z', '$')])
    domain_regex = '({0})'.format('|'.join(domain_patterns))
    email_regex = user_regex + domain_regex
    return re.sub(r'\(\?\<[^()]*?\)', '', email_regex)
java
@Override
public int countByG_E(long groupId, String engineKey) {
    FinderPath finderPath = FINDER_PATH_COUNT_BY_G_E;
    Object[] finderArgs = new Object[] { groupId, engineKey };

    Long count = (Long)finderCache.getResult(finderPath, finderArgs, this);

    if (count == null) {
        StringBundler query = new StringBundler(3);

        query.append(_SQL_COUNT_COMMERCESHIPPINGMETHOD_WHERE);
        query.append(_FINDER_COLUMN_G_E_GROUPID_2);

        boolean bindEngineKey = false;

        if (engineKey == null) {
            query.append(_FINDER_COLUMN_G_E_ENGINEKEY_1);
        }
        else if (engineKey.equals("")) {
            query.append(_FINDER_COLUMN_G_E_ENGINEKEY_3);
        }
        else {
            bindEngineKey = true;
            query.append(_FINDER_COLUMN_G_E_ENGINEKEY_2);
        }

        String sql = query.toString();

        Session session = null;

        try {
            session = openSession();

            Query q = session.createQuery(sql);
            QueryPos qPos = QueryPos.getInstance(q);

            qPos.add(groupId);

            if (bindEngineKey) {
                qPos.add(engineKey);
            }

            count = (Long)q.uniqueResult();

            finderCache.putResult(finderPath, finderArgs, count);
        }
        catch (Exception e) {
            finderCache.removeResult(finderPath, finderArgs);

            throw processException(e);
        }
        finally {
            closeSession(session);
        }
    }

    return count.intValue();
}
java
public String convertServiceSimpleTypeToString(EDataType eDataType, Object instanceValue) {
    return instanceValue == null ? null : instanceValue.toString();
}
java
public int rank(Type t) {
    t = t.unannotatedType();
    switch (t.getTag()) {
    case CLASS: {
        ClassType cls = (ClassType) t;
        if (cls.rank_field < 0) {
            Name fullname = cls.tsym.getQualifiedName();
            if (fullname == names.java_lang_Object)
                cls.rank_field = 0;
            else {
                int r = rank(supertype(cls));
                for (List<Type> l = interfaces(cls); l.nonEmpty(); l = l.tail) {
                    if (rank(l.head) > r)
                        r = rank(l.head);
                }
                cls.rank_field = r + 1;
            }
        }
        return cls.rank_field;
    }
    case TYPEVAR: {
        TypeVar tvar = (TypeVar) t;
        if (tvar.rank_field < 0) {
            int r = rank(supertype(tvar));
            for (List<Type> l = interfaces(tvar); l.nonEmpty(); l = l.tail) {
                if (rank(l.head) > r)
                    r = rank(l.head);
            }
            tvar.rank_field = r + 1;
        }
        return tvar.rank_field;
    }
    case ERROR:
    case NONE:
        return 0;
    default:
        throw new AssertionError();
    }
}
java
void checkInternalConsistency() throws IllegalStateException {
    if (encryptionKey == null) {
        throw new IllegalStateException("Missing encryption key");
    }
    switch (state) {
    case LOGGED_IN:
        if (StringUtils.isNullOrEmpty(userId)) {
            throw new IllegalStateException("Missing user id");
        }
        break;
    case LOGGED_OUT:
        throw new IllegalStateException("Invalid conversation state: " + state);
    default:
        break;
    }
}
java
public DoubleProperty arcHeightProperty() {
    if (this.arcHeight == null) {
        this.arcHeight = new DependentSimpleDoubleProperty<ReadOnlyDoubleProperty>(
                this, MathFXAttributeNames.ARC_HEIGHT, heightProperty()) {
            @Override
            protected void invalidated(ReadOnlyDoubleProperty dependency) {
                final double value = get();
                if (value < 0.) {
                    set(0.);
                } else {
                    final double maxArcHeight = dependency.get() / 2.;
                    if (value > maxArcHeight) {
                        set(maxArcHeight);
                    }
                }
            }
        };
    }
    return this.arcHeight;
}
java
public static InstanceTypeDescription construct(InstanceType instanceType,
        HardwareDescription hardwareDescription, int numberOfAvailableInstances) {
    return new InstanceTypeDescription(instanceType, hardwareDescription, numberOfAvailableInstances);
}
python
def format(self, altitude=None, deg_char='', min_char='m', sec_char='s'):
    """
    Format decimal degrees (DD) to degrees minutes seconds (DMS)
    """
    latitude = "%s %s" % (
        format_degrees(abs(self.latitude), symbols={
            'deg': deg_char, 'arcmin': min_char, 'arcsec': sec_char
        }),
        self.latitude >= 0 and 'N' or 'S'
    )
    longitude = "%s %s" % (
        format_degrees(abs(self.longitude), symbols={
            'deg': deg_char, 'arcmin': min_char, 'arcsec': sec_char
        }),
        self.longitude >= 0 and 'E' or 'W'
    )
    coordinates = [latitude, longitude]

    if altitude is None:
        altitude = bool(self.altitude)
    if altitude:
        if not isinstance(altitude, string_compare):
            altitude = 'km'
        coordinates.append(self.format_altitude(altitude))

    return ", ".join(coordinates)
java
@BetaApi
public final Operation removeHealthCheckTargetPool(
        String targetPool,
        TargetPoolsRemoveHealthCheckRequest targetPoolsRemoveHealthCheckRequestResource) {
    RemoveHealthCheckTargetPoolHttpRequest request =
        RemoveHealthCheckTargetPoolHttpRequest.newBuilder()
            .setTargetPool(targetPool)
            .setTargetPoolsRemoveHealthCheckRequestResource(
                targetPoolsRemoveHealthCheckRequestResource)
            .build();
    return removeHealthCheckTargetPool(request);
}
python
def create_executable_script(filepath, body, program=None):
    """Create an executable script.

    Args:
        filepath (str): File to create.
        body (str or callable): Contents of the script. If a callable, its
            code is used as the script body.
        program (str): Name of program to launch the script, 'python' if None
    """
    program = program or "python"

    if callable(body):
        from rez.utils.sourcecode import SourceCode
        code = SourceCode(func=body)
        body = code.source

    if not body.endswith('\n'):
        body += '\n'

    with open(filepath, 'w') as f:
        # TODO: make cross platform
        f.write("#!/usr/bin/env %s\n" % program)
        f.write(body)

    # TODO: Although Windows supports os.chmod you can only set the readonly
    # flag. Setting the file readonly breaks the unit tests that expect to
    # clean up the files once the test has run. Temporarily we don't bother
    # setting the permissions, but this will need to change.
    if os.name == "posix":
        os.chmod(filepath,
                 stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH
                 | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
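A usage sketch (the path is a placeholder; per the docstring, a callable's code becomes the script body):

def body():
    print("hello from the generated script")

create_executable_script("/tmp/hello.py", body)
# /tmp/hello.py now starts with '#!/usr/bin/env python' and, on POSIX,
# carries read+execute permissions for user, group and others.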
java
public AttributeValue getAttributeValueObject_2() throws DevFailed {
    // Build a DeviceAttribute_3 from this
    final DeviceAttribute_3DAODefaultImpl att = new DeviceAttribute_3DAODefaultImpl();
    att.setAttributeValue(this);
    return att.getAttributeValueObject_2();
}
java
protected static void assertArgumentNotNull(String variableName, Object value) {
    if (variableName == null) {
        String msg = "The value should not be null: variableName=null value=" + value;
        throw new IllegalArgumentException(msg);
    }
    if (value == null) {
        String msg = "The value should not be null: variableName=" + variableName;
        throw new IllegalArgumentException(msg);
    }
}
java
public static ShopGetResult shopGet(String accessToken, ShopInfo shopInfo) {
    return shopGet(accessToken, JsonUtil.toJSONString(shopInfo));
}
python
def send_document(chat_id, document,
                  reply_to_message_id=None, reply_markup=None,
                  **kwargs):
    """
    Use this method to send general files.

    :param chat_id: Unique identifier for the message recipient — User or
        GroupChat id
    :param document: File to send. You can either pass a file_id as String
        to resend a file that is already on the Telegram servers, or upload
        a new file using multipart/form-data.
    :param reply_to_message_id: If the message is a reply, ID of the
        original message
    :param reply_markup: Additional interface options. A JSON-serialized
        object for a custom reply keyboard, instructions to hide keyboard
        or to force a reply from the user.
    :param \*\*kwargs: Args that get passed down to
        :class:`TelegramBotRPCRequest`

    :type chat_id: int
    :type document: InputFile or str
    :type reply_to_message_id: int
    :type reply_markup: ReplyKeyboardMarkup or ReplyKeyboardHide or ForceReply

    :returns: On success, the sent Message is returned.
    :rtype: TelegramBotRPCRequest
    """
    files = None
    if isinstance(document, InputFile):
        files = [document]
        document = None
    elif not isinstance(document, str):
        raise Exception('document must be instance of InputFile or str')

    # required args
    params = dict(
        chat_id=chat_id,
        document=document
    )

    # optional args
    params.update(
        _clean_params(
            reply_to_message_id=reply_to_message_id,
            reply_markup=reply_markup
        )
    )

    return TelegramBotRPCRequest('sendDocument', params=params, files=files,
                                 on_result=Message.from_result, **kwargs)
python
def visit_assignname(self, node, parent, node_name=None):
    """visit a node and return an AssignName node"""
    newnode = nodes.AssignName(
        node_name,
        getattr(node, "lineno", None),
        getattr(node, "col_offset", None),
        parent,
    )
    self._save_assignment(newnode)
    return newnode
python
def mod(self):
    """
    Cached compiled binary of the Generic_Code class.

    To clear cache invoke :meth:`clear_mod_cache`.
    """
    if self._mod is None:
        self._mod = self.compile_and_import_binary()
    return self._mod
java
private void writeObject(ObjectOutputStream out) throws IOException {
    super.write(out);
    out.writeUTF(this.mapreduceInputFormat.getClass().getName());
    out.writeUTF(this.keyClass.getName());
    out.writeUTF(this.valueClass.getName());
    this.configuration.write(out);
}
python
def _parseSCPDActions(self, actionListElement, actions, variableParameterDict):
    """Internal method to parse the SCPD definitions.

    :param actionListElement: the action xml element
    :type actionListElement: xml.etree.ElementTree.Element
    :param dict actions: a container to store all actions
    :param dict variableParameterDict: remember where a variable gets referenced
    """
    # go through all action elements in this list
    # (iterate over the element directly; getchildren() is deprecated)
    for actionElement in actionListElement:
        action = {}
        # go through all elements in this action
        for inActionElement in actionElement:
            tagName = inActionElement.tag.lower()
            if tagName.endswith("name"):
                # action name
                action["name"] = inActionElement.text
            elif tagName.endswith("argumentlist"):
                # parse the arguments of this action
                for argumentElement in inActionElement:
                    argument = {}
                    # go through the argument definition
                    for inArgumentElement in argumentElement:
                        tagName = inArgumentElement.tag.lower()
                        if tagName.endswith("name"):
                            # remember the argument name
                            argument["name"] = inArgumentElement.text
                        elif tagName.endswith("direction"):
                            # is it an in or out argument
                            argument["direction"] = inArgumentElement.text
                        elif tagName.endswith("relatedstatevariable"):
                            # remember the argument and save it under the
                            # variable name to dereference later
                            argument["variable"] = inArgumentElement.text
                            if argument["variable"] not in variableParameterDict.keys():
                                variableParameterDict[argument["variable"]] = []
                            variableParameterDict[argument["variable"]].append(argument)

                    if "name" not in argument.keys():
                        raise ValueError("Parameter definition does not contain a name.")
                    if "direction" not in argument.keys():
                        raise ValueError("Parameter definition does not contain a direction: "
                                         + argument["name"])
                    direction = argument["direction"] + "Parameter"

                    # store the actual argument in the action
                    if direction not in action.keys():
                        action[direction] = {}
                    action[direction][argument["name"]] = argument

                    # cleanup; the argument is stored under its name, so keeping
                    # these values in it as well would be redundant
                    del argument["name"]
                    del argument["direction"]

        if "name" not in action.keys():
            raise ValueError("Action does not have a name assigned.")
        if action["name"] in actions.keys():
            raise ValueError("Action name defined more than once: " + action["name"])

        # save the action under its name
        actions[action["name"]] = action
        # cleanup; as the action is saved under its name in the container,
        # keeping the name inside it would be redundant
        del action["name"]
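A self-contained sketch of feeding the parser (hypothetical minimal SCPD fragment; ``parser`` stands for an instance of the surrounding class):

import xml.etree.ElementTree as ET

scpd = ET.fromstring(
    "<actionList><action><name>GetStatus</name><argumentList>"
    "<argument><name>Status</name><direction>out</direction>"
    "<relatedStateVariable>Status</relatedStateVariable>"
    "</argument></argumentList></action></actionList>"
)
actions, variables = {}, {}
parser._parseSCPDActions(scpd, actions, variables)
print(actions["GetStatus"]["outParameter"])  # {'Status': {'variable': 'Status'}}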
python
def cli(env, identifier, label, note):
    """Edits an SSH key."""
    mgr = SoftLayer.SshKeyManager(env.client)

    key_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'SshKey')

    if not mgr.edit_key(key_id, label=label, notes=note):
        raise exceptions.CLIAbort('Failed to edit SSH key')
python
def get_components_old(A, no_depend=False):
    '''
    Returns the components of an undirected graph specified by the binary
    and undirected adjacency matrix A. Components and their constituent
    nodes are assigned the same index and stored in the vector, comps. The
    vector, comp_sizes, contains the number of nodes belonging to each
    component.

    Parameters
    ----------
    A : NxN np.ndarray
        binary undirected adjacency matrix
    no_depend : bool
        If true, doesn't import networkx to do the calculation. Default
        value is false.

    Returns
    -------
    comps : Nx1 np.ndarray
        vector of component assignments for each node
    comp_sizes : Mx1 np.ndarray
        vector of component sizes

    Notes
    -----
    Note: disconnected nodes will appear as components with a component
    size of 1

    Note: The identity of each component (i.e. its numerical value in the
    result) is not guaranteed to be identical to the value returned in BCT,
    although the component topology is.

    Note: networkx is used to do the computation efficiently. If networkx
    is not available a breadth-first search that does not depend on
    networkx is used instead, but this is less efficient. The corresponding
    BCT function does the computation by computing the Dulmage-Mendelsohn
    decomposition. I don't know what a Dulmage-Mendelsohn decomposition is
    and there doesn't appear to be a python equivalent. If you think of a
    way to implement this better, let me know.
    '''
    # nonsquare matrices cannot be symmetric; no need to check
    if not np.all(A == A.T):  # ensure matrix is undirected
        raise BCTParamError('get_components can only be computed for undirected'
                            ' matrices. If your matrix is noisy, correct it'
                            ' with np.around')

    A = binarize(A, copy=True)
    n = len(A)
    np.fill_diagonal(A, 1)

    try:
        if no_depend:
            raise ImportError()
        else:
            import networkx as nx
        net = nx.from_numpy_matrix(A)
        cpts = list(nx.connected_components(net))

        cptvec = np.zeros((n,))
        cptsizes = np.zeros(len(cpts))
        for i, cpt in enumerate(cpts):
            cptsizes[i] = len(cpt)
            for node in cpt:
                cptvec[node] = i + 1

    except ImportError:
        # if networkx is not available use less efficient breadth first search
        cptvec = np.zeros((n,))
        r, _ = breadthdist(A)
        for node, reach in enumerate(r):
            if cptvec[node] > 0:
                continue
            else:
                cptvec[np.where(reach)] = np.max(cptvec) + 1

        cptsizes = np.zeros(np.max(cptvec))
        for i in np.arange(np.max(cptvec)):
            cptsizes[i] = np.size(np.where(cptvec == i + 1))

    return cptvec, cptsizes
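A small usage sketch on a toy graph (assuming numpy is imported as np and the module's helpers are importable):

A = np.array([[0, 1, 0, 0],
              [1, 0, 0, 0],
              [0, 0, 0, 1],
              [0, 0, 1, 0]])
comps, sizes = get_components_old(A)
print(comps)  # e.g. [1. 1. 2. 2.] -- two components, nodes {0,1} and {2,3}
print(sizes)  # [2. 2.]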
python
def NotifyAboutEnd(self):
    """Send out a final notification about the end of this flow."""
    flow_ref = None
    if self.runner_args.client_id:
        flow_ref = rdf_objects.FlowReference(
            client_id=self.client_id, flow_id=self.urn.Basename())

    num_results = len(self.ResultCollection())
    notification_lib.Notify(
        self.creator,
        rdf_objects.UserNotification.Type.TYPE_FLOW_RUN_COMPLETED,
        "Flow %s completed with %d %s" %
        (self.__class__.__name__, num_results,
         num_results == 1 and "result" or "results"),
        rdf_objects.ObjectReference(
            reference_type=rdf_objects.ObjectReference.Type.FLOW,
            flow=flow_ref))
python
def is_jail(name):
    '''
    Return True if jail exists False if not

    CLI Example:

    .. code-block:: bash

        salt '*' poudriere.is_jail <jail name>
    '''
    jails = list_jails()
    for jail in jails:
        if jail.split()[0] == name:
            return True
    return False
java
@Deprecated
double adjustNumberAsInFormatting(double number) {
    if (Double.isNaN(number)) {
        return number;
    }
    number = round(multiply(number));
    if (Double.isInfinite(number)) {
        return number;
    }
    return toDigitList(number).getDouble();
}
python
def import_app_credentials(filename=CREDENTIALS_FILENAME):
    """Import app credentials from configuration file.

    Parameters
        filename (str)
            Name of configuration file.

    Returns
        credentials (dict)
            All your app credentials and information
            imported from the configuration file.
    """
    with open(filename, 'r') as config_file:
        config = safe_load(config_file)

    client_id = config['client_id']
    client_secret = config['client_secret']
    redirect_url = config['redirect_url']

    config_values = [client_id, client_secret, redirect_url]

    for value in config_values:
        if value in DEFAULT_CONFIG_VALUES:
            exit('Missing credentials in {}'.format(filename))

    credentials = {
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_url': redirect_url,
        'scopes': set(config['scopes']),
    }

    return credentials
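A hedged usage sketch; the expected YAML shape is inferred from the keys read above (all names and values are placeholders):

# config.yaml:
#   client_id: YOUR_CLIENT_ID
#   client_secret: YOUR_CLIENT_SECRET
#   redirect_url: http://localhost:8000/callback
#   scopes: [profile, history]
credentials = import_app_credentials("config.yaml")
print(sorted(credentials["scopes"]))  # ['history', 'profile']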
java
protected void init(int code, String phrase, boolean isError) {
    this.myPhrase = phrase;
    this.myPhraseBytes = HttpChannelUtils.getEnglishBytes(phrase);
    this.myIntCode = code;
    if (isError) {
        this.myError = new HttpError(code, this.myPhrase);
    }
    initSpecialArrays();
    checkForAllowedBody();
}
java
public DockerRuleBuilder waitForMessage(String waitForMessage, int waitSeconds) {
    this.waitConditions.add(WaitFor.logMessage(waitForMessage));
    this.waitForSeconds = waitSeconds;
    return this;
}
java
@Override
@Nonnull
@OverridingMethodsMustInvokeSuper
protected IMicroNode internalConvertToMicroNode (@Nonnull final IHCConversionSettingsToNode aConversionSettings) {
    // Create the element
    final IMicroElement ret = createMicroElement (aConversionSettings);
    if (ret == null)
        throw new IllegalStateException ("Created a null element!");

    // Set all HTML attributes etc.
    fillMicroElement (ret, aConversionSettings);

    // Optional callback after everything was done (implementation dependent)
    finishMicroElement (ret, aConversionSettings);

    return ret;
}
java
public ValueContainer[] getKeyValues(ClassDescriptor cld, Identity oid) throws PersistenceBrokerException {
    return getKeyValues(cld, oid, true);
}
python
def running(name, image=None, skip_translate=None, ignore_collisions=False, validate_ip_addrs=True, force=False, watch_action='force', start=True, shutdown_timeout=None, client_timeout=salt.utils.docker.CLIENT_TIMEOUT, networks=None, **kwargs): ''' Ensure that a container with a specific configuration is present and running name Name of the container image Image to use for the container .. note:: This state will pull the image if it is not present. However, if the image needs to be built from a Dockerfile or loaded from a saved image, or if you would like to use requisites to trigger a replacement of the container when the image is updated, then the :py:func:`docker_image.present <salt.states.dockermod.image_present>` state should be used to manage the image. .. versionchanged:: 2018.3.0 If no tag is specified in the image name, and nothing matching the specified image is pulled on the minion, the ``docker pull`` that retrieves the image will pull *all tags* for the image. A tag of ``latest`` is no longer implicit for the pull. For this reason, it is recommended to specify the image in ``repo:tag`` notation. .. _docker-container-running-skip-translate: skip_translate This function translates Salt CLI or SLS input into the format which docker-py_ expects. However, in the event that Salt's translation logic fails (due to potential changes in the Docker Remote API, or to bugs in the translation code), this argument can be used to exert granular control over which arguments are translated and which are not. Pass this argument as a comma-separated list (or Python list) of arguments, and translation for each passed argument name will be skipped. Alternatively, pass ``True`` and *all* translation will be skipped. Skipping tranlsation allows for arguments to be formatted directly in the format which docker-py_ expects. This allows for API changes and other issues to be more easily worked around. An example of using this option to skip translation would be: For example, imagine that there is an issue with processing the ``port_bindings`` argument, and the following configuration no longer works as expected: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - port_bindings: - 10.2.9.10:8080:80 By using ``skip_translate``, you can forego the input translation and configure the port binding in the format docker-py_ needs: .. code-block:: yaml mycontainer: docker_container.running: - image: 7.3.1611 - skip_translate: port_bindings - port_bindings: {8080: [('10.2.9.10', 80)], '4193/udp': 9314} See the following links for more information: - `docker-py Low-level API`_ - `Docker Engine API`_ .. _docker-py: https://pypi.python.org/pypi/docker-py .. _`docker-py Low-level API`: http://docker-py.readthedocs.io/en/stable/api.html#docker.api.container.ContainerApiMixin.create_container .. _`Docker Engine API`: https://docs.docker.com/engine/api/v1.33/#operation/ContainerCreate ignore_collisions : False Since many of docker-py_'s arguments differ in name from their CLI counterparts (with which most Docker users are more familiar), Salt detects usage of these and aliases them to the docker-py_ version of that argument so that both CLI and API versions of a given argument are supported. However, if both the alias and the docker-py_ version of the same argument (e.g. ``env`` and ``environment``) are used, an error will be raised. Set this argument to ``True`` to suppress these errors and keep the docker-py_ version of the argument. 
validate_ip_addrs : True For parameters which accept IP addresses as input, IP address validation will be performed. To disable, set this to ``False`` force : False Set this parameter to ``True`` to force Salt to re-create the container irrespective of whether or not it is configured as desired. watch_action : force Control what type of action is taken when this state :ref:`watches <requisites-watch>` another state that has changes. The default action is ``force``, which runs the state with ``force`` set to ``True``, triggering a rebuild of the container. If any other value is passed, it will be assumed to be a kill signal. If the container matches the specified configuration, and is running, then the action will be to send that signal to the container. Kill signals can be either strings or numbers, and are defined in the **Standard Signals** section of the ``signal(7)`` manpage. Run ``man 7 signal`` on a Linux host to browse this manpage. For example: .. code-block:: yaml mycontainer: docker_container.running: - image: busybox - watch_action: SIGHUP - watch: - file: some_file .. note:: If the container differs from the specified configuration, or is not running, then instead of sending a signal to the container, the container will be re-created/started and no signal will be sent. start : True Set to ``False`` to suppress starting of the container if it exists, matches the desired configuration, but is not running. This is useful for data-only containers, or for non-daemonized container processes, such as the Django ``migrate`` and ``collectstatic`` commands. In instances such as this, the container only needs to be started the first time. shutdown_timeout If the container needs to be replaced, the container will be stopped using :py:func:`docker.stop <salt.modules.dockermod.stop>`. If a ``shutdown_timout`` is not set, and the container was created using ``stop_timeout``, that timeout will be used. If neither of these values were set, then a timeout of 10 seconds will be used. .. versionchanged:: 2017.7.0 This option was renamed from ``stop_timeout`` to ``shutdown_timeout`` to accommodate the ``stop_timeout`` container configuration setting. client_timeout : 60 Timeout in seconds for the Docker client. This is not a timeout for this function, but for receiving a response from the API. .. note:: This is only used if Salt needs to pull the requested image. .. _salt-states-docker-container-network-management: **NETWORK MANAGEMENT** .. versionadded:: 2018.3.0 .. versionchanged:: 2019.2.0 If the ``networks`` option is used, any networks (including the default ``bridge`` network) which are not specified will be disconnected. The ``networks`` argument can be used to ensure that a container is attached to one or more networks. Optionally, arguments can be passed to the networks. In the example below, ``net1`` is being configured with arguments, while ``net2`` and ``bridge`` are being configured *without* arguments: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: - net1: - aliases: - bar - baz - ipv4_address: 10.0.20.50 - net2 - bridge - require: - docker_network: net1 - docker_network: net2 The supported arguments are the ones from the docker-py's `connect_container_to_network`_ function (other than ``container`` and ``net_id``). .. important:: Unlike with the arguments described in the **CONTAINER CONFIGURATION PARAMETERS** section below, these network configuration parameters are not translated at all. 
Consult the `connect_container_to_network`_ documentation for the correct type/format of data to pass. .. _`connect_container_to_network`: https://docker-py.readthedocs.io/en/stable/api.html#docker.api.network.NetworkApiMixin.connect_container_to_network To start a container with no network connectivity (only possible in 2019.2.0 and later) pass this option as an empty list. For example: .. code-block:: yaml foo: docker_container.running: - image: myuser/myimage:foo - networks: [] **CONTAINER CONFIGURATION PARAMETERS** auto_remove (or *rm*) : False Enable auto-removal of the container on daemon side when the container’s process exits (analogous to running a docker container with ``--rm`` on the CLI). .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - auto_remove: True binds Files/directories to bind mount. Each bind mount should be passed in one of the following formats: - ``<host_path>:<container_path>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. - ``<host_path>:<container_path>:<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path`` with read-write access. Additionally, the specified selinux context will be set within the container. - ``<host_path>:<container_path>:<read_only>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. - ``<host_path>:<container_path>:<read_only>,<selinux_context>`` - ``host_path`` is mounted within the container as ``container_path``, with the read-only or read-write setting explicitly defined. Additionally, the specified selinux context will be set within the container. ``<read_only>`` can be either ``rw`` for read-write access, or ``ro`` for read-only access. When omitted, it is assumed to be read-write. ``<selinux_context>`` can be ``z`` if the volume is shared between multiple containers, or ``Z`` if the volume should be private. .. note:: When both ``<read_only>`` and ``<selinux_context>`` are specified, there must be a comma before ``<selinux_context>``. Binds can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: /srv/www:/var/www:ro,/etc/foo.conf:/usr/local/etc/foo.conf:rw .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro - /home/myuser/conf/foo.conf:/etc/foo.conf:rw However, in cases where both ro/rw and an selinux context are combined, the only option is to use a YAML list, like so: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:rw,Z Since the second bind in the previous example is mounted read-write, the ``rw`` and comma can be dropped. For example: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - binds: - /srv/www:/var/www:ro,Z - /home/myuser/conf/foo.conf:/etc/foo.conf:Z blkio_weight Block IO weight (relative weight), accepts a weight value between 10 and 1000. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight: 100 blkio_weight_device Block IO weight (relative device weight), specified as a list of expressions in the format ``PATH:RATE`` .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - blkio_weight_device: /dev/sda:100 cap_add List of capabilities to add within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_add: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. cap_drop List of capabilities to drop within the container. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: SYS_ADMIN,MKNOD .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cap_drop: - SYS_ADMIN - MKNOD .. note:: This option requires Docker 1.2.0 or newer. command (or *cmd*) Command to run in the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - command: bash cpuset_cpus (or *cpuset*) CPUs on which which to allow execution, specified as a string containing a range (e.g. ``0-3``) or a comma-separated list of CPUs (e.g. ``0,1``). .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_cpus: "0,1" cpuset_mems Memory nodes on which which to allow execution, specified as a string containing a range (e.g. ``0-3``) or a comma-separated list of MEMs (e.g. ``0,1``). Only effective on NUMA systems. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpuset_mems: "0,1" cpu_group The length of a CPU period in microseconds .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_group: 100000 cpu_period Microseconds of CPU time that the container can get in a CPU period .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_period: 50000 cpu_shares CPU shares (relative weight), specified as an integer between 2 and 1024. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - cpu_shares: 512 detach : False If ``True``, run the container's command in the background (daemon mode) .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - detach: True devices List of host devices to expose within the container. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: /dev/net/tun,/dev/xvda1:/dev/xvda1,/dev/xvdb1:/dev/xvdb1:r .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices: - /dev/net/tun - /dev/xvda1:/dev/xvda1 - /dev/xvdb1:/dev/xvdb1:r device_read_bps Limit read rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: /dev/sda:1mb,/dev/sdb:5mb .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit read rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 device_write_bps Limit write rate (bytes per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is either an integer number of bytes, or a string ending in ``kb``, ``mb``, or ``gb``. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: /dev/sda:1mb,/dev/sdb:5mb .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_write_bps: - /dev/sda:1mb - /dev/sdb:5mb device_read_iops Limit write rate (I/O per second) from a device, specified as a list of expressions in the format ``PATH:RATE``, where ``RATE`` is a number of I/O operations. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: /dev/sda:1000,/dev/sdb:500 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - devices_read_iops: - /dev/sda:1000 - /dev/sdb:500 dns List of DNS nameservers. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: 8.8.8.8,8.8.4.4 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns: - 8.8.8.8 - 8.8.4.4 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` dns_opt Additional options to be added to the container’s ``resolv.conf`` file. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: ndots:9 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_opt: - ndots:9 dns_search List of DNS search domains. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: foo1.domain.tld,foo2.domain.tld .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dns_search: - foo1.domain.tld - foo2.domain.tld domainname The domain name to use for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - dommainname: domain.tld entrypoint Entrypoint for the container .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: "mycmd --arg1 --arg2" This argument can also be specified as a list: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - entrypoint: - mycmd - --arg1 - --arg2 environment Either a list of variable/value mappings, or a list of strings in the format ``VARNAME=value``. The below three examples are equivalent: .. 
code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1: value - VAR2: value .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: 'VAR1=value,VAR2=value' .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - environment: - VAR1=value - VAR2=value extra_hosts Additional hosts to add to the container's /etc/hosts file. Can be expressed as a comma-separated list or a Python list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: web1:10.9.8.7,web2:10.9.8.8 .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - extra_hosts: - web1:10.9.8.7 - web2:10.9.8.8 .. note:: To skip IP address validation, use ``validate_ip_addrs=False`` .. note:: This option requires Docker 1.3.0 or newer. group_add List of additional group names and/or IDs that the container process will run as. Can be expressed as a comma-separated list or a YAML list. The below two examples are equivalent: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: web,network .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - group_add: - web - network hostname Hostname of the container. If not provided, the value passed as the container's``name`` will be used for the hostname. .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 .. warning:: ``hostname`` cannot be set if ``network_mode`` is set to ``host``. The below example will result in an error: .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - hostname: web1 - network_mode: host interactive (or *stdin_open*) : False Leave stdin open, even if not attached .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - interactive: True ipc_mode (or *ipc*) Set the IPC mode for the container. The default behavior is to create a private IPC namespace for the container, but this option can be used to change that behavior: - ``container:<container_name_or_id>`` reuses another container shared memory, semaphores and message queues - ``host``: use the host's shared memory, semaphores and message queues .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: container:foo .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - ipc_mode: host .. warning:: Using ``host`` gives the container full access to local shared memory and is therefore considered insecure. isolation Specifies the type of isolation technology used by containers .. code-block:: yaml foo: docker_container.running: - image: bar/baz:latest - isolation: hyperv .. note:: The default value on Windows server is ``process``, while the default value on Windows client is ``hyperv``. On Linux, only ``default`` is supported. labels Add metadata to the container. Labels can be set both with and without values, and labels with values can be passed either as ``key=value`` or ``key: value`` pairs. For example, while the below would be very confusing to read, it is technically valid, and demonstrates the different ways in which labels can be passed: .. code-block:: yaml mynet: docker_network.present: - labels: - foo - bar=baz - hello: world The labels can also simply be passed as a YAML dictionary, though this can be error-prone due to some :ref:`idiosyncrasies <yaml-idiosyncrasies>` with how PyYAML loads nested data structures: .. 
    .. code-block:: yaml

        foo:
          docker_network.present:
            - labels:
                foo: ''
                bar: baz
                hello: world

    .. versionchanged:: 2018.3.0
        Methods for specifying labels can now be mixed. Earlier releases
        required either labels with or without values.

links
    Link this container to another. Links can be specified as a list of
    mappings or a comma-separated or Python list of expressions in the
    format ``<container_name_or_id>:<link_alias>``. The below three
    examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - links:
              - web1: link1
              - web2: link2

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - links: web1:link1,web2:link2

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - links:
              - web1:link1
              - web2:link2

log_driver and log_opt
    Set container's logging driver and options to configure that driver.
    Requires Docker 1.6 or newer.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - log_driver: syslog
            - log_opt:
              - syslog-address: tcp://192.168.0.42
              - syslog-facility: daemon

    The ``log_opt`` can also be expressed as a comma-separated or YAML
    list of ``key=value`` pairs. The below two examples are equivalent to
    the above one:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - log_driver: syslog
            - log_opt: "syslog-address=tcp://192.168.0.42,syslog-facility=daemon"

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - log_driver: syslog
            - log_opt:
              - syslog-address=tcp://192.168.0.42
              - syslog-facility=daemon

    .. note::

        The logging driver feature was improved in Docker 1.13 introducing
        option name changes. Please see Docker's `Configure logging
        drivers`_ documentation for more information.

        .. _`Configure logging drivers`: https://docs.docker.com/engine/admin/logging/overview/

lxc_conf
    Additional LXC configuration parameters to set before starting the
    container. Either a list of variable/value mappings, or a list of
    strings in the format ``VARNAME=value``. The below three examples are
    equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - lxc_conf:
              - lxc.utsname: docker
              - lxc.arch: x86_64

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - lxc_conf: lxc.utsname=docker,lxc.arch=x86_64

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - lxc_conf:
              - lxc.utsname=docker
              - lxc.arch=x86_64

    .. note::

        These LXC configuration parameters will only have the desired
        effect if the container is using the LXC execution driver, which
        has been deprecated for some time.

mac_address
    MAC address to use for the container. If not specified, a random MAC
    address will be used.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - mac_address: 01:23:45:67:89:0a

mem_limit (or *memory*) : 0
    Memory limit. Can be specified in bytes or using single-letter units
    (e.g. ``512M``, ``2G``, etc.). A value of ``0`` (the default) means no
    memory limit.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - mem_limit: 512M

mem_swappiness
    Tune a container's memory swappiness behavior. Accepts an integer
    between 0 and 100.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - mem_swappiness: 60

memswap_limit (or *memory_swap*) : -1
    Total memory limit (memory plus swap). Set to ``-1`` to disable swap.
    A value of ``0`` means no swap limit.
    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - memswap_limit: 1G

network_disabled : False
    If ``True``, networking will be disabled within the container

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - network_disabled: True

network_mode : bridge
    One of the following:

    - ``bridge`` - Creates a new network stack for the container on the
      docker bridge
    - ``none`` - No networking (equivalent of the Docker CLI argument
      ``--net=none``). Not to be confused with Python's ``None``.
    - ``container:<name_or_id>`` - Reuses another container's network stack
    - ``host`` - Use the host's network stack inside the container
    - Any name that identifies an existing network that might be created
      with ``docker_network.present``.

    .. warning::

        Using ``host`` mode gives the container full access to the host
        system's services (such as D-bus), and is therefore considered
        insecure.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - network_mode: "none"

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - network_mode: container:web1

oom_kill_disable
    Whether to disable OOM killer

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - oom_kill_disable: False

oom_score_adj
    An integer value containing the score given to the container in order
    to tune OOM killer preferences

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - oom_score_adj: 500

pid_mode
    Set to ``host`` to use the host's PID namespace within the container.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - pid_mode: host

    .. note::

        This option requires Docker 1.5.0 or newer.

pids_limit
    Set the container's PID limit. Set to ``-1`` for unlimited.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - pids_limit: 2000

port_bindings (or *publish*)
    Bind exposed ports. Port bindings should be passed in the same way as
    the ``--publish`` argument to the ``docker run`` CLI command:

    - ``ip:hostPort:containerPort`` - Bind a specific IP and port on the
      host to a specific port within the container.
    - ``ip::containerPort`` - Bind a specific IP and an ephemeral port to
      a specific port within the container.
    - ``hostPort:containerPort`` - Bind a specific port on all of the
      host's interfaces to a specific port within the container.
    - ``containerPort`` - Bind an ephemeral port on all of the host's
      interfaces to a specific port within the container.

    Multiple bindings can be separated by commas, or expressed as a YAML
    list, and port ranges can be defined using dashes. The below two
    examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - port_bindings: "4505-4506:14505-14506,2123:2123/udp,8080"

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - port_bindings:
              - 4505-4506:14505-14506
              - 2123:2123/udp
              - 8080

    .. note::

        When specifying a protocol, it must be passed in the
        ``containerPort`` value, as seen in the examples above.

ports
    A list of ports to expose on the container. Can either be a
    comma-separated list or a YAML list. If the protocol is omitted, the
    port will be assumed to be a TCP port. The below two examples are
    equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - ports: 1111,2222/udp
    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - ports:
              - 1111
              - 2222/udp

privileged : False
    If ``True``, runs the exec process with extended privileges

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - privileged: True

publish_all_ports (or *publish_all*) : False
    Publish all ports to the host

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - ports: 8080
            - publish_all_ports: True

read_only : False
    If ``True``, mount the container's root filesystem as read only

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - read_only: True

restart_policy (or *restart*)
    Set a restart policy for the container. Must be passed as a string in
    the format ``policy[:retry_count]`` where ``policy`` is one of
    ``always``, ``unless-stopped``, or ``on-failure``, and ``retry_count``
    is an optional limit to the number of retries. The retry count is
    ignored when using the ``always`` or ``unless-stopped`` restart policy.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - restart_policy: on-failure:5

        bar:
          docker_container.running:
            - image: bar/baz:latest
            - restart_policy: always

security_opt (or *security_opts*)
    Security configuration for MLS systems such as SELinux and AppArmor.
    Can be expressed as a comma-separated list or a YAML list. The below
    two examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - security_opt: apparmor:unconfined

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - security_opt:
              - apparmor:unconfined

    .. important::

        Some security options can contain commas. In these cases, this
        argument *must* be passed as a Python list, as splitting by comma
        will result in an invalid configuration.

    .. note::

        See the documentation for security_opt at
        https://docs.docker.com/engine/reference/run/#security-configuration

shm_size
    Size of /dev/shm

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - shm_size: 128M

stop_signal
    Specify the signal docker will send to the container when stopping.
    Useful when running systemd as PID 1 inside the container.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - stop_signal: SIGRTMIN+3

    .. note::

        This option requires Docker 1.9.0 or newer and docker-py 1.7.0 or
        newer.

    .. versionadded:: 2016.11.0

stop_timeout
    Timeout to stop the container, in seconds

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - stop_timeout: 5

    .. note::

        In releases prior to 2017.7.0, this option was not set in the
        container configuration, but rather this timeout was enforced only
        when shutting down an existing container to replace it. To remove
        the ambiguity, and to allow for the container to have a stop
        timeout set for it, the old ``stop_timeout`` argument has been
        renamed to ``shutdown_timeout``, while ``stop_timeout`` now refers
        to the container's configured stop timeout.

storage_opt
    Storage driver options for the container. Can be either a list of
    strings in the format ``option=value``, or a list of mappings between
    option and value. The below three examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - storage_opt:
              - dm.basesize: 40G

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - storage_opt: dm.basesize=40G
    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - storage_opt:
              - dm.basesize=40G

sysctls (or *sysctl*)
    Set sysctl options for the container. Can be either a list of strings
    in the format ``option=value``, or a list of mappings between option
    and value. The below three examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - sysctls:
              - fs.nr_open: 1048576
              - kernel.pid_max: 32768

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - sysctls: fs.nr_open=1048576,kernel.pid_max=32768

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - sysctls:
              - fs.nr_open=1048576
              - kernel.pid_max=32768

tmpfs
    A map of container directories which should be replaced by tmpfs
    mounts and their corresponding mount options.

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - tmpfs:
              - /run: rw,noexec,nosuid,size=65536k

tty : False
    Attach TTYs

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - tty: True

ulimits
    List of ulimits. These limits should be passed in the format
    ``<ulimit_name>:<soft_limit>:<hard_limit>``, with the hard limit being
    optional. Can be expressed as a comma-separated list or a YAML list.
    The below two examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - ulimits: nofile=1024:1024,nproc=60

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - ulimits:
              - nofile=1024:1024
              - nproc=60

user
    User under which to run exec process

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - user: foo

userns_mode (or *user_ns_mode*)
    Sets the user namespace mode, when the user namespace remapping option
    is enabled

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - userns_mode: host

volumes (or *volume*)
    List of directories to expose as volumes. Can be expressed as a
    comma-separated list or a YAML list. The below two examples are
    equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - volumes: /mnt/vol1,/mnt/vol2

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - volumes:
              - /mnt/vol1
              - /mnt/vol2

volumes_from
    Container names or IDs from which the container will get volumes. Can
    be expressed as a comma-separated list or a YAML list. The below two
    examples are equivalent:

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - volumes_from: foo

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - volumes_from:
              - foo

volume_driver
    Sets the container's volume driver

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - volume_driver: foobar

working_dir (or *workdir*)
    Working directory inside the container

    .. code-block:: yaml

        foo:
          docker_container.running:
            - image: bar/baz:latest
            - working_dir: /var/log/nginx
'''
ret = {'name': name,
       'changes': {},
       'result': True,
       'comment': ''}

if image is None:
    ret['result'] = False
    ret['comment'] = 'The \'image\' argument is required'
    return ret
elif not isinstance(image, six.string_types):
    image = six.text_type(image)

try:
    # Since we're rewriting the "networks" value below, save the original
    # value here.
configured_networks = networks networks = _parse_networks(networks) if networks: kwargs['networks'] = networks image_id = _resolve_image(ret, image, client_timeout) except CommandExecutionError as exc: ret['result'] = False if exc.info is not None: return _format_comments(ret, exc.info) else: ret['comment'] = exc.__str__() return ret comments = [] # Pop off the send_signal argument passed by the watch requisite send_signal = kwargs.pop('send_signal', False) try: current_image_id = __salt__['docker.inspect_container'](name)['Image'] except CommandExecutionError: current_image_id = None except KeyError: ret['result'] = False comments.append( 'Unable to detect current image for container \'{0}\'. ' 'This might be due to a change in the Docker API.'.format(name) ) return _format_comments(ret, comments) # Shorthand to make the below code more understandable exists = current_image_id is not None pre_state = __salt__['docker.state'](name) if exists else None # If skip_comparison is True, we're definitely going to be using the temp # container as the new container (because we're forcing the change, or # because the image IDs differ). If False, we'll need to perform a # comparison between it and the new container. skip_comparison = force or not exists or current_image_id != image_id if skip_comparison and __opts__['test']: ret['result'] = None if force: ret['changes']['forced_update'] = True elif current_image_id != image_id: ret['changes']['image'] = {'old': current_image_id, 'new': image_id} comments.append( 'Container \'{0}\' would be {1}'.format( name, 'created' if not exists else 'replaced' ) ) return _format_comments(ret, comments) # Create temp container (or just create the named container if the # container does not already exist) try: temp_container = __salt__['docker.create']( image, name=name if not exists else None, skip_translate=skip_translate, ignore_collisions=ignore_collisions, validate_ip_addrs=validate_ip_addrs, client_timeout=client_timeout, **kwargs) temp_container_name = temp_container['Name'] except KeyError as exc: ret['result'] = False comments.append( 'Key \'{0}\' missing from API response, this may be due to a ' 'change in the Docker Remote API. Please report this on the ' 'SaltStack issue tracker if it has not already been reported.' .format(exc) ) return _format_comments(ret, comments) except Exception as exc: ret['result'] = False msg = exc.__str__() if isinstance(exc, CommandExecutionError) \ and isinstance(exc.info, dict) and 'invalid' in exc.info: msg += ( '\n\nIf you feel this information is incorrect, the ' 'skip_translate argument can be used to skip input ' 'translation for the argument(s) identified as invalid. See ' 'the documentation for details.' 
) comments.append(msg) return _format_comments(ret, comments) def _replace(orig, new): rm_kwargs = {'stop': True} if shutdown_timeout is not None: rm_kwargs['timeout'] = shutdown_timeout ret['changes'].setdefault('container_id', {})['removed'] = \ __salt__['docker.rm'](name, **rm_kwargs) try: result = __salt__['docker.rename'](new, orig) except CommandExecutionError as exc: result = False comments.append('Failed to rename temp container: {0}'.format(exc)) if result: comments.append('Replaced container \'{0}\''.format(orig)) else: comments.append('Failed to replace container \'{0}\''.format(orig)) return result def _delete_temp_container(): log.debug('Removing temp container \'%s\'', temp_container_name) __salt__['docker.rm'](temp_container_name) # If we're not skipping the comparison, then the assumption is that # temp_container will be discarded, unless the comparison reveals # differences, in which case we'll set cleanup_temp = False to prevent it # from being cleaned. cleanup_temp = not skip_comparison try: pre_net_connect = __salt__['docker.inspect_container']( name if exists else temp_container_name) for net_name, net_conf in six.iteritems(networks): try: __salt__['docker.connect_container_to_network']( temp_container_name, net_name, **net_conf) except CommandExecutionError as exc: # Shouldn't happen, stopped docker containers can be # attached to networks even if the static IP lies outside # of the network's subnet. An exception will be raised once # you try to start the container, however. ret['result'] = False comments.append(exc.__str__()) return _format_comments(ret, comments) post_net_connect = __salt__['docker.inspect_container']( temp_container_name) if configured_networks is not None: # Use set arithmetic to determine the networks which are connected # but not explicitly defined. They will be disconnected below. Note # that we check configured_networks because it represents the # original (unparsed) network configuration. When no networks # argument is used, the parsed networks will be an empty list, so # it's not sufficient to do a boolean check on the "networks" # variable. extra_nets = set( post_net_connect.get('NetworkSettings', {}).get('Networks', {}) ) - set(networks) if extra_nets: for extra_net in extra_nets: __salt__['docker.disconnect_container_from_network']( temp_container_name, extra_net) # We've made changes, so we need to inspect the container again post_net_connect = __salt__['docker.inspect_container']( temp_container_name) net_changes = __salt__['docker.compare_container_networks']( pre_net_connect, post_net_connect) if not skip_comparison: container_changes = __salt__['docker.compare_containers']( name, temp_container_name, ignore='Hostname', ) if container_changes: if _check_diff(container_changes): ret.setdefault('warnings', []).append( 'The detected changes may be due to incorrect ' 'handling of arguments in earlier Salt releases. If ' 'this warning persists after running the state ' 'again{0}, and no changes were made to the SLS file, ' 'then please report this.'.format( ' without test=True' if __opts__['test'] else '' ) ) changes_ptr = ret['changes'].setdefault('container', {}) changes_ptr.update(container_changes) if __opts__['test']: ret['result'] = None comments.append( 'Container \'{0}\' would be {1}'.format( name, 'created' if not exists else 'replaced' ) ) else: # We don't want to clean the temp container, we'll be # replacing the existing one with it.
cleanup_temp = False # Replace the container if not _replace(name, temp_container_name): ret['result'] = False return _format_comments(ret, comments) ret['changes'].setdefault('container_id', {})['added'] = \ temp_container['Id'] else: # No changes between existing container and temp container. # First check if a requisite is asking to send a signal to the # existing container. if send_signal: if __opts__['test']: comments.append( 'Signal {0} would be sent to container'.format( watch_action ) ) else: try: __salt__['docker.signal'](name, signal=watch_action) except CommandExecutionError as exc: ret['result'] = False comments.append( 'Failed to signal container: {0}'.format(exc) ) return _format_comments(ret, comments) else: ret['changes']['signal'] = watch_action comments.append( 'Sent signal {0} to container'.format(watch_action) ) elif container_changes: if not comments: log.warning( 'docker_container.running: detected changes without ' 'a specific comment for container \'%s\'', name ) comments.append( 'Container \'{0}\'{1} updated.'.format( name, ' would be' if __opts__['test'] else '' ) ) else: # Container was not replaced, no differences between the # existing container and the temp container were detected, # and no signal was sent to the container. comments.append( 'Container \'{0}\' is already configured as specified' .format(name) ) if net_changes: ret['changes'].setdefault('container', {})['Networks'] = net_changes if __opts__['test']: ret['result'] = None comments.append('Network configuration would be updated') elif cleanup_temp: # We only need to make network changes if the container # isn't being replaced, since we would already have # attached all the networks for purposes of comparison. network_failure = False for net_name in sorted(net_changes): errors = [] disconnected = connected = False try: if name in __salt__['docker.connected'](net_name): __salt__['docker.disconnect_container_from_network']( name, net_name) disconnected = True except CommandExecutionError as exc: errors.append(exc.__str__()) if net_name in networks: try: __salt__['docker.connect_container_to_network']( name, net_name, **networks[net_name]) connected = True except CommandExecutionError as exc: errors.append(exc.__str__()) if disconnected: # We succeeded in disconnecting but failed # to reconnect. This can happen if the # network's subnet has changed and we try # to reconnect with the same IP address # from the old subnet. 
for item in list(net_changes[net_name]): if net_changes[net_name][item]['old'] is None: # Since they'd both be None, just # delete this key from the changes del net_changes[net_name][item] else: net_changes[net_name][item]['new'] = None if errors: comments.extend(errors) network_failure = True ret['changes'].setdefault( 'container', {}).setdefault( 'Networks', {})[net_name] = net_changes[net_name] if disconnected and connected: comments.append( 'Reconnected to network \'{0}\' with updated ' 'configuration'.format(net_name) ) elif disconnected: comments.append( 'Disconnected from network \'{0}\''.format( net_name ) ) elif connected: comments.append( 'Connected to network \'{0}\''.format(net_name) ) if network_failure: ret['result'] = False return _format_comments(ret, comments) finally: if cleanup_temp: _delete_temp_container() if skip_comparison: if not exists: comments.append('Created container \'{0}\''.format(name)) else: if not _replace(name, temp_container_name): ret['result'] = False return _format_comments(ret, comments) ret['changes'].setdefault('container_id', {})['added'] = \ temp_container['Id'] # "exists" means that a container by the specified name existed prior to # this state being run # "not cleanup_temp" means that the temp container became permanent, either # because the named container did not exist or changes were detected # "cleanup_temp" means that the container already existed and no changes # were detected, so the temp container was discarded if not cleanup_temp and (not exists or (exists and start)) \ or (start and cleanup_temp and pre_state != 'running'): if __opts__['test']: ret['result'] = None comments.append('Container would be started') return _format_comments(ret, comments) else: try: post_state = __salt__['docker.start'](name)['state']['new'] except Exception as exc: ret['result'] = False comments.append( 'Failed to start container \'{0}\': \'{1}\''.format(name, exc) ) return _format_comments(ret, comments) else: post_state = __salt__['docker.state'](name) if not __opts__['test'] and post_state == 'running': # Now that we're certain the container is running, check each modified # network to see if the network went from static (or disconnected) to # automatic IP configuration. If so, grab the automatically-assigned # IPs and munge the changes dict to include them. Note that this can # only be done after the container is started because automatic IPs are # assigned at runtime.
contextkey = '.'.join((name, 'docker_container.running')) def _get_nets(): if contextkey not in __context__: new_container_info = \ __salt__['docker.inspect_container'](name) __context__[contextkey] = new_container_info.get( 'NetworkSettings', {}).get('Networks', {}) return __context__[contextkey] autoip_keys = __opts__['docker.compare_container_networks'].get('automatic', []) for net_name, net_changes in six.iteritems( ret['changes'].get('container', {}).get('Networks', {})): if 'IPConfiguration' in net_changes \ and net_changes['IPConfiguration']['new'] == 'automatic': for key in autoip_keys: val = _get_nets().get(net_name, {}).get(key) if val: net_changes[key] = {'old': None, 'new': val} try: net_changes.pop('IPConfiguration') except KeyError: pass __context__.pop(contextkey, None) if pre_state != post_state: ret['changes']['state'] = {'old': pre_state, 'new': post_state} if pre_state is not None: comments.append( 'State changed from \'{0}\' to \'{1}\''.format( pre_state, post_state ) ) if exists and current_image_id != image_id: comments.append('Container has a new image') ret['changes']['image'] = {'old': current_image_id, 'new': image_id} if post_state != 'running' and start: ret['result'] = False comments.append('Container is not running') return _format_comments(ret, comments)
python
def can_pp_seq_no_be_in_view(self, view_no, pp_seq_no):
    """
    Checks if the `pp_seq_no` could have been in view `view_no`. It will
    return False when the `pp_seq_no` belongs to a later view than
    `view_no` else will return True
    :return:
    """
    if view_no > self.viewNo:
        raise PlenumValueError(
            'view_no', view_no,
            "<= current view_no {}".format(self.viewNo),
            prefix=self)
    return view_no == self.viewNo or (
        view_no < self.viewNo and
        self.last_prepared_before_view_change and
        compare_3PC_keys(
            (view_no, pp_seq_no),
            self.last_prepared_before_view_change) >= 0)
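A quick behavior sketch (values are hypothetical; `replica` stands in for an instance of this class with `viewNo == 3` and `last_prepared_before_view_change == (2, 10)`):

# Hypothetical replica state for illustration only.
replica.can_pp_seq_no_be_in_view(3, 50)   # True: same view
replica.can_pp_seq_no_be_in_view(2, 10)   # True: not past the last prepared key (2, 10)
replica.can_pp_seq_no_be_in_view(2, 11)   # False: past the last prepared key
replica.can_pp_seq_no_be_in_view(4, 1)    # raises PlenumValueError: view_no > current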
java
public static CompositeConfiguration getConfig() { if (config == null) { config = new CompositeConfiguration(); String configFile = "bard.properties"; if (Util.class.getClassLoader().getResource(configFile) == null) { return config; } try { config.addConfiguration(new PropertiesConfiguration("bard.properties")); } catch (ConfigurationException e) { logger.error("Load Bard configuration \"bard.properties\" error: {}", e); } } return config; }
python
def add_event(self, event_collection, event_body, timestamp=None):
    """
    Adds an event. Depending on the persistence strategy of the client,
    this will either result in the event being uploaded to Keen
    immediately or will result in saving the event to some local cache.

    :param event_collection: the name of the collection to insert the
    event to
    :param event_body: dict, the body of the event to insert the event to
    :param timestamp: datetime, optional, the timestamp of the event
    """
    event = Event(self.project_id, event_collection, event_body,
                  timestamp=timestamp)
    self.persistence_strategy.persist(event)
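A minimal usage sketch (the client instance, collection name, and event body are placeholders):

# With an immediate-upload persistence strategy this sends the event to
# Keen right away; with a caching strategy it is queued locally instead.
client.add_event("purchases", {"item": "golf clubs", "price": 2400})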
python
def validate_arrangement_version(self):
    """Validate if the arrangement_version is supported

    This is for autorebuilds to fail early, otherwise they may fail on
    workers because of osbs-client validation checks.

    Method should be called after self.adjust_build_kwargs

    Shows a warning when version is deprecated

    :raises ValueError: when version is not supported
    """
    arrangement_version = self.build_kwargs['arrangement_version']

    if arrangement_version is None:
        return

    if arrangement_version <= 5:
        # TODO: raise as ValueError in release 1.6.38+
        self.log.warning("arrangement_version <= 5 is deprecated and will"
                         " be removed in release 1.6.38")
python
def set_config_value(self, name, value, quiet=False): """a client helper function to set a configuration value, meaning reading in the configuration file (if it exists), saving a new config value, and then writing back Parameters ========== name: the name of the value to set (key in dictionary) value: the value to set at the key quiet: disable verbose output if True (default is False) """ config_data = self._read_config_file() if value is not None: # Update the config file with the value config_data[name] = value # Update the instance with the value self.config_values[name] = value # If defined by client, set and save! self._write_config_file(config_data) if not quiet: self.print_config_value(name, separator=' is now set to: ')
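A short usage sketch (`client` is a hypothetical instance of the class this helper belongs to; the key names are arbitrary examples):

client.set_config_value('competition', 'titanic')          # prints confirmation
client.set_config_value('path', '/tmp/data', quiet=True)   # silent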
python
def WriteBlobs(self, blob_id_data_map, cursor=None): """Writes given blobs.""" chunks = [] for blob_id, blob in iteritems(blob_id_data_map): chunks.extend(_BlobToChunks(blob_id.AsBytes(), blob)) for values in _PartitionChunks(chunks): _Insert(cursor, "blobs", values)
java
private void readObject(java.io.ObjectInputStream s) throws java.io.IOException, ClassNotFoundException { // Read in any hidden stuff s.defaultReadObject(); // Read in size (number of Mappings) int size = s.readInt(); if (size < 0) throw new java.io.StreamCorruptedException ("Illegal mappings count: " + size); init(capacity(size)); // Read the keys and values, and put the mappings in the table for (int i=0; i<size; i++) { @SuppressWarnings("unchecked") K key = (K) s.readObject(); @SuppressWarnings("unchecked") V value = (V) s.readObject(); putForCreate(key, value); } }
python
def complete_abstract_value(
    exe_context,  # type: ExecutionContext
    return_type,  # type: Union[GraphQLInterfaceType, GraphQLUnionType]
    field_asts,  # type: List[Field]
    info,  # type: ResolveInfo
    path,  # type: List[Union[int, str]]
    result,  # type: Any
):
    # type: (...) -> Dict[str, Any]
    """
    Complete a value of an abstract type by determining the runtime type
    of that value, then completing based on that type.
    """
    runtime_type = None  # type: Union[str, GraphQLObjectType, None]

    # Field type must be Object, Interface or Union and expect
    # sub-selections.
    if isinstance(return_type, (GraphQLInterfaceType, GraphQLUnionType)):
        if return_type.resolve_type:
            runtime_type = return_type.resolve_type(result, info)
        else:
            runtime_type = get_default_resolve_type_fn(result, info, return_type)

    if isinstance(runtime_type, string_types):
        runtime_type = info.schema.get_type(runtime_type)  # type: ignore

    if not isinstance(runtime_type, GraphQLObjectType):
        raise GraphQLError(
            (
                "Abstract type {} must resolve to an Object type at runtime "
                + 'for field {}.{} with value "{}", received "{}".'
            ).format(
                return_type, info.parent_type, info.field_name, result, runtime_type
            ),
            field_asts,
        )

    if not exe_context.schema.is_possible_type(return_type, runtime_type):
        raise GraphQLError(
            u'Runtime Object type "{}" is not a possible type for "{}".'.format(
                runtime_type, return_type
            ),
            field_asts,
        )

    return complete_object_value(
        exe_context, runtime_type, field_asts, info, path, result
    )
java
public OutgoingFileTransfer createOutgoingFileTransfer(EntityFullJid userID) {
    // We need to create outgoing file transfers with a full JID since this
    // method will later use XEP-0095 to negotiate the stream. This is done
    // with IQ stanzas that need to be addressed to a full JID in order to
    // reach a client entity.
    if (userID == null) {
        throw new IllegalArgumentException("userID was null");
    }

    return new OutgoingFileTransfer(connection().getUser(), userID,
            FileTransferNegotiator.getNextStreamID(), fileTransferNegotiator);
}
python
def response_result(self, **kwargs):
    """ default will fetch MAX_AP pages

    yield `self.driver.page_source, self.driver.current_url, 1`

    after mock submit, the first page is crawled. so start @ index of 1,
    and yield the first page first. when the loop runs to the end, use
    the `else` branch to yield the last page.

    By the time execution reaches this point, the first page has already
    been loaded, so before clicking `next page` we need to yield over
    range(1, page_togo); the loop runs through page_togo - 1, by which
    time the next page has already been clicked and page page_togo has
    finished loading, so it is yielded when the loop falls into `else`.
    """
    page_togo = kwargs.get('page_togo', self.max_page_togo)

    if page_togo <= 1:
        # only one page requested: yield it and stop (a `return` with a
        # value inside a generator would not hand it to the caller)
        yield self.robot.driver.page_source, self.robot.driver.current_url, 1
        return

    # Start from `1` because the first page has already been loaded.
    # End at `page_togo` because at `page_togo - 1` the next page has
    # already been clicked, so this must not be written as
    # range(0, page_togo) or range(1, page_togo + 1).
    yield_last = kwargs.get('yield_last', False)
    start_yval = 0

    for page_done in range(1, page_togo):
        # log.debug(self.robot.driver.current_url)
        if not yield_last:
            yield self.robot.driver.page_source, self.robot.driver.current_url, page_done

        # click any popups
        self.mock_popovers()

        if self.has_next_page(page_done):
            start_yval = self.goto_next(start_yval)
        else:
            # if there is no next page, exit immediately
            log.debug('page {} is the last result page!'.format(page_done))
            break
    else:
        if not yield_last:
            yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo

    if yield_last:
        yield self.robot.driver.page_source, self.robot.driver.current_url, page_togo
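A consumption sketch (assumes the search form has already been submitted on a hypothetical `pager` instance of this class):

# Walk up to five result pages; page numbers are 1-based.
for html, url, page_no in pager.response_result(page_togo=5):
    print(page_no, url, len(html))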
python
def second_order_moments(adata, adjusted=False):
    """Computes second order moments for stochastic velocity estimation.

    Arguments
    ---------
    adata: `AnnData`
        Annotated data matrix.
    adjusted: `bool` (default: `False`)
        Whether to adjust the moments using the stored first order
        moments `Ms` and `Mu`.

    Returns
    -------
    Mss: Second order moments for spliced abundances
    Mus: Second order moments for spliced with unspliced abundances
    """
    if 'neighbors' not in adata.uns:
        raise ValueError(
            'You need to run `pp.neighbors` first to compute a neighborhood graph.')

    connectivities = get_connectivities(adata)
    s, u = csr_matrix(adata.layers['spliced']), csr_matrix(adata.layers['unspliced'])
    Mss = csr_matrix.dot(connectivities, s.multiply(s)).astype(np.float32).A
    Mus = csr_matrix.dot(connectivities, s.multiply(u)).astype(np.float32).A
    if adjusted:
        Mss = 2 * Mss - adata.layers['Ms'].reshape(Mss.shape)
        Mus = 2 * Mus - adata.layers['Mu'].reshape(Mus.shape)
    return Mss, Mus
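A minimal usage sketch (assumes a scVelo-style workflow in which `pp.neighbors` has been run and, for the adjusted variant, the first order moments `Ms`/`Mu` are stored on `adata`):

# adata is an AnnData with 'spliced'/'unspliced' layers and a neighbor graph.
Mss, Mus = second_order_moments(adata)
Mss_adj, Mus_adj = second_order_moments(adata, adjusted=True)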
java
public final <T> String evalWhereForField( final Map<String, Object> pAddParam, final T pEntity, final String pFieldFor) throws Exception { String[] fieldsNames = new String[] {pFieldFor}; pAddParam.put("fieldsNames", fieldsNames); ColumnsValues columnsValues = evalColumnsValues(pAddParam, pEntity); pAddParam.remove("fieldsNames"); TableSql tableSql = this.tablesMap.get(pEntity.getClass().getSimpleName()); FieldSql fldSql = tableSql.getFieldsMap().get(pFieldFor); StringBuffer sbWhere = new StringBuffer(" where "); String tableNm = pEntity.getClass().getSimpleName().toUpperCase(); if (fldSql.getTypeField().equals(ETypeField.COMPOSITE_FK_PK) || fldSql.getTypeField().equals(ETypeField.COMPOSITE_FK)) { TableSql tableSqlFr = this.tablesMap.get(fldSql.getForeignEntity()); for (int i = 0; i < tableSqlFr.getIdColumnsNames().length; i++) { if (i > 0) { sbWhere.append(" and "); } sbWhere.append(tableNm + "." + tableSqlFr.getIdColumnsNames()[i] .toUpperCase() + "=" + columnsValues .evalObjectValue(tableSqlFr.getIdColumnsNames()[i])); } } else { sbWhere.append(tableNm + "." + pFieldFor.toUpperCase() + "=" + columnsValues.evalObjectValue(pFieldFor)); } return sbWhere.toString(); }
java
private void updateStart(DownloadRequest request, long totalBytes) { /* if the request has failed before, donnot deliver callback */ if (request.downloadState() == DownloadState.FAILURE) { updateState(request, DownloadState.RUNNING); return; } /* set the download state of this request as running */ updateState(request, DownloadState.RUNNING); delivery.postStart(request, totalBytes); }
python
def server_poweroff(host=None, admin_username=None, admin_password=None, module=None): ''' Powers down the managed server. host The chassis host. admin_username The username used to access the chassis. admin_password The password used to access the chassis. module The element to power off on the chassis such as a blade. If not provided, the chassis will be powered off. CLI Example: .. code-block:: bash salt dell dracr.server_poweroff salt dell dracr.server_poweroff module=server-1 ''' return __execute_cmd('serveraction powerdown', host=host, admin_username=admin_username, admin_password=admin_password, module=module)
python
def parse_time(self, input_str, reference_time=''): """Parses input with Duckling for occurences of times. Args: input_str: An input string, e.g. 'Let's meet at 11:45am'. reference_time: Optional reference time for Duckling. Returns: A preprocessed list of results (dicts) from Duckling output. For example: [ { "dim":"time", "end":21, "start":11, "value":{ "value":"2016-10-11T11:45:00.000-07:00", "others":[ "2016-10-11T11:45:00.000-07:00", "2016-10-12T11:45:00.000-07:00", "2016-10-13T11:45:00.000-07:00" ] }, "text":"at 11:45am" } ] """ return self._parse(input_str, dim=Dim.TIME, reference_time=reference_time)
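A usage sketch (`wrapper` is a hypothetical instance of the class exposing this method; output has the shape shown in the docstring):

results = wrapper.parse_time("Let's meet at 11:45am")
for r in results:
    print(r['text'], r['value']['value'])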
java
boolean compareLists(AnalyzedTokenReadings[] tokens, int startIndex, int endIndex, Pattern[] patterns) { if (startIndex < 0) { return false; } int i = 0; for (int j = startIndex; j <= endIndex; j++) { if (i >= patterns.length || j >= tokens.length || !patterns[i].matcher(tokens[j].getToken()).matches()) { return false; } i++; } return true; }
python
def generate_read_batches(
    self,
    table,
    columns,
    keyset,
    index="",
    partition_size_bytes=None,
    max_partitions=None,
):
    """Start a partitioned batch read operation.

    Uses the ``PartitionRead`` API request to initiate the partitioned
    read.  Returns a list of batch information needed to perform the
    actual reads.

    :type table: str
    :param table: name of the table from which to fetch data

    :type columns: list of str
    :param columns: names of columns to be retrieved

    :type keyset: :class:`~google.cloud.spanner_v1.keyset.KeySet`
    :param keyset: keys / ranges identifying rows to be retrieved

    :type index: str
    :param index: (Optional) name of index to use, rather than the
                  table's primary key

    :type partition_size_bytes: int
    :param partition_size_bytes:
        (Optional) desired size for each partition generated.  The service
        uses this as a hint, the actual partition size may differ.

    :type max_partitions: int
    :param max_partitions:
        (Optional) desired maximum number of partitions generated. The
        service uses this as a hint, the actual number of partitions may
        differ.

    :rtype: iterable of dict
    :returns:
        mappings of information used to perform actual partitioned reads
        via :meth:`process_read_batch`.
    """
    partitions = self._get_snapshot().partition_read(
        table=table,
        columns=columns,
        keyset=keyset,
        index=index,
        partition_size_bytes=partition_size_bytes,
        max_partitions=max_partitions,
    )

    read_info = {
        "table": table,
        "columns": columns,
        "keyset": keyset._to_dict(),
        "index": index,
    }
    for partition in partitions:
        yield {"partition": partition, "read": read_info.copy()}
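A consumption sketch pairing this generator with the `process_read_batch` method referenced in the docstring (the table, columns, and `keys` keyset are placeholders):

for batch in snapshot.generate_read_batches(
        table='users', columns=['id', 'name'], keyset=keys):
    for row in snapshot.process_read_batch(batch):
        print(row)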
python
def _set_load_action(self, mem_addr, rec_count, retries, read_complete=False): """Calculate the next record to read. If the last record was successful and one record was being read then look for the next record until we get to the high water mark. If the last read was successful and all records were being read then look for the first record. if the last read was unsuccessful and one record was being read then repeat the last read until max retries If the last read was unsuccessful and all records were being read then repeat the last read until max retries or look for the first record. """ if self._have_all_records(): mem_addr = None rec_count = 0 retries = 0 elif read_complete: retries = 0 if rec_count: mem_addr = self._next_address(mem_addr) else: mem_addr = self._next_address(mem_addr) rec_count = 1 retries = 0 elif rec_count and retries < ALDB_RECORD_RETRIES: retries = retries + 1 elif not rec_count and retries < ALDB_ALL_RECORD_RETRIES: retries = retries + 1 elif not rec_count and retries >= ALDB_ALL_RECORD_RETRIES: mem_addr = self._next_address(mem_addr) rec_count = 1 retries = 0 else: mem_addr = None rec_count = 0 retries = 0 self._load_action = LoadAction(mem_addr, rec_count, retries) if mem_addr is not None: _LOGGER.debug('Load action: addr: %04x rec_count: %d retries: %d', self._load_action.mem_addr, self._load_action.rec_count, self._load_action.retries)
java
@Override public void eUnset(int featureID) { switch (featureID) { case AfplibPackage.GSAP__P: setP(P_EDEFAULT); return; case AfplibPackage.GSAP__Q: setQ(Q_EDEFAULT); return; case AfplibPackage.GSAP__R: setR(R_EDEFAULT); return; case AfplibPackage.GSAP__S: setS(S_EDEFAULT); return; } super.eUnset(featureID); }
java
public Entry getNext() { checkEntryParent(); Entry entry = null; if(!isLast()) { entry = next; } return entry; }
java
public SignalRResourceInner beginCreateOrUpdate(String resourceGroupName, String resourceName, SignalRCreateParameters parameters) { return beginCreateOrUpdateWithServiceResponseAsync(resourceGroupName, resourceName, parameters).toBlocking().single().body(); }
java
@Override public void paintIcon(Component c, Graphics g, int x, int y) { Graphics2D g2 = (Graphics2D) g.create(); g2.setComposite(AlphaComposite.SrcAtop.derive(alpha)); icon.paintIcon(c, g2, x, y); g2.dispose(); }
java
public String getNamespaceURIFromPrefix(String prefix) { String uri = null; if (m_prefixMap != null) uri = m_prefixMap.lookupNamespace(prefix); return uri; }
python
def iterate(infile): '''iterate over ``samtools pileup -c`` formatted file. *infile* can be any iterator over a lines. The function yields named tuples of the type :class:`pysam.Pileup.PileupSubstitution` or :class:`pysam.Pileup.PileupIndel`. .. note:: The parser converts to 0-based coordinates ''' conv_subst = (str, lambda x: int(x) - 1, str, str, int, int, int, int, str, str) conv_indel = (str, lambda x: int(x) - 1, str, str, int, int, int, int, str, str, int, int, int) for line in infile: d = line[:-1].split() if d[2] == "*": try: yield PileupIndel(*[x(y) for x, y in zip(conv_indel, d)]) except TypeError: raise pysam.SamtoolsError("parsing error in line: `%s`" % line) else: try: yield PileupSubstitution(*[x(y) for x, y in zip(conv_subst, d)]) except TypeError: raise pysam.SamtoolsError("parsing error in line: `%s`" % line)
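A short driver sketch (the file name is a placeholder; fields are addressed positionally since the named tuple definitions live elsewhere in the module):

with open('sample.pileup') as infile:
    for record in iterate(infile):
        # first two fields are the chromosome and the 0-based position
        print(record[0], record[1])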
java
public static boolean hasField(Class<?> beanClass, String name) throws SecurityException {
    return null != getField(beanClass, name);
}
java
public Long getProcessId(String procname) throws Exception { Process proc = ProcessCache.getProcess(procname, 0); if (proc == null) throw new DataAccessException(0, "Cannot find process with name " + procname + ", version 0"); return proc.getId(); }
python
def split(self, max_commands):
    """
    Split this action into an equivalent list of actions, each of which
    have at most max_commands commands.

    :param max_commands: max number of commands allowed in any action
    :return: the list of actions created from this one
    """
    a_prior = Action(**self.frame)
    a_prior.commands = list(self.commands)
    self.split_actions = [a_prior]
    while len(a_prior.commands) > max_commands:
        a_next = Action(**self.frame)
        a_prior.commands, a_next.commands = \
            a_prior.commands[0:max_commands], a_prior.commands[max_commands:]
        self.split_actions.append(a_next)
        a_prior = a_next
    return self.split_actions
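A behavior sketch (a hypothetical action holding seven commands, split into chunks of at most three):

parts = action.split(3)
print([len(p.commands) for p in parts])  # [3, 3, 1]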
java
@Override public List<CPOptionCategory> getCPOptionCategoriesByUuidAndCompanyId( String uuid, long companyId) { return cpOptionCategoryPersistence.findByUuid_C(uuid, companyId); }
python
def monitor(name, callback):
    '''
    monitors actions on the specified container; callback is a function
    to be called on each container event
    '''
    global _monitor
    if not exists(name):
        raise ContainerNotExists("The container (%s) does not exist!" % name)
    if _monitor:
        if _monitor.is_monitored(name):
            raise Exception(
                "You are already monitoring this container (%s)" % name)
    else:
        _monitor = _LXCMonitor()
        logging.info("Starting LXC Monitor")
        _monitor.start()

        def kill_handler(sg, fr):
            stop_monitor()

        signal.signal(signal.SIGTERM, kill_handler)
        signal.signal(signal.SIGINT, kill_handler)
    _monitor.add_monitor(name, callback)
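A usage sketch (the container name is a placeholder, and the single-argument callback signature is an assumption; the monitor passes each observed event to it):

def on_event(event):
    print('container event:', event)  # callback signature assumed

monitor('web1', on_event)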
python
def create(style_dataset, content_dataset, style_feature=None, content_feature=None, max_iterations=None, model='resnet-16', verbose=True, batch_size = 6, **kwargs): """ Create a :class:`StyleTransfer` model. Parameters ---------- style_dataset: SFrame Input style images. The columns named by the ``style_feature`` parameters will be extracted for training the model. content_dataset : SFrame Input content images. The columns named by the ``content_feature`` parameters will be extracted for training the model. style_feature: string Name of the column containing the input images in style SFrame. 'None' (the default) indicates the only image column in the style SFrame should be used as the feature. content_feature: string Name of the column containing the input images in content SFrame. 'None' (the default) indicates the only image column in the content SFrame should be used as the feature. max_iterations : int The number of training iterations. If 'None' (the default), then it will be automatically determined based on the amount of data you provide. model : string optional Style transfer model to use: - "resnet-16" : Fast and small-sized residual network that uses VGG-16 as reference network during training. batch_size : int, optional If you are getting memory errors, try decreasing this value. If you have a powerful computer, increasing this value may improve training throughput. verbose : bool, optional If True, print progress updates and model details. Returns ------- out : StyleTransfer A trained :class:`StyleTransfer` model. See Also -------- StyleTransfer Examples -------- .. sourcecode:: python # Create datasets >>> content_dataset = turicreate.image_analysis.load_images('content_images/') >>> style_dataset = turicreate.image_analysis.load_images('style_images/') # Train a style transfer model >>> model = turicreate.style_transfer.create(content_dataset, style_dataset) # Stylize an image on all styles >>> stylized_images = model.stylize(data) # Visualize the stylized images >>> stylized_images.explore() """ if len(style_dataset) == 0: raise _ToolkitError("style_dataset SFrame cannot be empty") if len(content_dataset) == 0: raise _ToolkitError("content_dataset SFrame cannot be empty") if(batch_size < 1): raise _ToolkitError("'batch_size' must be greater than or equal to 1") from ._sframe_loader import SFrameSTIter as _SFrameSTIter import mxnet as _mx from .._mxnet import _mxnet_utils if style_feature is None: style_feature = _tkutl._find_only_image_column(style_dataset) if content_feature is None: content_feature = _tkutl._find_only_image_column(content_dataset) if verbose: print("Using '{}' in style_dataset as feature column and using " "'{}' in content_dataset as feature column".format(style_feature, content_feature)) _raise_error_if_not_training_sframe(style_dataset, style_feature) _raise_error_if_not_training_sframe(content_dataset, content_feature) params = { 'batch_size': batch_size, 'vgg16_content_loss_layer': 2, # conv3_3 layer 'lr': 0.001, 'content_loss_mult': 1.0, 'style_loss_mult': [1e-4, 1e-4, 1e-4, 1e-4], # conv 1-4 layers 'finetune_all_params': True, 'pretrained_weights': False, 'print_loss_breakdown': False, 'input_shape': (256, 256), 'training_content_loader_type': 'stretch', 'use_augmentation': False, 'sequential_image_processing': False, # Only used if use_augmentaion is True 'aug_resize': 0, 'aug_min_object_covered': 0, 'aug_rand_crop': 0.9, 'aug_rand_pad': 0.9, 'aug_rand_gray': 0.0, 'aug_aspect_ratio': 1.25, 'aug_hue': 0.05, 'aug_brightness': 0.05, 'aug_saturation': 
0.05, 'aug_contrast': 0.05, 'aug_horizontal_flip': True, 'aug_area_range': (.05, 1.5), 'aug_pca_noise': 0.0, 'aug_max_attempts': 20, 'aug_inter_method': 2, } if '_advanced_parameters' in kwargs: # Make sure no additional parameters are provided new_keys = set(kwargs['_advanced_parameters'].keys()) set_keys = set(params.keys()) unsupported = new_keys - set_keys if unsupported: raise _ToolkitError('Unknown advanced parameters: {}'.format(unsupported)) params.update(kwargs['_advanced_parameters']) _content_loss_mult = params['content_loss_mult'] _style_loss_mult = params['style_loss_mult'] num_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=params['batch_size']) batch_size_each = params['batch_size'] // max(num_gpus, 1) batch_size = max(num_gpus, 1) * batch_size_each input_shape = params['input_shape'] iterations = 0 if max_iterations is None: max_iterations = len(style_dataset) * 10000 if verbose: print('Setting max_iterations to be {}'.format(max_iterations)) # data loader if params['use_augmentation']: content_loader_type = '%s-with-augmentation' % params['training_content_loader_type'] else: content_loader_type = params['training_content_loader_type'] content_images_loader = _SFrameSTIter(content_dataset, batch_size, shuffle=True, feature_column=content_feature, input_shape=input_shape, loader_type=content_loader_type, aug_params=params, sequential=params['sequential_image_processing']) ctx = _mxnet_utils.get_mxnet_context(max_devices=params['batch_size']) num_styles = len(style_dataset) # TRANSFORMER MODEL from ._model import Transformer as _Transformer transformer_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS[model]().get_model_path() transformer = _Transformer(num_styles, batch_size_each) transformer.collect_params().initialize(ctx=ctx) if params['pretrained_weights']: transformer.load_params(transformer_model_path, ctx, allow_missing=True) # For some reason, the transformer fails to hybridize for training, so we # avoid this until resolved # transformer.hybridize() # VGG MODEL from ._model import Vgg16 as _Vgg16 vgg_model_path = _pre_trained_models.STYLE_TRANSFER_BASE_MODELS['Vgg16']().get_model_path() vgg_model = _Vgg16() vgg_model.collect_params().initialize(ctx=ctx) vgg_model.load_params(vgg_model_path, ctx=ctx, ignore_extra=True) vgg_model.hybridize() # TRAINER from mxnet import gluon as _gluon from ._model import gram_matrix as _gram_matrix if params['finetune_all_params']: trainable_params = transformer.collect_params() else: trainable_params = transformer.collect_params('.*gamma|.*beta') trainer = _gluon.Trainer(trainable_params, 'adam', {'learning_rate': params['lr']}) mse_loss = _gluon.loss.L2Loss() start_time = _time.time() smoothed_loss = None last_time = 0 cuda_gpus = _mxnet_utils.get_gpus_in_use(max_devices=params['batch_size']) num_mxnet_gpus = len(cuda_gpus) if verbose: # Estimate memory usage (based on experiments) cuda_mem_req = 260 + batch_size_each * 880 + num_styles * 1.4 _tkutl._print_neural_compute_device(cuda_gpus=cuda_gpus, use_mps=False, cuda_mem_req=cuda_mem_req, has_mps_impl=False) # # Pre-compute gram matrices for style images # if verbose: print('Analyzing visual features of the style images') style_images_loader = _SFrameSTIter(style_dataset, batch_size, shuffle=False, num_epochs=1, feature_column=style_feature, input_shape=input_shape, loader_type='stretch', sequential=params['sequential_image_processing']) num_layers = len(params['style_loss_mult']) gram_chunks = [[] for _ in range(num_layers)] for s_batch in style_images_loader: 
s_data = _gluon.utils.split_and_load(s_batch.data[0], ctx_list=ctx, batch_axis=0) for s in s_data: vgg16_s = _vgg16_data_prep(s) ret = vgg_model(vgg16_s) grams = [_gram_matrix(x) for x in ret] for i, gram in enumerate(grams): if gram.context != _mx.cpu(0): gram = gram.as_in_context(_mx.cpu(0)) gram_chunks[i].append(gram) del style_images_loader grams = [ # The concatenated styles may be padded, so we slice overflow _mx.nd.concat(*chunks, dim=0)[:num_styles] for chunks in gram_chunks ] # A context->grams look-up table, where all the gram matrices have been # distributed ctx_grams = {} if ctx[0] == _mx.cpu(0): ctx_grams[_mx.cpu(0)] = grams else: for ctx0 in ctx: ctx_grams[ctx0] = [gram.as_in_context(ctx0) for gram in grams] # # Training loop # vgg_content_loss_layer = params['vgg16_content_loss_layer'] rs = _np.random.RandomState(1234) while iterations < max_iterations: content_images_loader.reset() for c_batch in content_images_loader: c_data = _gluon.utils.split_and_load(c_batch.data[0], ctx_list=ctx, batch_axis=0) Ls = [] curr_content_loss = [] curr_style_loss = [] with _mx.autograd.record(): for c in c_data: # Randomize styles to train indices = _mx.nd.array(rs.randint(num_styles, size=batch_size_each), dtype=_np.int64, ctx=c.context) # Generate pastiche p = transformer(c, indices) # mean subtraction vgg16_p = _vgg16_data_prep(p) vgg16_c = _vgg16_data_prep(c) # vgg forward p_vgg_outputs = vgg_model(vgg16_p) c_vgg_outputs = vgg_model(vgg16_c) c_content_layer = c_vgg_outputs[vgg_content_loss_layer] p_content_layer = p_vgg_outputs[vgg_content_loss_layer] # Calculate Loss # Style Loss between style image and stylized image # Ls = sum of L2 norm of gram matrix of vgg16's conv layers style_losses = [] for gram, p_vgg_output, style_loss_mult in zip(ctx_grams[c.context], p_vgg_outputs, _style_loss_mult): gram_s_vgg = gram[indices] gram_p_vgg = _gram_matrix(p_vgg_output) style_losses.append(style_loss_mult * mse_loss(gram_s_vgg, gram_p_vgg)) style_loss = _mx.nd.add_n(*style_losses) # Content Loss between content image and stylized image # Lc = L2 norm at a single layer in vgg16 content_loss = _content_loss_mult * mse_loss(c_content_layer, p_content_layer) curr_content_loss.append(content_loss) curr_style_loss.append(style_loss) # Divide loss by large number to get into a more legible # range total_loss = (content_loss + style_loss) / 10000.0 Ls.append(total_loss) for L in Ls: L.backward() cur_loss = _np.mean([L.asnumpy()[0] for L in Ls]) if smoothed_loss is None: smoothed_loss = cur_loss else: smoothed_loss = 0.9 * smoothed_loss + 0.1 * cur_loss iterations += 1 trainer.step(batch_size) if verbose and iterations == 1: # Print progress table header column_names = ['Iteration', 'Loss', 'Elapsed Time'] num_columns = len(column_names) column_width = max(map(lambda x: len(x), column_names)) + 2 hr = '+' + '+'.join(['-' * column_width] * num_columns) + '+' print(hr) print(('| {:<{width}}' * num_columns + '|').format(*column_names, width=column_width-1)) print(hr) cur_time = _time.time() if verbose and (cur_time > last_time + 10 or iterations == max_iterations): # Print progress table row elapsed_time = cur_time - start_time print("| {cur_iter:<{width}}| {loss:<{width}.3f}| {time:<{width}.1f}|".format( cur_iter = iterations, loss = smoothed_loss, time = elapsed_time , width = column_width-1)) if params['print_loss_breakdown']: print_content_loss = _np.mean([L.asnumpy()[0] for L in curr_content_loss]) print_style_loss = _np.mean([L.asnumpy()[0] for L in curr_style_loss]) print('Total Loss: {:6.3f} | 
Content Loss: {:6.3f} | Style Loss: {:6.3f}'.format(cur_loss, print_content_loss, print_style_loss)) last_time = cur_time if iterations == max_iterations: print(hr) break training_time = _time.time() - start_time style_sa = style_dataset[style_feature] idx_column = _tc.SArray(range(0, style_sa.shape[0])) style_sframe = _tc.SFrame({"style": idx_column, style_feature: style_sa}) # Save the model state state = { '_model': transformer, '_training_time_as_string': _seconds_as_string(training_time), 'batch_size': batch_size, 'num_styles': num_styles, 'model': model, 'input_image_shape': input_shape, 'styles': style_sframe, 'num_content_images': len(content_dataset), 'training_time': training_time, 'max_iterations': max_iterations, 'training_iterations': iterations, 'training_epochs': content_images_loader.cur_epoch, 'style_feature': style_feature, 'content_feature': content_feature, "_index_column": "style", 'training_loss': smoothed_loss, } return StyleTransfer(state)
python
def get_images(config, name=None, quiet=False, all=True, *args, **kwargs): ''' List docker images :type name: string :param name: A repository name to filter on :type quiet: boolean :param quiet: Only show image ids :type all: boolean :param all: Show all images :rtype: dict :returns: the images ''' err = "Unknown" client = _get_client(config) try: infos = client.images(name=name, quiet=quiet, all=all) for i in range(len(infos)): inf = _set_id(infos[i]) try: inf['Human_Size'] = _sizeof_fmt(int(inf['Size'])) except ValueError: pass try: ts = int(inf['Created']) dts = datetime.datetime.fromtimestamp(ts) inf['Human_IsoCreated'] = dts.isoformat() inf['Human_Created'] = dts.strftime( '%Y-%m-%d %H:%M:%S') except Exception: pass try: inf['Human_VirtualSize'] = ( _sizeof_fmt(int(inf['VirtualSize']))) except ValueError: pass return infos except Exception as e: err = e utils.error("Unable to list Docker images: %s"%err) return None
python
def is_moderated(self, curr_time, pipe):
    '''
    Tests to see if the moderation limit is exceeded

    @return: True if the moderation limit is exceeded
    '''
    # get the stored value; if the moderate key has expired, default to
    # 0.0 since we don't care
    value = pipe.get(self.moderate_key)
    if value is None:
        value = 0.0
    else:
        value = float(value)

    # check moderation difference
    if (curr_time - value) < self.moderation:
        return True
    return False
java
public static XElement parseXML(File file) throws XMLStreamException { try (InputStream in = new FileInputStream(file)) { return parseXML(in); } catch (IOException ex) { throw new XMLStreamException(ex); } }
python
def size(self): """Tuple[int, int]: The width and height of the window.""" size = ffi.new('int[]', 2) lib.SDL_GetWindowSize(self._ptr, size + 0, size + 1) return (size[0], size[1])
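The `size + 0, size + 1` idiom above passes pointers to the first and second slots of the cffi-allocated array, matching SDL_GetWindowSize's two out-parameters. A small standalone cffi sketch of the same pointer-arithmetic pattern, with no SDL dependency; the writes through p0 and p1 stand in for what the C callee would do.

from cffi import FFI

ffi = FFI()
out = ffi.new('int[]', 2)   # zero-initialized two-int array
p0 = out + 0                # int* pointing at out[0]
p1 = out + 1                # int* pointing at out[1]
p0[0] = 640                 # write through the pointers, as a C callee would
p1[0] = 480
print((out[0], out[1]))     # (640, 480)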
java
@Override public void delete() throws JMSException { if (connection == null) throw new FFMQException("Temporary queue already deleted","QUEUE_DOES_NOT_EXIST"); connection.deleteTemporaryQueue(name); connection = null; }
java
private <T> String buildClassListTag(final T t) { return (exportClassFullName != null) ? exportClassFullName : t.getClass().getSimpleName() + exportClassEnding; }
python
def get_objects(self, subject, predicate):
    """
    Search for all objects related to the specified subject and
    predicate.

    :param subject:
    :param predicate:
    :rtype: generator of objects (as strings)
    """
    for statement in self.spo_search(subject=subject, predicate=predicate):
        yield str(statement[2])
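The surrounding class and its spo_search method are not shown in this excerpt. A hedged, self-contained mock of the same generator pattern, sufficient to show how get_objects projects the object slot out of matching (subject, predicate, object) triples:

class TinyTripleStore(object):
    def __init__(self, triples):
        self.triples = triples  # list of (subject, predicate, object) tuples

    def spo_search(self, subject=None, predicate=None):
        for s, p, o in self.triples:
            if (subject is None or s == subject) and \
               (predicate is None or p == predicate):
                yield (s, p, o)

    def get_objects(self, subject, predicate):
        for statement in self.spo_search(subject=subject, predicate=predicate):
            yield str(statement[2])

store = TinyTripleStore([('alice', 'knows', 'bob'), ('alice', 'knows', 'carol')])
print(list(store.get_objects('alice', 'knows')))  # ['bob', 'carol']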
python
def _binary_exp(expression, op): # type: (QuilParser.ExpressionContext, Callable) -> Number """ Apply an operator to two expressions. Start by evaluating both sides of the operator. """ [arg1, arg2] = expression.expression() return op(_expression(arg1), _expression(arg2))
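The real ExpressionContext comes from a generated parser, so a hedged illustration of the dispatch pattern with stand-in stubs makes the evaluation order concrete: both children are evaluated, then the operator is applied.

import operator

# Stand-ins for the parser machinery: each "expression" just wraps a value,
# and _expression evaluates it directly.
def _expression(node):
    return node

class FakeBinaryExpression(object):
    def __init__(self, left, right):
        self._children = [left, right]

    def expression(self):
        return self._children

def _binary_exp(expression, op):
    [arg1, arg2] = expression.expression()
    return op(_expression(arg1), _expression(arg2))

print(_binary_exp(FakeBinaryExpression(2, 3), operator.add))  # 5
print(_binary_exp(FakeBinaryExpression(2, 3), operator.mul))  # 6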
java
@Trivial protected String processString(String name, String expression, boolean immediateOnly) { return processString(name, expression, immediateOnly, false); }
python
def lF_value(ER, EF, dfnum, dfden):
    """
    Returns an F-statistic given the following:
        ER    = error associated with the null hypothesis (the Restricted model)
        EF    = error associated with the alternate hypothesis (the Full model)
        dfnum = degrees of freedom of the numerator (dfR - dfF)
        dfden = degrees of freedom associated with the denominator/Full model

    Usage:   lF_value(ER, EF, dfnum, dfden)
    """
    return ((ER - EF) / float(dfnum)) / (EF / float(dfden))
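A quick numeric check of the formula F = ((ER - EF) / dfnum) / (EF / dfden), using made-up error sums purely for illustration:

def lF_value(ER, EF, dfnum, dfden):
    return ((ER - EF) / float(dfnum)) / (EF / float(dfden))

# Restricted-model error 120 against full-model error 100, with
# 2 numerator and 50 denominator degrees of freedom:
# F = ((120 - 100) / 2) / (100 / 50) = 10 / 2 = 5
print(lF_value(120.0, 100.0, 2, 50))  # 5.0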
java
public Tree clear(String path) { Tree child = getChild(path, false); if (child == null) { child = putMap(path); } else { child.clear(); } return child; }
python
def safe_unicode(string):
    '''Safely transform any object into utf8 encoded bytes
    (Python 2 only: relies on basestring/unicode)'''
    if not isinstance(string, basestring):
        string = unicode(string)
    if isinstance(string, unicode):
        string = string.encode('utf8')
    return string
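Since basestring and unicode do not exist on Python 3, a hedged Python 3 counterpart with the same contract (any object in, UTF-8 bytes out) would look like the sketch below; the function name is this sketch's own, not part of the original module.

def safe_utf8_bytes(obj):
    """Python 3 sketch of the same contract: any object in, UTF-8 bytes out."""
    if isinstance(obj, bytes):
        return obj
    if not isinstance(obj, str):
        obj = str(obj)
    return obj.encode('utf8')

print(safe_utf8_bytes(42))        # b'42'
print(safe_utf8_bytes('héllo'))   # b'h\xc3\xa9llo'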
java
public FaunusPipeline simplePath() { this.state.assertNotLocked(); this.state.assertNoProperty(); this.compiler.addMap(CyclicPathFilterMap.Map.class, NullWritable.class, FaunusVertex.class, CyclicPathFilterMap.createConfiguration(this.state.getElementType())); makeMapReduceString(CyclicPathFilterMap.class); return this; }
java
protected boolean tryBridgeMethod(MethodNode target, Expression receiver, boolean implicitThis, TupleExpression args, ClassNode thisClass) { ClassNode lookupClassNode; if (target.isProtected()) { lookupClassNode = controller.getClassNode(); while (lookupClassNode != null && !lookupClassNode.isDerivedFrom(target.getDeclaringClass())) { lookupClassNode = lookupClassNode.getOuterClass(); } if (lookupClassNode == null) { return false; } } else { lookupClassNode = target.getDeclaringClass().redirect(); } Map<MethodNode, MethodNode> bridges = lookupClassNode.getNodeMetaData(PRIVATE_BRIDGE_METHODS); MethodNode bridge = bridges==null?null:bridges.get(target); if (bridge != null) { Expression fixedReceiver = receiver; if (implicitThis) { if (!controller.isInClosure()) { fixedReceiver = new PropertyExpression(new ClassExpression(lookupClassNode), "this"); } else if (thisClass != null) { ClassNode current = thisClass.getOuterClass(); fixedReceiver = new VariableExpression("thisObject", current); // adjust for multiple levels of nesting if needed while (current instanceof InnerClassNode && !lookupClassNode.equals(current)) { FieldNode thisField = current.getField("this$0"); current = current.getOuterClass(); if (thisField != null) { fixedReceiver = new PropertyExpression(fixedReceiver, "this$0"); fixedReceiver.setType(current); } } } } ArgumentListExpression newArgs = new ArgumentListExpression(target.isStatic()?new ConstantExpression(null):fixedReceiver); for (Expression expression : args.getExpressions()) { newArgs.addExpression(expression); } return writeDirectMethodCall(bridge, implicitThis, fixedReceiver, newArgs); } return false; }
java
private File saveAsFile(ResponseBody responseBody) {
    if (responseBody == null) {
        return null;
    }
    File file = new File(destDirectory + File.separator + fileName + retrieveUniqueId() + "." + extension);
    // try-with-resources closes both streams, replacing the original
    // nested try/finally and the redundant IOException/Exception catches
    try (InputStream inputStream = responseBody.byteStream();
         OutputStream outputStream = new FileOutputStream(file)) {
        byte[] buffer = new byte[4096];
        int numOfBufferedBytes;
        while ((numOfBufferedBytes = inputStream.read(buffer)) != END_OF_FILE_DENOTER) {
            outputStream.write(buffer, 0, numOfBufferedBytes);
        }
        outputStream.flush();
        return file;
    } catch (Exception exception) {
        // Any I/O failure means no file is returned, as before
        return null;
    }
}
java
@Override protected boolean isPresent(final Request request) { if (isAllowNoSelection()) { String id = getId(); return request.getParameter(id + "-h") != null; } else { return super.isPresent(request); } }