language
stringclasses
2 values
func_code_string
stringlengths
63
466k
java
/**
 * Returns the loader of the first YAML properties object whose root
 * mapping contains the given key.
 *
 * @param name the root-level key to look up
 * @return the matching {@link YamlLoader}, or {@code null} when no
 *         properties object contains the key
 */
public YamlLoader getLoader(String name) {
    for (final YamlProperties candidate : yamlProps) {
        if (candidate.getRoot().containsKey(name)) {
            return candidate.getLoader();
        }
    }
    // No properties object knows this key.
    return null;
}
java
/**
 * Creates a codec for an {@code int[]} vector, where the i-th element is
 * restricted to the i-th of the given integer ranges.
 *
 * @param domains the allowed range of each vector element; must not be empty
 * @return a codec decoding a genotype into an {@code int[]} with one entry
 *         per domain
 * @throws IllegalArgumentException if no domain is given
 * @throws NullPointerException if any domain is {@code null}
 */
public static Codec<int[], IntegerGene> ofVector(final IntRange... domains) {
    if (domains.length == 0) {
        throw new IllegalArgumentException("Domains must not be empty.");
    }
    // Build one single-gene chromosome per domain, preserving order.
    final ISeq<IntegerChromosome> chromosomes = Stream.of(domains)
        .peek(Objects::requireNonNull)
        .map(IntegerGene::of)
        .map(IntegerChromosome::of)
        .collect(ISeq.toISeq());
    return Codec.of(
        Genotype.of(chromosomes),
        gt -> {
            // Decoder: copy each chromosome's single gene into the result
            // array (iterates backwards; order of writes is irrelevant).
            final int[] args = new int[gt.length()];
            for (int i = gt.length(); --i >= 0;) {
                args[i] = gt.getChromosome(i).getGene().intValue();
            }
            return args;
        }
    );
}
python
def get_all_components(user, topic_id):
    """Get all components of a topic."""
    # Validate/normalize the request query string (pagination, embeds, ...).
    args = schemas.args(flask.request.args.to_dict())
    query = v1_utils.QueryBuilder(_TABLE, args, _C_COLUMNS)
    # Restrict to non-archived components of the requested topic.
    query.add_extra_condition(sql.and_(
        _TABLE.c.topic_id == topic_id,
        _TABLE.c.state != 'archived'))
    nb_rows = query.get_number_of_rows()
    rows = query.execute(fetchall=True)
    rows = v1_utils.format_result(rows, _TABLE.name, args['embed'],
                                  _EMBED_MANY)
    # Return only the component which have the export_control flag set to true
    #
    if user.is_not_super_admin():
        rows = [row for row in rows if row['export_control']]
    # NOTE(review): _meta.count is computed before the export_control filter,
    # so for non-admins it may exceed len(rows) -- confirm this is intended.
    return flask.jsonify({'components': rows, '_meta': {'count': nb_rows}})
python
def _draw_frame(self, framedata):
    """Reads, processes and draws the frames.

    If needed for color maps, conversions to gray scale are performed. In
    case the images are no color images and no custom color maps are
    defined, the colormap `gray` is applied.

    This function is called by TimedAnimation.

    Args:
        framedata: The frame data.
    """
    original = self.read_frame()
    if original is None:
        # Source exhausted: report and stop drawing.
        self.update_info(self.info_string(message='Finished.',
                                          frame=framedata))
        return
    if self.original is not None:
        # A separate "original" axes exists: process a copy so the
        # unprocessed image can still be displayed.
        processed = self.process_frame(original.copy())
        if self.cmap_original is not None:
            original = to_gray(original)
        elif not is_color_image(original):
            self.original.set_cmap('gray')
        self.original.set_data(original)
    else:
        processed = self.process_frame(original)
    if self.cmap_processed is not None:
        processed = to_gray(processed)
    elif not is_color_image(processed):
        self.processed.set_cmap('gray')
    if self.annotations:
        self.annotate(framedata)
    self.processed.set_data(processed)
    self.update_info(self.info_string(frame=framedata))
python
def _ixor(self, other):
    """Set self to the symmetric difference between the sets.

    Complexity:
        if isinstance(other, _basebag):
            This runs in O(other.num_unique_elements())
        else:
            This runs in O(len(other))

    Returns self (in-place operator protocol).
    """
    if isinstance(other, _basebag):
        # For bags the symmetric-difference multiplicity of an element is
        # the absolute difference of the two multiplicities.
        for elem, other_count in other.counts():
            count = abs(self.count(elem) - other_count)
            self._set_count(elem, count)
    else:
        # Let a = self.count(elem) and b = other.count(elem)
        # if a >= b then elem is removed from self b times leaving a - b
        # if a < b then elem is removed from self a times then added (b - a)
        # times leaving a - a + (b - a) = b - a
        for elem in other:
            try:
                self._increment_count(elem, -1)
            except ValueError:
                # Count was already zero: flip to adding instead.
                self._increment_count(elem, 1)
    return self
python
def update_cached_fields_pre_save(self, update_fields: list):
    """
    Call on pre_save signal for objects (to automatically refresh on save).

    Refreshes only when the instance already has a primary key and save()
    was called without an explicit field list (update_fields is None,
    despite the ``list`` annotation).

    :param update_fields: list of fields to update, or None for a full save
    """
    if self.id and update_fields is None:
        self.update_cached_fields(commit=False, exceptions=False)
python
def _evaluate(self,R,phi=0.,t=0.): """ NAME: _evaluate PURPOSE: evaluate the potential at R,phi,t INPUT: R - Galactocentric cylindrical radius phi - azimuth t - time OUTPUT: Phi(R,phi,t) HISTORY: 2011-10-19 - Started - Bovy (IAS) """ #Calculate relevant time if not self._tform is None: if t < self._tform: smooth= 0. elif t < self._tsteady: deltat= t-self._tform xi= 2.*deltat/(self._tsteady-self._tform)-1. smooth= (3./16.*xi**5.-5./8*xi**3.+15./16.*xi+.5) else: #fully on smooth= 1. else: smooth= 1. return smooth*self._twophio/2.*R**self._p\ *m.cos(2.*(phi-self._phib))
python
def uricompose(scheme=None, authority=None, path='', query=None,
               fragment=None, userinfo=None, host=None, port=None,
               querysep='&', encoding='utf-8'):
    """Compose a URI reference string from its individual components."""
    # RFC 3986 3.1: scheme names are case-insensitive but canonically
    # lowercase; accept upper case on input, produce lowercase output.
    if isinstance(scheme, bytes):
        scheme = _scheme(scheme)
    elif scheme is not None:
        scheme = _scheme(scheme.encode())
    # authority must be string type or three-item iterable
    if authority is None:
        authority = (None, None, None)
    elif isinstance(authority, bytes):
        authority = _AUTHORITY_RE_BYTES.match(authority).groups()
    elif isinstance(authority, _unicode):
        authority = _AUTHORITY_RE_UNICODE.match(authority).groups()
    # NOTE(review): collections.Iterable/Mapping moved to collections.abc in
    # Python 3.3 and the aliases were removed in 3.10 -- confirm the
    # supported interpreter range.
    elif not isinstance(authority, collections.Iterable):
        raise TypeError('Invalid authority type')
    elif len(authority) != 3:
        raise ValueError('Invalid authority length')
    # Explicit userinfo/host/port keyword arguments override the
    # corresponding parts parsed from `authority`.
    authority = _authority(
        userinfo if userinfo is not None else authority[0],
        host if host is not None else authority[1],
        port if port is not None else authority[2],
        encoding
    )
    # RFC 3986 3.3: with an authority the path must be empty or start with
    # "/"; without one it must not start with "//".
    path = uriencode(path, _SAFE_PATH, encoding)
    if authority is not None and path and not path.startswith(b'/'):
        raise ValueError('Invalid path with authority component')
    if authority is None and path.startswith(b'//'):
        raise ValueError('Invalid path without authority component')
    # RFC 3986 4.2: a first segment containing ":" in a relative-path
    # reference would be mistaken for a scheme; prefix "./" to disambiguate.
    if scheme is None and authority is None and not path.startswith(b'/'):
        if b':' in path.partition(b'/')[0]:
            path = b'./' + path
    # RFC 3986 3.4: "/" and "?" may represent data within the query
    # component, so they are left unencoded for usability.
    if isinstance(query, _strtypes):
        query = uriencode(query, _SAFE_QUERY, encoding)
    elif isinstance(query, collections.Mapping):
        query = _querydict(query, querysep, encoding)
    elif isinstance(query, collections.Iterable):
        query = _querylist(query, querysep, encoding)
    elif query is not None:
        raise TypeError('Invalid query type')
    # RFC 3986 3.5: "/" and "?" are likewise allowed to represent data
    # within the fragment identifier.
    if fragment is not None:
        fragment = uriencode(fragment, _SAFE_FRAGMENT, encoding)
    result = uriunsplit((scheme, authority, path, query, fragment))
    # always return platform `str` type
    return result if isinstance(result, str) else result.decode()
python
def _check_graph(self, graph): """the atomic numbers must match""" if graph.num_vertices != self.size: raise TypeError("The number of vertices in the graph does not " "match the length of the atomic numbers array.") # In practice these are typically the same arrays using the same piece # of memory. Just checking to be sure. if (self.numbers != graph.numbers).any(): raise TypeError("The atomic numbers in the graph do not match the " "atomic numbers in the molecule.")
python
def create(self, r, r_, R=200):
    '''Create new spirograph image with given arguments. Returned image
    is scaled to agent's preferred image size.
    '''
    # r and r_ are forwarded to give_dots together with the outer radius R;
    # presumably rolling-circle radius and pen offset -- confirm against
    # give_dots' signature.
    x, y = give_dots(R, r, r_, spins=20)
    xy = np.array([x, y]).T
    xy = np.array(np.around(xy), dtype=np.int64)
    # Keep only dots that fall inside the 500x500 canvas centred on the
    # origin, then shift them into array-index space.
    xy = xy[(xy[:, 0] >= -250) & (xy[:, 1] >= -250) &
            (xy[:, 0] < 250) & (xy[:, 1] < 250)]
    xy = xy + 250
    # White canvas with black dots.
    img = np.ones([500, 500], dtype=np.uint8)
    img[:] = 255
    img[xy[:, 0], xy[:, 1]] = 0
    # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 -- confirm
    # the pinned SciPy version or migrate to PIL/skimage resizing.
    img = misc.imresize(img, [self.img_size, self.img_size])
    # Normalize to floats in [0, 1].
    fimg = img / 255.0
    return fimg
java
static void createGeneratedFilesDirectory(String apiName) { File folder = new File(getDestinationDirectory(apiName)); if (!folder.exists()){ //noinspection ResultOfMethodCallIgnored folder.mkdirs(); } }
java
/**
 * Reports all blocks currently being written for the given namespace.
 *
 * @param namespaceId the namespace to query
 * @return an array with one entry per block being written
 * @throws IOException if the volume scan fails
 */
public Block[] getBlocksBeingWrittenReport(int namespaceId) throws IOException {
    LightWeightHashSet<Block> blockSet = new LightWeightHashSet<Block>();
    volumes.getBlocksBeingWrittenInfo(namespaceId, blockSet);
    // Copy the set into a plain array for the report.
    Block[] report = new Block[blockSet.size()];
    Iterator<Block> it = blockSet.iterator();
    for (int index = 0; it.hasNext(); index++) {
        report[index] = it.next();
    }
    return report;
}
java
/**
 * Deletes a role by its ID.
 *
 * @param cnx an open database connection
 * @param id the ID of the role to delete
 * @param force when {@code true}, first detach the role from every user;
 *        when {@code false}, refuse to delete a role still attributed to
 *        any user
 * @throws JqmAdminApiUserException if the role is still in use (and force
 *         is false), or if no role with the given ID exists
 */
public static void deleteRole(DbConn cnx, int id, boolean force) {
    if (force) {
        // Detach the role from all users so the delete below cannot fail
        // on dangling attributions.
        cnx.runUpdate("user_remove_role", id);
    } else {
        int userUsingRole = cnx.runSelectSingle("user_select_count_using_role", Integer.class, id);
        if (userUsingRole > 0) {
            cnx.setRollbackOnly();
            // Fixed message: "attribution of use" -> "attribution or use".
            throw new JqmAdminApiUserException(
                    "cannot delete a role currently attributed to a user. Remove role attribution or use force parameter.");
        }
    }
    QueryResult qr = cnx.runUpdate("role_delete_by_id", id);
    if (qr.nbUpdated != 1) {
        cnx.setRollbackOnly();
        throw new JqmAdminApiUserException("no item with ID " + id);
    }
}
python
def store(self, bank, key, data):
    '''
    Store data using the specified module

    :param bank: The name of the location inside the cache which will hold
        the key and its associated data.

    :param key: The name of the key (or file inside a directory) which will
        hold the data. File extensions should not be provided, as they will
        be added by the driver itself.

    :param data: The data which will be stored in the cache. This data
        should be in a format which can be serialized by
        msgpack/json/yaml/etc.

    :raises SaltCacheError: Raises an exception if cache driver detected an
        error accessing data in the cache backend (auth, permissions, etc).
    '''
    # Resolve the driver-specific store function and delegate to it.
    driver_fun = '{0}.store'.format(self.driver)
    store_impl = self.modules[driver_fun]
    return store_impl(bank, key, data, **self._kwargs)
java
/**
 * Describes the requested backup vault, applying the standard
 * pre-execution request handlers first.
 *
 * @param request the describe-backup-vault request
 * @return the service response
 */
@Override
public DescribeBackupVaultResult describeBackupVault(DescribeBackupVaultRequest request) {
    final DescribeBackupVaultRequest prepared = beforeClientExecution(request);
    return executeDescribeBackupVault(prepared);
}
python
def subkeys(self, path):
    """
    A generalized form that can return multiple subkeys.

    Yields one subkey per subpath expanded from ``path``.
    """
    for subpath in subpaths_for_path_range(path, hardening_chars="'pH"):
        yield self.subkey(subpath)
python
def cook_layout(layout, ajax): """Return main_template compatible layout""" # Fix XHTML layouts with CR[+LF] line endings layout = re.sub('\r', '\n', re.sub('\r\n', '\n', layout)) # Parse layout if isinstance(layout, six.text_type): result = getHTMLSerializer([layout.encode('utf-8')], encoding='utf-8') else: result = getHTMLSerializer([layout], encoding='utf-8') # Fix XHTML layouts with inline js (etree.tostring breaks all <![CDATA[) if '<![CDATA[' in layout: result.serializer = html.tostring # Wrap all panels with a metal:fill-slot -tag: all_slots = [] for layoutPanelNode in slotsXPath(result.tree): data_slots = layoutPanelNode.attrib['data-slots'] all_slots += wrap_append_prepend_slots(layoutPanelNode, data_slots) del layoutPanelNode.attrib['data-slots'] # When no slots are explicitly defined, try to inject the very default # slots if len(all_slots) == 0: for node in result.tree.xpath('//*[@data-panel="content"]'): wrap_append_prepend_slots( node, 'content > body header main * content-core') # Append implicit slots head = result.tree.getroot().find('head') if not ajax and head is not None: for name in ['top_slot', 'head_slot', 'style_slot', 'javascript_head_slot']: slot = etree.Element('{{{0:s}}}{1:s}'.format(NSMAP['metal'], name), nsmap=NSMAP) slot.attrib['define-slot'] = name head.append(slot) template = TEMPLATE metal = 'xmlns:metal="http://namespaces.zope.org/metal"' return (template % ''.join(result)).replace(metal, '')
python
def has_attribute_type(self, attribute: str, typ: Type) -> bool:
    """Whether the given attribute exists and has a compatible type.

    Returns true iff the attribute exists and is an instance of \
    the given type. Matching between types passed as typ and \
    yaml node types is as follows:

    +---------+-------------------------------------------+
    | typ     | yaml                                      |
    +=========+===========================================+
    | str     | ScalarNode containing string              |
    +---------+-------------------------------------------+
    | int     | ScalarNode containing int                 |
    +---------+-------------------------------------------+
    | float   | ScalarNode containing float               |
    +---------+-------------------------------------------+
    | bool    | ScalarNode containing bool                |
    +---------+-------------------------------------------+
    | None    | ScalarNode containing null                |
    +---------+-------------------------------------------+
    | list    | SequenceNode                              |
    +---------+-------------------------------------------+
    | dict    | MappingNode                               |
    +---------+-------------------------------------------+

    Args:
        attribute: The name of the attribute to check.
        typ: The type to check against.

    Returns:
        True iff the attribute exists and matches the type.
    """
    if not self.has_attribute(attribute):
        return False
    attr_node = self.get_attribute(attribute).yaml_node
    # Scalars are matched by their YAML tag; containers by node class.
    if typ in scalar_type_to_tag:
        tag = scalar_type_to_tag[typ]
        return attr_node.tag == tag
    elif typ == list:
        return isinstance(attr_node, yaml.SequenceNode)
    elif typ == dict:
        return isinstance(attr_node, yaml.MappingNode)
    raise ValueError('Invalid argument for typ attribute')
java
/**
 * Computes the population count (number of set bits) over a packed bit
 * vector represented as an array of 64-bit words.
 *
 * @param v the bit vector words
 * @return the total number of one-bits across all words
 */
public static int cardinality(long[] v) {
    int total = 0;
    for (long word : v) {
        total += Long.bitCount(word);
    }
    return total;
}
java
/**
 * Detaches the adapter and layout manager from the content view and
 * releases the reference. Safe to call when already unbound.
 */
public void unbindView() {
    if (mContentView == null) {
        return;
    }
    mContentView.setAdapter(null);
    mContentView.setLayoutManager(null);
    mContentView = null;
}
python
def register_entity_to_group(self, entity, group):
    '''
    Add entity to a group. If group does not exist, entity will be
    added as first member

    entity is of type Entity
    group is a string that is the name of the group

    Raises UnmanagedEntityError if the entity is not registered in
    self._entities.
    '''
    if entity not in self._entities:
        raise UnmanagedEntityError(entity)
    self._groups.setdefault(group, []).append(entity)
python
def batch_keep_absolute_retrain__r2(X, y, model_generator, method_name, num_fcounts=11):
    """ Batch Keep Absolute (retrain)
    xlabel = "Fraction of features kept"
    ylabel = "R^2"
    transform = "identity"
    sort_order = 13
    """
    # Thin wrapper: scores the keep-and-retrain benchmark with R^2.
    # The key = value lines in the docstring look like metadata consumed by
    # the benchmark plotting machinery -- presumably parsed verbatim, so do
    # not reword them (confirm against the plot generator).
    return __run_batch_abs_metric(measures.batch_keep_retrain, X, y,
                                  model_generator, method_name,
                                  sklearn.metrics.r2_score, num_fcounts)
java
/**
 * Merges adjacent German sentences that were (supposedly) wrongfully split
 * by the sentence segmenter. Two sentences are merged when the last two
 * tokens of the first and the first token of the second match one of the
 * configured POS-tag rules, or when the second sentence starts with a
 * lower-case character. Merged sentences replace the originals in the CAS.
 *
 * @param jcas the CAS whose Sentence annotations are revised in place
 */
private void improveGermanSentences(JCas jcas) {
    /*
     * these POS tag sequences will decide whether we want to merge two sentences
     * that have (supposedly wrongfully) been split.
     */
    HashSet<String[]> posRules = new HashSet<String[]>();
    posRules.add(new String[] {"CARD", "\\$.", "NN"});
    posRules.add(new String[] {"CARD", "\\$.", "NE"});
    FSIterator sentIter = jcas.getAnnotationIndex(Sentence.type).iterator();
    // compare two sentences at a time in order to have access to all POS tags
    HashSet<HashSet<Sentence>> toMerge = new HashSet<HashSet<Sentence>>();
    Sentence prevSent = null, thisSent = null;
    while(sentIter.hasNext()) {
        if(thisSent == null) {
            // first iteration: only seed the sliding window
            thisSent = (Sentence) sentIter.next();
            continue;
        }
        prevSent = thisSent;
        thisSent = (Sentence) sentIter.next();
        /*
         * select the last two tokens within the previous sentence as well as the
         * first of the current one and check for matches.
         */
        Token penultimateToken = null, ultimateToken = null, firstToken = null;
        FSIterator tokIter = jcas.getAnnotationIndex(Token.type).subiterator(thisSent);
        if(tokIter.hasNext()) {
            firstToken = (Token) tokIter.next();
        }
        tokIter = jcas.getAnnotationIndex(Token.type).subiterator(prevSent);
        while(tokIter.hasNext()) {
            if(ultimateToken == null) {
                ultimateToken = (Token) tokIter.next();
                continue;
            }
            penultimateToken = ultimateToken;
            ultimateToken = (Token) tokIter.next();
        }
        // check that all tokens for further analysis are present. if not: skip
        if(penultimateToken == null || ultimateToken == null || firstToken == null) {
            continue;
        }
        // check rules, memorize sentences to be merged
        for(String[] posRule : posRules) {
            /*
             * either one of the pre-defined POS rules fit, or the first token's
             * covered text begins with lower case characters.
             */
            if((penultimateToken.getPos() != null && penultimateToken.getPos().matches(posRule[0])
                    && ultimateToken.getPos() != null && ultimateToken.getPos().matches(posRule[1])
                    && firstToken.getPos() != null && firstToken.getPos().matches(posRule[2]))
                    || (firstToken.getCoveredText().matches("^[a-z/].*"))) {
                /*
                 * check whether one of the previous candidate pairs already
                 * contains one of our sentences.
                 */
                Boolean candidateExisted = false;
                for(HashSet<Sentence> mergeCandidate : toMerge) {
                    if(mergeCandidate.contains(thisSent) || mergeCandidate.contains(prevSent)) {
                        // we add both here because sets ignore duplicates
                        mergeCandidate.add(prevSent);
                        mergeCandidate.add(thisSent);
                        candidateExisted = true;
                        break;
                    }
                }
                /*
                 * if one of the sentences was not already to be merged with another,
                 * create a new merge candidate set
                 */
                if(!candidateExisted) {
                    HashSet<Sentence> newCandidate = new HashSet<Sentence>();
                    newCandidate.add(prevSent);
                    newCandidate.add(thisSent);
                    toMerge.add(newCandidate);
                }
                break; // don't need to do the next rules; already merging.
            }
        }
    }
    // iterate over the previously collected merge candidates
    for(HashSet<Sentence> mergeCandidate : toMerge) {
        // find the earliest beginning and latest end for the set of sentences
        Integer beginIndex = Integer.MAX_VALUE, endIndex = Integer.MIN_VALUE;
        Sentence mergedSent = new Sentence(jcas);
        for(Sentence s : mergeCandidate) {
            if(s.getBegin() < beginIndex) {
                beginIndex = s.getBegin();
            }
            if(s.getEnd() > endIndex) {
                endIndex = s.getEnd();
            }
            s.removeFromIndexes();
        }
        // set values, add to jcas
        mergedSent.setBegin(beginIndex);
        mergedSent.setEnd(endIndex);
        mergedSent.addToIndexes();
    }
}
java
/**
 * Parses one OBJ face definition line and appends the resulting Face to
 * {@code faces}. Each vertex reference may carry optional texture (t) and
 * normal (n) indices; referenced vertices are copied so per-face UVs do
 * not alias the shared vertex list.
 *
 * @param data the face data after the "f" keyword
 */
private void addFace(String data) {
    matcher = facePattern.matcher(data);
    List<Vertex> faceVertex = new ArrayList<>();
    List<Vector> faceNormals = new ArrayList<>();
    int v = 0, t = 0, n = 0;
    String strV, strT, strN;
    Vertex vertex, vertexCopy;
    UV uv = null;
    Vector normal;
    while (matcher.find()) {
        normal = null;
        uv = null;
        strV = matcher.group("v");
        strT = matcher.group("t");
        strN = matcher.group("n");
        v = Integer.parseInt(strV);
        // NOTE(review): for negative (relative) OBJ indices this resolves to
        // size - v - 1; the OBJ convention defines -1 as the LAST element
        // (size + v), so this looks off by one past the end -- confirm with
        // a model that actually uses relative indices.
        vertex = vertexes.get(v > 0 ? v - 1 : vertexes.size() - v - 1);
        if (vertex != null) {
            vertexCopy = new Vertex(vertex);
            if (strT != null) {
                t = Integer.parseInt(strT);
                // Same negative-index concern as for the vertex lookup above.
                uv = uvs.get(t > 0 ? t - 1 : uvs.size() - t - 1);
                if (uv != null)
                    vertexCopy.setUV(uv.u, uv.v);
            }
            faceVertex.add(vertexCopy);
            if (strN != null) {
                n = Integer.parseInt(strN);
                n = n > 0 ? n - 1 : normals.size() - n - 1;
                if (n >= 0 && n < normals.size())
                    normal = normals.get(n);
                if (normal != null)
                    faceNormals.add(new Vector(normal.x, normal.y, normal.z));
            }
        } else {
            MalisisCore.log.error("[ObjFileImporter] Wrong vertex reference {} for face at line {} :\n{}", v, lineNumber, currentLine);
        }
    }
    Face f = new Face(faceVertex);
    f.deductParameters(faceNormals.toArray(new Vector[0]));
    RenderParameters params = f.getParameters();
    // Flip U on north/east faces so textures are not mirrored.
    if (params.direction.get() == EnumFacing.NORTH || params.direction.get() == EnumFacing.EAST)
        params.flipU.set(true);
    params.renderAllFaces.set(true);
    params.interpolateUV.set(false);
    params.calculateAOColor.set(false);
    //params.useEnvironmentBrightness.set(false);
    faces.add(f);
}
python
def rmi(self, force=False, via_name=False):
    """
    remove this image

    :param force: bool, force removal of the image (currently ignored --
        the file is always removed)
    :param via_name: bool, refer to the image via name, if false, refer
        via ID, not used now
    :return: None
    """
    # NOTE(review): both parameters are accepted for interface compatibility
    # but have no effect; removal is a plain file delete.
    return os.remove(self.local_location)
java
/**
 * Dispatches each visited opcode to the matching handler and, for
 * INVOKESPECIAL, tags the resulting top-of-stack item with the detected
 * user value so later opcodes can reason about it.
 *
 * @param seen the opcode that was just visited
 */
@Override
public void sawOpcode(int seen) {
    IOIUserValue uvSawType = null;
    try {
        switch (seen) {
            case Const.INVOKESPECIAL:
                // Only this handler produces a user value to attach below.
                uvSawType = processInvokeSpecial();
            break;
            case Const.INVOKESTATIC:
                processInvokeStatic();
            break;
            case Const.INVOKEVIRTUAL:
                processInvokeVirtual();
            break;
            case Const.ASTORE:
            case Const.ASTORE_0:
            case Const.ASTORE_1:
            case Const.ASTORE_2:
            case Const.ASTORE_3:
                processAStore(seen);
            break;
            default:
            break;
        }
    } catch (ClassNotFoundException cnfe) {
        bugReporter.reportMissingClass(cnfe);
    } finally {
        // Keep the shadow stack in sync regardless of handler outcome,
        // then tag the value the opcode just pushed (if any).
        stack.sawOpcode(this, seen);
        if ((uvSawType != null) && (stack.getStackDepth() > 0)) {
            OpcodeStack.Item itm = stack.getStackItem(0);
            itm.setUserValue(uvSawType);
        }
    }
}
python
def parse_lines(self, file, boundary, content_length):
    """Generate parts of
    ``('begin_form', (headers, name))``
    ``('begin_file', (headers, name, filename))``
    ``('cont', bytestring)``
    ``('end', None)``

    Always obeys the grammar
    parts = ( begin_form cont* end |
              begin_file cont* end )*
    """
    next_part = b'--' + boundary
    last_part = next_part + b'--'
    iterator = chain(make_line_iter(file, limit=content_length,
                                    buffer_size=self.buffer_size),
                     _empty_string_iter)
    terminator = self._find_terminator(iterator)
    if terminator == last_part:
        # Empty multipart body: closing boundary came first.
        return
    elif terminator != next_part:
        self.fail('Expected boundary at start of multipart data')
    while terminator != last_part:
        headers = parse_multipart_headers(iterator)
        disposition = headers.get('content-disposition')
        if disposition is None:
            self.fail('Missing Content-Disposition header')
        disposition, extra = parse_options_header(disposition)
        transfer_encoding = self.get_part_encoding(headers)
        name = extra.get('name')
        filename = extra.get('filename')
        # if no content type is given we stream into memory.  A list is
        # used as a temporary container.
        if filename is None:
            yield _begin_form, (headers, name)
        # otherwise we parse the rest of the headers and ask the stream
        # factory for something we can write in.
        else:
            yield _begin_file, (headers, name, filename)
        buf = b''
        for line in iterator:
            if not line:
                self.fail('unexpected end of stream')
            # Boundary lines start with "--"; only then pay for rstrip.
            if line[:2] == b'--':
                terminator = line.rstrip()
                if terminator in (next_part, last_part):
                    break
            if transfer_encoding is not None:
                if transfer_encoding == 'base64':
                    transfer_encoding = 'base64_codec'
                try:
                    line = codecs.decode(line, transfer_encoding)
                except Exception:
                    self.fail('could not decode transfer encoded chunk')
            # we have something in the buffer from the last iteration.
            # this is usually a newline delimiter.
            if buf:
                yield _cont, buf
                buf = b''
            # If the line ends with windows CRLF we write everything except
            # the last two bytes.  In all other cases however we write
            # everything except the last byte.  If it was a newline, that's
            # fine, otherwise it does not matter because we will write it
            # the next iteration.  this ensures we do not write the
            # final newline into the stream.  That way we do not have to
            # truncate the stream.  However we do have to make sure that
            # if something else than a newline is in there we write it
            # out.
            if line[-2:] == b'\r\n':
                buf = b'\r\n'
                cutoff = -2
            else:
                buf = line[-1:]
                cutoff = -1
            yield _cont, line[:cutoff]
        else:  # pragma: no cover
            raise ValueError('unexpected end of part')
        # if we have a leftover in the buffer that is not a newline
        # character we have to flush it, otherwise we will chop of
        # certain values.
        if buf not in (b'', b'\r', b'\n', b'\r\n'):
            yield _cont, buf
        yield _end, None
java
public void executeMethods(final List<Method> methods) throws Exception { for (final Method method : methods) { // TODO - curious about the findSuitableInstancesOf ? won't // method.getDeclaringClass be ok?? for (final Object object : findSuitableInstancesOf(method.getDeclaringClass())) { method.invoke(object); } } }
java
/**
 * Synchronously confirms a PaymentIntent by delegating to the API handler.
 * Blocking call: do not invoke on a UI thread.
 *
 * @param paymentIntentParams the confirmation parameters
 * @param publishableKey the publishable API key to authenticate with
 * @return the confirmed PaymentIntent, or {@code null} if the API returned none
 * @throws AuthenticationException if the key is rejected
 * @throws InvalidRequestException if the request parameters are invalid
 * @throws APIConnectionException if the connection to the API fails
 * @throws APIException for other API-level errors
 */
@Nullable
public PaymentIntent confirmPaymentIntentSynchronous(
        @NonNull PaymentIntentParams paymentIntentParams,
        @NonNull String publishableKey)
        throws AuthenticationException,
        InvalidRequestException,
        APIConnectionException,
        APIException {
    return mApiHandler.confirmPaymentIntent(
            paymentIntentParams,
            publishableKey,
            mStripeAccount
    );
}
python
def draw(self, **kwargs): """ Called from the fit method, this method creates the canvas and draws the distribution plot on it. Parameters ---------- kwargs: generic keyword arguments. """ # Prepare the data bins = np.arange(self.N) words = [self.features[i] for i in self.sorted_[:self.N]] freqs = {} # Set up the bar plots if self.conditional_freqdist_: for label, values in sorted(self.conditional_freqdist_.items(), key=itemgetter(0)): freqs[label] = [ values[i] for i in self.sorted_[:self.N] ] else: freqs['corpus'] = [ self.freqdist_[i] for i in self.sorted_[:self.N] ] # Draw a horizontal barplot if self.orient == 'h': # Add the barchart, stacking if necessary for label, freq in freqs.items(): self.ax.barh(bins, freq, label=label, align='center') # Set the y ticks to the words self.ax.set_yticks(bins) self.ax.set_yticklabels(words) # Order the features from top to bottom on the y axis self.ax.invert_yaxis() # Turn off y grid lines and turn on x grid lines self.ax.yaxis.grid(False) self.ax.xaxis.grid(True) # Draw a vertical barplot elif self.orient == 'v': # Add the barchart, stacking if necessary for label, freq in freqs.items(): self.ax.bar(bins, freq, label=label, align='edge') # Set the y ticks to the words self.ax.set_xticks(bins) self.ax.set_xticklabels(words, rotation=90) # Turn off x grid lines and turn on y grid lines self.ax.yaxis.grid(True) self.ax.xaxis.grid(False) # Unknown state else: raise YellowbrickValueError( "Orientation must be 'h' or 'v'" )
python
def get_sentence(sentence_id=None):
    """Retrieve a randomly-generated sentence as a unicode string.

    :param sentence_id: Allows you to optionally specify an integer
    representing the sentence_id from the database table. This allows you
    to retrieve a specific sentence each time, albeit with different
    keywords."""
    counts = __get_table_limits()
    result = None
    id_ = 0
    # Coerce/validate the requested id; anything non-numeric falls through
    # to a random id.
    try:
        if isinstance(sentence_id, int):
            id_ = sentence_id
        elif isinstance(sentence_id, float):
            print("""ValueError: Floating point number detected. Rounding number to 0 decimal places.""")
            id_ = round(sentence_id)
        else:
            id_ = random.randint(1, counts['max_sen'])
    except ValueError:
        print("ValueError: Incorrect parameter type detected.")
    # Clamp out-of-range ids to the highest existing sentence id.
    if id_ <= counts['max_sen']:
        sentence = __get_sentence(counts, sentence_id=id_)
    else:
        print("""ValueError: Parameter integer is too high. Maximum permitted value is {0}.""".format(str(counts['max_sen'])))
        id_ = counts['max_sen']
        sentence = __get_sentence(counts, sentence_id=id_)
    if sentence is not None:
        # NOTE(review): sentence[0] appears to be a 'y'/'n' usable-flag;
        # rows flagged 'n' are re-rolled. With a fixed id this loop relies
        # on __get_sentence(counts, None) picking a new random row --
        # confirm it cannot spin forever if every row is flagged 'n'.
        while sentence[0] == 'n':
            if id_ is not None:
                # here we delibrately pass 'None' to __getsentence__ as it will
                sentence = __get_sentence(counts, None)
            else:
                sentence = __get_sentence(counts, id_)
        if sentence[0] == 'y':
            result = __process_sentence(sentence, counts)
        # NOTE(review): falls through and returns None when the flag is
        # neither 'y' nor caught above -- confirm intended.
        return result
    else:
        print('ValueError: _sentence cannot be None.')
python
def parseString( self, instring, parseAll=False ):
    """Execute the parse expression with the given string.
       This is the main interface to the client code, once the complete
       expression has been built.

       If you want the grammar to require that the entire input string be
       successfully parsed, then set parseAll to True (equivalent to ending
       the grammar with StringEnd()).

       Note: parseString implicitly calls expandtabs() on the input string,
       in order to report proper column numbers in parse actions.
       If the input string contains tabs and
       the grammar uses parse actions that use the loc argument to index into the
       string being parsed, you can ensure you have a consistent view of the input
       string by:
       - calling parseWithTabs on your grammar before calling parseString
         (see L{I{parseWithTabs}<parseWithTabs>})
       - define your parse action using the full (s,loc,toks) signature, and
         reference the input string using the parse action's s argument
       - explictly expand the tabs in your input string before calling
         parseString
    """
    # Packrat/result caches are per-parse; clear stale entries first.
    ParserElement.resetCache()
    if not self.streamlined:
        self.streamline()
        #~ self.saveAsList = True
    for e in self.ignoreExprs:
        e.streamline()
    # Expand tabs so parse-action column numbers are consistent, unless
    # the grammar opted out via parseWithTabs().
    if not self.keepTabs:
        instring = instring.expandtabs()
    loc, tokens = self._parse( instring, 0 )
    if parseAll:
        # Require the whole input to have been consumed.
        StringEnd()._parse( instring, loc )
    return tokens
java
/**
 * Inserts a single unsigned-char value into the attribute value, setting
 * the read dimensions to a 1x0 scalar.
 *
 * @param argin the byte value to insert
 */
public void insert_uc(final byte argin) {
    final byte[] values = new byte[]{argin};
    attrval.r_dim.dim_x = 1;
    attrval.r_dim.dim_y = 0;
    DevVarCharArrayHelper.insert(attrval.value, values);
}
java
/**
 * Gets a single issue board, wrapped in an Optional that is empty when the
 * underlying call throws a GitLabApiException.
 *
 * @param projectIdOrPath the project identifier; accepted forms are
 *        whatever getBoard() accepts -- presumably ID, path, or Project
 *        instance (confirm against getBoard's contract)
 * @param boardId the ID of the board to fetch
 * @return an Optional holding the board, or empty on API failure
 */
public Optional<Board> getOptionalBoard(Object projectIdOrPath, Integer boardId) {
    try {
        return (Optional.ofNullable(getBoard(projectIdOrPath, boardId)));
    } catch (GitLabApiException glae) {
        return (GitLabApi.createOptionalFromException(glae));
    }
}
java
public List<PrivacyItem> setPrivacyList(String listName, List<PrivacyItem> listItem) { // Add new list to the itemLists this.getItemLists().put(listName, listItem); return listItem; }
python
def use_plenary_proficiency_view(self):
    """Pass through to provider ProficiencyLookupSession.use_plenary_proficiency_view"""
    # Remember the chosen view so later-created sessions pick it up too.
    self._object_views['proficiency'] = PLENARY
    # self._get_provider_session('proficiency_lookup_session') # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_plenary_proficiency_view()
        except AttributeError:
            # Not every provider session supports this view; skip those.
            pass
python
def stream_replicate():
    """Monitor changes in approximately real-time and replicate them.

    Runs forever; consumes the primary table's stream and mirrors each
    change into the replica table.
    """
    stream = primary.stream(SomeDataBlob, "trim_horizon")
    next_heartbeat = pendulum.now()
    while True:
        now = pendulum.now()
        # Heartbeat at most every 10 minutes to keep the stream iterator alive.
        if now >= next_heartbeat:
            stream.heartbeat()
            next_heartbeat = now.add(minutes=10)

        record = next(stream)
        if record is None:
            # No new event yet; poll again.
            continue
        # With a new image present, upsert it; otherwise treat the event as
        # a delete of the old image.
        if record["new"] is not None:
            replica.save(record["new"])
        else:
            replica.delete(record["old"])
python
def add_errback(self, fn, *args, **kwargs):
    """
    Like :meth:`.add_callback()`, but handles error cases.
    An Exception instance will be passed as the first positional argument
    to `fn`.

    Returns self, so calls can be chained.
    """
    run_now = False
    with self._callback_lock:
        # Always add fn to self._errbacks, even when we're about to execute
        # it, to prevent races with functions like start_fetching_next_page
        # that reset _final_exception
        self._errbacks.append((fn, args, kwargs))
        if self._final_exception:
            run_now = True
    # Invoke outside the lock so fn cannot deadlock against _callback_lock.
    if run_now:
        fn(self._final_exception, *args, **kwargs)
    return self
java
/**
 * Gets the feedStatus value for this object.
 *
 * @return the current feed status; may be {@code null} when unset
 */
public com.google.api.ads.adwords.axis.v201809.cm.FeedStatus getFeedStatus() {
    return feedStatus;
}
python
def commandlineargs(self):
    """Obtain a string of all parameters, using the paramater flags they
    were defined with, in order to pass to an external command. This is
    shell-safe by definition."""
    args = []
    for parametergroup, parameters in self.parameters: #pylint: disable=unused-variable
        for parameter in parameters:
            compiled = parameter.compilearg()
            # Skip parameters that compile to an empty argument.
            if compiled:
                args.append(compiled)
    return " ".join(args)
python
def from_dict(cls, d):
    """
    Create Site from dict representation

    :param d: dict with "species" (list of species dicts with "element",
        "occu" and optionally "oxidation_state"), "xyz" coordinates and
        optional "properties".
    :return: a new Site instance
    """
    atoms_n_occu = {}
    for sp_occu in d["species"]:
        # Pick the species class from the payload: valid element symbol
        # plus oxidation state -> Specie; oxidation state with a
        # non-element symbol -> DummySpecie; otherwise a plain Element.
        if "oxidation_state" in sp_occu and Element.is_valid_symbol(
                sp_occu["element"]):
            sp = Specie.from_dict(sp_occu)
        elif "oxidation_state" in sp_occu:
            sp = DummySpecie.from_dict(sp_occu)
        else:
            sp = Element(sp_occu["element"])
        atoms_n_occu[sp] = sp_occu["occu"]
    props = d.get("properties", None)
    return cls(atoms_n_occu, d["xyz"], properties=props)
python
def execute_after_delay(time_s, func, *args, **kwargs):
    """A function that executes the given function after a delay.

    Executes func in a separate thread after a delay, so that this function
    returns immediately.  Note that any exceptions raised by func will be
    ignored (but logged).  Also, if time_s is a PolledTimeout with no
    expiration, then this method simply returns immediately and does nothing.

    Args:
      time_s: Delay in seconds to wait before executing func, may be a
        PolledTimeout object.
      func: Function to call.
      *args: Arguments to call the function with.
      **kwargs: Keyword arguments to call the function with.
    """
    timeout = PolledTimeout.from_seconds(time_s)

    def target():
        # Wait out whatever remains of the timeout, then invoke func.
        time.sleep(timeout.remaining)
        try:
            func(*args, **kwargs)
        except Exception:  # pylint: disable=broad-except
            _LOG.exception('Error executing %s after %s expires.', func, timeout)

    # remaining is None for a timeout that never expires: do nothing.
    if timeout.remaining is not None:
        thread = threading.Thread(target=target)
        thread.start()
python
def get_keys_for(self, value, include_uncommitted=False):
    """Get keys for a given value.

    :param value: The value to look for
    :type value: object
    :param include_uncommitted: Include uncommitted values in results
    :type include_uncommitted: bool
    :return: The keys for the given value
    :rtype: list(str)
    """
    if not include_uncommitted:
        return super(TransactionalIndex, self).get_keys_for(value)
    else:
        # Merge committed keys with keys pending in the current
        # transaction's reverse-add cache for this value's hash.
        keys = super(TransactionalIndex, self).get_keys_for(value)
        hash_value = self.get_hash_for(value)
        keys += self._reverse_add_cache[hash_value]
        return keys
python
def pvalues(self):
    """Association p-value for candidate markers.

    Computes a likelihood-ratio statistic per candidate marker,
    ``lrs = 2 * (alt_lml - null_lml)``, and evaluates its survival
    function under a chi-square distribution with one degree of freedom.

    Returns
    -------
    numpy.ndarray
        One p-value per candidate marker.
    """
    from scipy.stats import chi2

    self.compute_statistics()

    lml_alts = self.alt_lmls()
    lml_null = self.null_lml()

    # Likelihood-ratio statistics (same value as -2*null + 2*alt).
    lrs = 2 * asarray(lml_alts) - 2 * lml_null

    # Fix: the original rebound the imported name ``chi2`` to the frozen
    # distribution, shadowing the module-level import.
    chi2_df1 = chi2(df=1)
    return chi2_df1.sf(lrs)
python
def get_certs(context=_DEFAULT_CONTEXT, store=_DEFAULT_STORE):
    '''
    Get the available certificates in the given store.

    :param str context: The name of the certificate store location context.
    :param str store: The name of the certificate store.

    :return: A dictionary of the certificate thumbprints and properties.
    :rtype: dict

    CLI Example:

    .. code-block:: bash

        salt '*' win_pki.get_certs
    '''
    ret = dict()
    cmd = list()
    # DnsNameList needs special handling below, so it is excluded from the
    # generic key copy.
    blacklist_keys = ['DnsNameList']
    store_path = r'Cert:\{0}\{1}'.format(context, store)

    _validate_cert_path(name=store_path)

    cmd.append(r"Get-ChildItem -Path '{0}' | Select-Object".format(store_path))
    cmd.append(' DnsNameList, SerialNumber, Subject, Thumbprint, Version')

    # Run the assembled PowerShell pipeline and parse its JSON output.
    items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True)

    for item in items:
        cert_info = dict()
        for key in item:
            if key not in blacklist_keys:
                # Normalize property names to lowercase keys.
                cert_info[key.lower()] = item[key]

        names = item.get('DnsNameList', None)
        if isinstance(names, list):
            cert_info['dnsnames'] = [name.get('Unicode') for name in names]
        else:
            cert_info['dnsnames'] = []
        ret[item['Thumbprint']] = cert_info
    return ret
java
/**
 * Repositions the point-of-view indicator on the overview map so it tracks
 * the currently visible bounds of the target map: a reticle image is shown
 * for very small extents (width below 20 view pixels), a rectangle otherwise.
 */
private void updatePov() {
    MapView mapView = getMapModel().getMapView();
    WorldViewTransformer transformer = new WorldViewTransformer(mapView);
    Bbox targetBox = targetMap.getMapModel().getMapView().getBounds();
    Bbox overviewBox = mapView.getBounds();

    // check if bounds are valid
    if (Double.isNaN(overviewBox.getX())) {
        return;
    }

    // zoom if current view is too small
    if (dynamicOverview && !overviewBox.contains(targetBox)) {
        // mapView.applyBounds(overviewBox.union(targetBox), MapView.ZoomOption.LEVEL_FIT);
        // super.onMapViewChanged(null);
    }

    // calculate boxSize
    Coordinate viewBegin = transformer.worldToView(targetBox.getOrigin());
    Coordinate viewEnd = transformer.worldToView(targetBox.getEndPoint());

    double width = Math.abs(viewEnd.getX() - viewBegin.getX());
    double height = Math.abs(viewEnd.getY() - viewBegin.getY());
    viewBegin.setY(viewBegin.getY() - height);

    // show recticle or box
    if (width < 20) {
        if (null != targetRectangle) {
            render(targetRectangle, RenderGroup.SCREEN, RenderStatus.DELETE);
            targetRectangle = null;
        }
        if (null == targetReticle) {
            targetReticle = new Image("targetReticle");
            targetReticle.setHref(Geomajas.getIsomorphicDir() + TARGET_RETICLE_IMAGE);
            targetReticle.setBounds(new Bbox(0, 0, 21, 21));
        }
        double x = viewBegin.getX() + (width / 2) - 10;
        // NOTE(review): the vertical offset also uses width / 2 — this looks
        // like it should be height / 2; confirm the intended centering.
        double y = viewBegin.getY() + (width / 2) - 10;
        targetReticle.getBounds().setX(x);
        targetReticle.getBounds().setY(y);
        render(targetReticle, RenderGroup.SCREEN, RenderStatus.UPDATE);

    } else {
        if (null != targetReticle) {
            render(targetReticle, RenderGroup.SCREEN, RenderStatus.DELETE);
            targetReticle = null;
        }
        if (null == targetRectangle) {
            targetRectangle = new Rectangle("targetRect");
            targetRectangle.setStyle(rectangleStyle);
        }
        targetRectangle.setBounds(new Bbox(viewBegin.getX(), viewBegin.getY(), width, height));
        render(targetRectangle, RenderGroup.SCREEN, RenderStatus.UPDATE);
    }
}
java
protected void setColumnVisibilities() { m_colVisibilities = new HashMap<Integer, Boolean>(16); // set explorer configurable column visibilities int preferences = new CmsUserSettings(getCms()).getExplorerSettings(); setColumnVisibility(CmsUserSettings.FILELIST_TITLE, preferences); setColumnVisibility(CmsUserSettings.FILELIST_TYPE, preferences); setColumnVisibility(CmsUserSettings.FILELIST_SIZE, preferences); setColumnVisibility(CmsUserSettings.FILELIST_PERMISSIONS, preferences); setColumnVisibility(CmsUserSettings.FILELIST_DATE_LASTMODIFIED, preferences); setColumnVisibility(CmsUserSettings.FILELIST_USER_LASTMODIFIED, preferences); setColumnVisibility(CmsUserSettings.FILELIST_DATE_CREATED, preferences); setColumnVisibility(CmsUserSettings.FILELIST_USER_CREATED, preferences); setColumnVisibility(CmsUserSettings.FILELIST_DATE_RELEASED, preferences); setColumnVisibility(CmsUserSettings.FILELIST_DATE_EXPIRED, preferences); setColumnVisibility(CmsUserSettings.FILELIST_STATE, preferences); setColumnVisibility(CmsUserSettings.FILELIST_LOCKEDBY, preferences); // set explorer no configurable column visibilities m_colVisibilities.put(new Integer(LIST_COLUMN_TYPEICON.hashCode()), Boolean.TRUE); m_colVisibilities.put(new Integer(LIST_COLUMN_LOCKICON.hashCode()), Boolean.TRUE); m_colVisibilities.put(new Integer(LIST_COLUMN_PROJSTATEICON.hashCode()), Boolean.TRUE); m_colVisibilities.put(new Integer(LIST_COLUMN_NAME.hashCode()), Boolean.TRUE); m_colVisibilities.put(new Integer(LIST_COLUMN_EDIT.hashCode()), Boolean.FALSE); m_colVisibilities.put( new Integer(LIST_COLUMN_SITE.hashCode()), Boolean.valueOf(OpenCms.getSiteManager().getSites().size() > 1)); }
java
/**
 * Starts a CREATE KEYSPACE query.
 *
 * @param keyspaceName the keyspace name, parsed with
 *     {@link CqlIdentifier#fromCql(String)} so quoted/case-sensitive
 *     identifiers are honored
 * @return the next step of the fluent builder
 */
@NonNull
public static CreateKeyspaceStart createKeyspace(@NonNull String keyspaceName) {
    return createKeyspace(CqlIdentifier.fromCql(keyspaceName));
}
python
def get_events(
    self, obj_id, search='', include_events=None, exclude_events=None,
    order_by='', order_by_dir='ASC', page=1
):
    """
    Get a list of a contact's engagement events

    :param obj_id: int Contact ID
    :param search: str
    :param include_events: list|tuple
    :param exclude_events: list|tuple
    :param order_by: str
    :param order_by_dir: str
    :param page: int
    :return: dict|str
    """
    # Fall back to empty filter lists when none are supplied.
    params = {
        'search': search,
        'includeEvents': [] if include_events is None else include_events,
        'excludeEvents': [] if exclude_events is None else exclude_events,
        'orderBy': order_by,
        'orderByDir': order_by_dir,
        'page': page,
    }
    url = '{url}/{id}/events'.format(url=self.endpoint_url, id=obj_id)
    response = self._client.session.get(url, params=params)
    return self.process_response(response)
java
/**
 * Creates one {@code eval} child element for every supplied value.
 *
 * @param values expressions to evaluate; the call is a no-op when {@code null}
 * @return this element, for call chaining
 */
public PartialResponseChangesType<T> eval(String ... values) {
    if (values != null) {
        for(String name: values) {
            childNode.createChild("eval").text(name);
        }
    }
    return this;
}
python
def tv_distance(a, b):
    '''Get the Total Variation (TV) distance between two densities a and b.'''
    diff = np.abs(a - b)
    # 1-D inputs reduce to a scalar; 2-D inputs are reduced row-wise.
    return np.sum(diff) if a.ndim == 1 else np.sum(diff, axis=1)
python
def remove_escalation_policy(self, escalation_policy, **kwargs):
    """Remove an escalation policy from this team.

    :param escalation_policy: an ``Entity`` (its ``id`` is used) or the
        policy id as a string
    :param kwargs: extra query parameters forwarded to the API request
    :return: the response of the DELETE request
    """
    # Accept either a full Entity or a bare id string.
    if isinstance(escalation_policy, Entity):
        escalation_policy = escalation_policy['id']
    assert isinstance(escalation_policy, six.string_types)

    endpoint = '{0}/{1}/escalation_policies/{2}'.format(
        self.endpoint,
        self['id'],
        escalation_policy,
    )
    return self.request('DELETE', endpoint=endpoint, query_params=kwargs)
python
def submit_completion(self, user, course_key, block_key, completion):
    """
    Update the completion value for the specified record.

    Parameters:
        * user (django.contrib.auth.models.User): The user for whom the
          completion is being submitted.
        * course_key (opaque_keys.edx.keys.CourseKey): The course in
          which the submitted block is found.
        * block_key (opaque_keys.edx.keys.UsageKey): The block that has had
          its completion changed.
        * completion (float in range [0.0, 1.0]): The fractional completion
          value of the block (0.0 = incomplete, 1.0 = complete).

    Return Value:
        (BlockCompletion, bool): A tuple comprising the created or updated
        BlockCompletion object and a boolean value indicating whether the
        object was newly created by this call.

    Raises:

        ValueError:
            If the wrong type is passed for one of the parameters.

        django.core.exceptions.ValidationError:
            If a float is passed that is not between 0.0 and 1.0.

        django.db.DatabaseError:
            If there was a problem getting, creating, or updating the
            BlockCompletion record in the database.  This will also be a
            more specific error, as described here:
            https://docs.djangoproject.com/en/1.11/ref/exceptions/#database-exceptions.
            IntegrityError and OperationalError are relatively common
            subclasses.
    """
    # Raise ValueError to match normal django semantics for wrong type of field.
    if not isinstance(course_key, CourseKey):
        raise ValueError(
            "course_key must be an instance of `opaque_keys.edx.keys.CourseKey`.  Got {}".format(type(course_key))
        )
    try:
        block_type = block_key.block_type
    except AttributeError:
        raise ValueError(
            "block_key must be an instance of `opaque_keys.edx.keys.UsageKey`.  Got {}".format(type(block_key))
        )

    if waffle.waffle().is_enabled(waffle.ENABLE_COMPLETION_TRACKING):
        try:
            # The transaction keeps the get_or_create and any later save
            # in one atomic unit so partial writes are rolled back.
            with transaction.atomic():
                obj, is_new = self.get_or_create(  # pylint: disable=unpacking-non-sequence
                    user=user,
                    course_key=course_key,
                    block_key=block_key,
                    defaults={
                        'completion': completion,
                        'block_type': block_type,
                    },
                )
        except IntegrityError:
            # The completion was created concurrently by another process
            log.info(
                "An IntegrityError was raised when trying to create a BlockCompletion for %s:%s:%s.  "
                "Falling back to get().",
                user,
                course_key,
                block_key,
            )
            obj = self.get(
                user=user,
                course_key=course_key,
                block_key=block_key,
            )
            is_new = False
        # Only write when the stored value actually changed.
        if not is_new and obj.completion != completion:
            obj.completion = completion
            obj.full_clean()
            obj.save(update_fields={'completion', 'modified'})
    else:
        # If the feature is not enabled, this method should not be called.
        # Error out with a RuntimeError.
        raise RuntimeError(
            "BlockCompletion.objects.submit_completion should not be \
            called when the feature is disabled."
        )

    return obj, is_new
python
def save(self, *args, **kwargs):
    """Make sure token is added.

    Runs :meth:`save_prep` on this instance before delegating to the
    parent ``save`` with the original arguments.
    """
    self.save_prep(instance_or_instances=self)
    return super(AbstractTokenModel, self).save(*args, **kwargs)
java
/**
 * Sets a render-time attribute on either the input element (the default
 * facet) or the surrounding span, rejecting attributes that may not be
 * overridden ({@code id}, {@code name} and {@code value}).
 *
 * @param name the attribute name; must be non-empty
 * @param value the attribute value
 * @param facet "span", "input" or {@code null} (treated as "input")
 * @throws JspException on tag errors reported via registerTagError
 */
public void setAttribute(String name, String value, String facet)
        throws JspException {
    // validate the name attribute, in the case of an error simply return.
    if (name == null || name.length() <= 0) {
        String s = Bundle.getString("Tags_AttributeNameNotSet");
        registerTagError(s, null);
        return;
    }

    // it's not legal to set the id or name attributes this way
    if (name.equals(ID) || name.equals(NAME)) {
        String s = Bundle.getString("Tags_AttributeMayNotBeSet", new Object[]{name});
        registerTagError(s, null);
        return;
    }

    // handle the facet.  Span will place stuff into the spanState input is the default
    // so we don't need to do anything.
    if (facet != null) {
        if (facet.equals("span")) {
            _spanState.registerAttribute(AbstractHtmlState.ATTR_GENERAL, name, value);
            return;
        }
        else if (facet.equals("input")) {
            // do nothing...
        }
        else {
            String s = Bundle.getString("Tags_AttributeFacetNotSupported", new Object[]{facet});
            registerTagError(s, null);
            return;
        }
    }

    // don't set the value on the input
    if (name.equals(VALUE)) {
        String s = Bundle.getString("Tags_AttributeMayNotBeSet", new Object[]{name});
        registerTagError(s, null);
        return;
    }

    // we place the state into the special attribute map.
    if (_attrs == null) {
        _attrs = new HashMap();
    }
    _attrs.put(name, value);
}
python
# Sentinel so the spline default is only resolved at call time (and the
# mutable-default-argument pitfall for `bins` is avoided).
_MAKEHIST_DEFAULT_FIT = object()

def makeHist(x_val, y_val, fit=_MAKEHIST_DEFAULT_FIT, bins=None):
    """
    Constructs a (fitted) histogram of the given data.

    Parameters:
        x_val : array
            The data to be histogrammed along the x-axis.
        y_val : array
            The data to be histogrammed along the y-axis.
        fit : function or None, optional
            The function to use in order to fit the data.  Defaults to
            ``spline_base.fit2d``.  If no fit should be applied, set to
            None.
        bins : tuple of arrays, optional
            Bin edges to be used in the histogram (first value: y-axis,
            second value: x-axis).  Defaults to 74 edges over
            [-36.5, 36.5] and 361 edges over [-180, 180].

    Returns:
        2-D array normalized so that its entries sum to 1.

    Raises:
        TypeError: if ``fit`` is neither callable nor None.
    """
    if fit is _MAKEHIST_DEFAULT_FIT:
        fit = spline_base.fit2d
    if bins is None:
        # Built per call instead of as a shared mutable default argument.
        bins = [np.linspace(-36.5, 36.5, 74), np.linspace(-180, 180, 361)]

    # NaNs are filtered independently per axis (mirrors the original
    # behavior; assumes the same positions are NaN in both arrays).
    y_val = y_val[~np.isnan(y_val)]
    x_val = x_val[~np.isnan(x_val)]
    samples = list(zip(y_val, x_val))
    K, xedges, yedges = np.histogram2d(y_val, x_val, bins=bins)

    if fit is None:
        return K / K.sum()
    if callable(fit):
        H = fit(np.array(samples), bins[0], bins[1], p_est=K)[0]
        return H / H.sum()
    raise TypeError("Not a valid argument, insert spline function or None")
java
/**
 * Builds a GET request for a single user.
 *
 * @param user identifier of the user to fetch
 * @return the request object; execute it to perform the GET
 */
public ItemRequest<User> findById(String user) {
    String path = String.format("/users/%s", user);
    return new ItemRequest<User>(this, User.class, path, "GET");
}
python
def get_crab(registry):
    """
    Get the Crab Gateway

    :rtype: :class:`crabpy.gateway.crab.CrabGateway`
    """
    # The argument may be a request (which carries a ``registry``
    # attribute) or a registry itself; unwrap in the former case.
    target = getattr(registry, 'registry', None)
    if target is None:
        target = registry
    return target.queryUtility(ICrab)
python
def _preprocess_data(self, X, Y=None, idxs=None, train=False):
    """
    Preprocess the data:
    1. Make sentence with mention into sequence data for LSTM.
    2. Select subset of the input if idxs exists.

    :param X: The input data of the model.
    :type X: pair with candidates and corresponding features
    :param Y: The labels of input data (optional).
    :type Y: list or numpy.array
    :param idxs: The selected indexs of input data.
    :type idxs: list or numpy.array
    :param train: An indicator for word dictionary to extend new words.
    :type train: bool

    :return: Preprocessed data.
    :rtype: list of list of (word sequences, features, feature_weights)
        tuples
    """
    C, F = X

    if Y is not None:
        Y = np.array(Y).astype(np.float32)

    # Create word dictionary for LSTM
    if not hasattr(self, "word_dict"):
        self.word_dict = SymbolTable()
        arity = len(C[0])
        # Add paddings into word dictionary
        for i in range(arity):
            list(map(self.word_dict.get, ["~~[[" + str(i), str(i) + "]]~~"]))

    # Make sequence input for LSTM from candidates
    seq_data = []
    for candidate in C:
        cand_idx = []
        for i in range(len(candidate)):
            # Add mark for each mention in the original sentence
            args = [
                (
                    candidate[i].context.get_word_start_index(),
                    candidate[i].context.get_word_end_index(),
                    i,
                )
            ]
            s = mark_sentence(mention_to_tokens(candidate[i]), args)
            # When training, unseen words extend the dictionary;
            # otherwise they are only looked up.
            f = self.word_dict.get if train else self.word_dict.lookup
            cand_idx.append(list(map(f, s)))
        seq_data.append(cand_idx)

    # Generate proprcessed the input
    # F is a CSR-like sparse matrix; indptr slices select the feature
    # indices and weights belonging to candidate i.
    if idxs is None:
        if Y is not None:
            return (
                [
                    [
                        seq_data[i],
                        F.indices[F.indptr[i] : F.indptr[i + 1]],
                        F.data[F.indptr[i] : F.indptr[i + 1]],
                    ]
                    for i in range(len(C))
                ],
                Y,
            )
        else:
            return [
                [
                    seq_data[i],
                    F.indices[F.indptr[i] : F.indptr[i + 1]],
                    F.data[F.indptr[i] : F.indptr[i + 1]],
                ]
                for i in range(len(C))
            ]
    if Y is not None:
        return (
            [
                [
                    seq_data[i],
                    F.indices[F.indptr[i] : F.indptr[i + 1]],
                    F.data[F.indptr[i] : F.indptr[i + 1]],
                ]
                for i in idxs
            ],
            Y[idxs],
        )
    else:
        return [
            [
                seq_data[i],
                F.indices[F.indptr[i] : F.indptr[i + 1]],
                F.data[F.indptr[i] : F.indptr[i + 1]],
            ]
            for i in idxs
        ]
python
def get_soap_object(self, client):
    """
    Override default get_soap_object behavior to account for child
    Record types.

    :param client: SOAP client used to build the marshalled objects.
    :return: the parent's SOAP object with ``records`` re-wrapped so each
        child record is marshalled as a ``Record``.
    """
    record_data = super().get_soap_object(client)
    record_data.records = [Record(r).get_soap_object(client) for r in record_data.records]
    return record_data
python
def linearization_error(nodes):
    """Image for :func:`.linearization_error` docstring.

    Plots the curve defined by ``nodes`` together with the straight line
    through its endpoints, draws a dashed segment between the two
    midpoints, and saves the figure as ``linearization_error.png``.
    """
    if NO_IMAGES:
        return

    curve = bezier.Curve.from_nodes(nodes)
    # Line through only the first and last control points.
    line = bezier.Curve.from_nodes(nodes[:, (0, -1)])
    midpoints = np.hstack([curve.evaluate(0.5), line.evaluate(0.5)])

    ax = curve.plot(256)
    line.plot(256, ax=ax)
    ax.plot(
        midpoints[0, :], midpoints[1, :], color="black", linestyle="dashed"
    )

    ax.axis("scaled")
    save_image(ax.figure, "linearization_error.png")
java
/**
 * Resolves the externally visible name of a method parameter.
 *
 * @param m the method owning the parameter
 * @param paramIndex zero-based index of the parameter
 * @return the value of the parameter's {@link PName} annotation, or the
 *         empty string when the annotation is absent
 */
private static String getParameterName(Method m, int paramIndex) {
    PName pName = getParameterAnnotation(m, paramIndex, PName.class);
    return pName == null ? "" : pName.value();
}
python
def signature_unsafe(m, sk, pk, hash_func=H):
    """
    Not safe to use with secret keys or secret data.

    See module docstring.  This function should be used for testing only.

    Ed25519-style signing: derives the clamped secret scalar ``a`` from
    ``hash_func(sk)``, computes the deterministic nonce ``r``, the
    commitment ``R = r * B`` and the response
    ``S = (r + H(enc(R) || pk || m) * a) mod l``.
    """
    h = hash_func(sk)
    # Key clamping: clear the low 3 bits, set bit b-2.
    a = 2 ** (b - 2) + sum(2 ** i * bit(h, i) for i in range(3, b - 2))
    # Nonce from the second half of the hash output concatenated with m.
    r = Hint(bytearray([h[j] for j in range(b // 8, b // 4)]) + m)
    R = scalarmult_B(r)
    S = (r + Hint(encodepoint(R) + pk + m) * a) % l
    return bytes(encodepoint(R) + encodeint(S))
python
def remove_model(self, model, **kwargs):
    """
    Remove a 'model' from the bundle

    :parameter str twig: twig to filter for the model
    :parameter **kwargs: any other tags to do the filter
        (except twig or context)
    """
    # Force the model and context tags, keeping any extra filters.
    filter_tags = dict(kwargs, model=model, context='model')
    self.remove_parameters_all(**filter_tags)
java
/**
 * Formats a {@link Date} as an ISO-8601 timestamp with millisecond
 * precision in UTC, e.g. {@code 1970-01-01T00:00:00.000Z}.
 *
 * @param date the instant to format
 * @return the ISO-8601 representation in UTC
 */
public static String dateToString(Date date) {
    // A fresh formatter per call keeps this method thread-safe
    // (SimpleDateFormat instances must not be shared).
    SimpleDateFormat iso8601 = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ss.SSSXXX");
    iso8601.setTimeZone(TimeZone.getTimeZone("UTC"));
    return iso8601.format(date);
}
python
def execute(action): """ Execute the given action. An action is any object with a ``forwards()`` and ``backwards()`` method. .. code-block:: python class CreateUser(object): def __init__(self, userinfo): self.userinfo = userinfo self.user_id = None def forwards(self): self.user_id = UserStore.create(userinfo) return self.user_id def backwards(self): if self.user_id is not None: # user_id will be None if creation failed UserStore.delete(self.user_id) If the ``forwards`` method succeeds, the action is considered successful. If the method fails, the ``backwards`` method is called to revert any effect it might have had on the system. In addition to defining classes, actions may be built using the :py:func:`reversible.action` decorator. Actions may be composed together using the :py:func:`reversible.gen` decorator. :param action: The action to execute. :returns: The value returned by the ``forwards()`` method of the action. :raises: The exception raised by the ``forwards()`` method if rollback succeeded. Otherwise, the exception raised by the ``backwards()`` method is raised. """ # TODO this should probably be a class to configure logging, etc. The # global execute can refer to the "default" instance of the executor. try: return action.forwards() except Exception: log.exception('%s failed to execute. Rolling back.', action) try: action.backwards() except Exception: log.exception('%s failed to roll back.', action) raise else: raise
python
def item_related_name(self):
    """
    The ManyToMany field on the item class pointing to this class.

    If there is more than one field, this value will be None.
    """
    # Cache the lookup on the instance after the first access.
    if not hasattr(self, '_item_related_name'):
        many_to_many_rels = \
            get_section_many_to_many_relations(self.__class__)

        if len(many_to_many_rels) != 1:
            # Zero or several candidate relations: ambiguous, return None.
            self._item_related_name = None
        else:
            self._item_related_name = many_to_many_rels[0].field.name

    return self._item_related_name
python
def status(self, key, value):
    """Update the status of a build"""
    normalized = value.lower()
    if normalized not in valid_statuses:
        raise ValueError(
            "Build Status must have a value from:\n{}".format(
                ", ".join(valid_statuses)))
    self.obj['status'][key] = normalized
    change = "Updating build:{}.status.{}={}".format(
        self.obj['name'], key, normalized)
    self.changes.append(change)
    return self
python
def _validate_required(self, settings, name, value): """ Validate a required setting (value can not be empty) Args: settings (dict): Current settings. name (str): Setting name. value (str): Required value to validate. Raises: boussole.exceptions.SettingsInvalidError: If value is empty. Returns: str: Validated value. """ if not value: raise SettingsInvalidError(("Required value from setting '{name}' " "must not be " "empty.").format(name=name)) return value
java
/**
 * Attempts to route a call to {@code target} through a generated private
 * bridge method (used when the real target is not directly accessible from
 * the calling class, e.g. private/protected members accessed from closures
 * or inner classes).
 *
 * @param target the method the source code calls
 * @param receiver the receiver expression of the call
 * @param implicitThis whether the receiver is an implicit {@code this}
 * @param args the call arguments
 * @return {@code true} if a bridge method was found and the call was
 *         written through it, {@code false} otherwise
 */
protected boolean tryBridgeMethod(MethodNode target, Expression receiver, boolean implicitThis, TupleExpression args) {
    ClassNode lookupClassNode;
    if (target.isProtected()) {
        // Protected members: bridges live on the current class (or its
        // outer class when we are inside a closure).
        lookupClassNode = controller.getClassNode();
        if (controller.isInClosure()) {
            lookupClassNode = lookupClassNode.getOuterClass();
        }
    } else {
        lookupClassNode = target.getDeclaringClass().redirect();
    }
    Map<MethodNode, MethodNode> bridges = lookupClassNode.getNodeMetaData(PRIVATE_BRIDGE_METHODS);
    MethodNode bridge = bridges==null?null:bridges.get(target);
    if (bridge != null) {
        Expression fixedReceiver = receiver;
        ClassNode classNode = implicitThis?controller.getClassNode():null;
        ClassNode declaringClass = bridge.getDeclaringClass();
        // An inner class calling an outer-class bridge must use the
        // qualified outer "this" as receiver.
        if (implicitThis && !controller.isInClosure()
                && !classNode.isDerivedFrom(declaringClass)
                && !classNode.implementsInterface(declaringClass)
                && classNode instanceof InnerClassNode) {
            fixedReceiver = new PropertyExpression(new ClassExpression(classNode.getOuterClass()), "this");
        }
        // Static bridges receive null as the first argument; instance
        // bridges receive the (possibly fixed) receiver.
        ArgumentListExpression newArgs = new ArgumentListExpression(target.isStatic()?new ConstantExpression(null):fixedReceiver);
        for (Expression expression : args.getExpressions()) {
            newArgs.addExpression(expression);
        }
        return writeDirectMethodCall(bridge, implicitThis, fixedReceiver, newArgs);
    }
    return false;
}
java
/**
 * Returns the {@link EJBContext} of the EJB currently executing on this
 * thread.  Fails when invoked outside an EJB (for example from a servlet
 * or a webservice endpoint), since the context may only be injected into
 * or looked up from an EJB.
 *
 * @return the caller's {@code EJBContext}
 * @throws Exception an {@code InjectionException} when no EJB context is
 *         available on the current thread
 */
public Object getObjectInstance(Object obj,
                                Name name,
                                Context nameCtx,
                                Hashtable<?, ?> environment)
                throws Exception
{
    final boolean isTraceOn = TraceComponent.isAnyTracingEnabled();
    if (isTraceOn && tc.isEntryEnabled())
        Tr.entry(tc, "getObjectInstance : " + obj);

    // F743-17630CodRv
    // This factory should only be used from inside an ejb.
    //
    // Checking for the lack of EJBContext is good enough to tell us
    // we are not inside an ejb when the code flow was started by some
    // non-ejb component (such as a servlet).  However, in the case where
    // we've got an ejb that calls into a webservice endpoint, then there
    // may be a ContainerTx that has a BeanO, even though we are not
    // allowed to use the factory from inside the webservice endpoint.
    //
    // To account for this, we must also check the ComponentMetaData (CMD) and
    // confirm that it is ejb specific.  In the webservice endpoint case,
    // the CMD will have been switched to some non-ejb CMD, and so this check
    // so stop us.
    EJBContext ejbContext = EJSContainer.getCallbackBeanO(); // d630940
    ComponentMetaData cmd = ComponentMetaDataAccessorImpl.getComponentMetaDataAccessor().getComponentMetaData();
    if (ejbContext == null || !(cmd instanceof BeanMetaData))
    {
        InjectionException iex = new InjectionException
                        ("The EJBContext type may only be injected into an EJB instance " +
                         "or looked up within the context of an EJB.");
        Tr.error(tc, "EJB_CONTEXT_DATA_NOT_AVAILABLE_CNTR0329E",
                 new Object[] { EJBContext.class.getName() });

        if (isTraceOn && tc.isEntryEnabled())
            Tr.exit(tc, "getObjectInstance : ", iex);
        throw iex;
    }

    if (isTraceOn && tc.isEntryEnabled())
        Tr.exit(tc, "getObjectInstance : " + ejbContext);

    return ejbContext;
}
java
/**
 * Snapshots the distributor's current statistics (general, IO and
 * affinity) plus a timestamp, so deltas can later be computed against
 * this baseline.
 *
 * @return this context, for call chaining
 */
public ClientStatsContext fetch() {
    m_current = m_distributor.getStatsSnapshot();
    m_currentIO = m_distributor.getIOStatsSnapshot();
    m_currentTS = System.currentTimeMillis();
    m_currentAffinity = m_distributor.getAffinityStatsSnapshot();
    return this;
}
java
/**
 * Formats a date as an ISO-8601 timestamp ({@code yyyy-MM-ddTHH:mm:ss},
 * optionally with milliseconds) in the given time zone, appending either
 * {@code Z} for UTC or a {@code +hh:mm}/{@code -hh:mm} offset.
 *
 * @param date the instant to format
 * @param millis whether to include the {@code .sss} milliseconds part
 * @param tz the time zone whose offset is rendered
 * @return the formatted timestamp
 */
private static String format(Date date, boolean millis, TimeZone tz) {
    Calendar calendar = new GregorianCalendar(tz, Locale.US);
    calendar.setTime(date);

    // estimate capacity of buffer as close as we can (yeah, that's pedantic ;)
    int capacity = "yyyy-MM-ddThh:mm:ss".length();
    capacity += millis ? ".sss".length() : 0;
    capacity += tz.getRawOffset() == 0 ? "Z".length() : "+hh:mm".length();
    StringBuilder formatted = new StringBuilder(capacity);

    padInt(formatted, calendar.get(Calendar.YEAR), "yyyy".length());
    formatted.append('-');
    // Calendar.MONTH is zero-based, hence the +1.
    padInt(formatted, calendar.get(Calendar.MONTH) + 1, "MM".length());
    formatted.append('-');
    padInt(formatted, calendar.get(Calendar.DAY_OF_MONTH), "dd".length());
    formatted.append('T');
    padInt(formatted, calendar.get(Calendar.HOUR_OF_DAY), "hh".length());
    formatted.append(':');
    padInt(formatted, calendar.get(Calendar.MINUTE), "mm".length());
    formatted.append(':');
    padInt(formatted, calendar.get(Calendar.SECOND), "ss".length());
    if (millis) {
        formatted.append('.');
        padInt(formatted, calendar.get(Calendar.MILLISECOND), "sss".length());
    }

    // Offset at the specific instant (accounts for DST), not the raw offset.
    int offset = tz.getOffset(calendar.getTimeInMillis());
    if (offset != 0) {
        int hours = Math.abs((offset / (60 * 1000)) / 60);
        int minutes = Math.abs((offset / (60 * 1000)) % 60);
        formatted.append(offset < 0 ? '-' : '+');
        padInt(formatted, hours, "hh".length());
        formatted.append(':');
        padInt(formatted, minutes, "mm".length());
    } else {
        formatted.append('Z');
    }

    return formatted.toString();
}
java
/**
 * Accumulates {@code delta} into the total get-time statistics and tracks
 * the maximum observed value.  No-op when statistics are disabled or the
 * delta is not positive.
 *
 * @param delta elapsed time to record; ignored unless {@code > 0}
 */
public void deltaTotalGetTime(long delta) {
    if (enabled.get() && delta > 0) {
        totalGetTime.addAndGet(delta);
        totalGetTimeInvocations.incrementAndGet();
        // Update the maximum atomically via CAS.  The original
        // check-then-set (get() followed by set()) could lose a larger
        // value when two threads raced on the update.
        long prev = maxGetTime.get();
        while (delta > prev && !maxGetTime.compareAndSet(prev, delta)) {
            prev = maxGetTime.get();
        }
    }
}
python
def iris_enrich(self, *domains, **kwargs):
    """Returns back enriched data related to the specified domains using our Iris Enrich service
       each domain should be passed in as an un-named argument to the method:
           iris_enrich('domaintools.com', 'google.com')

       api.iris_enrich(*DOMAIN_LIST)['results_count'] Returns the number of results
       api.iris_enrich(*DOMAIN_LIST)['missing_domains'] Returns any domains that we were unable to
                                                        retrieve enrichment data for
       api.iris_enrich(*DOMAIN_LIST)['limit_exceeded'] Returns True if you've exceeded your API usage

       for enrichment in api.iris_enrich(*DOMAIN_LIST):  # Enables looping over all returned enriched domains

       for example:
           enrich_domains = ['google.com', 'amazon.com']
           assert api.iris_enrich(*enrich_domains)['missing_domains'] == []

    :raises ValueError: if no domains are provided.
    """
    if not domains:
        raise ValueError('One or more domains to enrich must be provided')

    domains = ','.join(domains)
    data_updated_after = kwargs.get('data_updated_after', None)
    if hasattr(data_updated_after, 'strftime'):
        # Bug fix: the original used '%Y-%M-%d', where %M is the MINUTE
        # field; %m is the zero-padded month required for an ISO date.
        data_updated_after = data_updated_after.strftime('%Y-%m-%d')

    return self._results('iris-enrich', '/v1/iris-enrich/', domain=domains,
                         data_updated_after=data_updated_after,
                         items_path=('results', ), **kwargs)
java
/**
 * Adds a single text entry to a zip file by delegating to the
 * stream-based overload.
 *
 * @param zipFile destination zip file
 * @param path entry path inside the archive
 * @param data text content to store
 * @param charset charset used to encode {@code data}
 * @return the zip file
 * @throws UtilException wrapping any underlying IO error
 */
public static File zip(File zipFile, String path, String data, Charset charset) throws UtilException {
    return zip(zipFile, path, IoUtil.toStream(data, charset), charset);
}
python
def get_windows_interfaces():
    """
    Get Windows interfaces.

    Queries WMI (Win32_NetworkAdapter / Win32_NetworkAdapterConfiguration)
    for connected adapters; falls back to the registry when COM fails.

    :returns: list of windows interfaces (dicts with id, name, ip_address,
        mac_address, netcard, netmask and type keys)
    """
    import win32com.client
    import pywintypes

    interfaces = []
    try:
        locator = win32com.client.Dispatch("WbemScripting.SWbemLocator")
        # Raw string: avoids relying on "\c" not being an escape sequence.
        service = locator.ConnectServer(".", r"root\cimv2")
        network_configs = service.InstancesOf("Win32_NetworkAdapterConfiguration")
        # more info on Win32_NetworkAdapter: http://msdn.microsoft.com/en-us/library/aa394216%28v=vs.85%29.aspx
        for adapter in service.InstancesOf("Win32_NetworkAdapter"):
            if adapter.NetConnectionStatus == 2 or adapter.NetConnectionStatus == 7:
                # adapter is connected or media disconnected
                ip_address = ""
                netmask = ""
                for network_config in network_configs:
                    if network_config.InterfaceIndex == adapter.InterfaceIndex:
                        if network_config.IPAddress:
                            # get the first IPv4 address only
                            ip_address = network_config.IPAddress[0]
                            netmask = network_config.IPSubnet[0]
                        break
                npf_interface = "\\Device\\NPF_{guid}".format(guid=adapter.GUID)
                interfaces.append({"id": npf_interface,
                                   "name": adapter.NetConnectionID,
                                   "ip_address": ip_address,
                                   "mac_address": adapter.MACAddress,
                                   "netcard": adapter.name,
                                   "netmask": netmask,
                                   "type": "ethernet"})
    except (AttributeError, pywintypes.com_error):
        # logging.warn() is deprecated; use warning().
        log.warning("Could not use the COM service to retrieve interface info, trying using the registry...")
        return _get_windows_interfaces_from_registry()

    return interfaces
python
def display_to_value(value, default_value, ignore_errors=True):
    """Convert back to value.

    Parses the (Qt) display string ``value`` into a Python object of the
    same type as ``default_value``.  When ``ignore_errors`` is True,
    unparseable input is evaluated best-effort via ``try_to_eval``;
    otherwise the original ``default_value`` is returned on failure.
    """
    from qtpy.compat import from_qvariant
    value = from_qvariant(value, to_text_string)
    try:
        np_dtype = get_numpy_dtype(default_value)
        if isinstance(default_value, bool):
            # We must test for boolean before NumPy data types
            # because `bool` class derives from `int` class
            try:
                value = bool(float(value))
            except ValueError:
                value = value.lower() == "true"
        elif np_dtype is not None:
            if 'complex' in str(type(default_value)):
                value = np_dtype(complex(value))
            else:
                value = np_dtype(value)
        elif is_binary_string(default_value):
            value = to_binary_string(value, 'utf8')
        elif is_text_string(default_value):
            value = to_text_string(value)
        elif isinstance(default_value, complex):
            value = complex(value)
        elif isinstance(default_value, float):
            value = float(value)
        elif isinstance(default_value, int):
            # Fall back to float for inputs like "1.5" typed over an int.
            try:
                value = int(value)
            except ValueError:
                value = float(value)
        elif isinstance(default_value, datetime.datetime):
            value = datestr_to_datetime(value)
        elif isinstance(default_value, datetime.date):
            value = datestr_to_datetime(value).date()
        elif isinstance(default_value, datetime.timedelta):
            value = str_to_timedelta(value)
        elif ignore_errors:
            value = try_to_eval(value)
        else:
            value = eval(value)
    except (ValueError, SyntaxError):
        if ignore_errors:
            value = try_to_eval(value)
        else:
            return default_value
    return value
python
def _any_would_run(func, filenames, *args):
    """True if a linter function would be called on any of filenames."""
    # Escape hatch: stamping can be disabled entirely via the environment,
    # in which case the linter always runs.
    if os.environ.get("_POLYSQUARE_GENERIC_FILE_LINTER_NO_STAMPING", None):
        return True

    for filename in filenames:  # suppress(E204)
        stamp_args, stamp_kwargs = _run_lint_on_file_stamped_args(filename,
                                                                  *args,
                                                                  **{})
        # out_of_date returns a truthy dependency when the stamped call
        # would need to re-run for this file.
        dependency = jobstamp.out_of_date(func,
                                          *stamp_args,
                                          **stamp_kwargs)

        if dependency:
            return True

    return False
python
def _simulate_coef_from_bootstraps( self, n_draws, coef_bootstraps, cov_bootstraps): """Simulate coefficients using bootstrap samples.""" # Sample indices uniformly from {0, ..., n_bootstraps - 1} # (Wood pg. 199 step 6) random_bootstrap_indices = np.random.choice( np.arange(len(coef_bootstraps)), size=n_draws, replace=True) # Simulate `n_draws` many random coefficient vectors from a # multivariate normal distribution with mean and covariance given by # the bootstrap samples (indexed by `random_bootstrap_indices`) of # `coef_bootstraps` and `cov_bootstraps`. Because it's faster to draw # many samples from a certain distribution all at once, we make a dict # mapping bootstrap indices to draw indices and use the `size` # parameter of `np.random.multivariate_normal` to sample the draws # needed from that bootstrap sample all at once. bootstrap_index_to_draw_indices = defaultdict(list) for draw_index, bootstrap_index in enumerate(random_bootstrap_indices): bootstrap_index_to_draw_indices[bootstrap_index].append(draw_index) coef_draws = np.empty((n_draws, len(self.coef_))) for bootstrap, draw_indices in bootstrap_index_to_draw_indices.items(): coef_draws[draw_indices] = np.random.multivariate_normal( coef_bootstraps[bootstrap], cov_bootstraps[bootstrap], size=len(draw_indices)) return coef_draws
java
/**
 * Deletes the collected body-file folders that are empty after the import.
 *
 * <p>Folders are processed in reverse collection order so that nested
 * folders are removed before their parents.  A folder is only deleted when
 * it contains neither files nor sub-folders (checked with
 * {@code IGNORE_EXPIRATION} so expired resources still count as content).
 *
 * @throws CmsImportExportException if any OpenCms operation fails; the
 *         original {@link CmsException} is preserved as the cause
 */
private void removeFolders() throws CmsImportExportException {

    try {

        int size = m_folderStorage.size();
        m_report.println(Messages.get().container(Messages.RPT_DELFOLDER_START_0), I_CmsReport.FORMAT_HEADLINE);
        // iterate though all collected folders. Iteration must start at the end of the list,
        // as folders habe to be deleted in the reverse order.
        int counter = 1;
        for (int j = (size - 1); j >= 0; j--) {
            String resname = m_folderStorage.get(j);
            // normalize to an absolute folder path with leading and trailing slash
            resname = (resname.startsWith("/") ? "" : "/") + resname + (resname.endsWith("/") ? "" : "/");
            // now check if the folder is really empty. Only delete empty folders
            List<CmsResource> files = m_cms.getFilesInFolder(resname, CmsResourceFilter.IGNORE_EXPIRATION);

            if (files.size() == 0) {
                List<CmsResource> folders = m_cms.getSubFolders(resname, CmsResourceFilter.IGNORE_EXPIRATION);

                if (folders.size() == 0) {
                    // report progress as "n/total", then delete the folder
                    m_report.print(
                        org.opencms.report.Messages.get().container(
                            org.opencms.report.Messages.RPT_SUCCESSION_2,
                            String.valueOf(counter),
                            String.valueOf(size)),
                        I_CmsReport.FORMAT_NOTE);
                    m_report.print(Messages.get().container(Messages.RPT_DELFOLDER_0), I_CmsReport.FORMAT_NOTE);
                    m_report.print(
                        org.opencms.report.Messages.get().container(
                            org.opencms.report.Messages.RPT_ARGUMENT_1,
                            resname),
                        I_CmsReport.FORMAT_DEFAULT);
                    // the resource must be locked before it can be deleted
                    m_cms.lockResource(resname);
                    m_cms.deleteResource(resname, CmsResource.DELETE_PRESERVE_SIBLINGS);
                    m_report.println(
                        org.opencms.report.Messages.get().container(org.opencms.report.Messages.RPT_OK_0),
                        I_CmsReport.FORMAT_OK);
                    counter++;
                }
            }
        }
    } catch (CmsException e) {
        CmsMessageContainer message = Messages.get().container(
            Messages.ERR_IMPORTEXPORT_ERROR_REMOVING_FOLDERS_OF_IMPORTED_BODY_FILES_0);
        if (LOG.isDebugEnabled()) {
            LOG.debug(message.key(), e);
        }
        throw new CmsImportExportException(message, e);
    }
}
python
def iter_lines(self, section):
    """Yield every line contained in *section*.

    'Header' lines are skipped.  If the section does not exist, the
    generator simply yields nothing instead of raising.
    """
    try:
        resolved = self._get_section(section, create=False)
    except KeyError:
        # Unknown section: behave as an empty iteration.
        return
    for block in resolved:
        yield from block
java
/**
 * Executes a synchronous POST request and returns the response cast to the
 * requested type.
 *
 * <p>The request is first configured via {@code configureRequest}, then
 * routed through the registered POST interceptor, which ultimately invokes
 * {@code doPost}.
 *
 * @param type expected type of the response payload
 * @param configuration closure applied to the request's {@code HttpConfig}
 * @return the response payload cast to {@code type}
 */
public <T> T post(final Class<T> type, final Consumer<HttpConfig> configuration) {
    return type.cast(interceptors.get(HttpVerb.POST).apply(configureRequest(type, HttpVerb.POST, configuration), this::doPost));
}
python
def _serialize_dict(cls, dict_): """ :type dict_ dict :rtype: dict """ obj_serialized = {} for key in dict_.keys(): item_serialized = cls.serialize(dict_[key]) if item_serialized is not None: key = key.rstrip(cls._SUFFIX_KEY_OVERLAPPING) key = key.lstrip(cls._PREFIX_KEY_PROTECTED) obj_serialized[key] = item_serialized return obj_serialized
java
/**
 * Asynchronously fetches the list of all legend ids from the GW2 API.
 *
 * @param callback receives the list of legend id strings (or the failure)
 * @throws NullPointerException if the callback is null (propagated by the
 *         underlying enqueue call)
 */
public void getAllLegendID(Callback<List<String>> callback) throws NullPointerException {
    gw2API.getAllLegendIDs().enqueue(callback);
}
python
def _get(self, *rules):
    # type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]
    """
    Get rules representing parameters.

    The returned rules can differ from the parameters, since a parameter
    may define multiple rules in one class.

    :param rules: For which rules get the representation.
    :return: Generator of rules representing parameters.
    :raise NotRuleException: If a parameter doesn't inherit from Rule.
    :raise RuleException: If the syntax of the rule is invalid.
    """
    for candidate in rules:
        # A valid parameter must be a class derived from Rule.
        is_rule_class = inspect.isclass(candidate) and issubclass(candidate, Rule)
        if not is_rule_class:
            raise NotRuleException(candidate)
        for part in self._split_rules(candidate):
            yield self._find_rule(part)
java
/**
 * Checks whether the file carries a GeoPackage file extension.
 *
 * @param file file to inspect
 * @return true when the extension (case-insensitively) matches either the
 *         standard or the extended GeoPackage extension
 */
public static boolean hasGeoPackageExtension(File file) {
    final String extension = GeoPackageIOUtils.getFileExtension(file);
    if (extension == null) {
        // No extension at all -> not a GeoPackage.
        return false;
    }
    return extension.equalsIgnoreCase(GeoPackageConstants.GEOPACKAGE_EXTENSION)
            || extension.equalsIgnoreCase(GeoPackageConstants.GEOPACKAGE_EXTENDED_EXTENSION);
}
java
/**
 * Wraps a {@link LazyEvaluation} in an object whose {@code toString()}
 * defers the string computation until it is actually requested (useful for
 * cheap logging of expensive messages).
 *
 * <p>If the evaluation throws, the returned string contains the full stack
 * trace instead of propagating the exception.
 *
 * @param lazyLambda supplier of the string, evaluated on demand
 * @return an object that evaluates {@code lazyLambda} in its toString()
 */
public static Object getLazyString(LazyEvaluation lazyLambda) {
    return new Object() {
        @Override
        public String toString() {
            try {
                return lazyLambda.getString();
            } catch (Exception e) {
                // Capture the stack trace into the returned message so the
                // caller's logging never fails.
                StringWriter traceWriter = new StringWriter();
                e.printStackTrace(new PrintWriter(traceWriter));
                return "Error while evaluating lazy String... " + traceWriter.toString();
            }
        }
    };
}
python
def send_offset_commit_request(self, group, payloads=None,
                               fail_on_error=True, callback=None,
                               group_generation_id=-1,
                               consumer_id=''):
    """Send a list of OffsetCommitRequests to the Kafka broker for the
    given consumer group.

    NOTE(review): runs as a Twisted generator (uses ``yield`` /
    ``returnValue``); the ``@inlineCallbacks`` decorator is presumably
    applied outside this view — confirm at the definition site.

    Args:
      group (str): The consumer group to which to commit the offsets
      payloads ([OffsetCommitRequest]): List of topic, partition, offsets
        to commit.
      fail_on_error (bool): Whether to raise an exception if a response
        from the Kafka broker indicates an error
      callback (callable): a function to call with each of the responses
        before returning the returned value to the caller.
      group_generation_id (int): Must currently always be -1
      consumer_id (str): Must currently always be empty string

    Returns:
      [OffsetCommitResponse]: List of OffsetCommitResponse objects.
      Will raise KafkaError for failed requests if fail_on_error is True
    """
    group = _coerce_consumer_group(group)
    # Bind the group metadata into the encoder; the broker-aware sender
    # only supplies the per-request payloads.
    encoder = partial(KafkaCodec.encode_offset_commit_request,
                      group=group,
                      group_generation_id=group_generation_id,
                      consumer_id=consumer_id)
    decoder = KafkaCodec.decode_offset_commit_response
    resps = yield self._send_broker_aware_request(
        payloads, encoder, decoder, consumer_group=group)

    returnValue(self._handle_responses(
        resps, fail_on_error, callback, group))
java
/**
 * Marshalls the given {@link StartJobRequest} into the protocol
 * representation, writing each field through its generated binding.
 *
 * <p>Generated-marshaller convention: any failure (including unexpected
 * runtime exceptions) is wrapped in an {@link SdkClientException}.
 *
 * @param startJobRequest the request to marshall; must not be null
 * @param protocolMarshaller target marshaller
 * @throws SdkClientException if the request is null or marshalling fails
 */
public void marshall(StartJobRequest startJobRequest, ProtocolMarshaller protocolMarshaller) {

    if (startJobRequest == null) {
        throw new SdkClientException("Invalid argument passed to marshall(...)");
    }

    try {
        // Field order mirrors the generated binding definitions.
        protocolMarshaller.marshall(startJobRequest.getAppId(), APPID_BINDING);
        protocolMarshaller.marshall(startJobRequest.getBranchName(), BRANCHNAME_BINDING);
        protocolMarshaller.marshall(startJobRequest.getJobId(), JOBID_BINDING);
        protocolMarshaller.marshall(startJobRequest.getJobType(), JOBTYPE_BINDING);
        protocolMarshaller.marshall(startJobRequest.getJobReason(), JOBREASON_BINDING);
        protocolMarshaller.marshall(startJobRequest.getCommitId(), COMMITID_BINDING);
        protocolMarshaller.marshall(startJobRequest.getCommitMessage(), COMMITMESSAGE_BINDING);
        protocolMarshaller.marshall(startJobRequest.getCommitTime(), COMMITTIME_BINDING);
    } catch (Exception e) {
        throw new SdkClientException("Unable to marshall request to JSON: " + e.getMessage(), e);
    }
}
java
/**
 * Returns the IDs of all user groups that contain the given user.
 *
 * @param sUserID the user ID to look for; may be null or empty
 * @return a mutable list of matching group IDs; empty (never null) when
 *         the user ID is blank or no group contains the user
 */
@Nonnull
@ReturnsMutableCopy
public ICommonsList <String> getAllUserGroupIDsWithAssignedUser (@Nullable final String sUserID)
{
    // A blank user ID cannot be assigned to any group.
    if (StringHelper.hasNoText (sUserID))
        return new CommonsArrayList <> ();

    return getAllMapped (aGroup -> aGroup.containsUserID (sUserID),
                         aGroup -> aGroup.getID ());
}
python
def ensureVisible(self, viewType):
    """
    Find and switch to the first tab of the specified view type,
    adding a new tab of that type when none exists yet.
    
    :param      viewType | <subclass of XView>
    
    :return     <XView> || None
    """
    # Nothing to do when the requested type is already current.
    current = self.currentView()
    if type(current) == viewType:
        return current

    # Suppress signals/repaints while the tab switch happens.
    self.blockSignals(True)
    self.setUpdatesEnabled(False)

    found = None
    for index in xrange(self.count()):
        candidate = self.widget(index)
        if type(candidate) == viewType:
            self.setCurrentIndex(index)
            found = candidate
            break

    if found is None:
        # No tab of that type exists yet -- create one.
        found = self.addView(viewType)

    self.blockSignals(False)
    self.setUpdatesEnabled(True)

    return found
java
/**
 * Builds the backup variant of a storage key by formatting it and
 * prepending the backup prefix.
 *
 * @param origKey the original storage key; must not be null
 * @return the prefixed, formatted backup key
 * @throws IllegalArgumentException if {@code origKey} is null (the
 *         {@code @Nonnull} contract is not enforced at runtime)
 */
@Nonnull
public String createBackupKey( @Nonnull final String origKey ) {
    if ( origKey == null ) {
        throw new IllegalArgumentException( "The origKey must not be null." );
    }
    final String formattedKey = _storageKeyFormat.format( origKey );
    return BACKUP_PREFIX + formattedKey;
}
python
def do_stop_role(self, role):
    """
    Stop a role
    Usage:
        > stop_role <role>     Stops this role
    """
    # Guard clauses: require a role argument and a connected cluster.
    if not role:
        return None
    if not self.has_cluster():
        return None
    if '-' not in role:
        # Role names are expected in "<service>-<role>" form.
        print("Please enter a valid role name")
        return None

    service_name = role.split('-')[0]
    try:
        cluster = api.get_cluster(self.cluster)
        service = cluster.get_service(service_name)
        service.stop_roles(role)
        print("Stopping Role")
    except ApiException:
        print("Error: Role or Service Not Found")
python
def get_data_for_name(cls, service_name):
    """Get the data relating to a named music service.

    Args:
        service_name (str): The name of the music service for which data
            is required.

    Returns:
        dict: Data relating to the music service.

    Raises:
        `MusicServiceException`: if the music service cannot be found.
    """
    all_services = cls._get_music_services_data().values()
    # First service whose display name matches, or None.
    match = next(
        (data for data in all_services if data["Name"] == service_name),
        None)
    if match is None:
        raise MusicServiceException(
            "Unknown music service: '%s'" % service_name)
    return match
java
/**
 * Updates the password and expiry time of a user account on the specified
 * compute node.
 *
 * <p>Convenience overload: builds a {@link NodeUpdateUserParameter} from
 * the individual values and delegates to the parameter-based overload.
 *
 * @param poolId id of the pool containing the node
 * @param nodeId id of the compute node
 * @param userName name of the user account to update
 * @param password new password for the account
 * @param expiryTime time at which the account should expire
 * @param additionalBehaviors behaviors applied to the Batch request
 * @throws BatchErrorException if the Batch service returns an error
 * @throws IOException if an I/O error occurs
 */
public void updateComputeNodeUser(String poolId, String nodeId, String userName, String password, DateTime expiryTime,
        Iterable<BatchClientBehavior> additionalBehaviors) throws BatchErrorException, IOException {
    NodeUpdateUserParameter updateRequest = new NodeUpdateUserParameter();
    updateRequest.withPassword(password);
    updateRequest.withExpiryTime(expiryTime);

    updateComputeNodeUser(poolId, nodeId, userName, updateRequest, additionalBehaviors);
}
java
/**
 * Lists the model evaluations under the given parent resource.
 *
 * <p>Convenience overload: wraps the parent name in a
 * {@link ListModelEvaluationsRequest} and delegates to the request-based
 * overload.
 *
 * @param parent resource name of the parent model
 * @return a paged response over the model evaluations
 */
public final ListModelEvaluationsPagedResponse listModelEvaluations(String parent) {
    ListModelEvaluationsRequest evaluationsRequest =
        ListModelEvaluationsRequest.newBuilder().setParent(parent).build();
    return listModelEvaluations(evaluationsRequest);
}
java
/**
 * Returns a lazy iterator over the input splits provided by the
 * environment's {@code InputSplitProvider}.
 *
 * <p>Splits are fetched one at a time; {@code hasNext()} pre-fetches and
 * caches a split, which {@code next()} then hands out.  Iteration ends
 * when the provider returns {@code null}.
 *
 * @return an iterator over the remaining input splits (removal is not
 *         supported)
 */
public Iterator<T> getInputSplits() {

    final InputSplitProvider provider = getEnvironment().getInputSplitProvider();

    return new Iterator<T>() {

        /** Split fetched by hasNext() but not yet returned by next(). */
        private T cachedSplit;

        @Override
        public boolean hasNext() {
            if (this.cachedSplit != null) {
                return true;
            }

            final InputSplit split = provider.getNextInputSplit();
            if (split == null) {
                return false;
            }

            // The provider hands out untyped splits; the cast is safe by
            // the task's contract with its input format.
            @SuppressWarnings("unchecked")
            final T typedSplit = (T) split;
            this.cachedSplit = typedSplit;
            return true;
        }

        @Override
        public T next() {
            if (this.cachedSplit == null && !hasNext()) {
                throw new NoSuchElementException();
            }
            final T result = this.cachedSplit;
            this.cachedSplit = null;
            return result;
        }

        @Override
        public void remove() {
            throw new UnsupportedOperationException();
        }
    };
}
java
/**
 * Handles the "add canal" form submission: binds the form groups onto a
 * new {@link Canal}/{@link CanalParameter}, parses the free-text fields
 * (ZK clusters, grouped DB addresses, positions), validates the heartbeat
 * SQL, persists the canal and finally redirects to the canal list.
 *
 * <p>On validation/persistence failure the corresponding custom error is
 * set and the method returns without redirecting.
 */
public void doAdd(@FormGroup("canalInfo") Group canalInfo,
                  @FormGroup("canalParameterInfo") Group canalParameterInfo,
                  @FormField(name = "formCanalError", group = "canalInfo") CustomErrors err,
                  @FormField(name = "formHeartBeatError", group = "canalParameterInfo") CustomErrors heartBeatErr,
                  Navigator nav) throws Exception {
    Canal canal = new Canal();
    CanalParameter parameter = new CanalParameter();
    // Populate the beans from the submitted form fields.
    canalInfo.setProperties(canal);
    canalParameterInfo.setProperties(parameter);

    // ZK clusters are submitted as a ';'-separated string.
    String zkClustersString = canalParameterInfo.getField("zkClusters").getStringValue();
    String[] zkClusters = StringUtils.split(zkClustersString, ";");
    parameter.setZkClusters(Arrays.asList(zkClusters));
    Long zkClusterId = canalParameterInfo.getField("autoKeeperClusterId").getLongValue();
    parameter.setZkClusterId(zkClusterId);
    canal.setCanalParameter(parameter);

    String dbAddressesString = canalParameterInfo.getField("groupDbAddresses").getStringValue();
    // Parsing format:
    // 127.0.0.1:3306:MYSQL,127.0.0.1:3306:ORACLE;127.0.0.1:3306,127.0.0.1:3306;
    // The outer ';' separates master/standby sets; the inner ',' separates
    // the members of one group.
    if (StringUtils.isNotEmpty(dbAddressesString)) {
        List<List<DataSourcing>> dbSocketAddress = new ArrayList<List<DataSourcing>>();
        String[] dbAddresses = StringUtils.split(dbAddressesString, ";");
        for (String dbAddressString : dbAddresses) {
            List<DataSourcing> groupDbSocketAddress = new ArrayList<DataSourcing>();
            String[] groupDbAddresses = StringUtils.split(dbAddressString, ",");
            for (String groupDbAddress : groupDbAddresses) {
                // Each entry is host:port[:SOURCING_TYPE]; type defaults to
                // the parameter's configured sourcing type.
                String strs[] = StringUtils.split(groupDbAddress, ":");
                InetSocketAddress address = new InetSocketAddress(strs[0].trim(), Integer.valueOf(strs[1]));
                SourcingType type = parameter.getSourcingType();
                if (strs.length > 2) {
                    type = SourcingType.valueOf(strs[2]);
                }
                groupDbSocketAddress.add(new DataSourcing(type, address));
            }
            dbSocketAddress.add(groupDbSocketAddress);
        }
        parameter.setGroupDbAddresses(dbSocketAddress);
    }

    // Positions are submitted as a ';'-separated string.
    String positionsString = canalParameterInfo.getField("positions").getStringValue();
    if (StringUtils.isNotEmpty(positionsString)) {
        String positions[] = StringUtils.split(positionsString, ";");
        parameter.setPositions(Arrays.asList(positions));
    }

    // NOTE(review): this rejects heartbeat SQL that *starts with* "select";
    // the inverse (requiring a select) would be the more usual check --
    // confirm intended semantics before changing.
    if (parameter.getDetectingEnable()
        && StringUtils.startsWithIgnoreCase(parameter.getDetectingSQL(), "select")) {
        heartBeatErr.setMessage("invaliedHeartBeat");
        return;
    }

    try {
        canalService.create(canal);
    } catch (RepeatConfigureException rce) {
        // Duplicate canal configuration.
        err.setMessage("invalidCanal");
        return;
    }

    if (parameter.getSourcingType().isMysql() && parameter.getSlaveId() == null) {
        // Derive a slaveId from the generated canal id, then update once more.
        parameter.setSlaveId(10000 + canal.getId());
        try {
            canalService.modify(canal);
        } catch (RepeatConfigureException rce) {
            err.setMessage("invalidCanal");
            return;
        }
    }

    nav.redirectTo(WebConstant.CANAL_LIST_LINK);
}
java
public static void runExample( AdWordsServicesInterface adWordsServices, AdWordsSession session, String conversionName, String gclId, OfflineConversionAdjustmentType adjustmentType, String conversionTime, String adjustmentTime, @Nullable Double adjustedValue) throws RemoteException { // Get the OfflineConversionAdjustmentFeedService. OfflineConversionAdjustmentFeedServiceInterface offlineConversionFeedService = adWordsServices.get(session, OfflineConversionAdjustmentFeedServiceInterface.class); // Associate conversion adjustments with the existing named conversion tracker. The GCLID should // have been uploaded before with a conversion. GclidOfflineConversionAdjustmentFeed feed = new GclidOfflineConversionAdjustmentFeed(); feed.setConversionName(conversionName); feed.setAdjustmentType(adjustmentType); feed.setConversionTime(conversionTime); feed.setAdjustmentTime(adjustmentTime); feed.setAdjustedValue(adjustedValue); feed.setGoogleClickId(gclId); OfflineConversionAdjustmentFeedOperation offlineConversionOperation = new OfflineConversionAdjustmentFeedOperation(); offlineConversionOperation.setOperator(Operator.ADD); offlineConversionOperation.setOperand(feed); OfflineConversionAdjustmentFeedReturnValue offlineConversionReturnValue = offlineConversionFeedService.mutate( new OfflineConversionAdjustmentFeedOperation[] {offlineConversionOperation}); for (OfflineConversionAdjustmentFeed newFeed : offlineConversionReturnValue.getValue()) { System.out.printf( "Uploaded conversion adjusted value of %.4f for Google Click ID '%s'.%n", newFeed.getAdjustedValue(), ((GclidOfflineConversionAdjustmentFeed) newFeed).getGoogleClickId()); } }