/*
 * Copyright Terracotta, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.ehcache.core.spi.service;

import java.util.Arrays;
import java.util.Collection;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Helper class to find a service or service configuration matching the wanted type. Note that the class
 * is named {@code ServiceUtils} but it would actually work with anything, not only service implementations.
 */
public final class ServiceUtils {

  private ServiceUtils() {
    // No instance possible
  }

  private static <T> Stream<T> findStreamAmongst(Class<T> clazz, Collection<?> instances) {
    return instances.stream()
      .filter(clazz::isInstance)
      .map(clazz::cast);
  }

  /**
   * Find instances of {@code clazz} among the {@code instances}.
   *
   * @param clazz searched class
   * @param instances instances looked at
   * @param <T> type of the searched instances
   * @return the list of compatible instances
   */
  public static <T> Collection<T> findAmongst(Class<T> clazz, Collection<?> instances) {
    return findStreamAmongst(clazz, instances)
      .collect(Collectors.toList());
  }

  /**
   * Find instances of {@code clazz} among the {@code instances}.
   *
   * @param clazz searched class
   * @param instances instances looked at
   * @param <T> type of the searched instances
   * @return the list of compatible instances
   */
  public static <T> Collection<T> findAmongst(Class<T> clazz, Object... instances) {
    return findAmongst(clazz, Arrays.asList(instances));
  }

  /**
   * Find the only expected instance of {@code clazz} among the {@code instances}.
   *
   * @param clazz searched class
   * @param instances instances looked at
   * @param <T> type of the searched instance
   * @return the compatible instance or null if none are found
   * @throws IllegalArgumentException if more than one matching instance is found
   */
  public static <T> T findSingletonAmongst(Class<T> clazz, Collection<?> instances) {
    return findOptionalAmongst(clazz, instances)
      .orElse(null);
  }

  /**
   * Find the only expected instance of {@code clazz} among the {@code instances}.
   *
   * @param clazz searched class
   * @param instances instances looked at
   * @param <T> type of the searched instance
   * @return the optionally found compatible instance
   * @throws IllegalArgumentException if more than one matching instance is found
   */
  public static <T> Optional<T> findOptionalAmongst(Class<T> clazz, Collection<?> instances) {
    return findStreamAmongst(clazz, instances)
      .reduce((i1, i2) -> {
        throw new IllegalArgumentException("More than one " + clazz.getName() + " found");
      });
  }

  /**
   * Find the only expected instance of {@code clazz} among the {@code instances}.
   *
   * @param clazz searched class
   * @param instances instances looked at
   * @param <T> type of the searched instance
   * @return the compatible instance or null if none are found
   * @throws IllegalArgumentException if more than one matching instance is found
   */
  public static <T> T findSingletonAmongst(Class<T> clazz, Object... instances) {
    return findSingletonAmongst(clazz, Arrays.asList(instances));
  }

  /**
   * Find the only expected instance of {@code clazz} among the {@code instances}.
   *
   * @param clazz searched class
   * @param instances instances looked at
   * @param <T> type of the searched instance
   * @return the optionally found compatible instance
   * @throws IllegalArgumentException if more than one matching instance is found
   */
  public static <T> Optional<T> findOptionalAmongst(Class<T> clazz, Object... instances) {
    return findOptionalAmongst(clazz, Arrays.asList(instances));
  }
}
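// A minimal usage sketch for ServiceUtils, added for illustration and assumed
// to sit in the same source file (so the imports above apply). FooConfig and
// BarConfig are hypothetical stand-ins, not real Ehcache service types.
class ServiceUtilsUsageSketch {
  static class FooConfig {}
  static class BarConfig {}

  public static void main(String[] args) {
    Collection<Object> instances = Arrays.asList(new FooConfig(), new BarConfig());
    // The single FooConfig, or null if absent; would throw
    // IllegalArgumentException if the collection held more than one.
    FooConfig foo = ServiceUtils.findSingletonAmongst(FooConfig.class, instances);
    // Every BarConfig in the collection (here, exactly one).
    Collection<BarConfig> bars = ServiceUtils.findAmongst(BarConfig.class, instances);
    System.out.println(foo + " / " + bars.size());
  }
}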
//
//  ReverseTest.cpp
//  MNNTests
//
//  Created by MNN on 2021/02/20.
//  Copyright © 2018, Alibaba Group Holding Limited
//

#include <math.h>    // ::fabsf
#include <string.h>  // memcpy
#include <memory>    // std::unique_ptr
#include <MNN/expr/Expr.hpp>
#include <MNN/expr/ExprCreator.hpp>
#include "MNNTestSuite.h"
#include "TestUtils.h"

using namespace MNN::Express;

class ReverseTest : public MNNTestCase {
public:
    virtual ~ReverseTest() = default;
    virtual bool run(int precision) {
        auto input = _Input({3, 2, 3}, NCHW);
        input->setName("input_tensor");
        // set input data
        const float inputData[] = {1,  2,  3,  4,  5,  6,  7,  8,  9,
                                   10, 11, 12, 13, 14, 15, 16, 17, 18};
        auto inputPtr = input->writeMap<float>();
        memcpy(inputPtr, inputData, 18 * sizeof(float));

        auto output0 = _Reverse(input, _Scalar<int32_t>(0));
        const std::vector<float> expectedOutput0 = {13, 14, 15, 16, 17, 18, 7, 8, 9,
                                                    10, 11, 12, 1,  2,  3,  4, 5, 6};
        auto gotOutput0 = output0->readMap<float>();
        for (int i = 0; i < 18; ++i) {
            auto diff = ::fabsf(gotOutput0[i] - expectedOutput0[i]);
            if (diff > 0.01) {
                MNN_ERROR("ReverseTest[axis=0] test failed: %f - %f!\n", expectedOutput0[i], gotOutput0[i]);
                return false;
            }
        }

        auto output1 = _Reverse(input, _Scalar<int32_t>(1));
        const std::vector<float> expectedOutput1 = {4,  5,  6,  1,  2,  3,  10, 11, 12,
                                                    7,  8,  9,  16, 17, 18, 13, 14, 15};
        auto gotOutput1 = output1->readMap<float>();
        for (int i = 0; i < 18; ++i) {
            auto diff = ::fabsf(gotOutput1[i] - expectedOutput1[i]);
            if (diff > 0.01) {
                MNN_ERROR("ReverseTest[axis=1] test failed: %f - %f!\n", expectedOutput1[i], gotOutput1[i]);
                return false;
            }
        }

        auto output2 = _Reverse(input, _Scalar<int32_t>(2));
        const std::vector<float> expectedOutput2 = {3,  2,  1,  6,  5,  4,  9,  8,  7,
                                                    12, 11, 10, 15, 14, 13, 18, 17, 16};
        auto gotOutput2 = output2->readMap<float>();
        for (int i = 0; i < 18; ++i) {
            auto diff = ::fabsf(gotOutput2[i] - expectedOutput2[i]);
            if (diff > 0.01) {
                MNN_ERROR("ReverseTest[axis=2] test failed: %f - %f!\n", expectedOutput2[i], gotOutput2[i]);
                return false;
            }
        }
        return true;
    }

private:
    VARP _Reverse(VARP x, VARP axis) {
        std::unique_ptr<MNN::OpT> op(new MNN::OpT);
        op->type = MNN::OpType_Reverse;
        return (Variable::create(Expr::create(op.get(), {x, axis})));
    }
};
MNNTestSuiteRegister(ReverseTest, "op/reverse");
## Copyright 2015-2019 <NAME>, <NAME>
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##     http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.

from nine import str

from PyFlow.UI.Tool.Tool import ShelfTool
from PyFlow.Packages.PyFlowBase.Tools import RESOURCES_DIR
from PyFlow.UI.ContextMenuDataBuilder import ContextMenuDataBuilder

from Qt import QtGui
from Qt.QtWidgets import QFileDialog


class ScreenshotTool(ShelfTool):
    """docstring for ScreenshotTool."""

    def __init__(self):
        super(ScreenshotTool, self).__init__()
        self.format = "PNG"

    def saveState(self, settings):
        super(ScreenshotTool, self).saveState(settings)
        settings.setValue("format", self.format)

    def restoreState(self, settings):
        super(ScreenshotTool, self).restoreState(settings)
        formatValue = settings.value("format")
        if formatValue is not None:
            self.format = formatValue
        else:
            self.format = "PNG"

    def onSetFormat(self, fmt):
        self.format = fmt

    def contextMenuBuilder(self):
        builder = ContextMenuDataBuilder()
        builder.addEntry("Save to PNG", "PNG", lambda: self.onSetFormat("PNG"))
        builder.addEntry("Save to JPG", "JPG", lambda: self.onSetFormat("JPG"))
        return builder

    @staticmethod
    def toolTip():
        return "Takes screenshot of visible area of canvas and\nsaves image to file"

    @staticmethod
    def getIcon():
        return QtGui.QIcon(RESOURCES_DIR + "screenshot_icon.png")

    @staticmethod
    def name():
        return str("ScreenshotTool")

    def do(self):
        name_filter = "Image (*.{0})".format(self.format.lower())
        fName = QFileDialog.getSaveFileName(filter=name_filter)
        if not fName[0] == '':
            print("save screen to {0}".format(fName[0]))
            img = self.pyFlowInstance.getCanvas().grab()
            img.save(fName[0], format=self.format, quality=100)
// Source: hbraha/ovirt-engine
package org.ovirt.engine.core.common.businessentities.gluster;

/**
 * Enum of Gluster Hooks Statuses
 *
 * @see GlusterHookEntity
 */
public enum GlusterHookStatus {
    /**
     * Hook is enabled in the cluster
     */
    ENABLED,

    /**
     * Hook is disabled in the cluster
     */
    DISABLED,

    /**
     * Unknown/Missing hook status
     */
    MISSING;
}
#include "CondFormats/L1TObjects/interface/L1MuDTTFMasks.h" #include "FWCore/Utilities/interface/typelookup.h" TYPELOOKUP_DATA_REG(L1MuDTTFMasks);
__author__ = 'maartenbreddels'
import string
import sys
import logging

import astropy.io.votable.ucd
import astropy.units
# from astropy.utils import data
import astropy.utils.data
import numpy as np

import vaex
import vaex.dataset
from vaex.ui.qt import *

logger = logging.getLogger("vaex.ui.completer")


# based on http://stackoverflow.com/a/26065682
class Completer(QtGui.QCompleter):
    def __init__(self, line_edit, allowed_chars=string.ascii_letters + string.digits + "_",
                 separators=None, match_contains=True, match_case=False):
        QtGui.QCompleter.__init__(self, [], line_edit)
        self.line_edit = line_edit
        self.match_contains = match_contains
        self.match_case = match_case
        self.allowed_chars = allowed_chars
        self.separators = separators
        self.line_edit.cursorPositionChanged.connect(self.onCursorPositionChanged)
        self.last_editing_cursor_position = None
        self.activated.connect(self.onActivated)
        self.setWrapAround(False)
        self.setCompletionMode(QtGui.QCompleter.UnfilteredPopupCompletion)
        self.setCaseSensitivity(QtCore.Qt.CaseInsensitive)
        # self.setCompletionMode(QtGui.QCompleter.InlineCompletion)
        self.model = QtGui.QStringListModel([], self)
        self.setModel(self.model)

    def pathFromIndex(self, index):
        # return QtGui.QCompleter.pathFromIndex(self, index)
        suggested_word = QtGui.QCompleter.pathFromIndex(self, index)
        full_text = self.line_edit.text()
        index = self.line_edit.cursorPosition()
        left, right = self.find_word_bounds(full_text, index, self.allowed_chars)
        suggested_text = full_text[:left] + suggested_word + full_text[right:]
        new_cursor_pos = len(full_text[:left] + suggested_word)
        logger.debug("cursor should go to: %d", new_cursor_pos)

        def fixcursor():
            logger.debug("cursor set to: %d", new_cursor_pos)
            self.line_edit.setCursorPosition(new_cursor_pos)
        # print(("pathFromIndex", index, repr(full_text), repr(suggested_text), repr(suggested_text), self.last_editing_cursor_position))
        # after the text is set by the completer, the cursor is set to the end of the lineedit;
        # we correct it with fixcursor to put it at the end of the word
        QtCore.QTimer.singleShot(0, fixcursor)
        return suggested_text

    def splitPath(self, path):
        index = self.line_edit.cursorPosition()
        left, right = self.find_word_bounds(path, index, self.allowed_chars)
        part = path[left:right]
        result = QtGui.QCompleter.splitPath(self, path)
        # print "splitPath", path, result, part
        suggestions = self.get_suggestions(part, path[:left], path[right:])
        self.suggestions = suggestions
        self.model.setStringList(suggestions)
        # self.model = QtGui.QStringListModel(suggestions, self.parent())
        # self.setModel(self.model)
        # QtCore.QTimer.singleShot(0, lambda: self.setModel(self.model));
        self.parts = [part]
        return self.parts

    def onActivated(self, text):
        pass
        # print "activated", text

    def onCursorPositionChanged(self, old_pos, new_pos):
        # print "cursor", old_pos, new_pos
        # this trick didn't work, as suggested by the SO answer
        self.last_editing_cursor_position = None if old_pos == new_pos else new_pos

    def word_boundary_char(self, char):
        return (self.allowed_chars is not None and char not in self.allowed_chars) or\
            (self.separators is not None and char in self.separators)

    def find_word_bounds(self, text, index, allowed_chars):
        right = left = index
        done = False
        while not done:
            if left == 0:
                done = True
            elif not self.word_boundary_char(text[left - 1]):
                left -= 1
            else:
                done = True
        done = False
        while not done:
            if right == len(text):
                done = True
            elif not self.word_boundary_char(text[right]):
                right += 1
            else:
                done = True
        return left, right

    def get_word_list(self, word, text_left, text_right):
        return "aap aardappel schaap koe blaat".split()

    def get_suggestions(self, typed_word, text_left, text_right):
        def case(word):
            return word if self.match_case else word.lower()
        suggestions = []
        if typed_word:
            word_list = self.get_word_list(typed_word, text_left, text_right)
            for word in word_list:
                if (self.match_contains and case(typed_word) in case(word)) or \
                        (not self.match_contains and case(word).startswith(case(typed_word))):
                    suggestions.append(word)
        return suggestions


ucd_words = astropy.io.votable.ucd.UCDWords()
primary_list = list(sorted(ucd_words._primary))
secondary_list = list(sorted(ucd_words._secondary))


class UCDCompleter(Completer):
    """UCDs have primary words (that come first), and secondary, that come after the second
    UCD words are separated by a ; char
    """

    def __init__(self, line_edit):
        Completer.__init__(self, line_edit, allowed_chars=None, separators=";")

    def get_word_list(self, word, text_left, text_right):
        if text_left.strip():
            return secondary_list
        else:
            return primary_list

    def get_suggestions(self, typed_word, text_left, text_right):
        typed_word = typed_word.lower()
        if text_left.strip():
            word_list = secondary_list
        else:
            word_list = primary_list
        descriptions = {key: desc.lower() for key, desc in ucd_words._descriptions.items() if key in word_list}
        suggestions = []
        if typed_word:
            for word in word_list:
                # if any([typed_word in word for word in word_list]) or any([typed_word in desc for desc in descriptions]):
                # print(typed_word)
                # print(list([typed_word in desc for desc in descriptions]))
                # print(any([typed_word in desc for desc in descriptions]))
                # print([desc for desc in descriptions if typed_word in desc])
                if (typed_word in word) or typed_word in descriptions[word]:
                    suggestions.append(ucd_words._capitalization[word])
        return suggestions


class IdentifierCompleter(Completer):
    """Completes variables and functions"""

    def __init__(self, line_edit, variables=[]):
        self.variables = variables
        Completer.__init__(self, line_edit)

    def get_word_list(self, word, text_left, text_right):
        return self.variables


unit_list = [name for name, unit in vars(astropy.units).items() if isinstance(unit, astropy.units.UnitBase)]


class UnitCompleter(Completer):
    """Completes units found in astropy"""

    def __init__(self, line_edit, unit_list=unit_list):
        self.unit_list = unit_list
        Completer.__init__(self, line_edit, match_contains=False)

    def get_word_list(self, word, text_left, text_right):
        return self.unit_list


# based on https://gist.github.com/Riateche/5984815
class UCDDelegate(QtGui.QItemDelegate):
    def __init__(self, parent):
        QtGui.QItemDelegate.__init__(self, parent)

    def createEditor(self, parent, option, index):
        editor = QtGui.QLineEdit(parent)
        completer = UCDCompleter(editor)
        editor.setCompleter(completer)
        # self.connect(combo, QtCore.SIGNAL("currentIndexChanged(int)"), self, QtCore.SLOT("currentIndexChanged()"))
        return editor

    def setEditorData(self, editor, index):
        editor.blockSignals(True)
        # editor.setCurrentIndex(int(index.model().data(index)))
        editor.setText(index.model().data(index))
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.text())

    def currentIndexChanged(self):
        self.commitData.emit(self.sender())


class UnitDelegate(QtGui.QStyledItemDelegate):
    def __init__(self, parent):
        QtGui.QStyledItemDelegate.__init__(self, parent)
        self.lastEditor = None

    def createEditor(self, parent, option, index):
        self.lastEditor = editor = QtGui.QLineEdit(parent)
        self.completer = UnitCompleter(editor)
        editor.setCompleter(self.completer)
        # self.connect(combo, QtCore.SIGNAL("currentIndexChanged(int)"), self, QtCore.SLOT("currentIndexChanged()"))
        return editor

    def setEditorData(self, editor, index):
        editor.blockSignals(True)
        # editor.setCurrentIndex(int(index.model().data(index)))
        editor.setText(index.model().data(index))
        editor.blockSignals(False)

    def setModelData(self, editor, model, index):
        model.setData(index, editor.text())

    def currentIndexChanged(self):
        self.commitData.emit(self.sender())


vars = dir(np)
# vars.append("hoeba")


class LineCompleter(QtGui.QLineEdit):
    def __init__(self, parent):
        QtGui.QLineEdit.__init__(self, parent)
        # completer = UCDCompleter(self)
        # assert completer.word_boundary_char(";")
        completer = UnitCompleter(self)
        self.setCompleter(completer)


class ExpressionCombobox(QtGui.QComboBox):
    def __init__(self, parent, dataset, variables=False):
        """
        :param parent:
        :param Dataset dataset:
        """
        QtGui.QComboBox.__init__(self, parent)
        self.identifiers = []
        self.columns = dataset.get_column_names(virtual=True)
        self.identifiers.extend(vaex.dataset.expression_namespace.keys())
        self.identifiers.extend(self.columns)
        if variables:
            self.identifiers.extend(list(dataset.variables.keys()))
        self.addItems([""] + self.columns)
        # = list(dataset.variables.keys()) + list(vaex.dataset.expression_namespace.keys())
        # self.addItems(list(dataset.variables.keys()))
        # else:
        #     self.identifiers = list(self.columns) + list(vaex.dataset.expression_namespace.keys())
        self.setEditable(True)
        lineEdit = self.lineEdit()
        self.completer = IdentifierCompleter(lineEdit, self.identifiers)
        lineEdit.setCompleter(self.completer)


if __name__ == "__main__":
    app = QtGui.QApplication([])
    dialog = QtGui.QDialog()
    dialog.resize(400, 100)
    if 1:
        lineEdit = LineCompleter(dialog)
        # combo = QtGui.QComboBox(dialog)
        # combo.setEditable(True)
        # combo.addItems("hoeba blaat schaap".split())
        # lineEdit = combo.lineEdit()
        # completer = MathCompleter(lineEdit, vars)
        completer = UnitCompleter(lineEdit)
        lineEdit.setCompleter(completer)
    else:
        dataset = vaex.open(sys.argv[1])
        box = ExpressionCombobox(dialog, dataset)
    dialog.show()
    dialog.raise_()
    dialog.exec_()
    # app.exec_()
/*
 * Copyright 2020 The gRPC Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.grpc.rls;

import static com.google.common.base.Preconditions.checkNotNull;

import io.grpc.ConnectivityState;
import io.grpc.LoadBalancer.Helper;
import io.grpc.LoadBalancer.SubchannelPicker;
import io.grpc.util.ForwardingLoadBalancerHelper;
import javax.annotation.Nonnull;

/**
 * A delegating {@link Helper} for the child load balancer. The child load balancer notifies the
 * higher-level load balancer with an aggregated status instead of each individual child load
 * balancer's state.
 */
final class ChildLoadBalancerHelper extends ForwardingLoadBalancerHelper {

  private final String target;
  private final Helper rlsHelper;
  private final SubchannelStateManager subchannelStateManager;
  private final SubchannelPicker picker;

  private ChildLoadBalancerHelper(
      String target,
      Helper rlsHelper,
      SubchannelStateManager subchannelStateManager,
      SubchannelPicker picker) {
    this.target = checkNotNull(target, "target");
    this.rlsHelper = checkNotNull(rlsHelper, "rlsHelper");
    this.subchannelStateManager = checkNotNull(subchannelStateManager, "subchannelStateManager");
    this.picker = checkNotNull(picker, "picker");
  }

  @Override
  protected Helper delegate() {
    return rlsHelper;
  }

  /**
   * Updates balancing state from one or more subchannels tracked in the {@link
   * SubchannelStateManager}. The passed picker will be ignored; instead the picker which governs
   * many subchannels/pickers will be reported to the parent load balancer.
   */
  @Override
  public void updateBalancingState(
      @Nonnull ConnectivityState newState, @Nonnull SubchannelPicker unused) {
    subchannelStateManager.updateState(target, newState);
    super.updateBalancingState(subchannelStateManager.getAggregatedState(), picker);
  }

  static final class ChildLoadBalancerHelperProvider {
    private final Helper helper;
    private final SubchannelStateManager subchannelStateManager;
    private final SubchannelPicker picker;

    ChildLoadBalancerHelperProvider(
        Helper helper, SubchannelStateManager subchannelStateManager, SubchannelPicker picker) {
      this.helper = checkNotNull(helper, "helper");
      this.subchannelStateManager = checkNotNull(subchannelStateManager, "subchannelStateManager");
      this.picker = checkNotNull(picker, "picker");
    }

    ChildLoadBalancerHelper forTarget(String target) {
      return new ChildLoadBalancerHelper(target, helper, subchannelStateManager, picker);
    }
  }
}
// Source: google-ar/chromium
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/safe_browsing_db/android/jni_registrar.h"

#include "base/android/jni_android.h"
#include "base/android/jni_registrar.h"
#include "base/macros.h"
#include "components/safe_browsing_db/android/safe_browsing_api_handler_bridge.h"

namespace safe_browsing {
namespace android {

static base::android::RegistrationMethod kSafeBrowsingRegisteredMethods[] = {
    {"SafeBrowsingApiBridge", safe_browsing::RegisterSafeBrowsingApiBridge},
};

bool RegisterBrowserJNI(JNIEnv* env) {
  return RegisterNativeMethods(env, kSafeBrowsingRegisteredMethods,
                               arraysize(kSafeBrowsingRegisteredMethods));
}

}  // namespace android
}  // namespace safe_browsing
from __future__ import absolute_import

from nlpaug.model.word_stats.word_statistics import *
from nlpaug.model.word_stats.tfidf import *
/* $PostgreSQL: pgsql/src/include/port/linux.h,v 1.42.2.1 2007/07/02 20:12:00 tgl Exp $ */

/*
 * As of July 2007, all known versions of the Linux kernel will sometimes
 * return EIDRM for a shmctl() operation when EINVAL is correct (it happens
 * when the low-order 15 bits of the supplied shm ID match the slot number
 * assigned to a newer shmem segment).  We deal with this by assuming that
 * EIDRM means EINVAL in PGSharedMemoryIsInUse().  This is reasonably safe
 * since in fact Linux has no excuse for ever returning EIDRM; it doesn't
 * track removed segments in a way that would allow distinguishing them from
 * private ones.  But someday that code might get upgraded, and we'd have
 * to have a kernel version test here.
 */
#define HAVE_LINUX_EIDRM_BUG
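/*
 * Illustrative sketch (not the actual PostgreSQL source): one way a caller
 * such as PGSharedMemoryIsInUse() might honor HAVE_LINUX_EIDRM_BUG when
 * interpreting a shmctl() failure. The helper name is hypothetical.
 */
#include <errno.h>

static int
normalize_shmctl_errno(int err)
{
#ifdef HAVE_LINUX_EIDRM_BUG
	if (err == EIDRM)
		return EINVAL;		/* buggy kernels report EIDRM where EINVAL is correct */
#endif
	return err;
}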
// Source: nickles-lee/protobuf-java-format
package com.googlecode.protobuf.format;

import java.io.IOException;

import com.google.protobuf.ExtensionRegistry;
import com.google.protobuf.Message;
import com.google.protobuf.UnknownFieldSet;

/**
 * Created by IntelliJ IDEA.
 * User: aantonov
 * Date: Mar 16, 2010
 * Time: 4:06:05 PM
 * To change this template use File | Settings | File Templates.
 */
public class CouchDBFormat extends JsonFormat {

    /**
     * Outputs a textual representation of the Protocol Message supplied into the parameter output.
     * (This representation is the new version of the classic "ProtocolPrinter" output from the
     * original Protocol Buffer system)
     */
    public void print(final Message message, Appendable output) throws IOException {
        CouchDBGenerator generator = new CouchDBGenerator(output);
        generator.print("{");
        print(message, generator);
        generator.print("}");
    }

    /**
     * Outputs a textual representation of {@code fields} to {@code output}.
     */
    public void print(final UnknownFieldSet fields, Appendable output) throws IOException {
        CouchDBGenerator generator = new CouchDBGenerator(output);
        generator.print("{");
        printUnknownFields(fields, generator);
        generator.print("}");
    }

    /**
     * Parse a text-format message from {@code input} and merge the contents into {@code builder}.
     * Extensions will be recognized if they are registered in {@code extensionRegistry}.
     */
    public void merge(CharSequence input, ExtensionRegistry extensionRegistry,
                      Message.Builder builder) throws ParseException {
        Tokenizer tokenizer = new Tokenizer(input);
        // Based on the state machine @ http://json.org/
        tokenizer.consume("{"); // Needs to happen when the object starts.
        while (!tokenizer.tryConsume("}")) { // Continue till the object is done
            mergeField(tokenizer, extensionRegistry, builder);
        }
    }

    protected static class Tokenizer extends JsonFormat.Tokenizer {

        /**
         * Construct a tokenizer that parses tokens from the given text.
         */
        public Tokenizer(CharSequence text) {
            super(text);
        }

        @Override
        public String consumeIdentifier() throws ParseException {
            String id = super.consumeIdentifier();
            if ("_id".equals(id)) {
                return "id";
            } else if ("_rev".equals(id)) {
                return "rev";
            }
            return id;
        }
    }

    protected static class CouchDBGenerator extends JsonFormat.JsonGenerator {

        public CouchDBGenerator(Appendable output) {
            super(output);
        }

        @Override
        public void print(CharSequence text) throws IOException {
            if ("id".equals(text)) {
                super.print("_id");
            } else if ("rev".equals(text)) {
                super.print("_rev");
            } else {
                super.print(text);
            }
        }
    }
}
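// A hedged usage sketch, appended for illustration: any compiled protobuf
// Message works here, and the assumption is a schema with "id"/"rev" fields.
// CouchDBFormat renders those as CouchDB's "_id"/"_rev" on output, and
// Tokenizer maps them back when parsing with merge().
class CouchDBFormatUsageSketch {
    static String toCouchJson(com.google.protobuf.Message msg) throws java.io.IOException {
        StringBuilder out = new StringBuilder();
        new CouchDBFormat().print(msg, out); // renders "id"/"rev" as "_id"/"_rev"
        return out.toString();
    }
}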
# -*- coding: utf-8 -*-
r"""
Cross-platform logic for dealing with symlinks. Basic functionality should
work on all operating systems including everyone's favorite pathological OS
(note that there is an additional helper file for this case), but there are
some corner cases depending on your version. Recent versions of Windows tend
to work, but there are certain system settings that cause issues. Obviously,
any POSIX system works without difficulty.

Example:
    >>> import ubelt as ub
    >>> from os.path import normpath, join
    >>> dpath = ub.ensure_app_cache_dir('ubelt', normpath('demo/symlink'))
    >>> real_path = join(dpath, 'real_file.txt')
    >>> link_path = join(dpath, 'link_file.txt')
    >>> ub.touch(real_path)
    >>> result = ub.symlink(real_path, link_path, overwrite=True, verbose=3)
    >>> parts = result.split(os.path.sep)
    >>> print(parts[-1])
    link_file.txt
"""
from os.path import exists, islink, join, normpath
import os
import sys
import warnings
from ubelt import util_io
from ubelt import util_platform

__all__ = ['symlink']


if sys.platform.startswith('win32'):  # nocover
    from ubelt import _win32_links
else:
    _win32_links = None

PY2 = sys.version_info[0] == 2
if PY2:
    FileExistsError = IOError


def symlink(real_path, link_path, overwrite=False, verbose=0):
    """
    Create a symbolic link.

    This will work on linux or windows, however windows does have some corner
    cases. For more details see notes in :mod:`ubelt._win32_links`.

    Args:
        real_path (str | PathLike): path to real file or directory

        link_path (str | PathLike): path to desired location for symlink

        overwrite (bool, default=False): overwrite existing symlinks.
            This will not overwrite real files on systems with proper symlinks.
            However, on older versions of windows junctions are
            indistinguishable from real files, so we cannot make this
            guarantee.

        verbose (int, default=0): verbosity level

    Returns:
        str | PathLike: link path

    Example:
        >>> import ubelt as ub
        >>> dpath = ub.ensure_app_cache_dir('ubelt', 'test_symlink0')
        >>> real_path = join(dpath, 'real_file.txt')
        >>> link_path = join(dpath, 'link_file.txt')
        >>> [ub.delete(p) for p in [real_path, link_path]]
        >>> ub.writeto(real_path, 'foo')
        >>> result = symlink(real_path, link_path)
        >>> assert ub.readfrom(result) == 'foo'
        >>> [ub.delete(p) for p in [real_path, link_path]]

    Example:
        >>> import ubelt as ub
        >>> from os.path import dirname
        >>> dpath = ub.ensure_app_cache_dir('ubelt', 'test_symlink1')
        >>> ub.delete(dpath)
        >>> ub.ensuredir(dpath)
        >>> _dirstats(dpath)
        >>> real_dpath = ub.ensuredir((dpath, 'real_dpath'))
        >>> link_dpath = ub.augpath(real_dpath, base='link_dpath')
        >>> real_path = join(dpath, 'afile.txt')
        >>> link_path = join(dpath, 'afile.txt')
        >>> [ub.delete(p) for p in [real_path, link_path]]
        >>> ub.writeto(real_path, 'foo')
        >>> result = symlink(real_dpath, link_dpath)
        >>> assert ub.readfrom(link_path) == 'foo', 'read should be same'
        >>> ub.writeto(link_path, 'bar')
        >>> _dirstats(dpath)
        >>> assert ub.readfrom(link_path) == 'bar', 'very bad bar'
        >>> assert ub.readfrom(real_path) == 'bar', 'changing link did not change real'
        >>> ub.writeto(real_path, 'baz')
        >>> _dirstats(dpath)
        >>> assert ub.readfrom(real_path) == 'baz', 'very bad baz'
        >>> assert ub.readfrom(link_path) == 'baz', 'changing real did not change link'
        >>> ub.delete(link_dpath, verbose=1)
        >>> _dirstats(dpath)
        >>> assert not exists(link_dpath), 'link should not exist'
        >>> assert exists(real_path), 'real path should exist'
        >>> _dirstats(dpath)
        >>> ub.delete(dpath, verbose=1)
        >>> _dirstats(dpath)
        >>> assert not exists(real_path)
    """
    path = normpath(real_path)
    link = normpath(link_path)

    if not os.path.isabs(path):
        # if path is not absolute it must be specified relative to link
        if _can_symlink():
            path = os.path.relpath(path, os.path.dirname(link))
        else:  # nocover
            # On windows, we need to use absolute paths
            path = os.path.abspath(path)

    if verbose:
        print('Symlink: {link} -> {path}'.format(path=path, link=link))
    if islink(link):
        if verbose:
            print('... already exists')
        pointed = _readlink(link)
        if pointed == path:
            if verbose > 1:
                print('... and points to the right place')
            return link
        if verbose > 1:
            if not exists(link):
                print('... but it is broken and points somewhere else: {}'.format(pointed))
            else:
                print('... but it points somewhere else: {}'.format(pointed))
        if overwrite:
            util_io.delete(link, verbose=verbose > 1)
    elif exists(link):
        if _win32_links is None:
            if verbose:
                print("... already exists, but it's a file. This will error.")
            raise FileExistsError(
                'cannot overwrite a physical path: "{}"'.format(path))
        else:  # nocover
            if verbose:
                print('... already exists, and is either a file or hard link. '
                      'Assuming it is a hard link. '
                      'On non-win32 systems this would error.')

    if _win32_links is None:
        os.symlink(path, link)
    else:  # nocover
        _win32_links._symlink(path, link, overwrite=overwrite, verbose=verbose)

    return link


def _readlink(link):
    # Note:
    # https://docs.python.org/3/library/os.html#os.readlink
    # os.readlink was changed on win32 in version 3.8: Added support for
    # directory junctions, and changed to return the substitution path (which
    # typically includes \\?\ prefix) rather than the optional “print name”
    # field that was previously returned.
    if _win32_links:  # nocover
        if _win32_links._win32_is_junction(link):
            return _win32_links._win32_read_junction(link)
    try:
        path = os.readlink(link)
        if util_platform.WIN32:  # nocover
            junction_prefix = '\\\\?\\'
            if path.startswith(junction_prefix):
                path = path[len(junction_prefix):]
        return path
    except Exception:  # nocover
        # On modern operating systems, we should never get here. (I think)
        if exists(link):
            warnings.warn('Reading symlinks seems to not be supported')
        raise


def _can_symlink(verbose=0):  # nocover
    """
    Return true if we have permission to create real symlinks.
    This check always returns True on non-win32 systems.
    If this check returns false, then we still may be able to use junctions.

    Example:
        >>> # Script
        >>> print(_can_symlink(verbose=1))
    """
    if _win32_links is not None:
        return _win32_links._win32_can_symlink(verbose)
    else:
        return True


def _dirstats(dpath=None):  # nocover
    """
    Testing helper for printing directory information
    (mostly for investigating windows weirdness)

    The column prefixes stand for:
    (E - exists), (L - islink), (F - isfile), (D - isdir), (J - isjunction)
    """
    from ubelt import util_colors
    if dpath is None:
        dpath = os.getcwd()
    print('+--------------')
    print('Listing for dpath={}'.format(dpath))
    print('E L F D J - path')
    print('+--------------')
    if not os.path.exists(dpath):
        print('... does not exist')
    else:
        paths = sorted(os.listdir(dpath))
        for path in paths:
            full_path = join(dpath, path)
            E = os.path.exists(full_path)
            L = os.path.islink(full_path)
            F = os.path.isfile(full_path)
            D = os.path.isdir(full_path)
            J = util_platform.WIN32 and _win32_links._win32_is_junction(full_path)
            ELFDJ = [E, L, F, D, J]
            if ELFDJ == [1, 0, 0, 1, 0]:
                # A directory
                path = util_colors.color_text(path, 'green')
            elif ELFDJ == [1, 0, 1, 0, 0]:
                # A file (or a hard link, they're indistinguishable with 1 query)
                path = util_colors.color_text(path, 'white')
            elif ELFDJ == [1, 0, 0, 1, 1]:
                # A directory junction
                path = util_colors.color_text(path, 'yellow')
            elif ELFDJ == [1, 1, 1, 0, 0]:
                # A file link
                path = util_colors.color_text(path, 'brightgreen')
            elif ELFDJ == [1, 1, 0, 1, 0]:
                # A directory link
                path = util_colors.color_text(path, 'brightcyan')
            elif ELFDJ == [0, 1, 0, 0, 0]:
                # A broken file link
                path = util_colors.color_text(path, 'red')
            elif ELFDJ == [0, 1, 0, 1, 0]:
                # A broken directory link
                path = util_colors.color_text(path, 'darkred')
            elif ELFDJ == [0, 0, 0, 1, 1]:
                # A broken directory junction
                path = util_colors.color_text(path, 'purple')
            elif ELFDJ == [1, 0, 1, 0, 1]:
                # A file junction? Thats not good.
                # I guess this is a windows 7 thing?
                path = util_colors.color_text(path, 'red')
            elif ELFDJ == [1, 1, 0, 0, 0]:
                # Windows? Why? What does this mean!?
                # A directory link that cant be resolved?
                path = util_colors.color_text(path, 'red')
            elif ELFDJ == [0, 0, 0, 0, 0]:
                # Windows? AGAIN? HOW DO YOU LIST FILES THAT DONT EXIST?
                # I get it, they are probably broken junctions, but come on.
                # That should probably be 00011 not 00000
                path = util_colors.color_text(path, 'red')
            else:
                print('dpath = {!r}'.format(dpath))
                print('path = {!r}'.format(path))
                raise AssertionError(str(ELFDJ) + str(path))
            line = '{E:d} {L:d} {F:d} {D:d} {J:d} - {path}'.format(**locals())
            if os.path.islink(full_path):
                line += ' -> ' + os.readlink(full_path)
            elif _win32_links is not None:
                if _win32_links._win32_is_junction(full_path):
                    resolved = _win32_links._win32_read_junction(full_path)
                    line += ' => ' + resolved
            print(line)
    print('+--------------')
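# Usage sketch, appended for illustration and modeled on the module's own
# doctest: create a real file, then (re)link to it. Paths are illustrative.
if __name__ == '__main__':
    import ubelt as ub
    dpath = ub.ensure_app_cache_dir('ubelt', 'symlink_demo')
    real_path = join(dpath, 'real_file.txt')
    link_path = join(dpath, 'link_file.txt')
    ub.touch(real_path)
    # overwrite=True replaces a preexisting link; verbose=3 prints each step
    print(ub.symlink(real_path, link_path, overwrite=True, verbose=3))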
{
  "name": "<NAME>",
  "description": "PCB/schematic capture design software.",
  "url": "https://www.altium.com/altium-designer/overview"
}
#include "morphtool.h" #include "tgl.h" #include "tvectorgl.h" #include "tvectorrenderdata.h" #include "tvectorimage.h" #include "tstroke.h" #include <math.h> #include "tools/toolhandle.h" #include "toonz/txshlevelhandle.h" #include "toonz/tframehandle.h" #include "toonz/txshsimplelevel.h" #include <QKeyEvent> class Deformation { public: std::vector<TPointD> m_controlPoints; int m_selected; TAffine m_aff; std::vector<TPointD> m_delta; int getClosest(const TPointD &p) const; Deformation(); void update() { TPointD p0 = m_controlPoints[0]; TPointD p1 = m_controlPoints[2]; TPointD p2 = m_controlPoints[4]; double a00 = p0.x - p2.x, a01 = p1.x - p2.x, a10 = p0.y - p2.y, a11 = p1.y - p2.y; TAffine aff(a00, a01, 0, a10, a11, 0); aff = aff.inv(); TPointD d = -(aff * p2); aff.a13 = d.x; aff.a23 = d.y; m_aff = aff; m_delta.resize(3); m_delta[0] = m_controlPoints[1] - p0; m_delta[1] = m_controlPoints[3] - p1; m_delta[2] = m_controlPoints[5] - p2; } TPointD apply(const TPointD &p, double t = 1.0) { TPointD d = m_aff * p; double c0 = d.x, c1 = d.y, c2 = 1 - c0 - c1; TPointD delta = c0 * m_delta[0] + c1 * m_delta[1] + c2 * m_delta[2]; return p + delta * t; } void deform(TStroke *dstStroke, const TStroke *srcStroke, double t = 1.0) { int n = srcStroke->getControlPointCount(); if (dstStroke->getControlPointCount() < n) n = dstStroke->getControlPointCount(); for (int i = 0; i < n; i++) { TThickPoint srcPoint = srcStroke->getControlPoint(i); dstStroke->setControlPoint(i, apply(srcPoint, t)); } } void deform(TVectorImage *dstImage, const TVectorImage *srcImage, double t = 1.0) { update(); int n = srcImage->getStrokeCount(); if ((int)dstImage->getStrokeCount() < n) n = dstImage->getStrokeCount(); std::vector<int> ii(n); std::vector<TStroke *> oldStrokes(n); for (int i = 0; i < n; i++) { ii[i] = i; oldStrokes[i] = srcImage->getStroke(i); deform(dstImage->getStroke(i), oldStrokes[i], t); } dstImage->notifyChangedStrokes(ii, oldStrokes); } void updateLevel() { TTool::Application *app = TTool::getApplication(); if (!app->getCurrentLevel()->getLevel()) return; TXshSimpleLevelP xl = app->getCurrentLevel()->getLevel()->getSimpleLevel(); if (app->getCurrentFrame()->getFrameType() != TFrameHandle::LevelFrame) return; TFrameId fid = app->getCurrentFrame()->getFid(); TVectorImageP src = xl->getFrame(fid, true); int count = src->getStrokeCount(); for (int i = 1; i < 10; i++) { ++fid; if (!xl->isFid(fid)) { TVectorImageP vi = new TVectorImage(); xl->setFrame(fid, vi); } TVectorImageP vi = xl->getFrame(fid, true); TVectorImageP dst = src->clone(); deform(dst.getPointer(), src.getPointer(), (double)i / (double)9); count = dst->getStrokeCount(); vi->mergeImage(dst, TAffine()); app->getCurrentTool()->getTool()->notifyImageChanged(fid); } } }; Deformation::Deformation() : m_selected(-1) { m_controlPoints.resize(6); m_controlPoints[0] = TPointD(-250, 100); m_controlPoints[2] = TPointD(0, -300); m_controlPoints[4] = TPointD(250, 100); for (int i = 0; i < 6; i += 2) m_controlPoints[i + 1] = m_controlPoints[i]; } int Deformation::getClosest(const TPointD &p) const { int k = -1; double closestD2 = 0; for (int i = 0; i < (int)m_controlPoints.size(); i++) { TPointD cp = m_controlPoints[i]; double d2 = norm2(p - cp); if (k < 0 || d2 <= closestD2) { closestD2 = d2; k = i; } } return closestD2 < 100 ? 
k : -1; } Deformation deformation; /* TThickPoint deform(const TThickPoint &p) { double r2 = p.x*p.x+p.y*p.y; double f = exp(-r2*0.001); return p + delta * f; } */ MorphTool::MorphTool() : m_pixelSize(1) {} MorphTool::~MorphTool() {} void MorphTool::setImage(const TVectorImageP &vi) { m_vi = vi; } void MorphTool::leftButtonDown(const TPointD &pos, const TMouseEvent &e) { m_lastPos = m_firstPos = pos; int index = deformation.getClosest(pos); if (index >= 0) deformation.m_selected = index; else deformation.m_selected = -1; if (m_vi && index >= 0) { m_vi2 = m_vi->clone(); deformation.deform(m_vi2.getPointer(), m_vi.getPointer()); } else { m_vi2 = TVectorImageP(); } } void MorphTool::leftButtonDrag(const TPointD &pos, const TMouseEvent &e) { if (deformation.m_selected < 0) return; TPointD delta = pos - m_lastPos; m_lastPos = pos; deformation.m_controlPoints[deformation.m_selected] += delta; if ((deformation.m_selected & 1) == 0) deformation.m_controlPoints[deformation.m_selected + 1] += delta; if (m_vi2 && m_vi) deformation.deform(m_vi2.getPointer(), m_vi.getPointer()); } void MorphTool::leftButtonUp(const TPointD &pos, const TMouseEvent &e) { m_vi2 = TVectorImageP(); } void MorphTool::draw() { m_pixelSize = sqrt(tglGetPixelSize2()); if (m_vi2) { TVectorRenderData rd(TTranslation(10, 10), TRect(), 0, 0); tglDraw(rd, m_vi2.getPointer()); } double u = m_pixelSize * 5; for (int i = 0; i < (int)deformation.m_controlPoints.size(); i++) { TPointD p = deformation.m_controlPoints[i]; bool selected = deformation.m_selected == i; bool base = (i & 1) == 0; if (base) if (selected) glColor3d(0.8, 0.8, 0.1); else glColor3d(0.5, 0.5, 0.1); else if (selected) glColor3d(0.8, 0.3, 0.1); else glColor3d(0.5, 0.1, 0.1); double r = base ? u * 2 : u * 1; tglDrawDisk(p, r); glColor3d(0, 0, 0); tglDrawCircle(p, r); } glColor3f(0, 1, 0); for (int i = 0; i + 1 < (int)deformation.m_controlPoints.size(); i += 2) { TPointD a = deformation.m_controlPoints[i]; TPointD b = deformation.m_controlPoints[i + 1]; tglDrawSegment(a, b); } /* deformation.update(); glBegin(GL_LINES); for(double x = -200; x<=200; x+=20) for(double y = -200; y<=200; y+=20) { TPointD p0(x,y); TPointD p1 = deformation.apply(p0); glColor3d(0,1,0); tglVertex(p0); glColor3d(1,0,0); tglVertex(p1); } glEnd(); */ } bool MorphTool::keyDown(QKeyEvent *event) { if (event->key() == Qt::Key_A) deformation.updateLevel(); else return false; return true; }
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ash/crosapi/file_manager_ash.h"

#include "base/bind.h"
#include "base/callback.h"
#include "base/files/file_path.h"
#include "base/notreached.h"
#include "chrome/browser/ash/file_manager/open_util.h"
#include "chrome/browser/platform_util.h"
#include "chrome/browser/profiles/profile_manager.h"
#include "chromeos/crosapi/mojom/file_manager.mojom.h"

namespace crosapi {
namespace {

// Adapts a platform_util::OpenOperationResult to a crosapi::mojom::OpenResult
// when running a |callback|.
void RunWithOpenResult(base::OnceCallback<void(mojom::OpenResult)> callback,
                       platform_util::OpenOperationResult result) {
  mojom::OpenResult mojo_result;
  switch (result) {
    case platform_util::OPEN_SUCCEEDED:
      mojo_result = mojom::OpenResult::kSucceeded;
      break;
    case platform_util::OPEN_FAILED_PATH_NOT_FOUND:
      mojo_result = mojom::OpenResult::kFailedPathNotFound;
      break;
    case platform_util::OPEN_FAILED_INVALID_TYPE:
      mojo_result = mojom::OpenResult::kFailedInvalidType;
      break;
    case platform_util::OPEN_FAILED_NO_HANLDER_FOR_FILE_TYPE:
      mojo_result = mojom::OpenResult::kFailedNoHandlerForFileType;
      break;
    case platform_util::OPEN_FAILED_FILE_ERROR:
      mojo_result = mojom::OpenResult::kFailedFileError;
      break;
  }
  std::move(callback).Run(mojo_result);
}

// Opens an item of |type| at |path| and runs |callback| with the result.
void OpenItem(const base::FilePath& path,
              platform_util::OpenItemType item_type,
              base::OnceCallback<void(mojom::OpenResult)> callback) {
  Profile* primary_profile = ProfileManager::GetPrimaryUserProfile();
  file_manager::util::OpenItem(
      primary_profile, path, item_type,
      base::BindOnce(&RunWithOpenResult, std::move(callback)));
}

}  // namespace

FileManagerAsh::FileManagerAsh() = default;

FileManagerAsh::~FileManagerAsh() = default;

void FileManagerAsh::BindReceiver(
    mojo::PendingReceiver<mojom::FileManager> receiver) {
  receivers_.Add(this, std::move(receiver));
}

void FileManagerAsh::DeprecatedShowItemInFolder(const base::FilePath& path) {
  // As of OS M89 all lacros clients now use ShowItemInFolder() below.
  NOTIMPLEMENTED();
}

void FileManagerAsh::ShowItemInFolder(const base::FilePath& path,
                                      ShowItemInFolderCallback callback) {
  Profile* primary_profile = ProfileManager::GetPrimaryUserProfile();
  file_manager::util::ShowItemInFolder(
      primary_profile, path,
      base::BindOnce(&RunWithOpenResult, std::move(callback)));
}

void FileManagerAsh::OpenFolder(const base::FilePath& path,
                                OpenFolderCallback callback) {
  OpenItem(path, platform_util::OPEN_FOLDER, std::move(callback));
}

void FileManagerAsh::OpenFile(const base::FilePath& path,
                              OpenFileCallback callback) {
  OpenItem(path, platform_util::OPEN_FILE, std::move(callback));
}

}  // namespace crosapi
package me.coley.recaf.search;

import java.util.List;
import java.util.Objects;

/**
 * Search result of a matched instruction.
 *
 * @author Matt
 */
public class InsnResult extends SearchResult {
	private final int index;
	private final List<String> lines;

	/**
	 * Constructs an insn result.
	 *
	 * @param index
	 * 		Index of first matched item.
	 * @param lines
	 * 		Lines of matched disassembled method code.
	 */
	public InsnResult(int index, List<String> lines) {
		this.index = index;
		this.lines = lines;
	}

	/**
	 * @return Matched lines of disassembled method code.
	 */
	public List<String> getLines() {
		return lines;
	}

	@Override
	public String toString() {
		return String.join("\n", lines);
	}

	@Override
	public int hashCode() {
		return Objects.hash(lines);
	}

	@Override
	public boolean equals(Object other) {
		if (other instanceof InsnResult)
			return hashCode() == other.hashCode();
		return false;
	}

	@Override
	@SuppressWarnings("unchecked")
	public int compareTo(SearchResult other) {
		int ret = super.compareTo(other);
		if (ret == 0) {
			if (other instanceof InsnResult) {
				InsnResult otherResult = (InsnResult) other;
				ret = getContext().getParent().compareTo(otherResult.getContext().getParent());
				// Same method -> sort by index
				if (ret == 0)
					return Integer.compare(index, otherResult.index);
				// Different method
				return ret;
			}
		}
		return ret;
	}
}
{
  "schema_version": "1.2.0",
  "id": "GHSA-h8x6-6x9h-gmgp",
  "modified": "2022-05-01T23:36:50Z",
  "published": "2022-05-01T23:36:50Z",
  "aliases": [
    "CVE-2008-1116"
  ],
  "details": "Insecure method vulnerability in the Web Scan Object ActiveX control (OL2005.dll) in Rising Antivirus Online Scanner allows remote attackers to force the download and execution of arbitrary code by setting the BaseURL property and invoking the UpdateEngine method. NOTE: some of these details are obtained from third party information.",
  "severity": [],
  "affected": [],
  "references": [
    {
      "type": "ADVISORY",
      "url": "https://nvd.nist.gov/vuln/detail/CVE-2008-1116"
    },
    {
      "type": "WEB",
      "url": "https://exchange.xforce.ibmcloud.com/vulnerabilities/40838"
    },
    {
      "type": "WEB",
      "url": "https://www.exploit-db.com/exploits/5188"
    },
    {
      "type": "WEB",
      "url": "http://secunia.com/advisories/29109"
    },
    {
      "type": "WEB",
      "url": "http://www.securityfocus.com/bid/27997"
    },
    {
      "type": "WEB",
      "url": "http://www.vupen.com/english/advisories/2008/0683/references"
    }
  ],
  "database_specific": {
    "cwe_ids": [],
    "severity": "HIGH",
    "github_reviewed": false
  }
}
// Source: hamarb123/dotnet-api-docs
// <Snippet1>
using namespace System::Runtime::InteropServices;

[ClassInterface(ClassInterfaceType::AutoDispatch)]
public ref class MyClass
{
public:
    MyClass(){}
};
// </Snippet1>
/*                                                                         */
/*  The FreeType private functions used in base module (specification).   */
/*                                                                         */
/*  Copyright 2008, 2010 by                                                */
/*  <NAME>, <NAME>, <NAME>, and suzuki toshiya.                            */
/*                                                                         */
#ifndef NEURALNET_OPENCL_TUNER_H_
#define NEURALNET_OPENCL_TUNER_H_

#include "../core/global.h"
#include "../core/commontypes.h"
#include "../core/logger.h"
#include "../neuralnet/desc.h"
#include "../neuralnet/nninputs.h"
#include "../neuralnet/openclincludes.h"
#include "../neuralnet/openclhelpers.h"

namespace OpenCLParams {
  struct XGemmDirectParams {
    int WGD = 8;
    int MDIMCD = 1;
    int NDIMCD = 1;
    int MDIMAD = 1;
    int NDIMBD = 1;
    int KWID = 1;
    int VWMD = 1;
    int VWND = 1;
    int PADA = 1;
    int PADB = 1;

    std::string desc() const;
    std::string compileOptions() const;
    void fillFromDesc(const std::string& fileName, const std::string& desc);
    bool isValid() const;
  };

  struct XGemmParams {
    int MWG = 8;
    int NWG = 8;
    int KWG = 8;
    int MDIMC = 1;
    int NDIMC = 1;
    int MDIMA = 1;
    int NDIMB = 1;
    int KWI = 1;
    int VWM = 1;
    int VWN = 1;
    int STRM = 0;
    int STRN = 0;
    int SA = 0;
    int SB = 0;

    std::string desc() const;
    std::string compileOptions() const;
    void fillFromDesc(const std::string& fileName, const std::string& desc);
    bool isValid() const;
    bool isSimple() const;
  };

  struct HGemmWmmaParams {
    int MWG = 16;
    int NWG = 16;
    int KWG = 16;
    int MWAVE = 16;
    int NWAVE = 16;
    int MWARP = 16;
    int NWARP = 16;
    int VWM = 2;
    int VWN = 2;
    int SA = 0;
    int SB = 0;

    std::string desc() const;
    std::string compileOptions() const;
    void fillFromDesc(const std::string& fileName, const std::string& desc);
    bool isValid() const;
    bool isSimple() const;
  };

  struct Conv3x3Params {
    //Winograd input and output tile sizes
    int INTILE_XSIZE = 4;
    int INTILE_YSIZE = 4;
    int OUTTILE_XSIZE = 2;
    int OUTTILE_YSIZE = 2;

    int transLocalSize0 = 1;
    int transLocalSize1 = 1;

    int untransLocalSize0 = 1;
    int untransLocalSize1 = 1;
    int untransLocalSize2 = 1;

    std::string desc() const;
    std::string transDesc() const;
    std::string untransDesc() const;
    std::string compileOptions() const;
    void fillFromDesc(const std::string& fileName, const std::string& desc);
    bool isValid() const;
  };

  struct Conv5x5Params {
    //Winograd input and output tile sizes
    int INTILE_XSIZE = 6;
    int INTILE_YSIZE = 6;
    int OUTTILE_XSIZE = 2;
    int OUTTILE_YSIZE = 2;

    int transLocalSize0 = 1;
    int transLocalSize1 = 1;

    int untransLocalSize0 = 1;
    int untransLocalSize1 = 1;
    int untransLocalSize2 = 1;

    std::string desc() const;
    std::string transDesc() const;
    std::string untransDesc() const;
    std::string compileOptions() const;
    void fillFromDesc(const std::string& fileName, const std::string& desc);
    bool isValid() const;
  };

  struct GPoolParams {
    int XYSTRIDE = 1;
    int CHANNELSTRIDE = 1;
    int BATCHSTRIDE = 1;

    std::string desc() const;
    std::string compileOptions() const;
    void fillFromDesc(const std::string& fileName, const std::string& desc);
    bool isValid() const;
  };
}

struct OpenCLTuneParams {
  OpenCLParams::XGemmDirectParams xGemmDirect = OpenCLParams::XGemmDirectParams();
  OpenCLParams::XGemmParams xGemm = OpenCLParams::XGemmParams();

  bool shouldUseFP16Storage = false;
  bool shouldUseFP16Compute = false;
  OpenCLParams::XGemmParams xGemm16 = OpenCLParams::XGemmParams();
  bool shouldUseFP16TensorCores = false;
  OpenCLParams::HGemmWmmaParams hGemmWmma = OpenCLParams::HGemmWmmaParams();

  OpenCLParams::Conv3x3Params conv3x3 = OpenCLParams::Conv3x3Params();
  OpenCLParams::Conv5x5Params conv5x5 = OpenCLParams::Conv5x5Params();
  OpenCLParams::GPoolParams gPool = OpenCLParams::GPoolParams();

  bool operator==(const OpenCLTuneParams& other) const;
  bool isValid() const;

  int getXGemmMPaddingMult(bool usingFP16Compute, bool usingFP16TensorCores) const;
  int getXGemmNPaddingMult(bool usingFP16Compute, bool usingFP16TensorCores) const;
  int getXGemmKPaddingMult(bool usingFP16Compute, bool usingFP16TensorCores) const;

  static void save(const std::string& filename, const OpenCLTuneParams& config);
  static OpenCLTuneParams load(const std::string& filename);
};

namespace OpenCLTuner {
  constexpr int DEFAULT_X_SIZE = NNPos::MAX_BOARD_LEN;
  constexpr int DEFAULT_Y_SIZE = NNPos::MAX_BOARD_LEN;
  constexpr int DEFAULT_BATCH_SIZE = 4;
  constexpr int DEFAULT_WINOGRAD_3X3_TILE_SIZE = 4;

  struct ModelInfoForTuning {
    int maxConvChannels1x1;
    int maxConvChannels3x3;
    int trunkNumChannels;
    int midNumChannels;
    int regularNumChannels;
    int gpoolNumChannels;
    int version;

    static ModelInfoForTuning ofDesc(const ModelDesc* desc);
  };

  void tune(
    const OpenCLTuneParams& initialConfig,
    DevicesContext& devicesContext,
    int gpuIdx,
    int batchSize,
    int nnXLen,
    int nnYLen,
    enabled_t testFP16Mode,
    enabled_t testFP16StorageMode,
    enabled_t testFP16ComputeMode,
    enabled_t testFP16TensorCoresMode,
    ModelInfoForTuning modelInfo,
    bool full,
    int winograd3x3TileSize,
    std::ostream& out,
    bool verboseErrors,
    bool verboseTuner,
    OpenCLTuneParams& tunedConfig
  );

  std::string defaultDirectory(bool makeDir, const std::string& homeDataDirOverride);
  std::string defaultFileName(const std::string& gpuName, int nnXLen, int nnYLen, int trunkNumChannels, int modelVersion);
  std::string defaultFileName(const std::string& gpuName, int nnXLen, int nnYLen, ModelInfoForTuning modelInfo);

  OpenCLTuneParams loadOrAutoTune(
    std::string openCLTunerFile,
    const std::string& homeDataDirOverride,
    const std::string& gpuName,
    int gpuIdxForTuning,
    Logger* logger,
    bool openCLReTunePerBoardSize,
    int nnXLen,
    int nnYLen,
    enabled_t testFP16Mode,
    enabled_t testFP16StorageMode,
    enabled_t testFP16ComputeMode,
    enabled_t testFP16TensorCoresMode,
    ModelInfoForTuning modelInfo,
    bool full
  );

  void autoTuneEverything(
    const std::string& homeDataDirOverride,
    int gpuIdxForTuning,
    Logger* logger,
    enabled_t useFP16Mode,
    bool full
  );
}

#endif //NEURALNET_OPENCL_TUNER_H_
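// A hedged usage sketch (a separate .cpp, not part of the header above):
// round-trips a tuned configuration through the save()/load() helpers
// declared in OpenCLTuneParams. The include path and the "tune.cfg" file
// name are assumptions for the sketch.
#include "opencltuner.h"
#include <cassert>

int main() {
  OpenCLTuneParams params;  // field defaults from the structs above
  OpenCLTuneParams::save("tune.cfg", params);
  OpenCLTuneParams loaded = OpenCLTuneParams::load("tune.cfg");
  assert(loaded == params && loaded.isValid());
  return 0;
}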
// Source: SamihMustafa/bt
/*
 * Copyright (c) 2016—2021 <NAME> and individual contributors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package bt.torrent;

import bt.data.ChunkDescriptor;
import bt.data.DataRange;
import org.junit.BeforeClass;

import java.util.Arrays;

public abstract class BaseBitfieldTest {

    protected static long blockSize = 4;

    protected static ChunkDescriptor emptyChunk, completeChunk;

    @BeforeClass
    public static void setUp() {
        emptyChunk = mockChunk(new byte[]{0, 0, 0, 0});
        completeChunk = mockChunk(new byte[]{1, 1, 1, 1});
    }

    protected static ChunkDescriptor mockChunk(byte[] bitfield) {
        byte[] _bitfield = Arrays.copyOf(bitfield, bitfield.length);
        return new ChunkDescriptor() {
            @Override
            public byte[] getChecksum() {
                return new byte[0];
            }

            @Override
            public int blockCount() {
                return _bitfield.length;
            }

            @Override
            public long length() {
                return blockSize * blockCount();
            }

            @Override
            public long blockSize() {
                return blockSize;
            }

            @Override
            public long lastBlockSize() {
                return length() % blockSize();
            }

            @Override
            public boolean isPresent(int blockIndex) {
                return _bitfield[blockIndex] == 1;
            }

            @Override
            public boolean isComplete() {
                for (byte b : _bitfield) {
                    if (b != 1) {
                        return false;
                    }
                }
                return true;
            }

            @Override
            public boolean isEmpty() {
                for (byte b : _bitfield) {
                    if (b == 1) {
                        return false;
                    }
                }
                return true;
            }

            @Override
            public void clear() {
                // clear the copied state, not the caller's array
                Arrays.fill(_bitfield, (byte) 0);
            }

            @Override
            public DataRange getData() {
                throw new UnsupportedOperationException();
            }
        };
    }
}
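// A hypothetical concrete subclass, sketched to show how the mock factory
// above is meant to be used; it is not part of the original test suite.
class BitfieldSketchTest extends BaseBitfieldTest {

    @org.junit.Test
    public void partialChunkIsNeitherEmptyNorComplete() {
        ChunkDescriptor chunk = mockChunk(new byte[]{1, 0, 1, 0});
        org.junit.Assert.assertTrue(chunk.isPresent(0));
        org.junit.Assert.assertFalse(chunk.isEmpty());
        org.junit.Assert.assertFalse(chunk.isComplete());
        chunk.clear();
        org.junit.Assert.assertTrue(chunk.isEmpty());
    }
}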
{
  "$schema": "http://json.schemastore.org/ide.host",
  "order": 700,
  "icon": "ide/RazorClassLibrary.ico",
  "symbolInfo": [
    {
      "id": "SupportPagesAndViews",
      "isVisible": true,
      "persistenceScope": "templateGroup"
    }
  ]
}
{
  "word": "Intellectualize",
  "definitions": [
    "Give an intellectual character to.",
    "Talk, write, or think intellectually."
  ],
  "parts-of-speech": "Verb"
}
import uuid

from django.db.models import F
from django.test import TestCase
from django.utils.crypto import get_random_string

from zentral.contrib.inventory.models import EnrollmentSecret, MetaBusinessUnit, Tag
from zentral.contrib.santa.models import (Bundle, Configuration, EnrolledMachine, Enrollment,
                                          MachineRule, Rule, Target, translate_rule_policy)


def new_sha256():
    return get_random_string(length=64, allowed_chars='abcdef0123456789')


class SantaRuleEngineTestCase(TestCase):
    @classmethod
    def setUpTestData(cls):
        cls.configuration = Configuration.objects.create(name=get_random_string(256), batch_size=5)
        cls.meta_business_unit = MetaBusinessUnit.objects.create(name=get_random_string(64))
        cls.enrollment_secret = EnrollmentSecret.objects.create(meta_business_unit=cls.meta_business_unit)
        cls.enrollment = Enrollment.objects.create(configuration=cls.configuration,
                                                   secret=cls.enrollment_secret)
        cls.machine_serial_number = get_random_string(64)
        cls.enrolled_machine = EnrolledMachine.objects.create(enrollment=cls.enrollment,
                                                              hardware_uuid=uuid.uuid4(),
                                                              serial_number=cls.machine_serial_number,
                                                              client_mode=Configuration.MONITOR_MODE,
                                                              santa_version="1.17")
        cls.machine_serial_number2 = get_random_string(64)
        cls.enrolled_machine2 = EnrolledMachine.objects.create(enrollment=cls.enrollment,
                                                               hardware_uuid=uuid.uuid4(),
                                                               serial_number=cls.machine_serial_number2,
                                                               client_mode=Configuration.MONITOR_MODE,
                                                               santa_version="1.17")

    def create_rule(self, target_type=Target.BINARY, policy=Rule.ALLOWLIST, configuration=None):
        target = Target.objects.create(type=target_type, sha256=new_sha256())
        if configuration is None:
            configuration = self.configuration
        rule = Rule.objects.create(configuration=configuration, target=target, policy=policy)
        return target, rule

    def create_bundle_rule(self, policy=Rule.ALLOWLIST):
        bundle_target = Target.objects.create(type=Target.BUNDLE, sha256=new_sha256())
        bundle = Bundle.objects.create(
            target=bundle_target,
            path=get_random_string(78),
            executable_rel_path=get_random_string(89),
            name=get_random_string(13),
            version=get_random_string(13),
            version_str=get_random_string(12),
            binary_count=3,
        )
        for _ in range(bundle.binary_count):
            binary_target = Target.objects.create(type=Target.BINARY, sha256=new_sha256())
            bundle.binary_targets.add(binary_target)
        bundle_rule = Rule.objects.create(
            configuration=self.configuration,
            target=bundle_target,
            policy=policy
        )
        return bundle_target, bundle, bundle_rule

    def create_and_serialize_for_iter_rule(self, target_type=Target.BINARY, policy=Rule.ALLOWLIST, configuration=None):
        target, rule = self.create_rule(target_type, policy, configuration)
        result = {
            "target_id": target.pk,
            "policy": rule.policy,
            "rule_type": target.type,
            "sha256": target.sha256,
            "custom_msg": "",
            "version": rule.version,
        }
        return target, rule, result

    def create_and_serialize_rule(self, target_type=Target.BINARY, policy=Rule.ALLOWLIST, configuration=None):
        target, rule = self.create_rule(target_type, policy, configuration)
        serialized_rule = {
            "rule_type": target.type,
            "sha256": target.sha256,
            "policy": translate_rule_policy(rule.policy),
        }
        if rule.custom_msg:
            serialized_rule["custom_msg"] = rule.custom_msg
        return target, rule, serialized_rule

    def test_iter_new_rules(self):
        # create rule
        target, rule, result = self.create_and_serialize_for_iter_rule()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        # sync rule
        machine_rule = MachineRule.objects.create(
            enrolled_machine=self.enrolled_machine,
            target=target,
            policy=rule.policy,
            version=rule.version,
            cursor=get_random_string(8),
        )
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        # update rule
        rule.custom_msg = "New message"
        rule.version = F("version") + 1
        rule.save()
        rule.refresh_from_db()
        result2 = result.copy()
        result2["custom_msg"] = rule.custom_msg
        result2["version"] = 2
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result2])
        # delete rule
        rule.delete()
        result3 = result.copy()
        result3["policy"] = 4  # REMOVE
        result3.pop("custom_msg", None)
        result3["version"] = 1
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result3])
        # sync rule
        machine_rule.delete()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])

    def test_iter_new_rules_second_machine(self):
        # create rule
        target, rule, result = self.create_and_serialize_for_iter_rule()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        # sync rule
        MachineRule.objects.create(
            enrolled_machine=self.enrolled_machine,
            target=target,
            policy=rule.policy,
            version=rule.version,
            cursor=get_random_string(8),
        )
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine2, [])), [result])

    def test_iter_serial_number_new_rules(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        rule.serial_numbers = [get_random_string(13)]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        rule.serial_numbers.append(self.enrolled_machine.serial_number)
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])

    def test_one_excluded_serial_number(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        rule.excluded_serial_numbers = [self.enrolled_machine.serial_number]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        rule.excluded_serial_numbers = [get_random_string()]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])

    def test_two_primary_user_machines_one_excluded_serial_number(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        primary_user = get_random_string(15)
        rule.primary_users.append(primary_user)
        rule.save()
        self.enrolled_machine.primary_user = primary_user
        self.enrolled_machine.save()
        self.enrolled_machine2.primary_user = primary_user
        self.enrolled_machine2.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine2, [])), [result])
        rule.excluded_serial_numbers = [self.enrolled_machine.serial_number]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine2, [])), [result])

    def test_iter_primary_user_new_rules(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        rule.primary_users = [get_random_string(14)]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        primary_user = get_random_string(15)
        rule.primary_users.append(primary_user)
        rule.save()
        self.enrolled_machine.primary_user = primary_user
        self.enrolled_machine.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])

    def test_one_excluded_primary_user(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        primary_user = get_random_string()
        rule.excluded_primary_users = [primary_user]
        rule.save()
        self.enrolled_machine.primary_user = primary_user
        self.enrolled_machine.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        rule.excluded_primary_users = [get_random_string()]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        # no rules if excluded_primary_users and the machine reports no primary user!!!
        self.enrolled_machine.primary_user = None
        self.enrolled_machine.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])

    def test_two_serial_number_machines_one_excluded_primary_user(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        rule.serial_numbers = [self.enrolled_machine.serial_number, self.enrolled_machine2.serial_number]
        rule.save()
        primary_user1 = get_random_string(15)
        self.enrolled_machine.primary_user = primary_user1
        self.enrolled_machine.save()
        primary_user2 = get_random_string(15)
        self.enrolled_machine2.primary_user = primary_user2
        self.enrolled_machine2.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine2, [])), [result])
        rule.excluded_primary_users = [primary_user1]
        rule.save()
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine2, [])), [result])

    def test_iter_tag_new_rules(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        tags = [Tag.objects.create(name=get_random_string(32)) for _ in range(3)]
        rule.tags.set(tags)
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [tags[0].pk])), [result])

    def test_one_excluded_tag(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        tags = [Tag.objects.create(name=get_random_string(32)) for _ in range(2)]
        rule.excluded_tags.set(tags[-1:])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [tags[-1].pk])), [])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [tags[0].pk])), [result])
        rule.excluded_tags.set([])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [tags[-1].pk])), [result])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [result])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [tags[0].pk])), [result])

    def test_primary_user_machine_two_tags_one_excluded_tag(self):
        target, rule, result = self.create_and_serialize_for_iter_rule()
        primary_user = get_random_string(14)
        rule.primary_users = [primary_user]
        rule.save()
        self.enrolled_machine.primary_user = primary_user
        self.enrolled_machine.save()
        tags = [Tag.objects.create(name=get_random_string(32)) for _ in range(3)]
        tag_pks = [t.pk for t in tags]
        rule.tags.set(tags[:-1])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, tag_pks)), [result])
        rule.excluded_tags.add(tags[-1])
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, tag_pks)), [])

    def test_iter_bundle_new_rules(self):
        bundle_target, bundle, bundle_rule = self.create_bundle_rule()
        results = [{
            "target_id": binary_target.pk,
            "policy": bundle_rule.policy,
            "rule_type": binary_target.type,
            "sha256": binary_target.sha256,
            "custom_msg": "",
            "version": bundle_rule.version,
            "file_bundle_hash": bundle_target.sha256,
            "file_bundle_binary_count": bundle.binary_count,
        } for binary_target in bundle.binary_targets.all().order_by("sha256")]
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), results)
        # simulate acknowledged sync
        for binary_target in bundle.binary_targets.all():
            MachineRule.objects.create(
                enrolled_machine=self.enrolled_machine,
                target=binary_target,
                policy=bundle_rule.policy,
                version=bundle_rule.version,
            )
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])
        # delete the rule
        bundle_rule.delete()
        new_results = []
        for r in results:
            nr = r.copy()
            nr["policy"] = MachineRule.REMOVE
            nr.pop("custom_msg")
            nr.pop("file_bundle_hash")
            nr.pop("file_bundle_binary_count")
            new_results.append(nr)
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), new_results)

    def test_configuration_leakage(self):
        configuration2 = Configuration.objects.create(name=get_random_string(256))
        target, rule, _ = self.create_and_serialize_for_iter_rule(configuration=configuration2)
        self.assertEqual(list(MachineRule.objects._iter_new_rules(self.enrolled_machine, [])), [])

    def test_one_next_rule(self):
        target, rule, serialized_rule = self.create_and_serialize_rule()
        for _ in range(2):
            rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
            self.assertIsNotNone(response_cursor)
            self.assertEqual(rule_batch, [serialized_rule])
            machine_rule_qs = self.enrolled_machine.machinerule_set.all()
            self.assertEqual(machine_rule_qs.count(), 1)
            machine_rule = machine_rule_qs.first()
            self.assertEqual(machine_rule.target, target)
            self.assertEqual(machine_rule.policy, rule.policy)
            self.assertEqual(machine_rule.version, rule.version)
            self.assertEqual(machine_rule.cursor, response_cursor)

    def test_next_rule_batch_pagination(self):
        serialized_rules = []
        for _ in range(6):
            _, _, serialized_rule = self.create_and_serialize_rule()
            serialized_rules.append(serialized_rule)
        serialized_rules.sort(key=lambda r: r["sha256"])
        i = 0
        response_cursor = None
        for batch_len in (5, 1):
            rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(
                self.enrolled_machine, [], response_cursor
            )
            self.assertIsNotNone(response_cursor)
            self.assertEqual(MachineRule.objects.filter(enrolled_machine=self.enrolled_machine,
                                                        cursor=response_cursor).count(), batch_len)
            self.assertEqual(rule_batch, serialized_rules[i: i + batch_len])
            i += batch_len
        machine_rule_qs = self.enrolled_machine.machinerule_set.all()
        self.assertEqual(machine_rule_qs.count(), 6)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 5)
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor
        )
        self.assertEqual(len(rule_batch), 0)
        self.assertIsNone(response_cursor)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 6)

    def test_lost_response_batch_pagination(self):
        serialized_rules = []
        for _ in range(11):
            _, _, serialized_rule = self.create_and_serialize_rule()
            serialized_rules.append(serialized_rule)
        serialized_rules.sort(key=lambda r: r["sha256"])
        response_cursor = None
        machine_rule_qs = self.enrolled_machine.machinerule_set.all()
        i = 0
        # first client request, first 5 rules
        batch_len = 5
        rule_batch, response_cursor1 = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor
        )
        self.assertIsNotNone(response_cursor1)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 0)
        self.assertEqual(machine_rule_qs.filter(cursor=response_cursor1).count(), batch_len)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=False).exclude(cursor=response_cursor1).count(), 0)
        self.assertEqual(rule_batch, serialized_rules[i: i + batch_len])
        i += batch_len
        # second client request, next 5 rules
        rule_batch, response_cursor2 = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor1
        )
        self.assertIsNotNone(response_cursor2)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), batch_len)
        self.assertEqual(machine_rule_qs.filter(cursor=response_cursor2).count(), batch_len)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=False).exclude(cursor=response_cursor2).count(), 0)
        self.assertEqual(rule_batch, serialized_rules[i: i + batch_len])
        i += batch_len
        # third client request, with first cursor.
        # the client has never received a response for the second request, and is retrying it.
        i -= batch_len
        rule_batch, response_cursor3 = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor1
        )
        self.assertIsNotNone(response_cursor3)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), batch_len)
        self.assertEqual(machine_rule_qs.filter(cursor=response_cursor3).count(), batch_len)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=False).exclude(cursor=response_cursor3).count(), 0)
        self.assertEqual(rule_batch, serialized_rules[i: i + batch_len])
        i += batch_len
        # the client received the last batch and makes another request
        batch_len = 1
        rule_batch, response_cursor4 = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor3
        )
        self.assertIsNotNone(response_cursor4)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 10)
        self.assertEqual(machine_rule_qs.filter(cursor=response_cursor4).count(), batch_len)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=False).exclude(cursor=response_cursor4).count(), 0)
        self.assertEqual(rule_batch, serialized_rules[i: i + batch_len])
        i += batch_len
        # last batch
        rule_batch, response_cursor5 = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor4
        )
        self.assertIsNone(response_cursor5)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 11)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=False).count(), 0)
        self.assertEqual(rule_batch, [])

    def test_reset_batch_pagination(self):
        serialized_rules = []
        for _ in range(6):
            _, _, serialized_rule = self.create_and_serialize_rule()
            serialized_rules.append(serialized_rule)
        serialized_rules.sort(key=lambda r: r["sha256"])
        machine_rule_qs = self.enrolled_machine.machinerule_set.all()
        # first 2 requests OK
        i = 0
        response_cursor = None
        for batch_len in (5, 1):
            rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(
                self.enrolled_machine, [], response_cursor
            )
            self.assertIsNotNone(response_cursor)
            self.assertEqual(machine_rule_qs.filter(cursor=response_cursor).count(), batch_len)
            self.assertEqual(rule_batch, serialized_rules[i: i + batch_len])
            i += batch_len
        self.assertEqual(machine_rule_qs.count(), 6)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 5)
        # last batch, never acknowledged, the client keeps making new requests without cursor
        # and getting the last unacknowledged rule
        for i in range(2):
            rule_batch, response_cursor_post_reset = MachineRule.objects.get_next_rule_batch(
                self.enrolled_machine, []
            )
            self.assertIsNotNone(response_cursor_post_reset)
            self.assertEqual(rule_batch, [serialized_rules[-1]])
            self.assertEqual(machine_rule_qs.count(), 6)
            self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 5)
            self.assertEqual(machine_rule_qs.filter(cursor=response_cursor_post_reset).count(), 1)
        # the client acknowledges the last rule
        rule_batch, final_response_cursor = MachineRule.objects.get_next_rule_batch(
            self.enrolled_machine, [], response_cursor_post_reset
        )
        self.assertEqual(machine_rule_qs.count(), 6)
        self.assertEqual(machine_rule_qs.filter(cursor__isnull=True).count(), 6)
        self.assertEqual(rule_batch, [])

    def test_updated_rule(self):
        target, rule, serialized_rule = self.create_and_serialize_rule()
        _, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        rule.custom_msg = "YOLO"
        rule.version = F("version") + 1
        rule.save()
        serialized_rule["custom_msg"] = rule.custom_msg
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertIsNotNone(response_cursor)
        self.assertEqual(rule_batch, [serialized_rule])
        machine_rule_qs = self.enrolled_machine.machinerule_set.all()
        self.assertEqual(machine_rule_qs.count(), 1)
        machine_rule = machine_rule_qs.first()
        self.assertEqual(machine_rule.target, target)
        self.assertEqual(machine_rule.policy, rule.policy)
        self.assertEqual(machine_rule.version, 2)
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        self.assertEqual(machine_rule_qs.count(), 1)
        self.assertEqual(machine_rule.pk, machine_rule_qs.first().pk)
        machine_rule.refresh_from_db()
        self.assertIsNone(machine_rule.cursor)
        rule_batch2, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertEqual(rule_batch2, [])
        self.assertEqual(response_cursor, None)

    def test_deleted_rule(self):
        target, rule, serialized_rule = self.create_and_serialize_rule()
        _, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        rule.delete()
        serialized_rule.pop("custom_msg", None)
        serialized_rule["policy"] = "REMOVE"
        response_cursor = None
        for i in range(2):
            rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
            self.enrolled_machine.refresh_from_db()
            self.assertIsNotNone(response_cursor)
            self.assertEqual(rule_batch, [serialized_rule])
            machine_rule_qs = self.enrolled_machine.machinerule_set.all()
            self.assertEqual(machine_rule_qs.count(), 1)
            machine_rule = machine_rule_qs.first()
            self.assertEqual(machine_rule.target, target)
            self.assertEqual(machine_rule.policy, MachineRule.REMOVE)
            self.assertEqual(machine_rule.cursor, response_cursor)
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        self.assertEqual(machine_rule_qs.count(), 0)

    def test_bundle_rules(self):
        # all bundle binary rules with extra attributes
        bundle_target, bundle, bundle_rule = self.create_bundle_rule()
        serialized_rules = [{
            "policy": translate_rule_policy(bundle_rule.policy),
            "rule_type": binary_target.type,
            "sha256": binary_target.sha256,
            "file_bundle_hash": bundle_target.sha256,
            "file_bundle_binary_count": bundle.binary_count,
        } for binary_target in bundle.binary_targets.all().order_by("sha256")]
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertEqual(rule_batch, serialized_rules)
        # noop
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertEqual(rule_batch, [])
        self.assertIsNone(response_cursor)
        # delete rule
        bundle_rule.delete()
        serialized_remove_rules = []
        # all bundle binary remove rules without extra attributes
        for sr in serialized_rules:
            srr = sr.copy()
            srr["policy"] = "REMOVE"
            srr.pop("file_bundle_hash")
            srr.pop("file_bundle_binary_count")
            serialized_remove_rules.append(srr)
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertEqual(rule_batch, serialized_remove_rules)
        # noop
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertEqual(rule_batch, [])
        self.assertIsNone(response_cursor)

    def test_scoped_rule(self):
        # rule without restrictions
        target, rule, serialized_rule = self.create_and_serialize_rule()
        _, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        # scope rule with some tags
        tags = [Tag.objects.create(name=get_random_string(32)) for _ in range(4)]
        rule.tags.set(tags[:-1])
        rule.excluded_tags.set(tags[-2:-1])
        # rule not in scope anymore, needs to be removed
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        serialized_remove_rule = serialized_rule.copy()
        serialized_remove_rule.pop("custom_msg", None)
        serialized_remove_rule["policy"] = "REMOVE"
        self.assertEqual(rule_batch, [serialized_remove_rule])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [], response_cursor)
        # rule removed, noop
        rule_batch, _ = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [])
        self.assertEqual(rule_batch, [])
        # machine tagged, rule needs to be added
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk])
        self.assertEqual(rule_batch, [serialized_rule])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk], response_cursor)
        # rule added, noop
        rule_batch, _ = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk])
        self.assertEqual(rule_batch, [])
        # rule again not in scope, needs to be removed
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine,
                                                                              [tags[0].pk, tags[-2].pk])
        serialized_remove_rule = serialized_rule.copy()
        serialized_remove_rule.pop("custom_msg", None)
        serialized_remove_rule["policy"] = "REMOVE"
        self.assertEqual(rule_batch, [serialized_remove_rule])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk, tags[-2].pk], response_cursor)
        # rule removed, noop
        rule_batch, _ = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk, tags[-2].pk])
        self.assertEqual(rule_batch, [])
        rule.tags.set([])
        rule.excluded_tags.set(tags[-1:])
        # rule again in scope, rule needs to be added
        rule_batch, response_cursor = MachineRule.objects.get_next_rule_batch(self.enrolled_machine,
                                                                              [tags[0].pk, tags[-2].pk])
        self.assertEqual(rule_batch, [serialized_rule])
        MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk, tags[-2].pk], response_cursor)
        # rule added, noop
        rule_batch, _ = MachineRule.objects.get_next_rule_batch(self.enrolled_machine, [tags[0].pk, tags[-2].pk])
        self.assertEqual(rule_batch, [])
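The tests above pin down a cursor-based acknowledgement protocol: a batch is re-sent until its cursor is echoed back in the following request, so a lost response is safe to retry. A minimal sketch of the client loop this implies (apply_rules_locally is a hypothetical helper, not part of zentral; get_next_rule_batch and its cursor semantics are taken from the tests):

# Hypothetical client-side sync loop for the cursor protocol exercised above.
# Passing the previous cursor acknowledges the previous batch; a final call
# returns an empty batch and a None cursor once everything is acknowledged.
def sync_rules(enrolled_machine, tag_pks):
    cursor = None
    while True:
        batch, cursor = MachineRule.objects.get_next_rule_batch(enrolled_machine, tag_pks, cursor)
        apply_rules_locally(batch)  # assumed client-side helper
        if cursor is None:          # nothing left to acknowledge or fetch
            break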
{
  "name": "TrustYouStarsSDK",
  "version": "1.0.0",
  "summary": "TrustYou Stars SDK for integrating TrustYou surveys to your App.",
  "description": "TrustYou Stars SDK for integrating TrustYou surveys to your App.",
  "homepage": "http://trustyou.com",
  "license": "Apache License, Version 2.0",
  "authors": {
    "Motius GmbH": "<EMAIL>"
  },
  "platforms": {
    "ios": "8.0"
  },
  "source": {
    "git": "https://github.com/trustyou/stars-sdk-ios.git",
    "tag": "1.0.0"
  },
  "source_files": [
    "trustyou-stars-sdk",
    "trustyou-stars-sdk/**/*.{h,m,swift}"
  ],
  "pushed_with_swift_version": "3.0"
}
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.web.jsf.wizards;

import javax.swing.event.ChangeEvent;
import javax.swing.event.ChangeListener;
import org.netbeans.api.project.Project;
import org.netbeans.modules.web.api.webmodule.WebModule;
import org.netbeans.modules.web.jsf.api.facesmodel.JSFVersion;
import org.netbeans.spi.project.ui.templates.support.Templates;
import org.openide.WizardDescriptor;
import org.openide.util.ChangeSupport;
import org.openide.util.HelpCtx;
import org.openide.util.NbBundle.Messages;

/**
 *
 * @author <NAME> <<EMAIL>>
 */
class FacesComponentPanel implements WizardDescriptor.Panel<WizardDescriptor>, ChangeListener {

    protected static final String PROP_TAG_NAME = "tagName"; //NOI18N
    protected static final String PROP_TAG_NAMESPACE = "tagNamespace"; //NOI18N
    protected static final String PROP_SAMPLE_CODE = "sampleCode"; //NOI18N

    private final WizardDescriptor descriptor;
    private final ChangeSupport changeSupport = new ChangeSupport(this);
    private FacesComponentPanelVisual gui;

    public FacesComponentPanel(WizardDescriptor descriptor) {
        this.descriptor = descriptor;
    }

    @Override
    public FacesComponentPanelVisual getComponent() {
        if (gui == null) {
            gui = new FacesComponentPanelVisual();
        }
        return gui;
    }

    @Override
    public HelpCtx getHelp() {
        return HelpCtx.DEFAULT_HELP;
    }

    @Override
    public void readSettings(WizardDescriptor settings) {
        getComponent();
        gui.addChangeListener(this);
    }

    @Override
    public void storeSettings(WizardDescriptor settings) {
        getComponent();
        gui.removeChangeListener(this);
        settings.putProperty(PROP_TAG_NAME, gui.getTagName());
        settings.putProperty(PROP_TAG_NAMESPACE, gui.getTagNamespace());
        settings.putProperty(PROP_SAMPLE_CODE, gui.isSampleCode());
    }

    @Messages({
        "FacesComponentPanel.err.jsf.version.not.suficient=Minimal required JSF version for this feature is JSF 2.2"
    })
    @Override
    public boolean isValid() {
        getComponent();
        descriptor.putProperty(WizardDescriptor.PROP_ERROR_MESSAGE, " "); //NOI18N
        Project project = Templates.getProject(descriptor);
        WebModule webModule = WebModule.getWebModule(project.getProjectDirectory());
        if (webModule != null) {
            JSFVersion jsfVersion = JSFVersion.forWebModule(webModule);
            if (jsfVersion != null && !jsfVersion.isAtLeast(JSFVersion.JSF_2_2)) {
                descriptor.putProperty(WizardDescriptor.PROP_ERROR_MESSAGE,
                        Bundle.FacesComponentPanel_err_jsf_version_not_suficient());
                return false;
            }
        }
        return true;
    }

    @Override
    public void addChangeListener(ChangeListener l) {
        changeSupport.addChangeListener(l);
    }

    @Override
    public void removeChangeListener(ChangeListener l) {
        changeSupport.removeChangeListener(l);
    }

    @Override
    public void stateChanged(ChangeEvent e) {
        fireChangeEvent();
    }

    private void fireChangeEvent() {
        changeSupport.fireChange();
    }
}
""" Implement the following operations of a queue using stacks. push(x) -- Push element x to the back of queue. pop() -- Removes the element from in front of queue. peek() -- Get the front element. empty() -- Return whether the queue is empty. Notes: You must use only standard operations of a stack -- which means only push to top, peek/pop from top, size, and is empty operations are valid. Depending on your language, stack may not be supported natively. You may simulate a stack by using a list or deque (double-ended queue), as long as you use only standard operations of a stack. You may assume that all operations are valid (for example, no pop or peek operations will be called on an empty queue). """ __author__ = 'Daniel' class Queue: def __init__(self): self.in_stk = [] self.out_stk = [] def push(self, x): """ :type x: int :rtype: None """ self.in_stk.append(x) def pop(self): """ :rtype: None """ if not self.out_stk: while self.in_stk: self.out_stk.append(self.in_stk.pop()) self.out_stk.pop() def peek(self): """ :rtype: int """ if not self.out_stk: while self.in_stk: self.out_stk.append(self.in_stk.pop()) return self.out_stk[-1] def empty(self): """ :rtype: bool """ return not self.out_stk and not self.in_stk
{
  "resourceType": "DataElement",
  "id": "Sequence.quality.gtFP",
  "meta": {
    "lastUpdated": "2017-04-19T07:44:43.294+10:00"
  },
  "url": "http://hl7.org/fhir/DataElement/Sequence.quality.gtFP",
  "status": "draft",
  "experimental": true,
  "stringency": "fully-specified",
  "element": [
    {
      "id": "Sequence.quality.gtFP",
      "path": "Sequence.quality.gtFP",
      "short": "False positives where the non-REF alleles in the Truth and Query Call Sets match",
      "definition": "The number of false positives where the non-REF alleles in the Truth and Query Call Sets match (i.e. cases where the truth is 1/1 and the query is 0/1 or similar).",
      "min": 0,
      "max": "1",
      "type": [
        {
          "code": "decimal"
        }
      ],
      "isSummary": true
    }
  ]
}
// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chromeos/services/assistant/assistant_manager_service.h"

namespace chromeos {
namespace assistant {

AuthenticationStateObserver::AuthenticationStateObserver() = default;

AuthenticationStateObserver::~AuthenticationStateObserver() = default;

mojo::PendingRemote<::chromeos::libassistant::mojom::AuthenticationStateObserver>
AuthenticationStateObserver::BindNewPipeAndPassRemote() {
  return receiver_.BindNewPipeAndPassRemote();
}

}  // namespace assistant
}  // namespace chromeos
package com.davidecirillo.multichoicesample.sampleCustomView;

import android.os.Bundle;
import android.support.annotation.Nullable;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.view.View;

import com.davidecirillo.multichoicerecyclerview.MultiChoiceAdapter;
import com.davidecirillo.multichoicerecyclerview.MultiChoiceToolbar;
import com.davidecirillo.multichoicesample.BaseActivity;
import com.davidecirillo.multichoicesample.R;

import java.util.ArrayList;

import butterknife.BindView;

public class SampleCustomActivity extends BaseActivity {

    @BindView(R.id.multiChoiceRecyclerView)
    public RecyclerView multiChoiceRecyclerView;

    @Override
    protected int setActivityIdentifier() {
        return R.layout.activity_sample_custom_view;
    }

    @Override
    protected void onCreate(@Nullable Bundle savedInstanceState) {
        super.onCreate(savedInstanceState);
        setUpMultiChoiceRecyclerView();
    }

    @Override
    protected void onSaveInstanceState(Bundle outState) {
        ((MultiChoiceAdapter) multiChoiceRecyclerView.getAdapter()).onSaveInstanceState(outState);
        super.onSaveInstanceState(outState);
    }

    @Override
    protected void onRestoreInstanceState(Bundle savedInstanceState) {
        ((MultiChoiceAdapter) multiChoiceRecyclerView.getAdapter()).onRestoreInstanceState(savedInstanceState);
        super.onRestoreInstanceState(savedInstanceState);
    }

    private void setUpMultiChoiceRecyclerView() {
        multiChoiceRecyclerView.setLayoutManager(new LinearLayoutManager(this, LinearLayoutManager.VERTICAL, false));

        MultiChoiceToolbar multiChoiceToolbar = new MultiChoiceToolbar.Builder(SampleCustomActivity.this, toolbar)
                .setTitles(getString(toolbarTitle()), "")
                .setMultiChoiceColours(R.color.colorPrimaryMulti, R.color.colorPrimaryDarkMulti)
                .setDefaultIcon(R.drawable.ic_arrow_back_white_24dp, new View.OnClickListener() {
                    @Override
                    public void onClick(View view) {
                        onBackPressed();
                    }
                })
                .build();

        SampleCustomViewAdapter adapter = new SampleCustomViewAdapter(
                getSampleMessageList(),
                SampleCustomActivity.this
        );
        adapter.setMultiChoiceToolbar(multiChoiceToolbar);
        multiChoiceRecyclerView.setAdapter(adapter);
    }

    private ArrayList<MessageV0> getSampleMessageList() {
        ArrayList<MessageV0> sampleList = new ArrayList<>();
        for (int i = 0; i < 100; i++) {
            sampleList.add(new MessageV0("Title message number " + i,
                    "Lorem ipsum dolor " + i + " sit amet, consectetur adipiscing elit. Donec id mi pharetra, "
                            + "porta felis sed, aliquam urna. Curabitur porta dolor lobortis semper dictum. "
                            + "Vestibulum posuere velit nisl, at porta lectus condimentum vel. Duis pharetra "
                            + "auctor tempor. Proin feugiat turpis vel tincidunt molestie. Pellentesque tincidunt "
                            + "felis vitae leo pharetra aliquam. Donec sapien ante, feugiat at eleifend vel, "
                            + "laoreet vitae neque. Donec eu erat et diam fermentum congue a sed libero. Vivamus "
                            + "dapibus nunc nec posuere elementum. Nunc eget mi sed est finibus ultricies. "
                            + "Nam ut sapien feugiat neque tempus feugiat."));
        }
        return sampleList;
    }

    @Override
    protected int toolbarTitle() {
        return R.string.custom_selection_view;
    }

    @Override
    protected boolean showBackHomeAsUpIndicator() {
        return true;
    }
}
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2014-2015, Antmicro Ltd <www.antmicro.com>
 * Copyright (c) 2015, AW-SOM Technologies <www.aw-som.com>
 */

#include <asm/arch/clock.h>
#include <asm/io.h>
#include <common.h>
#include <config.h>
#include <nand.h>
#include <linux/ctype.h>

/* registers */
#define NFC_CTL                     0x00000000
#define NFC_ST                      0x00000004
#define NFC_INT                     0x00000008
#define NFC_TIMING_CTL              0x0000000C
#define NFC_TIMING_CFG              0x00000010
#define NFC_ADDR_LOW                0x00000014
#define NFC_ADDR_HIGH               0x00000018
#define NFC_SECTOR_NUM              0x0000001C
#define NFC_CNT                     0x00000020
#define NFC_CMD                     0x00000024
#define NFC_RCMD_SET                0x00000028
#define NFC_WCMD_SET                0x0000002C
#define NFC_IO_DATA                 0x00000030
#define NFC_ECC_CTL                 0x00000034
#define NFC_ECC_ST                  0x00000038
#define NFC_DEBUG                   0x0000003C
#define NFC_ECC_CNT0                0x00000040
#define NFC_ECC_CNT1                0x00000044
#define NFC_ECC_CNT2                0x00000048
#define NFC_ECC_CNT3                0x0000004C
#define NFC_USER_DATA_BASE          0x00000050
#define NFC_EFNAND_STATUS           0x00000090
#define NFC_SPARE_AREA              0x000000A0
#define NFC_PATTERN_ID              0x000000A4
#define NFC_RAM0_BASE               0x00000400
#define NFC_RAM1_BASE               0x00000800

#define NFC_CTL_EN                  (1 << 0)
#define NFC_CTL_RESET               (1 << 1)
#define NFC_CTL_RAM_METHOD          (1 << 14)
#define NFC_CTL_PAGE_SIZE_MASK      (0xf << 8)
#define NFC_CTL_PAGE_SIZE(a)        ((fls(a) - 11) << 8)

#define NFC_ECC_EN                  (1 << 0)
#define NFC_ECC_PIPELINE            (1 << 3)
#define NFC_ECC_EXCEPTION           (1 << 4)
#define NFC_ECC_BLOCK_SIZE          (1 << 5)
#define NFC_ECC_RANDOM_EN           (1 << 9)
#define NFC_ECC_RANDOM_DIRECTION    (1 << 10)

#define NFC_ADDR_NUM_OFFSET         16
#define NFC_SEND_ADDR               (1 << 19)
#define NFC_ACCESS_DIR              (1 << 20)
#define NFC_DATA_TRANS              (1 << 21)
#define NFC_SEND_CMD1               (1 << 22)
#define NFC_WAIT_FLAG               (1 << 23)
#define NFC_SEND_CMD2               (1 << 24)
#define NFC_SEQ                     (1 << 25)
#define NFC_DATA_SWAP_METHOD        (1 << 26)
#define NFC_ROW_AUTO_INC            (1 << 27)
#define NFC_SEND_CMD3               (1 << 28)
#define NFC_SEND_CMD4               (1 << 29)
#define NFC_RAW_CMD                 (0 << 30)
#define NFC_ECC_CMD                 (1 << 30)
#define NFC_PAGE_CMD                (2 << 30)

#define NFC_ST_CMD_INT_FLAG         (1 << 1)
#define NFC_ST_DMA_INT_FLAG         (1 << 2)
#define NFC_ST_CMD_FIFO_STAT        (1 << 3)

#define NFC_READ_CMD_OFFSET         0
#define NFC_RANDOM_READ_CMD0_OFFSET 8
#define NFC_RANDOM_READ_CMD1_OFFSET 16

#define NFC_CMD_RNDOUTSTART         0xE0
#define NFC_CMD_RNDOUT              0x05
#define NFC_CMD_READSTART           0x30

struct nfc_config {
    int page_size;
    int ecc_strength;
    int ecc_size;
    int addr_cycles;
    int nseeds;
    bool randomize;
    bool valid;
};

/* minimal "boot0" style NAND support for Allwinner A20 */

/* random seed used by linux */
const uint16_t random_seed[128] = {
    0x2b75, 0x0bd0, 0x5ca3, 0x62d1, 0x1c93, 0x07e9, 0x2162, 0x3a72,
    0x0d67, 0x67f9, 0x1be7, 0x077d, 0x032f, 0x0dac, 0x2716, 0x2436,
    0x7922, 0x1510, 0x3860, 0x5287, 0x480f, 0x4252, 0x1789, 0x5a2d,
    0x2a49, 0x5e10, 0x437f, 0x4b4e, 0x2f45, 0x216e, 0x5cb7, 0x7130,
    0x2a3f, 0x60e4, 0x4dc9, 0x0ef0, 0x0f52, 0x1bb9, 0x6211, 0x7a56,
    0x226d, 0x4ea7, 0x6f36, 0x3692, 0x38bf, 0x0c62, 0x05eb, 0x4c55,
    0x60f4, 0x728c, 0x3b6f, 0x2037, 0x7f69, 0x0936, 0x651a, 0x4ceb,
    0x6218, 0x79f3, 0x383f, 0x18d9, 0x4f05, 0x5c82, 0x2912, 0x6f17,
    0x6856, 0x5938, 0x1007, 0x61ab, 0x3e7f, 0x57c2, 0x542f, 0x4f62,
    0x7454, 0x2eac, 0x7739, 0x42d4, 0x2f90, 0x435a, 0x2e52, 0x2064,
    0x637c, 0x66ad, 0x2c90, 0x0bad, 0x759c, 0x0029, 0x0986, 0x7126,
    0x1ca7, 0x1605, 0x386a, 0x27f5, 0x1380, 0x6d75, 0x24c3, 0x0f8e,
    0x2b7a, 0x1418, 0x1fd1, 0x7dc1, 0x2d8e, 0x43af, 0x2267, 0x7da3,
    0x4e3d, 0x1338, 0x50db, 0x454d, 0x764d, 0x40a3, 0x42e6, 0x262b,
    0x2d2e, 0x1aea, 0x2e17, 0x173d, 0x3a6e, 0x71bf, 0x25f9, 0x0a5d,
    0x7c57, 0x0fbe, 0x46ce, 0x4939, 0x6b17, 0x37bb, 0x3e91, 0x76db,
};

#define DEFAULT_TIMEOUT_US 100000

static int check_value_inner(int offset, int expected_bits, int timeout_us, int negation)
{
    do {
        int val = readl(offset) & expected_bits;
        if (negation ? !val : val)
            return 1;
        udelay(1);
    } while (--timeout_us);

    return 0;
}

static inline int check_value(int offset, int expected_bits, int timeout_us)
{
    return check_value_inner(offset, expected_bits, timeout_us, 0);
}

static inline int check_value_negated(int offset, int unexpected_bits, int timeout_us)
{
    return check_value_inner(offset, unexpected_bits, timeout_us, 1);
}

static int nand_wait_cmd_fifo_empty(void)
{
    if (!check_value_negated(SUNXI_NFC_BASE + NFC_ST, NFC_ST_CMD_FIFO_STAT,
                             DEFAULT_TIMEOUT_US)) {
        printf("nand: timeout waiting for empty cmd FIFO\n");
        return -ETIMEDOUT;
    }

    return 0;
}

static int nand_wait_int(void)
{
    if (!check_value(SUNXI_NFC_BASE + NFC_ST, NFC_ST_CMD_INT_FLAG,
                     DEFAULT_TIMEOUT_US)) {
        printf("nand: timeout waiting for interruption\n");
        return -ETIMEDOUT;
    }

    return 0;
}

static int nand_exec_cmd(u32 cmd)
{
    int ret;

    ret = nand_wait_cmd_fifo_empty();
    if (ret)
        return ret;

    writel(NFC_ST_CMD_INT_FLAG, SUNXI_NFC_BASE + NFC_ST);
    writel(cmd, SUNXI_NFC_BASE + NFC_CMD);

    return nand_wait_int();
}

void nand_init(void)
{
    uint32_t val;

    board_nand_init();

    val = readl(SUNXI_NFC_BASE + NFC_CTL);
    /* enable and reset CTL */
    writel(val | NFC_CTL_EN | NFC_CTL_RESET, SUNXI_NFC_BASE + NFC_CTL);

    if (!check_value_negated(SUNXI_NFC_BASE + NFC_CTL,
                             NFC_CTL_RESET, DEFAULT_TIMEOUT_US)) {
        printf("Couldn't initialize nand\n");
    }

    /* reset NAND */
    nand_exec_cmd(NFC_SEND_CMD1 | NFC_WAIT_FLAG | NAND_CMD_RESET);
}

static void nand_apply_config(const struct nfc_config *conf)
{
    u32 val;

    nand_wait_cmd_fifo_empty();

    val = readl(SUNXI_NFC_BASE + NFC_CTL);
    val &= ~NFC_CTL_PAGE_SIZE_MASK;
    writel(val | NFC_CTL_RAM_METHOD | NFC_CTL_PAGE_SIZE(conf->page_size),
           SUNXI_NFC_BASE + NFC_CTL);
    writel(conf->ecc_size, SUNXI_NFC_BASE + NFC_CNT);
    writel(conf->page_size, SUNXI_NFC_BASE + NFC_SPARE_AREA);
}

static int nand_load_page(const struct nfc_config *conf, u32 offs)
{
    int page = offs / conf->page_size;

    writel((NFC_CMD_RNDOUTSTART << NFC_RANDOM_READ_CMD1_OFFSET) |
           (NFC_CMD_RNDOUT << NFC_RANDOM_READ_CMD0_OFFSET) |
           (NFC_CMD_READSTART << NFC_READ_CMD_OFFSET),
           SUNXI_NFC_BASE + NFC_RCMD_SET);
    writel(((page & 0xFFFF) << 16), SUNXI_NFC_BASE + NFC_ADDR_LOW);
    writel((page >> 16) & 0xFF, SUNXI_NFC_BASE + NFC_ADDR_HIGH);

    return nand_exec_cmd(NFC_SEND_CMD1 | NFC_SEND_CMD2 | NFC_RAW_CMD |
                         NFC_SEND_ADDR | NFC_WAIT_FLAG |
                         ((conf->addr_cycles - 1) << NFC_ADDR_NUM_OFFSET));
}

static int nand_change_column(u16 column)
{
    int ret;

    writel((NFC_CMD_RNDOUTSTART << NFC_RANDOM_READ_CMD1_OFFSET) |
           (NFC_CMD_RNDOUT << NFC_RANDOM_READ_CMD0_OFFSET) |
           (NFC_CMD_RNDOUTSTART << NFC_READ_CMD_OFFSET),
           SUNXI_NFC_BASE + NFC_RCMD_SET);
    writel(column, SUNXI_NFC_BASE + NFC_ADDR_LOW);

    ret = nand_exec_cmd(NFC_SEND_CMD1 | NFC_SEND_CMD2 | NFC_RAW_CMD |
                        (1 << NFC_ADDR_NUM_OFFSET) | NFC_SEND_ADDR |
                        NFC_CMD_RNDOUT);
    if (ret)
        return ret;

    /* Ensure tCCS has passed before reading data */
    udelay(1);

    return 0;
}

static const int ecc_bytes[] = {32, 46, 54, 60, 74, 88, 102, 110, 116};

static int nand_read_page(const struct nfc_config *conf, u32 offs,
                          void *dest, int len)
{
    int nsectors = len / conf->ecc_size;
    u16 rand_seed = 0;
    int oob_chunk_sz = ecc_bytes[conf->ecc_strength];
    int page = offs / conf->page_size;
    u32 ecc_st;
    int i;

    if (offs % conf->page_size || len % conf->ecc_size ||
        len > conf->page_size || len < 0)
        return -EINVAL;

    /* Choose correct seed if randomized */
    if (conf->randomize)
        rand_seed = random_seed[page % conf->nseeds];

    /* Retrieve data from SRAM (PIO) */
    for (i = 0; i < nsectors; i++) {
        int data_off = i * conf->ecc_size;
        int oob_off = conf->page_size + (i * oob_chunk_sz);
        u8 *data = dest + data_off;

        /* Clear ECC status and restart ECC engine */
        writel(0, SUNXI_NFC_BASE + NFC_ECC_ST);
        writel((rand_seed << 16) | (conf->ecc_strength << 12) |
               (conf->randomize ? NFC_ECC_RANDOM_EN : 0) |
               (conf->ecc_size == 512 ? NFC_ECC_BLOCK_SIZE : 0) |
               NFC_ECC_EN | NFC_ECC_EXCEPTION,
               SUNXI_NFC_BASE + NFC_ECC_CTL);

        /* Move the data in SRAM */
        nand_change_column(data_off);
        writel(conf->ecc_size, SUNXI_NFC_BASE + NFC_CNT);
        nand_exec_cmd(NFC_DATA_TRANS);

        /*
         * Let the ECC engine consume the ECC bytes and possibly correct
         * the data.
         */
        nand_change_column(oob_off);
        nand_exec_cmd(NFC_DATA_TRANS | NFC_ECC_CMD);

        /* Get the ECC status */
        ecc_st = readl(SUNXI_NFC_BASE + NFC_ECC_ST);

        /* ECC error detected. */
        if (ecc_st & 0xffff)
            return -EIO;

        /*
         * Return 1 if the first chunk is empty (needed for
         * configuration detection).
         */
        if (!i && (ecc_st & 0x10000))
            return 1;

        /* Retrieve the data from SRAM */
        memcpy_fromio(data, SUNXI_NFC_BASE + NFC_RAM0_BASE,
                      conf->ecc_size);

        /* Stop the ECC engine */
        writel(readl(SUNXI_NFC_BASE + NFC_ECC_CTL) & ~NFC_ECC_EN,
               SUNXI_NFC_BASE + NFC_ECC_CTL);

        if (data_off + conf->ecc_size >= len)
            break;
    }

    return 0;
}

static int nand_max_ecc_strength(struct nfc_config *conf)
{
    int max_oobsize, max_ecc_bytes;
    int nsectors = conf->page_size / conf->ecc_size;
    int i;

    /*
     * ECC strength is limited by the size of the OOB area which is
     * correlated with the page size.
     */
    switch (conf->page_size) {
    case 2048:
        max_oobsize = 64;
        break;
    case 4096:
        max_oobsize = 256;
        break;
    case 8192:
        max_oobsize = 640;
        break;
    case 16384:
        max_oobsize = 1664;
        break;
    default:
        return -EINVAL;
    }

    max_ecc_bytes = max_oobsize / nsectors;

    for (i = 0; i < ARRAY_SIZE(ecc_bytes); i++) {
        if (ecc_bytes[i] > max_ecc_bytes)
            break;
    }

    if (!i)
        return -EINVAL;

    return i - 1;
}

static int nand_detect_ecc_config(struct nfc_config *conf, u32 offs,
                                  void *dest)
{
    /* NAND with pages > 4k will likely require 1k sector size. */
    int min_ecc_size = conf->page_size > 4096 ? 1024 : 512;
    int page = offs / conf->page_size;
    int ret;

    /*
     * In most cases, 1k sectors are preferred over 512b ones, start
     * testing this config first.
     */
    for (conf->ecc_size = 1024; conf->ecc_size >= min_ecc_size;
         conf->ecc_size >>= 1) {
        int max_ecc_strength = nand_max_ecc_strength(conf);

        nand_apply_config(conf);

        /*
         * We are starting from the maximum ECC strength because
         * most of the time NAND vendors provide an OOB area that
         * barely meets the ECC requirements.
         */
        for (conf->ecc_strength = max_ecc_strength;
             conf->ecc_strength >= 0;
             conf->ecc_strength--) {
            conf->randomize = false;
            if (nand_change_column(0))
                return -EIO;

            /* Only read the first sector to speed up detection. */
            ret = nand_read_page(conf, offs, dest, conf->ecc_size);
            if (!ret) {
                return 0;
            } else if (ret > 0) {
                /*
                 * If page is empty we can't deduce anything
                 * about the ECC config => stop the detection.
                 */
                return -EINVAL;
            }

            conf->randomize = true;
            conf->nseeds = ARRAY_SIZE(random_seed);
            do {
                if (nand_change_column(0))
                    return -EIO;

                if (!nand_read_page(conf, offs, dest, conf->ecc_size))
                    return 0;

                /*
                 * Find the next ->nseeds value that would
                 * change the randomizer seed for the page
                 * we're trying to read.
                 */
                while (conf->nseeds >= 16) {
                    int seed = page % conf->nseeds;

                    conf->nseeds >>= 1;
                    if (seed != page % conf->nseeds)
                        break;
                }
            } while (conf->nseeds >= 16);
        }
    }

    return -EINVAL;
}

static int nand_detect_config(struct nfc_config *conf, u32 offs, void *dest)
{
    if (conf->valid)
        return 0;

    /*
     * Modern NANDs are more likely than legacy ones, so we start testing
     * with 5 address cycles.
     */
    for (conf->addr_cycles = 5;
         conf->addr_cycles >= 4;
         conf->addr_cycles--) {
        int max_page_size = conf->addr_cycles == 4 ? 2048 : 16384;

        /*
         * Ignoring 1k pages since I'm not even sure this case exists
         * in the real world.
         */
        for (conf->page_size = 2048; conf->page_size <= max_page_size;
             conf->page_size <<= 1) {
            if (nand_load_page(conf, offs))
                return -1;

            if (!nand_detect_ecc_config(conf, offs, dest)) {
                conf->valid = true;
                return 0;
            }
        }
    }

    return -EINVAL;
}

static int nand_read_buffer(struct nfc_config *conf, uint32_t offs,
                            unsigned int size, void *dest)
{
    int first_seed = 0, page, ret;

    size = ALIGN(size, conf->page_size);
    page = offs / conf->page_size;
    if (conf->randomize)
        first_seed = page % conf->nseeds;

    for (; size; size -= conf->page_size) {
        if (nand_load_page(conf, offs))
            return -1;

        ret = nand_read_page(conf, offs, dest, conf->page_size);
        /*
         * The ->nseeds value should be equal to the number of pages
         * in an eraseblock. Since we don't know this information in
         * advance we might have picked a wrong value.
         */
        if (ret < 0 && conf->randomize) {
            int cur_seed = page % conf->nseeds;

            /*
             * We already tried all the seed values => we are
             * facing a real corruption.
             */
            if (cur_seed < first_seed)
                return -EIO;

            /* Try to adjust ->nseeds and read the page again... */
            conf->nseeds = cur_seed;
            if (nand_change_column(0))
                return -EIO;

            /* ... it still fails => it's a real corruption. */
            if (nand_read_page(conf, offs, dest, conf->page_size))
                return -EIO;
        } else if (ret && conf->randomize) {
            memset(dest, 0xff, conf->page_size);
        }

        page++;
        offs += conf->page_size;
        dest += conf->page_size;
    }

    return 0;
}

int nand_spl_load_image(uint32_t offs, unsigned int size, void *dest)
{
    static struct nfc_config conf = { };
    int ret;

    ret = nand_detect_config(&conf, offs, dest);
    if (ret)
        return ret;

    return nand_read_buffer(&conf, offs, size, dest);
}

void nand_deselect(void)
{
    struct sunxi_ccm_reg *const ccm =
        (struct sunxi_ccm_reg *)SUNXI_CCM_BASE;

    clrbits_le32(&ccm->ahb_gate0,
                 (CLK_GATE_OPEN << AHB_GATE_OFFSET_NAND0));
#ifdef CONFIG_MACH_SUN9I
    clrbits_le32(&ccm->ahb_gate1, (1 << AHB_GATE_OFFSET_DMA));
#else
    clrbits_le32(&ccm->ahb_gate0, (1 << AHB_GATE_OFFSET_DMA));
#endif

    clrbits_le32(&ccm->nand0_clk_cfg, CCM_NAND_CTRL_ENABLE | AHB_DIV_1);
}
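The randomizer seed bookkeeping above is terse: the seed used for a page is random_seed[page % nseeds], and the detection loop halves nseeds until the seed index for the probed page actually changes. A small model of that loop (illustrative Python written for this note, not part of U-Boot) makes the behavior concrete:

# Model of the nseeds-halving step in nand_detect_ecc_config().
NSEEDS_MAX = 128  # ARRAY_SIZE(random_seed) in the C file

def next_nseeds(page, nseeds):
    """Halve nseeds until random_seed[page % nseeds] would change."""
    while nseeds >= 16:
        seed_idx = page % nseeds
        nseeds >>= 1
        if seed_idx != page % nseeds:
            break
    return nseeds

# For page 40: 40 % 128 == 40 % 64 == 40 (no change), but 40 % 32 == 8 -> stop.
assert next_nseeds(40, NSEEDS_MAX) == 32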
/*
 * Copyright 2020 Makani Technologies LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "avionics/common/gill_binary.h"

#include <stdbool.h>
#include <stdint.h>

#include "avionics/common/endian.h"
#include "avionics/common/fast_math/fast_math.h"
#include "avionics/common/gill_types.h"

// Returns length of binary message and -1 for unknown messages.
int32_t GillBinaryGetLength(GillBinaryId id) {
  switch (id) {
    case kGillBinaryIdWindmasterMode7:
      return 13;
    case kGillBinaryIdWindmasterMode8:
      return 13;
    case kGillBinaryIdWindmasterMode9:
      return 23;
    case kGillBinaryIdWindmasterMode10:
      return 23;
    default:
      return -1;
  }
}

static bool WindmasterMode7(const uint8_t *data, GillData *gill) {
  int32_t o = 2;
  int16_t i16;
  GillDataWindmasterPolar *out = &gill->u.windmaster_polar;

  gill->id = kGillDataIdWindmasterPolar;
  o += ReadInt16Be(&data[o], &i16);
  out->status = (uint8_t)i16;
  if ((out->status & 0xFF00) == 0x0) {
    o += ReadInt16Be(&data[o], &i16);  // [deg]
    out->wind_direction = i16 * PI_F / 180.0f;  // [rad]
    o += ReadInt16Be(&data[o], &i16);
    out->wind_speed = i16 * 0.01f;
    o += ReadInt16Be(&data[o], &i16);
    out->w_axis = i16 * 0.01f;
    o += ReadInt16Be(&data[o], &i16);
    out->speed_of_sound = i16 * 0.01f;
    return true;
  }
  return false;
}

static bool WindmasterMode8(const uint8_t *data, GillData *gill) {
  int32_t o = 2;
  int16_t i16;
  GillDataWindmasterUvw *out = &gill->u.windmaster_uvw;

  gill->id = kGillDataIdWindmasterUvw;
  o += ReadInt16Be(&data[o], &i16);
  out->status = (uint8_t)i16;
  if ((out->status & 0xFF00) == 0x0) {
    o += ReadInt16Be(&data[o], &i16);
    out->wind_velocity[0] = i16 * 0.01f;
    o += ReadInt16Be(&data[o], &i16);
    out->wind_velocity[1] = i16 * 0.01f;
    o += ReadInt16Be(&data[o], &i16);
    out->wind_velocity[2] = i16 * 0.01f;
    o += ReadInt16Be(&data[o], &i16);
    out->speed_of_sound = i16 * 0.01f;
    return true;
  }
  return false;
}

static bool WindmasterMode9(const uint8_t *data, GillData *gill) {
  // Additional Mode9 fields not supported.
  return WindmasterMode7(data, gill);
}

static bool WindmasterMode10(const uint8_t *data, GillData *gill) {
  // Additional Mode10 fields not supported.
  return WindmasterMode8(data, gill);
}

bool GillBinaryDecodeWindmaster(const GillBinary *bin, const uint8_t *data,
                                GillData *out) {
  switch (bin->id) {
    case kGillBinaryIdWindmasterMode7:
      return WindmasterMode7(data, out);
    case kGillBinaryIdWindmasterMode8:
      return WindmasterMode8(data, out);
    case kGillBinaryIdWindmasterMode9:
      return WindmasterMode9(data, out);
    case kGillBinaryIdWindmasterMode10:
      return WindmasterMode10(data, out);
    default:
      return false;
  }
}
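A worked decoding example may help here. This is a hypothetical Python sketch mirroring WindmasterMode8 above, under the assumptions that the payload starts at byte 2 of the 13-byte frame, all fields are big-endian int16, and the 0.01 scale yields m/s; it is not part of the Makani codebase, and the trailing frame byte is not modeled:

import struct

def decode_windmaster_mode8(frame: bytes):
    """Decode the big-endian int16 payload of a 13-byte mode 8 frame.
    Returns None when the high status byte is nonzero (the acceptance
    condition the C decoder's status check is written around)."""
    status, u, v, w, sos = struct.unpack_from(">5h", frame, 2)
    if status & 0xFF00:
        return None
    return {
        "status": status & 0xFF,
        "wind_velocity": (u * 0.01, v * 0.01, w * 0.01),  # [m/s] (assumed)
        "speed_of_sound": sos * 0.01,                     # [m/s] (assumed)
    }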
{ "name": "CashDog", "website": "https://www.cashdog.io/", "description": "CashDog is a Charity Focused DeFi Token with Automatic Liquidity Generation and Token Redistribution on each trade.", "explorer": "https://bscscan.com/token/0x494a2f5395ac213622762e4ef4d44661758ca639", "type": "BEP20", "symbol": "CASHDOG", "decimals": 9, "status": "active", "id": "0x494A2f5395aC213622762e4ef4d44661758Ca639" }
/* * Copyright 2016 <NAME> <<EMAIL>> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.javadeobfuscator.deobfuscator.utils; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.io.PrintWriter; import java.io.StringWriter; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; import java.lang.reflect.Modifier; import java.util.ArrayList; import java.util.Enumeration; import java.util.HashMap; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.zip.ZipEntry; import java.util.zip.ZipFile; import org.apache.commons.io.IOUtils; import org.objectweb.asm.Opcodes; import org.objectweb.asm.Type; import org.objectweb.asm.tree.*; import org.objectweb.asm.util.Printer; import org.objectweb.asm.util.Textifier; import org.objectweb.asm.util.TraceMethodVisitor; import sun.misc.Unsafe; import static org.objectweb.asm.Opcodes.*; public class Utils { public static boolean isInstruction(AbstractInsnNode node) { return !(node instanceof LineNumberNode) && !(node instanceof FrameNode) && !(node instanceof LabelNode); } public static boolean notAbstractOrNative(MethodNode methodNode) { return !Modifier.isNative(methodNode.access) && !Modifier.isAbstract(methodNode.access); } public static AbstractInsnNode getNextFollowGoto(AbstractInsnNode node) { AbstractInsnNode next = node.getNext(); while (next instanceof LabelNode || next instanceof LineNumberNode || next instanceof FrameNode) { next = next.getNext(); } if (next.getOpcode() == GOTO) { JumpInsnNode cast = (JumpInsnNode) next; next = cast.label; while (!Utils.isInstruction(next)) { next = next.getNext(); } } return next; } public static AbstractInsnNode getNext(AbstractInsnNode node, int amount) { for (int i = 0; i < amount; i++) { node = getNext(node); } return node; } public static AbstractInsnNode getNext(AbstractInsnNode node) { AbstractInsnNode next = node.getNext(); while (!Utils.isInstruction(next)) { next = next.getNext(); } return next; } public static AbstractInsnNode getPrevious(AbstractInsnNode node, int amount) { for (int i = 0; i < amount; i++) { node = getPrevious(node); } return node; } public static AbstractInsnNode getPrevious(AbstractInsnNode node) { AbstractInsnNode prev = node.getPrevious(); while (!Utils.isInstruction(prev)) { prev = prev.getPrevious(); } return prev; } public static int iconstToInt(int opcode) { int operand = Integer.MIN_VALUE; switch (opcode) { case ICONST_0: operand = 0; break; case ICONST_1: operand = 1; break; case ICONST_2: operand = 2; break; case ICONST_3: operand = 3; break; case ICONST_4: operand = 4; break; case ICONST_5: operand = 5; break; case ICONST_M1: operand = -1; break; } return operand; } public static MethodNode getMethodNode(ClassNode start, String methodName, String methodDesc, Map<String, ClassNode> dictionary) { MethodNode targetMethod = null; LinkedList<ClassNode> haystack = new LinkedList<>(); haystack.add(start); while 
(targetMethod == null && !haystack.isEmpty()) {
            ClassNode needle = haystack.poll();
            targetMethod = needle.methods.stream()
                .filter(imn -> imn.name.equals(methodName) && imn.desc.equals(methodDesc))
                .findFirst().orElse(null);
            if (targetMethod == null) {
                if (!needle.name.equals("java/lang/Object")) {
                    for (String intf : needle.interfaces) {
                        ClassNode intfNode = dictionary.get(intf);
                        if (intfNode == null) {
                            throw new IllegalArgumentException("Class not found: " + intf);
                        }
                        haystack.add(intfNode);
                    }
                    String superName = needle.superName;
                    needle = dictionary.get(needle.superName);
                    if (needle == null) {
                        throw new IllegalArgumentException("Class not found: " + superName);
                    }
                    haystack.add(needle);
                }
            }
        }
        return targetMethod;
    }

    public static long copy(InputStream from, OutputStream to) throws IOException {
        byte[] buf = new byte[4096];
        long total = 0;
        while (true) {
            int r = from.read(buf);
            if (r == -1) {
                break;
            }
            to.write(buf, 0, r);
            total += r;
        }
        return total;
    }

    public static String descFromTypes(Type[] types) {
        StringBuilder descBuilder = new StringBuilder("(");
        for (Type type : types) {
            descBuilder.append(type.getDescriptor());
        }
        descBuilder.append(")");
        return descBuilder.toString();
    }

    public static void sneakyThrow(Throwable t) {
        Utils.<Error>sneakyThrow0(t);
    }

    @SuppressWarnings("unchecked")
    private static <T extends Throwable> void sneakyThrow0(Throwable t) throws T {
        throw (T) t;
    }

    private static final Printer printer = new Textifier();
    private static final TraceMethodVisitor methodPrinter = new TraceMethodVisitor(printer);

    public static String prettyprint(AbstractInsnNode insnNode) {
        if (insnNode == null) return "null";
        insnNode.accept(methodPrinter);
        StringWriter sw = new StringWriter();
        printer.print(new PrintWriter(sw));
        printer.getText().clear();
        return sw.toString().trim();
    }

    public static boolean isTerminating(AbstractInsnNode next) {
        switch (next.getOpcode()) {
            case RETURN: case ARETURN: case IRETURN: case FRETURN: case DRETURN: case LRETURN:
            case ATHROW:
            case TABLESWITCH: case LOOKUPSWITCH:
            case GOTO:
                return true;
        }
        return false;
    }

    public static boolean willPushToStack(int opcode) {
        switch (opcode) {
            case ACONST_NULL:
            case ICONST_M1: case ICONST_0: case ICONST_1: case ICONST_2:
            case ICONST_3: case ICONST_4: case ICONST_5:
            case FCONST_0: case FCONST_1: case FCONST_2:
            case BIPUSH: case SIPUSH: case LDC:
            case GETSTATIC:
            case ILOAD: case LLOAD: case FLOAD: case DLOAD: case ALOAD: {
                return true;
            }
        }
        return false;
    }

    public static Unsafe getUnsafe() {
        try {
            initializeUnsafe();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return unsafe;
    }

    public static <T> T allocateInstance(Class<T> t) {
        try {
            return (T) getUnsafe().allocateInstance(t);
        } catch (InstantiationException e) {
            e.printStackTrace();
        }
        return null;
    }

    private static Unsafe unsafe;

    private static void initializeUnsafe() throws NoSuchMethodException, IllegalAccessException,
            InvocationTargetException, InstantiationException {
        if (unsafe == null) {
            Constructor<Unsafe> ctor = Unsafe.class.getDeclaredConstructor();
            ctor.setAccessible(true);
            unsafe = ctor.newInstance();
        }
    }

    public static boolean isNumber(String type) {
        switch (type) {
            case "I": case "S": case "B": case "J": case "D": case "F":
                return true;
            default:
                return false;
        }
    }

    public static boolean canReturnDigit(String type) {
        switch (type) {
            case "I": case "S": case "B": case "J": case "Z": case "C":
                return true;
            default:
                return false;
        }
    }

    public static boolean isFloat(String type) {
        switch (type) {
            case "F": case "D":
                return true;
            default:
                return false;
        }
    }

    public static InsnList copyInsnList(InsnList original) {
        InsnList newInsnList = new InsnList();
        for (AbstractInsnNode insn = original.getFirst(); insn != null; insn = insn.getNext()) {
            newInsnList.add(insn);
        }
        return newInsnList;
    }

    public static InsnList cloneInsnList(InsnList original) {
        InsnList newInsnList = new InsnList();
        Map<LabelNode, LabelNode> labels = new HashMap<>();
        for (AbstractInsnNode insn = original.getFirst(); insn != null; insn = insn.getNext()) {
            if (insn instanceof LabelNode) {
                labels.put((LabelNode) insn, new LabelNode());
            }
        }
        for (AbstractInsnNode insn = original.getFirst(); insn != null; insn = insn.getNext()) {
            newInsnList.add(insn.clone(labels));
        }
        return newInsnList;
    }

    public static AbstractInsnNode getIntInsn(int number) {
        if (number >= -1 && number <= 5)
            return new InsnNode(number + 3);
        else if (number >= -128 && number <= 127)
            return new IntInsnNode(Opcodes.BIPUSH, number);
        else if (number >= -32768 && number <= 32767)
            return new IntInsnNode(Opcodes.SIPUSH, number);
        else
            return new LdcInsnNode(number);
    }

    public static AbstractInsnNode getLongInsn(long number) {
        if (number >= 0 && number <= 1)
            return new InsnNode((int) (number + 9));
        else
            return new LdcInsnNode(number);
    }

    public static AbstractInsnNode getFloatInsn(float number) {
        if (number >= 0 && number <= 2) {
            return new InsnNode((int) (number + 11));
        } else {
            return new LdcInsnNode(number);
        }
    }

    public static AbstractInsnNode getDoubleInsn(double number) {
        if (number >= 0 && number <= 1)
            return new InsnNode((int) (number + 14));
        else
            return new LdcInsnNode(number);
    }

    public static void printClass(ClassNode classNode) {
        System.out.println(classNode.name + '\n');
        classNode.methods.forEach(methodNode -> {
            System.out.println(methodNode.name + " " + methodNode.desc);
            for (int i = 0; i < methodNode.instructions.size(); i++) {
                System.out.printf("%s: %s \n", i, prettyprint(methodNode.instructions.get(i)));
            }
        });
    }

    public static boolean isInteger(AbstractInsnNode ain) {
        if (ain == null)
            return false;
        if ((ain.getOpcode() >= Opcodes.ICONST_M1 && ain.getOpcode() <= Opcodes.ICONST_5)
            || ain.getOpcode() == Opcodes.SIPUSH
            || ain.getOpcode() == Opcodes.BIPUSH)
            return true;
        if (ain instanceof LdcInsnNode) {
            LdcInsnNode ldc = (LdcInsnNode) ain;
            if (ldc.cst instanceof Integer)
                return true;
        }
        return false;
    }

    public static int getIntValue(AbstractInsnNode node) {
        if (node.getOpcode() >= Opcodes.ICONST_M1 && node.getOpcode() <= Opcodes.ICONST_5)
            return node.getOpcode() - 3;
        if (node.getOpcode() == Opcodes.SIPUSH || node.getOpcode() == Opcodes.BIPUSH)
            return ((IntInsnNode) node).operand;
        if (node instanceof LdcInsnNode) {
            LdcInsnNode ldc = (LdcInsnNode) node;
            if (ldc.cst instanceof Integer)
                return (int) ldc.cst;
        }
        return 0;
    }

    public static boolean isLong(AbstractInsnNode ain) {
        if (ain == null)
            return false;
        if (ain.getOpcode() == Opcodes.LCONST_0 || ain.getOpcode() == Opcodes.LCONST_1)
            return true;
        if (ain instanceof LdcInsnNode) {
            LdcInsnNode ldc = (LdcInsnNode) ain;
            if (ldc.cst instanceof Long)
                return true;
        }
        return false;
    }

    public static long getLongValue(AbstractInsnNode node) {
        if (node.getOpcode() >= Opcodes.LCONST_0 && node.getOpcode() <= Opcodes.LCONST_1)
            return node.getOpcode() - 9;
        if (node instanceof LdcInsnNode) {
            LdcInsnNode ldc = (LdcInsnNode) node;
            if (ldc.cst instanceof Long)
                return (long) ldc.cst;
        }
        return 0;
    }

    public static List<byte[]> loadBytes(File input) {
        List<byte[]> result = new ArrayList<>();
        if (input.getName().endsWith(".jar")) {
            try (ZipFile zipIn = new ZipFile(input)) {
                Enumeration<? extends ZipEntry> e = zipIn.entries();
                while (e.hasMoreElements()) {
                    ZipEntry next = e.nextElement();
                    if (next.getName().endsWith(".class")) {
                        try (InputStream in = zipIn.getInputStream(next)) {
                            result.add(IOUtils.toByteArray(in));
                        } catch (IllegalArgumentException x) {
                            System.out.println("Could not parse " + next.getName() + " (is it a class?)");
                        }
                    }
                }
            } catch (IOException e) {
                e.printStackTrace(System.out);
            }
        } else if (input.getName().endsWith(".class")) {
            try (InputStream in = new FileInputStream(input)) {
                result.add(IOUtils.toByteArray(in));
            } catch (Throwable x) {
                System.out.println("Could not parse " + input.getName() + " (is it a class?)");
            }
        }
        return result;
    }

    public static Map<LabelNode, LabelNode> generateCloneMap(InsnList list) {
        Map<LabelNode, LabelNode> result = new HashMap<>();
        list.iterator().forEachRemaining(insn -> {
            if (insn instanceof LabelNode) {
                result.put((LabelNode) insn, new LabelNode());
            }
        });
        return result;
    }

    public static int getPullValue(AbstractInsnNode ain, boolean includeShifts) {
        switch (ain.getOpcode()) {
            case IALOAD: case LALOAD: case FALOAD: case DALOAD:
            case AALOAD: case BALOAD: case CALOAD: case SALOAD:
                return 2;
            case ISTORE: case FSTORE: case ASTORE:
                return 1;
            case LSTORE: case DSTORE:
                return 2;
            case IASTORE: case FASTORE: case AASTORE: case BASTORE: case CASTORE: case SASTORE:
                return 3;
            case LASTORE: case DASTORE:
                return 4;
            case POP:
                return 1;
            case POP2:
                return 2;
            case DUP: case DUP_X1: case DUP_X2:
                if (includeShifts) return 1;
                break;
            case DUP2: case DUP2_X1: case DUP2_X2: case SWAP:
                if (includeShifts) return 2;
                break;
            case IADD: case ISUB: case IMUL: case IDIV: case IREM:
            case ISHL: case ISHR: case IUSHR: case IAND: case IOR: case IXOR:
                return 2;
            case LADD: case LSUB: case LMUL: case LDIV: case LREM:
            case LAND: case LOR: case LXOR: case LCMP:
                return 4;
            case LSHL: case LSHR: case LUSHR:
                return 3;
            case FADD: case FSUB: case FMUL: case FDIV: case FREM: case FCMPL: case FCMPG:
                return 2;
            case DADD: case DSUB: case DMUL: case DDIV: case DREM: case DCMPL: case DCMPG:
                return 4;
            case INEG: case FNEG:
                return 1;
            case DNEG: case LNEG:
                return 2;
            case I2L: case I2D: case I2F:
                return 1;
            case L2I: case L2D: case L2F:
                return 2;
            case F2I: case F2D: case F2L:
                return 1;
            case D2F: case D2L: case D2I:
                return 2;
            case IFNE: case IFEQ: case IFLT: case IFGE: case IFGT: case IFLE:
                return 1;
            case IF_ICMPEQ: case IF_ICMPNE: case IF_ICMPLT: case IF_ICMPGE:
            case IF_ICMPGT: case IF_ICMPLE: case IF_ACMPNE: case IF_ACMPEQ:
                return 2;
            case TABLESWITCH: case LOOKUPSWITCH:
                return 1;
            case IRETURN: case FRETURN: case ARETURN:
                return 1;
            case LRETURN: case DRETURN:
                return 2;
            case PUTSTATIC:
                if (Type.getType(((FieldInsnNode) ain).desc).getSort() == Type.LONG
                    || Type.getType(((FieldInsnNode) ain).desc).getSort() == Type.DOUBLE)
                    return 2;
                return 1;
            case GETFIELD:
                return 1;
            case PUTFIELD:
                if (Type.getType(((FieldInsnNode) ain).desc).getSort() == Type.LONG
                    || Type.getType(((FieldInsnNode) ain).desc).getSort() == Type.DOUBLE)
                    return 3;
                return 2;
            case INVOKESTATIC: case INVOKEVIRTUAL: case INVOKEINTERFACE: case INVOKESPECIAL:
                int args = 0;
                if (ain.getOpcode() != Opcodes.INVOKESTATIC)
                    args++;
                for (Type t : Type.getArgumentTypes(((MethodInsnNode) ain).desc))
                    if (t.getSort() == Type.LONG || t.getSort() == Type.DOUBLE)
                        args += 2;
                    else
                        args++;
                return args;
            case INVOKEDYNAMIC:
                int args1 = 0;
                for (Type t : Type.getArgumentTypes(((InvokeDynamicInsnNode) ain).desc))
                    if (t.getSort() == Type.LONG || t.getSort() == Type.DOUBLE)
                        args1 += 2;
                    else
                        args1++;
                return args1;
            case NEWARRAY: case ANEWARRAY: case ARRAYLENGTH: case ATHROW:
            case CHECKCAST: case INSTANCEOF: case MONITORENTER: case MONITOREXIT:
            case IFNULL: case IFNONNULL:
                return 1;
            case MULTIANEWARRAY:
                return ((MultiANewArrayInsnNode) ain).dims;
        }
        return 0;
    }
}
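A minimal usage sketch for the instruction helpers above. This snippet is not part of the original source: it assumes the surrounding class is named Utils (as the sneakyThrow delegation above suggests), that the demo lives in the same package, and that the ASM tree API (org.objectweb.asm) is on the classpath.

import org.objectweb.asm.tree.AbstractInsnNode;
import org.objectweb.asm.tree.InsnList;

public class UtilsDemo {
    public static void main(String[] args) {
        // getIntInsn picks the smallest encoding: ICONST_*, BIPUSH, SIPUSH, or LDC.
        InsnList list = new InsnList();
        list.add(Utils.getIntInsn(5));      // ICONST_5
        list.add(Utils.getIntInsn(100));    // BIPUSH 100
        list.add(Utils.getIntInsn(70000));  // LDC 70000

        for (AbstractInsnNode insn = list.getFirst(); insn != null; insn = insn.getNext()) {
            // isInteger/getIntValue recover the constant regardless of encoding.
            if (Utils.isInteger(insn)) {
                System.out.println(Utils.prettyprint(insn) + " -> " + Utils.getIntValue(insn));
            }
        }
    }
}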
#include <stdlib.h>
#include <sys/socket.h>

/* from stdlib.h */
float strtof(const char *nptr, char **endptr)
{
    return (float)strtod(nptr, endptr);
}

double atof(const char *nptr)
{
    return strtod(nptr, NULL);
}

int abs(int __n)
{
    return (__n < 0) ? -__n : __n;
}

long labs(long __n)
{
    return (__n < 0L) ? -__n : __n;
}

long long llabs(long long __n)
{
    return (__n < 0LL) ? -__n : __n;
}

int rand(void)
{
    return (int)lrand48();
}

void srand(unsigned int __s)
{
    srand48(__s);
}

long random(void)
{
    return lrand48();
}

void srandom(unsigned int __s)
{
    srand48(__s);
}

/* from __cmsg_nxthdr.cpp */
/*
 * The function __cmsg_nxthdr() is missing in Android 4.4, but the Android NDK
 * header files in the version we are using reference it, and we use it in our
 * code; the function was only added in Android 5.0. To make IoTivity
 * dynamically loadable at load time on Android KitKat 4.4, add this function
 * as a weak symbol, so it will be used if the C library does not provide it,
 * as on Android < 5.0. This code was taken from these two resources:
 * https://raw.githubusercontent.com/android/platform_bionic/master/libc/bionic/__cmsg_nxthdr.cpp
 * https://github.com/android/platform_bionic/commit/ff64831b0965c16c95c9f81a148f30a6ef3a6c64
 */
struct cmsghdr* __attribute__((weak)) __cmsg_nxthdr(struct msghdr* msg, struct cmsghdr* cmsg)
{
    struct cmsghdr* ptr;
    ptr = (struct cmsghdr*)(((unsigned char*)cmsg) + CMSG_ALIGN(cmsg->cmsg_len));
    size_t len = (unsigned long)((char*)(ptr + 1) - (char*)msg->msg_control);
    if (len > msg->msg_controllen) {
        return NULL;
    }
    return ptr;
}
# Time:  O(nlogn)
# Space: O(1)

class Solution(object):
    def eliminateMaximum(self, dist, speed):
        """
        :type dist: List[int]
        :type speed: List[int]
        :rtype: int
        """
        for i in xrange(len(dist)):
            dist[i] = (dist[i]-1)//speed[i]
        dist.sort()
        result = 0
        for i in xrange(len(dist)):
            if result > dist[i]:
                break
            result += 1
        return result
from backpack.core.derivatives.flatten import FlattenDerivatives
from backpack.extensions.curvmatprod.pchmp.pchmpbase import PCHMPBase


class PCHMPFlatten(PCHMPBase):
    def __init__(self):
        super().__init__(derivatives=FlattenDerivatives())
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/search/instant_unittest_base.h"

#include <string>

#include "base/bind.h"
#include "base/strings/utf_string_conversions.h"
#include "build/build_config.h"
#include "chrome/browser/chrome_notification_types.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/search/instant_service.h"
#include "chrome/browser/search/instant_service_factory.h"
#include "chrome/browser/search/search.h"
#include "chrome/browser/search_engines/template_url_service_factory.h"
#include "chrome/browser/search_engines/ui_thread_search_terms_data.h"
#include "chrome/test/base/browser_with_test_window_test.h"
#include "chrome/test/base/search_test_utils.h"
#include "components/search/search.h"
#include "components/search_engines/template_url.h"
#include "components/search_engines/template_url_service.h"

InstantUnitTestBase::InstantUnitTestBase() {
}

InstantUnitTestBase::~InstantUnitTestBase() {
}

void InstantUnitTestBase::SetUp() {
  BrowserWithTestWindowTest::SetUp();

  clock_ = new base::SimpleTestClock();
  template_url_service_ = TemplateURLServiceFactory::GetForProfile(profile());
  search_test_utils::WaitForTemplateURLServiceToLoad(template_url_service_);
  SetUserSelectedDefaultSearchProvider("{google:baseURL}");
  instant_service_ = InstantServiceFactory::GetForProfile(profile());
}

void InstantUnitTestBase::TearDown() {
  delete clock_;
  clock_ = nullptr;
  BrowserWithTestWindowTest::TearDown();
}

ntp_tiles::MostVisitedSites* InstantUnitTestBase::most_visited_sites() {
  return instant_service_->most_visited_sites_.get();
}

void InstantUnitTestBase::SetUserSelectedDefaultSearchProvider(
    const std::string& base_url) {
  TemplateURLData data;
  data.SetShortName(base::UTF8ToUTF16(base_url));
  data.SetKeyword(base::UTF8ToUTF16(base_url));
  data.SetURL(base_url + "url?bar={searchTerms}");
  data.new_tab_url = base_url + "newtab";
  data.alternate_urls.push_back(base_url + "alt#quux={searchTerms}");

  TemplateURL* template_url =
      template_url_service_->Add(std::make_unique<TemplateURL>(data));
  template_url_service_->SetUserSelectedDefaultSearchProvider(template_url);
}

TestingProfile* InstantUnitTestBase::CreateProfile() {
  TestingProfile* profile = BrowserWithTestWindowTest::CreateProfile();
  TemplateURLServiceFactory::GetInstance()->SetTestingFactoryAndUse(
      profile,
      base::BindRepeating(&TemplateURLServiceFactory::BuildInstanceFor));
  return profile;
}
/*
 * Copyright 1999-2019 Seata.io Group.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.seata.common.holder;

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

import io.seata.common.exception.ShouldNeverHappenException;

/**
 * The enum object holder.
 *
 * @author <EMAIL>
 */
public enum ObjectHolder {

    /**
     * singleton instance
     */
    INSTANCE;

    private static final int MAP_SIZE = 8;

    private static final Map<String, Object> OBJECT_MAP = new ConcurrentHashMap<>(MAP_SIZE);

    public Object getObject(String objectKey) {
        return OBJECT_MAP.get(objectKey);
    }

    public <T> T getObject(Class<T> clasz) {
        return clasz.cast(OBJECT_MAP.values().stream().filter(clasz::isInstance).findAny()
            .orElseThrow(() -> new ShouldNeverHappenException("Can't find any object of class " + clasz.getName())));
    }

    /**
     * Sets object.
     *
     * @param objectKey the key
     * @param object    the object
     * @return the previous object with the key, or null
     */
    public Object setObject(String objectKey, Object object) {
        return OBJECT_MAP.put(objectKey, object);
    }
}
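A minimal usage sketch for the ObjectHolder enum above; the key name and stored type are made up for illustration.

public class ObjectHolderDemo {
    public static void main(String[] args) {
        // Store an object under a string key (the key name is hypothetical).
        ObjectHolder.INSTANCE.setObject("springApplicationContext", new StringBuilder("ctx"));

        // Retrieve by key...
        Object byKey = ObjectHolder.INSTANCE.getObject("springApplicationContext");

        // ...or by type: scans all stored values for an instance of the class
        // and throws ShouldNeverHappenException when none matches.
        StringBuilder byType = ObjectHolder.INSTANCE.getObject(StringBuilder.class);

        System.out.println(byKey == byType); // true: same stored instance
    }
}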
//===-- Operations.h - ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Implementations of common fuzzer operation descriptors for building an IR
// mutator.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_FUZZMUTATE_OPERATIONS_H
#define LLVM_FUZZMUTATE_OPERATIONS_H

#include "llvm/FuzzMutate/OpDescriptor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"

namespace llvm {

/// Getters for the default sets of operations, per general category.
/// @{
void describeFuzzerIntOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerFloatOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerControlFlowOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerPointerOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerAggregateOps(std::vector<fuzzerop::OpDescriptor> &Ops);
void describeFuzzerVectorOps(std::vector<fuzzerop::OpDescriptor> &Ops);
/// @}

namespace fuzzerop {

/// Descriptors for individual operations.
/// @{
OpDescriptor binOpDescriptor(unsigned Weight, Instruction::BinaryOps Op);
OpDescriptor cmpOpDescriptor(unsigned Weight, Instruction::OtherOps CmpOp,
                             CmpInst::Predicate Pred);
OpDescriptor splitBlockDescriptor(unsigned Weight);
OpDescriptor gepDescriptor(unsigned Weight);
OpDescriptor extractValueDescriptor(unsigned Weight);
OpDescriptor insertValueDescriptor(unsigned Weight);
OpDescriptor extractElementDescriptor(unsigned Weight);
OpDescriptor insertElementDescriptor(unsigned Weight);
OpDescriptor shuffleVectorDescriptor(unsigned Weight);
/// @}

} // end fuzzerop namespace
} // end llvm namespace

#endif // LLVM_FUZZMUTATE_OPERATIONS_H
'''Initialization'''
from .interfaces import showEndGameInterface
from .Sprites import BadguySprite, ArrowSprite, BunnySprite
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# import pytest
import numpy as np
import pandas as pd
from six.moves import xrange

from wtte.transforms import padded_to_df
from wtte.pipelines import data_pipeline
from wtte.data_generators import generate_random_df


def run_test(
        # '1' on the end: dirty way to not pollute the testing namespace
        id_col1,
        abs_time_col1,
        discrete_time1,
        pad_between_steps1):
    np.random.seed(1)

    # Should fail randomly if unique_times = False since it reduces those
    # times.
    df = generate_random_df(n_seqs=5, max_seq_length=10, unique_times=True)

    # Rename the abs_time_col to something new to spot assumptions.
    df.rename(columns={"dt": abs_time_col1, 'id': id_col1}, inplace=True)

    column_names1 = ['event', 'int_column', 'double_column']

    padded, padded_t, seq_ids, df_collapsed = \
        data_pipeline(df,
                      id_col=id_col1,
                      abs_time_col=abs_time_col1,
                      column_names=column_names1,
                      discrete_time=discrete_time1,
                      pad_between_steps=pad_between_steps1,
                      infer_seq_endtime=False,
                      time_sec_interval=1,
                      timestep_aggregation_dict=None,
                      drop_last_timestep=False
                      )

    if pad_between_steps1:
        df_new = padded_to_df(padded, column_names1, [int, int, float],
                              ids=seq_ids, id_col=id_col1, t_col='t_elapsed')
        df = df[[id_col1, 't_elapsed'] + column_names1].reset_index(drop=True)
        pd.util.testing.assert_frame_equal(df, df_new)
    else:
        df_new = padded_to_df(padded, column_names1, [int, int, float],
                              ids=seq_ids, id_col=id_col1, t_col='t_ix')
        df = df[[id_col1, 't_ix'] + column_names1].reset_index(drop=True)
        pd.util.testing.assert_frame_equal(df, df_new)


class TestPipeline():
    def test_discrete_padded_pipeline(self):
        run_test(id_col1='idnewname',
                 abs_time_col1='time_int',
                 discrete_time1=True,
                 pad_between_steps1=True)

    def test_discrete_unpadded_pipeline(self):
        run_test(id_col1='idnewname',
                 abs_time_col1='time_int',
                 discrete_time1=True,
                 pad_between_steps1=False)

    def test_continuous_pipeline(self):
        run_test(id_col1='idnewname',
                 abs_time_col1='time_int',
                 discrete_time1=True,
                 pad_between_steps1=False)

# def test_discrete_time_continous():
#     TODO
# def test_continuous_time_discrete():
#     TODO

# TODO test with flag infer enddtime
# TODO test with tte and censoring etc.
package org.apache.maven.model.plugin;

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

import java.util.List;

import javax.inject.Named;
import javax.inject.Singleton;

import org.apache.maven.model.Build;
import org.apache.maven.model.Model;
import org.apache.maven.model.Plugin;
import org.apache.maven.model.PluginExecution;
import org.apache.maven.model.PluginManagement;
import org.apache.maven.model.building.ModelBuildingRequest;
import org.apache.maven.model.building.ModelProblemCollector;
import org.codehaus.plexus.util.xml.Xpp3Dom;

/**
 * Handles expansion of general build plugin configuration into individual executions.
 *
 * @author <NAME>
 */
@Named
@Singleton
public class DefaultPluginConfigurationExpander
    implements PluginConfigurationExpander
{

    @Override
    public void expandPluginConfiguration( Model model, ModelBuildingRequest request, ModelProblemCollector problems )
    {
        Build build = model.getBuild();

        if ( build != null )
        {
            expand( build.getPlugins() );

            PluginManagement pluginManagement = build.getPluginManagement();

            if ( pluginManagement != null )
            {
                expand( pluginManagement.getPlugins() );
            }
        }
    }

    private void expand( List<Plugin> plugins )
    {
        for ( Plugin plugin : plugins )
        {
            Xpp3Dom pluginConfiguration = (Xpp3Dom) plugin.getConfiguration();

            if ( pluginConfiguration != null )
            {
                for ( PluginExecution execution : plugin.getExecutions() )
                {
                    Xpp3Dom executionConfiguration = (Xpp3Dom) execution.getConfiguration();

                    executionConfiguration =
                        Xpp3Dom.mergeXpp3Dom( executionConfiguration, new Xpp3Dom( pluginConfiguration ) );

                    execution.setConfiguration( executionConfiguration );
                }
            }
        }
    }

}
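A compilable sketch of the expansion above, assuming the maven-model and plexus-utils jars are available. Passing null for the request and problem collector is acceptable only because this particular expander never touches them; inside Maven they would be real objects.

import org.apache.maven.model.Build;
import org.apache.maven.model.Model;
import org.apache.maven.model.Plugin;
import org.apache.maven.model.PluginExecution;
import org.codehaus.plexus.util.xml.Xpp3Dom;

public class ExpanderDemo {
    public static void main(String[] args) {
        // Plugin-level <configuration><skip>true</skip></configuration>
        Xpp3Dom conf = new Xpp3Dom("configuration");
        Xpp3Dom skip = new Xpp3Dom("skip");
        skip.setValue("true");
        conf.addChild(skip);

        Plugin plugin = new Plugin();
        plugin.setConfiguration(conf);
        PluginExecution execution = new PluginExecution();
        plugin.addExecution(execution);

        Build build = new Build();
        build.addPlugin(plugin);
        Model model = new Model();
        model.setBuild(build);

        new DefaultPluginConfigurationExpander().expandPluginConfiguration(model, null, null);

        // The execution now carries a merged copy of the plugin-level configuration.
        System.out.println(execution.getConfiguration());
    }
}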
package org.jsets.shiro.listener;

import java.util.Date;
import javax.servlet.ServletRequest;

public interface AuthListener {

    /**
     * Login succeeded.
     * @param request the HTTP request
     * @param account the account
     */
    void onLoginSuccess(ServletRequest request, String account);

    /**
     * Login failed.
     * @param request the HTTP request
     * @param account the account
     * @param reason the reason the login failed
     */
    void onLoginFailure(ServletRequest request, String account, String reason);

    /**
     * Logout.
     * @param request the HTTP request
     * @param account the account
     */
    void onLogout(ServletRequest request, String account);

    /**
     * The user was kicked out.
     * @param request the HTTP request
     * @param account the account
     * @param loginedHost the host of the session that was already logged in
     * @param loginedTime the time of the session that was already logged in
     *
     * For example: account admin logs in on machine A, and then someone logs
     * in as admin on machine B. This event fires with loginedHost set to
     * machine A's host and loginedTime set to the time of the login on
     * machine A.
     */
    void onKeepOneKickout(ServletRequest request, String account, String loginedHost, Date loginedTime);

    /**
     * The user was forced offline.
     * @param request the HTTP request
     * @param account the account
     */
    void onForceLogout(ServletRequest request, String account);

    /**
     * Access assertion.
     * @param request the HTTP request
     * @param account the account
     * @param needRoles the roles required to access the resource
     * @param allowed whether access is allowed
     */
    void onAccessAssert(ServletRequest request, String account, String needRoles, boolean allowed);
}
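A hypothetical implementation of the interface above, only to show the callback shapes; a real listener would hook these into auditing or alerting.

import java.util.Date;
import javax.servlet.ServletRequest;

public class LoggingAuthListener implements AuthListener {
    @Override
    public void onLoginSuccess(ServletRequest request, String account) {
        System.out.println(account + " logged in");
    }

    @Override
    public void onLoginFailure(ServletRequest request, String account, String reason) {
        System.out.println(account + " failed to log in: " + reason);
    }

    @Override
    public void onLogout(ServletRequest request, String account) {
        System.out.println(account + " logged out");
    }

    @Override
    public void onKeepOneKickout(ServletRequest request, String account, String loginedHost, Date loginedTime) {
        System.out.println(account + " kicked out; earlier session from " + loginedHost + " at " + loginedTime);
    }

    @Override
    public void onForceLogout(ServletRequest request, String account) {
        System.out.println(account + " was forced offline");
    }

    @Override
    public void onAccessAssert(ServletRequest request, String account, String needRoles, boolean allowed) {
        System.out.println(account + " needs roles [" + needRoles + "], allowed=" + allowed);
    }
}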
package com.ejlchina.searcher.param;

import java.util.List;

/**
 * A group of field parameters.
 *
 * @author Troy.Zhou @ 2021-10-31
 * @since v3.3.0
 */
public class ParamGroup {

    /**
     * AND group: the subgroups in {@link #groups} are all combined with AND.
     */
    public static final int TYPE_AND = 1;

    /**
     * OR group: the subgroups in {@link #groups} are all combined with OR.
     */
    public static final int TYPE_OR = 2;

    /**
     * Raw group: the group carries a raw {@link #params} parameter list.
     */
    public static final int TYPE_RAW = 3;

    // Group type
    private int type;

    // Subgroups: present for AND groups and OR groups
    private List<ParamGroup> groups;

    // Present for raw groups
    private List<FieldParam> params;

    public int getType() {
        return type;
    }

    public void setType(int type) {
        this.type = type;
    }

    public List<ParamGroup> getGroups() {
        return groups;
    }

    public void setGroups(List<ParamGroup> groups) {
        this.groups = groups;
    }

    public List<FieldParam> getParams() {
        return params;
    }

    public void setParams(List<FieldParam> params) {
        this.params = params;
    }
}
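A sketch of composing the group types above into a nested boolean condition. It assumes the demo sits alongside ParamGroup in the same package; the leaf FieldParam lists are left empty because FieldParam's API is outside this file.

import java.util.ArrayList;
import java.util.Arrays;

public class ParamGroupDemo {
    public static void main(String[] args) {
        // Two raw leaf groups (their FieldParam lists would normally hold conditions).
        ParamGroup leafA = new ParamGroup();
        leafA.setType(ParamGroup.TYPE_RAW);
        leafA.setParams(new ArrayList<>());

        ParamGroup leafB = new ParamGroup();
        leafB.setType(ParamGroup.TYPE_RAW);
        leafB.setParams(new ArrayList<>());

        // An OR node over the two leaves...
        ParamGroup orGroup = new ParamGroup();
        orGroup.setType(ParamGroup.TYPE_OR);
        orGroup.setGroups(Arrays.asList(leafA, leafB));

        // ...nested under an AND root; trees like this compose arbitrarily deep
        // (leafA OR leafB) AND ... conditions.
        ParamGroup root = new ParamGroup();
        root.setType(ParamGroup.TYPE_AND);
        root.setGroups(Arrays.asList(orGroup));

        System.out.println(root.getType() == ParamGroup.TYPE_AND);
    }
}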
#ifndef example_exchange_h
#define example_exchange_h

#include "order.h"
#include "example_order_book.h"

#include <string>
#include <map>
#include <boost/shared_ptr.hpp>

namespace liquibook { namespace examples {

class Exchange {
public:
  Exchange(ExampleOrderBook::TypedDepthListener* depth_listener,
           ExampleOrderBook::TypedTradeListener* trade_listener);

  // Permanently add an order book to the exchange
  void add_order_book(const std::string& symbol);

  // Handle an incoming order
  void add_order(const std::string& symbol, OrderPtr& order);

private:
  typedef std::map<std::string, ExampleOrderBook> OrderBookMap;
  OrderBookMap order_books_;
  ExampleOrderBook::TypedDepthListener* depth_listener_;
  ExampleOrderBook::TypedTradeListener* trade_listener_;
};

} }

#endif
# Calling native base class unbound method with subclass instance.

class mylist(list):
    pass

l = mylist((1, 2, 3))
assert type(l) is mylist
print(l)

list.append(l, 4)
print(l)
#include "tree.h"

RUBY_EXTERN VALUE mRumale;

double* alloc_dbl_array(const long n_dimensions) {
  double* arr = ALLOC_N(double, n_dimensions);
  memset(arr, 0, n_dimensions * sizeof(double));
  return arr;
}

double calc_gini_coef(double* histogram, const long n_elements, const long n_classes) {
  long i;
  double el;
  double gini = 0.0;

  for (i = 0; i < n_classes; i++) {
    el = histogram[i] / n_elements;
    gini += el * el;
  }

  return 1.0 - gini;
}

double calc_entropy(double* histogram, const long n_elements, const long n_classes) {
  long i;
  double el;
  double entropy = 0.0;

  for (i = 0; i < n_classes; i++) {
    el = histogram[i] / n_elements;
    entropy += el * log(el + 1.0);
  }

  return -entropy;
}

VALUE calc_mean_vec(double* sum_vec, const long n_dimensions, const long n_elements) {
  long i;
  VALUE mean_vec = rb_ary_new2(n_dimensions);

  for (i = 0; i < n_dimensions; i++) {
    rb_ary_store(mean_vec, i, DBL2NUM(sum_vec[i] / n_elements));
  }

  return mean_vec;
}

double calc_vec_mae(VALUE vec_a, VALUE vec_b) {
  long i;
  const long n_dimensions = RARRAY_LEN(vec_a);
  double sum = 0.0;
  double diff;

  for (i = 0; i < n_dimensions; i++) {
    diff = NUM2DBL(rb_ary_entry(vec_a, i)) - NUM2DBL(rb_ary_entry(vec_b, i));
    sum += fabs(diff);
  }

  return sum / n_dimensions;
}

double calc_vec_mse(VALUE vec_a, VALUE vec_b) {
  long i;
  const long n_dimensions = RARRAY_LEN(vec_a);
  double sum = 0.0;
  double diff;

  for (i = 0; i < n_dimensions; i++) {
    diff = NUM2DBL(rb_ary_entry(vec_a, i)) - NUM2DBL(rb_ary_entry(vec_b, i));
    sum += diff * diff;
  }

  return sum / n_dimensions;
}

double calc_mae(VALUE target_vecs, VALUE mean_vec) {
  long i;
  const long n_elements = RARRAY_LEN(target_vecs);
  double sum = 0.0;

  for (i = 0; i < n_elements; i++) {
    sum += calc_vec_mae(rb_ary_entry(target_vecs, i), mean_vec);
  }

  return sum / n_elements;
}

double calc_mse(VALUE target_vecs, VALUE mean_vec) {
  long i;
  const long n_elements = RARRAY_LEN(target_vecs);
  double sum = 0.0;

  for (i = 0; i < n_elements; i++) {
    sum += calc_vec_mse(rb_ary_entry(target_vecs, i), mean_vec);
  }

  return sum / n_elements;
}

double calc_impurity_cls(const char* criterion, double* histogram, const long n_elements, const long n_classes) {
  if (strcmp(criterion, "entropy") == 0) {
    return calc_entropy(histogram, n_elements, n_classes);
  }
  return calc_gini_coef(histogram, n_elements, n_classes);
}

double calc_impurity_reg(const char* criterion, VALUE target_vecs, double* sum_vec) {
  const long n_elements = RARRAY_LEN(target_vecs);
  const long n_dimensions = RARRAY_LEN(rb_ary_entry(target_vecs, 0));
  VALUE mean_vec = calc_mean_vec(sum_vec, n_dimensions, n_elements);

  if (strcmp(criterion, "mae") == 0) {
    return calc_mae(target_vecs, mean_vec);
  }
  return calc_mse(target_vecs, mean_vec);
}

void add_sum_vec(double* sum_vec, VALUE target) {
  long i;
  const long n_dimensions = RARRAY_LEN(target);

  for (i = 0; i < n_dimensions; i++) {
    sum_vec[i] += NUM2DBL(rb_ary_entry(target, i));
  }
}

void sub_sum_vec(double* sum_vec, VALUE target) {
  long i;
  const long n_dimensions = RARRAY_LEN(target);

  for (i = 0; i < n_dimensions; i++) {
    sum_vec[i] -= NUM2DBL(rb_ary_entry(target, i));
  }
}

/**
 * @!visibility private
 */
typedef struct {
  char* criterion;
  long n_classes;
  double impurity;
} split_opts_cls;

/**
 * @!visibility private
 */
static void iter_find_split_params_cls(na_loop_t const* lp) {
  const int32_t* o = (int32_t*)NDL_PTR(lp, 0);
  const double* f = (double*)NDL_PTR(lp, 1);
  const int32_t* y = (int32_t*)NDL_PTR(lp, 2);
  const long n_elements = NDL_SHAPE(lp, 0)[0];
  const char* criterion = ((split_opts_cls*)lp->opt_ptr)->criterion;
  const long n_classes = ((split_opts_cls*)lp->opt_ptr)->n_classes;
  const double w_impurity = ((split_opts_cls*)lp->opt_ptr)->impurity;
  double* params = (double*)NDL_PTR(lp, 3);
  long i;
  long curr_pos = 0;
  long next_pos = 0;
  long n_l_elements = 0;
  long n_r_elements = n_elements;
  double curr_el = f[o[0]];
  double last_el = f[o[n_elements - 1]];
  double next_el;
  double l_impurity;
  double r_impurity;
  double gain;
  double* l_histogram = alloc_dbl_array(n_classes);
  double* r_histogram = alloc_dbl_array(n_classes);

  /* Initialize optimal parameters. */
  params[0] = 0.0;        /* left impurity */
  params[1] = w_impurity; /* right impurity */
  params[2] = curr_el;    /* threshold */
  params[3] = 0.0;        /* gain */

  /* Initialize child node variables. */
  for (i = 0; i < n_elements; i++) {
    r_histogram[y[o[i]]] += 1.0;
  }

  /* Find optimal parameters. */
  while (curr_pos < n_elements && curr_el != last_el) {
    next_el = f[o[next_pos]];
    while (next_pos < n_elements && next_el == curr_el) {
      l_histogram[y[o[next_pos]]] += 1;
      n_l_elements++;
      r_histogram[y[o[next_pos]]] -= 1;
      n_r_elements--;
      next_pos++;
      next_el = f[o[next_pos]];
    }
    /* Calculate gain of new split. */
    l_impurity = calc_impurity_cls(criterion, l_histogram, n_l_elements, n_classes);
    r_impurity = calc_impurity_cls(criterion, r_histogram, n_r_elements, n_classes);
    gain = w_impurity - (n_l_elements * l_impurity + n_r_elements * r_impurity) / n_elements;
    /* Update optimal parameters. */
    if (gain > params[3]) {
      params[0] = l_impurity;
      params[1] = r_impurity;
      params[2] = 0.5 * (curr_el + next_el);
      params[3] = gain;
    }
    if (next_pos == n_elements) break;
    curr_pos = next_pos;
    curr_el = f[o[curr_pos]];
  }

  xfree(l_histogram);
  xfree(r_histogram);
}

/**
 * @!visibility private
 * Find the split point with maximum information gain.
 *
 * @overload find_split_params(criterion, impurity, order, features, labels, n_classes) -> Array<Float>
 *
 * @param criterion [String] The function to evaluate the splitting point. Supported criteria are 'gini' and 'entropy'.
 * @param impurity [Float] The impurity of the whole dataset.
 * @param order [Numo::Int32] (shape: [n_elements]) The element indices sorted according to feature values.
 * @param features [Numo::DFloat] (shape: [n_elements]) The feature values.
 * @param labels [Numo::Int32] (shape: [n_elements]) The labels.
 * @param n_classes [Integer] The number of classes.
 * @return [Array<Float>] The array consists of optimal parameters including impurities of child nodes, threshold, and gain.
 */
static VALUE find_split_params_cls(VALUE self, VALUE criterion, VALUE impurity, VALUE order, VALUE features, VALUE labels, VALUE n_classes) {
  ndfunc_arg_in_t ain[3] = {{numo_cInt32, 1}, {numo_cDFloat, 1}, {numo_cInt32, 1}};
  size_t out_shape[1] = {4};
  ndfunc_arg_out_t aout[1] = {{numo_cDFloat, 1, out_shape}};
  ndfunc_t ndf = {(na_iter_func_t)iter_find_split_params_cls, NO_LOOP, 3, 1, ain, aout};
  split_opts_cls opts = {StringValuePtr(criterion), NUM2LONG(n_classes), NUM2DBL(impurity)};
  VALUE params = na_ndloop3(&ndf, &opts, 3, order, features, labels);
  VALUE results = rb_ary_new2(4);
  double* params_ptr = (double*)na_get_pointer_for_read(params);
  rb_ary_store(results, 0, DBL2NUM(params_ptr[0]));
  rb_ary_store(results, 1, DBL2NUM(params_ptr[1]));
  rb_ary_store(results, 2, DBL2NUM(params_ptr[2]));
  rb_ary_store(results, 3, DBL2NUM(params_ptr[3]));
  RB_GC_GUARD(params);
  RB_GC_GUARD(criterion);
  return results;
}

/**
 * @!visibility private
 */
typedef struct {
  char* criterion;
  double impurity;
} split_opts_reg;

/**
 * @!visibility private
 */
static void iter_find_split_params_reg(na_loop_t const* lp) {
  const int32_t* o = (int32_t*)NDL_PTR(lp, 0);
  const double* f = (double*)NDL_PTR(lp, 1);
  const double* y = (double*)NDL_PTR(lp, 2);
  const long n_elements = NDL_SHAPE(lp, 0)[0];
  const long n_outputs = NDL_SHAPE(lp, 2)[1];
  const char* criterion = ((split_opts_reg*)lp->opt_ptr)->criterion;
  const double w_impurity = ((split_opts_reg*)lp->opt_ptr)->impurity;
  double* params = (double*)NDL_PTR(lp, 3);
  long i, j;
  long curr_pos = 0;
  long next_pos = 0;
  long n_l_elements = 0;
  long n_r_elements = n_elements;
  double curr_el = f[o[0]];
  double last_el = f[o[n_elements - 1]];
  double next_el;
  double l_impurity;
  double r_impurity;
  double gain;
  double* l_sum_vec = alloc_dbl_array(n_outputs);
  double* r_sum_vec = alloc_dbl_array(n_outputs);
  double target_var;
  VALUE l_target_vecs = rb_ary_new();
  VALUE r_target_vecs = rb_ary_new();
  VALUE target;

  /* Initialize optimal parameters. */
  params[0] = 0.0;        /* left impurity */
  params[1] = w_impurity; /* right impurity */
  params[2] = curr_el;    /* threshold */
  params[3] = 0.0;        /* gain */

  /* Initialize child node variables. */
  for (i = 0; i < n_elements; i++) {
    target = rb_ary_new2(n_outputs);
    for (j = 0; j < n_outputs; j++) {
      target_var = y[o[i] * n_outputs + j];
      rb_ary_store(target, j, DBL2NUM(target_var));
      r_sum_vec[j] += target_var;
    }
    rb_ary_push(r_target_vecs, target);
  }

  /* Find optimal parameters. */
  while (curr_pos < n_elements && curr_el != last_el) {
    next_el = f[o[next_pos]];
    while (next_pos < n_elements && next_el == curr_el) {
      target = rb_ary_shift(r_target_vecs);
      n_r_elements--;
      sub_sum_vec(r_sum_vec, target);
      rb_ary_push(l_target_vecs, target);
      n_l_elements++;
      add_sum_vec(l_sum_vec, target);
      next_pos++;
      next_el = f[o[next_pos]];
    }
    /* Calculate gain of new split. */
    l_impurity = calc_impurity_reg(criterion, l_target_vecs, l_sum_vec);
    r_impurity = calc_impurity_reg(criterion, r_target_vecs, r_sum_vec);
    gain = w_impurity - (n_l_elements * l_impurity + n_r_elements * r_impurity) / n_elements;
    /* Update optimal parameters. */
    if (gain > params[3]) {
      params[0] = l_impurity;
      params[1] = r_impurity;
      params[2] = 0.5 * (curr_el + next_el);
      params[3] = gain;
    }
    if (next_pos == n_elements) break;
    curr_pos = next_pos;
    curr_el = f[o[curr_pos]];
  }

  xfree(l_sum_vec);
  xfree(r_sum_vec);
}

/**
 * @!visibility private
 * Find the split point with maximum information gain.
 *
 * @overload find_split_params(criterion, impurity, order, features, targets) -> Array<Float>
 *
 * @param criterion [String] The function to evaluate the splitting point. Supported criteria are 'mae' and 'mse'.
 * @param impurity [Float] The impurity of the whole dataset.
 * @param order [Numo::Int32] (shape: [n_samples]) The element indices sorted according to feature values in ascending order.
 * @param features [Numo::DFloat] (shape: [n_samples]) The feature values.
 * @param targets [Numo::DFloat] (shape: [n_samples, n_outputs]) The target values.
 * @return [Array<Float>] The array consists of optimal parameters including impurities of child nodes, threshold, and gain.
 */
static VALUE find_split_params_reg(VALUE self, VALUE criterion, VALUE impurity, VALUE order, VALUE features, VALUE targets) {
  ndfunc_arg_in_t ain[3] = {{numo_cInt32, 1}, {numo_cDFloat, 1}, {numo_cDFloat, 2}};
  size_t out_shape[1] = {4};
  ndfunc_arg_out_t aout[1] = {{numo_cDFloat, 1, out_shape}};
  ndfunc_t ndf = {(na_iter_func_t)iter_find_split_params_reg, NO_LOOP, 3, 1, ain, aout};
  split_opts_reg opts = {StringValuePtr(criterion), NUM2DBL(impurity)};
  VALUE params = na_ndloop3(&ndf, &opts, 3, order, features, targets);
  VALUE results = rb_ary_new2(4);
  double* params_ptr = (double*)na_get_pointer_for_read(params);
  rb_ary_store(results, 0, DBL2NUM(params_ptr[0]));
  rb_ary_store(results, 1, DBL2NUM(params_ptr[1]));
  rb_ary_store(results, 2, DBL2NUM(params_ptr[2]));
  rb_ary_store(results, 3, DBL2NUM(params_ptr[3]));
  RB_GC_GUARD(params);
  RB_GC_GUARD(criterion);
  return results;
}

/**
 * @!visibility private
 */
static void iter_find_split_params_grad_reg(na_loop_t const* lp) {
  const int32_t* o = (int32_t*)NDL_PTR(lp, 0);
  const double* f = (double*)NDL_PTR(lp, 1);
  const double* g = (double*)NDL_PTR(lp, 2);
  const double* h = (double*)NDL_PTR(lp, 3);
  const double s_grad = ((double*)lp->opt_ptr)[0];
  const double s_hess = ((double*)lp->opt_ptr)[1];
  const double reg_lambda = ((double*)lp->opt_ptr)[2];
  const long n_elements = NDL_SHAPE(lp, 0)[0];
  double* params = (double*)NDL_PTR(lp, 4);
  long curr_pos = 0;
  long next_pos = 0;
  double curr_el = f[o[0]];
  double last_el = f[o[n_elements - 1]];
  double next_el;
  double l_grad = 0.0;
  double l_hess = 0.0;
  double r_grad;
  double r_hess;
  double threshold = curr_el;
  double gain_max = 0.0;
  double gain;

  /* Find optimal parameters. */
  while (curr_pos < n_elements && curr_el != last_el) {
    next_el = f[o[next_pos]];
    while (next_pos < n_elements && next_el == curr_el) {
      l_grad += g[o[next_pos]];
      l_hess += h[o[next_pos]];
      next_pos++;
      next_el = f[o[next_pos]];
    }
    /* Calculate gain of new split. */
    r_grad = s_grad - l_grad;
    r_hess = s_hess - l_hess;
    gain = (l_grad * l_grad) / (l_hess + reg_lambda) +
           (r_grad * r_grad) / (r_hess + reg_lambda) -
           (s_grad * s_grad) / (s_hess + reg_lambda);
    /* Update optimal parameters. */
    if (gain > gain_max) {
      threshold = 0.5 * (curr_el + next_el);
      gain_max = gain;
    }
    if (next_pos == n_elements) {
      break;
    }
    curr_pos = next_pos;
    curr_el = f[o[curr_pos]];
  }

  params[0] = threshold;
  params[1] = gain_max;
}

/**
 * @!visibility private
 * Find the split point with maximum information gain.
 *
 * @overload find_split_params(order, features, gradients, hessians, sum_gradient, sum_hessian, reg_lambda) -> Array<Float>
 *
 * @param order [Numo::Int32] (shape: [n_elements]) The element indices sorted according to feature values.
 * @param features [Numo::DFloat] (shape: [n_elements]) The feature values.
 * @param gradients [Numo::DFloat] (shape: [n_elements]) The gradient values.
 * @param hessians [Numo::DFloat] (shape: [n_elements]) The hessian values.
 * @param sum_gradient [Float] The sum of gradient values.
 * @param sum_hessian [Float] The sum of hessian values.
 * @param reg_lambda [Float] The L2 regularization term on weight.
 * @return [Array<Float>] The array consists of optimal parameters including threshold and gain.
 */
static VALUE find_split_params_grad_reg(VALUE self, VALUE order, VALUE features, VALUE gradients, VALUE hessians, VALUE sum_gradient, VALUE sum_hessian, VALUE reg_lambda) {
  ndfunc_arg_in_t ain[4] = {{numo_cInt32, 1}, {numo_cDFloat, 1}, {numo_cDFloat, 1}, {numo_cDFloat, 1}};
  size_t out_shape[1] = {2};
  ndfunc_arg_out_t aout[1] = {{numo_cDFloat, 1, out_shape}};
  ndfunc_t ndf = {(na_iter_func_t)iter_find_split_params_grad_reg, NO_LOOP, 4, 1, ain, aout};
  double opts[3] = {NUM2DBL(sum_gradient), NUM2DBL(sum_hessian), NUM2DBL(reg_lambda)};
  VALUE params = na_ndloop3(&ndf, opts, 4, order, features, gradients, hessians);
  VALUE results = rb_ary_new2(2);
  double* params_ptr = (double*)na_get_pointer_for_read(params);
  rb_ary_store(results, 0, DBL2NUM(params_ptr[0]));
  rb_ary_store(results, 1, DBL2NUM(params_ptr[1]));
  RB_GC_GUARD(params);
  return results;
}

/**
 * @!visibility private
 * Calculate impurity based on criterion.
 *
 * @overload node_impurity(criterion, y, n_elements, n_classes) -> Float
 *
 * @param criterion [String] The function to calculate impurity. Supported criteria are 'gini' and 'entropy'.
 * @param y_nary [Numo::Int32] (shape: [n_samples]) The labels.
 * @param n_elements_ [Integer] The number of elements.
 * @param n_classes_ [Integer] The number of classes.
 * @return [Float] impurity
 */
static VALUE node_impurity_cls(VALUE self, VALUE criterion, VALUE y_nary, VALUE n_elements_, VALUE n_classes_) {
  long i;
  const long n_classes = NUM2LONG(n_classes_);
  const long n_elements = NUM2LONG(n_elements_);
  const int32_t* y = (int32_t*)na_get_pointer_for_read(y_nary);
  double* histogram = alloc_dbl_array(n_classes);
  VALUE ret;

  for (i = 0; i < n_elements; i++) {
    histogram[y[i]] += 1;
  }

  ret = DBL2NUM(calc_impurity_cls(StringValuePtr(criterion), histogram, n_elements, n_classes));
  xfree(histogram);

  RB_GC_GUARD(y_nary);
  RB_GC_GUARD(criterion);
  return ret;
}

/**
 * @!visibility private
 * Calculate impurity based on criterion.
 *
 * @overload node_impurity(criterion, y) -> Float
 *
 * @param criterion [String] The function to calculate impurity. Supported criteria are 'mae' and 'mse'.
 * @param y [Array<Float>] (shape: [n_samples, n_outputs]) The target values.
 * @return [Float] impurity
 */
static VALUE node_impurity_reg(VALUE self, VALUE criterion, VALUE y) {
  long i;
  const long n_elements = RARRAY_LEN(y);
  const long n_outputs = RARRAY_LEN(rb_ary_entry(y, 0));
  double* sum_vec = alloc_dbl_array(n_outputs);
  VALUE target_vecs = rb_ary_new();
  VALUE target;
  VALUE ret;

  for (i = 0; i < n_elements; i++) {
    target = rb_ary_entry(y, i);
    add_sum_vec(sum_vec, target);
    rb_ary_push(target_vecs, target);
  }

  ret = DBL2NUM(calc_impurity_reg(StringValuePtr(criterion), target_vecs, sum_vec));
  xfree(sum_vec);

  RB_GC_GUARD(criterion);
  return ret;
}

void init_tree_module() {
  VALUE mTree = rb_define_module_under(mRumale, "Tree");

  /**
   * Document-module: Rumale::Tree::ExtDecisionTreeClassifier
   * @!visibility private
   * The mixin module consisting of extension methods for the DecisionTreeClassifier class.
   * This module is used internally.
   */
  VALUE mExtDTreeCls = rb_define_module_under(mTree, "ExtDecisionTreeClassifier");
  /**
   * Document-module: Rumale::Tree::ExtDecisionTreeRegressor
   * @!visibility private
   * The mixin module consisting of extension methods for the DecisionTreeRegressor class.
   * This module is used internally.
   */
  VALUE mExtDTreeReg = rb_define_module_under(mTree, "ExtDecisionTreeRegressor");
  /**
   * Document-module: Rumale::Tree::ExtGradientTreeRegressor
   * @!visibility private
   * The mixin module consisting of extension methods for the GradientTreeRegressor class.
   * This module is used internally.
   */
  VALUE mExtGTreeReg = rb_define_module_under(mTree, "ExtGradientTreeRegressor");

  rb_define_private_method(mExtDTreeCls, "find_split_params", find_split_params_cls, 6);
  rb_define_private_method(mExtDTreeReg, "find_split_params", find_split_params_reg, 5);
  rb_define_private_method(mExtGTreeReg, "find_split_params", find_split_params_grad_reg, 7);
  rb_define_private_method(mExtDTreeCls, "node_impurity", node_impurity_cls, 4);
  rb_define_private_method(mExtDTreeReg, "node_impurity", node_impurity_reg, 2);
}
#include <string.h>
#include <stdlib.h>
#include "uri.h"

using namespace std;

namespace rokid {

static bool parse_scheme(Uri* uri, const char* s, int32_t& b, int32_t e) {
  int32_t i = b;
  while (i < e) {
    if (s[i] == ':') {
      if (i > b) {
        uri->scheme.assign(s, i - b);
        b = i + 1;
        return true;
      }
      break;
    }
    ++i;
  }
  return false;
}

static bool parse_authority(Uri* uri, const char* s, int32_t& b, int32_t e) {
  if (e - b < 2)
    return true;
  if (s[b] != '/' || s[b + 1] != '/')
    return true;
  int32_t nb = b + 2;
  int32_t i = nb;
  bool found_at = false;
  while (i < e) {
    if (!found_at) {
      if (s[i] == '@') {
        if (i - nb == 0) {
          // invalid uri: schema://@...
          return false;
        }
        uri->user.assign(s + nb, i - nb);
        found_at = true;
        nb = i + 1;
        ++i;
        continue;
      }
    }
    if (s[i] == ':') {
      if (i - nb == 0) {
        // invalid uri: schema://:
        //              schema://[user]@:
        return false;
      }
      uri->host.assign(s + nb, i - nb);
      nb = i + 1;
      // found ':', no need to look for '@' anymore
      found_at = true;
      ++i;
      continue;
    }
    if (s[i] == '/') {
      if (i - nb == 0) {
        // invalid uri: schema:///
        //              schema://[user]@/
        //              schema://[user]@host:/
        return false;
      }
      if (uri->host.empty()) {
        uri->host.assign(s + nb, i - nb);
      } else {
        string portstr(s + nb, i - nb);
        char* ep;
        uri->port = strtol(portstr.c_str(), &ep, 10);
        if (ep[0] != '\0')
          return false;
      }
      b = i;
      return true;
    }
    ++i;
  }
  return false;
}

static bool parse_path(Uri* uri, const char* s, int32_t b, int32_t e) {
  if (e - b == 0)
    return false;
  int32_t nb = b;
  int32_t i = nb;
  int32_t found_symbol = 0;
  while (i < e) {
    if (found_symbol < 1) {
      if (s[i] == '?') {
        if (i - nb == 0) {
          // invalid uri: schema://[[user]@host[:port]]?
          return false;
        }
        uri->path.assign(s + nb, i - nb);
        found_symbol = 1;
        nb = i + 1;
        ++i;
        continue;
      }
    }
    if (found_symbol < 2) {
      if (s[i] == '#') {
        if (i - nb > 0) {
          if (found_symbol == 0)
            uri->path.assign(s + nb, i - nb);
          else
            uri->query.assign(s + nb, i - nb);
        }
        nb = i + 1;
        // found '#', no need to look for '?' anymore
        found_symbol = 2;
        ++i;
        continue;
      }
    }
    ++i;
  }
  if (found_symbol == 0)
    uri->path.assign(s + nb, e - nb);
  else if (found_symbol == 1)
    uri->query.assign(s + nb, e - nb);
  else
    uri->fragment.assign(s + nb, e - nb);
  return true;
}

bool Uri::parse(const char* uri) {
  if (uri == nullptr)
    return false;
  clear();
  int32_t e = strlen(uri);
  int32_t b = 0;
  if (!parse_scheme(this, uri, b, e))
    return false;
  if (!parse_authority(this, uri, b, e))
    return false;
  return parse_path(this, uri, b, e);
}

void Uri::clear() {
  scheme.clear();
  user.clear();
  host.clear();
  port = 0;
  path.clear();
  query.clear();
  fragment.clear();
}

} // namespace rokid
# # Autogenerated by Thrift Compiler (0.9.2) # # DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING # # options string: py:utf8strings # from thrift.Thrift import TType, TMessageType, TException, TApplicationException from thrift.transport import TTransport from thrift.protocol import TBinaryProtocol, TProtocol try: from thrift.protocol import fastbinary except: fastbinary = None class TopologyInitialStatus: ACTIVE = 1 INACTIVE = 2 _VALUES_TO_NAMES = { 1: "ACTIVE", 2: "INACTIVE", } _NAMES_TO_VALUES = { "ACTIVE": 1, "INACTIVE": 2, } class JavaObjectArg: """ Attributes: - int_arg - long_arg - string_arg - bool_arg - binary_arg - double_arg """ thrift_spec = ( None, # 0 (1, TType.I32, 'int_arg', None, None, ), # 1 (2, TType.I64, 'long_arg', None, None, ), # 2 (3, TType.STRING, 'string_arg', None, None, ), # 3 (4, TType.BOOL, 'bool_arg', None, None, ), # 4 (5, TType.STRING, 'binary_arg', None, None, ), # 5 (6, TType.DOUBLE, 'double_arg', None, None, ), # 6 ) def __init__(self, int_arg=None, long_arg=None, string_arg=None, bool_arg=None, binary_arg=None, double_arg=None,): self.int_arg = int_arg self.long_arg = long_arg self.string_arg = string_arg self.bool_arg = bool_arg self.binary_arg = binary_arg self.double_arg = double_arg def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.int_arg = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.long_arg = iprot.readI64(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.string_arg = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 4: if ftype == TType.BOOL: self.bool_arg = iprot.readBool(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRING: self.binary_arg = iprot.readString(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.DOUBLE: self.double_arg = iprot.readDouble(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('JavaObjectArg') if self.int_arg is not None: oprot.writeFieldBegin('int_arg', TType.I32, 1) oprot.writeI32(self.int_arg) oprot.writeFieldEnd() if self.long_arg is not None: oprot.writeFieldBegin('long_arg', TType.I64, 2) oprot.writeI64(self.long_arg) oprot.writeFieldEnd() if self.string_arg is not None: oprot.writeFieldBegin('string_arg', TType.STRING, 3) oprot.writeString(self.string_arg.encode('utf-8')) oprot.writeFieldEnd() if self.bool_arg is not None: oprot.writeFieldBegin('bool_arg', TType.BOOL, 4) oprot.writeBool(self.bool_arg) oprot.writeFieldEnd() if self.binary_arg is not None: oprot.writeFieldBegin('binary_arg', TType.STRING, 5) oprot.writeString(self.binary_arg) oprot.writeFieldEnd() if self.double_arg is not None: oprot.writeFieldBegin('double_arg', TType.DOUBLE, 6) oprot.writeDouble(self.double_arg) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 
17 value = (value * 31) ^ hash(self.int_arg) value = (value * 31) ^ hash(self.long_arg) value = (value * 31) ^ hash(self.string_arg) value = (value * 31) ^ hash(self.bool_arg) value = (value * 31) ^ hash(self.binary_arg) value = (value * 31) ^ hash(self.double_arg) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class JavaObject: """ Attributes: - full_class_name - args_list """ thrift_spec = ( None, # 0 (1, TType.STRING, 'full_class_name', None, None, ), # 1 (2, TType.LIST, 'args_list', (TType.STRUCT,(JavaObjectArg, JavaObjectArg.thrift_spec)), None, ), # 2 ) def __init__(self, full_class_name=None, args_list=None,): self.full_class_name = full_class_name self.args_list = args_list def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.full_class_name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.args_list = [] (_etype3, _size0) = iprot.readListBegin() for _i4 in xrange(_size0): _elem5 = JavaObjectArg() _elem5.read(iprot) self.args_list.append(_elem5) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('JavaObject') if self.full_class_name is not None: oprot.writeFieldBegin('full_class_name', TType.STRING, 1) oprot.writeString(self.full_class_name.encode('utf-8')) oprot.writeFieldEnd() if self.args_list is not None: oprot.writeFieldBegin('args_list', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.args_list)) for iter6 in self.args_list: iter6.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.full_class_name is None: raise TProtocol.TProtocolException(message='Required field full_class_name is unset!') if self.args_list is None: raise TProtocol.TProtocolException(message='Required field args_list is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.full_class_name) value = (value * 31) ^ hash(self.args_list) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class NullStruct: thrift_spec = ( ) def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) 
return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('NullStruct') oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class GlobalStreamId: """ Attributes: - componentId - streamId """ thrift_spec = ( None, # 0 (1, TType.STRING, 'componentId', None, None, ), # 1 (2, TType.STRING, 'streamId', None, None, ), # 2 ) def __init__(self, componentId=None, streamId=None,): self.componentId = componentId self.streamId = streamId def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.componentId = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.streamId = iprot.readString().decode('utf-8') else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('GlobalStreamId') if self.componentId is not None: oprot.writeFieldBegin('componentId', TType.STRING, 1) oprot.writeString(self.componentId.encode('utf-8')) oprot.writeFieldEnd() if self.streamId is not None: oprot.writeFieldBegin('streamId', TType.STRING, 2) oprot.writeString(self.streamId.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.componentId is None: raise TProtocol.TProtocolException(message='Required field componentId is unset!') if self.streamId is None: raise TProtocol.TProtocolException(message='Required field streamId is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.componentId) value = (value * 31) ^ hash(self.streamId) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Grouping: """ Attributes: - fields - shuffle - all - none - direct - custom_object - custom_serialized - local_or_shuffle - localFirst """ thrift_spec = ( None, # 0 (1, TType.LIST, 'fields', (TType.STRING,None), None, ), # 1 (2, TType.STRUCT, 'shuffle', (NullStruct, NullStruct.thrift_spec), None, ), # 2 (3, TType.STRUCT, 
'all', (NullStruct, NullStruct.thrift_spec), None, ), # 3 (4, TType.STRUCT, 'none', (NullStruct, NullStruct.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'direct', (NullStruct, NullStruct.thrift_spec), None, ), # 5 (6, TType.STRUCT, 'custom_object', (JavaObject, JavaObject.thrift_spec), None, ), # 6 (7, TType.STRING, 'custom_serialized', None, None, ), # 7 (8, TType.STRUCT, 'local_or_shuffle', (NullStruct, NullStruct.thrift_spec), None, ), # 8 (9, TType.STRUCT, 'localFirst', (NullStruct, NullStruct.thrift_spec), None, ), # 9 ) def __init__(self, fields=None, shuffle=None, all=None, none=None, direct=None, custom_object=None, custom_serialized=None, local_or_shuffle=None, localFirst=None,): self.fields = fields self.shuffle = shuffle self.all = all self.none = none self.direct = direct self.custom_object = custom_object self.custom_serialized = custom_serialized self.local_or_shuffle = local_or_shuffle self.localFirst = localFirst def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.LIST: self.fields = [] (_etype10, _size7) = iprot.readListBegin() for _i11 in xrange(_size7): _elem12 = iprot.readString().decode('utf-8') self.fields.append(_elem12) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.shuffle = NullStruct() self.shuffle.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.all = NullStruct() self.all.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.none = NullStruct() self.none.read(iprot) else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRUCT: self.direct = NullStruct() self.direct.read(iprot) else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRUCT: self.custom_object = JavaObject() self.custom_object.read(iprot) else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRING: self.custom_serialized = iprot.readString(); else: iprot.skip(ftype) elif fid == 8: if ftype == TType.STRUCT: self.local_or_shuffle = NullStruct() self.local_or_shuffle.read(iprot) else: iprot.skip(ftype) elif fid == 9: if ftype == TType.STRUCT: self.localFirst = NullStruct() self.localFirst.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Grouping') if self.fields is not None: oprot.writeFieldBegin('fields', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.fields)) for iter13 in self.fields: oprot.writeString(iter13.encode('utf-8')) oprot.writeListEnd() oprot.writeFieldEnd() if self.shuffle is not None: oprot.writeFieldBegin('shuffle', TType.STRUCT, 2) self.shuffle.write(oprot) oprot.writeFieldEnd() if self.all is not None: oprot.writeFieldBegin('all', TType.STRUCT, 3) self.all.write(oprot) oprot.writeFieldEnd() if self.none is not None: oprot.writeFieldBegin('none', TType.STRUCT, 4) self.none.write(oprot) oprot.writeFieldEnd() if self.direct is not None: oprot.writeFieldBegin('direct', 
TType.STRUCT, 5) self.direct.write(oprot) oprot.writeFieldEnd() if self.custom_object is not None: oprot.writeFieldBegin('custom_object', TType.STRUCT, 6) self.custom_object.write(oprot) oprot.writeFieldEnd() if self.custom_serialized is not None: oprot.writeFieldBegin('custom_serialized', TType.STRING, 7) oprot.writeString(self.custom_serialized) oprot.writeFieldEnd() if self.local_or_shuffle is not None: oprot.writeFieldBegin('local_or_shuffle', TType.STRUCT, 8) self.local_or_shuffle.write(oprot) oprot.writeFieldEnd() if self.localFirst is not None: oprot.writeFieldBegin('localFirst', TType.STRUCT, 9) self.localFirst.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.fields) value = (value * 31) ^ hash(self.shuffle) value = (value * 31) ^ hash(self.all) value = (value * 31) ^ hash(self.none) value = (value * 31) ^ hash(self.direct) value = (value * 31) ^ hash(self.custom_object) value = (value * 31) ^ hash(self.custom_serialized) value = (value * 31) ^ hash(self.local_or_shuffle) value = (value * 31) ^ hash(self.localFirst) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class StreamInfo: """ Attributes: - output_fields - direct """ thrift_spec = ( None, # 0 (1, TType.LIST, 'output_fields', (TType.STRING,None), None, ), # 1 (2, TType.BOOL, 'direct', None, None, ), # 2 ) def __init__(self, output_fields=None, direct=None,): self.output_fields = output_fields self.direct = direct def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.LIST: self.output_fields = [] (_etype17, _size14) = iprot.readListBegin() for _i18 in xrange(_size14): _elem19 = iprot.readString().decode('utf-8') self.output_fields.append(_elem19) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.BOOL: self.direct = iprot.readBool(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('StreamInfo') if self.output_fields is not None: oprot.writeFieldBegin('output_fields', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.output_fields)) for iter20 in self.output_fields: oprot.writeString(iter20.encode('utf-8')) oprot.writeListEnd() oprot.writeFieldEnd() if self.direct is not None: oprot.writeFieldBegin('direct', TType.BOOL, 2) oprot.writeBool(self.direct) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.output_fields is None: raise TProtocol.TProtocolException(message='Required field output_fields is unset!') if self.direct is None: raise TProtocol.TProtocolException(message='Required 
class ShellComponent:
  """
  Attributes:
   - execution_command
   - script
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'execution_command', None, None, ), # 1
    (2, TType.STRING, 'script', None, None, ), # 2
  )

  def __init__(self, execution_command=None, script=None,):
    self.execution_command = execution_command
    self.script = script

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.execution_command = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.script = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ShellComponent')
    if self.execution_command is not None:
      oprot.writeFieldBegin('execution_command', TType.STRING, 1)
      oprot.writeString(self.execution_command.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.script is not None:
      oprot.writeFieldBegin('script', TType.STRING, 2)
      oprot.writeString(self.script.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.execution_command)
    value = (value * 31) ^ hash(self.script)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class ComponentObject:
  """
  Attributes:
   - serialized_java
   - shell
   - java_object
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'serialized_java', None, None, ), # 1
    (2, TType.STRUCT, 'shell', (ShellComponent, ShellComponent.thrift_spec), None, ), # 2
    (3, TType.STRUCT, 'java_object', (JavaObject, JavaObject.thrift_spec), None, ), # 3
  )

  def __init__(self, serialized_java=None, shell=None, java_object=None,):
    self.serialized_java = serialized_java
    self.shell = shell
    self.java_object = java_object

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.serialized_java = iprot.readString()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.shell = ShellComponent()
          self.shell.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRUCT:
          self.java_object = JavaObject()
          self.java_object.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ComponentObject')
    if self.serialized_java is not None:
      oprot.writeFieldBegin('serialized_java', TType.STRING, 1)
      oprot.writeString(self.serialized_java)
      oprot.writeFieldEnd()
    if self.shell is not None:
      oprot.writeFieldBegin('shell', TType.STRUCT, 2)
      self.shell.write(oprot)
      oprot.writeFieldEnd()
    if self.java_object is not None:
      oprot.writeFieldBegin('java_object', TType.STRUCT, 3)
      self.java_object.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.serialized_java)
    value = (value * 31) ^ hash(self.shell)
    value = (value * 31) ^ hash(self.java_object)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
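# Illustrative sketch, not generated code: ComponentObject is a union of three
# representations, so set exactly one. A ShellComponent points at a non-JVM
# implementation; serialized_java carries opaque bytes. The command and script
# names below are invented sample data.
def _example_component_object():
  shell = ShellComponent(execution_command=u'python', script=u'splitsentence.py')
  return ComponentObject(shell=shell)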
class ComponentCommon:
  """
  Attributes:
   - inputs
   - streams
   - parallelism_hint
   - json_conf
  """

  thrift_spec = (
    None, # 0
    (1, TType.MAP, 'inputs', (TType.STRUCT,(GlobalStreamId, GlobalStreamId.thrift_spec),TType.STRUCT,(Grouping, Grouping.thrift_spec)), None, ), # 1
    (2, TType.MAP, 'streams', (TType.STRING,None,TType.STRUCT,(StreamInfo, StreamInfo.thrift_spec)), None, ), # 2
    (3, TType.I32, 'parallelism_hint', None, None, ), # 3
    (4, TType.STRING, 'json_conf', None, None, ), # 4
  )

  def __init__(self, inputs=None, streams=None, parallelism_hint=None, json_conf=None,):
    self.inputs = inputs
    self.streams = streams
    self.parallelism_hint = parallelism_hint
    self.json_conf = json_conf

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.inputs = {}
          (_ktype22, _vtype23, _size21 ) = iprot.readMapBegin()
          for _i25 in xrange(_size21):
            _key26 = GlobalStreamId()
            _key26.read(iprot)
            _val27 = Grouping()
            _val27.read(iprot)
            self.inputs[_key26] = _val27
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          self.streams = {}
          (_ktype29, _vtype30, _size28 ) = iprot.readMapBegin()
          for _i32 in xrange(_size28):
            _key33 = iprot.readString().decode('utf-8')
            _val34 = StreamInfo()
            _val34.read(iprot)
            self.streams[_key33] = _val34
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.parallelism_hint = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.STRING:
          self.json_conf = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ComponentCommon')
    if self.inputs is not None:
      oprot.writeFieldBegin('inputs', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRUCT, TType.STRUCT, len(self.inputs))
      for kiter35,viter36 in self.inputs.items():
        kiter35.write(oprot)
        viter36.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.streams is not None:
      oprot.writeFieldBegin('streams', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.streams))
      for kiter37,viter38 in self.streams.items():
        oprot.writeString(kiter37.encode('utf-8'))
        viter38.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.parallelism_hint is not None:
      oprot.writeFieldBegin('parallelism_hint', TType.I32, 3)
      oprot.writeI32(self.parallelism_hint)
      oprot.writeFieldEnd()
    if self.json_conf is not None:
      oprot.writeFieldBegin('json_conf', TType.STRING, 4)
      oprot.writeString(self.json_conf.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.inputs is None:
      raise TProtocol.TProtocolException(message='Required field inputs is unset!')
    if self.streams is None:
      raise TProtocol.TProtocolException(message='Required field streams is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.inputs)
    value = (value * 31) ^ hash(self.streams)
    value = (value * 31) ^ hash(self.parallelism_hint)
    value = (value * 31) ^ hash(self.json_conf)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
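# Illustrative sketch, not generated code: ComponentCommon wires a component
# into the topology graph. The inputs map is keyed by GlobalStreamId (defined
# earlier in this module); the componentId/streamId keyword names are taken
# from that struct, and all values below are invented sample data.
def _example_component_common():
  inputs = {GlobalStreamId(componentId=u'spout', streamId=u'default'):
            Grouping(fields=[u'word'])}
  streams = {u'default': StreamInfo(output_fields=[u'word', u'count'], direct=False)}
  return ComponentCommon(inputs=inputs, streams=streams, parallelism_hint=4)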
class SpoutSpec:
  """
  Attributes:
   - spout_object
   - common
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'spout_object', (ComponentObject, ComponentObject.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2
  )

  def __init__(self, spout_object=None, common=None,):
    self.spout_object = spout_object
    self.common = common

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.spout_object = ComponentObject()
          self.spout_object.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.common = ComponentCommon()
          self.common.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('SpoutSpec')
    if self.spout_object is not None:
      oprot.writeFieldBegin('spout_object', TType.STRUCT, 1)
      self.spout_object.write(oprot)
      oprot.writeFieldEnd()
    if self.common is not None:
      oprot.writeFieldBegin('common', TType.STRUCT, 2)
      self.common.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.spout_object is None:
      raise TProtocol.TProtocolException(message='Required field spout_object is unset!')
    if self.common is None:
      raise TProtocol.TProtocolException(message='Required field common is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.spout_object)
    value = (value * 31) ^ hash(self.common)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class Bolt:
  """
  Attributes:
   - bolt_object
   - common
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'bolt_object', (ComponentObject, ComponentObject.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2
  )

  def __init__(self, bolt_object=None, common=None,):
    self.bolt_object = bolt_object
    self.common = common

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.bolt_object = ComponentObject()
          self.bolt_object.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.common = ComponentCommon()
          self.common.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('Bolt')
    if self.bolt_object is not None:
      oprot.writeFieldBegin('bolt_object', TType.STRUCT, 1)
      self.bolt_object.write(oprot)
      oprot.writeFieldEnd()
    if self.common is not None:
      oprot.writeFieldBegin('common', TType.STRUCT, 2)
      self.common.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.bolt_object is None:
      raise TProtocol.TProtocolException(message='Required field bolt_object is unset!')
    if self.common is None:
      raise TProtocol.TProtocolException(message='Required field common is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.bolt_object)
    value = (value * 31) ^ hash(self.common)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class StateSpoutSpec:
  """
  Attributes:
   - state_spout_object
   - common
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'state_spout_object', (ComponentObject, ComponentObject.thrift_spec), None, ), # 1
    (2, TType.STRUCT, 'common', (ComponentCommon, ComponentCommon.thrift_spec), None, ), # 2
  )

  def __init__(self, state_spout_object=None, common=None,):
    self.state_spout_object = state_spout_object
    self.common = common

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.state_spout_object = ComponentObject()
          self.state_spout_object.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRUCT:
          self.common = ComponentCommon()
          self.common.read(iprot)
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('StateSpoutSpec')
    if self.state_spout_object is not None:
      oprot.writeFieldBegin('state_spout_object', TType.STRUCT, 1)
      self.state_spout_object.write(oprot)
      oprot.writeFieldEnd()
    if self.common is not None:
      oprot.writeFieldBegin('common', TType.STRUCT, 2)
      self.common.write(oprot)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.state_spout_object is None:
      raise TProtocol.TProtocolException(message='Required field state_spout_object is unset!')
    if self.common is None:
      raise TProtocol.TProtocolException(message='Required field common is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.state_spout_object)
    value = (value * 31) ^ hash(self.common)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class StormTopology:
  """
  Attributes:
   - spouts
   - bolts
   - state_spouts
  """

  thrift_spec = (
    None, # 0
    (1, TType.MAP, 'spouts', (TType.STRING,None,TType.STRUCT,(SpoutSpec, SpoutSpec.thrift_spec)), None, ), # 1
    (2, TType.MAP, 'bolts', (TType.STRING,None,TType.STRUCT,(Bolt, Bolt.thrift_spec)), None, ), # 2
    (3, TType.MAP, 'state_spouts', (TType.STRING,None,TType.STRUCT,(StateSpoutSpec, StateSpoutSpec.thrift_spec)), None, ), # 3
  )

  def __init__(self, spouts=None, bolts=None, state_spouts=None,):
    self.spouts = spouts
    self.bolts = bolts
    self.state_spouts = state_spouts

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.spouts = {}
          (_ktype40, _vtype41, _size39 ) = iprot.readMapBegin()
          for _i43 in xrange(_size39):
            _key44 = iprot.readString().decode('utf-8')
            _val45 = SpoutSpec()
            _val45.read(iprot)
            self.spouts[_key44] = _val45
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.MAP:
          self.bolts = {}
          (_ktype47, _vtype48, _size46 ) = iprot.readMapBegin()
          for _i50 in xrange(_size46):
            _key51 = iprot.readString().decode('utf-8')
            _val52 = Bolt()
            _val52.read(iprot)
            self.bolts[_key51] = _val52
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.MAP:
          self.state_spouts = {}
          (_ktype54, _vtype55, _size53 ) = iprot.readMapBegin()
          for _i57 in xrange(_size53):
            _key58 = iprot.readString().decode('utf-8')
            _val59 = StateSpoutSpec()
            _val59.read(iprot)
            self.state_spouts[_key58] = _val59
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('StormTopology')
    if self.spouts is not None:
      oprot.writeFieldBegin('spouts', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.spouts))
      for kiter60,viter61 in self.spouts.items():
        oprot.writeString(kiter60.encode('utf-8'))
        viter61.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.bolts is not None:
      oprot.writeFieldBegin('bolts', TType.MAP, 2)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.bolts))
      for kiter62,viter63 in self.bolts.items():
        oprot.writeString(kiter62.encode('utf-8'))
        viter63.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    if self.state_spouts is not None:
      oprot.writeFieldBegin('state_spouts', TType.MAP, 3)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.state_spouts))
      for kiter64,viter65 in self.state_spouts.items():
        oprot.writeString(kiter64.encode('utf-8'))
        viter65.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.spouts is None:
      raise TProtocol.TProtocolException(message='Required field spouts is unset!')
    if self.bolts is None:
      raise TProtocol.TProtocolException(message='Required field bolts is unset!')
    if self.state_spouts is None:
      raise TProtocol.TProtocolException(message='Required field state_spouts is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.spouts)
    value = (value * 31) ^ hash(self.bolts)
    value = (value * 31) ^ hash(self.state_spouts)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
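# Illustrative sketch, not generated code: assembling a minimal StormTopology
# and round-tripping it through the plain (non-accelerated) binary protocol.
# TMemoryBuffer and TBinaryProtocol are standard Thrift classes imported at
# the top of this module; all component names and payloads are invented.
def _example_topology_roundtrip():
  common = ComponentCommon(inputs={}, streams={}, parallelism_hint=1)
  spout = SpoutSpec(spout_object=ComponentObject(serialized_java='...'), common=common)
  bolt = Bolt(bolt_object=ComponentObject(serialized_java='...'), common=common)
  topo = StormTopology(spouts={u'spout': spout}, bolts={u'bolt': bolt}, state_spouts={})
  buf = TTransport.TMemoryBuffer()
  topo.write(TBinaryProtocol.TBinaryProtocol(buf))
  decoded = StormTopology()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
  assert decoded == topo  # __eq__ compares the structs field by field
  return decoded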
class AlreadyAliveException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('AlreadyAliveException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class NotAliveException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('NotAliveException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class AuthorizationException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('AuthorizationException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
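# Illustrative sketch, not generated code: these generated exception types all
# subclass TException, so a hypothetical Nimbus client can raise and catch
# them like ordinary Python exceptions. The message text is invented.
def _example_raise_not_alive(topology_name):
  try:
    raise NotAliveException(msg=u'%s is not alive' % topology_name)
  except NotAliveException as e:
    return e.msg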
class InvalidTopologyException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('InvalidTopologyException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class TopologyAssignException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TopologyAssignException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class KeyNotFoundException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('KeyNotFoundException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class KeyAlreadyExistsException(TException):
  """
  Attributes:
   - msg
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'msg', None, None, ), # 1
  )

  def __init__(self, msg=None,):
    self.msg = msg

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.msg = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('KeyAlreadyExistsException')
    if self.msg is not None:
      oprot.writeFieldBegin('msg', TType.STRING, 1)
      oprot.writeString(self.msg.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.msg is None:
      raise TProtocol.TProtocolException(message='Required field msg is unset!')
    return

  def __str__(self):
    return repr(self)

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.msg)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class TopologySummary:
  """
  Attributes:
   - id
   - name
   - status
   - uptimeSecs
   - numTasks
   - numWorkers
   - errorInfo
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'id', None, None, ), # 1
    (2, TType.STRING, 'name', None, None, ), # 2
    (3, TType.STRING, 'status', None, None, ), # 3
    (4, TType.I32, 'uptimeSecs', None, None, ), # 4
    (5, TType.I32, 'numTasks', None, None, ), # 5
    (6, TType.I32, 'numWorkers', None, None, ), # 6
    (7, TType.STRING, 'errorInfo', None, None, ), # 7
  )

  def __init__(self, id=None, name=None, status=None, uptimeSecs=None, numTasks=None, numWorkers=None, errorInfo=None,):
    self.id = id
    self.name = name
    self.status = status
    self.uptimeSecs = uptimeSecs
    self.numTasks = numTasks
    self.numWorkers = numWorkers
    self.errorInfo = errorInfo

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.id = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.name = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.status = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I32:
          self.uptimeSecs = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.numTasks = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.numWorkers = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.STRING:
          self.errorInfo = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TopologySummary')
    if self.id is not None:
      oprot.writeFieldBegin('id', TType.STRING, 1)
      oprot.writeString(self.id.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.name is not None:
      oprot.writeFieldBegin('name', TType.STRING, 2)
      oprot.writeString(self.name.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.status is not None:
      oprot.writeFieldBegin('status', TType.STRING, 3)
      oprot.writeString(self.status.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.uptimeSecs is not None:
      oprot.writeFieldBegin('uptimeSecs', TType.I32, 4)
      oprot.writeI32(self.uptimeSecs)
      oprot.writeFieldEnd()
    if self.numTasks is not None:
      oprot.writeFieldBegin('numTasks', TType.I32, 5)
      oprot.writeI32(self.numTasks)
      oprot.writeFieldEnd()
    if self.numWorkers is not None:
      oprot.writeFieldBegin('numWorkers', TType.I32, 6)
      oprot.writeI32(self.numWorkers)
      oprot.writeFieldEnd()
    if self.errorInfo is not None:
      oprot.writeFieldBegin('errorInfo', TType.STRING, 7)
      oprot.writeString(self.errorInfo.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.id is None:
      raise TProtocol.TProtocolException(message='Required field id is unset!')
    if self.name is None:
      raise TProtocol.TProtocolException(message='Required field name is unset!')
    if self.status is None:
      raise TProtocol.TProtocolException(message='Required field status is unset!')
    if self.uptimeSecs is None:
      raise TProtocol.TProtocolException(message='Required field uptimeSecs is unset!')
    if self.numTasks is None:
      raise TProtocol.TProtocolException(message='Required field numTasks is unset!')
    if self.numWorkers is None:
      raise TProtocol.TProtocolException(message='Required field numWorkers is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.id)
    value = (value * 31) ^ hash(self.name)
    value = (value * 31) ^ hash(self.status)
    value = (value * 31) ^ hash(self.uptimeSecs)
    value = (value * 31) ^ hash(self.numTasks)
    value = (value * 31) ^ hash(self.numWorkers)
    value = (value * 31) ^ hash(self.errorInfo)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class SupervisorSummary:
  """
  Attributes:
   - host
   - supervisorId
   - uptimeSecs
   - numWorkers
   - numUsedWorkers
   - version
   - buildTs
   - port
   - errorMessage
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'host', None, None, ), # 1
    (2, TType.STRING, 'supervisorId', None, None, ), # 2
    (3, TType.I32, 'uptimeSecs', None, None, ), # 3
    (4, TType.I32, 'numWorkers', None, None, ), # 4
    (5, TType.I32, 'numUsedWorkers', None, None, ), # 5
    (6, TType.STRING, 'version', None, None, ), # 6
    (7, TType.STRING, 'buildTs', None, None, ), # 7
    (8, TType.I32, 'port', None, None, ), # 8
    (9, TType.STRING, 'errorMessage', None, None, ), # 9
  )

  def __init__(self, host=None, supervisorId=None, uptimeSecs=None, numWorkers=None, numUsedWorkers=None, version=None, buildTs=None, port=None, errorMessage=None,):
    self.host = host
    self.supervisorId = supervisorId
    self.uptimeSecs = uptimeSecs
    self.numWorkers = numWorkers
    self.numUsedWorkers = numUsedWorkers
    self.version = version
    self.buildTs = buildTs
    self.port = port
    self.errorMessage = errorMessage

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.host = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.supervisorId = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.uptimeSecs = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I32:
          self.numWorkers = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.numUsedWorkers = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.STRING:
          self.version = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.STRING:
          self.buildTs = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 8:
        if ftype == TType.I32:
          self.port = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 9:
        if ftype == TType.STRING:
          self.errorMessage = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('SupervisorSummary')
    if self.host is not None:
      oprot.writeFieldBegin('host', TType.STRING, 1)
      oprot.writeString(self.host.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.supervisorId is not None:
      oprot.writeFieldBegin('supervisorId', TType.STRING, 2)
      oprot.writeString(self.supervisorId.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.uptimeSecs is not None:
      oprot.writeFieldBegin('uptimeSecs', TType.I32, 3)
      oprot.writeI32(self.uptimeSecs)
      oprot.writeFieldEnd()
    if self.numWorkers is not None:
      oprot.writeFieldBegin('numWorkers', TType.I32, 4)
      oprot.writeI32(self.numWorkers)
      oprot.writeFieldEnd()
    if self.numUsedWorkers is not None:
      oprot.writeFieldBegin('numUsedWorkers', TType.I32, 5)
      oprot.writeI32(self.numUsedWorkers)
      oprot.writeFieldEnd()
    if self.version is not None:
      oprot.writeFieldBegin('version', TType.STRING, 6)
      oprot.writeString(self.version.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.buildTs is not None:
      oprot.writeFieldBegin('buildTs', TType.STRING, 7)
      oprot.writeString(self.buildTs.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.port is not None:
      oprot.writeFieldBegin('port', TType.I32, 8)
      oprot.writeI32(self.port)
      oprot.writeFieldEnd()
    if self.errorMessage is not None:
      oprot.writeFieldBegin('errorMessage', TType.STRING, 9)
      oprot.writeString(self.errorMessage.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.host is None:
      raise TProtocol.TProtocolException(message='Required field host is unset!')
    if self.supervisorId is None:
      raise TProtocol.TProtocolException(message='Required field supervisorId is unset!')
    if self.uptimeSecs is None:
      raise TProtocol.TProtocolException(message='Required field uptimeSecs is unset!')
    if self.numWorkers is None:
      raise TProtocol.TProtocolException(message='Required field numWorkers is unset!')
    if self.numUsedWorkers is None:
      raise TProtocol.TProtocolException(message='Required field numUsedWorkers is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.host)
    value = (value * 31) ^ hash(self.supervisorId)
    value = (value * 31) ^ hash(self.uptimeSecs)
    value = (value * 31) ^ hash(self.numWorkers)
    value = (value * 31) ^ hash(self.numUsedWorkers)
    value = (value * 31) ^ hash(self.version)
    value = (value * 31) ^ hash(self.buildTs)
    value = (value * 31) ^ hash(self.port)
    value = (value * 31) ^ hash(self.errorMessage)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class NimbusStat:
  """
  Attributes:
   - host
   - uptimeSecs
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'host', None, None, ), # 1
    (2, TType.STRING, 'uptimeSecs', None, None, ), # 2
  )

  def __init__(self, host=None, uptimeSecs=None,):
    self.host = host
    self.uptimeSecs = uptimeSecs

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRING:
          self.host = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.uptimeSecs = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('NimbusStat')
    if self.host is not None:
      oprot.writeFieldBegin('host', TType.STRING, 1)
      oprot.writeString(self.host.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.uptimeSecs is not None:
      oprot.writeFieldBegin('uptimeSecs', TType.STRING, 2)
      oprot.writeString(self.uptimeSecs.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.host is None:
      raise TProtocol.TProtocolException(message='Required field host is unset!')
    if self.uptimeSecs is None:
      raise TProtocol.TProtocolException(message='Required field uptimeSecs is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.host)
    value = (value * 31) ^ hash(self.uptimeSecs)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
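# Illustrative sketch, not generated code: a single-struct round trip through
# TMemoryBuffer, the in-memory transport shipped with Thrift. Note that
# uptimeSecs is a string field on NimbusStat, unlike the i32 uptime fields on
# the summary structs above. The host and uptime values are invented.
def _example_nimbus_stat_roundtrip():
  stat = NimbusStat(host=u'nimbus-1', uptimeSecs=u'3600')
  buf = TTransport.TMemoryBuffer()
  stat.write(TBinaryProtocol.TBinaryProtocol(buf))
  decoded = NimbusStat()
  decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
  assert decoded == stat
  return decoded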
class NimbusSummary:
  """
  Attributes:
   - nimbusMaster
   - nimbusSlaves
   - supervisorNum
   - totalPortNum
   - usedPortNum
   - freePortNum
   - version
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'nimbusMaster', (NimbusStat, NimbusStat.thrift_spec), None, ), # 1
    (2, TType.LIST, 'nimbusSlaves', (TType.STRUCT,(NimbusStat, NimbusStat.thrift_spec)), None, ), # 2
    (3, TType.I32, 'supervisorNum', None, None, ), # 3
    (4, TType.I32, 'totalPortNum', None, None, ), # 4
    (5, TType.I32, 'usedPortNum', None, None, ), # 5
    (6, TType.I32, 'freePortNum', None, None, ), # 6
    (7, TType.STRING, 'version', None, None, ), # 7
  )

  def __init__(self, nimbusMaster=None, nimbusSlaves=None, supervisorNum=None, totalPortNum=None, usedPortNum=None, freePortNum=None, version=None,):
    self.nimbusMaster = nimbusMaster
    self.nimbusSlaves = nimbusSlaves
    self.supervisorNum = supervisorNum
    self.totalPortNum = totalPortNum
    self.usedPortNum = usedPortNum
    self.freePortNum = freePortNum
    self.version = version

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.nimbusMaster = NimbusStat()
          self.nimbusMaster.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          self.nimbusSlaves = []
          (_etype69, _size66) = iprot.readListBegin()
          for _i70 in xrange(_size66):
            _elem71 = NimbusStat()
            _elem71.read(iprot)
            self.nimbusSlaves.append(_elem71)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.I32:
          self.supervisorNum = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.I32:
          self.totalPortNum = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 5:
        if ftype == TType.I32:
          self.usedPortNum = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 6:
        if ftype == TType.I32:
          self.freePortNum = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 7:
        if ftype == TType.STRING:
          self.version = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('NimbusSummary')
    if self.nimbusMaster is not None:
      oprot.writeFieldBegin('nimbusMaster', TType.STRUCT, 1)
      self.nimbusMaster.write(oprot)
      oprot.writeFieldEnd()
    if self.nimbusSlaves is not None:
      oprot.writeFieldBegin('nimbusSlaves', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.nimbusSlaves))
      for iter72 in self.nimbusSlaves:
        iter72.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.supervisorNum is not None:
      oprot.writeFieldBegin('supervisorNum', TType.I32, 3)
      oprot.writeI32(self.supervisorNum)
      oprot.writeFieldEnd()
    if self.totalPortNum is not None:
      oprot.writeFieldBegin('totalPortNum', TType.I32, 4)
      oprot.writeI32(self.totalPortNum)
      oprot.writeFieldEnd()
    if self.usedPortNum is not None:
      oprot.writeFieldBegin('usedPortNum', TType.I32, 5)
      oprot.writeI32(self.usedPortNum)
      oprot.writeFieldEnd()
    if self.freePortNum is not None:
      oprot.writeFieldBegin('freePortNum', TType.I32, 6)
      oprot.writeI32(self.freePortNum)
      oprot.writeFieldEnd()
    if self.version is not None:
      oprot.writeFieldBegin('version', TType.STRING, 7)
      oprot.writeString(self.version.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.nimbusMaster is None:
      raise TProtocol.TProtocolException(message='Required field nimbusMaster is unset!')
    if self.nimbusSlaves is None:
      raise TProtocol.TProtocolException(message='Required field nimbusSlaves is unset!')
    if self.supervisorNum is None:
      raise TProtocol.TProtocolException(message='Required field supervisorNum is unset!')
    if self.totalPortNum is None:
      raise TProtocol.TProtocolException(message='Required field totalPortNum is unset!')
    if self.usedPortNum is None:
      raise TProtocol.TProtocolException(message='Required field usedPortNum is unset!')
    if self.freePortNum is None:
      raise TProtocol.TProtocolException(message='Required field freePortNum is unset!')
    if self.version is None:
      raise TProtocol.TProtocolException(message='Required field version is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.nimbusMaster)
    value = (value * 31) ^ hash(self.nimbusSlaves)
    value = (value * 31) ^ hash(self.supervisorNum)
    value = (value * 31) ^ hash(self.totalPortNum)
    value = (value * 31) ^ hash(self.usedPortNum)
    value = (value * 31) ^ hash(self.freePortNum)
    value = (value * 31) ^ hash(self.version)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
class ClusterSummary:
  """
  Attributes:
   - nimbus
   - supervisors
   - topologies
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRUCT, 'nimbus', (NimbusSummary, NimbusSummary.thrift_spec), None, ), # 1
    (2, TType.LIST, 'supervisors', (TType.STRUCT,(SupervisorSummary, SupervisorSummary.thrift_spec)), None, ), # 2
    (3, TType.LIST, 'topologies', (TType.STRUCT,(TopologySummary, TopologySummary.thrift_spec)), None, ), # 3
  )

  def __init__(self, nimbus=None, supervisors=None, topologies=None,):
    self.nimbus = nimbus
    self.supervisors = supervisors
    self.topologies = topologies

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.STRUCT:
          self.nimbus = NimbusSummary()
          self.nimbus.read(iprot)
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.LIST:
          self.supervisors = []
          (_etype76, _size73) = iprot.readListBegin()
          for _i77 in xrange(_size73):
            _elem78 = SupervisorSummary()
            _elem78.read(iprot)
            self.supervisors.append(_elem78)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.LIST:
          self.topologies = []
          (_etype82, _size79) = iprot.readListBegin()
          for _i83 in xrange(_size79):
            _elem84 = TopologySummary()
            _elem84.read(iprot)
            self.topologies.append(_elem84)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('ClusterSummary')
    if self.nimbus is not None:
      oprot.writeFieldBegin('nimbus', TType.STRUCT, 1)
      self.nimbus.write(oprot)
      oprot.writeFieldEnd()
    if self.supervisors is not None:
      oprot.writeFieldBegin('supervisors', TType.LIST, 2)
      oprot.writeListBegin(TType.STRUCT, len(self.supervisors))
      for iter85 in self.supervisors:
        iter85.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    if self.topologies is not None:
      oprot.writeFieldBegin('topologies', TType.LIST, 3)
      oprot.writeListBegin(TType.STRUCT, len(self.topologies))
      for iter86 in self.topologies:
        iter86.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.nimbus is None:
      raise TProtocol.TProtocolException(message='Required field nimbus is unset!')
    if self.supervisors is None:
      raise TProtocol.TProtocolException(message='Required field supervisors is unset!')
    if self.topologies is None:
      raise TProtocol.TProtocolException(message='Required field topologies is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.nimbus)
    value = (value * 31) ^ hash(self.supervisors)
    value = (value * 31) ^ hash(self.topologies)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
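# Illustrative sketch, not generated code: every ClusterSummary field is
# required, and write() never calls validate() for you, so validating before
# serialization catches unset fields early. All values below are invented.
def _example_cluster_summary():
  master = NimbusStat(host=u'nimbus-1', uptimeSecs=u'3600')
  nimbus = NimbusSummary(nimbusMaster=master, nimbusSlaves=[], supervisorNum=2,
                         totalPortNum=8, usedPortNum=3, freePortNum=5,
                         version=u'unknown')
  summary = ClusterSummary(nimbus=nimbus, supervisors=[], topologies=[])
  summary.validate()  # raises TProtocol.TProtocolException if a field were None
  return summary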
class TaskComponent:
  """
  Attributes:
   - taskId
   - component
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'taskId', None, None, ), # 1
    (2, TType.STRING, 'component', None, None, ), # 2
  )

  def __init__(self, taskId=None, component=None,):
    self.taskId = taskId
    self.component = component

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.taskId = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.STRING:
          self.component = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TaskComponent')
    if self.taskId is not None:
      oprot.writeFieldBegin('taskId', TType.I32, 1)
      oprot.writeI32(self.taskId)
      oprot.writeFieldEnd()
    if self.component is not None:
      oprot.writeFieldBegin('component', TType.STRING, 2)
      oprot.writeString(self.component.encode('utf-8'))
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.taskId is None:
      raise TProtocol.TProtocolException(message='Required field taskId is unset!')
    if self.component is None:
      raise TProtocol.TProtocolException(message='Required field component is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.taskId)
    value = (value * 31) ^ hash(self.component)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)

class WorkerSummary:
  """
  Attributes:
   - port
   - uptime
   - topology
   - tasks
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'port', None, None, ), # 1
    (2, TType.I32, 'uptime', None, None, ), # 2
    (3, TType.STRING, 'topology', None, None, ), # 3
    (4, TType.LIST, 'tasks', (TType.STRUCT,(TaskComponent, TaskComponent.thrift_spec)), None, ), # 4
  )

  def __init__(self, port=None, uptime=None, topology=None, tasks=None,):
    self.port = port
    self.uptime = uptime
    self.topology = topology
    self.tasks = tasks

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.port = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.uptime = iprot.readI32()
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.topology = iprot.readString().decode('utf-8')
        else:
          iprot.skip(ftype)
      elif fid == 4:
        if ftype == TType.LIST:
          self.tasks = []
          (_etype90, _size87) = iprot.readListBegin()
          for _i91 in xrange(_size87):
            _elem92 = TaskComponent()
            _elem92.read(iprot)
            self.tasks.append(_elem92)
          iprot.readListEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('WorkerSummary')
    if self.port is not None:
      oprot.writeFieldBegin('port', TType.I32, 1)
      oprot.writeI32(self.port)
      oprot.writeFieldEnd()
    if self.uptime is not None:
      oprot.writeFieldBegin('uptime', TType.I32, 2)
      oprot.writeI32(self.uptime)
      oprot.writeFieldEnd()
    if self.topology is not None:
      oprot.writeFieldBegin('topology', TType.STRING, 3)
      oprot.writeString(self.topology.encode('utf-8'))
      oprot.writeFieldEnd()
    if self.tasks is not None:
      oprot.writeFieldBegin('tasks', TType.LIST, 4)
      oprot.writeListBegin(TType.STRUCT, len(self.tasks))
      for iter93 in self.tasks:
        iter93.write(oprot)
      oprot.writeListEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.port is None:
      raise TProtocol.TProtocolException(message='Required field port is unset!')
    if self.uptime is None:
      raise TProtocol.TProtocolException(message='Required field uptime is unset!')
    if self.topology is None:
      raise TProtocol.TProtocolException(message='Required field topology is unset!')
    if self.tasks is None:
      raise TProtocol.TProtocolException(message='Required field tasks is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.port)
    value = (value * 31) ^ hash(self.uptime)
    value = (value * 31) ^ hash(self.topology)
    value = (value * 31) ^ hash(self.tasks)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
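# Illustrative sketch, not generated code: WorkerSummary nests a list of
# TaskComponent entries; all port numbers, ids, and names below are invented.
def _example_worker_summary():
  tasks = [TaskComponent(taskId=1, component=u'spout'),
           TaskComponent(taskId=2, component=u'split-bolt')]
  worker = WorkerSummary(port=6800, uptime=120, topology=u'wordcount-1-1', tasks=tasks)
  worker.validate()  # all four fields are required
  return worker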
class MetricWindow:
  """
  Attributes:
   - metricWindow
  """

  thrift_spec = (
    None, # 0
    (1, TType.MAP, 'metricWindow', (TType.I32,None,TType.DOUBLE,None), None, ), # 1
  )

  def __init__(self, metricWindow=None,):
    self.metricWindow = metricWindow

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.MAP:
          self.metricWindow = {}
          (_ktype95, _vtype96, _size94 ) = iprot.readMapBegin()
          for _i98 in xrange(_size94):
            _key99 = iprot.readI32()
            _val100 = iprot.readDouble()
            self.metricWindow[_key99] = _val100
          iprot.readMapEnd()
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('MetricWindow')
    if self.metricWindow is not None:
      oprot.writeFieldBegin('metricWindow', TType.MAP, 1)
      oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.metricWindow))
      for kiter101,viter102 in self.metricWindow.items():
        oprot.writeI32(kiter101)
        oprot.writeDouble(viter102)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.metricWindow is None:
      raise TProtocol.TProtocolException(message='Required field metricWindow is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.metricWindow)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)
iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('MetricWindow') if self.metricWindow is not None: oprot.writeFieldBegin('metricWindow', TType.MAP, 1) oprot.writeMapBegin(TType.I32, TType.DOUBLE, len(self.metricWindow)) for kiter101,viter102 in self.metricWindow.items(): oprot.writeI32(kiter101) oprot.writeDouble(viter102) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.metricWindow is None: raise TProtocol.TProtocolException(message='Required field metricWindow is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.metricWindow) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class MetricSnapshot: """ Attributes: - metricId - ts - metricType - longValue - doubleValue - m1 - m5 - m15 - mean - min - max - p50 - p75 - p95 - p98 - p99 - p999 - stddev - points - pointSize """ thrift_spec = ( None, # 0 (1, TType.I64, 'metricId', None, None, ), # 1 (2, TType.I64, 'ts', None, None, ), # 2 (3, TType.I32, 'metricType', None, None, ), # 3 (4, TType.I64, 'longValue', None, None, ), # 4 (5, TType.DOUBLE, 'doubleValue', None, None, ), # 5 (6, TType.DOUBLE, 'm1', None, None, ), # 6 (7, TType.DOUBLE, 'm5', None, None, ), # 7 (8, TType.DOUBLE, 'm15', None, None, ), # 8 (9, TType.DOUBLE, 'mean', None, None, ), # 9 (10, TType.I64, 'min', None, None, ), # 10 (11, TType.I64, 'max', None, None, ), # 11 (12, TType.DOUBLE, 'p50', None, None, ), # 12 (13, TType.DOUBLE, 'p75', None, None, ), # 13 (14, TType.DOUBLE, 'p95', None, None, ), # 14 (15, TType.DOUBLE, 'p98', None, None, ), # 15 (16, TType.DOUBLE, 'p99', None, None, ), # 16 (17, TType.DOUBLE, 'p999', None, None, ), # 17 (18, TType.DOUBLE, 'stddev', None, None, ), # 18 (19, TType.STRING, 'points', None, None, ), # 19 (20, TType.I32, 'pointSize', None, None, ), # 20 ) def __init__(self, metricId=None, ts=None, metricType=None, longValue=None, doubleValue=None, m1=None, m5=None, m15=None, mean=None, min=None, max=None, p50=None, p75=None, p95=None, p98=None, p99=None, p999=None, stddev=None, points=None, pointSize=None,): self.metricId = metricId self.ts = ts self.metricType = metricType self.longValue = longValue self.doubleValue = doubleValue self.m1 = m1 self.m5 = m5 self.m15 = m15 self.mean = mean self.min = min self.max = max self.p50 = p50 self.p75 = p75 self.p95 = p95 self.p98 = p98 self.p99 = p99 self.p999 = p999 self.stddev = stddev self.points = points self.pointSize = pointSize def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.metricId = iprot.readI64(); else: iprot.skip(ftype) elif fid == 2: if ftype == 
TType.I64: self.ts = iprot.readI64(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.metricType = iprot.readI32(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I64: self.longValue = iprot.readI64(); else: iprot.skip(ftype) elif fid == 5: if ftype == TType.DOUBLE: self.doubleValue = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.DOUBLE: self.m1 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 7: if ftype == TType.DOUBLE: self.m5 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 8: if ftype == TType.DOUBLE: self.m15 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 9: if ftype == TType.DOUBLE: self.mean = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 10: if ftype == TType.I64: self.min = iprot.readI64(); else: iprot.skip(ftype) elif fid == 11: if ftype == TType.I64: self.max = iprot.readI64(); else: iprot.skip(ftype) elif fid == 12: if ftype == TType.DOUBLE: self.p50 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 13: if ftype == TType.DOUBLE: self.p75 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 14: if ftype == TType.DOUBLE: self.p95 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 15: if ftype == TType.DOUBLE: self.p98 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 16: if ftype == TType.DOUBLE: self.p99 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 17: if ftype == TType.DOUBLE: self.p999 = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 18: if ftype == TType.DOUBLE: self.stddev = iprot.readDouble(); else: iprot.skip(ftype) elif fid == 19: if ftype == TType.STRING: self.points = iprot.readString(); else: iprot.skip(ftype) elif fid == 20: if ftype == TType.I32: self.pointSize = iprot.readI32(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('MetricSnapshot') if self.metricId is not None: oprot.writeFieldBegin('metricId', TType.I64, 1) oprot.writeI64(self.metricId) oprot.writeFieldEnd() if self.ts is not None: oprot.writeFieldBegin('ts', TType.I64, 2) oprot.writeI64(self.ts) oprot.writeFieldEnd() if self.metricType is not None: oprot.writeFieldBegin('metricType', TType.I32, 3) oprot.writeI32(self.metricType) oprot.writeFieldEnd() if self.longValue is not None: oprot.writeFieldBegin('longValue', TType.I64, 4) oprot.writeI64(self.longValue) oprot.writeFieldEnd() if self.doubleValue is not None: oprot.writeFieldBegin('doubleValue', TType.DOUBLE, 5) oprot.writeDouble(self.doubleValue) oprot.writeFieldEnd() if self.m1 is not None: oprot.writeFieldBegin('m1', TType.DOUBLE, 6) oprot.writeDouble(self.m1) oprot.writeFieldEnd() if self.m5 is not None: oprot.writeFieldBegin('m5', TType.DOUBLE, 7) oprot.writeDouble(self.m5) oprot.writeFieldEnd() if self.m15 is not None: oprot.writeFieldBegin('m15', TType.DOUBLE, 8) oprot.writeDouble(self.m15) oprot.writeFieldEnd() if self.mean is not None: oprot.writeFieldBegin('mean', TType.DOUBLE, 9) oprot.writeDouble(self.mean) oprot.writeFieldEnd() if self.min is not None: oprot.writeFieldBegin('min', TType.I64, 10) oprot.writeI64(self.min) oprot.writeFieldEnd() if self.max is not None: oprot.writeFieldBegin('max', TType.I64, 11) oprot.writeI64(self.max) oprot.writeFieldEnd() if self.p50 is not None: 
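    # Note the generated write() pattern used for every field below: a field
    # is emitted only when it is not None, even if validate() would treat it
    # as required. A snapshot written without its percentile fields simply
    # omits those field ids on the wire, and read() on the other side leaves
    # them as None.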
oprot.writeFieldBegin('p50', TType.DOUBLE, 12) oprot.writeDouble(self.p50) oprot.writeFieldEnd() if self.p75 is not None: oprot.writeFieldBegin('p75', TType.DOUBLE, 13) oprot.writeDouble(self.p75) oprot.writeFieldEnd() if self.p95 is not None: oprot.writeFieldBegin('p95', TType.DOUBLE, 14) oprot.writeDouble(self.p95) oprot.writeFieldEnd() if self.p98 is not None: oprot.writeFieldBegin('p98', TType.DOUBLE, 15) oprot.writeDouble(self.p98) oprot.writeFieldEnd() if self.p99 is not None: oprot.writeFieldBegin('p99', TType.DOUBLE, 16) oprot.writeDouble(self.p99) oprot.writeFieldEnd() if self.p999 is not None: oprot.writeFieldBegin('p999', TType.DOUBLE, 17) oprot.writeDouble(self.p999) oprot.writeFieldEnd() if self.stddev is not None: oprot.writeFieldBegin('stddev', TType.DOUBLE, 18) oprot.writeDouble(self.stddev) oprot.writeFieldEnd() if self.points is not None: oprot.writeFieldBegin('points', TType.STRING, 19) oprot.writeString(self.points) oprot.writeFieldEnd() if self.pointSize is not None: oprot.writeFieldBegin('pointSize', TType.I32, 20) oprot.writeI32(self.pointSize) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.metricId is None: raise TProtocol.TProtocolException(message='Required field metricId is unset!') if self.ts is None: raise TProtocol.TProtocolException(message='Required field ts is unset!') if self.metricType is None: raise TProtocol.TProtocolException(message='Required field metricType is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.metricId) value = (value * 31) ^ hash(self.ts) value = (value * 31) ^ hash(self.metricType) value = (value * 31) ^ hash(self.longValue) value = (value * 31) ^ hash(self.doubleValue) value = (value * 31) ^ hash(self.m1) value = (value * 31) ^ hash(self.m5) value = (value * 31) ^ hash(self.m15) value = (value * 31) ^ hash(self.mean) value = (value * 31) ^ hash(self.min) value = (value * 31) ^ hash(self.max) value = (value * 31) ^ hash(self.p50) value = (value * 31) ^ hash(self.p75) value = (value * 31) ^ hash(self.p95) value = (value * 31) ^ hash(self.p98) value = (value * 31) ^ hash(self.p99) value = (value * 31) ^ hash(self.p999) value = (value * 31) ^ hash(self.stddev) value = (value * 31) ^ hash(self.points) value = (value * 31) ^ hash(self.pointSize) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class MetricInfo: """ Attributes: - metrics """ thrift_spec = ( None, # 0 (1, TType.MAP, 'metrics', (TType.STRING,None,TType.MAP,(TType.I32,None,TType.STRUCT,(MetricSnapshot, MetricSnapshot.thrift_spec))), None, ), # 1 ) def __init__(self, metrics=None,): self.metrics = metrics def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.MAP: self.metrics = {} (_ktype104, _vtype105, _size103 ) = iprot.readMapBegin() for _i107 in xrange(_size103): _key108 = iprot.readString().decode('utf-8') _val109 = {} (_ktype111, _vtype112, _size110 ) 
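          # MetricInfo.metrics is a two-level map:
          #   metric name -> (window size -> MetricSnapshot)
          # Shape sketch only; the metric name, window, and values are made up:
          #
          #   info = MetricInfo(metrics={
          #       'TupleLifeCycle': {60: MetricSnapshot(metricId=1,
          #                                             ts=1453113600,
          #                                             metricType=1)},
          #   })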
= iprot.readMapBegin() for _i114 in xrange(_size110): _key115 = iprot.readI32(); _val116 = MetricSnapshot() _val116.read(iprot) _val109[_key115] = _val116 iprot.readMapEnd() self.metrics[_key108] = _val109 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('MetricInfo') if self.metrics is not None: oprot.writeFieldBegin('metrics', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.metrics)) for kiter117,viter118 in self.metrics.items(): oprot.writeString(kiter117.encode('utf-8')) oprot.writeMapBegin(TType.I32, TType.STRUCT, len(viter118)) for kiter119,viter120 in viter118.items(): oprot.writeI32(kiter119) viter120.write(oprot) oprot.writeMapEnd() oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.metrics) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class SupervisorWorkers: """ Attributes: - supervisor - workers - workerMetric """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'supervisor', (SupervisorSummary, SupervisorSummary.thrift_spec), None, ), # 1 (2, TType.LIST, 'workers', (TType.STRUCT,(WorkerSummary, WorkerSummary.thrift_spec)), None, ), # 2 (3, TType.MAP, 'workerMetric', (TType.STRING,None,TType.STRUCT,(MetricInfo, MetricInfo.thrift_spec)), None, ), # 3 ) def __init__(self, supervisor=None, workers=None, workerMetric=None,): self.supervisor = supervisor self.workers = workers self.workerMetric = workerMetric def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.supervisor = SupervisorSummary() self.supervisor.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.workers = [] (_etype124, _size121) = iprot.readListBegin() for _i125 in xrange(_size121): _elem126 = WorkerSummary() _elem126.read(iprot) self.workers.append(_elem126) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.workerMetric = {} (_ktype128, _vtype129, _size127 ) = iprot.readMapBegin() for _i131 in xrange(_size127): _key132 = iprot.readString().decode('utf-8') _val133 = MetricInfo() _val133.read(iprot) self.workerMetric[_key132] = _val133 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SupervisorWorkers') if 
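    # SupervisorWorkers bundles one SupervisorSummary with the WorkerSummary
    # list running on it, plus a per-worker MetricInfo map keyed by a string.
    # The key is written UTF-8 encoded below; in JStorm it is conventionally
    # a "topologyId:port"-style worker key (an assumption -- nothing in this
    # struct enforces a key format).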
self.supervisor is not None: oprot.writeFieldBegin('supervisor', TType.STRUCT, 1) self.supervisor.write(oprot) oprot.writeFieldEnd() if self.workers is not None: oprot.writeFieldBegin('workers', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.workers)) for iter134 in self.workers: iter134.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.workerMetric is not None: oprot.writeFieldBegin('workerMetric', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.workerMetric)) for kiter135,viter136 in self.workerMetric.items(): oprot.writeString(kiter135.encode('utf-8')) viter136.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.supervisor is None: raise TProtocol.TProtocolException(message='Required field supervisor is unset!') if self.workers is None: raise TProtocol.TProtocolException(message='Required field workers is unset!') if self.workerMetric is None: raise TProtocol.TProtocolException(message='Required field workerMetric is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.supervisor) value = (value * 31) ^ hash(self.workers) value = (value * 31) ^ hash(self.workerMetric) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ErrorInfo: """ Attributes: - error - errorTimeSecs - errorLevel - errorCode """ thrift_spec = ( None, # 0 (1, TType.STRING, 'error', None, None, ), # 1 (2, TType.I32, 'errorTimeSecs', None, None, ), # 2 (3, TType.STRING, 'errorLevel', None, None, ), # 3 (4, TType.I32, 'errorCode', None, None, ), # 4 ) def __init__(self, error=None, errorTimeSecs=None, errorLevel=None, errorCode=None,): self.error = error self.errorTimeSecs = errorTimeSecs self.errorLevel = errorLevel self.errorCode = errorCode def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.error = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.errorTimeSecs = iprot.readI32(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.errorLevel = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 4: if ftype == TType.I32: self.errorCode = iprot.readI32(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ErrorInfo') if self.error is not None: oprot.writeFieldBegin('error', TType.STRING, 1) oprot.writeString(self.error.encode('utf-8')) oprot.writeFieldEnd() if self.errorTimeSecs is not None: oprot.writeFieldBegin('errorTimeSecs', TType.I32, 2) oprot.writeI32(self.errorTimeSecs) oprot.writeFieldEnd() if self.errorLevel is not 
None: oprot.writeFieldBegin('errorLevel', TType.STRING, 3) oprot.writeString(self.errorLevel.encode('utf-8')) oprot.writeFieldEnd() if self.errorCode is not None: oprot.writeFieldBegin('errorCode', TType.I32, 4) oprot.writeI32(self.errorCode) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.error is None: raise TProtocol.TProtocolException(message='Required field error is unset!') if self.errorTimeSecs is None: raise TProtocol.TProtocolException(message='Required field errorTimeSecs is unset!') if self.errorLevel is None: raise TProtocol.TProtocolException(message='Required field errorLevel is unset!') if self.errorCode is None: raise TProtocol.TProtocolException(message='Required field errorCode is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.error) value = (value * 31) ^ hash(self.errorTimeSecs) value = (value * 31) ^ hash(self.errorLevel) value = (value * 31) ^ hash(self.errorCode) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ComponentSummary: """ Attributes: - name - parallel - type - taskIds - errors """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 (2, TType.I32, 'parallel', None, None, ), # 2 (3, TType.STRING, 'type', None, None, ), # 3 (4, TType.LIST, 'taskIds', (TType.I32,None), None, ), # 4 (5, TType.LIST, 'errors', (TType.STRUCT,(ErrorInfo, ErrorInfo.thrift_spec)), None, ), # 5 ) def __init__(self, name=None, parallel=None, type=None, taskIds=None, errors=None,): self.name = name self.parallel = parallel self.type = type self.taskIds = taskIds self.errors = errors def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.parallel = iprot.readI32(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.type = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.taskIds = [] (_etype140, _size137) = iprot.readListBegin() for _i141 in xrange(_size137): _elem142 = iprot.readI32(); self.taskIds.append(_elem142) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 5: if ftype == TType.LIST: self.errors = [] (_etype146, _size143) = iprot.readListBegin() for _i147 in xrange(_size143): _elem148 = ErrorInfo() _elem148.read(iprot) self.errors.append(_elem148) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ComponentSummary') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) 
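    # Illustrative ComponentSummary for a bolt (all values made up; 'type'
    # is a free-form string here, conventionally 'spout' or 'bolt'):
    #
    #   cs = ComponentSummary(name='word-counter', parallel=4, type='bolt',
    #                         taskIds=[3, 4, 5, 6])
    #
    # 'errors' is the one field validate() does not require, so it may be
    # left as None.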
oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() if self.parallel is not None: oprot.writeFieldBegin('parallel', TType.I32, 2) oprot.writeI32(self.parallel) oprot.writeFieldEnd() if self.type is not None: oprot.writeFieldBegin('type', TType.STRING, 3) oprot.writeString(self.type.encode('utf-8')) oprot.writeFieldEnd() if self.taskIds is not None: oprot.writeFieldBegin('taskIds', TType.LIST, 4) oprot.writeListBegin(TType.I32, len(self.taskIds)) for iter149 in self.taskIds: oprot.writeI32(iter149) oprot.writeListEnd() oprot.writeFieldEnd() if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 5) oprot.writeListBegin(TType.STRUCT, len(self.errors)) for iter150 in self.errors: iter150.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.name is None: raise TProtocol.TProtocolException(message='Required field name is unset!') if self.parallel is None: raise TProtocol.TProtocolException(message='Required field parallel is unset!') if self.type is None: raise TProtocol.TProtocolException(message='Required field type is unset!') if self.taskIds is None: raise TProtocol.TProtocolException(message='Required field taskIds is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.name) value = (value * 31) ^ hash(self.parallel) value = (value * 31) ^ hash(self.type) value = (value * 31) ^ hash(self.taskIds) value = (value * 31) ^ hash(self.errors) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class TaskSummary: """ Attributes: - taskId - uptime - status - host - port - errors """ thrift_spec = ( None, # 0 (1, TType.I32, 'taskId', None, None, ), # 1 (2, TType.I32, 'uptime', None, None, ), # 2 (3, TType.STRING, 'status', None, None, ), # 3 (4, TType.STRING, 'host', None, None, ), # 4 (5, TType.I32, 'port', None, None, ), # 5 (6, TType.LIST, 'errors', (TType.STRUCT,(ErrorInfo, ErrorInfo.thrift_spec)), None, ), # 6 ) def __init__(self, taskId=None, uptime=None, status=None, host=None, port=None, errors=None,): self.taskId = taskId self.uptime = uptime self.status = status self.host = host self.port = port self.errors = errors def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.taskId = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.uptime = iprot.readI32(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRING: self.status = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRING: self.host = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 5: if ftype == TType.I32: self.port = iprot.readI32(); else: iprot.skip(ftype) elif fid == 6: if ftype == TType.LIST: self.errors = [] (_etype154, _size151) = iprot.readListBegin() for _i155 in xrange(_size151): _elem156 = ErrorInfo() _elem156.read(iprot) 
self.errors.append(_elem156) iprot.readListEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TaskSummary') if self.taskId is not None: oprot.writeFieldBegin('taskId', TType.I32, 1) oprot.writeI32(self.taskId) oprot.writeFieldEnd() if self.uptime is not None: oprot.writeFieldBegin('uptime', TType.I32, 2) oprot.writeI32(self.uptime) oprot.writeFieldEnd() if self.status is not None: oprot.writeFieldBegin('status', TType.STRING, 3) oprot.writeString(self.status.encode('utf-8')) oprot.writeFieldEnd() if self.host is not None: oprot.writeFieldBegin('host', TType.STRING, 4) oprot.writeString(self.host.encode('utf-8')) oprot.writeFieldEnd() if self.port is not None: oprot.writeFieldBegin('port', TType.I32, 5) oprot.writeI32(self.port) oprot.writeFieldEnd() if self.errors is not None: oprot.writeFieldBegin('errors', TType.LIST, 6) oprot.writeListBegin(TType.STRUCT, len(self.errors)) for iter157 in self.errors: iter157.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.taskId is None: raise TProtocol.TProtocolException(message='Required field taskId is unset!') if self.uptime is None: raise TProtocol.TProtocolException(message='Required field uptime is unset!') if self.status is None: raise TProtocol.TProtocolException(message='Required field status is unset!') if self.host is None: raise TProtocol.TProtocolException(message='Required field host is unset!') if self.port is None: raise TProtocol.TProtocolException(message='Required field port is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.taskId) value = (value * 31) ^ hash(self.uptime) value = (value * 31) ^ hash(self.status) value = (value * 31) ^ hash(self.host) value = (value * 31) ^ hash(self.port) value = (value * 31) ^ hash(self.errors) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class TopologyMetric: """ Attributes: - topologyMetric - componentMetric - workerMetric - taskMetric - streamMetric - nettyMetric - compStreamMetric """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'topologyMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 1 (2, TType.STRUCT, 'componentMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 2 (3, TType.STRUCT, 'workerMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 3 (4, TType.STRUCT, 'taskMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 4 (5, TType.STRUCT, 'streamMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 5 (6, TType.STRUCT, 'nettyMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 6 (7, TType.STRUCT, 'compStreamMetric', (MetricInfo, MetricInfo.thrift_spec), None, ), # 7 ) def __init__(self, topologyMetric=None, componentMetric=None, workerMetric=None, taskMetric=None, streamMetric=None, nettyMetric=None, compStreamMetric=None,): self.topologyMetric = topologyMetric self.componentMetric = componentMetric self.workerMetric = workerMetric self.taskMetric = taskMetric 
self.streamMetric = streamMetric self.nettyMetric = nettyMetric self.compStreamMetric = compStreamMetric def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.topologyMetric = MetricInfo() self.topologyMetric.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.componentMetric = MetricInfo() self.componentMetric.read(iprot) else: iprot.skip(ftype) elif fid == 3: if ftype == TType.STRUCT: self.workerMetric = MetricInfo() self.workerMetric.read(iprot) else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.taskMetric = MetricInfo() self.taskMetric.read(iprot) else: iprot.skip(ftype) elif fid == 5: if ftype == TType.STRUCT: self.streamMetric = MetricInfo() self.streamMetric.read(iprot) else: iprot.skip(ftype) elif fid == 6: if ftype == TType.STRUCT: self.nettyMetric = MetricInfo() self.nettyMetric.read(iprot) else: iprot.skip(ftype) elif fid == 7: if ftype == TType.STRUCT: self.compStreamMetric = MetricInfo() self.compStreamMetric.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TopologyMetric') if self.topologyMetric is not None: oprot.writeFieldBegin('topologyMetric', TType.STRUCT, 1) self.topologyMetric.write(oprot) oprot.writeFieldEnd() if self.componentMetric is not None: oprot.writeFieldBegin('componentMetric', TType.STRUCT, 2) self.componentMetric.write(oprot) oprot.writeFieldEnd() if self.workerMetric is not None: oprot.writeFieldBegin('workerMetric', TType.STRUCT, 3) self.workerMetric.write(oprot) oprot.writeFieldEnd() if self.taskMetric is not None: oprot.writeFieldBegin('taskMetric', TType.STRUCT, 4) self.taskMetric.write(oprot) oprot.writeFieldEnd() if self.streamMetric is not None: oprot.writeFieldBegin('streamMetric', TType.STRUCT, 5) self.streamMetric.write(oprot) oprot.writeFieldEnd() if self.nettyMetric is not None: oprot.writeFieldBegin('nettyMetric', TType.STRUCT, 6) self.nettyMetric.write(oprot) oprot.writeFieldEnd() if self.compStreamMetric is not None: oprot.writeFieldBegin('compStreamMetric', TType.STRUCT, 7) self.compStreamMetric.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.topologyMetric is None: raise TProtocol.TProtocolException(message='Required field topologyMetric is unset!') if self.componentMetric is None: raise TProtocol.TProtocolException(message='Required field componentMetric is unset!') if self.workerMetric is None: raise TProtocol.TProtocolException(message='Required field workerMetric is unset!') if self.taskMetric is None: raise TProtocol.TProtocolException(message='Required field taskMetric is unset!') if self.streamMetric is None: raise TProtocol.TProtocolException(message='Required field streamMetric is unset!') if self.nettyMetric is None: raise TProtocol.TProtocolException(message='Required field nettyMetric is unset!') return def 
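  # TopologyMetric carries MetricInfo at successively finer granularities:
  # topology, component, worker, task, stream, netty, and component-stream.
  # validate() above requires all of them except compStreamMetric, so that
  # one field may legitimately stay None.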
__hash__(self): value = 17 value = (value * 31) ^ hash(self.topologyMetric) value = (value * 31) ^ hash(self.componentMetric) value = (value * 31) ^ hash(self.workerMetric) value = (value * 31) ^ hash(self.taskMetric) value = (value * 31) ^ hash(self.streamMetric) value = (value * 31) ^ hash(self.nettyMetric) value = (value * 31) ^ hash(self.compStreamMetric) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class TopologyInfo: """ Attributes: - topology - components - tasks - metrics """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'topology', (TopologySummary, TopologySummary.thrift_spec), None, ), # 1 (2, TType.LIST, 'components', (TType.STRUCT,(ComponentSummary, ComponentSummary.thrift_spec)), None, ), # 2 (3, TType.LIST, 'tasks', (TType.STRUCT,(TaskSummary, TaskSummary.thrift_spec)), None, ), # 3 (4, TType.STRUCT, 'metrics', (TopologyMetric, TopologyMetric.thrift_spec), None, ), # 4 ) def __init__(self, topology=None, components=None, tasks=None, metrics=None,): self.topology = topology self.components = components self.tasks = tasks self.metrics = metrics def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.topology = TopologySummary() self.topology.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.components = [] (_etype161, _size158) = iprot.readListBegin() for _i162 in xrange(_size158): _elem163 = ComponentSummary() _elem163.read(iprot) self.components.append(_elem163) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.LIST: self.tasks = [] (_etype167, _size164) = iprot.readListBegin() for _i168 in xrange(_size164): _elem169 = TaskSummary() _elem169.read(iprot) self.tasks.append(_elem169) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.metrics = TopologyMetric() self.metrics.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TopologyInfo') if self.topology is not None: oprot.writeFieldBegin('topology', TType.STRUCT, 1) self.topology.write(oprot) oprot.writeFieldEnd() if self.components is not None: oprot.writeFieldBegin('components', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.components)) for iter170 in self.components: iter170.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.tasks is not None: oprot.writeFieldBegin('tasks', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.tasks)) for iter171 in self.tasks: iter171.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.metrics is not None: oprot.writeFieldBegin('metrics', TType.STRUCT, 4) self.metrics.write(oprot) 
oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.topology is None: raise TProtocol.TProtocolException(message='Required field topology is unset!') if self.components is None: raise TProtocol.TProtocolException(message='Required field components is unset!') if self.tasks is None: raise TProtocol.TProtocolException(message='Required field tasks is unset!') if self.metrics is None: raise TProtocol.TProtocolException(message='Required field metrics is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.topology) value = (value * 31) ^ hash(self.components) value = (value * 31) ^ hash(self.tasks) value = (value * 31) ^ hash(self.metrics) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class WorkerUploadMetrics: """ Attributes: - topologyId - supervisorId - port - allMetrics """ thrift_spec = ( None, # 0 (1, TType.STRING, 'topologyId', None, None, ), # 1 (2, TType.STRING, 'supervisorId', None, None, ), # 2 (3, TType.I32, 'port', None, None, ), # 3 (4, TType.STRUCT, 'allMetrics', (MetricInfo, MetricInfo.thrift_spec), None, ), # 4 ) def __init__(self, topologyId=None, supervisorId=None, port=None, allMetrics=None,): self.topologyId = topologyId self.supervisorId = supervisorId self.port = port self.allMetrics = allMetrics def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.topologyId = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.supervisorId = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I32: self.port = iprot.readI32(); else: iprot.skip(ftype) elif fid == 4: if ftype == TType.STRUCT: self.allMetrics = MetricInfo() self.allMetrics.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('WorkerUploadMetrics') if self.topologyId is not None: oprot.writeFieldBegin('topologyId', TType.STRING, 1) oprot.writeString(self.topologyId.encode('utf-8')) oprot.writeFieldEnd() if self.supervisorId is not None: oprot.writeFieldBegin('supervisorId', TType.STRING, 2) oprot.writeString(self.supervisorId.encode('utf-8')) oprot.writeFieldEnd() if self.port is not None: oprot.writeFieldBegin('port', TType.I32, 3) oprot.writeI32(self.port) oprot.writeFieldEnd() if self.allMetrics is not None: oprot.writeFieldBegin('allMetrics', TType.STRUCT, 4) self.allMetrics.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.topologyId is None: raise TProtocol.TProtocolException(message='Required field topologyId is unset!') if 
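  # WorkerUploadMetrics is the payload a worker uses to report all of its
  # metrics for one (topologyId, supervisorId, port) triple in a single
  # MetricInfo. Sketch with made-up identifiers:
  #
  #   up = WorkerUploadMetrics(topologyId='seq-test-1-1453113600',
  #                            supervisorId='sup-01', port=6800,
  #                            allMetrics=MetricInfo(metrics={}))
  #   up.validate()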
self.supervisorId is None:
      raise TProtocol.TProtocolException(message='Required field supervisorId is unset!')
    if self.port is None:
      raise TProtocol.TProtocolException(message='Required field port is unset!')
    if self.allMetrics is None:
      raise TProtocol.TProtocolException(message='Required field allMetrics is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.topologyId)
    value = (value * 31) ^ hash(self.supervisorId)
    value = (value * 31) ^ hash(self.port)
    value = (value * 31) ^ hash(self.allMetrics)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


class KillOptions:
  """
  Attributes:
   - wait_secs
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'wait_secs', None, None, ), # 1
  )

  def __init__(self, wait_secs=None,):
    self.wait_secs = wait_secs

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.wait_secs = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('KillOptions')
    if self.wait_secs is not None:
      oprot.writeFieldBegin('wait_secs', TType.I32, 1)
      oprot.writeI32(self.wait_secs)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.wait_secs)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


class RebalanceOptions:
  """
  Attributes:
   - wait_secs
   - reassign
   - conf
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'wait_secs', None, None, ), # 1
    (2, TType.BOOL, 'reassign', None, None, ), # 2
    (3, TType.STRING, 'conf', None, None, ), # 3
  )

  def __init__(self, wait_secs=None, reassign=None, conf=None,):
    self.wait_secs = wait_secs
    self.reassign = reassign
    self.conf = conf

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.wait_secs = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.BOOL:
          self.reassign = iprot.readBool();
        else:
          iprot.skip(ftype)
      elif fid == 3:
        if ftype == TType.STRING:
          self.conf = iprot.readString().decode('utf-8')
        else:
iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('RebalanceOptions') if self.wait_secs is not None: oprot.writeFieldBegin('wait_secs', TType.I32, 1) oprot.writeI32(self.wait_secs) oprot.writeFieldEnd() if self.reassign is not None: oprot.writeFieldBegin('reassign', TType.BOOL, 2) oprot.writeBool(self.reassign) oprot.writeFieldEnd() if self.conf is not None: oprot.writeFieldBegin('conf', TType.STRING, 3) oprot.writeString(self.conf.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.wait_secs) value = (value * 31) ^ hash(self.reassign) value = (value * 31) ^ hash(self.conf) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class Credentials: """ Attributes: - creds """ thrift_spec = ( None, # 0 (1, TType.MAP, 'creds', (TType.STRING,None,TType.STRING,None), None, ), # 1 ) def __init__(self, creds=None,): self.creds = creds def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.MAP: self.creds = {} (_ktype173, _vtype174, _size172 ) = iprot.readMapBegin() for _i176 in xrange(_size172): _key177 = iprot.readString().decode('utf-8') _val178 = iprot.readString().decode('utf-8') self.creds[_key177] = _val178 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('Credentials') if self.creds is not None: oprot.writeFieldBegin('creds', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.creds)) for kiter179,viter180 in self.creds.items(): oprot.writeString(kiter179.encode('utf-8')) oprot.writeString(viter180.encode('utf-8')) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.creds is None: raise TProtocol.TProtocolException(message='Required field creds is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.creds) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class SubmitOptions: """ Attributes: - initial_status - creds """ thrift_spec = ( None, # 0 (1, 
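  # initial_status is an i32 carrying the TopologyInitialStatus enum that
  # the Thrift IDL defines alongside these structs (ACTIVE/INACTIVE in
  # standard Storm; assumed unchanged here). creds is optional -- validate()
  # only insists on initial_status. Sketch with made-up credentials:
  #
  #   opts = SubmitOptions(initial_status=TopologyInitialStatus.ACTIVE,
  #                        creds=Credentials(creds={'user': 'secret'}))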
TType.I32, 'initial_status', None, None, ), # 1 (2, TType.STRUCT, 'creds', (Credentials, Credentials.thrift_spec), None, ), # 2 ) def __init__(self, initial_status=None, creds=None,): self.initial_status = initial_status self.creds = creds def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I32: self.initial_status = iprot.readI32(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRUCT: self.creds = Credentials() self.creds.read(iprot) else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SubmitOptions') if self.initial_status is not None: oprot.writeFieldBegin('initial_status', TType.I32, 1) oprot.writeI32(self.initial_status) oprot.writeFieldEnd() if self.creds is not None: oprot.writeFieldBegin('creds', TType.STRUCT, 2) self.creds.write(oprot) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.initial_status is None: raise TProtocol.TProtocolException(message='Required field initial_status is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.initial_status) value = (value * 31) ^ hash(self.creds) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class MonitorOptions: """ Attributes: - isEnable """ thrift_spec = ( None, # 0 (1, TType.BOOL, 'isEnable', None, None, ), # 1 ) def __init__(self, isEnable=None,): self.isEnable = isEnable def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.BOOL: self.isEnable = iprot.readBool(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('MonitorOptions') if self.isEnable is not None: oprot.writeFieldBegin('isEnable', TType.BOOL, 1) oprot.writeBool(self.isEnable) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.isEnable) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ThriftSerializedObject: """ Attributes: - name - bits """ thrift_spec = ( None, # 0 (1, TType.STRING, 'name', None, None, ), # 1 (2, TType.STRING, 'bits', None, None, ), # 2 ) def __init__(self, name=None, bits=None,): self.name = name self.bits = bits def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.name = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.bits = iprot.readString(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ThriftSerializedObject') if self.name is not None: oprot.writeFieldBegin('name', TType.STRING, 1) oprot.writeString(self.name.encode('utf-8')) oprot.writeFieldEnd() if self.bits is not None: oprot.writeFieldBegin('bits', TType.STRING, 2) oprot.writeString(self.bits) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.name is None: raise TProtocol.TProtocolException(message='Required field name is unset!') if self.bits is None: raise TProtocol.TProtocolException(message='Required field bits is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.name) value = (value * 31) ^ hash(self.bits) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class LocalStateData: """ Attributes: - serialized_parts """ thrift_spec = ( None, # 0 (1, TType.MAP, 'serialized_parts', (TType.STRING,None,TType.STRUCT,(ThriftSerializedObject, ThriftSerializedObject.thrift_spec)), None, ), # 1 ) def __init__(self, serialized_parts=None,): self.serialized_parts = serialized_parts def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.MAP: self.serialized_parts = {} (_ktype182, _vtype183, _size181 ) = iprot.readMapBegin() for _i185 in xrange(_size181): _key186 = iprot.readString().decode('utf-8') _val187 = ThriftSerializedObject() _val187.read(iprot) self.serialized_parts[_key186] = _val187 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ 
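  # LocalStateData is a map from a state key to a ThriftSerializedObject,
  # i.e. a type name plus raw serialized bytes. Note in
  # ThriftSerializedObject.write above that 'bits' is written without UTF-8
  # encoding, unlike 'name': it is binary payload, not text. (That this
  # backs the daemons' local-state files is an assumption; this module only
  # defines the shape.)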
== TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('LocalStateData')
    if self.serialized_parts is not None:
      oprot.writeFieldBegin('serialized_parts', TType.MAP, 1)
      oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.serialized_parts))
      for kiter188,viter189 in self.serialized_parts.items():
        oprot.writeString(kiter188.encode('utf-8'))
        viter189.write(oprot)
      oprot.writeMapEnd()
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.serialized_parts is None:
      raise TProtocol.TProtocolException(message='Required field serialized_parts is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.serialized_parts)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


class TaskHeartbeat:
  """
  Attributes:
   - time
   - uptime
  """

  thrift_spec = (
    None, # 0
    (1, TType.I32, 'time', None, None, ), # 1
    (2, TType.I32, 'uptime', None, None, ), # 2
  )

  def __init__(self, time=None, uptime=None,):
    self.time = time
    self.uptime = uptime

  def read(self, iprot):
    if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
      fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
      return
    iprot.readStructBegin()
    while True:
      (fname, ftype, fid) = iprot.readFieldBegin()
      if ftype == TType.STOP:
        break
      if fid == 1:
        if ftype == TType.I32:
          self.time = iprot.readI32();
        else:
          iprot.skip(ftype)
      elif fid == 2:
        if ftype == TType.I32:
          self.uptime = iprot.readI32();
        else:
          iprot.skip(ftype)
      else:
        iprot.skip(ftype)
      iprot.readFieldEnd()
    iprot.readStructEnd()

  def write(self, oprot):
    if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
      oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
      return
    oprot.writeStructBegin('TaskHeartbeat')
    if self.time is not None:
      oprot.writeFieldBegin('time', TType.I32, 1)
      oprot.writeI32(self.time)
      oprot.writeFieldEnd()
    if self.uptime is not None:
      oprot.writeFieldBegin('uptime', TType.I32, 2)
      oprot.writeI32(self.uptime)
      oprot.writeFieldEnd()
    oprot.writeFieldStop()
    oprot.writeStructEnd()

  def validate(self):
    if self.time is None:
      raise TProtocol.TProtocolException(message='Required field time is unset!')
    if self.uptime is None:
      raise TProtocol.TProtocolException(message='Required field uptime is unset!')
    return

  def __hash__(self):
    value = 17
    value = (value * 31) ^ hash(self.time)
    value = (value * 31) ^ hash(self.uptime)
    return value

  def __repr__(self):
    L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()]
    return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

  def __eq__(self, other):
    return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

  def __ne__(self, other):
    return not (self == other)


class TopologyTaskHbInfo:
  """
  Attributes:
   - topologyId
   - topologyMasterId
   - taskHbs
  """

  thrift_spec = (
    None, # 0
    (1, TType.STRING, 'topologyId', None, None, ), # 1
    (2, TType.I32, 'topologyMasterId', None, None, ), # 2
    (3, TType.MAP, 'taskHbs',
(TType.I32,None,TType.STRUCT,(TaskHeartbeat, TaskHeartbeat.thrift_spec)), None, ), # 3 ) def __init__(self, topologyId=None, topologyMasterId=None, taskHbs=None,): self.topologyId = topologyId self.topologyMasterId = topologyMasterId self.taskHbs = taskHbs def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.topologyId = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I32: self.topologyMasterId = iprot.readI32(); else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.taskHbs = {} (_ktype191, _vtype192, _size190 ) = iprot.readMapBegin() for _i194 in xrange(_size190): _key195 = iprot.readI32(); _val196 = TaskHeartbeat() _val196.read(iprot) self.taskHbs[_key195] = _val196 iprot.readMapEnd() else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('TopologyTaskHbInfo') if self.topologyId is not None: oprot.writeFieldBegin('topologyId', TType.STRING, 1) oprot.writeString(self.topologyId.encode('utf-8')) oprot.writeFieldEnd() if self.topologyMasterId is not None: oprot.writeFieldBegin('topologyMasterId', TType.I32, 2) oprot.writeI32(self.topologyMasterId) oprot.writeFieldEnd() if self.taskHbs is not None: oprot.writeFieldBegin('taskHbs', TType.MAP, 3) oprot.writeMapBegin(TType.I32, TType.STRUCT, len(self.taskHbs)) for kiter197,viter198 in self.taskHbs.items(): oprot.writeI32(kiter197) viter198.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.topologyId is None: raise TProtocol.TProtocolException(message='Required field topologyId is unset!') if self.topologyMasterId is None: raise TProtocol.TProtocolException(message='Required field topologyMasterId is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.topologyId) value = (value * 31) ^ hash(self.topologyMasterId) value = (value * 31) ^ hash(self.taskHbs) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class SettableBlobMeta: """ Attributes: - replication_factor """ thrift_spec = ( None, # 0 (1, TType.I32, 'replication_factor', None, None, ), # 1 ) def __init__(self, replication_factor=None,): self.replication_factor = replication_factor def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break 
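    # SettableBlobMeta vs. ReadableBlobMeta mirrors the Storm blobstore
    # metadata split: replication_factor is the client-settable knob, and
    # ReadableBlobMeta pairs those settable bits with a server-managed
    # version. Sketch with made-up values:
    #
    #   meta = ReadableBlobMeta(settable=SettableBlobMeta(replication_factor=3),
    #                           version=1)
    #   meta.validate()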
if fid == 1: if ftype == TType.I32: self.replication_factor = iprot.readI32(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('SettableBlobMeta') if self.replication_factor is not None: oprot.writeFieldBegin('replication_factor', TType.I32, 1) oprot.writeI32(self.replication_factor) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.replication_factor) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ReadableBlobMeta: """ Attributes: - settable - version """ thrift_spec = ( None, # 0 (1, TType.STRUCT, 'settable', (SettableBlobMeta, SettableBlobMeta.thrift_spec), None, ), # 1 (2, TType.I64, 'version', None, None, ), # 2 ) def __init__(self, settable=None, version=None,): self.settable = settable self.version = version def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRUCT: self.settable = SettableBlobMeta() self.settable.read(iprot) else: iprot.skip(ftype) elif fid == 2: if ftype == TType.I64: self.version = iprot.readI64(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ReadableBlobMeta') if self.settable is not None: oprot.writeFieldBegin('settable', TType.STRUCT, 1) self.settable.write(oprot) oprot.writeFieldEnd() if self.version is not None: oprot.writeFieldBegin('version', TType.I64, 2) oprot.writeI64(self.version) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.settable is None: raise TProtocol.TProtocolException(message='Required field settable is unset!') if self.version is None: raise TProtocol.TProtocolException(message='Required field version is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.settable) value = (value * 31) ^ hash(self.version) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class ListBlobsResult: """ Attributes: - keys - session """ thrift_spec = ( None, # 0 (1, TType.LIST, 'keys', (TType.STRING,None), None, ), # 1 (2, TType.STRING, 'session', None, 
None, ), # 2 ) def __init__(self, keys=None, session=None,): self.keys = keys self.session = session def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.LIST: self.keys = [] (_etype202, _size199) = iprot.readListBegin() for _i203 in xrange(_size199): _elem204 = iprot.readString().decode('utf-8') self.keys.append(_elem204) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.session = iprot.readString().decode('utf-8') else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('ListBlobsResult') if self.keys is not None: oprot.writeFieldBegin('keys', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.keys)) for iter205 in self.keys: oprot.writeString(iter205.encode('utf-8')) oprot.writeListEnd() oprot.writeFieldEnd() if self.session is not None: oprot.writeFieldBegin('session', TType.STRING, 2) oprot.writeString(self.session.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.keys is None: raise TProtocol.TProtocolException(message='Required field keys is unset!') if self.session is None: raise TProtocol.TProtocolException(message='Required field session is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.keys) value = (value * 31) ^ hash(self.session) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class BeginDownloadResult: """ Attributes: - version - session - data_size """ thrift_spec = ( None, # 0 (1, TType.I64, 'version', None, None, ), # 1 (2, TType.STRING, 'session', None, None, ), # 2 (3, TType.I64, 'data_size', None, None, ), # 3 ) def __init__(self, version=None, session=None, data_size=None,): self.version = version self.session = session self.data_size = data_size def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.I64: self.version = iprot.readI64(); else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.session = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 3: if ftype == TType.I64: self.data_size = iprot.readI64(); else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('BeginDownloadResult') if self.version is not None: oprot.writeFieldBegin('version', TType.I64, 1) oprot.writeI64(self.version) oprot.writeFieldEnd() if self.session is not None: oprot.writeFieldBegin('session', TType.STRING, 2) oprot.writeString(self.session.encode('utf-8')) oprot.writeFieldEnd() if self.data_size is not None: oprot.writeFieldBegin('data_size', TType.I64, 3) oprot.writeI64(self.data_size) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.version is None: raise TProtocol.TProtocolException(message='Required field version is unset!') if self.session is None: raise TProtocol.TProtocolException(message='Required field session is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.version) value = (value * 31) ^ hash(self.session) value = (value * 31) ^ hash(self.data_size) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class DRPCRequest: """ Attributes: - func_args - request_id """ thrift_spec = ( None, # 0 (1, TType.STRING, 'func_args', None, None, ), # 1 (2, TType.STRING, 'request_id', None, None, ), # 2 ) def __init__(self, func_args=None, request_id=None,): self.func_args = func_args self.request_id = request_id def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.func_args = iprot.readString().decode('utf-8') else: iprot.skip(ftype) elif fid == 2: if ftype == TType.STRING: self.request_id = iprot.readString().decode('utf-8') else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('DRPCRequest') if self.func_args is not None: oprot.writeFieldBegin('func_args', TType.STRING, 1) oprot.writeString(self.func_args.encode('utf-8')) oprot.writeFieldEnd() if self.request_id is not None: oprot.writeFieldBegin('request_id', TType.STRING, 2) oprot.writeString(self.request_id.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.func_args is None: raise TProtocol.TProtocolException(message='Required field func_args is unset!') if self.request_id is None: raise TProtocol.TProtocolException(message='Required field request_id is unset!') return def __hash__(self): value = 17 value = (value * 31) ^ hash(self.func_args) value = (value * 31) ^ hash(self.request_id) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % 
(self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other) class DRPCExecutionException(TException): """ Attributes: - msg """ thrift_spec = ( None, # 0 (1, TType.STRING, 'msg', None, None, ), # 1 ) def __init__(self, msg=None,): self.msg = msg def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) return iprot.readStructBegin() while True: (fname, ftype, fid) = iprot.readFieldBegin() if ftype == TType.STOP: break if fid == 1: if ftype == TType.STRING: self.msg = iprot.readString().decode('utf-8') else: iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() iprot.readStructEnd() def write(self, oprot): if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) return oprot.writeStructBegin('DRPCExecutionException') if self.msg is not None: oprot.writeFieldBegin('msg', TType.STRING, 1) oprot.writeString(self.msg.encode('utf-8')) oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() def validate(self): if self.msg is None: raise TProtocol.TProtocolException(message='Required field msg is unset!') return def __str__(self): return repr(self) def __hash__(self): value = 17 value = (value * 31) ^ hash(self.msg) return value def __repr__(self): L = ['%s=%r' % (key, value) for key, value in self.__dict__.iteritems()] return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) def __eq__(self, other): return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ def __ne__(self, other): return not (self == other)
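# The generated structs above all follow the standard Thrift read/write
# protocol contract. A minimal round-trip sketch, assuming the generated
# module is importable and the Apache Thrift Python runtime is installed;
# `roundtrip` is an illustrative helper, not part of the generated code:
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

def roundtrip(req):
    # Serialize into an in-memory buffer...
    write_buf = TTransport.TMemoryBuffer()
    req.write(TBinaryProtocol.TBinaryProtocol(write_buf))
    data = write_buf.getvalue()
    # ...then read the bytes back into a fresh struct.
    read_buf = TTransport.TMemoryBuffer(data)
    out = DRPCRequest()
    out.read(TBinaryProtocol.TBinaryProtocol(read_buf))
    return out

echoed = roundtrip(DRPCRequest(func_args=u'args', request_id=u'1'))
assert echoed == DRPCRequest(func_args=u'args', request_id=u'1')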
/* Kerberos4 SASL plugin * <NAME> * <NAME> * $Id: kerberos4.c,v 1.99 2005/01/10 07:08:53 shadow Exp $ */ /* * Copyright (c) 1998-2003 Carnegie Mellon University. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * 3. The name "Carnegie Mellon University" must not be used to * endorse or promote products derived from this software without * prior written permission. For permission or any other legal * details, please contact * Office of Technology Transfer * Carnegie Mellon University * 5000 Forbes Avenue * Pittsburgh, PA 15213-3890 * (412) 268-4387, fax: (412) 268-7395 * <EMAIL> * * 4. Redistributions of any form whatsoever must retain the following * acknowledgment: * "This product includes software developed by Computing Services * at Carnegie Mellon University (http://www.cmu.edu/computing/)." * * CARNEGIE MELLON UNIVERSITY DISCLAIMS ALL WARRANTIES WITH REGARD TO * THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY * AND FITNESS, IN NO EVENT SHALL CARNEGIE MELLON UNIVERSITY BE LIABLE * FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN * AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <config.h> #include <stdlib.h> #include <string.h> #include <krb.h> #ifdef WITH_DES # ifdef WITH_SSL_DES # include <openssl/des.h> # else # include <des.h> # endif /* WITH_SSL_DES */ #endif /* WITH_DES */ #ifdef WIN32 # include <winsock2.h> #elif defined(macintosh) #include <kcglue_krb.h> #else # include <sys/param.h> # include <sys/socket.h> # include <netinet/in.h> # include <arpa/inet.h> # include <netdb.h> #endif /* WIN32 */ #ifdef HAVE_UNISTD_H #include <unistd.h> #endif #include <fcntl.h> #include <sasl.h> #include <saslutil.h> #include <saslplug.h> #include <errno.h> #include <ctype.h> #include "plugin_common.h" #ifdef macintosh /* * krb.h doenst include some functions and mac compiler is picky * about declartions */ #include <extra_krb.h> #include <sasl_kerberos4_plugin_decl.h> #endif #ifdef WIN32 /* This must be after sasl.h, saslutil.h */ # include "saslKERBEROSV4.h" /* KClient doesn't define this */ typedef struct krb_principal { char name[ANAME_SZ]; char instance[INST_SZ]; char realm[REALM_SZ]; } krb_principal; /* This isn't defined under WIN32. 
For access() */ #ifndef R_OK #define R_OK 04 #endif /* we also need io.h for access() prototype */ #include <io.h> #endif /* WIN32 */ #ifdef sun /* gotta define gethostname ourselves on suns */ extern int gethostname(char *, int); #endif /***************************** Common Section *****************************/ static const char plugin_id[] = "$Id: kerberos4.c,v 1.99 2005/01/10 07:08:53 shadow Exp $"; #ifndef KEYFILE #define KEYFILE "/etc/srvtab"; #endif #define KRB_SECFLAG_NONE (1) #define KRB_SECFLAG_INTEGRITY (2) #define KRB_SECFLAG_ENCRYPTION (4) #define KRB_SECFLAGS (7) #define KRB_SECFLAG_CREDENTIALS (8) #define KRB_DES_SECURITY_BITS (56) #define KRB_INTEGRITY_BITS (1) typedef enum Krb_sec { KRB_SEC_NONE = 0, KRB_SEC_INTEGRITY = 1, KRB_SEC_ENCRYPTION = 2 } Krb_sec_t; typedef struct context { int state; int challenge; /* this is the challenge (32 bit int) used for the authentication */ char *service; /* kerberos service */ char instance[ANAME_SZ]; char pname[ANAME_SZ]; char pinst[INST_SZ]; char prealm[REALM_SZ]; char *hostname; /* hostname */ char *realm; /* kerberos realm */ char *auth; /* */ CREDENTIALS credentials; des_cblock key; /* session key */ des_cblock session; /* session key */ des_key_schedule init_keysched; /* key schedule for initialization */ des_key_schedule enc_keysched; /* encryption key schedule */ des_key_schedule dec_keysched; /* decryption key schedule */ struct sockaddr_in ip_local; /* local ip address and port. needed for layers */ struct sockaddr_in ip_remote; /* remote ip address and port. needed for layers */ const sasl_utils_t *utils; /* this is useful to have around */ Krb_sec_t sec_type; char *encode_buf; /* For encoding/decoding mem management */ char *decode_buf; char *decode_once_buf; unsigned encode_buf_len; unsigned decode_buf_len; unsigned decode_once_buf_len; buffer_info_t *enc_in_buf; decode_context_t decode_context; char *out_buf; /* per-step mem management */ unsigned out_buf_len; const char *user; /* used by client */ int secflags; /* client/server supports layers? 
*/ long time_sec; /* These are used to make sure we are getting */ char time_5ms; /* strictly increasing timestamps */ } context_t; #define KRB_LOCK_MUTEX(utils) \ if(((sasl_utils_t *)(utils))->mutex_lock(krb_mutex) != 0) { \ ((sasl_utils_t *)(utils))->seterror(((sasl_utils_t *)(utils))->conn, \ 0, "error locking mutex"); \ return SASL_FAIL; \ } #define KRB_UNLOCK_MUTEX(utils) \ if(((sasl_utils_t *)(utils))->mutex_unlock(krb_mutex) != 0) { \ ((sasl_utils_t *)(utils))->seterror(((sasl_utils_t *)(utils))->conn, \ 0, "error unlocking mutex"); \ return SASL_FAIL; \ } /* Mutex for not-thread-safe kerberos 4 library */ static void *krb_mutex = NULL; static char *srvtab = NULL; static unsigned refcount = 0; static int kerberosv4_encode(void *context, const struct iovec *invec, unsigned numiov, const char **output, unsigned *outputlen) { int len, ret; context_t *text = (context_t *)context; struct buffer_info *inblob, bufinfo; if(numiov > 1) { ret = _plug_iovec_to_buf(text->utils, invec, numiov, &text->enc_in_buf); if(ret != SASL_OK) return ret; inblob = text->enc_in_buf; } else { bufinfo.data = invec[0].iov_base; bufinfo.curlen = invec[0].iov_len; inblob = &bufinfo; } ret = _plug_buf_alloc(text->utils, &(text->encode_buf), &text->encode_buf_len, inblob->curlen+40); if(ret != SASL_OK) return ret; KRB_LOCK_MUTEX(text->utils); if (text->sec_type == KRB_SEC_ENCRYPTION) { /* Type incompatibility on 4th arg probably means you're building against krb4 in MIT krb5, but got the OpenSSL headers in your way. You need to not use openssl/des.h with MIT kerberos. */ len=krb_mk_priv(inblob->data, (text->encode_buf+4), inblob->curlen, text->init_keysched, &text->session, &text->ip_local, &text->ip_remote); } else if (text->sec_type == KRB_SEC_INTEGRITY) { len=krb_mk_safe(inblob->data, (text->encode_buf+4), inblob->curlen, &text->session, &text->ip_local, &text->ip_remote); } else { len = -1; } KRB_UNLOCK_MUTEX(text->utils); /* returns -1 on error */ if (len==-1) return SASL_FAIL; /* now copy in the len of the buffer in network byte order */ *outputlen=len+4; len=htonl(len); memcpy(text->encode_buf, &len, 4); /* Setup the const pointer */ *output = text->encode_buf; return SASL_OK; } static int kerberosv4_decode_packet(void *context, const char *input, unsigned inputlen, char **output, unsigned *outputlen) { context_t *text = (context_t *) context; int result; MSG_DAT data; memset(&data,0,sizeof(MSG_DAT)); KRB_LOCK_MUTEX(text->utils); if (text->sec_type == KRB_SEC_ENCRYPTION) { result=krb_rd_priv(input, inputlen, text->init_keysched, &text->session, &text->ip_remote, &text->ip_local, &data); } else if (text->sec_type == KRB_SEC_INTEGRITY) { result = krb_rd_safe(input, inputlen, &text->session, &text->ip_remote, &text->ip_local, &data); } else { KRB_UNLOCK_MUTEX(text->utils); text->utils->seterror(text->utils->conn, 0, "KERBEROS_4 decode called with KRB_SEC_NONE"); return SASL_FAIL; } KRB_UNLOCK_MUTEX(text->utils); /* see if the krb library gave us a failure */ if (result != 0) { text->utils->seterror(text->utils->conn, 0, get_krb_err_txt(result)); return SASL_FAIL; } /* check to make sure the timestamps are ok */ if ((data.time_sec < text->time_sec) || /* if an earlier time */ (((data.time_sec == text->time_sec) && /* or the exact same time */ (data.time_5ms < text->time_5ms)))) { text->utils->seterror(text->utils->conn, 0, "timestamps not ok"); return SASL_FAIL; } text->time_sec = data.time_sec; text->time_5ms = data.time_5ms; result = _plug_buf_alloc(text->utils, &text->decode_once_buf, 
&text->decode_once_buf_len, data.app_length + 1); if(result != SASL_OK) return result; *output = text->decode_once_buf; *outputlen = data.app_length; memcpy(*output, data.app_data, data.app_length); (*output)[*outputlen] = '\0'; return SASL_OK; } static int kerberosv4_decode(void *context, const char *input, unsigned inputlen, const char **output, unsigned *outputlen) { context_t *text = (context_t *) context; int ret; ret = _plug_decode(&text->decode_context, input, inputlen, &text->decode_buf, &text->decode_buf_len, outputlen, kerberosv4_decode_packet, text); *output = text->decode_buf; return ret; } static int new_text(const sasl_utils_t *utils, context_t **text) { context_t *ret = (context_t *) utils->malloc(sizeof(context_t)); if (ret == NULL) { MEMERROR(utils); return SASL_NOMEM; } memset(ret, 0, sizeof(context_t)); ret->state = 1; ret->utils = utils; *text = ret; return SASL_OK; } static void kerberosv4_common_mech_dispose(void *conn_context, const sasl_utils_t *utils) { context_t *text = (context_t *)conn_context; if(!text) return; _plug_decode_free(&text->decode_context); if (text->encode_buf) utils->free(text->encode_buf); if (text->decode_buf) utils->free(text->decode_buf); if (text->decode_once_buf) utils->free(text->decode_once_buf); if (text->out_buf) utils->free(text->out_buf); if (text->enc_in_buf) { if(text->enc_in_buf->data) utils->free(text->enc_in_buf->data); utils->free(text->enc_in_buf); } /* no need to free userid, it's just the interaction result */ utils->free(text); } static void kerberosv4_common_mech_free(void *glob_context __attribute__((unused)), const sasl_utils_t *utils) { if (krb_mutex) { utils->mutex_free(krb_mutex); krb_mutex = NULL; /* in case we need to re-use it */ } refcount--; if (srvtab && !refcount) { utils->free(srvtab); srvtab = NULL; } } /***************************** Server Section *****************************/ static int cando_sec(sasl_security_properties_t *props, int external_ssf, int secflag) { int need; int musthave; if(props->maxbufsize == 0) { need = musthave = 0; } else { need = props->max_ssf - external_ssf; musthave = props->min_ssf - external_ssf; } switch (secflag) { case KRB_SECFLAG_NONE: if (musthave <= 0) return 1; break; case KRB_SECFLAG_INTEGRITY: if ((musthave <= KRB_INTEGRITY_BITS) && (KRB_INTEGRITY_BITS <= need)) return 1; break; case KRB_SECFLAG_ENCRYPTION: if ((musthave <= KRB_DES_SECURITY_BITS) && (KRB_DES_SECURITY_BITS <= need)) return 1; break; case KRB_SECFLAG_CREDENTIALS: if (props->security_flags & SASL_SEC_PASS_CREDENTIALS) return 1; break; } return 0; } static int ipv4_ipfromstring(const sasl_utils_t *utils, const char *addr, struct sockaddr_in *out) { struct sockaddr_storage ss; int result; result = _plug_ipfromstring(utils, addr, (struct sockaddr *)&ss, sizeof(ss)); if (result != SASL_OK) { /* couldn't get local IP address */ return result; } /* Kerberos_V4 supports only IPv4 */ if (((struct sockaddr *)&ss)->sa_family != AF_INET) return SASL_FAIL; memcpy(out, &ss, sizeof(struct sockaddr_in)); return SASL_OK; } #ifndef macintosh static int kerberosv4_server_mech_new(void *glob_context __attribute__((unused)), sasl_server_params_t *sparams, const char *challenge __attribute__((unused)), unsigned challen __attribute__((unused)), void **conn_context) { return new_text(sparams->utils, (context_t **) conn_context); } static int kerberosv4_server_mech_step(void *conn_context, sasl_server_params_t *sparams, const char *clientin, unsigned clientinlen, const char **serverout, unsigned *serveroutlen, sasl_out_params_t 
*oparams) { context_t *text = (context_t *) conn_context; int result; *serverout = NULL; *serveroutlen = 0; switch (text->state) { case 1: { /* random 32-bit number */ int randocts, nchal; /* shouldn't we check for erroneous client input here?!? */ sparams->utils->rand(sparams->utils->rpool,(char *) &randocts , sizeof(randocts)); text->challenge=randocts; nchal = htonl(text->challenge); result = _plug_buf_alloc(text->utils, &text->out_buf, &text->out_buf_len, 5); if (result != SASL_OK) return result; memcpy(text->out_buf,&nchal,4); *serverout = text->out_buf; *serveroutlen = 4; text->state = 2; return SASL_CONTINUE; } case 2: { int nchal; unsigned char sout[8]; AUTH_DAT ad; KTEXT_ST ticket; unsigned lup; struct sockaddr_in addr; char *dot; /* received authenticator */ /* create ticket */ if (clientinlen > MAX_KTXT_LEN) { text->utils->seterror(text->utils->conn,0, "request larger than maximum ticket size"); return SASL_FAIL; } ticket.length=clientinlen; for (lup = 0; lup < clientinlen; lup++) ticket.dat[lup] = clientin[lup]; KRB_LOCK_MUTEX(sparams->utils); text->realm = krb_realmofhost(sparams->serverFQDN); /* get instance */ strncpy (text->instance, krb_get_phost (sparams->serverFQDN), sizeof (text->instance)); KRB_UNLOCK_MUTEX(sparams->utils); text->instance[sizeof(text->instance)-1] = 0; /* At some sites, krb_get_phost() sensibly but * atypically returns FQDNs, versus the first component, * which is what we need for RFC2222 section 7.1 */ dot = strchr(text->instance, '.'); if (dot) *dot = '\0'; memset(&addr, 0, sizeof(struct sockaddr_in)); #ifndef KRB4_IGNORE_IP_ADDRESS /* (we ignore IP addresses in krb4 tickets at CMU to facilitate moving from machine to machine) */ /* get ip number in addr*/ result = ipv4_ipfromstring(sparams->utils, sparams->ipremoteport, &addr); if (result != SASL_OK || !sparams->ipremoteport) { SETERROR(text->utils, "couldn't get remote IP address"); return result; } #endif /* check ticket */ KRB_LOCK_MUTEX(sparams->utils); result = krb_rd_req(&ticket, (char *) sparams->service, text->instance, addr.sin_addr.s_addr, &ad, srvtab); KRB_UNLOCK_MUTEX(sparams->utils); if (result) { /* if fails mechanism fails */ text->utils->seterror(text->utils->conn,0, "krb_rd_req failed service=%s instance=%s error code=%s (%i)", sparams->service, text->instance,get_krb_err_txt(result),result); return SASL_BADAUTH; } /* 8 octets of data * 1-4 checksum+1 * 5 security layers * 6-8max cipher text buffer size * use DES ECB in the session key */ nchal=htonl(text->challenge+1); memcpy(sout, &nchal, 4); sout[4]= 0; if (cando_sec(&sparams->props, sparams->external_ssf, KRB_SECFLAG_NONE)) sout[4] |= KRB_SECFLAG_NONE; if (cando_sec(&sparams->props, sparams->external_ssf, KRB_SECFLAG_INTEGRITY)) sout[4] |= KRB_SECFLAG_INTEGRITY; if (cando_sec(&sparams->props, sparams->external_ssf, KRB_SECFLAG_ENCRYPTION)) sout[4] |= KRB_SECFLAG_ENCRYPTION; if (cando_sec(&sparams->props, sparams->external_ssf, KRB_SECFLAG_CREDENTIALS)) sout[4] |= KRB_SECFLAG_CREDENTIALS; if(sparams->props.maxbufsize) { int tmpmaxbuf = (sparams->props.maxbufsize > 0xFFFFFF) ? 
0xFFFFFF : sparams->props.maxbufsize; sout[5]=((tmpmaxbuf >> 16) & 0xFF); sout[6]=((tmpmaxbuf >> 8) & 0xFF); sout[7]=(tmpmaxbuf & 0xFF); } else { /* let's say we can support up to 64K */ /* no inherent inability with our layers to support more */ sout[5]=0x00; /* max ciphertext buffer size */ sout[6]=0xFF; sout[7]=0xFF; } memcpy(text->session, ad.session, 8); memcpy(text->pname, ad.pname, sizeof(text->pname)); memcpy(text->pinst, ad.pinst, sizeof(text->pinst)); memcpy(text->prealm, ad.prealm, sizeof(text->prealm)); des_key_sched(&ad.session, text->init_keysched); /* make keyschedule for encryption and decryption */ des_key_sched(&ad.session, text->enc_keysched); des_key_sched(&ad.session, text->dec_keysched); des_ecb_encrypt((des_cblock *)sout, (des_cblock *)sout, text->init_keysched, DES_ENCRYPT); result = _plug_buf_alloc(text->utils, &text->out_buf, &text->out_buf_len, 9); if(result != SASL_OK) return result; memcpy(text->out_buf,&sout,8); *serverout = text->out_buf; *serveroutlen = 8; text->state = 3; return SASL_CONTINUE; } case 3: { int result; int testnum; int flag; unsigned char *in; if ((clientinlen == 0) || (clientinlen % 8 != 0)) { text->utils->seterror(text->utils->conn,0, "Response to challengs is not a multiple of 8 octets (a DES block)"); return SASL_FAIL; } /* we need to make a copy because des does in place decrpytion */ in = sparams->utils->malloc(clientinlen + 1); if (in == NULL) { MEMERROR(sparams->utils); return SASL_NOMEM; } memcpy(in, clientin, clientinlen); in[clientinlen] = '\0'; /* decrypt; verify checksum */ des_pcbc_encrypt((des_cblock *)in, (des_cblock *)in, clientinlen, text->init_keysched, &text->session, DES_DECRYPT); testnum = (in[0]*256*256*256)+(in[1]*256*256)+(in[2]*256)+in[3]; if (testnum != text->challenge) { SETERROR(sparams->utils, "incorrect response to challenge"); return SASL_BADAUTH; } if (!cando_sec(&sparams->props, sparams->external_ssf, in[4] & KRB_SECFLAGS)) { SETERROR(sparams->utils, "invalid security property specified"); return SASL_BADPROT; } oparams->encode = &kerberosv4_encode; oparams->decode = &kerberosv4_decode; switch (in[4] & KRB_SECFLAGS) { case KRB_SECFLAG_NONE: text->sec_type = KRB_SEC_NONE; oparams->encode = NULL; oparams->decode = NULL; oparams->mech_ssf = 0; break; case KRB_SECFLAG_INTEGRITY: text->sec_type = KRB_SEC_INTEGRITY; oparams->mech_ssf = KRB_INTEGRITY_BITS; break; case KRB_SECFLAG_ENCRYPTION: text->sec_type = KRB_SEC_ENCRYPTION; oparams->mech_ssf = KRB_DES_SECURITY_BITS; break; default: /* Mark that we tried */ oparams->mech_ssf = 2; SETERROR(sparams->utils, "not a supported encryption layer"); return SASL_BADPROT; } /* get ip data */ /* get ip number in addr*/ result = ipv4_ipfromstring(sparams->utils, sparams->iplocalport, &(text->ip_local)); if (result != SASL_OK) { SETERROR(sparams->utils, "couldn't get local ip address"); /* couldn't get local IP address */ return result; } result = ipv4_ipfromstring(sparams->utils, sparams->ipremoteport, &(text->ip_remote)); if (result != SASL_OK) { SETERROR(sparams->utils, "couldn't get remote ip address"); /* couldn't get remote IP address */ return result; } /* fill in oparams */ oparams->maxoutbuf = (in[5] << 16) + (in[6] << 8) + in[7]; if(oparams->mech_ssf) { /* FIXME: Likely to be too large */ oparams->maxoutbuf -= 50; } if (sparams->canon_user) { char *user=NULL, *authid=NULL; size_t ulen = 0, alen = strlen(text->pname); int ret, cflag = SASL_CU_AUTHID; if (text->pinst[0]) { alen += strlen(text->pinst) + 1 /* for the . 
*/; } flag = 0; if (strcmp(text->realm, text->prealm)) { alen += strlen(text->prealm) + 1 /* for the @ */; flag = 1; } authid = sparams->utils->malloc(alen + 1); if (!authid) { MEMERROR(sparams->utils); return SASL_NOMEM; } strcpy(authid, text->pname); if (text->pinst[0]) { strcat(authid, "."); strcat(authid, text->pinst); } if (flag) { strcat(authid, "@"); strcat(authid, text->prealm); } if (in[8]) { user = sparams->utils->malloc(strlen((char *) in + 8) + 1); if (!user) { MEMERROR(sparams->utils); return SASL_NOMEM; } strcpy(user, (char *) in + 8); ulen = strlen(user); } else { cflag |= SASL_CU_AUTHZID; } ret = sparams->canon_user(sparams->utils->conn, authid, alen, cflag, oparams); sparams->utils->free(authid); if (ret != SASL_OK) { if (user) sparams->utils->free(user); return ret; } if (user) { ret = sparams->canon_user(sparams->utils->conn, user, ulen, SASL_CU_AUTHZID, oparams); sparams->utils->free(user); } if (ret != SASL_OK) return ret; } /* nothing more to do; authenticated */ oparams->doneflag = 1; oparams->param_version = 0; /* used by layers */ _plug_decode_init(&text->decode_context, text->utils, (sparams->props.maxbufsize > 0xFFFFFF) ? 0xFFFFFF : sparams->props.maxbufsize); sparams->utils->free(in); return SASL_OK; } default: sparams->utils->log(NULL, SASL_LOG_ERR, "Invalid Kerberos server step %d\n", text->state); return SASL_FAIL; } return SASL_FAIL; /* should never get here */ } static int kerberosv4_mech_avail(void *glob_context __attribute__((unused)), sasl_server_params_t *sparams, void **conn_context __attribute__((unused))) { struct sockaddr_in addr; if (!sparams->iplocalport || !sparams->ipremoteport || ipv4_ipfromstring(sparams->utils, sparams->iplocalport, &addr) != SASL_OK || ipv4_ipfromstring(sparams->utils, sparams->ipremoteport, &addr) != SASL_OK) { SETERROR(sparams->utils, "KERBEROS_V4 unavailable due to lack of IPv4 information"); return SASL_NOMECH; } return SASL_OK; } static sasl_server_plug_t kerberosv4_server_plugins[] = { { "KERBEROS_V4", /* mech_name */ KRB_DES_SECURITY_BITS, /* max_ssf */ SASL_SEC_NOPLAINTEXT | SASL_SEC_NOACTIVE | SASL_SEC_NOANONYMOUS | SASL_SEC_MUTUAL_AUTH, /* security_flags */ SASL_FEAT_SERVER_FIRST | SASL_FEAT_ALLOWS_PROXY, /* features */ NULL, /* glob_context */ &kerberosv4_server_mech_new, /* mech_new */ &kerberosv4_server_mech_step, /* mech_step */ &kerberosv4_common_mech_dispose,/* mech_dispose */ &kerberosv4_common_mech_free, /* mech_free */ NULL, /* setpass */ NULL, /* user_query */ NULL, /* idle */ &kerberosv4_mech_avail, /* mech_avail */ NULL /* spare */ } }; #endif /* macintosh */ int kerberos4_server_plug_init(const sasl_utils_t *utils, int maxversion, int *out_version, sasl_server_plug_t **pluglist, int *plugcount) { #ifdef macintosh return SASL_BADVERS; #else const char *ret; unsigned int rl; if (maxversion < SASL_SERVER_PLUG_VERSION) { return SASL_BADVERS; } if (!krb_mutex) { krb_mutex = utils->mutex_alloc(); if(!krb_mutex) { return SASL_FAIL; } } if (!srvtab) { utils->getopt(utils->getopt_context, "KERBEROS_V4", "srvtab", &ret, &rl); if (ret == NULL) { ret = KEYFILE; rl = strlen(ret); } srvtab = utils->malloc(sizeof(char) * (rl + 1)); if(!srvtab) { MEMERROR(utils); return SASL_NOMEM; } strcpy(srvtab, ret); } refcount++; /* fail if we can't open the srvtab file */ if (access(srvtab, R_OK) != 0) { utils->log(NULL, SASL_LOG_ERR, "can't access srvtab file %s: %m", srvtab, errno); if(!(--refcount)) { utils->free(srvtab); srvtab=NULL; } return SASL_FAIL; } *out_version = SASL_SERVER_PLUG_VERSION; *pluglist = 
kerberosv4_server_plugins; *plugcount = 1; return SASL_OK; #endif } /***************************** Client Section *****************************/ static int kerberosv4_client_mech_new(void *glob_context __attribute__((unused)), sasl_client_params_t *params, void **conn_context) { return new_text(params->utils, (context_t **) conn_context); } static int kerberosv4_client_mech_step(void *conn_context, sasl_client_params_t *cparams, const char *serverin, unsigned serverinlen, sasl_interact_t **prompt_need, const char **clientout, unsigned *clientoutlen, sasl_out_params_t *oparams) { context_t *text = (context_t *) conn_context; KTEXT_ST authent; int ret; *clientout = NULL; *clientoutlen = 0; authent.length = MAX_KTXT_LEN; switch (text->state) { case 1: { /* We should've just recieved a 32-bit number in network byte order. * We want to reply with an authenticator. */ int result; KTEXT_ST ticket; char *dot; memset(&ticket, 0L, sizeof(ticket)); ticket.length = MAX_KTXT_LEN; if (serverinlen != 4) { text->utils->seterror(text->utils->conn, 0, "server challenge not 4 bytes long"); return SASL_BADPROT; } memcpy(&text->challenge, serverin, 4); text->challenge=ntohl(text->challenge); if (cparams->serverFQDN == NULL) { cparams->utils->log(NULL, SASL_LOG_ERR, "no 'serverFQDN' set"); SETERROR(text->utils, "paramater error"); return SASL_BADPARAM; } if (cparams->service == NULL) { cparams->utils->log(NULL, SASL_LOG_ERR, "no 'service' set"); SETERROR(text->utils, "paramater error"); return SASL_BADPARAM; } KRB_LOCK_MUTEX(cparams->utils); text->realm=krb_realmofhost(cparams->serverFQDN); text->hostname=(char *) cparams->serverFQDN; /* the instance of the principal we're authenticating with */ strncpy (text->instance, krb_get_phost (cparams->serverFQDN), sizeof (text->instance)); /* text->instance is NULL terminated unless it was too long */ text->instance[sizeof(text->instance)-1] = '\0'; /* At some sites, krb_get_phost() sensibly but * atypically returns FQDNs, versus the first component, * which is what we need for RFC2222 section 7.1 */ dot = strchr(text->instance, '.'); if (dot) *dot = '\0'; #ifndef macintosh if ((result = krb_mk_req(&ticket, (char *) cparams->service, text->instance, text->realm, text->challenge))) #else memset(&text->credentials,0,sizeof(text->credentials)); if (kcglue_krb_mk_req(ticket.dat, &ticket.length, cparams->service, text->instance, text->realm, text->challenge, &text->credentials.session, text->credentials.pname, text->credentials.pinst) != 0) #endif { KRB_UNLOCK_MUTEX(cparams->utils); text->utils->seterror(text->utils->conn,SASL_NOLOG, "krb_mk_req() failed"); cparams->utils->log(NULL, SASL_LOG_ERR, "krb_mk_req() failed: %s (%d)", get_krb_err_txt(result), result); return SASL_FAIL; } KRB_UNLOCK_MUTEX(cparams->utils); ret = _plug_buf_alloc(text->utils, &(text->out_buf), &(text->out_buf_len), ticket.length); if (ret != SASL_OK) return ret; memcpy(text->out_buf, ticket.dat, ticket.length); *clientout = text->out_buf; *clientoutlen = ticket.length; text->state = 2; return SASL_CONTINUE; } /* challenge #2 */ case 2: { int need = 0; int musthave = 0; int testnum; int nchal; unsigned char *sout = NULL; unsigned len; unsigned char in[8]; int result; int servermaxbuf; char *buf; int user_result = SASL_OK; /* try to get the authid */ if (text->user == NULL) { user_result = _plug_get_userid(cparams->utils, &text->user, prompt_need); if (user_result != SASL_OK && user_result != SASL_INTERACT) return user_result; } /* free prompts we got */ if (prompt_need && *prompt_need) { 
cparams->utils->free(*prompt_need); *prompt_need = NULL; } /* if there are prompts not filled in */ if (user_result == SASL_INTERACT) { /* make the prompt list */ int result = _plug_make_prompts(cparams->utils, prompt_need, user_result == SASL_INTERACT ? "Please enter your authorization name" : NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); if (result!=SASL_OK) return result; return SASL_INTERACT; } /* must be 8 octets */ if (serverinlen!=8) { SETERROR(cparams->utils, "server response not 8 bytes long"); return SASL_BADAUTH; } memcpy(in, serverin, 8); #ifndef macintosh /* get credentials */ KRB_LOCK_MUTEX(cparams->utils); result = krb_get_cred((char *)cparams->service, text->instance, text->realm, &text->credentials); KRB_UNLOCK_MUTEX(cparams->utils); if(result != 0) { cparams->utils->log(NULL, SASL_LOG_ERR, "krb_get_cred() failed: %s (%d)", get_krb_err_txt(result), result); SETERROR(cparams->utils, "krb_get_cred() failed"); return SASL_BADAUTH; } #endif memcpy(text->session, text->credentials.session, 8); /* make key schedule for encryption and decryption */ des_key_sched(&text->session, text->init_keysched); des_key_sched(&text->session, text->enc_keysched); des_key_sched(&text->session, text->dec_keysched); /* decrypt from server */ des_ecb_encrypt((des_cblock *)in, (des_cblock *)in, text->init_keysched, DES_DECRYPT); /* convert to 32bit int */ testnum = (in[0]*256*256*256)+(in[1]*256*256)+(in[2]*256)+in[3]; /* verify data 1st 4 octets must be equal to chal+1 */ if (testnum != text->challenge+1) { SETERROR(cparams->utils,"server response incorrect"); return SASL_BADAUTH; } /* construct 8 octets * 1-4 - original checksum * 5 - bitmask of sec layer * 6-8 max buffer size */ if (cparams->props.min_ssf > KRB_DES_SECURITY_BITS + cparams->external_ssf) { SETERROR(cparams->utils, "minimum ssf too strong for this mechanism"); return SASL_TOOWEAK; } else if (cparams->props.min_ssf > cparams->props.max_ssf) { SETERROR(cparams->utils, "minimum ssf larger than maximum ssf"); return SASL_BADPARAM; } /* create stuff to send to server */ sout = (char *) cparams->utils->malloc(9+(text->user ? 
strlen(text->user) : 0)+9); if (!sout) { MEMERROR(cparams->utils); return SASL_NOMEM; } nchal = htonl(text->challenge); memcpy(sout, &nchal, 4); /* need bits of layer */ if(cparams->props.maxbufsize == 0) { need = musthave = 0; } else { need = cparams->props.max_ssf - cparams->external_ssf; musthave = cparams->props.min_ssf - cparams->external_ssf; } oparams->decode = &kerberosv4_decode; oparams->encode = &kerberosv4_encode; if ((in[4] & KRB_SECFLAG_ENCRYPTION) && (need>=56) && (musthave <= 56)) { /* encryption */ text->sec_type = KRB_SEC_ENCRYPTION; oparams->mech_ssf = 56; sout[4] = KRB_SECFLAG_ENCRYPTION; /* using encryption layer */ } else if ((in[4] & KRB_SECFLAG_INTEGRITY) && (need >= 1) && (musthave <= 1)) { /* integrity */ text->sec_type = KRB_SEC_INTEGRITY; oparams->mech_ssf=1; sout[4] = KRB_SECFLAG_INTEGRITY; /* using integrity layer */ } else if ((in[4] & KRB_SECFLAG_NONE) && (musthave <= 0)) { /* no layer */ text->sec_type = KRB_SEC_NONE; oparams->encode=NULL; oparams->decode=NULL; oparams->mech_ssf=0; sout[4] = KRB_SECFLAG_NONE; } else { /* Mark that we tried */ oparams->mech_ssf=2; SETERROR(cparams->utils, "unable to agree on layers with server"); return SASL_BADPROT; } servermaxbuf = in[5]*256*256+in[6]*256+in[7]; oparams->maxoutbuf = servermaxbuf; if (oparams->mech_ssf) { /* FIXME: Likely to be too large */ oparams->maxoutbuf -= 50; } if(cparams->props.maxbufsize) { int tmpmaxbuf = ( cparams->props.maxbufsize > 0xFFFFFF ) ? 0xFFFFFF : cparams->props.maxbufsize; sout[5]=((tmpmaxbuf >> 16) & 0xFF); sout[6]=((tmpmaxbuf >> 8) & 0xFF); sout[7]=(tmpmaxbuf & 0xFF); } else { /* let's say we can support up to 64K */ /* no inherent inability with our layers to support more */ sout[5]=0x00; /* max ciphertext buffer size */ sout[6]=0xFF; sout[7]=0xFF; } sout[8] = 0x00; /* just to be safe */ /* append userid */ len = 9; /* 8 + trailing NULL */ if (text->user) { strcpy((char *)sout + 8, text->user); len += strlen(text->user); } /* append 0 based octets so is multiple of 8 */ while(len % 8) { sout[len]=0; len++; } sout[len]=0; des_pcbc_encrypt((des_cblock *)sout, (des_cblock *)sout, len, text->init_keysched, (des_cblock *)text->session, DES_ENCRYPT); result = _plug_buf_alloc(text->utils, &text->out_buf, &text->out_buf_len, len); if (result != SASL_OK) return result; memcpy(text->out_buf, sout, len); *clientout = text->out_buf; *clientoutlen = len; /* nothing more to do; should be authenticated */ if(cparams->iplocalport) { result = ipv4_ipfromstring(cparams->utils, cparams->iplocalport, &(text->ip_local)); if (result != SASL_OK) { /* couldn't get local IP address */ return result; } } if (cparams->ipremoteport) { result = ipv4_ipfromstring(cparams->utils, cparams->ipremoteport, &(text->ip_remote)); if (result != SASL_OK) { /* couldn't get local IP address */ return result; } } buf = cparams->utils->malloc(strlen(text->credentials.pname) + strlen(text->credentials.pinst) + 2); if (!buf) { MEMERROR(cparams->utils); return SASL_NOMEM; } strcpy(buf, text->credentials.pname); if (text->credentials.pinst[0]) { strcat(buf, "."); strcat(buf, text->credentials.pinst); } if (text->user && !text->user[0]) { text->user = NULL; } ret = cparams->canon_user(cparams->utils->conn, buf, 0, SASL_CU_AUTHID, oparams); if (ret != SASL_OK) { cparams->utils->free(buf); cparams->utils->free(sout); return ret; } if (!text->user) { /* 0 in length fields means use strlen() */ ret = cparams->canon_user(cparams->utils->conn, buf, 0, SASL_CU_AUTHZID, oparams); } else { ret = cparams->canon_user(cparams->utils->conn, 
text->user, 0, SASL_CU_AUTHZID, oparams); } cparams->utils->free(buf); oparams->doneflag = 1; oparams->param_version = 0; /* used by layers */ _plug_decode_init(&text->decode_context, text->utils, (cparams->props.maxbufsize > 0xFFFFFF) ? 0xFFFFFF : cparams->props.maxbufsize); if (sout) cparams->utils->free(sout); return SASL_OK; } default: cparams->utils->log(NULL, SASL_LOG_ERR, "Invalid Kerberos client step %d\n", text->state); return SASL_FAIL; } return SASL_FAIL; /* should never get here */ } static const long kerberosv4_required_prompts[] = { SASL_CB_LIST_END }; static sasl_client_plug_t kerberosv4_client_plugins[] = { { "KERBEROS_V4", /* mech_name */ KRB_DES_SECURITY_BITS, /* max_ssf */ SASL_SEC_NOPLAINTEXT | SASL_SEC_NOACTIVE | SASL_SEC_NOANONYMOUS | SASL_SEC_MUTUAL_AUTH, /* security_flags */ SASL_FEAT_NEEDSERVERFQDN | SASL_FEAT_SERVER_FIRST | SASL_FEAT_ALLOWS_PROXY, /* features */ kerberosv4_required_prompts, /* required_prompts */ NULL, /* glob_context */ &kerberosv4_client_mech_new, /* mech_new */ &kerberosv4_client_mech_step, /* mech_step */ &kerberosv4_common_mech_dispose,/* mech_dispose */ &kerberosv4_common_mech_free, /* mech_free */ NULL, /* idle */ NULL, /* spare */ NULL /* spare */ } }; int kerberos4_client_plug_init(const sasl_utils_t *utils, int maxversion, int *out_version, sasl_client_plug_t **pluglist, int *plugcount) { if (maxversion < SASL_CLIENT_PLUG_VERSION) { SETERROR(utils, "Wrong KERBEROS_V4 version"); return SASL_BADVERS; } if(!krb_mutex) { krb_mutex = utils->mutex_alloc(); if(!krb_mutex) { return SASL_FAIL; } } *out_version = SASL_CLIENT_PLUG_VERSION; *pluglist = kerberosv4_client_plugins; *plugcount = 1; refcount++; return SASL_OK; }
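# For reference, the server's second message in the exchange above is 8
# DES-ECB-encrypted octets: octets 0-3 carry challenge+1 in network byte
# order, octet 4 is the bitmask of offered security layers, and octets 5-7
# give the maximum ciphertext buffer size. A small Python sketch of parsing
# the *decrypted* block; `parse_server_block` is illustrative only and not
# part of the plugin:
import struct

KRB_SECFLAG_NONE, KRB_SECFLAG_INTEGRITY, KRB_SECFLAG_ENCRYPTION = 1, 2, 4

def parse_server_block(block, challenge):
    assert len(block) == 8
    (testnum,) = struct.unpack('!I', block[:4])  # big-endian uint32
    if testnum != challenge + 1:
        raise ValueError('server response incorrect')
    secflags = ord(block[4:5])                   # bitmask of offered layers
    maxbuf = (ord(block[5:6]) << 16) | (ord(block[6:7]) << 8) | ord(block[7:8])
    return secflags, maxbuf

flags, maxbuf = parse_server_block(struct.pack('!IBBBB', 42 + 1, 7, 0, 0xFF, 0xFF), 42)
assert (flags & KRB_SECFLAG_ENCRYPTION) and maxbuf == 0xFFFF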
package org.mage.test.cards.single.m21;

import mage.constants.PhaseStep;
import mage.constants.Zone;
import org.junit.Test;
import org.mage.test.serverside.base.CardTestPlayerBase;

public class DiscontinuityTest extends CardTestPlayerBase {

    @Test
    public void testCostReduction() {
        addCard(Zone.BATTLEFIELD, playerA, "Island", 2);
        // As long as it's your turn, this spell costs {2}{U}{U} less to cast.
        addCard(Zone.HAND, playerA, "Discontinuity");

        castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Discontinuity");

        setStrictChooseMode(true);
        setStopAt(1, PhaseStep.END_TURN);
        execute();
        assertAllCommandsUsed();

        assertTappedCount("Island", true, 2);
    }
}
// BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE

#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_C("src/cpu-kernels/awkward_index_rpad_and_clip_axis1.cpp", line)

#include "awkward/kernels.h"

template <typename T>
ERROR awkward_index_rpad_and_clip_axis1(
  T* tostarts,
  T* tostops,
  int64_t target,
  int64_t length) {
  int64_t offset = 0;
  for (int64_t i = 0; i < length; i++) {
    tostarts[i] = offset;
    offset = offset + target;
    tostops[i] = offset;
  }
  return success();
}

ERROR awkward_index_rpad_and_clip_axis1_64(
  int64_t* tostarts,
  int64_t* tostops,
  int64_t target,
  int64_t length) {
  return awkward_index_rpad_and_clip_axis1<int64_t>(
    tostarts,
    tostops,
    target,
    length);
}
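# The kernel above just fills two index arrays with a regular stride. A
# minimal NumPy sketch of the same computation; the Python function name is
# illustrative and not part of the kernel's API:
import numpy as np

def index_rpad_and_clip_axis1(target, length):
    """Mirror of the C++ kernel: starts/stops for `length` sublists,
    each padded/clipped to exactly `target` elements."""
    tostarts = np.arange(length, dtype=np.int64) * target
    tostops = tostarts + target
    return tostarts, tostops

# e.g. target=3, length=4 -> starts [0, 3, 6, 9], stops [3, 6, 9, 12]
print(index_rpad_and_clip_axis1(3, 4))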
import li_boost_shared_ptr_attribute.*;

public class li_boost_shared_ptr_attribute_runme {
  static {
    try {
      System.loadLibrary("li_boost_shared_ptr_attribute");
    } catch (UnsatisfiedLinkError e) {
      System.err.println("Native code library failed to load. See the chapter on Dynamic Linking Problems in the SWIG Java documentation for help.\n" + e);
      System.exit(1);
    }
  }

  public static void check(GetSetMe g, int expected) {
    int got = g.getN();
    if (got != expected)
      throw new RuntimeException("GetSetMe value is " + got + " but should be " + expected);
  }

  public static void check(GetMe g, int expected) {
    int got = g.getN();
    if (got != expected)
      throw new RuntimeException("GetMe value is " + got + " but should be " + expected);
  }

  public static void main(String argv[]) {
    GetterSetter gs = new GetterSetter(5);
    check(gs.getMyval(), 25);
    check(gs.getAddedAttrib(), 25);

    gs.setAddedAttrib(new GetSetMe(6));
    check(gs.getMyval(), 6);
    check(gs.getAddedAttrib(), 6);

    GetterOnly g = new GetterOnly(4);
    check(g.getMyval(), 16);
    check(g.getAddedAttrib(), 16);
  }
}
""" Copyright (c) 2020 Huawei Technologies Co.,Ltd. openGauss is licensed under Mulan PSL v2. You can use this software according to the terms and conditions of the Mulan PSL v2. You may obtain a copy of Mulan PSL v2 at: http://license.coscl.org.cn/MulanPSL2 THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. See the Mulan PSL v2 for more details. """ import math from collections import Counter def distance(str1, str2): c_1 = Counter(str1) c_2 = Counter(str2) c_union = set(c_1).union(c_2) dot_product = sum(c_1.get(item, 0) * c_2.get(item, 0) for item in c_union) mag_c1 = math.sqrt(sum(c_1.get(item, 0)**2 for item in c_union)) mag_c2 = math.sqrt(sum(c_2.get(item, 0)**2 for item in c_union)) return dot_product / (mag_c1 * mag_c2)
# =========================================================================
#
#  Copyright NumFOCUS
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0.txt
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
# =========================================================================

import SimpleITK as sitk

import sys
import time
import os
import numpy as np

pixel_dtypes = {"int16": np.int16, "float64": np.float64}


def writeSlices(series_tag_values, new_img, out_dir, i):
    image_slice = new_img[:, :, i]

    # Tags shared by the series.
    list(map(lambda tag_value: image_slice.SetMetaData(tag_value[0], tag_value[1]),
             series_tag_values))

    # Slice specific tags.
    #   Instance Creation Date
    image_slice.SetMetaData("0008|0012", time.strftime("%Y%m%d"))
    #   Instance Creation Time
    image_slice.SetMetaData("0008|0013", time.strftime("%H%M%S"))

    # Setting the type to CT so that the slice location is preserved and
    # the thickness is carried over.
    image_slice.SetMetaData("0008|0060", "CT")

    # (0020, 0032) image position patient determines the 3D spacing between
    # slices.
    #   Image Position (Patient)
    image_slice.SetMetaData("0020|0032", '\\'.join(
        map(str, new_img.TransformIndexToPhysicalPoint((0, 0, i)))))
    #   Instance Number
    image_slice.SetMetaData("0020|0013", str(i))

    # Write to the output directory and add the extension dcm, to force
    # writing in DICOM format.
    writer.SetFileName(os.path.join(out_dir, str(i) + '.dcm'))
    writer.Execute(image_slice)


if len(sys.argv) < 3:
    print("Usage: python " + __file__ + " <output_directory> ["
          + ", ".join(pixel_dtypes) + "]")
    sys.exit(1)

# Create a new series from a numpy array
try:
    pixel_dtype = pixel_dtypes[sys.argv[2]]
except KeyError:
    pixel_dtype = pixel_dtypes["int16"]

new_arr = np.random.uniform(-10, 10, size=(3, 4, 5)).astype(pixel_dtype)
new_img = sitk.GetImageFromArray(new_arr)
new_img.SetSpacing([2.5, 3.5, 4.5])

# Write the 3D image as a series
# IMPORTANT: There are many DICOM tags that need to be updated when you modify
#            an original image. This is a delicate operation and requires
#            knowledge of the DICOM standard. This example only modifies some.
#            For a more complete list of tags that need to be modified see:
#                  http://gdcm.sourceforge.net/wiki/index.php/Writing_DICOM
#            If it is critical for your work to generate valid DICOM files,
#            it is recommended to use David Clunie's Dicom3tools to validate
#            the files:
#                  http://www.dclunie.com/dicom3tools.html

writer = sitk.ImageFileWriter()
# Use the study/series/frame of reference information given in the meta-data
# dictionary and not the automatically generated information from the file IO
writer.KeepOriginalImageUIDOn()

modification_time = time.strftime("%H%M%S")
modification_date = time.strftime("%Y%m%d")

# Copy some of the tags and add the relevant tags indicating the change.
# For the series instance UID (0020|000e), each of the components is a number,
# cannot start with zero, and separated by a '.' We create a unique series ID
# using the date and time. Tags of interest:
direction = new_img.GetDirection()
series_tag_values = [
    ("0008|0031", modification_time),  # Series Time
    ("0008|0021", modification_date),  # Series Date
    ("0008|0008", "DERIVED\\SECONDARY"),  # Image Type
    ("0020|000e", "1.2.826.0.1.3680043.2.1125." + modification_date
     + ".1" + modification_time),  # Series Instance UID
    ("0020|0037", '\\'.join(map(str, (direction[0], direction[3], direction[6],
                                      direction[1], direction[4],
                                      direction[7])))),  # Image Orientation
                                                         # (Patient)
    ("0008|103e", "Created-SimpleITK")  # Series Description
]

if pixel_dtype == np.float64:
    # If we want to write floating point values, we need to use the rescale
    # slope, "0028|1053", to select the number of digits we want to keep. We
    # also need to specify additional pixel storage and representation
    # information.
    rescale_slope = 0.001  # keep three digits after the decimal point
    series_tag_values = series_tag_values + [
        ('0028|1053', str(rescale_slope)),  # rescale slope
        ('0028|1052', '0'),   # rescale intercept
        ('0028|0100', '16'),  # bits allocated
        ('0028|0101', '16'),  # bits stored
        ('0028|0102', '15'),  # high bit
        ('0028|0103', '1')]   # pixel representation

# Write slices to output directory
list(map(lambda i: writeSlices(series_tag_values, new_img, sys.argv[1], i),
         range(new_img.GetDepth())))

# Re-read the series
# Read the original series. First obtain the series file names using the
# image series reader.
data_directory = sys.argv[1]
series_IDs = sitk.ImageSeriesReader.GetGDCMSeriesIDs(data_directory)
if not series_IDs:
    print("ERROR: given directory \"" + data_directory
          + "\" does not contain a DICOM series.")
    sys.exit(1)
series_file_names = sitk.ImageSeriesReader.GetGDCMSeriesFileNames(
    data_directory, series_IDs[0])

series_reader = sitk.ImageSeriesReader()
series_reader.SetFileNames(series_file_names)

# Configure the reader to load all of the DICOM tags (public+private):
# By default tags are not loaded (saves time).
# By default if tags are loaded, the private tags are not loaded.
# We explicitly configure the reader to load tags, including the
# private ones.
series_reader.LoadPrivateTagsOn()
image3D = series_reader.Execute()
print(image3D.GetSpacing(), 'vs', new_img.GetSpacing())
sys.exit(0)
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_UI_WEBUI_READ_LATER_READ_LATER_UI_H_
#define CHROME_BROWSER_UI_WEBUI_READ_LATER_READ_LATER_UI_H_

#include <memory>

#include "base/macros.h"
#include "chrome/browser/ui/webui/read_later/read_later.mojom.h"
#include "chrome/browser/ui/webui/webui_load_timer.h"
#include "mojo/public/cpp/bindings/pending_receiver.h"
#include "mojo/public/cpp/bindings/pending_remote.h"
#include "mojo/public/cpp/bindings/receiver.h"
#include "ui/webui/mojo_bubble_web_ui_controller.h"

class ReadLaterPageHandler;

class ReadLaterUI : public ui::MojoBubbleWebUIController,
                    public read_later::mojom::PageHandlerFactory {
 public:
  explicit ReadLaterUI(content::WebUI* web_ui);
  ReadLaterUI(const ReadLaterUI&) = delete;
  ReadLaterUI& operator=(const ReadLaterUI&) = delete;
  ~ReadLaterUI() override;

  // Instantiates the implementor of the mojom::PageHandlerFactory mojo
  // interface passing the pending receiver that will be internally bound.
  void BindInterface(
      mojo::PendingReceiver<read_later::mojom::PageHandlerFactory> receiver);

 private:
  // read_later::mojom::PageHandlerFactory:
  void CreatePageHandler(
      mojo::PendingRemote<read_later::mojom::Page> page,
      mojo::PendingReceiver<read_later::mojom::PageHandler> receiver) override;

  std::unique_ptr<ReadLaterPageHandler> page_handler_;

  mojo::Receiver<read_later::mojom::PageHandlerFactory> page_factory_receiver_{
      this};

  WebuiLoadTimer webui_load_timer_;

  WEB_UI_CONTROLLER_TYPE_DECL();
};

#endif  // CHROME_BROWSER_UI_WEBUI_READ_LATER_READ_LATER_UI_H_
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_SPEECH_SPEECH_RECOGNITION_CLIENT_BROWSER_INTERFACE_FACTORY_H_
#define CHROME_BROWSER_SPEECH_SPEECH_RECOGNITION_CLIENT_BROWSER_INTERFACE_FACTORY_H_

#include "components/keyed_service/content/browser_context_keyed_service_factory.h"

class Profile;

namespace base {
template <class T>
class NoDestructor;
}  // namespace base

namespace speech {
class SpeechRecognitionClientBrowserInterface;
}  // namespace speech

// Factory to get or create an instance of
// SpeechRecognitionClientBrowserInterface from a Profile.
class SpeechRecognitionClientBrowserInterfaceFactory
    : public BrowserContextKeyedServiceFactory {
 public:
  static speech::SpeechRecognitionClientBrowserInterface* GetForProfile(
      Profile* profile);

 private:
  friend class base::NoDestructor<
      SpeechRecognitionClientBrowserInterfaceFactory>;

  static SpeechRecognitionClientBrowserInterfaceFactory* GetInstance();

  SpeechRecognitionClientBrowserInterfaceFactory();
  ~SpeechRecognitionClientBrowserInterfaceFactory() override;

  // BrowserContextKeyedServiceFactory:
  KeyedService* BuildServiceInstanceFor(
      content::BrowserContext* context) const override;
  content::BrowserContext* GetBrowserContextToUse(
      content::BrowserContext* context) const override;
};

#endif  // CHROME_BROWSER_SPEECH_SPEECH_RECOGNITION_CLIENT_BROWSER_INTERFACE_FACTORY_H_
462
335
{ "word": "Token", "definitions": [ "A thing serving as a visible or tangible representation of a fact, quality, feeling, etc.", "A badge or favour worn to indicate allegiance to a particular person or party.", "A word or object conferring authority on or serving to authenticate the speaker or holder.", "A staff or other object given to a train driver on a single-track railway as authority to proceed over a given section of line.", "A voucher that can be exchanged for goods or services, typically one given as a gift or forming part of a promotional offer.", "A metal or plastic disc used to operate a machine or in exchange for particular goods or services.", "An individual occurrence of a linguistic unit in speech or writing.", "The smallest meaningful unit of information in a sequence of data for a compiler.", "A sequence of bits passed continuously between nodes in a fixed order and enabling a node to transmit information.", "A member of a minority group included in an otherwise homogeneous set of people in order to give the appearance of diversity." ], "parts-of-speech": "Noun" }
326
6,089
{ "name": "@react-icons/all-files", "version": "4.3.1", "description": "SVG React icons of popular icon packs using ES6 imports", "author": "<NAME>", "contributors": [ "kamijin_fanta <<EMAIL>>" ], "license": "MIT", "main": "lib", "types": "./lib/esm/index.d.ts", "sideEffects": false, "repository": { "type": "git", "url": "git+ssh://[email protected]:react-icons/react-icons.git" }, "bugs": { "url": "https://github.com/react-icons/react-icons/issues" }, "homepage": "https://github.com/react-icons/react-icons#readme", "peerDependencies": { "react": "*" } }
255
488
#ifndef FIXUP_FORTRAN_ARRAY_VS_FUNCTION_REFERENCES_H
#define FIXUP_FORTRAN_ARRAY_VS_FUNCTION_REFERENCES_H

// DQ (11/24/2007):
/*! \brief Fixup Fortran array vs. function references.

    Fortran references to arrays and functions cannot be easily disambiguated
    until after the whole program has been seen; for example, in "x = foo(10)"
    the reference foo(10) may be either an array element access or a function
    call. Part of this is because functions may not be defined until after
    they are called. To support this analysis, we build a special IR node
    (SgUnknownArrayOrFunctionReference) when a function call cannot be
    resolved to a function symbol in the symbol table.

    There may in the future be more complex scenarios where we build the
    SgUnknownArrayOrFunctionReference IR nodes and defer the evaluation of a
    reference (as an array or a function) until post-processing.

    \implementation This analysis is required only for Fortran support.
 */
void fixupFortranReferences ( SgNode* node );

class FixupFortranReferences : public AstSimpleProcessing
   {
     public:
          void visit ( SgNode* node );
   };

// endif for FIXUP_FORTRAN_ARRAY_VS_FUNCTION_REFERENCES_H
#endif
350
1,467
/*
 * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License").
 * You may not use this file except in compliance with the License.
 * A copy of the License is located at
 *
 *  http://aws.amazon.com/apache2.0
 *
 * or in the "license" file accompanying this file. This file is distributed
 * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied. See the License for the specific language governing
 * permissions and limitations under the License.
 */

package software.amazon.awssdk.enhanced.dynamodb.converters;

import java.time.Instant;
import software.amazon.awssdk.annotations.Immutable;
import software.amazon.awssdk.annotations.ThreadSafe;
import software.amazon.awssdk.enhanced.dynamodb.AttributeConverter;
import software.amazon.awssdk.enhanced.dynamodb.AttributeValueType;
import software.amazon.awssdk.enhanced.dynamodb.EnhancedType;
import software.amazon.awssdk.services.dynamodb.model.AttributeValue;

/**
 * A test converter that stores an {@link Instant} as its epoch-millisecond value before writing to DDB.
 */
@ThreadSafe
@Immutable
public final class EpochMillisFormatTestConverter implements AttributeConverter<Instant> {
    public EpochMillisFormatTestConverter() {
    }

    public static EpochMillisFormatTestConverter create() {
        return new EpochMillisFormatTestConverter();
    }

    @Override
    public EnhancedType<Instant> type() {
        return EnhancedType.of(Instant.class);
    }

    @Override
    public AttributeValueType attributeValueType() {
        return AttributeValueType.S;
    }

    @Override
    public AttributeValue transformFrom(Instant input) {
        return AttributeValue.builder().n(String.valueOf(input.toEpochMilli())).build();
    }

    @Override
    public Instant transformTo(AttributeValue input) {
        return Instant.ofEpochMilli(Long.parseLong(input.n()));
    }
}
617
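The converter above serializes an Instant as its epoch-millisecond count. As a conceptual analogue only (Python and datetime instead of Java and Instant; not part of the AWS SDK), the encoding and decoding are symmetric:

from datetime import datetime, timezone

def transform_from(instant: datetime) -> str:
    # Same idea as transformFrom above: Instant -> epoch milliseconds, as a string.
    return str(int(instant.timestamp() * 1000))

def transform_to(stored: str) -> datetime:
    # Same idea as transformTo above: epoch milliseconds -> Instant.
    return datetime.fromtimestamp(int(stored) / 1000, tz=timezone.utc)

t = datetime(2021, 1, 1, tzinfo=timezone.utc)
assert transform_to(transform_from(t)) == t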
4,601
<filename>examples/basic/ios/VideoPlayer-Bridging-Header.h // // VideoPlayer-Bridging-Header.h // VideoPlayer // // Created by <NAME> on 2/1/22. // #ifndef VideoPlayer_Bridging_Header_h #define VideoPlayer_Bridging_Header_h #endif /* VideoPlayer_Bridging_Header_h */
105
316
import numpy as np
import glob
import re
import csv

from ssd_data import BaseGTUtility

from thirdparty.get_image_size import get_image_size


class GTUtility(BaseGTUtility):
    """Utility for RoboTT-Net dataset.

    # Arguments
        data_path: Path to ground truth and image data.
    """
    def __init__(self, data_path):
        self.data_path = data_path
        self.image_path = data_path
        self.gt_path = data_path
        self.classes = ['Background', 'Lot 0', 'Lot 90', 'Lot 180', 'Lot -90']
        self.classes_lower = [s.lower() for s in self.classes]
        self.num_classes = len(self.classes)

        self.image_names = []
        self.data = []
        for filename in sorted(glob.glob(data_path+'**/*_location.csv', recursive=True)):
            image_path = re.sub(r'_location.csv$', '.jpg', filename)
            image_name = image_path[len(data_path):]
            boxes = []
            with open(filename, newline='') as f:
                reader = csv.reader(f, delimiter=';')
                header = next(reader)
                box = next(reader, None)
                if box:
                    # class index is one-based, class 0 is 'Background'
                    class_idx = ['0', '90', '180', '-90'].index(box[0]) + 1
                    image_size = get_image_size(image_path)
                    img_width, img_height = image_size
                    # normalize pixel coordinates to [0, 1]; the CSV stores y before x
                    xmin = float(box[2]) / img_width
                    ymin = float(box[1]) / img_height
                    xmax = float(box[4]) / img_width
                    ymax = float(box[3]) / img_height
                    box = [xmin, ymin, xmax, ymax, class_idx]
                    boxes.append(box)
            # keep only images that contain at least one box
            if len(boxes) == 0:
                continue
            boxes = np.asarray(boxes)
            self.image_names.append(image_name)
            self.data.append(boxes)
1,092
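For reference, the loader above turns one CSV row into a normalized box [xmin, ymin, xmax, ymax, class_idx], where coordinates are divided by the image size and the class index is one-based because class 0 is 'Background'. A small sketch of just that transform (Python, column order copied from the code above; the sample row and function name are invented):

# Hedged sketch of the normalization done in GTUtility above.
# Columns follow the code: row[1]/row[2] are ymin/xmin, row[3]/row[4] ymax/xmax.
def normalize_box(csv_row, img_width, img_height):
    class_idx = ['0', '90', '180', '-90'].index(csv_row[0]) + 1  # 0 is background
    xmin = float(csv_row[2]) / img_width
    ymin = float(csv_row[1]) / img_height
    xmax = float(csv_row[4]) / img_width
    ymax = float(csv_row[3]) / img_height
    return [xmin, ymin, xmax, ymax, class_idx]

print(normalize_box(['90', '10', '20', '110', '220'], 640, 480))
# -> [0.03125, 0.0208..., 0.34375, 0.2291..., 2]  (class 2 is 'Lot 90')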
854
<reponame>rakhi2001/ecom7<filename>Java/1191.java<gh_stars>100-1000 __________________________________________________________________________________________________ sample 4 ms submission class Solution { public int kConcatenationMaxSum(int[] arr, int k) { long l=0,r=0,sum=0,total=0,tmp=0; for(int i=0;i<arr.length;i++){ tmp+=arr[i]; r=Math.max(tmp,r); } tmp=0; for(int i=arr.length-1;i>=0;i--){ tmp+=arr[i]; l=Math.max(tmp,l); } tmp=0; for(int i=0;i<arr.length;i++){ tmp+=arr[i]; if(tmp<0) tmp=0; sum=Math.max(sum,tmp); } for(int i=0;i<arr.length;i++){ total+=arr[i]; } int mod=1000000007; if(k==1) return (int)sum; else if(k==2) return (int)(Math.max(sum,l+r)%mod); else{ if(total>0) return (int)((l+r+(k-2)*total)%mod); else return (int)(Math.max(sum,l+r)%mod); } } } __________________________________________________________________________________________________ sample 5 ms submission class Solution { public int kConcatenationMaxSum(int[] arr, int k) { long sum = 0l; for (int i : arr) sum += i; if (k == 1) return getMaxSubarr(arr); if (sum <= 0) { int[] doubleArr = doubleArray(arr); return getMaxSubarr(doubleArr); } else { int maxFromStart = 0, maxFromEnd = 0, accu = 0; for (int i = 0; i < arr.length; i++) { accu += arr[i]; maxFromStart = Math.max(maxFromStart, accu); } accu = 0; for (int i = arr.length - 1; i >= 0; i--) { accu += arr[i]; maxFromEnd = Math.max(maxFromEnd, accu); } long res = sum * (k - 2) + maxFromStart + maxFromEnd; return (int)(res % 1000000007); } } private int[] doubleArray(int[] arr) { int[] b = new int[arr.length * 2]; System.arraycopy(arr, 0, b, 0, arr.length); System.arraycopy(arr, 0, b, arr.length, arr.length); return b; } private int getMaxSubarr(int[] arr) { int max = 0, accu = 0; for (int i : arr) { accu = Math.max(i, i + accu); max = Math.max(max, accu); } return max; } } __________________________________________________________________________________________________
1,373
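Both submissions above implement the same case analysis: k == 1 reduces to a Kadane scan over one copy; for larger k, compare the best in-copy sum, the best suffix-plus-prefix across one copy boundary, and, when the array total is positive, suffix + (k - 2) whole copies + prefix, all modulo 1e9+7. A compact restatement of that logic (a Python sketch mirroring the 4 ms solution, not a replacement for it):

MOD = 1_000_000_007

def k_concatenation_max_sum(arr, k):
    total = sum(arr)
    # Best subarray within one copy (Kadane, floored at 0 for the empty subarray).
    best = acc = 0
    for x in arr:
        acc = max(x, acc + x)
        best = max(best, acc)
    if k == 1:
        return best % MOD
    # Best prefix sum and best suffix sum of a single copy.
    prefix = suffix = acc = 0
    for x in arr:
        acc += x
        prefix = max(prefix, acc)
    acc = 0
    for x in reversed(arr):
        acc += x
        suffix = max(suffix, acc)
    if total > 0:
        # Bridge the first and last copies, absorbing the k - 2 copies between.
        return (suffix + prefix + (k - 2) * total) % MOD
    return max(best, suffix + prefix) % MOD

assert k_concatenation_max_sum([1, 2], 3) == 9
assert k_concatenation_max_sum([1, -2, 1], 5) == 2
assert k_concatenation_max_sum([-1, -2], 7) == 0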
937
package cyclops.function;

import java.util.function.Consumer;
import java.util.function.Function;

/**
 * A FunctionalInterface for side-effecting statements that accept 3 inputs (with no result).
 * The three-arity specialization of {@link Consumer}.
 *
 * @author johnmcclean
 *
 * @param <S1> Type of first input parameter
 * @param <S2> Type of second input parameter
 * @param <S3> Type of third input parameter
 */
@FunctionalInterface
public interface Consumer3<S1, S2, S3> {

    /**
     * Performs operation with input parameters
     *
     * @param a the first input parameter
     * @param b the second input parameter
     * @param c the third input parameter
     */
    void accept(S1 a, S2 b, S3 c);

    /**
     * Partially apply the first input parameter to this C3
     *
     * @param s the first input parameter
     * @return A curried function that returns a Consumer
     */
    default Function<S2, Consumer<S3>> apply(final S1 s) {
        return CurryConsumer.curryC3(this)
                            .apply(s);
    }

    /**
     * Partially apply the first and second input parameters to this C3
     *
     * @param s the first input parameter
     * @param s2 the second input parameter
     * @return A Consumer that accepts the third parameter
     */
    default Consumer<S3> apply(final S1 s, final S2 s2) {
        return CurryConsumer.curryC3(this)
                            .apply(s)
                            .apply(s2);
    }
}
573
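The two apply overloads above are manual currying: fixing one or two leading arguments yields a consumer of the remainder. The same shape as a Python sketch (a conceptual analogue; curry_c3 is a made-up name, not the cyclops CurryConsumer API):

def curry_c3(consumer3):
    # consumer3: a side-effecting function of three arguments.
    # Returns a -> (b -> (c -> None)), mirroring CurryConsumer.curryC3.
    return lambda a: lambda b: lambda c: consumer3(a, b, c)

log3 = lambda a, b, c: print(a, b, c)
partially_applied = curry_c3(log3)("first")      # like apply(s)
consumer_of_third = partially_applied("second")  # like apply(s, s2)
consumer_of_third("third")                       # prints: first second third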
716
<gh_stars>100-1000 // Copyright (c) 2022 The Orbit Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef ORBIT_GL_OPEN_GL_TEXT_RENDERER_H_ #define ORBIT_GL_OPEN_GL_TEXT_RENDERER_H_ #include <GteVector.h> #include <freetype-gl/mat4.h> #include <freetype-gl/texture-atlas.h> #include <freetype-gl/texture-font.h> #include <freetype-gl/vec234.h> #include <glad/glad.h> #include <stddef.h> #include <stdint.h> #include <map> #include <unordered_map> #include <vector> #include "CoreMath.h" #include "PickingManager.h" #include "PrimitiveAssembler.h" #include "TextRenderer.h" namespace ftgl { struct vertex_buffer_t; struct texture_font_t; } // namespace ftgl namespace orbit_gl { // OpenGl implementation of the TextRenderer. class OpenGlTextRenderer : public TextRenderer { public: explicit OpenGlTextRenderer(); ~OpenGlTextRenderer(); void Init() override; void Clear() override; void RenderLayer(float layer) override; void RenderDebug(PrimitiveAssembler* primitive_assembler) override; [[nodiscard]] std::vector<float> GetLayers() const override; void AddText(const char* text, float x, float y, float z, TextFormatting formatting) override; void AddText(const char* text, float x, float y, float z, TextFormatting formatting, Vec2* out_text_pos, Vec2* out_text_size) override; float AddTextTrailingCharsPrioritized(const char* text, float x, float y, float z, TextFormatting formatting, size_t trailing_chars_length) override; [[nodiscard]] float GetStringWidth(const char* text, uint32_t font_size) override; [[nodiscard]] float GetStringHeight(const char* text, uint32_t font_size) override; protected: void AddTextInternal(const char* text, ftgl::vec2* pen, const TextFormatting& formatting, float z, ftgl::vec2* out_text_pos = nullptr, ftgl::vec2* out_text_size = nullptr); [[nodiscard]] int GetStringWidthScreenSpace(const char* text, uint32_t font_size); [[nodiscard]] int GetStringHeightScreenSpace(const char* text, uint32_t font_size); [[nodiscard]] ftgl::texture_font_t* GetFont(uint32_t size); [[nodiscard]] ftgl::texture_glyph_t* MaybeLoadAndGetGlyph(ftgl::texture_font_t* self, const char* character); void DrawOutline(PrimitiveAssembler* primitive_assembler, ftgl::vertex_buffer_t* buffer); private: ftgl::texture_atlas_t* texture_atlas_; // Indicates when a change to the texture atlas occurred so that we have to reupload the // texture data. Only freetype-gl's texture_font_load_glyph modifies the texture atlas, // so we need to set this to true when and only when we call that function. bool texture_atlas_changed_; std::unordered_map<float, ftgl::vertex_buffer_t*> vertex_buffers_by_layer_; std::map<uint32_t, ftgl::texture_font_t*> fonts_by_size_; GLuint shader_; ftgl::mat4 model_; ftgl::mat4 view_; ftgl::mat4 projection_; ftgl::vec2 pen_; bool initialized_; }; } // namespace orbit_gl #endif // ORBIT_GL_OPEN_GL_TEXT_RENDERER_H_
1,240
2,151
// Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/policy/default_geolocation_policy_handler.h" #include "base/memory/ptr_util.h" #include "base/values.h" #include "components/arc/arc_prefs.h" #include "components/content_settings/core/common/content_settings.h" #include "components/policy/core/browser/configuration_policy_pref_store.h" #include "components/policy/core/browser/configuration_policy_pref_store_test.h" #include "components/policy/core/common/policy_map.h" #include "components/policy/core/common/policy_types.h" #include "components/policy/policy_constants.h" #include "testing/gtest/include/gtest/gtest.h" namespace policy { class DefaultGeolocationPolicyHandlerTest : public ConfigurationPolicyPrefStoreTest { void SetUp() override { handler_list_.AddHandler(base::WrapUnique<ConfigurationPolicyHandler>( new DefaultGeolocationPolicyHandler)); } }; TEST_F(DefaultGeolocationPolicyHandlerTest, AllowGeolocation) { // DefaultGeolocationSetting of CONTENT_SETTING_ALLOW (AllowGeolocation) // should not translate to the ArcLocationServiceEnabled preference. EXPECT_FALSE( store_->GetValue(arc::prefs::kArcLocationServiceEnabled, nullptr)); PolicyMap policy; policy.Set(key::kDefaultGeolocationSetting, POLICY_LEVEL_MANDATORY, POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD, base::WrapUnique(new base::Value(CONTENT_SETTING_ALLOW)), nullptr); UpdateProviderPolicy(policy); EXPECT_FALSE( store_->GetValue(arc::prefs::kArcLocationServiceEnabled, nullptr)); } TEST_F(DefaultGeolocationPolicyHandlerTest, BlockGeolocation) { // DefaultGeolocationSetting of CONTENT_SETTING_BLOCK (BlockGeolocation) // should set the ArcLocationServiceEnabled preference to false. EXPECT_FALSE( store_->GetValue(arc::prefs::kArcLocationServiceEnabled, nullptr)); PolicyMap policy; policy.Set(key::kDefaultGeolocationSetting, POLICY_LEVEL_MANDATORY, POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD, base::WrapUnique(new base::Value(CONTENT_SETTING_BLOCK)), nullptr); UpdateProviderPolicy(policy); const base::Value* value = nullptr; EXPECT_TRUE(store_->GetValue(arc::prefs::kArcLocationServiceEnabled, &value)); EXPECT_TRUE(base::Value(false).Equals(value)); } TEST_F(DefaultGeolocationPolicyHandlerTest, AskGeolocation) { // DefaultGeolocationSetting of CONTENT_SETTING_ASK (AskGeolocation) should // not translate to the ArcLocationServiceEnabled preference. EXPECT_FALSE( store_->GetValue(arc::prefs::kArcLocationServiceEnabled, nullptr)); PolicyMap policy; policy.Set(key::kDefaultGeolocationSetting, POLICY_LEVEL_MANDATORY, POLICY_SCOPE_USER, POLICY_SOURCE_CLOUD, base::WrapUnique(new base::Value(CONTENT_SETTING_ASK)), nullptr); UpdateProviderPolicy(policy); EXPECT_FALSE( store_->GetValue(arc::prefs::kArcLocationServiceEnabled, nullptr)); } } // namespace policy
1,072
956
<gh_stars>100-1000
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2015 6WIND S.A.
 * Copyright 2015 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_UTILS_H_
#define RTE_PMD_MLX5_UTILS_H_

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <limits.h>
#include <errno.h>

#include <rte_spinlock.h>
#include <rte_rwlock.h>
#include <rte_memory.h>
#include <rte_bitmap.h>

#include <mlx5_common.h>

#include "mlx5_defs.h"

/* Convert a bit number to the corresponding 64-bit mask */
#define MLX5_BITSHIFT(v) (UINT64_C(1) << (v))

/* Save and restore errno around argument evaluation. */
#define ERRNO_SAFE(x) ((errno = (int []){ errno, ((x), 0) }[0]))

extern int mlx5_logtype;

/* Generic printf()-like logging macro with automatic line feed. */
#define DRV_LOG(level, ...) \
	PMD_DRV_LOG_(level, mlx5_logtype, MLX5_DRIVER_NAME, \
		__VA_ARGS__ PMD_DRV_LOG_STRIP PMD_DRV_LOG_OPAREN, \
		PMD_DRV_LOG_CPAREN)

/* Convenience macros for accessing mbuf fields. */
#define NEXT(m) ((m)->next)
#define DATA_LEN(m) ((m)->data_len)
#define PKT_LEN(m) ((m)->pkt_len)
#define DATA_OFF(m) ((m)->data_off)
#define SET_DATA_OFF(m, o) ((m)->data_off = (o))
#define NB_SEGS(m) ((m)->nb_segs)
#define PORT(m) ((m)->port)

/* Transpose flags. Useful to convert IBV to DPDK flags. */
#define TRANSPOSE(val, from, to) \
	(((from) >= (to)) ? \
	 (((val) & (from)) / ((from) / (to))) : \
	 (((val) & (from)) * ((to) / (from))))

/*
 * For data linked with a sequentially increasing index, an array table is
 * more efficient than a hash table when a single data entry needs to be
 * searched among large numbers of entries. Traditional hash tables have a
 * fixed table size, so when huge numbers of entries are saved to the hash
 * table, many hash conflicts also occur.
 *
 * A simple array table also has a fixed size, and allocating all the needed
 * memory at once wastes lots of memory. When the exact number of entries is
 * not known, it is impossible to size the array up front.
 *
 * A multiple level table helps to balance these two disadvantages.
 * A global high level table with sub table entries is allocated first;
 * the global table contains the sub table entries, and a sub table is
 * allocated only once the corresponding index entry needs to be saved.
 * e.g. for an up-to-32-bits index, a three level table with 10-10-12
 * splitting and a sequentially increasing index grows its memory with
 * every 4K entries.
 *
 * The current implementation introduces a 10-10-12 32-bits splitting
 * Three-Level table to help the cases which have millions of entries to
 * save. The index entries can be addressed directly by the index, no
 * search is needed.
 */

/* L3 table global table define. */
#define MLX5_L3T_GT_OFFSET 22
#define MLX5_L3T_GT_SIZE (1 << 10)
#define MLX5_L3T_GT_MASK (MLX5_L3T_GT_SIZE - 1)

/* L3 table middle table define. */
#define MLX5_L3T_MT_OFFSET 12
#define MLX5_L3T_MT_SIZE (1 << 10)
#define MLX5_L3T_MT_MASK (MLX5_L3T_MT_SIZE - 1)

/* L3 table entry table define. */
#define MLX5_L3T_ET_OFFSET 0
#define MLX5_L3T_ET_SIZE (1 << 12)
#define MLX5_L3T_ET_MASK (MLX5_L3T_ET_SIZE - 1)

/* L3 table type. */
enum mlx5_l3t_type {
	MLX5_L3T_TYPE_WORD = 0,
	MLX5_L3T_TYPE_DWORD,
	MLX5_L3T_TYPE_QWORD,
	MLX5_L3T_TYPE_PTR,
	MLX5_L3T_TYPE_MAX,
};

struct mlx5_indexed_pool;

/* Generic data struct. */
union mlx5_l3t_data {
	uint16_t word;
	uint32_t dword;
	uint64_t qword;
	void *ptr;
};

/* L3 level table data structure. */
struct mlx5_l3t_level_tbl {
	uint64_t ref_cnt; /* Table ref_cnt. */
	void *tbl[]; /* Table array. */
};

/* L3 word entry table data structure.
*/ struct mlx5_l3t_entry_word { uint32_t idx; /* Table index. */ uint64_t ref_cnt; /* Table ref_cnt. */ struct { uint16_t data; uint32_t ref_cnt; } entry[MLX5_L3T_ET_SIZE]; /* Entry array */ } __rte_packed; /* L3 double word entry table data structure. */ struct mlx5_l3t_entry_dword { uint32_t idx; /* Table index. */ uint64_t ref_cnt; /* Table ref_cnt. */ struct { uint32_t data; int32_t ref_cnt; } entry[MLX5_L3T_ET_SIZE]; /* Entry array */ } __rte_packed; /* L3 quad word entry table data structure. */ struct mlx5_l3t_entry_qword { uint32_t idx; /* Table index. */ uint64_t ref_cnt; /* Table ref_cnt. */ struct { uint64_t data; uint32_t ref_cnt; } entry[MLX5_L3T_ET_SIZE]; /* Entry array */ } __rte_packed; /* L3 pointer entry table data structure. */ struct mlx5_l3t_entry_ptr { uint32_t idx; /* Table index. */ uint64_t ref_cnt; /* Table ref_cnt. */ struct { void *data; uint32_t ref_cnt; } entry[MLX5_L3T_ET_SIZE]; /* Entry array */ } __rte_packed; /* L3 table data structure. */ struct mlx5_l3t_tbl { enum mlx5_l3t_type type; /* Table type. */ struct mlx5_indexed_pool *eip; /* Table index pool handles. */ struct mlx5_l3t_level_tbl *tbl; /* Global table index. */ rte_spinlock_t sl; /* The table lock. */ }; /** Type of function that is used to handle the data before freeing. */ typedef int32_t (*mlx5_l3t_alloc_callback_fn)(void *ctx, union mlx5_l3t_data *data); /* * The indexed memory entry index is made up of trunk index and offset of * the entry in the trunk. Since the entry index is 32 bits, in case user * prefers to have small trunks, user can change the macro below to a big * number which helps the pool contains more trunks with lots of entries * allocated. */ #define TRUNK_IDX_BITS 16 #define TRUNK_MAX_IDX ((1 << TRUNK_IDX_BITS) - 1) #define TRUNK_INVALID TRUNK_MAX_IDX #define MLX5_IPOOL_DEFAULT_TRUNK_SIZE (1 << (28 - TRUNK_IDX_BITS)) #ifdef RTE_LIBRTE_MLX5_DEBUG #define POOL_DEBUG 1 #endif struct mlx5_indexed_pool_config { uint32_t size; /* Pool entry size. */ uint32_t trunk_size:22; /* * Trunk entry number. Must be power of 2. It can be increased * if trunk_grow enable. The trunk entry number increases with * left shift grow_shift. Trunks with index are after grow_trunk * will keep the entry number same with the last grow trunk. */ uint32_t grow_trunk:4; /* * Trunks with entry number increase in the pool. Set it to 0 * to make the pool works as trunk entry fixed pool. It works * only if grow_shift is not 0. */ uint32_t grow_shift:4; /* * Trunk entry number increase shift value, stop after grow_trunk. * It works only if grow_trunk is not 0. */ uint32_t need_lock:1; /* Lock is needed for multiple thread usage. */ uint32_t release_mem_en:1; /* Rlease trunk when it is free. */ const char *type; /* Memory allocate type name. */ void *(*malloc)(uint32_t flags, size_t size, unsigned int align, int socket); /* User defined memory allocator. */ void (*free)(void *addr); /* User defined memory release. */ }; struct mlx5_indexed_trunk { uint32_t idx; /* Trunk id. */ uint32_t prev; /* Previous free trunk in free list. */ uint32_t next; /* Next free trunk in free list. */ uint32_t free; /* Free entries available */ struct rte_bitmap *bmp; uint8_t data[] __rte_cache_aligned; /* Entry data start. */ }; struct mlx5_indexed_pool { struct mlx5_indexed_pool_config cfg; /* Indexed pool configuration. */ rte_spinlock_t lock; /* Pool lock for multiple thread usage. */ uint32_t n_trunk_valid; /* Trunks allocated. */ uint32_t n_trunk; /* Trunk pointer array size. */ /* Dim of trunk pointer array. 
*/ struct mlx5_indexed_trunk **trunks; uint32_t free_list; /* Index to first free trunk. */ #ifdef POOL_DEBUG uint32_t n_entry; uint32_t trunk_new; uint32_t trunk_avail; uint32_t trunk_empty; uint32_t trunk_free; #endif uint32_t grow_tbl[]; /* Save the index offset for the grow trunks. */ }; /** * Return logarithm of the nearest power of two above input value. * * @param v * Input value. * * @return * Logarithm of the nearest power of two above input value. */ static inline unsigned int log2above(unsigned int v) { unsigned int l; unsigned int r; for (l = 0, r = 0; (v >> 1); ++l, v >>= 1) r |= (v & 1); return l + r; } #define MLX5_HLIST_DIRECT_KEY 0x0001 /* Use the key directly as hash index. */ #define MLX5_HLIST_WRITE_MOST 0x0002 /* List mostly used for append new. */ /** Maximum size of string for naming the hlist table. */ #define MLX5_HLIST_NAMESIZE 32 struct mlx5_hlist; /** * Structure of the entry in the hash list, user should define its own struct * that contains this in order to store the data. The 'key' is 64-bits right * now and its user's responsibility to guarantee there is no collision. */ struct mlx5_hlist_entry { LIST_ENTRY(mlx5_hlist_entry) next; /* entry pointers in the list. */ uint32_t idx; /* Bucket index the entry belongs to. */ uint32_t ref_cnt; /* Reference count. */ }; /** Structure for hash head. */ LIST_HEAD(mlx5_hlist_head, mlx5_hlist_entry); /** * Type of callback function for entry removal. * * @param list * The hash list. * @param entry * The entry in the list. */ typedef void (*mlx5_hlist_remove_cb)(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry); /** * Type of function for user defined matching. * * @param list * The hash list. * @param entry * The entry in the list. * @param key * The new entry key. * @param ctx * The pointer to new entry context. * * @return * 0 if matching, non-zero number otherwise. */ typedef int (*mlx5_hlist_match_cb)(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry, uint64_t key, void *ctx); /** * Type of function for user defined hash list entry creation. * * @param list * The hash list. * @param key * The key of the new entry. * @param ctx * The pointer to new entry context. * * @return * Pointer to allocated entry on success, NULL otherwise. */ typedef struct mlx5_hlist_entry *(*mlx5_hlist_create_cb) (struct mlx5_hlist *list, uint64_t key, void *ctx); /* Hash list bucket head. */ struct mlx5_hlist_bucket { struct mlx5_hlist_head head; /* List head. */ rte_rwlock_t lock; /* Bucket lock. */ uint32_t gen_cnt; /* List modification will update generation count. */ } __rte_cache_aligned; /** * Hash list table structure * * Entry in hash list could be reused if entry already exists, reference * count will increase and the existing entry returns. * * When destroy an entry from list, decrease reference count and only * destroy when no further reference. */ struct mlx5_hlist { char name[MLX5_HLIST_NAMESIZE]; /**< Name of the hash list. */ /**< number of heads, need to be power of 2. */ uint32_t table_sz; uint32_t entry_sz; /**< Size of entry, used to allocate entry. */ /**< mask to get the index of the list heads. */ uint32_t mask; bool direct_key; /* Use the new entry key directly as hash index. */ bool write_most; /* List mostly used for append new or destroy. */ void *ctx; mlx5_hlist_create_cb cb_create; /**< entry create callback. */ mlx5_hlist_match_cb cb_match; /**< entry match callback. */ mlx5_hlist_remove_cb cb_remove; /**< entry remove callback. 
*/
	struct mlx5_hlist_bucket buckets[] __rte_cache_aligned;
	/**< list bucket arrays. */
};

/**
 * Create a hash list table; the user can specify the list heads array size
 * of the table. Currently the size should be a power of 2 in order to get a
 * better distribution for the entries. Each entry is a part of the whole data
 * element and the caller should be responsible for the data element's
 * allocation and cleanup / free. The key of each entry will be calculated
 * with CRC in order to generate a little fairer distribution.
 *
 * @param name
 *   Name of the hash list (optional).
 * @param size
 *   Heads array size of the hash list.
 * @param entry_size
 *   Entry size to allocate if cb_create not specified.
 * @param flags
 *   The hash list attribute flags.
 * @param cb_create
 *   Callback function for entry create.
 * @param cb_match
 *   Callback function for entry match.
 * @param cb_destroy
 *   Callback function for entry destroy.
 * @return
 *   Pointer of the hash list table created, NULL on failure.
 */
struct mlx5_hlist *mlx5_hlist_create(const char *name, uint32_t size,
				     uint32_t entry_size, uint32_t flags,
				     mlx5_hlist_create_cb cb_create,
				     mlx5_hlist_match_cb cb_match,
				     mlx5_hlist_remove_cb cb_destroy);

/**
 * Search an entry matching the key.
 *
 * The returned result might be destroyed by another thread, so use
 * this function only in the main thread.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key for the searching entry.
 * @param ctx
 *   Common context parameter used by entry callback function.
 *
 * @return
 *   Pointer of the hlist entry if found, NULL otherwise.
 */
struct mlx5_hlist_entry *mlx5_hlist_lookup(struct mlx5_hlist *h, uint64_t key,
					   void *ctx);

/**
 * Insert an entry to the hash list table; the entry is only part of the whole
 * data element and a 64-bit key is used for matching. User should construct
 * the key or give a calculated hash signature and guarantee there is no
 * collision.
 *
 * @param h
 *   Pointer to the hash list table.
 * @param key
 *   Key of the entry to be inserted into the hash list table.
 * @param ctx
 *   Common context parameter used by callback function.
 *
 * @return
 *   registered entry on success, NULL otherwise
 */
struct mlx5_hlist_entry *mlx5_hlist_register(struct mlx5_hlist *h, uint64_t key,
					     void *ctx);

/**
 * Remove an entry from the hash list table. User should guarantee the validity
 * of the entry.
 *
 * @param h
 *   Pointer to the hash list table. (not used)
 * @param entry
 *   Entry to be removed from the hash list table.
 * @return
 *   0 on entry removed, 1 on entry still referenced.
 */
int mlx5_hlist_unregister(struct mlx5_hlist *h, struct mlx5_hlist_entry *entry);

/**
 * Destroy the hash list table; all the entries already inserted into the lists
 * will be handled by the callback function provided by the user (including
 * free if needed) before the table is freed.
 *
 * @param h
 *   Pointer to the hash list table.
 */
void mlx5_hlist_destroy(struct mlx5_hlist *h);

/************************ cache list *****************************/

/** Maximum size of string for naming. */
#define MLX5_NAME_SIZE 32

struct mlx5_cache_list;

/**
 * Structure of the entry in the cache list, user should define its own struct
 * that contains this in order to store the data.
 */
struct mlx5_cache_entry {
	LIST_ENTRY(mlx5_cache_entry) next; /* Entry pointers in the list. */
	uint32_t ref_cnt; /* Reference count. */
};

/**
 * Type of callback function for entry removal.
 *
 * @param list
 *   The cache list.
 * @param entry
 *   The entry in the list.
 */
typedef void (*mlx5_cache_remove_cb)(struct mlx5_cache_list *list,
				     struct mlx5_cache_entry *entry);

/**
 * Type of function for user defined matching.
 *
 * @param list
 *   The cache list.
 * @param entry
 *   The entry in the list.
 * @param ctx
 *   The pointer to new entry context.
 *
 * @return
 *   0 if matching, non-zero number otherwise.
 */
typedef int (*mlx5_cache_match_cb)(struct mlx5_cache_list *list,
				   struct mlx5_cache_entry *entry, void *ctx);

/**
 * Type of function for user defined cache list entry creation.
 *
 * @param list
 *   The cache list.
 * @param entry
 *   The newly allocated entry, or NULL if the list entry size is unspecified;
 *   in that case the new entry has to be allocated in the callback and
 *   returned.
 * @param ctx
 *   The pointer to new entry context.
 *
 * @return
 *   Pointer of entry on success, NULL otherwise.
 */
typedef struct mlx5_cache_entry *(*mlx5_cache_create_cb)
				 (struct mlx5_cache_list *list,
				  struct mlx5_cache_entry *entry,
				  void *ctx);

/**
 * Linked cache list structure.
 *
 * An entry in the cache list can be reused if the entry already exists;
 * its reference count will increase and the existing entry is returned.
 *
 * When an entry is destroyed from the list, its reference count decreases,
 * and it is only destroyed when there are no further references.
 *
 * The linked list cache is designed for caches with a limited number of
 * entries, mostly read, with few modifications.
 *
 * For caches with a huge amount of entries, please consider the hash list
 * cache.
 *
 */
struct mlx5_cache_list {
	char name[MLX5_NAME_SIZE]; /**< Name of the cache list. */
	uint32_t entry_sz; /**< Entry size, 0: use create callback. */
	rte_rwlock_t lock; /* read/write lock. */
	uint32_t gen_cnt; /* List modification will update generation count. */
	uint32_t count; /* number of entries in list. */
	void *ctx; /* user objects target to callback. */
	mlx5_cache_create_cb cb_create; /**< entry create callback. */
	mlx5_cache_match_cb cb_match; /**< entry match callback. */
	mlx5_cache_remove_cb cb_remove; /**< entry remove callback. */
	LIST_HEAD(mlx5_cache_head, mlx5_cache_entry) head;
};

/**
 * Initialize a cache list.
 *
 * @param list
 *   Pointer to the cache list.
 * @param name
 *   Name of the cache list.
 * @param entry_size
 *   Entry size to allocate, 0 to allocate by creation callback.
 * @param ctx
 *   Pointer to the list context data.
 * @param cb_create
 *   Callback function for entry create.
 * @param cb_match
 *   Callback function for entry match.
 * @param cb_remove
 *   Callback function for entry remove.
 * @return
 *   0 on success, otherwise failure.
 */
int mlx5_cache_list_init(struct mlx5_cache_list *list,
			 const char *name, uint32_t entry_size, void *ctx,
			 mlx5_cache_create_cb cb_create,
			 mlx5_cache_match_cb cb_match,
			 mlx5_cache_remove_cb cb_remove);

/**
 * Search an entry matching the key.
 *
 * The returned result might be destroyed by another thread, so use
 * this function only in the main thread.
 *
 * @param list
 *   Pointer to the cache list.
 * @param ctx
 *   Common context parameter used by entry callback function.
 *
 * @return
 *   Pointer of the cache entry if found, NULL otherwise.
 */
struct mlx5_cache_entry *mlx5_cache_lookup(struct mlx5_cache_list *list,
					   void *ctx);

/**
 * Reuse or create an entry in the cache list.
 *
 * @param list
 *   Pointer to the cache list.
 * @param ctx
 *   Common context parameter used by callback function.
 *
 * @return
 *   registered entry on success, NULL otherwise
 */
struct mlx5_cache_entry *mlx5_cache_register(struct mlx5_cache_list *list,
					     void *ctx);

/**
 * Remove an entry from the cache list.
 *
 * User should guarantee the validity of the entry.
 *
 * @param list
 *   Pointer to the cache list.
* @param entry * Entry to be removed from the cache list table. * @return * 0 on entry removed, 1 on entry still referenced. */ int mlx5_cache_unregister(struct mlx5_cache_list *list, struct mlx5_cache_entry *entry); /** * Destroy the cache list. * * @param list * Pointer to the cache list. */ void mlx5_cache_list_destroy(struct mlx5_cache_list *list); /** * Get entry number from the cache list. * * @param list * Pointer to the hast list. * @return * Cache list entry number. */ uint32_t mlx5_cache_list_get_entry_num(struct mlx5_cache_list *list); /********************************* indexed pool *************************/ /** * This function allocates non-initialized memory entry from pool. * In NUMA systems, the memory entry allocated resides on the same * NUMA socket as the core that calls this function. * * Memory entry is allocated from memory trunk, no alignment. * * @param pool * Pointer to indexed memory entry pool. * No initialization required. * @param[out] idx * Pointer to memory to save allocated index. * Memory index always positive value. * @return * - Pointer to the allocated memory entry. * - NULL on error. Not enough memory, or invalid arguments. */ void *mlx5_ipool_malloc(struct mlx5_indexed_pool *pool, uint32_t *idx); /** * This function allocates zero initialized memory entry from pool. * In NUMA systems, the memory entry allocated resides on the same * NUMA socket as the core that calls this function. * * Memory entry is allocated from memory trunk, no alignment. * * @param pool * Pointer to indexed memory pool. * No initialization required. * @param[out] idx * Pointer to memory to save allocated index. * Memory index always positive value. * @return * - Pointer to the allocated memory entry . * - NULL on error. Not enough memory, or invalid arguments. */ void *mlx5_ipool_zmalloc(struct mlx5_indexed_pool *pool, uint32_t *idx); /** * This function frees indexed memory entry to pool. * Caller has to make sure that the index is allocated from same pool. * * @param pool * Pointer to indexed memory pool. * @param idx * Allocated memory entry index. */ void mlx5_ipool_free(struct mlx5_indexed_pool *pool, uint32_t idx); /** * This function returns pointer of indexed memory entry from index. * Caller has to make sure that the index is valid, and allocated * from same pool. * * @param pool * Pointer to indexed memory pool. * @param idx * Allocated memory index. * @return * - Pointer to indexed memory entry. */ void *mlx5_ipool_get(struct mlx5_indexed_pool *pool, uint32_t idx); /** * This function creates indexed memory pool. * Caller has to configure the configuration accordingly. * * @param pool * Pointer to indexed memory pool. * @param cfg * Allocated memory index. */ struct mlx5_indexed_pool * mlx5_ipool_create(struct mlx5_indexed_pool_config *cfg); /** * This function releases all resources of pool. * Caller has to make sure that all indexes and memories allocated * from this pool not referenced anymore. * * @param pool * Pointer to indexed memory pool. * @return * - non-zero value on error. * - 0 on success. */ int mlx5_ipool_destroy(struct mlx5_indexed_pool *pool); /** * This function dumps debug info of pool. * * @param pool * Pointer to indexed memory pool. */ void mlx5_ipool_dump(struct mlx5_indexed_pool *pool); /** * This function allocates new empty Three-level table. * * @param type * The l3t can set as word, double word, quad word or pointer with index. * * @return * - Pointer to the allocated l3t. * - NULL on error. Not enough memory, or invalid arguments. 
*/ struct mlx5_l3t_tbl *mlx5_l3t_create(enum mlx5_l3t_type type); /** * This function destroys Three-level table. * * @param tbl * Pointer to the l3t. */ void mlx5_l3t_destroy(struct mlx5_l3t_tbl *tbl); /** * This function gets the index entry from Three-level table. * * @param tbl * Pointer to the l3t. * @param idx * Index to the entry. * @param data * Pointer to the memory which saves the entry data. * When function call returns 0, data contains the entry data get from * l3t. * When function call returns -1, data is not modified. * * @return * 0 if success, -1 on error. */ int32_t mlx5_l3t_get_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, union mlx5_l3t_data *data); /** * This function gets the index entry from Three-level table. * * If the index entry is not available, allocate new one by callback * function and fill in the entry. * * @param tbl * Pointer to the l3t. * @param idx * Index to the entry. * @param data * Pointer to the memory which saves the entry data. * When function call returns 0, data contains the entry data get from * l3t. * When function call returns -1, data is not modified. * @param cb * Callback function to allocate new data. * @param ctx * Context for callback function. * * @return * 0 if success, -1 on error. */ int32_t mlx5_l3t_prepare_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, union mlx5_l3t_data *data, mlx5_l3t_alloc_callback_fn cb, void *ctx); /** * This function decreases and clear index entry if reference * counter is 0 from Three-level table. * * @param tbl * Pointer to the l3t. * @param idx * Index to the entry. * * @return * The remaining reference count, 0 means entry be cleared, -1 on error. */ int32_t mlx5_l3t_clear_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx); /** * This function sets the index entry to Three-level table. * If the entry is already set, the EEXIST errno will be given, and * the set data will be filled to the data. * * @param tbl[in] * Pointer to the l3t. * @param idx[in] * Index to the entry. * @param data[in/out] * Pointer to the memory which contains the entry data save to l3t. * If the entry is already set, the set data will be filled. * * @return * 0 if success, -1 on error. */ int32_t mlx5_l3t_set_entry(struct mlx5_l3t_tbl *tbl, uint32_t idx, union mlx5_l3t_data *data); /* * Macros for linked list based on indexed memory. * Example data structure: * struct Foo { * ILIST_ENTRY(uint16_t) next; * ... * } * */ #define ILIST_ENTRY(type) \ struct { \ type prev; /* Index of previous element. */ \ type next; /* Index of next element. */ \ } #define ILIST_INSERT(pool, head, idx, elem, field) \ do { \ typeof(elem) peer; \ MLX5_ASSERT((elem) && (idx)); \ (elem)->field.next = *(head); \ (elem)->field.prev = 0; \ if (*(head)) { \ (peer) = mlx5_ipool_get(pool, *(head)); \ if (peer) \ (peer)->field.prev = (idx); \ } \ *(head) = (idx); \ } while (0) #define ILIST_REMOVE(pool, head, idx, elem, field) \ do { \ typeof(elem) peer; \ MLX5_ASSERT(elem); \ MLX5_ASSERT(head); \ if ((elem)->field.prev) { \ (peer) = mlx5_ipool_get \ (pool, (elem)->field.prev); \ if (peer) \ (peer)->field.next = (elem)->field.next;\ } \ if ((elem)->field.next) { \ (peer) = mlx5_ipool_get \ (pool, (elem)->field.next); \ if (peer) \ (peer)->field.prev = (elem)->field.prev;\ } \ if (*(head) == (idx)) \ *(head) = (elem)->field.next; \ } while (0) #define ILIST_FOREACH(pool, head, idx, elem, field) \ for ((idx) = (head), (elem) = \ (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \ idx = (elem)->field.next, (elem) = \ (idx) ? 
mlx5_ipool_get(pool, idx) : NULL) /* Single index list. */ #define SILIST_ENTRY(type) \ struct { \ type next; /* Index of next element. */ \ } #define SILIST_INSERT(head, idx, elem, field) \ do { \ MLX5_ASSERT((elem) && (idx)); \ (elem)->field.next = *(head); \ *(head) = (idx); \ } while (0) #define SILIST_FOREACH(pool, head, idx, elem, field) \ for ((idx) = (head), (elem) = \ (idx) ? mlx5_ipool_get(pool, (idx)) : NULL; (elem); \ idx = (elem)->field.next, (elem) = \ (idx) ? mlx5_ipool_get(pool, idx) : NULL) #endif /* RTE_PMD_MLX5_UTILS_H_ */
9,679
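To make the 10-10-12 split described in mlx5_utils.h above concrete: a 32-bit index decomposes into a global-table slot, a middle-table slot, and an entry-table slot using the offsets and masks defined in the header. A quick sketch of the decomposition (Python; the constants are copied from the header, but the function itself is a paraphrase of the comment, not DPDK code):

# Constants as defined in mlx5_utils.h above.
MLX5_L3T_GT_OFFSET = 22; MLX5_L3T_GT_MASK = (1 << 10) - 1
MLX5_L3T_MT_OFFSET = 12; MLX5_L3T_MT_MASK = (1 << 10) - 1
MLX5_L3T_ET_MASK = (1 << 12) - 1

def l3t_split(idx):
    # 10-10-12 split of a 32-bit index into global/middle/entry table slots.
    gt = (idx >> MLX5_L3T_GT_OFFSET) & MLX5_L3T_GT_MASK
    mt = (idx >> MLX5_L3T_MT_OFFSET) & MLX5_L3T_MT_MASK
    et = idx & MLX5_L3T_ET_MASK
    return gt, mt, et

# Sequentially increasing indexes stay in one entry table for 4096 entries,
# which is why memory grows in 4K-entry steps as the header comment says.
print(l3t_split(0x00000FFF))  # (0, 0, 4095)  last slot of the first entry table
print(l3t_split(0x00001000))  # (0, 1, 0)     first slot of the second entry table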
5,169
{ "name": "phoneid_iOS", "version": "0.1.8", "summary": "Phone.Id SDK library", "description": "iOS library that provides access to phone.id service.\nPhone.id service allows App developers to use the phone number as a social login, without using nicknames or passwords at all.", "homepage": "https://github.com/phoneid/phoneid_iOS", "license": "Apache License, Version 2.0 (http://www.apache.org/licenses/LICENSE-2.0)", "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/phoneid/phoneid_iOS.git", "tag": "0.1.8" }, "pod_target_xcconfig": { "ENABLE_TESTABILITY": "YES", "SWIFT_VERSION": "2.3" }, "platforms": { "ios": "8.0" }, "requires_arc": true, "source_files": "Pod/Classes/**/*", "resources": [ "Pod/Assets/Images.xcassets", "Pod/Assets/strings/**" ], "frameworks": [ "UIKit", "CoreTelephony" ], "dependencies": { "libPhoneNumber-iOS": [ "~> 0.8.16" ] } }
406
975
# Copyright (C) 2017 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import absolute_import

import logging
import socket

from sqlalchemy import Column
from sqlalchemy import Boolean
from sqlalchemy import Integer
from sqlalchemy import String

from ryu.lib import ip
from ryu.lib.packet import safi as packet_safi
from ryu.lib.packet import zebra

from . import base
from . import interface

LOG = logging.getLogger(__name__)


class Route(base.Base):
    """
    Route table (like routing table) for Zebra protocol service.

    ``id``: (Primary Key) ID of this route.

    ``family``: Address Family, not AFI (Address Family Identifiers).
    Mostly, "socket.AF_INET" or "socket.AF_INET6".

    ``safi``: Subsequent Address Family Identifiers.

    ``destination``: Destination prefix of this route.

    ``gateway``: Next hop address of this route.
    The default is "" (empty string).

    ``ifindex``: Index of interface to forward packets.

    ``source``: Source IP address of this route, which should be an
    address assigned to the local interface.

    ``route_type``: Route Type of this route. This type shows which
    daemon (or kernel) generated this route.

    ``is_selected``: Whether this route is selected for "destination".
    """
    __tablename__ = 'route'

    id = Column(Integer, primary_key=True)
    family = Column(Integer, default=socket.AF_INET)
    safi = Column(Integer, default=packet_safi.UNICAST)
    destination = Column(String, default='0.0.0.0/0')
    gateway = Column(String, default='')
    ifindex = Column(Integer, default=0)
    source = Column(String, default='')
    route_type = Column(Integer, default=zebra.ZEBRA_ROUTE_KERNEL)
    is_selected = Column(Boolean, default=False)


@base.sql_function
def ip_route_show(session, destination, device, **kwargs):
    """
    Returns a route record matching the given filtering rules.

    The arguments are similar to "ip route showdump" command of iproute2.

    :param session: Session instance connecting to database.
    :param destination: Destination prefix.
    :param device: Source device.
    :param kwargs: Filtering rules to query.
    :return: Instance of route record or "None" if failed.
    """
    intf = interface.ip_link_show(session, ifname=device)
    if not intf:
        LOG.debug('Interface "%s" does not exist', device)
        return None

    return session.query(Route).filter_by(
        destination=destination, ifindex=intf.ifindex, **kwargs).first()


@base.sql_function
def ip_route_show_all(session, **kwargs):
    """
    Returns all route records matching the given filtering rules.

    The arguments are similar to "ip route showdump" command of iproute2.

    :param session: Session instance connecting to database.
    :param kwargs: Filtering rules to query.
    :return: A list of route records.
    """
    return session.query(Route).filter_by(**kwargs).all()


@base.sql_function
def ip_route_add(session, destination, device=None, gateway='', source='',
                 ifindex=0, route_type=zebra.ZEBRA_ROUTE_KERNEL,
                 is_selected=True):
    """
    Adds a route record into Zebra protocol service database.
The arguments are similar to "ip route add" command of iproute2. If "is_selected=True", disables the existing selected route for the given destination. :param session: Session instance connecting to database. :param destination: Destination prefix. :param device: Source device. :param gateway: Gateway IP address. :param source: Source IP address. :param ifindex: Index of source device. :param route_type: Route type of daemon (or kernel). :param is_selected: If select the given route as "in use" or not. :return: Instance of record or "None" if failed. """ if device: intf = interface.ip_link_show(session, ifname=device) if not intf: LOG.debug('Interface "%s" does not exist', device) return None ifindex = ifindex or intf.ifindex route = ip_route_show(session, destination=destination, device=device) if route: LOG.debug( 'Route to "%s" already exists on "%s" device', destination, device) return route dest_addr, dest_prefix_num = destination.split('/') dest_prefix_num = int(dest_prefix_num) if ip.valid_ipv4(dest_addr) and 0 <= dest_prefix_num <= 32: family = socket.AF_INET elif ip.valid_ipv6(dest_addr) and 0 <= dest_prefix_num <= 128: family = socket.AF_INET6 else: LOG.debug('Invalid IP address for "prefix": %s', destination) return None safi = packet_safi.UNICAST if is_selected: old_routes = ip_route_show_all( session, destination=destination, is_selected=True) for old_route in old_routes: if old_route: LOG.debug('Set existing route to unselected: %s', old_route) old_route.is_selected = False new_route = Route( family=family, safi=safi, destination=destination, gateway=gateway, ifindex=ifindex, source=source, route_type=route_type, is_selected=is_selected) session.add(new_route) return new_route @base.sql_function def ip_route_delete(session, destination, **kwargs): """ Deletes route record(s) from Zebra protocol service database. The arguments are similar to "ip route delete" command of iproute2. :param session: Session instance connecting to database. :param destination: Destination prefix. :param kwargs: Filtering rules to query. :return: Records which are deleted. """ routes = ip_route_show_all(session, destination=destination, **kwargs) for route in routes: session.delete(route) return routes
2,290
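One step of ip_route_add above is worth isolating: the address family is inferred by validating the destination prefix, and an invalid prefix aborts the insert with None. A stripped-down sketch of just that validation (Python; it reuses ryu.lib.ip as the module does, but is illustrative rather than Ryu code):

import socket

from ryu.lib import ip

def prefix_family(destination):
    # Mirrors the validation in ip_route_add above: split "addr/len",
    # then pick AF_INET or AF_INET6 based on address and prefix length.
    dest_addr, dest_prefix_num = destination.split('/')
    dest_prefix_num = int(dest_prefix_num)
    if ip.valid_ipv4(dest_addr) and 0 <= dest_prefix_num <= 32:
        return socket.AF_INET
    if ip.valid_ipv6(dest_addr) and 0 <= dest_prefix_num <= 128:
        return socket.AF_INET6
    return None  # invalid prefix: ip_route_add logs and returns None

assert prefix_family('10.0.0.0/8') == socket.AF_INET
assert prefix_family('2001:db8::/32') == socket.AF_INET6
assert prefix_family('10.0.0.0/64') is None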
4,962
<filename>byte-buddy-dep/src/test/java/net/bytebuddy/agent/builder/AgentBuilderDefaultNativeMethodStrategyTest.java package net.bytebuddy.agent.builder; import net.bytebuddy.description.method.MethodDescription; import net.bytebuddy.test.utility.MockitoRule; import org.junit.Before; import org.junit.Rule; import org.junit.Test; import org.junit.rules.TestRule; import org.mockito.Mock; import java.lang.instrument.ClassFileTransformer; import java.lang.instrument.Instrumentation; import static org.hamcrest.CoreMatchers.startsWith; import static org.hamcrest.CoreMatchers.*; import static org.hamcrest.MatcherAssert.assertThat; import static org.mockito.Mockito.*; public class AgentBuilderDefaultNativeMethodStrategyTest { private static final String FOO = "foo", BAR = "bar"; @Rule public TestRule mockitoRule = new MockitoRule(this); @Mock private MethodDescription methodDescription; @Mock private Instrumentation instrumentation; @Mock private ClassFileTransformer classFileTransformer; @Before public void setUp() throws Exception { when(methodDescription.getInternalName()).thenReturn(BAR); } @Test public void testDisabledStrategySuffixesNames() throws Exception { assertThat(AgentBuilder.Default.NativeMethodStrategy.Disabled.INSTANCE.resolve().transform(methodDescription), startsWith(BAR)); assertThat(AgentBuilder.Default.NativeMethodStrategy.Disabled.INSTANCE.resolve().transform(methodDescription), not(BAR)); } @Test public void testDisabledStrategyApply() throws Exception { AgentBuilder.Default.NativeMethodStrategy.Disabled.INSTANCE.apply(instrumentation, classFileTransformer); verifyZeroInteractions(instrumentation); verifyZeroInteractions(classFileTransformer); } @Test(expected = IllegalArgumentException.class) public void testEnabledStrategyMustNotBeEmptyString() throws Exception { AgentBuilder.Default.NativeMethodStrategy.ForPrefix.of(""); } @Test public void testEnabledStrategySuffixesNames() throws Exception { assertThat(new AgentBuilder.Default.NativeMethodStrategy.ForPrefix(FOO).resolve().transform(methodDescription), is(FOO + BAR)); } @Test public void testEnabledStrategyApplySupported() throws Exception { when(instrumentation.isNativeMethodPrefixSupported()).thenReturn(true); new AgentBuilder.Default.NativeMethodStrategy.ForPrefix(FOO).apply(instrumentation, classFileTransformer); verify(instrumentation).isNativeMethodPrefixSupported(); verify(instrumentation).setNativeMethodPrefix(classFileTransformer, FOO); verifyNoMoreInteractions(instrumentation); verifyZeroInteractions(classFileTransformer); } @Test(expected = IllegalArgumentException.class) public void testEnabledStrategyApplyNotSupported() throws Exception { when(instrumentation.isNativeMethodPrefixSupported()).thenReturn(false); new AgentBuilder.Default.NativeMethodStrategy.ForPrefix(FOO).apply(instrumentation, classFileTransformer); } }
1,010
1,175
<gh_stars>1000+
package com.dianping.loader.model;

import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;

import android.os.Parcel;
import android.os.Parcelable;

public class FileSpec implements Parcelable {
	/**
	 * Download when required<br>
	 * (download only when the file is actually needed)
	 */
	public static final int DOWN_NONE = 0;
	/**
	 * Try to download in background if Wifi or faster network is available<br>
	 * (attempt background download on Wifi)
	 */
	public static final int DOWN_WIFI = 1;
	/**
	 * Try to download in background if 3G or faster network is available<br>
	 * (attempt background download on 3G or Wifi)
	 */
	public static final int DOWN_3G = 2;
	/**
	 * Try to download in background<br>
	 * (always attempt background download)
	 */
	public static final int DOWN_ALWAYS = 5;

	private String id;
	private String url;
	private String md5;
	private int down;
	private int length;
	private String[] deps;

	public FileSpec(String id, String url, String md5, int down, int length,
			String[] deps) {
		this.id = id;
		this.url = url;
		this.md5 = md5;
		this.down = down;
		this.length = length;
		this.deps = deps;
	}

	public FileSpec(JSONObject json) throws JSONException {
		id = json.getString("id");
		url = json.getString("url");
		md5 = json.optString("md5");
		down = json.optInt("down", 0);
		length = json.optInt("length", 0);
		JSONArray arr = json.optJSONArray("deps");
		if (arr != null) {
			deps = new String[arr.length()];
			for (int i = 0; i < deps.length; i++) {
				deps[i] = arr.getString(i);
			}
		}
	}

	public String id() {
		return id;
	}

	public String url() {
		return url;
	}

	public String md5() {
		return md5;
	}

	public int down() {
		return down;
	}

	public int length() {
		return length;
	}

	public String[] deps() {
		return deps;
	}

	@Override
	public int hashCode() {
		return id.hashCode();
	}

	@Override
	public boolean equals(Object o) {
		if (o == this)
			return true;
		if (!(o instanceof FileSpec))
			return false;
		return id.equals(((FileSpec) o).id);
	}

	@Override
	public String toString() {
		StringBuilder sb = new StringBuilder();
		sb.append(id);
		if (deps != null && deps.length > 0) {
			sb.append(':');
			sb.append(deps[0]);
			for (int i = 1; i < deps.length; i++) {
				sb.append(',').append(deps[i]);
			}
		}
		return sb.toString();
	}

	//
	// Parcelable
	//

	@Override
	public void writeToParcel(Parcel out, int flags) {
		out.writeString(id);
		out.writeString(url);
		out.writeString(md5);
		out.writeInt(down);
		out.writeInt(length);
		out.writeStringArray(deps);
	}

	@Override
	public int describeContents() {
		return 0;
	}

	public static final Parcelable.Creator<FileSpec> CREATOR = new Parcelable.Creator<FileSpec>() {
		public FileSpec createFromParcel(Parcel in) {
			return new FileSpec(in);
		}

		public FileSpec[] newArray(int size) {
			return new FileSpec[size];
		}
	};

	protected FileSpec(Parcel in) {
		id = in.readString();
		url = in.readString();
		md5 = in.readString();
		down = in.readInt();
		length = in.readInt();
		deps = in.createStringArray();
	}
}
1,412
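For reference, the FileSpec(JSONObject) constructor above expects a payload shaped like the one below: "id" and "url" are mandatory, the rest fall back to defaults. A hypothetical record and the equivalent parse in Python (all values invented for illustration):

import json

# Hypothetical payload matching FileSpec(JSONObject) above: "id" and "url"
# are required; "md5"/"down"/"length"/"deps" are optional with defaults.
payload = json.loads("""
{
  "id": "module-a",
  "url": "http://example.com/module-a.apk",
  "md5": "d41d8cd98f00b204e9800998ecf8427e",
  "down": 1,
  "length": 1024,
  "deps": ["module-b"]
}
""")

spec = {
    'id': payload['id'],                 # getString: mandatory
    'url': payload['url'],               # getString: mandatory
    'md5': payload.get('md5', ''),       # optString
    'down': payload.get('down', 0),      # optInt, DOWN_WIFI in this sample
    'length': payload.get('length', 0),  # optInt
    'deps': payload.get('deps'),         # optJSONArray, may be absent
}
print(spec['id'], spec['down'])  # module-a 1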
1,109
<reponame>amd-yan/simple-salesforce """Simple-Salesforce Package""" # flake8: noqa from .api import Salesforce, SFType from .bulk import SFBulkHandler from .exceptions import (SalesforceAuthenticationFailed, SalesforceError, SalesforceExpiredSession, SalesforceGeneralError, SalesforceMalformedRequest, SalesforceMoreThanOneRecord, SalesforceRefusedRequest, SalesforceResourceNotFound) from .login import SalesforceLogin from .format import format_soql, format_external_id
230
8,755
#pragma once struct AsyncMqttClientMessageProperties { uint8_t qos; bool dup; bool retain; };
40
601
""" Spacer components to add horizontal or vertical space to a layout. """ import param from bokeh.models import Div as BkDiv, Spacer as BkSpacer from ..reactive import Reactive class Spacer(Reactive): """ The `Spacer` layout is a very versatile component which makes it easy to put fixed or responsive spacing between objects. Like all other components spacers support both absolute and responsive sizing modes. Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers :Example: >>> pn.Row( ... 1, pn.Spacer(width=200), ... 2, pn.Spacer(width=100), ... 3 ... ) """ _bokeh_model = BkSpacer def _get_model(self, doc, root=None, parent=None, comm=None): properties = self._process_param_change(self._init_params()) model = self._bokeh_model(**properties) if root is None: root = model self._models[root.ref['id']] = (model, parent) return model class VSpacer(Spacer): """ The `VSpacer` layout provides responsive vertical spacing. Using this component we can space objects equidistantly in a layout and allow the empty space to shrink when the browser is resized. Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers :Example: >>> pn.Column( ... pn.layout.VSpacer(), 'Item 1', ... pn.layout.VSpacer(), 'Item 2', ... pn.layout.VSpacer() ... ) """ sizing_mode = param.Parameter(default='stretch_height', readonly=True) class HSpacer(Spacer): """ The `HSpacer` layout provides responsive vertical spacing. Using this component we can space objects equidistantly in a layout and allow the empty space to shrink when the browser is resized. Reference: https://panel.holoviz.org/user_guide/Customization.html#spacers :Example: >>> pn.Row( ... pn.layout.HSpacer(), 'Item 1', ... pn.layout.HSpacer(), 'Item 2', ... pn.layout.HSpacer() ... ) """ sizing_mode = param.Parameter(default='stretch_width', readonly=True) class Divider(Reactive): """ A `Divider` draws a horizontal rule (a `<hr>` tag in HTML) to separate multiple components in a layout. It automatically spans the full width of the container. Reference: https://panel.holoviz.org/reference/layouts/Divider.html :Example: >>> pn.Column( ... '# Lorem Ipsum', ... pn.layout.Divider(), ... 'A very long text... ' >>> ) """ width_policy = param.ObjectSelector(default="fit", readonly=True) _bokeh_model = BkDiv def _get_model(self, doc, root=None, parent=None, comm=None): properties = self._process_param_change(self._init_params()) properties['style'] = {'width': '100%', 'height': '100%'} model = self._bokeh_model(text='<hr style="margin: 0px">', **properties) if root is None: root = model self._models[root.ref['id']] = (model, parent) return model
1,168
915
package org.nzbhydra.fortests; import lombok.Builder; import lombok.Data; import java.time.Instant; import java.util.List; @Data @Builder public class NewznabItemData { private String title; private String description; private String link; private String category; private List<Integer> newznabCategories; private Long size; private Instant pubDate; private String group; private String poster; }
145
1,826
package com.vladsch.flexmark.util.sequence; import com.vladsch.flexmark.util.data.DataHolder; import com.vladsch.flexmark.util.data.DataKeyBase; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * A CharSequence that references original char[] * a subSequence() returns a sub-sequence from the original base sequence * <p> * NOTE: '\0' changed to '\uFFFD' use {@link com.vladsch.flexmark.util.sequence.mappers.NullEncoder#decodeNull} mapper to get original null chars. */ final public class CharSubSequence extends BasedSequenceImpl { final private char[] baseChars; final private CharSubSequence base; final private int startOffset; final private int endOffset; private CharSubSequence(char[] chars, int hash) { super(hash); int iMax = chars.length; base = this; baseChars = chars; startOffset = 0; endOffset = baseChars.length; } private CharSubSequence(CharSubSequence baseSeq, int startIndex, int endIndex) { super(0); assert startIndex >= 0 && endIndex >= startIndex && endIndex <= baseSeq.baseChars.length : String.format("CharSubSequence must have (startIndex > 0 || endIndex < %d) && endIndex >= startIndex, got startIndex:%d, endIndex: %d", baseSeq.baseChars.length, startIndex, endIndex); assert (startIndex > 0 || endIndex < baseSeq.baseChars.length) : String.format("CharSubSequence must be proper subsequences [1, %d) got startIndex:%d, endIndex: %d", Math.max(0, baseSeq.baseChars.length - 1), startIndex, endIndex); base = baseSeq; baseChars = baseSeq.baseChars; startOffset = base.startOffset + startIndex; endOffset = base.startOffset + endIndex; } @Override public int getOptionFlags() { return 0; } @Override public boolean allOptions(int options) { return false; } @Override public boolean anyOptions(int options) { return false; } @Override public <T> T getOption(DataKeyBase<T> dataKey) { return dataKey.get(null); } @Override public @Nullable DataHolder getOptions() { return null; } @NotNull @Override public CharSubSequence getBaseSequence() { return base; } @NotNull @Override public char[] getBase() { return baseChars; } public int getStartOffset() { return startOffset; } public int getEndOffset() { return endOffset; } @Override public int length() { return endOffset - startOffset; } @NotNull @Override public Range getSourceRange() { return Range.of(startOffset, endOffset); } @Override public int getIndexOffset(int index) { SequenceUtils.validateIndexInclusiveEnd(index, length()); return startOffset + index; } @Override public char charAt(int index) { SequenceUtils.validateIndex(index, length()); char c = baseChars[index + startOffset]; return c == SequenceUtils.NUL ? SequenceUtils.ENC_NUL : c; } @NotNull @Override public CharSubSequence subSequence(int startIndex, int endIndex) { SequenceUtils.validateStartEnd(startIndex, endIndex, length()); return base.baseSubSequence(startOffset + startIndex, startOffset + endIndex); } @NotNull @Override public CharSubSequence baseSubSequence(int startIndex, int endIndex) { SequenceUtils.validateStartEnd(startIndex, endIndex, baseChars.length); return startIndex == startOffset && endIndex == endOffset ? this : base != this ? 
base.baseSubSequence(startIndex, endIndex) : new CharSubSequence(base, startIndex, endIndex); } public static CharSubSequence of(CharSequence charSequence) { return of(charSequence, 0, charSequence.length()); } public static CharSubSequence of(CharSequence charSequence, int startIndex) { assert startIndex <= charSequence.length(); return of(charSequence, startIndex, charSequence.length()); } /** * @param chars char array * @param startIndex start index in array * @param endIndex end index in array * @return CharSubSequence based sequence of array * @deprecated NOTE: use BasedSequence.of() for creating based sequences */ @Deprecated public static CharSubSequence of(char[] chars, int startIndex, int endIndex) { assert startIndex >= 0 && startIndex <= endIndex && endIndex <= chars.length; char[] useChars = new char[chars.length]; System.arraycopy(chars, 0, useChars, 0, chars.length); return startIndex == 0 && endIndex == chars.length ? new CharSubSequence(useChars, 0) : new CharSubSequence(useChars, 0).subSequence(startIndex, endIndex); } /** * @param charSequence char sequence * @param startIndex start index in sequence * @param endIndex end index in sequence * @return char based sequence * @deprecated NOTE: use BasedSequence.of() for creating based sequences */ @Deprecated public static CharSubSequence of(CharSequence charSequence, int startIndex, int endIndex) { assert startIndex >= 0 && startIndex <= endIndex && endIndex <= charSequence.length(); CharSubSequence charSubSequence; if (charSequence instanceof CharSubSequence) { charSubSequence = ((CharSubSequence) charSequence); } else if (charSequence instanceof String) { charSubSequence = new CharSubSequence(((String) charSequence).toCharArray(), ((String) charSequence).hashCode()); } else if (charSequence instanceof StringBuilder) { char[] chars = new char[charSequence.length()]; ((StringBuilder) charSequence).getChars(0, charSequence.length(), chars, 0); charSubSequence = new CharSubSequence(chars, 0); } else { charSubSequence = new CharSubSequence(charSequence.toString().toCharArray(), 0); } if (startIndex == 0 && endIndex == charSequence.length()) { return charSubSequence; } else { return charSubSequence.subSequence(startIndex, endIndex); } } }
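A small usage sketch of the sequence API above; the offsets follow the documented semantics, and the snippet is illustrative rather than part of the original file:

// Build a based sequence over a String, then take a proper subsequence.
CharSubSequence base = CharSubSequence.of("hello world");
CharSubSequence word = base.subSequence(6, 11); // "world"

// Subsequences reference the original char[] instead of copying it:
assert word.getBase() == base.getBase();
assert word.getStartOffset() == 6 && word.getEndOffset() == 11;
// getSourceRange() reports the span within the base sequence, i.e. Range.of(6, 11).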
//---------------------------------------------------------------------------// // Copyright (c) 2013 <NAME> <<EMAIL>> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #ifndef BOOST_COMPUTE_FUNCTIONAL_RELATIONAL_HPP #define BOOST_COMPUTE_FUNCTIONAL_RELATIONAL_HPP #include <boost/compute/functional/detail/macros.hpp> namespace boost { namespace compute { BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isequal, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isnotequal, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isgreater, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isgreaterequal, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isless, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(islessequal, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(islessgreater, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isfinite, int (T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isinf, int (T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isnan, int (T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isnormal, int (T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isordered, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(isunordered, int (T, T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION_UNDERSCORE(signbit, int (T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION(any, int (T), class T) BOOST_COMPUTE_DECLARE_BUILTIN_FUNCTION(all, int (T), class T) } // end compute namespace } // end boost namespace #endif // BOOST_COMPUTE_FUNCTIONAL_RELATIONAL_HPP
package com.salesmanager.shop.store.api.v1.security; import java.util.ArrayList; import java.util.List; import java.util.Set; import javax.inject.Inject; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.http.HttpStatus; import org.springframework.http.MediaType; import org.springframework.web.bind.annotation.GetMapping; import org.springframework.web.bind.annotation.PathVariable; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.ResponseStatus; import org.springframework.web.bind.annotation.RestController; import com.salesmanager.core.business.services.user.GroupService; import com.salesmanager.core.business.services.user.PermissionService; import com.salesmanager.core.model.user.Group; import com.salesmanager.core.model.user.Permission; import com.salesmanager.shop.model.security.ReadableGroup; import com.salesmanager.shop.model.security.ReadablePermission; import com.salesmanager.shop.store.api.exception.ResourceNotFoundException; import com.salesmanager.shop.store.api.exception.ServiceRuntimeException; import io.swagger.annotations.Api; import io.swagger.annotations.ApiOperation; import io.swagger.annotations.SwaggerDefinition; import io.swagger.annotations.Tag; /** * API for managing security * * @author carlsamson * */ @RestController @RequestMapping(value = "/api/v1/sec") @Api(tags = { "Groups and permissions Api" }) @SwaggerDefinition(tags = { @Tag(name = "List of supported groups and permissions", description = "List groups and attached permissions for reference") }) public class SecurityApi { private static final Logger LOGGER = LoggerFactory.getLogger(SecurityApi.class); @Inject private PermissionService permissionService; @Inject private GroupService groupService; @ResponseStatus(HttpStatus.OK) @GetMapping({ "/private/{group}/permissions" }) @ApiOperation(httpMethod = "GET", value = "Get permissions by group", notes = "", produces = MediaType.APPLICATION_JSON_VALUE, response = List.class) public List<ReadablePermission> listPermissions(@PathVariable String group) { Group g = null; try { g = groupService.findByName(group); if(g == null) { throw new ResourceNotFoundException("Group [" + group + "] does not exist"); } } catch (Exception e) { LOGGER.error("An error occurred while getting group [" + group + "]",e); throw new ServiceRuntimeException("An error occurred while getting group [" + group + "]"); } Set<Permission> permissions = g.getPermissions(); List<ReadablePermission> readablePermissions = new ArrayList<ReadablePermission>(); for (Permission permission : permissions) { ReadablePermission readablePermission = new ReadablePermission(); readablePermission.setName(permission.getPermissionName()); readablePermission.setId(permission.getId()); readablePermissions.add(readablePermission); } return readablePermissions; } /** * Permissions Requires service user authentication * * @return */ @GetMapping("/private/permissions") public List<ReadablePermission> permissions() { List<Permission> permissions = permissionService.list(); List<ReadablePermission> readablePermissions = new ArrayList<ReadablePermission>(); for (Permission permission : permissions) { ReadablePermission readablePermission = new ReadablePermission(); readablePermission.setName(permission.getPermissionName()); readablePermission.setId(permission.getId()); readablePermissions.add(readablePermission); } return readablePermissions; } /** * Load groups Requires service user authentication * * @return */
@GetMapping("/private/groups") public List<ReadableGroup> groups() { List<Group> groups = groupService.list(); List<ReadableGroup> readableGroups = new ArrayList<ReadableGroup>(); for (Group group : groups) { ReadableGroup readableGroup = new ReadableGroup(); readableGroup.setName(group.getGroupName()); readableGroup.setId(group.getId().longValue()); readableGroup.setType(group.getGroupType().name()); readableGroups.add(readableGroup); } return readableGroups; } }
// RUN: %clang_cc1 -triple %itanium_abi_triple -emit-llvm -std=c++11 -fcxx-exceptions -fexceptions -S -emit-llvm -o - %s | FileCheck %s namespace std { struct string { const char *p; string(const char *s); ~string(); }; } struct Bar { int a; }; struct Foo { std::string c; Bar d[32]; }; static Foo table[] = { { "blerg" }, }; // CHECK: define internal void @__cxx_global_var_init // CHECK: invoke {{.*}} @_ZNSt6stringC1EPKc( // CHECK-NOT: unreachable // CHECK: br label
{ "name": "TravelerKit", "version": "0.1.1", "license": { "type": "Apache" }, "homepage": "https://github.com/Guestlogix/traveler-ios", "authors": { "<NAME>": "<EMAIL>" }, "summary": "Traveler Swift Core SDK", "source": { "git": "https://github.com/Guestlogix/traveler-ios.git", "branch": "master" }, "swift_version": "4.2", "source_files": "traveler-swift-core/TravelerKit/**/*.{swift}", "platforms": { "ios": "11.4" }, "module_name": "TravelerKit" }
/* * Copyright 2017 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.github.swagger2markup.internal.component; import io.github.swagger2markup.OpenAPI2MarkupConverter; import io.github.swagger2markup.extension.MarkupComponent; import io.github.swagger2markup.adoc.ast.impl.ParagraphBlockImpl; import io.swagger.v3.oas.models.ExternalDocumentation; import org.apache.commons.lang3.StringUtils; import org.asciidoctor.ast.Block; import org.asciidoctor.ast.StructuralNode; public class ExternalDocumentationComponent extends MarkupComponent<StructuralNode, ExternalDocumentationComponent.Parameters, StructuralNode> { public ExternalDocumentationComponent(OpenAPI2MarkupConverter.OpenAPIContext context) { super(context); } public static Parameters parameters(ExternalDocumentation externalDocs) { return new Parameters(externalDocs); } public StructuralNode apply(StructuralNode node, ExternalDocumentation externalDocs) { return apply(node, parameters(externalDocs)); } @Override public StructuralNode apply(StructuralNode node, Parameters params) { ExternalDocumentation externalDocs = params.externalDocs; if (externalDocs == null) return node; String url = externalDocs.getUrl(); if (StringUtils.isNotBlank(url)) { Block paragraph = new ParagraphBlockImpl(node); String desc = externalDocs.getDescription(); paragraph.setSource(url + (StringUtils.isNotBlank(desc) ? "[" + desc + "]" : "")); node.append(paragraph); } return node; } public static class Parameters { private final ExternalDocumentation externalDocs; public Parameters(ExternalDocumentation externalDocs) { this.externalDocs = externalDocs; } } }
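To make the rendering behavior above concrete, a short sketch; the URL and description are invented, and the construction of the converter context and parent node is omitted:

ExternalDocumentation docs = new ExternalDocumentation();
docs.setUrl("https://docs.example.com/api");      // hypothetical URL
docs.setDescription("Full API reference");
// component.apply(parentNode, docs) appends a paragraph whose AsciiDoc
// source is: https://docs.example.com/api[Full API reference]
// With a blank description, only the bare URL is emitted; with a blank URL,
// the node is returned unchanged.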
#pragma once #include "Sqex_Excel.h" #include "Sqex_Sqpack.h" namespace Sqex::Excel { class Depth2ExhExdCreator { public: const std::string Name; const std::vector<Exh::Column> Columns; const int SomeSortOfBufferSize; const size_t DivideUnit; const uint32_t FixedDataSize; std::map<uint32_t, std::map<Language, std::vector<ExdColumn>>> Data; std::set<uint32_t> DivideAtIds; std::vector<Language> Languages; std::vector<Language> FillMissingLanguageFrom; Depth2ExhExdCreator(std::string name, std::vector<Exh::Column> columns, int someSortOfBufferSize, size_t divideUnit = SIZE_MAX); void AddLanguage(Language language); const std::vector<ExdColumn>& GetRow(uint32_t id, Language language) const; void SetRow(uint32_t id, Language language, std::vector<ExdColumn> row, bool replace = true); private: std::pair<Sqpack::EntryPathSpec, std::vector<char>> Flush(uint32_t startId, std::map<uint32_t, std::vector<char>> rows, Language language); public: std::map<Sqpack::EntryPathSpec, std::vector<char>, Sqpack::EntryPathSpec::FullPathComparator> Compile(); }; }
# Lint as: python3 # Copyright 2019 DeepMind Technologies Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for dataset.""" import socket import threading import time from absl.testing import parameterized import numpy as np from reverb import client from reverb import dataset as reverb_dataset from reverb import errors from reverb import item_selectors from reverb import rate_limiters from reverb import replay_sample from reverb import server as reverb_server import tensorflow.compat.v1 as tf import tree from tensorflow.python.framework import tensor_spec # pylint:disable=g-direct-tensorflow-import def make_server(): return reverb_server.Server( tables=[ reverb_server.Table( 'dist', sampler=item_selectors.Prioritized(priority_exponent=1), remover=item_selectors.Fifo(), max_size=1000000, rate_limiter=rate_limiters.MinSize(1)), reverb_server.Table( 'dist_queue', sampler=item_selectors.Fifo(), remover=item_selectors.Fifo(), max_size=1000000, max_times_sampled=1, rate_limiter=rate_limiters.MinSize(1)), reverb_server.Table( 'signatured', sampler=item_selectors.Prioritized(priority_exponent=1), remover=item_selectors.Fifo(), max_size=1000000, rate_limiter=rate_limiters.MinSize(1), signature=tf.TensorSpec(dtype=tf.float32, shape=(None, None))), reverb_server.Table( 'bounded_spec_signatured', sampler=item_selectors.Prioritized(priority_exponent=1), remover=item_selectors.Fifo(), max_size=1000000, rate_limiter=rate_limiters.MinSize(1), # Currently only the `shape` and `dtype` of the bounded spec # is considered during signature check. # TODO(b/158033101): Check the boundaries as well. 
signature=tensor_spec.BoundedTensorSpec( dtype=tf.float32, shape=(None, None), minimum=(0.0, 0.0), maximum=(10.0, 10.)), ), ], port=None, ) class LocalReplayDatasetTest(tf.test.TestCase, parameterized.TestCase): USE_LOCALHOST = True @classmethod def setUpClass(cls): super().setUpClass() cls._server = make_server() if cls.USE_LOCALHOST: connect_to = 'localhost' else: connect_to = 'dns:///{}'.format(socket.gethostname()) cls._client = client.Client(f'{connect_to}:{cls._server.port}') def setUp(self): super().setUp() self._num_prev_samples = { table: self._get_total_num_samples(table) for table in ('dist', 'dist_queue', 'signatured', 'bounded_spec_signatured') } def tearDown(self): super().tearDown() self._client.reset('dist') self._client.reset('dist_queue') self._client.reset('signatured') self._client.reset('bounded_spec_signatured') @classmethod def tearDownClass(cls): super().tearDownClass() cls._server.stop() def _populate_replay(self, sequence_length=100, max_time_steps=None, max_items=1000): max_time_steps = max_time_steps or sequence_length num_items = 0 with self._client.writer(max_time_steps) as writer: for i in range(1000): writer.append([np.zeros((3, 3), dtype=np.float32)]) if i % min(5, sequence_length) == 0 and i >= sequence_length: writer.create_item( table='dist', num_timesteps=sequence_length, priority=1) writer.create_item( table='dist_queue', num_timesteps=sequence_length, priority=1) writer.create_item( table='signatured', num_timesteps=sequence_length, priority=1) writer.create_item( table='bounded_spec_signatured', num_timesteps=sequence_length, priority=1) num_items += 1 if num_items >= max_items: break def _sample_from(self, dataset, num_samples): iterator = dataset.make_initializable_iterator() dataset_item = iterator.get_next() self.evaluate(iterator.initializer) return [self.evaluate(dataset_item) for _ in range(num_samples)] def _get_total_num_samples(self, table: str) -> int: table_info = self._client.server_info()[table] return table_info.rate_limiter_info.sample_stats.completed def _get_num_samples(self, table: str) -> int: """Gets the number of samples since the start of the test.""" return self._get_total_num_samples(table) - self._num_prev_samples[table] @parameterized.named_parameters( { 'testcase_name': 'default_values', }, { 'testcase_name': 'num_workers_per_iterator_is_0', 'num_workers_per_iterator': 0, 'want_error': ValueError, }, { 'testcase_name': 'num_workers_per_iterator_is_1', 'num_workers_per_iterator': 1, }, { 'testcase_name': 'num_workers_per_iterator_is_minus_1', 'num_workers_per_iterator': -1, }, { 'testcase_name': 'num_workers_per_iterator_is_minus_2', 'num_workers_per_iterator': -2, 'want_error': ValueError, }, { 'testcase_name': 'max_samples_per_stream_is_0', 'max_samples_per_stream': 0, 'want_error': ValueError, }, { 'testcase_name': 'max_samples_per_stream_is_1', 'max_samples_per_stream': 1, }, { 'testcase_name': 'max_samples_per_stream_is_minus_1', 'max_samples_per_stream': -1, }, { 'testcase_name': 'max_samples_per_stream_is_minus_2', 'num_workers_per_iterator': -2, 'want_error': ValueError, }, { 'testcase_name': 'max_in_flight_samples_per_worker_is_0', 'max_in_flight_samples_per_worker': 0, 'want_error': ValueError, }, { 'testcase_name': 'max_in_flight_samples_per_worker_is_1', 'max_in_flight_samples_per_worker': 1, }, { 'testcase_name': 'max_in_flight_samples_per_worker_is_minus_1', 'max_in_flight_samples_per_worker': -1, 'want_error': ValueError, }, ) def test_sampler_parameter_validation(self, **kwargs): dtypes = (tf.float32,) 
shapes = (tf.TensorShape([3, 3]),) if 'max_in_flight_samples_per_worker' not in kwargs: kwargs['max_in_flight_samples_per_worker'] = 100 if 'want_error' in kwargs: error = kwargs.pop('want_error') with self.assertRaises(error): reverb_dataset.ReplayDataset(self._client.server_address, 'dist', dtypes, shapes, **kwargs) else: reverb_dataset.ReplayDataset(self._client.server_address, 'dist', dtypes, shapes, **kwargs) def test_iterate(self): self._populate_replay() dataset = reverb_dataset.ReplayDataset( tf.constant(self._client.server_address), table=tf.constant('dist'), dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # A single sample is returned so the key should be a scalar int64. self.assertIsInstance(sample.info.key, np.uint64) np.testing.assert_array_equal(sample.data[0], np.zeros((3, 3), dtype=np.float32)) def test_distribution_strategy(self): self._populate_replay() physical_devices = tf.config.list_physical_devices('CPU') configs = tf.config.experimental.get_virtual_device_configuration( physical_devices[0]) if configs is None: virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration() for _ in range(4)] tf.config.experimental.set_virtual_device_configuration( physical_devices[0], virtual_devices) strategy = tf.distribute.MirroredStrategy(['/cpu:%d' % i for i in range(4)]) def reverb_dataset_fn(i): tf.print('Creating dataset for replica; index:', i) return reverb_dataset.ReplayDataset( self._client.server_address, table=tf.constant('dist'), dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100).take(2) def dataset_fn(_): return tf.data.Dataset.range(4).flat_map(reverb_dataset_fn).take(2 * 4) ds = strategy.experimental_distribute_datasets_from_function(dataset_fn) def check_probabilities(_, v): probability = v.info.probability self.assertLen(probability.values, 4) # Don't use any math ops since tensor values seem to contain # unaligned tensors on some systems; but tf.print doesn't check alignment. # # This seems to be caused by a compatibility issue where DistStrat isn't # well tested when eager mode is disabled. So instead of treating this # as a true TF bug, we just work around it. We can remove this hack and # convert it to e.g. tf.assert_greater type check if/when we enable eager # execution for these tests. tf.print('Probability values:', probability.values) def get_next_value(v): return tf.distribute.get_replica_context().merge_call( check_probabilities, args=(v,)) @tf.function def run_strategy(ds_): i = tf.constant(0) for v in ds_: strategy.run(get_next_value, args=(v,)) i += 1 return i rs = run_strategy(ds) # Each iteration contains 4 items - one from each replica. We take 8 items # total, so there should be 2 iterations. self.assertEqual(2, self.evaluate(rs)) def test_timeout_invalid_arguments(self): with self.assertRaisesRegex(ValueError, r'must be an integer >= -1'): reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=-2, max_in_flight_samples_per_worker=100) def test_timeout(self): dataset_0s = reverb_dataset.ReplayDataset( self._client.server_address, table='dist_queue', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=50, # Slightly above exactly 0. 
max_in_flight_samples_per_worker=100) dataset_1s = reverb_dataset.ReplayDataset( self._client.server_address, table='dist_queue', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=1000, max_in_flight_samples_per_worker=100) dataset_2s = reverb_dataset.ReplayDataset( self._client.server_address, table='dist_queue', dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), rate_limiter_timeout_ms=2000, max_in_flight_samples_per_worker=100) start_time = time.time() with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError, r'End of sequence'): self._sample_from(dataset_0s, 1) duration = time.time() - start_time self.assertGreaterEqual(duration, 0) self.assertLess(duration, 5) start_time = time.time() with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError, r'End of sequence'): self._sample_from(dataset_1s, 1) duration = time.time() - start_time self.assertGreaterEqual(duration, 1) self.assertLess(duration, 10) start_time = time.time() with self.assertRaisesWithPredicateMatch(tf.errors.OutOfRangeError, r'End of sequence'): self._sample_from(dataset_2s, 1) duration = time.time() - start_time self.assertGreaterEqual(duration, 2) self.assertLess(duration, 10) # If we insert some data, and the rate limiter doesn't force any waiting, # then we can ask for a timeout of 0s and still get data back. iterator = dataset_0s.make_initializable_iterator() dataset_0s_item = iterator.get_next() self.evaluate(iterator.initializer) for _ in range(3): self._populate_replay(max_items=2) # Pull two items for _ in range(2): self.evaluate(dataset_0s_item) # Wait for the time it would take a broken sampler to time out # on next iteration. time.sleep(0.5) @parameterized.parameters(['signatured'], ['bounded_spec_signatured']) def test_inconsistent_signature_size(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32, tf.float64), shapes=(tf.TensorShape([3, 3]), tf.TensorShape([])), max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Inconsistent number of tensors requested from table \'{}\'. ' r'Requested 6 tensors, but table signature shows 5 tensors.'.format( table_name)): self._sample_from(dataset, 10) @parameterized.parameters(['signatured'], ['bounded_spec_signatured']) def test_incompatible_signature_dtype(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.int64,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). ' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset, 10) dataset_emit_sequences = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.int64,), shapes=(tf.TensorShape([None, 3, 3]),), emit_timesteps=False, max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(int64, \[3,3\]\). 
' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset_emit_sequences, 10) @parameterized.parameters(['signatured'], ['bounded_spec_signatured']) def test_incompatible_signature_shape(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([3]),), max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). ' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset, 10) dataset_emit_sequences = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([None, 3]),), emit_timesteps=False, max_in_flight_samples_per_worker=100) with self.assertRaisesWithPredicateMatch( tf.errors.InvalidArgumentError, r'Requested incompatible tensor at flattened index 4 from table ' r'\'{}\'. Requested \(dtype, shape\): \(float, \[3\]\). ' r'Signature \(dtype, shape\): \(float, \[\?,\?\]\)'.format(table_name)): self._sample_from(dataset_emit_sequences, 10) @parameterized.parameters([1], [3], [10]) def test_incompatible_shape_when_using_sequence_length(self, sequence_length): with self.assertRaises(ValueError): reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([sequence_length + 1, 3, 3]),), emit_timesteps=False, sequence_length=sequence_length, max_in_flight_samples_per_worker=100) def test_incompatible_dataset_shapes_and_types_without_signature(self): self._populate_replay() ds_wrong_shape = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([]),), max_in_flight_samples_per_worker=100) with self.assertRaisesRegex( tf.errors.InvalidArgumentError, r'Specification has \(dtype, shape\): \(float, \[\]\). ' r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'): self._sample_from(ds_wrong_shape, 1) ds_full_sequences_wrong_shape = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([None]),), emit_timesteps=False, max_in_flight_samples_per_worker=100) with self.assertRaisesRegex( tf.errors.InvalidArgumentError, r'Specification has \(dtype, shape\): \(float, \[\]\). ' r'Tensor has \(dtype, shape\): \(float, \[3,3\]\).'): self._sample_from(ds_full_sequences_wrong_shape, 1) @parameterized.parameters( ('dist', 1, 1), ('dist', 1, 3), ('dist', 3, 3), ('dist', 3, 5), ('dist', 10, 10), ('dist', 10, 11), ('signatured', 1, 1), ('signatured', 3, 3), ('signatured', 3, 5), ('signatured', 10, 10), ('bounded_spec_signatured', 1, 1), ('bounded_spec_signatured', 3, 3), ('bounded_spec_signatured', 3, 5), ('bounded_spec_signatured', 10, 10), ) def test_iterate_with_sequence_length(self, table_name, sequence_length, max_time_steps): # Also ensure we get sequence_length-shaped outputs when # writers' max_time_steps != sequence_length. 
self._populate_replay(sequence_length, max_time_steps=max_time_steps) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([sequence_length, 3, 3]),), emit_timesteps=False, sequence_length=sequence_length, max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # The keys and data should be batched up by the sequence length. self.assertEqual(sample.info.key.shape, (sequence_length,)) np.testing.assert_array_equal( sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32)) @parameterized.parameters( ('dist', 1), ('dist', 3), ('dist', 10), ('signatured', 1), ('signatured', 3), ('signatured', 10), ('bounded_spec_signatured', 1), ('bounded_spec_signatured', 3), ('bounded_spec_signatured', 10), ) def test_iterate_with_unknown_sequence_length(self, table_name, sequence_length): self._populate_replay(sequence_length) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([None, 3, 3]),), emit_timesteps=False, sequence_length=None, max_in_flight_samples_per_worker=100) # Check the shape of the items. iterator = dataset.make_initializable_iterator() dataset_item = iterator.get_next() self.assertIsNone(dataset_item.info.key.shape.as_list()[0], None) self.assertIsNone(dataset_item.data[0].shape.as_list()[0], None) # Verify that once evaluated, the samples has the expected length. got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # The keys and data should be batched up by the sequence length. self.assertEqual(sample.info.key.shape, (sequence_length,)) np.testing.assert_array_equal( sample.data[0], np.zeros((sequence_length, 3, 3), dtype=np.float32)) @parameterized.parameters( ('dist', 1, 2), ('dist', 2, 1), ('signatured', 1, 2), ('signatured', 2, 1), ('bounded_spec_signatured', 1, 2), ('bounded_spec_signatured', 2, 1), ) def test_checks_sequence_length_when_timesteps_emitted( self, table_name, actual_sequence_length, provided_sequence_length): self._populate_replay(actual_sequence_length) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([provided_sequence_length, 3, 3]),), emit_timesteps=True, sequence_length=provided_sequence_length, max_in_flight_samples_per_worker=100) with self.assertRaises(tf.errors.InvalidArgumentError): self._sample_from(dataset, 10) @parameterized.named_parameters( dict(testcase_name='TableDist', table_name='dist'), dict(testcase_name='TableSignatured', table_name='signatured'), dict( testcase_name='TableBoundedSpecSignatured', table_name='bounded_spec_signatured')) def test_iterate_batched(self, table_name): self._populate_replay() dataset = reverb_dataset.ReplayDataset( self._client.server_address, table=table_name, dtypes=(tf.float32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) dataset = dataset.batch(2, True) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) # The keys should be batched up like the data. 
self.assertEqual(sample.info.key.shape, (2,)) np.testing.assert_array_equal(sample.data[0], np.zeros((2, 3, 3), dtype=np.float32)) def test_iterate_nested_and_batched(self): with self._client.writer(100) as writer: for i in range(1000): writer.append({ 'observation': { 'data': np.zeros((3, 3), dtype=np.float32), 'extras': [ np.int64(10), np.ones([1], dtype=np.int32), ], }, 'reward': np.zeros((10, 10), dtype=np.float32), }) if i % 5 == 0 and i >= 100: writer.create_item( table='dist', num_timesteps=100, priority=1) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(((tf.float32), (tf.int64, tf.int32)), tf.float32), shapes=((tf.TensorShape([3, 3]), (tf.TensorShape(None), tf.TensorShape([1]))), tf.TensorShape([10, 10])), max_in_flight_samples_per_worker=100) dataset = dataset.batch(3) structure = { 'observation': { 'data': tf.TensorSpec([3, 3], tf.float32), 'extras': [ tf.TensorSpec([], tf.int64), tf.TensorSpec([1], tf.int32), ], }, 'reward': tf.TensorSpec([], tf.int64), } got = self._sample_from(dataset, 10) self.assertLen(got, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) transition = tree.unflatten_as(structure, tree.flatten(sample.data)) np.testing.assert_array_equal(transition['observation']['data'], np.zeros([3, 3, 3], dtype=np.float32)) np.testing.assert_array_equal(transition['observation']['extras'][0], np.ones([3], dtype=np.int64) * 10) np.testing.assert_array_equal(transition['observation']['extras'][1], np.ones([3, 1], dtype=np.int32)) np.testing.assert_array_equal(transition['reward'], np.zeros([3, 10, 10], dtype=np.float32)) def test_multiple_iterators(self): with self._client.writer(100) as writer: for i in range(10): writer.append([np.ones((81, 81), dtype=np.float32) * i]) writer.create_item(table='dist', num_timesteps=10, priority=1) trajectory_length = 5 batch_size = 3 dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.float32,), shapes=(tf.TensorShape([81, 81]),), max_in_flight_samples_per_worker=100) dataset = dataset.batch(trajectory_length) iterators = [ dataset.make_initializable_iterator() for _ in range(batch_size) ] items = tf.stack( [tf.squeeze(iterator.get_next().data) for iterator in iterators]) with self.session() as session: session.run([iterator.initializer for iterator in iterators]) got = session.run(items) self.assertEqual(got.shape, (batch_size, trajectory_length, 81, 81)) want = np.array( [[np.ones([81, 81]) * i for i in range(trajectory_length)]] * batch_size) np.testing.assert_array_equal(got, want) def test_iterate_over_blobs(self): for _ in range(10): self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1}) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.int32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 20) self.assertLen(got, 20) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) self.assertIsInstance(sample.info.key, np.uint64) self.assertIsInstance(sample.info.probability, np.float64) np.testing.assert_array_equal(sample.data[0], np.ones((3, 3), dtype=np.int32)) @parameterized.parameters(1, 3, 7) def test_respects_max_in_flight_samples_per_worker( self, max_in_flight_samples_per_worker): if not self.USE_LOCALHOST: self.skipTest('TODO(b/190761815): test broken in Nonlocal case') for _ in range(10): self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1}) dataset = 
reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.int32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=max_in_flight_samples_per_worker) iterator = dataset.make_initializable_iterator() dataset_item = iterator.get_next() self.evaluate(iterator.initializer) for _ in range(100): self.evaluate(dataset_item) # Check that the buffer is incremented by steps of # max_in_flight_samples_per_worker. self.assertEqual( self._get_num_samples('dist') % max_in_flight_samples_per_worker, 0) def test_iterate_over_batched_blobs(self): for _ in range(10): self._client.insert((np.ones([3, 3], dtype=np.int32)), {'dist': 1}) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=(tf.int32,), shapes=(tf.TensorShape([3, 3]),), max_in_flight_samples_per_worker=100) dataset = dataset.batch(5) got = self._sample_from(dataset, 20) self.assertLen(got, 20) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) self.assertEqual(sample.info.key.shape, (5,)) np.testing.assert_array_equal(sample.data[0], np.ones((5, 3, 3), dtype=np.int32)) def test_converts_spec_lists_into_tuples(self): for _ in range(10): data = [ (np.ones([1, 1], dtype=np.int32),), [ np.ones([3, 3], dtype=np.int8), (np.ones([2, 2], dtype=np.float64),) ], ] self._client.insert(data, {'dist': 1}) dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=[ (tf.int32,), [ tf.int8, (tf.float64,), ], ], shapes=[ (tf.TensorShape([1, 1]),), [ tf.TensorShape([3, 3]), (tf.TensorShape([2, 2]),), ], ], max_in_flight_samples_per_worker=100) got = self._sample_from(dataset, 10) for sample in got: self.assertIsInstance(sample, replay_sample.ReplaySample) self.assertIsInstance(sample.info.key, np.uint64) tree.assert_same_structure(sample.data, ( (None,), ( None, (None,), ), )) def test_session_is_closed_while_op_pending(self): dataset = reverb_dataset.ReplayDataset( self._client.server_address, table='dist', dtypes=tf.float32, shapes=tf.TensorShape([]), max_in_flight_samples_per_worker=100) iterator = dataset.make_initializable_iterator() item = iterator.get_next() def _session_closer(sess, wait_time_secs): def _fn(): time.sleep(wait_time_secs) sess.close() return _fn with self.session() as sess: sess.run(iterator.initializer) thread = threading.Thread(target=_session_closer(sess, 3)) thread.start() with self.assertRaises(tf.errors.CancelledError): sess.run(item) class NonlocalReplayDatasetTest(LocalReplayDatasetTest): """Test with non-localhost connection to server.""" USE_LOCALHOST = False class FromTableSignatureTest(tf.test.TestCase): def test_table_not_found(self): server = reverb_server.Server([ reverb_server.Table.queue('table_a', 10), reverb_server.Table.queue('table_c', 10), reverb_server.Table.queue('table_b', 10), ]) address = f'localhost:{server.port}' with self.assertRaisesWithPredicateMatch( ValueError, f'Server at {address} does not contain any table named not_found. 
' f'Found: table_a, table_b, table_c.'): reverb_dataset.ReplayDataset.from_table_signature( address, 'not_found', 100) def test_server_not_found(self): with self.assertRaises(errors.DeadlineExceededError): reverb_dataset.ReplayDataset.from_table_signature( 'localhost:1234', 'not_found', 100, get_signature_timeout_secs=1) def test_table_does_not_have_signature(self): server = make_server() address = f'localhost:{server.port}' with self.assertRaisesWithPredicateMatch( ValueError, f'Table dist at {address} does not have a signature.'): reverb_dataset.ReplayDataset.from_table_signature( address, 'dist', 100) def test_sets_dtypes_from_signature(self): signature = { 'a': { 'b': tf.TensorSpec([3, 3], tf.float32), 'c': tf.TensorSpec([], tf.int64), }, 'x': tf.TensorSpec([None], tf.uint64), } server = reverb_server.Server( [reverb_server.Table.queue('queue', 10, signature=signature)]) dataset = reverb_dataset.ReplayDataset.from_table_signature( f'localhost:{server.port}', 'queue', 100) self.assertDictEqual(dataset.element_spec.data, signature) def test_sets_dtypes_from_bounded_spec_signature(self): bounded_spec_signature = { 'a': { 'b': tensor_spec.BoundedTensorSpec([3, 3], tf.float32, 0, 3), 'c': tensor_spec.BoundedTensorSpec([], tf.int64, 0, 5), }, } server = reverb_server.Server([ reverb_server.Table.queue( 'queue', 10, signature=bounded_spec_signature) ]) dataset = reverb_dataset.ReplayDataset.from_table_signature( f'localhost:{server.port}', 'queue', 100) self.assertDictEqual( dataset.element_spec.data, { 'a': { 'b': tf.TensorSpec([3, 3], tf.float32), 'c': tf.TensorSpec([], tf.int64), }, }) def test_combines_sequence_length_with_signature_if_not_emit_timestamps(self): server = reverb_server.Server([ reverb_server.Table.queue( 'queue', 10, signature={ 'a': { 'b': tf.TensorSpec([3, 3], tf.float32), 'c': tf.TensorSpec([], tf.int64), }, }) ]) dataset = reverb_dataset.ReplayDataset.from_table_signature( f'localhost:{server.port}', 'queue', 100, emit_timesteps=False, sequence_length=5) self.assertDictEqual( dataset.element_spec.data, { 'a': { 'b': tf.TensorSpec([5, 3, 3], tf.float32), 'c': tf.TensorSpec([5], tf.int64), }, }) if __name__ == '__main__': tf.disable_eager_execution() tf.test.main()
{ "name": "vue-router", "version": "3.0.1", "description": "Official router for Vue.js 2", "author": "<NAME>", "license": "MIT", "main": "dist/vue-router.common.js", "module": "dist/vue-router.esm.js", "unpkg": "dist/vue-router.js", "repository": { "type": "git", "url": "https://github.com/vuejs/vue-router.git" }, "typings": "types/index.d.ts", "keywords": [ "vue", "router", "routing" ], "scripts": {}, "devDependencies": {} }
import lombok.experimental.SuperBuilder; public class SuperBuilderInAnonymousClass { Object anonymous = new Object() { @SuperBuilder class InnerParent { private String string; InnerParent() { super(); } } @SuperBuilder class InnerChild { private String string; InnerChild() { super(); } } }; public SuperBuilderInAnonymousClass() { super(); } }
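This fixture exercises @SuperBuilder inside an anonymous class, a placement Lombok rejects; for contrast, a minimal sketch of the intended use on an ordinary class hierarchy (class and field names are invented):

import lombok.experimental.SuperBuilder;

@SuperBuilder
class ParentWithBuilder {
    private String parentField;
}

@SuperBuilder
class ChildWithBuilder extends ParentWithBuilder {
    private int childField;
}

class SuperBuilderDemo {
    static void demo() {
        // The generated builder exposes fields from the entire hierarchy:
        ChildWithBuilder c = ChildWithBuilder.builder()
                .parentField("p")
                .childField(42)
                .build();
    }
}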
/*++ Copyright (c) Microsoft Corporation. All rights reserved. Module Name: controller.h Abstract: This module contains the controller-specific function definitions. Environment: kernel-mode only Revision History: --*/ #ifndef _CONTROLLER_H_ #define _CONTROLLER_H_ // // Controller specific function prototypes. // VOID ControllerInitialize( _In_ PPBC_DEVICE pDevice); VOID ControllerUninitialize( _In_ PPBC_DEVICE pDevice); VOID ControllerConfigureForTransfer( _In_ PPBC_DEVICE pDevice, _In_ PPBC_REQUEST pRequest); NTSTATUS ControllerTransferData( _In_ PPBC_DEVICE pDevice, _In_ PPBC_REQUEST pRequest); VOID ControllerCompleteTransfer( _In_ PPBC_DEVICE pDevice, _In_ PPBC_REQUEST pRequest, _In_ BOOLEAN AbortSequence); VOID ControllerEnableInterrupts( _In_ PPBC_DEVICE pDevice, _In_ ULONG InterruptMask); VOID ControllerDisableInterrupts( _In_ PPBC_DEVICE pDevice); ULONG ControllerGetInterruptStatus( _In_ PPBC_DEVICE pDevice, _In_ ULONG InterruptMask); VOID ControllerAcknowledgeInterrupts( _In_ PPBC_DEVICE pDevice, _In_ ULONG InterruptMask); VOID ControllerProcessInterrupts( _In_ PPBC_DEVICE pDevice, _In_ PPBC_REQUEST pRequest, _In_ ULONG InterruptStatus); #endif
// // MTIRenderGraphMerge.h // MetalPetal // // Created by <NAME> on 20/11/2017. // #if __has_include(<MetalPetal/MetalPetal.h>) #import <MetalPetal/MTIImagePromise.h> #else #import "MTIImagePromise.h" #endif NS_ASSUME_NONNULL_BEGIN @class MTIImage; __attribute__((objc_subclassing_restricted)) @interface MTIRenderGraphNode: NSObject @property (nonatomic, strong, nullable) NSMutableArray<MTIRenderGraphNode *> *inputs; @property (nonatomic, strong, nullable) MTIImage *image; @property (nonatomic, readonly) NSInteger uniqueDependentCount; @end __attribute__((objc_subclassing_restricted)) @interface MTIRenderGraphOptimizer : NSObject + (id<MTIImagePromise>)promiseByOptimizingRenderGraphOfPromise:(id<MTIImagePromise>)promise; @end NS_ASSUME_NONNULL_END
/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_RUNTIME_IMAGE_H_ #define ART_RUNTIME_IMAGE_H_ #include <string.h> #include "globals.h" #include "mirror/object.h" #include "utils.h" namespace art { // header of image files written by ImageWriter, read and validated by Space. class PACKED(4) ImageHeader { public: ImageHeader() {} ImageHeader(uint32_t image_begin, uint32_t image_size_, uint32_t image_bitmap_offset, uint32_t image_bitmap_size, uint32_t image_roots, uint32_t oat_checksum, uint32_t oat_file_begin, uint32_t oat_data_begin, uint32_t oat_data_end, uint32_t oat_file_end); bool IsValid() const; const char* GetMagic() const; byte* GetImageBegin() const { return reinterpret_cast<byte*>(image_begin_); } size_t GetImageSize() const { return static_cast<uint32_t>(image_size_); } size_t GetImageBitmapOffset() const { return image_bitmap_offset_; } size_t GetImageBitmapSize() const { return image_bitmap_size_; } uint32_t GetOatChecksum() const { return oat_checksum_; } void SetOatChecksum(uint32_t oat_checksum) { oat_checksum_ = oat_checksum; } byte* GetOatFileBegin() const { return reinterpret_cast<byte*>(oat_file_begin_); } byte* GetOatDataBegin() const { return reinterpret_cast<byte*>(oat_data_begin_); } byte* GetOatDataEnd() const { return reinterpret_cast<byte*>(oat_data_end_); } byte* GetOatFileEnd() const { return reinterpret_cast<byte*>(oat_file_end_); } size_t GetBitmapOffset() const { return RoundUp(image_size_, kPageSize); } enum ImageRoot { kResolutionMethod, kCalleeSaveMethod, kRefsOnlySaveMethod, kRefsAndArgsSaveMethod, kOatLocation, kDexCaches, kClassRoots, kImageRootsMax, }; mirror::Object* GetImageRoot(ImageRoot image_root) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: mirror::ObjectArray<mirror::Object>* GetImageRoots() const; static const byte kImageMagic[4]; static const byte kImageVersion[4]; byte magic_[4]; byte version_[4]; // Required base address for mapping the image. uint32_t image_begin_; // Image size, not page aligned. uint32_t image_size_; // Image bitmap offset in the file. uint32_t image_bitmap_offset_; // Size of the image bitmap. uint32_t image_bitmap_size_; // Checksum of the oat file we link to for load time sanity check. uint32_t oat_checksum_; // Start address for oat file. Will be before oat_data_begin_ for .so files. uint32_t oat_file_begin_; // Required oat address expected by image Method::GetCode() pointers. uint32_t oat_data_begin_; // End of oat data address range for this image file. uint32_t oat_data_end_; // End of oat file address range. will be after oat_data_end_ for // .so files. Used for positioning a following alloc spaces. uint32_t oat_file_end_; // Absolute address of an Object[] of objects needed to reinitialize from an image. uint32_t image_roots_; friend class ImageWriter; friend class ImageDumper; // For GetImageRoots() }; } // namespace art #endif // ART_RUNTIME_IMAGE_H_
/*********************************************************************** * * Copyright (c) Berkeley Softworks 1989 -- All Rights Reserved * * PROJECT: PCGEOS * MODULE: Tools Library -- String Table Handling * FILE: stDestroy.c * * AUTHOR: <NAME>: Feb 28, 1990 * * REVISION HISTORY: * Date Name Description * ---- ---- ----------- * 2/28/90 ardeb Initial version * * DESCRIPTION: * Free all space occupied by a string table. * ***********************************************************************/ #ifndef lint static char *rcsid = "$Id: stDest.c,v 1.1 91/04/26 11:49:37 adam Exp $"; #endif /* lint */ #include <config.h> #include "stInt.h" /*********************************************************************** * ST_Destroy *********************************************************************** * SYNOPSIS: Release all space occupied by a string table. * CALLED BY: EXTERNAL * RETURN: Nothing * SIDE EFFECTS: See above... * * STRATEGY: * * REVISION HISTORY: * Name Date Description * ---- ---- ----------- * ardeb 2/28/90 Initial Revision * ***********************************************************************/ void ST_Destroy(VMHandle vmHandle, /* File in which table resides */ VMBlockHandle table) /* Table to free */ { STHeader *hdr; /* Locked header block */ int i; /* Current bucket number */ hdr = (STHeader *)VMLock(vmHandle, table, (MemHandle *)0); for (i = 0; i < ST_NUM_BUCKETS; i++) { #if 0 /* If we decide to chain things... */ VMBlockHandle next, cur; for (cur = hdr->chains[i]; cur != 0; cur = next) { STChainHdr *chdr = VMLock(vmHandle, cur, (MemHandle *)NULL); next = chdr->next; VMFree(vmHandle, cur); } #else if (hdr->chains[i] != 0) { VMFree(vmHandle, hdr->chains[i]); } #endif } /* * Biff the header... */ VMFree(vmHandle, table); }
# """ # This is Master's API interface. # You should not implement it, or speculate about its implementation # """ # class Master(object): # def guess(self, word): # """ # :type word: str # :rtype: int # """ # https://tinyurl.com/wqy5ll4 from collections import Counter import itertools class Solution(object): def findSecretWord(self, wordlist, master): """ :type wordlist: List[str] :type master: Master :rtype: None """ matchCount = 0 while matchCount < 6: count = Counter() for word1, word2 in itertools.permutations(wordlist, 2): if self.match(word1, word2) == 0: count[word1] += 1 guessedWord = min(wordlist, key=lambda word: count[word]) # pick the word with the fewest zero-match pairings in the Counter matchCount = master.guess(guessedWord) newWordList = [] for word in wordlist: if self.match(word, guessedWord) == matchCount: newWordList.append(word) wordlist = newWordList def match(self, word1, word2): matches = [] for c1, c2 in zip(word1, word2): if c1 == c2: matches.append(1) else: matches.append(0) return sum(matches)
#ifndef _MALLOC_FREE_ALLOCATOR_H_ #define _MALLOC_FREE_ALLOCATOR_H_ #include <util/Constructor.h> #include <malloc.h> template <class DataType> class MallocFreeAllocator : public Constructor<DataType> { public: typedef DataType* Pointer; typedef const DataType* ConstPointer; typedef DataType& Reference; typedef const DataType& ConstReference; /*! malloc()'s an object of type \c DataType and returns a pointer to it. */ Pointer Allocate() { return reinterpret_cast<Pointer>(malloc(sizeof(DataType))); } /*! free()'s the given object. */ void Deallocate(Pointer object) { free(object); } }; #endif // _MALLOC_FREE_ALLOCATOR_H_
#ifndef KERNEL_PCI_MANAGER_HPP #define KERNEL_PCI_MANAGER_HPP #include <cstdint> #include <hw/nic.hpp> #include <hw/block_device.hpp> #include <hw/pci_device.hpp> namespace hw { class PCI_manager { public: // a <...> driver is constructed from a PCI device, // and returns a unique_ptr to itself using NIC_driver = delegate< std::unique_ptr<hw::Nic> (PCI_Device&, uint16_t) >; using Device_vector = std::vector<const hw::PCI_Device*>; static void register_nic(uint16_t, uint16_t, NIC_driver); using BLK_driver = delegate< std::unique_ptr<hw::Block_device> (PCI_Device&) >; static void register_blk(uint16_t, uint16_t, BLK_driver); static void init(); static void init_devices(uint8_t classcode); static Device_vector devices(); private: static void scan_bus(int bus); }; } #endif //< KERNEL_PCI_MANAGER_HPP
//----------------------------------------------------------------------------- // Copyright (c) 2015 <NAME> // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to // deal in the Software without restriction, including without limitation the // rights to use, copy, modify, merge, publish, distribute, sublicense, and/or // sell copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS // IN THE SOFTWARE. //----------------------------------------------------------------------------- #include "console/consoleTypes.h" #include "textComponent.h" #include "graphics/core.h" #include "rendering/rendering.h" #include "scene/scene.h" // Script bindings. #include "textComponent_Binding.h" // Debug Profiling. #include "debug/profiler.h" // bgfx/bx #include <bgfx/bgfx.h> #include <bx/fpumath.h> // Assimp - Asset Import Library #include <assimp/cimport.h> #include <assimp/scene.h> #include <assimp/postprocess.h> #include <assimp/types.h> namespace Scene { IMPLEMENT_CONOBJECT(TextComponent); bool TextComponent::smCanRender = false; TextComponent::TextComponent() { mRenderData = NULL; mRedrawText = false; mText = StringTable->insert("TextComponent"); mTextSize = 32.0f; mTextTexture.idx = bgfx::invalidHandle; mTextureWidth = 256.0f; mTextureHeight = 256.0f; mUScrollSpeed = 0.0f; mVScrollSpeed = 0.0f; mTextColor.set(1.0f, 1.0f, 1.0f, 1.0f); initTexture(); } TextComponent::~TextComponent() { destroyTexture(); } void TextComponent::initPersistFields() { // Call parent. Parent::initPersistFields(); addGroup("TextComponent"); addProtectedField("Text", TypeString, Offset(mText, TextComponent), &setText, &defaultProtectedGetFn, &defaultProtectedWriteFn, ""); addProtectedField("TextColor", TypeColorF, Offset(mTextColor, TextComponent), &setTextColor, &defaultProtectedGetFn, &defaultProtectedWriteFn, ""); addProtectedField("TextSize", TypeF32, Offset(mTextSize, TextComponent), &setTextSize, &defaultProtectedGetFn, &defaultProtectedWriteFn, ""); addProtectedField("TextureWidth", TypeF32, Offset(mTextureWidth, TextComponent), &setTextureWidth, &defaultProtectedGetFn, &defaultProtectedWriteFn, ""); addProtectedField("TextureHeight", TypeF32, Offset(mTextureHeight, TextComponent), &setTextureHeight, &defaultProtectedGetFn, &defaultProtectedWriteFn, ""); addField("UScrollSpeed", TypeF32, Offset(mUScrollSpeed, TextComponent), ""); addField("VScrollSpeed", TypeF32, Offset(mVScrollSpeed, TextComponent), ""); endGroup("TextComponent"); } void TextComponent::initTexture() { destroyTexture(); // Texture we copy text to. 
mTextTexture = bgfx::createTexture2D(mTextureWidth, mTextureHeight, 1, bgfx::TextureFormat::BGRA8); } void TextComponent::destroyTexture() { if (bgfx::isValid(mTextTexture)) bgfx::destroyTexture(mTextTexture); } void TextComponent::onAddToScene() { mRenderData = Rendering::createRenderData(); mRenderData->flags = 0 | Rendering::RenderData::Transparent; mRenderData->indexBuffer = Graphics::planeIB; mRenderData->vertexBuffer = Graphics::planeVB; mRenderData->shader = Graphics::getDefaultShader("components/textComponent/world_text_vs.tsh", "components/textComponent/world_text_fs.tsh")->mProgram; mRenderData->state = 0 | BGFX_STATE_RGB_WRITE | BGFX_STATE_ALPHA_WRITE | BGFX_STATE_DEPTH_TEST_LESS | BGFX_STATE_BLEND_FUNC_SEPARATE(BGFX_STATE_BLEND_ONE, BGFX_STATE_BLEND_ONE, BGFX_STATE_BLEND_ZERO, BGFX_STATE_BLEND_INV_SRC_ALPHA); refresh(); mRedrawText = true; } void TextComponent::onRemoveFromScene() { mRenderData->flags |= Rendering::RenderData::Deleted; mRenderData = NULL; } void TextComponent::preRender() { smCanRender = true; if (mRedrawText) { Graphics::ViewTableEntry* tempView = Graphics::getTemporaryView("TextTexture", 250); Graphics::ViewTableEntry* tempCopyView = Graphics::getTemporaryView("TextTextureCopy", 251); } } void TextComponent::render() { if (mRedrawText) mRedrawText = !renderText(mTextureWidth, mTextureHeight, mText, mTextColor, mTextSize, mTextTexture); } void TextComponent::postRender() { } void TextComponent::setText(const char* text) { mText = StringTable->insert(text); mRedrawText = true; } void TextComponent::setTextColor(ColorF textColor) { mTextColor = textColor; mRedrawText = true; } void TextComponent::setTextSize(F32 textSize) { mTextSize = textSize; mRedrawText = true; } void TextComponent::setTextureWidth(F32 width) { mTextureWidth = width; initTexture(); mRedrawText = true; } void TextComponent::setTextureHeight(F32 height) { mTextureHeight = height; initTexture(); mRedrawText = true; } void TextComponent::refresh() { Parent::refresh(); mBoundingBox.minExtents.set(-1.0f, -0.01f, -1.0f); mBoundingBox.maxExtents.set(1.0f, 0.01f, 1.0f); mBoundingBox.transform(mTransform); // Sanity Checks. if ( mOwnerObject == NULL ) return; // Debug Render. if ( mRenderData ) { // Base Component transform matrix is always slot 0 in the transform table. mRenderData->transformTable = &mTransformMatrix[0]; mRenderData->transformCount = 1; // Setup Uniforms with Light Data mRenderData->uniforms.uniforms = &mUniforms; mUniforms.clear(); // [U Scroll Speed, V Scroll Speed, Empty, Empty] mUniforms.push_back(Rendering::UniformData(Graphics::Shader::getUniformVec4("textParams"))); Rendering::UniformData* uTextParams = &mUniforms.back(); uTextParams->setValue(Point4F(mUScrollSpeed, mVScrollSpeed, 0.0f, 0.0f)); mTextures.clear(); mRenderData->textures = &mTextures; // Text Texture Rendering::TextureData textTex; textTex.uniform = Graphics::Shader::getTextureUniform(0); textTex.handle = mTextTexture; mTextures.push_back(textTex); } } // Static function. 
   bool TextComponent::renderText(F32 width, F32 height, StringTableEntry text, ColorF textColor, F32 textSize, bgfx::TextureHandle targetTexture)
   {
      if (!smCanRender) return false;

      bgfx::FrameBufferHandle tempTextBuffer = bgfx::createFrameBuffer((uint16_t)width, (uint16_t)height, bgfx::TextureFormat::BGRA8);
      if (!bgfx::isValid(tempTextBuffer))
         return false;

      Graphics::ViewTableEntry* tempView = Graphics::getTemporaryView("TextTexture", 250);
      Graphics::ViewTableEntry* tempCopyView = Graphics::getTemporaryView("TextTextureCopy", 251);

      // GUI Orthographic Projection
      float ortho[16];
      bx::mtxOrtho(ortho, 0.0f, width, height, 0.0f, 0.0f, 1000.0f);
      bgfx::setViewFrameBuffer(tempView->id, tempTextBuffer);
      bgfx::setViewRect(tempView->id, 0, 0, (uint16_t)width, (uint16_t)height);
      bgfx::setViewTransform(tempView->id, NULL, ortho);
      bgfx::setViewClear(tempView->id
         , BGFX_CLEAR_COLOR
         , 0x00000000
         , 1.0f
         , 0
      );
      bgfx::touch(tempView->id);

      // Use NVG to render our text.
      NVGcontext* nvgContext = dglGetNVGContext();
      nvgViewId(nvgContext, tempView->id);
      nvgBeginFrame(nvgContext, width, height, 1.0f);
      nvgFontFace(nvgContext, "lucida console");
      nvgFontSize(nvgContext, textSize);
      nvgFillColor(nvgContext, nvgRGBA((U8)(textColor.red * 255.0f), (U8)(textColor.green * 255.0f), (U8)(textColor.blue * 255.0f), (U8)(textColor.alpha * 255.0f)));
      nvgTextAlign(nvgContext, NVG_ALIGN_CENTER | NVG_ALIGN_MIDDLE);
      nvgText(nvgContext, width / 2.0f, height / 2.0f, text, NULL);
      nvgEndFrame(nvgContext);

      // Copy the rendered text from the temporary framebuffer into the target texture.
      bgfx::blit(tempCopyView->id, targetTexture, 0, 0, tempTextBuffer, 0);

      // Clean up.
      bgfx::destroyFrameBuffer(tempTextBuffer);

      // Only do one text render per frame.
      smCanRender = false;
      return true;
   }
}
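// Usage sketch (illustrative, not part of the original file): only the setters
// defined above are used; creating the component and attaching it to a scene
// object follows engine conventions outside this file.
//
//    Scene::TextComponent* label = new Scene::TextComponent();
//    label->setTextureWidth(512.0f);    // reallocates the BGRA8 text texture
//    label->setTextureHeight(128.0f);
//    label->setTextSize(48.0f);
//    label->setTextColor(ColorF(1.0f, 0.8f, 0.2f, 1.0f));
//    label->setText("Hello, world");    // flags the text for redraw next frame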
import torch import torch.nn as nn import pytorch_lightning as pl import vilt.modules.vision_transformer as vit from transformers.models.bert.modeling_bert import BertConfig, BertEmbeddings from vilt.modules import heads, objectives, vilt_utils class ViLTransformerSS(pl.LightningModule): def __init__(self, config): super().__init__() self.save_hyperparameters() bert_config = BertConfig( vocab_size=config["vocab_size"], hidden_size=config["hidden_size"], num_hidden_layers=config["num_layers"], num_attention_heads=config["num_heads"], intermediate_size=config["hidden_size"] * config["mlp_ratio"], max_position_embeddings=config["max_text_len"], hidden_dropout_prob=config["drop_rate"], attention_probs_dropout_prob=config["drop_rate"], ) self.text_embeddings = BertEmbeddings(bert_config) self.text_embeddings.apply(objectives.init_weights) self.token_type_embeddings = nn.Embedding(2, config["hidden_size"]) self.token_type_embeddings.apply(objectives.init_weights) if self.hparams.config["load_path"] == "": self.transformer = getattr(vit, self.hparams.config["vit"])( pretrained=True, config=self.hparams.config ) else: self.transformer = getattr(vit, self.hparams.config["vit"])( pretrained=False, config=self.hparams.config ) self.pooler = heads.Pooler(config["hidden_size"]) self.pooler.apply(objectives.init_weights) if config["loss_names"]["mlm"] > 0: self.mlm_score = heads.MLMHead(bert_config) self.mlm_score.apply(objectives.init_weights) if config["loss_names"]["itm"] > 0: self.itm_score = heads.ITMHead(config["hidden_size"]) self.itm_score.apply(objectives.init_weights) if config["loss_names"]["mpp"] > 0: self.mpp_score = heads.MPPHead(bert_config) self.mpp_score.apply(objectives.init_weights) # ===================== Downstream ===================== # if ( self.hparams.config["load_path"] != "" and not self.hparams.config["test_only"] ): ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu") state_dict = ckpt["state_dict"] self.load_state_dict(state_dict, strict=False) hs = self.hparams.config["hidden_size"] if self.hparams.config["loss_names"]["vqa"] > 0: vs = self.hparams.config["vqav2_label_size"] self.vqa_classifier = nn.Sequential( nn.Linear(hs, hs * 2), nn.LayerNorm(hs * 2), nn.GELU(), nn.Linear(hs * 2, vs), ) self.vqa_classifier.apply(objectives.init_weights) if self.hparams.config["loss_names"]["nlvr2"] > 0: self.nlvr2_classifier = nn.Sequential( nn.Linear(hs * 2, hs * 2), nn.LayerNorm(hs * 2), nn.GELU(), nn.Linear(hs * 2, 2), ) self.nlvr2_classifier.apply(objectives.init_weights) emb_data = self.token_type_embeddings.weight.data self.token_type_embeddings = nn.Embedding(3, hs) self.token_type_embeddings.apply(objectives.init_weights) self.token_type_embeddings.weight.data[0, :] = emb_data[0, :] self.token_type_embeddings.weight.data[1, :] = emb_data[1, :] self.token_type_embeddings.weight.data[2, :] = emb_data[1, :] if self.hparams.config["loss_names"]["irtr"] > 0: self.rank_output = nn.Linear(hs, 1) self.rank_output.weight.data = self.itm_score.fc.weight.data[1:, :] self.rank_output.bias.data = self.itm_score.fc.bias.data[1:] self.margin = 0.2 for p in self.itm_score.parameters(): p.requires_grad = False vilt_utils.set_metrics(self) self.current_tasks = list() # ===================== load downstream (test_only) ====================== if self.hparams.config["load_path"] != "" and self.hparams.config["test_only"]: ckpt = torch.load(self.hparams.config["load_path"], map_location="cpu") state_dict = ckpt["state_dict"] self.load_state_dict(state_dict, strict=False) def 
infer(
        self,
        batch,
        mask_text=False,
        mask_image=False,
        image_token_type_idx=1,
        image_embeds=None,
        image_masks=None,
    ):
        if f"image_{image_token_type_idx - 1}" in batch:
            imgkey = f"image_{image_token_type_idx - 1}"
        else:
            imgkey = "image"

        do_mlm = "_mlm" if mask_text else ""

        text_ids = batch[f"text_ids{do_mlm}"]
        text_labels = batch[f"text_labels{do_mlm}"]
        text_masks = batch["text_masks"]
        text_embeds = self.text_embeddings(text_ids)

        if image_embeds is None and image_masks is None:
            img = batch[imgkey][0]
            (
                image_embeds,
                image_masks,
                patch_index,
                image_labels,
            ) = self.transformer.visual_embed(
                img,
                max_image_len=self.hparams.config["max_image_len"],
                mask_it=mask_image,
            )
        else:
            patch_index, image_labels = (
                None,
                None,
            )

        # Add modality (token type) embeddings: index 0 for text tokens,
        # image_token_type_idx (usually 1) for image patches.
        text_embeds, image_embeds = (
            text_embeds + self.token_type_embeddings(torch.zeros_like(text_masks)),
            image_embeds
            + self.token_type_embeddings(
                torch.full_like(image_masks, image_token_type_idx)
            ),
        )

        # Concatenate both modalities and run them through the shared transformer.
        co_embeds = torch.cat([text_embeds, image_embeds], dim=1)
        co_masks = torch.cat([text_masks, image_masks], dim=1)

        x = co_embeds

        for blk in self.transformer.blocks:
            x, _attn = blk(x, mask=co_masks)

        x = self.transformer.norm(x)
        text_feats, image_feats = (
            x[:, : text_embeds.shape[1]],
            x[:, text_embeds.shape[1] :],
        )
        cls_feats = self.pooler(x)

        ret = {
            "text_feats": text_feats,
            "image_feats": image_feats,
            "cls_feats": cls_feats,
            "raw_cls_feats": x[:, 0],
            "image_labels": image_labels,
            "image_masks": image_masks,
            "text_labels": text_labels,
            "text_ids": text_ids,
            "text_masks": text_masks,
            "patch_index": patch_index,
        }

        return ret

    def forward(self, batch):
        ret = dict()
        if len(self.current_tasks) == 0:
            ret.update(self.infer(batch))
            return ret

        # Masked Language Modeling
        if "mlm" in self.current_tasks:
            ret.update(objectives.compute_mlm(self, batch))

        # Masked Patch Prediction
        if "mpp" in self.current_tasks:
            ret.update(objectives.compute_mpp(self, batch))

        # Image Text Matching
        if "itm" in self.current_tasks:
            ret.update(objectives.compute_itm_wpa(self, batch))

        # Visual Question Answering
        if "vqa" in self.current_tasks:
            ret.update(objectives.compute_vqa(self, batch))

        # Natural Language for Visual Reasoning 2
        if "nlvr2" in self.current_tasks:
            ret.update(objectives.compute_nlvr2(self, batch))

        # Image Retrieval and Text Retrieval
        if "irtr" in self.current_tasks:
            ret.update(objectives.compute_irtr(self, batch))

        return ret

    def training_step(self, batch, batch_idx):
        vilt_utils.set_task(self)
        output = self(batch)
        total_loss = sum(v for k, v in output.items() if "loss" in k)

        return total_loss

    def training_epoch_end(self, outs):
        vilt_utils.epoch_wrapup(self)

    def validation_step(self, batch, batch_idx):
        vilt_utils.set_task(self)
        output = self(batch)

    def validation_epoch_end(self, outs):
        vilt_utils.epoch_wrapup(self)

    def test_step(self, batch, batch_idx):
        vilt_utils.set_task(self)
        output = self(batch)
        ret = dict()

        if self.hparams.config["loss_names"]["vqa"] > 0:
            ret.update(objectives.vqa_test_step(self, batch, output))

        return ret

    def test_epoch_end(self, outs):
        # Strip the ".ckpt" extension from the checkpoint file name.
        model_name = self.hparams.config["load_path"].split("/")[-1][:-5]

        if self.hparams.config["loss_names"]["vqa"] > 0:
            objectives.vqa_test_wrapup(outs, model_name)
        vilt_utils.epoch_wrapup(self)

    def configure_optimizers(self):
        return vilt_utils.set_schedule(self)
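# Illustrative sketch (not part of the original file): the batch layout that
# infer() consumes, reconstructed from the key accesses above. All shapes, the
# vocab size, and the image resolution are assumptions, not prescribed values.
import torch

B, L = 4, 40  # batch size and max_text_len (assumed)
batch = {
    "text_ids": torch.randint(0, 30522, (B, L)),       # 30522 = BERT vocab size (assumed)
    "text_labels": torch.full((B, L), -100),           # -100 marks ignored positions (assumed)
    "text_masks": torch.ones(B, L, dtype=torch.long),  # 1 = real token, 0 = padding
    "image": [torch.randn(B, 3, 384, 384)],            # list-wrapped: infer() reads batch[imgkey][0]
}
# With a constructed model:  ret = model.infer(batch); ret["cls_feats"] is the
# pooled multimodal representation consumed by the downstream heads.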
// src/bin/debug/profile/Thread.cpp
/*
 * Copyright 2008-2010, <NAME>, <EMAIL>.
 * Distributed under the terms of the MIT License.
 */

#include "Thread.h"

#include <algorithm>
#include <new>

#include <debug_support.h>

#include "debug_utils.h"

#include "Image.h"
#include "Options.h"
#include "Team.h"


// #pragma mark - ThreadImage


ThreadImage::ThreadImage(Image* image, ImageProfileResult* result)
	:
	fImage(image),
	fResult(result)
{
	fImage->AcquireReference();
	fResult->AcquireReference();
}


ThreadImage::~ThreadImage()
{
	fImage->ReleaseReference();
	fResult->ReleaseReference();
}


// #pragma mark - Thread


Thread::Thread(thread_id threadID, const char* name, Team* team)
	:
	fID(threadID),
	fName(name),
	fTeam(team),
	fSampleArea(-1),
	fSamples(NULL),
	fProfileResult(NULL),
	fLazyImages(true)
{
	fTeam->AcquireReference();
}


Thread::~Thread()
{
	if (fSampleArea >= 0)
		delete_area(fSampleArea);

	if (fProfileResult != NULL)
		fProfileResult->ReleaseReference();

	while (ThreadImage* image = fImages.RemoveHead())
		delete image;
	while (ThreadImage* image = fOldImages.RemoveHead())
		delete image;

	fTeam->ReleaseReference();
}


int32
Thread::EntityID() const
{
	return ID();
}


const char*
Thread::EntityName() const
{
	return Name();
}


const char*
Thread::EntityType() const
{
	return "thread";
}


void
Thread::SetProfileResult(ProfileResult* result)
{
	ProfileResult* oldResult = fProfileResult;

	fProfileResult = result;
	if (fProfileResult != NULL)
		fProfileResult->AcquireReference();

	if (oldResult != NULL)
		oldResult->ReleaseReference();
}


void
Thread::UpdateInfo(const char* name)
{
	fName = name;
}


void
Thread::SetSampleArea(area_id area, addr_t* samples)
{
	fSampleArea = area;
	fSamples = samples;
}


void
Thread::SetInterval(bigtime_t interval)
{
	fProfileResult->SetInterval(interval);
}


void
Thread::SetLazyImages(bool lazy)
{
	fLazyImages = lazy;
}


status_t
Thread::AddImage(Image* image)
{
	ImageProfileResult* result;
	status_t error = fProfileResult->GetImageProfileResult(
		image->GetSharedImage(), image->ID(), result);
	if (error != B_OK)
		return error;

	BReference<ImageProfileResult> resultReference(result, true);

	ThreadImage* threadImage = new(std::nothrow) ThreadImage(image, result);
	if (threadImage == NULL)
		return B_NO_MEMORY;

	if (fLazyImages)
		fNewImages.Add(threadImage);
	else
		fImages.Add(threadImage);

	return B_OK;
}


void
Thread::RemoveImage(Image* image)
{
	ImageList::Iterator it = fImages.GetIterator();
	while (ThreadImage* threadImage = it.Next()) {
		if (threadImage->GetImage() == image) {
			it.Remove();
			if (threadImage->Result()->TotalHits() > 0)
				fOldImages.Add(threadImage);
			else
				delete threadImage;
			break;
		}
	}
}


void
Thread::AddSamples(int32 count, int32 dropped, int32 stackDepth,
	bool variableStackDepth, int32 event)
{
	_SynchronizeImages(event);

	if (variableStackDepth) {
		// Variable stack depth: the buffer is a sequence of chunks, each
		// starting with a count word. Values >= B_DEBUG_PROFILE_EVENT_BASE
		// mark in-stream events followed by their parameters; anything else
		// is the number of return addresses that follow.
		addr_t* samples = fSamples;

		while (count > 0) {
			addr_t sampleCount = *(samples++);

			if (sampleCount >= B_DEBUG_PROFILE_EVENT_BASE) {
				int32 eventParameterCount
					= sampleCount & B_DEBUG_PROFILE_EVENT_PARAMETER_MASK;
				if (sampleCount == B_DEBUG_PROFILE_IMAGE_EVENT) {
					_SynchronizeImages((int32)samples[0]);
				} else {
					fprintf(stderr, "unknown profile event: %#lx\n",
						sampleCount);
				}

				samples += eventParameterCount;
				count -= eventParameterCount + 1;
				continue;
			}

			fProfileResult->AddSamples(this, samples, sampleCount);

			samples += sampleCount;
			count -= sampleCount + 1;
		}
	} else {
		// Fixed stack depth: the buffer is simply complete stack traces of
		// stackDepth entries each; ignore any trailing partial trace.
		count = count / stackDepth * stackDepth;
		for (int32 i = 0; i < count; i += stackDepth)
			fProfileResult->AddSamples(this, fSamples + i, stackDepth);
	}

	fProfileResult->AddDroppedTicks(dropped);
}


void
Thread::AddSamples(addr_t* samples, int32 sampleCount) { fProfileResult->AddSamples(this, samples, sampleCount); } void Thread::PrintResults() { fProfileResult->PrintResults(this); } int32 Thread::CountImages() const { return fImages.Count() + fOldImages.Count(); } ImageProfileResult* Thread::VisitImages(Visitor& visitor) const { ImageList::ConstIterator it = fOldImages.GetIterator(); while (ThreadImage* image = it.Next()) { if (visitor.VisitImage(image->Result())) return image->Result(); } it = fImages.GetIterator(); while (ThreadImage* image = it.Next()) { if (visitor.VisitImage(image->Result())) return image->Result(); } return NULL; } ImageProfileResult* Thread::FindImage(addr_t address, addr_t& _loadDelta) const { ImageList::ConstIterator it = fImages.GetIterator(); while (ThreadImage* image = it.Next()) { if (image->GetImage()->ContainsAddress(address)) { _loadDelta = image->GetImage()->LoadDelta(); return image->Result(); } } return NULL; } void Thread::_SynchronizeImages(int32 event) { // remove obsolete images ImageList::Iterator it = fImages.GetIterator(); while (ThreadImage* image = it.Next()) { int32 deleted = image->GetImage()->DeletionEvent(); if (deleted >= 0 && event >= deleted) { it.Remove(); if (image->Result()->TotalHits() > 0) fOldImages.Add(image); else delete image; } } // add new images it = fNewImages.GetIterator(); while (ThreadImage* image = it.Next()) { if (image->GetImage()->CreationEvent() <= event) { it.Remove(); int32 deleted = image->GetImage()->DeletionEvent(); if (deleted >= 0 && event >= deleted) { // image already deleted delete image; } else fImages.Add(image); } } }
// Repository: utumen/binnavi
// Copyright 2011-2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.security.zynamics.binnavi.debug.connection.packets.commands;

import com.google.security.zynamics.binnavi.debug.connection.DebugCommandType;

/**
 * Command class for the request files command. This command should be sent whenever the content of
 * the default directory of the machine where the debug client is located is required.
 */
public final class RequestFilesCommand extends DebugCommand {
  /**
   * Creates a new request files command.
   *
   * @param packetId Packet ID of the command.
   */
  public RequestFilesCommand(final int packetId) {
    super(DebugCommandType.CMD_LIST_FILES, packetId);
  }
}
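// Illustrative sketch (not part of the original file): constructing the
// command is all this class does; packet serialization and dispatch over the
// debug connection happen elsewhere in BinNavi. nextPacketId is a
// hypothetical counter supplied by the caller.
//
//    DebugCommand command = new RequestFilesCommand(nextPacketId);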
/*
 * Copyright 2016 KairosDB Authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.kairosdb.core.formatter;

import org.json.JSONException;
import org.json.JSONObject;
import org.json.JSONWriter;
import org.kairosdb.core.DataPoint;
import org.kairosdb.core.datastore.DataPointGroup;
import org.kairosdb.core.groupby.GroupByResult;

import java.io.IOException;
import java.io.Writer;
import java.util.List;

public class JsonResponse
{
	private Writer m_writer;
	private JSONWriter m_jsonWriter;

	public JsonResponse(Writer writer)
	{
		m_writer = writer;
		m_jsonWriter = new JSONWriter(writer);
	}

	public void begin(String originalQuery) throws FormatterException
	{
		try
		{
			m_jsonWriter.object();

			if (originalQuery != null)
				m_jsonWriter.key("original_query").value(new JSONObject(originalQuery));

			m_jsonWriter.key("queries").array();
		}
		catch (JSONException e)
		{
			throw new FormatterException(e);
		}
	}

	/**
	 * Formats the query results.
	 *
	 * @param queryResults results of the query
	 * @param excludeTags if true do not include tag information
	 * @param sampleSize passing a sample size of -1 will cause the attribute to not show up
	 * @throws FormatterException if the results cannot be written as JSON
	 */
	public void formatQuery(List<DataPointGroup> queryResults, boolean excludeTags, int sampleSize) throws FormatterException
	{
		try
		{
			m_jsonWriter.object();
			if (sampleSize != -1)
				m_jsonWriter.key("sample_size").value(sampleSize);

			m_jsonWriter.key("results").array();

			// Note: the groups are iterated but intentionally not closed here;
			// the caller retains ownership of each DataPointGroup and must
			// close them when done.
			for (DataPointGroup group : queryResults)
			{
				final String metric = group.getName();

				m_jsonWriter.object();
				m_jsonWriter.key("name").value(metric);

				if (!group.getGroupByResult().isEmpty())
				{
					m_jsonWriter.key("group_by");
					m_jsonWriter.array();

					boolean first = true;
					for (GroupByResult groupByResult : group.getGroupByResult())
					{
						if (!first)
							m_writer.write(",");
						m_writer.write(groupByResult.toJson());
						first = false;
					}
					m_jsonWriter.endArray();
				}

				if (!excludeTags)
				{
					// TODO: move this to after the values are obtained so we can filter unused tags
					m_jsonWriter.key("tags").object();
					for (String tagName : group.getTagNames())
					{
						m_jsonWriter.key(tagName);
						m_jsonWriter.value(group.getTagValues(tagName));
					}
					m_jsonWriter.endObject();
				}

				m_jsonWriter.key("values").array();
				while (group.hasNext())
				{
					DataPoint dataPoint = group.next();
					m_jsonWriter.array().value(dataPoint.getTimestamp());
					dataPoint.writeValueToJson(m_jsonWriter);
					m_jsonWriter.endArray();
				}
				m_jsonWriter.endArray();
				m_jsonWriter.endObject();
			}

			m_jsonWriter.endArray().endObject();
		}
		catch (JSONException e)
		{
			throw new FormatterException(e);
		}
		catch (IOException e)
		{
			throw new FormatterException(e);
		}
	}

	public void end() throws FormatterException
	{
		try
		{
			m_jsonWriter.endArray();
			m_jsonWriter.endObject();
		}
		catch (JSONException e)
		{
			throw new FormatterException(e);
		}
	}
}
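// Illustrative usage sketch (not part of the original file): the intended call
// sequence is begin() -> formatQuery() -> end(); only methods defined above are
// used. queryResults is assumed to come from a datastore query elsewhere, and
// closing each DataPointGroup remains the caller's responsibility.
//
//    Writer out = new StringWriter();
//    JsonResponse response = new JsonResponse(out);
//    response.begin(null);                           // null: omit the original_query echo
//    response.formatQuery(queryResults, false, -1);  // include tags; -1 hides sample_size
//    response.end();
//    String json = out.toString();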
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* <NAME> (Intel Corp.)
******************************************************************************/

#ifndef _UTILS_H_
#define _UTILS_H_

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>     /* uint16_t */
#include <time.h>
#include <unistd.h>
#include <immintrin.h>  /* _mm_malloc/_mm_free */

#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_num_threads() (1)
#define omp_get_thread_num() (0)
#define omp_get_max_threads() (1)
#endif

const int alignment = 64;

typedef long ITyp;
typedef float FTyp;
typedef uint16_t Half;

extern thread_local struct drand48_data rand_buf;

static double get_time() {
  static bool init_done = false;
  static struct timespec stp = {0,0};
  struct timespec tp;
  clock_gettime(CLOCK_REALTIME, &tp);
  /*clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &tp);*/
  if(!init_done) {
    init_done = true;
    stp = tp;
  }
  double ret = (tp.tv_sec - stp.tv_sec) * 1e3 + (tp.tv_nsec - stp.tv_nsec)*1e-6;
  return ret;
}

void set_random_seed(int seed);

template<typename T>
void init_zero(size_t sz, T *buf)
{
#pragma omp parallel for
  for(size_t i = 0; i < sz; i++)
    buf[i] = (T)0;
}

template<typename T>
void init_random(size_t sz, T *buf, T low, T high)
{
  T range = high - low;
#pragma omp parallel for schedule(static)
  for(size_t i = 0; i < sz; i++) {
    double randval;
    drand48_r(&rand_buf, &randval);
    /* map randval in [0, 1) to [low, high); the original "- low" was a bug */
    buf[i] = randval * range + low;
  }
}

inline void *my_malloc(size_t sz, size_t align)
{
  return _mm_malloc(sz, align);
}

inline void my_free(void *p) {
  _mm_free(p);
}

#endif /*_UTILS_H_*/
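/* A short illustrative driver for the helpers above (not part of the original
   header). rand_buf must be defined in exactly one translation unit and seeded,
   presumably via set_random_seed(), whose definition lives elsewhere; the
   buffer size here is arbitrary. */
thread_local struct drand48_data rand_buf;  /* definition matching the extern above */

int main() {
  const size_t n = 1024;
  FTyp* buf = (FTyp*)my_malloc(n * sizeof(FTyp), alignment);
  init_random(n, buf, (FTyp)-1.0f, (FTyp)1.0f);  /* uniform in [-1, 1) */
  double t0 = get_time();
  init_zero(n, buf);                             /* some timed work on buf */
  printf("elapsed: %.3f ms\n", get_time() - t0);
  my_free(buf);
  return 0;
}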