max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
3,285
<reponame>grybd/oneflow<filename>oneflow/user/data/data_reader.h /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifndef ONEFLOW_USER_DATA_DATA_READER_H_ #define ONEFLOW_USER_DATA_DATA_READER_H_ #include "oneflow/core/common/buffer.h" #include "oneflow/core/framework/op_kernel.h" #include "oneflow/user/data/dataset.h" #include "oneflow/user/data/parser.h" namespace oneflow { namespace data { static const int32_t kDataReaderBatchBufferSize = 4; template<typename LoadTarget> class DataReader { public: using LoadTargetPtr = std::shared_ptr<LoadTarget>; using LoadTargetPtrList = std::vector<LoadTargetPtr>; DataReader(user_op::KernelInitContext* ctx) : is_closed_(false), batch_buffer_(kDataReaderBatchBufferSize) {} virtual ~DataReader() { Close(); if (load_thrd_.joinable()) { load_thrd_.join(); } } void Read(user_op::KernelComputeContext* ctx) { CHECK(load_thrd_.joinable()) << "You should call StartLoadThread before read data"; auto batch_data = FetchBatchData(); parser_->Parse(batch_data, ctx); } void Close() { is_closed_.store(true); bool buffer_drained = false; while (!buffer_drained) { std::shared_ptr<LoadTargetPtrList> abandoned_batch_data(nullptr); auto status = batch_buffer_.TryReceive(&abandoned_batch_data); CHECK_NE(status, BufferStatus::kBufferStatusErrorClosed); buffer_drained = (status == BufferStatus::kBufferStatusEmpty); } batch_buffer_.Close(); } protected: void StartLoadThread() { if (load_thrd_.joinable()) { return; } load_thrd_ = 
std::thread([this] { while (!is_closed_.load() && LoadBatch()) {} }); } std::unique_ptr<Dataset<LoadTarget>> loader_; std::unique_ptr<Parser<LoadTarget>> parser_; private: std::shared_ptr<LoadTargetPtrList> FetchBatchData() { std::shared_ptr<LoadTargetPtrList> batch_data(nullptr); CHECK_EQ(batch_buffer_.Pull(&batch_data), BufferStatus::kBufferStatusSuccess); return batch_data; } bool LoadBatch() { std::shared_ptr<LoadTargetPtrList> batch_data = std::make_shared<LoadTargetPtrList>(std::move(loader_->Next())); return batch_buffer_.Push(batch_data) == BufferStatus::kBufferStatusSuccess; } std::atomic<bool> is_closed_; Buffer<std::shared_ptr<LoadTargetPtrList>> batch_buffer_; std::thread load_thrd_; }; } // namespace data } // namespace oneflow #endif // ONEFLOW_USER_DATA_DATA_READER_H_
1,038
1,816
from tests import TestCase from src.masonite.notification import Notification, Notifiable from src.masonite.mail import Mailable from masoniteorm.models import Model class User(Model, Notifiable): """User Model""" __fillable__ = ["name", "email", "password"] class WelcomeUserNotification(Notification): def to_mail(self, notifiable): return ( Mailable() .to(notifiable.email) .subject("Masonite 4") .from_("<EMAIL>") .text(f"Hello {notifiable.name}") ) def via(self, notifiable): return ["mail"] class WelcomeNotification(Notification): def to_mail(self, notifiable): return ( Mailable() .subject("Masonite 4") .from_("<EMAIL>") .text("Hello from Masonite!") ) def via(self, notifiable): return ["mail"] class TestMailDriver(TestCase): def setUp(self): super().setUp() self.notification = self.application.make("notification") def test_send_to_anonymous(self): self.notification.route("mail", "<EMAIL>").send(WelcomeNotification()) def test_send_to_notifiable(self): user = User.find(1) user.notify(WelcomeUserNotification()) def test_send_and_override_driver(self): # TODO: but I don't really know how to proceed as driver can't be defined anymore # in the Mailable # Some API solutions: # self.notification.route("mail", "<EMAIL>").send(WelcomeNotification()).driver("log") # self.notification.route("mail", "<EMAIL>").send(WelcomeNotification(), driver="log") # self.notification.route("mail", "<EMAIL>", driver="log").send(WelcomeNotification()) pass
722
522
package algs.example.gui.problems.segmentIntersection; import algs.example.gui.canvas.ElementCanvas; import algs.example.gui.canvas.NopDrawer; import algs.example.gui.canvas.SegmentCanvas; import algs.example.gui.generator.GeneratorPanel; import algs.example.gui.model.IModelUpdated; import algs.example.gui.problems.segmentIntersection.controller.MouseHandler; import algs.example.gui.problems.segmentIntersection.controller.SegmentMouseHandler; import algs.example.gui.problems.segmentIntersection.model.LineSegmentModel; import algs.example.gui.problems.segmentIntersection.view.ActiveEntityDecorator; import algs.example.gui.problems.segmentIntersection.view.IntersectionDecorator; import algs.model.ILineSegment; import algs.model.data.Generator; import algs.model.data.segments.GridGenerator; import algs.model.data.segments.LoadFromFileGenerator; import algs.model.data.segments.DoubleGenerator; import algs.model.data.segments.HubGenerator; import algs.model.data.segments.IntegerGenerator; import algs.model.data.segments.SlidingLadderGenerator; import algs.model.data.segments.UniformGenerator; import algs.model.twod.TwoDLineSegment; import algs.model.twod.TwoDPoint; import java.util.ArrayList; /** * GUI to present functionality to explore intersecting segments. * * @author <NAME> * @version 1.0, 6/15/08 * @since 1.0 */ public class IntersectingSegmentsGUI extends IntersectingEntitiesGUI<ILineSegment> implements IModelUpdated<ILineSegment> { /** Constructed canvas. */ private ElementCanvas<ILineSegment> canvas; /** * Keep Eclipse happy. */ private static final long serialVersionUID = 1L; @Override protected void constructModel() { model = new LineSegmentModel(); model.setListener(this); } @Override protected ElementCanvas<ILineSegment> createCanvas(int width, int height) { canvas = new SegmentCanvas(width, height); canvas.setModel(model); // we want active points as well as intersections... 
canvas.setDrawer(new ActiveEntityDecorator<ILineSegment> ( new IntersectionDecorator<ILineSegment> (new NopDrawer(), canvas, model), canvas, model)); // install handlers MouseHandler<ILineSegment> mh = new SegmentMouseHandler(canvas, this, model); canvas.addMouseListener(mh); canvas.addMouseMotionListener(mh); return canvas; } public ElementCanvas<ILineSegment> getCanvas() { return canvas; } /** Our set of generators. */ @Override protected void customize(GeneratorPanel<ILineSegment> gp) { // the following objects are never used to generate points; they are // used solely as prototypes when constructing the real things. Generator<ILineSegment> gen1 = new UniformGenerator(1); // dummy argument Generator<ILineSegment> gen2 = new DoubleGenerator(1.0,1.0); // dummy argument Generator<ILineSegment> gen3 = new IntegerGenerator(100,100); // dummy arguments Generator<ILineSegment> gen4 = new SlidingLadderGenerator(100); // dummy arguments Generator<ILineSegment> gen4a = new GridGenerator(100,5); // dummy arguments Generator<ILineSegment> gen5 = new HubGenerator(40, 100, 100); // dummy arguments Generator<ILineSegment> gen6 = new LoadFromFileGenerator(""); // dummy arguments gp.addGenerator("Uniform", gen1); gp.addGenerator("Double", gen2); gp.addGenerator("Integer", gen3); gp.addGenerator("Sliding Ladder", gen4); gp.addGenerator("Grid", gen4a); gp.addGenerator("Wheel Hub", gen5); gp.addGenerator("File...", gen6); } /** How to transform line segments. 
*/ @Override protected ILineSegment[] transform(ILineSegment[] segments, int width, int height) { ArrayList<ILineSegment> als = new ArrayList<ILineSegment>(); double minX = 0; double minY = 0; double maxX = 0; double maxY = 0; for (ILineSegment ils: segments) { double x1 = ils.getStart().getX(); double y1 = ils.getStart().getY(); double x2 = ils.getEnd().getX(); double y2 = ils.getEnd().getY(); if (x1 < minX) { minX = x1; } if (x2 < minX) { minX = x2; } if (y1 < minY) { minY = y1; } if (y2 < minY) { minY = y2; } if (x1 > maxX) { maxX = x1; } if (x2 > maxX) { maxX = x2; } if (y1 > maxY) { maxY = y1; } if (y2 > maxY) { maxY = y2; } } double xFactor = 1.0 * width / (maxX - minX); double yFactor = 1.0 * height / (maxY - minY); // Does user want to scale? if (!shouldScale()) { ILineSegment[] copy = new ILineSegment [segments.length]; for (int i = 0; i < copy.length; i++) { copy[i] = segments[i]; } return copy; } // Scale appropriately for (ILineSegment ils: segments) { ILineSegment newOne = new TwoDLineSegment ( new TwoDPoint(ils.getStart().getX()*xFactor, ils.getStart().getY()*yFactor), new TwoDPoint(ils.getEnd().getX()*xFactor, ils.getEnd().getY()*yFactor) ); als.add(newOne); } // convert as array return als.toArray(new ILineSegment[0]); } }
1,842
449
<gh_stars>100-1000 from helper import * from model.compgcn_conv import CompGCNConv from model.compgcn_conv_basis import CompGCNConvBasis class BaseModel(torch.nn.Module): def __init__(self, params): super(BaseModel, self).__init__() self.p = params self.act = torch.tanh self.bceloss = torch.nn.BCELoss() def loss(self, pred, true_label): return self.bceloss(pred, true_label) class CompGCNBase(BaseModel): def __init__(self, edge_index, edge_type, num_rel, params=None): super(CompGCNBase, self).__init__(params) self.edge_index = edge_index self.edge_type = edge_type self.p.gcn_dim = self.p.embed_dim if self.p.gcn_layer == 1 else self.p.gcn_dim self.init_embed = get_param((self.p.num_ent, self.p.init_dim)) self.device = self.edge_index.device if self.p.num_bases > 0: self.init_rel = get_param((self.p.num_bases, self.p.init_dim)) else: if self.p.score_func == 'transe': self.init_rel = get_param((num_rel, self.p.init_dim)) else: self.init_rel = get_param((num_rel*2, self.p.init_dim)) if self.p.num_bases > 0: self.conv1 = CompGCNConvBasis(self.p.init_dim, self.p.gcn_dim, num_rel, self.p.num_bases, act=self.act, params=self.p) self.conv2 = CompGCNConv(self.p.gcn_dim, self.p.embed_dim, num_rel, act=self.act, params=self.p) if self.p.gcn_layer == 2 else None else: self.conv1 = CompGCNConv(self.p.init_dim, self.p.gcn_dim, num_rel, act=self.act, params=self.p) self.conv2 = CompGCNConv(self.p.gcn_dim, self.p.embed_dim, num_rel, act=self.act, params=self.p) if self.p.gcn_layer == 2 else None self.register_parameter('bias', Parameter(torch.zeros(self.p.num_ent))) def forward_base(self, sub, rel, drop1, drop2): r = self.init_rel if self.p.score_func != 'transe' else torch.cat([self.init_rel, -self.init_rel], dim=0) x, r = self.conv1(self.init_embed, self.edge_index, self.edge_type, rel_embed=r) x = drop1(x) x, r = self.conv2(x, self.edge_index, self.edge_type, rel_embed=r) if self.p.gcn_layer == 2 else (x, r) x = drop2(x) if self.p.gcn_layer == 2 else x sub_emb = 
torch.index_select(x, 0, sub) rel_emb = torch.index_select(r, 0, rel) return sub_emb, rel_emb, x class CompGCN_TransE(CompGCNBase): def __init__(self, edge_index, edge_type, params=None): super(self.__class__, self).__init__(edge_index, edge_type, params.num_rel, params) self.drop = torch.nn.Dropout(self.p.hid_drop) def forward(self, sub, rel): sub_emb, rel_emb, all_ent = self.forward_base(sub, rel, self.drop, self.drop) obj_emb = sub_emb + rel_emb x = self.p.gamma - torch.norm(obj_emb.unsqueeze(1) - all_ent, p=1, dim=2) score = torch.sigmoid(x) return score class CompGCN_DistMult(CompGCNBase): def __init__(self, edge_index, edge_type, params=None): super(self.__class__, self).__init__(edge_index, edge_type, params.num_rel, params) self.drop = torch.nn.Dropout(self.p.hid_drop) def forward(self, sub, rel): sub_emb, rel_emb, all_ent = self.forward_base(sub, rel, self.drop, self.drop) obj_emb = sub_emb * rel_emb x = torch.mm(obj_emb, all_ent.transpose(1, 0)) x += self.bias.expand_as(x) score = torch.sigmoid(x) return score class CompGCN_ConvE(CompGCNBase): def __init__(self, edge_index, edge_type, params=None): super(self.__class__, self).__init__(edge_index, edge_type, params.num_rel, params) self.bn0 = torch.nn.BatchNorm2d(1) self.bn1 = torch.nn.BatchNorm2d(self.p.num_filt) self.bn2 = torch.nn.BatchNorm1d(self.p.embed_dim) self.hidden_drop = torch.nn.Dropout(self.p.hid_drop) self.hidden_drop2 = torch.nn.Dropout(self.p.hid_drop2) self.feature_drop = torch.nn.Dropout(self.p.feat_drop) self.m_conv1 = torch.nn.Conv2d(1, out_channels=self.p.num_filt, kernel_size=(self.p.ker_sz, self.p.ker_sz), stride=1, padding=0, bias=self.p.bias) flat_sz_h = int(2*self.p.k_w) - self.p.ker_sz + 1 flat_sz_w = self.p.k_h - self.p.ker_sz + 1 self.flat_sz = flat_sz_h*flat_sz_w*self.p.num_filt self.fc = torch.nn.Linear(self.flat_sz, self.p.embed_dim) def concat(self, e1_embed, rel_embed): e1_embed = e1_embed. 
view(-1, 1, self.p.embed_dim) rel_embed = rel_embed.view(-1, 1, self.p.embed_dim) stack_inp = torch.cat([e1_embed, rel_embed], 1) stack_inp = torch.transpose(stack_inp, 2, 1).reshape((-1, 1, 2*self.p.k_w, self.p.k_h)) return stack_inp def forward(self, sub, rel): sub_emb, rel_emb, all_ent = self.forward_base(sub, rel, self.hidden_drop, self.feature_drop) stk_inp = self.concat(sub_emb, rel_emb) x = self.bn0(stk_inp) x = self.m_conv1(x) x = self.bn1(x) x = F.relu(x) x = self.feature_drop(x) x = x.view(-1, self.flat_sz) x = self.fc(x) x = self.hidden_drop2(x) x = self.bn2(x) x = F.relu(x) x = torch.mm(x, all_ent.transpose(1,0)) x += self.bias.expand_as(x) score = torch.sigmoid(x) return score
2,337
831
<gh_stars>100-1000 /* * Copyright (C) 2019 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.android.tools.idea.room.migrations.generators; import static com.android.tools.idea.lang.androidSql.parser.AndroidSqlLexer.*; import static com.android.tools.idea.room.migrations.update.SchemaDiffUtil.*; import com.android.tools.idea.room.migrations.json.BundleUtil; import com.android.tools.idea.room.migrations.json.DatabaseViewBundle; import com.android.tools.idea.room.migrations.json.EntityBundle; import com.android.tools.idea.room.migrations.json.FieldBundle; import com.android.tools.idea.room.migrations.json.ForeignKeyBundle; import com.android.tools.idea.room.migrations.json.FtsEntityBundle; import com.android.tools.idea.room.migrations.json.FtsOptionsBundle; import com.android.tools.idea.room.migrations.json.IndexBundle; import com.android.tools.idea.room.migrations.json.PrimaryKeyBundle; import com.android.tools.idea.room.migrations.update.DatabaseUpdate; import com.android.tools.idea.room.migrations.update.EntityUpdate; import com.intellij.openapi.util.InvalidDataException; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.stream.Collectors; import org.jetbrains.annotations.NotNull; import org.jetbrains.annotations.Nullable; /** * Responsible for generating SQLite statements which perform updates between two versions of a database schema. 
*/ public class SqlStatementsGenerator { private static final String TMP_TABLE_NAME_TEMPLATE = "%s_data$android_studio_tmp"; /** * Returns SQLite statements which produce the update from the older version of the database to the current one. * * @param databaseUpdate the representation of the updates which need to be performed */ @NotNull public static List<String> getMigrationStatements(@NotNull DatabaseUpdate databaseUpdate) { List<String> updateStatements = new ArrayList<>(); databaseUpdate.getRenamedEntities().forEach((oldName, newName) -> {updateStatements.add(getRenameTableStatement(oldName, newName));}); for (EntityBundle entity : databaseUpdate.getNewEntities().values()) { updateStatements.add(getCreateTableStatement(entity.getTableName(), entity)); } for (EntityUpdate entityUpdate : databaseUpdate.getModifiedEntities().values()) { updateStatements.addAll(getMigrationStatements(entityUpdate)); } for (EntityBundle entity : databaseUpdate.getDeletedEntities().values()) { updateStatements.add(getDropTableStatement(entity.getTableName())); } for (DatabaseViewBundle view : databaseUpdate.getDeletedViews()) { updateStatements.add(getDropViewStatement(view)); } for (DatabaseViewBundle view : databaseUpdate.getNewOrModifiedViews()) { updateStatements.add(getCreateViewStatement(view)); } for (String tableName : databaseUpdate.getTablesToForeignKeyCheck()) { updateStatements.add(getForeignKeyConstraintCheck(tableName)); } return updateStatements; } /** * Returns SQLite statements which produce the update form the older version of the table to the current one. 
* * @param entityUpdate the representation of the updates which need to be performed */ @NotNull public static List<String> getMigrationStatements(@NotNull EntityUpdate entityUpdate) { String tableName = entityUpdate.getNewTableName(); ArrayList<String> updateStatements = new ArrayList<>(); if (entityUpdate.isComplexUpdate()) { // If the update produces an FTS table which requires data copied from an external source, we need to recreate the table and // copy the data from the external content table. if (entityUpdate.shouldCreateAnFtsEntity() && ftsTableNeedsExternalContentSource((FtsEntityBundle)entityUpdate.getNewState())) { updateStatements.addAll(getComplexUpdateForFtsTableWithExternalContent(entityUpdate)); } else { updateStatements.addAll(getComplexTableUpdate(entityUpdate)); } } else { if (entityUpdate.shouldRenameTable()) { updateStatements.add(getRenameTableStatement(entityUpdate.getOldTableName(), entityUpdate.getNewTableName())); } Map<String, FieldBundle> newFields = entityUpdate.getNewFields(); for (FieldBundle field : newFields.values()) { updateStatements.add(getAddColumnStatement(tableName, field)); } Map<FieldBundle, String> renamedFields = entityUpdate.getRenamedFields(); for (Map.Entry<FieldBundle, String> newFieldToOldNameMapping : renamedFields.entrySet()) { updateStatements .add(getRenameColumnStatement(tableName, newFieldToOldNameMapping.getValue(), newFieldToOldNameMapping.getKey().getColumnName())); } Map<FieldBundle, String> valuesForUninitializedFields = entityUpdate.getValuesForUninitializedFields(); if (!valuesForUninitializedFields.isEmpty()) { updateStatements.add(getUpdateColumnsValuesStatement(tableName, valuesForUninitializedFields)); } } for (IndexBundle index : entityUpdate.getIndicesToBeDropped()) { updateStatements.add(getDropIndexStatement(index)); } for (IndexBundle index : entityUpdate.getIndicesToBeCreated()) { updateStatements.add(getCreateIndexStatement(index, tableName)); } return updateStatements; } /** * Returns a 
collection of statements which perform complex updates on a SQLite table, i.e. deleting/renaming/modifying columns or * modifying an FTS table which does not have an external source of content. * * <p>As the SQLite ALTER TABLE command does not support these operations, the way to perform them is to create a new table with the desired * new format and then transfer to content from te original table to the new one. More information ca be found in the SQLite documentation * (https://www.sqlite.org/lang_altertable.html).</p> * * @param entityUpdate the object which describes the changes which need to be executed */ @NotNull private static List<String> getComplexTableUpdate(@NotNull EntityUpdate entityUpdate) { List<String> updateStatements = new ArrayList<>(); String dataSource = getDataSourceForComplexUpdate(entityUpdate); Map<String, String> columnNameToColumnValue = getColumnNameToColumnValueMapping(entityUpdate); if (columnNameToColumnValue.isEmpty()) { updateStatements.add(getDropTableStatement(entityUpdate.getOldTableName())); updateStatements.add(getCreateTableStatement(entityUpdate.getNewTableName(), entityUpdate.getNewState())); } else { String oldTableName = !entityUpdate.shouldRenameTable() ? entityUpdate.getNewTableName() : entityUpdate.getOldTableName(); String newTableName = !entityUpdate.shouldRenameTable() ? 
String.format(TMP_TABLE_NAME_TEMPLATE, oldTableName) : entityUpdate.getNewTableName(); updateStatements.add(getCreateTableStatement(newTableName, entityUpdate.getNewState())); updateStatements.add(getInsertIntoTableStatement(newTableName, new ArrayList<>(columnNameToColumnValue.keySet()), getSelectFromTableStatement( dataSource, new ArrayList<>(columnNameToColumnValue.values())))); updateStatements.add(getDropTableStatement(oldTableName)); if (!entityUpdate.shouldRenameTable()) { updateStatements.add(getRenameTableStatement(newTableName, oldTableName)); } } return updateStatements; } /** * Returns a collection of statements which perform the update of an FTS table with external content. * * <p>Because we have an external source of data, we only need to drop the old table, create a new one and copy the data form the external * content table. More information can be found in the SQLite documentation (https://www.sqlite.org/fts3.html#summary).</p> */ private static List<String> getComplexUpdateForFtsTableWithExternalContent(@NotNull EntityUpdate entityUpdate) { List<String> updateStatements = new ArrayList<>(); String oldTableName = entityUpdate.getOldTableName(); String newTableName = entityUpdate.getNewTableName(); String dataSource = getDataSourceForComplexUpdate(entityUpdate); Map<String, String> columnNameToColumnValue = getColumnNameToColumnValueMapping(entityUpdate); updateStatements.add(getDropTableStatement(oldTableName)); updateStatements.add(getCreateTableStatement(newTableName, entityUpdate.getNewState())); updateStatements.add(getInsertIntoTableStatement( newTableName, new ArrayList<>(columnNameToColumnValue.keySet()), getSelectFromTableStatement(dataSource, new ArrayList<>(columnNameToColumnValue.values())))); return updateStatements; } /** * Returns the statement which performs the query. 
* * @param tableName the name of the table to select from * @param columnNames the names of the columns to be selected */ @NotNull private static String getSelectFromTableStatement(@NotNull String tableName, @NotNull List<String> columnNames) { tableName = getValidName(tableName); StringBuilder statement = new StringBuilder(String.format("SELECT %s\n", getColumnEnumeration(columnNames))); statement.append(String.format("\tFROM %s;", tableName)); return statement.toString(); } /** * Returns the statement which performs the insertion. * * @param tableName the name of the table to insert into * @param columnNames the names of the columns to insert values into * @param values the values to be inserted (could be either the result of another SQL statement or just an enumeration of values) */ @NotNull private static String getInsertIntoTableStatement(@NotNull String tableName, @NotNull List<String> columnNames, @NotNull String values) { tableName = getValidName(tableName); StringBuilder statement = new StringBuilder(String.format("INSERT INTO %s (%s)\n\t", tableName, getColumnEnumeration(columnNames))); statement.append(values); if (!statement.toString().endsWith(";")) { statement.append(";"); } return statement.toString(); } /** * Returns a statement for which performs the renaming. * * @param oldName the original name of the table to be renamed * @param newName the new name of the table */ @NotNull private static String getRenameTableStatement(@NotNull String oldName, @NotNull String newName) { return String.format("ALTER TABLE %s RENAME TO %s;", getValidName(oldName), getValidName(newName)); } /** * Returns a statement which creates the table. 
* * @param tableName the name of the new table * @param entity the EntityBundleObject which describes the table to be created and contains a template for the creation statement */ @NotNull private static String getCreateTableStatement(@NotNull String tableName, @NotNull EntityBundle entity) { // If the entity describes an FTS table, we use the create statement stored inside the entity as the information from the FtsOptionBundle // class is not accessible from this scope. if (entity instanceof FtsEntityBundle) { return getCreateTableStatementFromEntityBundle(tableName, entity); } tableName = getValidName(tableName); StringBuilder statement = new StringBuilder(String.format("CREATE TABLE %s\n(\n", tableName)); for (FieldBundle field : entity.getFields()) { statement.append(String.format("\t%s,\n", getColumnDescription(field))); if (shouldAddAutoIncrementToColumn(field, entity.getPrimaryKey())) { statement.replace(statement.length() - 2, statement.length(), ""); statement.append(" PRIMARY KEY AUTOINCREMENT,\n"); } } if (!entity.getPrimaryKey().isAutoGenerate()) { statement.append(String.format("\t%s,\n", getPrimaryKeyConstraint(entity.getPrimaryKey()))); } if (entity.getForeignKeys() != null) { for (ForeignKeyBundle foreignKey : entity.getForeignKeys()) { statement.append(String.format("\t%s,\n", getForeignKeyConstraint(foreignKey))); } } statement.replace(statement.length() - 2, statement.length() - 1, ""); statement.append(");"); return statement.toString(); } /** * Returns a statement which creates the table. 
* @param tableName the name of the table * @param entity the EntityBundleObject which describes the table to be created and contains a template for the creation statement */ @NotNull private static String getCreateTableStatementFromEntityBundle(@NotNull String tableName, @NotNull EntityBundle entity) { String statement = entity.getCreateSql().replace(BundleUtil.TABLE_NAME_PLACEHOLDER, tableName); if (!statement.trim().endsWith(";")) { statement += ";"; } return statement; } /** * Returns a statement which destroys the table. * * @param tableName the name of the table to be deleted */ @NotNull private static String getDropTableStatement(@NotNull String tableName) { tableName = getValidName(tableName); return String.format("DROP TABLE %s;", tableName); } /** * Returns a statement which adds the column to the table. * * @param tableName the name of the table to be modified * @param field the FieldBundle which describes the column to be added */ @NotNull private static String getAddColumnStatement(@NotNull String tableName, @NotNull FieldBundle field) { tableName = getValidName(tableName); return String.format("ALTER TABLE %s ADD COLUMN %s;", tableName, getColumnDescription(field)); } /** * Returns a statement which renames a column of a table. * * @param tableName the name of the table * @param oldColumnName the name of the column to be renamed * @param newColumnName the new name of the column to be renamed */ private static String getRenameColumnStatement(@NotNull String tableName, @NotNull String oldColumnName, @NotNull String newColumnName) { tableName = getValidName(tableName); oldColumnName = getValidName(oldColumnName); newColumnName = getValidName(newColumnName); return String.format("ALTER TABLE %s RENAME COLUMN %s TO %s;", tableName, oldColumnName, newColumnName); } /** * Returns a String containing the full description of the column. 
* * @param field the FieldBundle which describes the column */ @NotNull private static String getColumnDescription(@NotNull FieldBundle field) { StringBuilder fieldDescription = new StringBuilder(String.format("%s %s", getValidName(field.getColumnName()), field.getAffinity())); if (field.getDefaultValue() != null) { fieldDescription.append(String.format(" DEFAULT %s", toSqlStringLiteral(field.getDefaultValue()))); } if (field.isNonNull()) { fieldDescription.append(" NOT NULL"); } return fieldDescription.toString(); } @NotNull private static String getPrimaryKeyConstraint(@NotNull PrimaryKeyBundle primaryKey) { return String.format("PRIMARY KEY (%s)", getColumnEnumeration(primaryKey.getColumnNames())); } @NotNull private static String getForeignKeyConstraint(@NotNull ForeignKeyBundle foreignKey) { String onUpdate = foreignKey.getOnUpdate() != null && !foreignKey.getOnUpdate().isEmpty() ? String.format(" ON UPDATE %s", foreignKey.getOnUpdate()) : ""; String onDelete = foreignKey.getOnDelete() != null && !foreignKey.getOnDelete().isEmpty() ? 
String.format(" ON DELETE %s", foreignKey.getOnDelete()) : ""; return String.format( "FOREIGN KEY (%s) REFERENCES %s (%s)%s%s", getColumnEnumeration(foreignKey.getColumns()), getValidName(foreignKey.getTable()), getColumnEnumeration(foreignKey.getReferencedColumns()), onUpdate, onDelete); } private static String getForeignKeyConstraintCheck(@NotNull String tableName) { return String.format("PRAGMA foreign_key_check(%s);", getValidName(tableName)); } @NotNull private static String getCreateIndexStatement(@NotNull IndexBundle index, @NotNull String tableName) { tableName = getValidName(tableName); StringBuilder statement = new StringBuilder("CREATE "); if (index.isUnique()) { statement.append("UNIQUE "); } statement.append( String.format("INDEX %s ON %s (%s);", getValidName(index.getName()), tableName, getColumnEnumeration(index.getColumnNames()))); return statement.toString(); } @NotNull private static String getDropIndexStatement(@NotNull IndexBundle index) { return String.format("DROP INDEX %s;", getValidName(index.getName())); } @NotNull private static String getUpdateColumnsValuesStatement(@NotNull String tableName, @NotNull Map<FieldBundle, String> newFieldsValues) { tableName = getValidName(tableName); StringBuilder statement = new StringBuilder(String.format("UPDATE %s\nSET", tableName)); String valueAssignments = newFieldsValues.keySet().stream() .map(fieldBundle -> String.format("\t%s = %s", fieldBundle.getColumnName(), toSqlStringLiteral(newFieldsValues.get(fieldBundle)))) .collect(Collectors.joining(",\n")); statement.append(valueAssignments); statement.append(";"); return statement.toString(); } @NotNull private static String getCreateViewStatement(@NotNull DatabaseViewBundle view) { String statement = view.getCreateSql().replace(BundleUtil.VIEW_NAME_PLACEHOLDER, getValidName(view.getViewName())); if (!statement.trim().endsWith(";")) { statement += ";"; } return statement; } @NotNull private static String getDropViewStatement(@NotNull DatabaseViewBundle 
view) { return String.format("DROP VIEW %s;", getValidName(view.getViewName())); } @NotNull private static String getColumnEnumeration(@NotNull List<String> columnNames) { return columnNames.stream().map(c -> getValidName(c)).collect(Collectors.joining(", ")); } /** * Formats a string into a valid SQLite string literal by quoting it with simple quotes and escaping any already existing quotes. */ @NotNull private static String toSqlStringLiteral(@NotNull String value) { if (value.contains("'")) { value = value.replaceAll("'", "''"); } return String.format("'%s'", value); } private static boolean shouldAddAutoIncrementToColumn(@NotNull FieldBundle field, @NotNull PrimaryKeyBundle primaryKey) { return primaryKey.isAutoGenerate() && field.getAffinity().toLowerCase(Locale.US).equals("integer") && (primaryKey.getColumnNames().size() == 1 && primaryKey.getColumnNames().get(0).equals(field.getColumnName())); } /** * Returns the name of the table to copy data from in case of an complex update. * In case of an FTS table with external content, it always returns the name of the external content table. * In case of a renamed table (which is not an FTS table with external content), it will return the old name of the table. * Otherwise, it returns the name of the table to be updated. */ @NotNull private static String getDataSourceForComplexUpdate(@NotNull EntityUpdate entityUpdate) { EntityBundle newState = entityUpdate.getNewState(); if (entityUpdate.shouldCreateAnFtsEntity() && ftsTableNeedsExternalContentSource((FtsEntityBundle)newState)) { return ((FtsEntityBundle)newState).getFtsOptions().getContentTable(); } if (entityUpdate.shouldRenameTable()) { return entityUpdate.getOldTableName(); } return entityUpdate.getNewTableName(); } /** * Returns a value to initialize the given column with. * * <p>First we look for a user defined value. 
If there is none, we check whether the column is already present in the * table and therefore already has values to be populated with.</p> */ @Nullable private static String getValueForField(@NotNull FieldBundle field, @NotNull EntityUpdate entityUpdate) { // Check for user specified value String userSpecifiedValue = entityUpdate.getValuesForUninitializedFields().get(field); if (userSpecifiedValue != null) { return toSqlStringLiteral(userSpecifiedValue); } // If the table to be updated is an FTS table and has an external content source, we take the values for the current // column from its correspondent column in the external content table if (entityUpdate.shouldCreateAnFtsEntity()) { FtsOptionsBundle ftsOptions = ((FtsEntityBundle)entityUpdate.getNewState()).getFtsOptions(); if (ftsOptions != null && (ftsOptions.getContentTable() != null && !ftsOptions.getContentTable().isEmpty())) { return getValidName(field.getColumnName()); } } // If the column was renamed, we use the values under the old name String oldColumnName = entityUpdate.getRenamedFields().get(field); if (oldColumnName != null) { return getValidName(oldColumnName); } // If the column is present in the table as is, we use the values under its name String columnName = field.getColumnName(); if (entityUpdate.getUnmodifiedFields().get(columnName) != null || entityUpdate.getModifiedFields().get(columnName) != null) { return getValidName(columnName); } return null; } /** * Provides a correlation between the name of the columns from newly created table and the values they should be initialized with. 
*/ @NotNull private static Map<String, String> getColumnNameToColumnValueMapping(@NotNull EntityUpdate entityUpdate) { Map<String, String> columnNameToColumnValue = new LinkedHashMap<>(); for (FieldBundle field : entityUpdate.getAllFields()) { String value = getValueForField(field, entityUpdate); if (value == null) { if (columnNeedsUserSpecifiedValue(field)) { throw new InvalidDataException("NOT NULL column without default value or user specified value."); } } else { columnNameToColumnValue.put(getValidName(field.getColumnName()), value); } } return columnNameToColumnValue; } private static boolean columnNeedsUserSpecifiedValue(@NotNull FieldBundle field) { return field.isNonNull() && (field.getDefaultValue() == null || field.getDefaultValue().isEmpty()); } }
7,631
686
/* * (C) Copyright IBM Corp. 2021. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. */ package com.ibm.watson.assistant.v1.model; import static org.testng.Assert.*; import com.ibm.cloud.sdk.core.service.model.FileWithMetadata; import com.ibm.watson.assistant.v1.utils.TestUtilities; import java.io.InputStream; import java.util.HashMap; import java.util.List; import org.testng.annotations.Test; /** Unit test class for the DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined model. */ public class DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedTest { final HashMap<String, InputStream> mockStreamMap = TestUtilities.createMockStreamMap(); final List<FileWithMetadata> mockListFileWithMetadata = TestUtilities.creatMockListFileWithMetadata(); @Test public void testDialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined() throws Throwable { ResponseGenericChannel responseGenericChannelModel = new ResponseGenericChannel.Builder().channel("chat").build(); assertEquals(responseGenericChannelModel.channel(), "chat"); DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModel = new DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.Builder() .responseType("user_defined") .userDefined( new java.util.HashMap<String, Object>() { { put("foo", "testString"); } }) .channels( new java.util.ArrayList<ResponseGenericChannel>( java.util.Arrays.asList(responseGenericChannelModel))) .build(); 
assertEquals( dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModel.responseType(), "user_defined"); assertEquals( dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModel.userDefined(), new java.util.HashMap<String, Object>() { { put("foo", "testString"); } }); assertEquals( dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModel.channels(), new java.util.ArrayList<ResponseGenericChannel>( java.util.Arrays.asList(responseGenericChannelModel))); String json = TestUtilities.serialize( dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModel); DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModelNew = TestUtilities.deserialize( json, DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.class); assertTrue( dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModelNew instanceof DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined); assertEquals( dialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedModelNew.responseType(), "user_defined"); } @Test(expectedExceptions = IllegalArgumentException.class) public void testDialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefinedError() throws Throwable { new DialogNodeOutputGenericDialogNodeOutputResponseTypeUserDefined.Builder().build(); } }
1,361
808
#include <ntddk.h> // This implementation is derived from Hvpp by <NAME> // - https://github.com/wbenny/hvpp // Based on my benchmarks, this simple implementation beats other (often // more complex) spinlock implementations - such as queue spinlocks, ticket // spinlocks, MCS locks. The only difference between this implementation // and completely naive spinlock is the "backoff". // // Also, benefit of this implementation is that we can use it with // STL lock guards, e.g.: std::lock_guard. // // Look here for more information: // - https://locklessinc.com/articles/locks/ // - https://github.com/cyfdecyf/spinlock static unsigned max_wait = 65536; //---------------------------------------------------------------------------------------------- inline BOOLEAN SpinlockTryLock(volatile LONG* Lock) { return (!(*Lock) && !_interlockedbittestandset(Lock, 0)); } //---------------------------------------------------------------------------------------------- void SpinlockLock(volatile LONG* Lock) { unsigned wait = 1; while (!SpinlockTryLock(Lock)) { for (unsigned i = 0; i < wait; ++i) { _mm_pause(); } // Don't call "pause" too many times. If the wait becomes too big, // clamp it to the max_wait. if (wait * 2 > max_wait) { wait = max_wait; } else { wait = wait * 2; } } } //---------------------------------------------------------------------------------------------- void SpinlockUnlock(volatile LONG* Lock) { *Lock = 0; } //----------------------------------------------------------------------------------------------
512
852
#ifndef _CALOMISCALIBTOOLSMC_H #define _CALOMISCALIBTOOLSMC_H // -*- C++ -*- // // Package: CaloMiscalibToolsMC // Class: CaloMiscalibToolsMC // /**\class CaloMiscalibToolsMC CaloMiscalibToolsMC.cc CalibCalorimetry/CaloMiscalibToolsMC/src/CaloMiscalibToolsMC.cc Description: Definition of CaloMiscalibToolsMC Implementation: <Notes on implementation> */ // // Original Author: <NAME> // Created: Mon Jul 17 18:07:01 CEST 2006 // // Modified : <NAME> // Date: : 11/09/2006 // Reason : split class definition (.h) from source code (.cc) // system include files #include <memory> // user include files #include "FWCore/Framework/interface/SourceFactory.h" #include "FWCore/Framework/interface/Frameworkfwd.h" #include "FWCore/Framework/interface/ESProducer.h" #include "FWCore/Framework/interface/Event.h" #include "FWCore/Framework/interface/MakerMacros.h" #include "FWCore/ParameterSet/interface/ParameterSet.h" #include "FWCore/Framework/interface/EventSetupRecordIntervalFinder.h" #include "FWCore/Framework/interface/EventSetup.h" #include "CondFormats/EcalObjects/interface/EcalIntercalibConstantsMC.h" #include "CondFormats/DataRecord/interface/EcalIntercalibConstantsMCRcd.h" #include "CalibCalorimetry/CaloMiscalibTools/interface/CaloMiscalibMapEcal.h" // // class decleration // class CaloMiscalibToolsMC : public edm::ESProducer, public edm::EventSetupRecordIntervalFinder { public: CaloMiscalibToolsMC(const edm::ParameterSet &); ~CaloMiscalibToolsMC() override; typedef std::unique_ptr<EcalIntercalibConstantsMC> ReturnType; ReturnType produce(const EcalIntercalibConstantsMCRcd &); private: // ----------member data --------------------------- void setIntervalFor(const edm::eventsetup::EventSetupRecordKey &, const edm::IOVSyncValue &, edm::ValidityInterval &) override; std::string barrelfile_; std::string endcapfile_; std::string barrelfileinpath_; std::string endcapfileinpath_; }; #endif
764
3,372
<reponame>MC-JY/aws-sdk-java /* * Copyright 2016-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with * the License. A copy of the License is located at * * http://aws.amazon.com/apache2.0 * * or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions * and limitations under the License. */ package com.amazonaws.services.route53recoveryreadiness.model; import java.io.Serializable; import javax.annotation.Generated; import com.amazonaws.protocol.StructuredPojo; import com.amazonaws.protocol.ProtocolMarshaller; /** * Result with status for an individual rule.. * * @see <a href="http://docs.aws.amazon.com/goto/WebAPI/route53-recovery-readiness-2019-12-02/RuleResult" * target="_top">AWS API Documentation</a> */ @Generated("com.amazonaws:aws-java-sdk-code-generator") public class RuleResult implements Serializable, Cloneable, StructuredPojo { /** The time the resource was last checked for readiness, in ISO-8601 format, UTC. */ private java.util.Date lastCheckedTimestamp; /** Details about the resource's readiness */ private java.util.List<Message> messages; /** The readiness at rule level. */ private String readiness; /** The identifier of the rule. */ private String ruleId; /** * The time the resource was last checked for readiness, in ISO-8601 format, UTC. * * @param lastCheckedTimestamp * The time the resource was last checked for readiness, in ISO-8601 format, UTC. */ public void setLastCheckedTimestamp(java.util.Date lastCheckedTimestamp) { this.lastCheckedTimestamp = lastCheckedTimestamp; } /** * The time the resource was last checked for readiness, in ISO-8601 format, UTC. * * @return The time the resource was last checked for readiness, in ISO-8601 format, UTC. 
*/ public java.util.Date getLastCheckedTimestamp() { return this.lastCheckedTimestamp; } /** * The time the resource was last checked for readiness, in ISO-8601 format, UTC. * * @param lastCheckedTimestamp * The time the resource was last checked for readiness, in ISO-8601 format, UTC. * @return Returns a reference to this object so that method calls can be chained together. */ public RuleResult withLastCheckedTimestamp(java.util.Date lastCheckedTimestamp) { setLastCheckedTimestamp(lastCheckedTimestamp); return this; } /** * Details about the resource's readiness * * @return Details about the resource's readiness */ public java.util.List<Message> getMessages() { return messages; } /** * Details about the resource's readiness * * @param messages * Details about the resource's readiness */ public void setMessages(java.util.Collection<Message> messages) { if (messages == null) { this.messages = null; return; } this.messages = new java.util.ArrayList<Message>(messages); } /** * Details about the resource's readiness * <p> * <b>NOTE:</b> This method appends the values to the existing list (if any). Use * {@link #setMessages(java.util.Collection)} or {@link #withMessages(java.util.Collection)} if you want to override * the existing values. * </p> * * @param messages * Details about the resource's readiness * @return Returns a reference to this object so that method calls can be chained together. */ public RuleResult withMessages(Message... messages) { if (this.messages == null) { setMessages(new java.util.ArrayList<Message>(messages.length)); } for (Message ele : messages) { this.messages.add(ele); } return this; } /** * Details about the resource's readiness * * @param messages * Details about the resource's readiness * @return Returns a reference to this object so that method calls can be chained together. */ public RuleResult withMessages(java.util.Collection<Message> messages) { setMessages(messages); return this; } /** * The readiness at rule level. 
* * @param readiness * The readiness at rule level. * @see Readiness */ public void setReadiness(String readiness) { this.readiness = readiness; } /** * The readiness at rule level. * * @return The readiness at rule level. * @see Readiness */ public String getReadiness() { return this.readiness; } /** * The readiness at rule level. * * @param readiness * The readiness at rule level. * @return Returns a reference to this object so that method calls can be chained together. * @see Readiness */ public RuleResult withReadiness(String readiness) { setReadiness(readiness); return this; } /** * The readiness at rule level. * * @param readiness * The readiness at rule level. * @return Returns a reference to this object so that method calls can be chained together. * @see Readiness */ public RuleResult withReadiness(Readiness readiness) { this.readiness = readiness.toString(); return this; } /** * The identifier of the rule. * * @param ruleId * The identifier of the rule. */ public void setRuleId(String ruleId) { this.ruleId = ruleId; } /** * The identifier of the rule. * * @return The identifier of the rule. */ public String getRuleId() { return this.ruleId; } /** * The identifier of the rule. * * @param ruleId * The identifier of the rule. * @return Returns a reference to this object so that method calls can be chained together. */ public RuleResult withRuleId(String ruleId) { setRuleId(ruleId); return this; } /** * Returns a string representation of this object. This is useful for testing and debugging. Sensitive data will be * redacted from this string using a placeholder value. * * @return A string representation of this object. 
* * @see java.lang.Object#toString() */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("{"); if (getLastCheckedTimestamp() != null) sb.append("LastCheckedTimestamp: ").append(getLastCheckedTimestamp()).append(","); if (getMessages() != null) sb.append("Messages: ").append(getMessages()).append(","); if (getReadiness() != null) sb.append("Readiness: ").append(getReadiness()).append(","); if (getRuleId() != null) sb.append("RuleId: ").append(getRuleId()); sb.append("}"); return sb.toString(); } @Override public boolean equals(Object obj) { if (this == obj) return true; if (obj == null) return false; if (obj instanceof RuleResult == false) return false; RuleResult other = (RuleResult) obj; if (other.getLastCheckedTimestamp() == null ^ this.getLastCheckedTimestamp() == null) return false; if (other.getLastCheckedTimestamp() != null && other.getLastCheckedTimestamp().equals(this.getLastCheckedTimestamp()) == false) return false; if (other.getMessages() == null ^ this.getMessages() == null) return false; if (other.getMessages() != null && other.getMessages().equals(this.getMessages()) == false) return false; if (other.getReadiness() == null ^ this.getReadiness() == null) return false; if (other.getReadiness() != null && other.getReadiness().equals(this.getReadiness()) == false) return false; if (other.getRuleId() == null ^ this.getRuleId() == null) return false; if (other.getRuleId() != null && other.getRuleId().equals(this.getRuleId()) == false) return false; return true; } @Override public int hashCode() { final int prime = 31; int hashCode = 1; hashCode = prime * hashCode + ((getLastCheckedTimestamp() == null) ? 0 : getLastCheckedTimestamp().hashCode()); hashCode = prime * hashCode + ((getMessages() == null) ? 0 : getMessages().hashCode()); hashCode = prime * hashCode + ((getReadiness() == null) ? 0 : getReadiness().hashCode()); hashCode = prime * hashCode + ((getRuleId() == null) ? 
0 : getRuleId().hashCode()); return hashCode; } @Override public RuleResult clone() { try { return (RuleResult) super.clone(); } catch (CloneNotSupportedException e) { throw new IllegalStateException("Got a CloneNotSupportedException from Object.clone() " + "even though we're Cloneable!", e); } } @com.amazonaws.annotation.SdkInternalApi @Override public void marshall(ProtocolMarshaller protocolMarshaller) { com.amazonaws.services.route53recoveryreadiness.model.transform.RuleResultMarshaller.getInstance().marshall(this, protocolMarshaller); } }
3,691
3,041
<gh_stars>1000+ import pytest from ethpm import ( Package, ) from ethpm.dependencies import ( Dependencies, ) from ethpm.exceptions import ( EthPMValidationError, FailureToFetchIPFSAssetsError, ) @pytest.fixture def piper_coin_pkg(piper_coin_manifest, w3): return Package(piper_coin_manifest, w3) def test_get_build_dependencies(dummy_ipfs_backend, piper_coin_pkg, w3): build_deps = piper_coin_pkg.build_dependencies assert isinstance(build_deps, Dependencies) def test_get_build_dependencies_with_invalid_uris( dummy_ipfs_backend, piper_coin_pkg, w3 ): piper_coin_pkg.manifest["buildDependencies"]["standard-token"] = "invalid_ipfs_uri" with pytest.raises(FailureToFetchIPFSAssetsError): piper_coin_pkg.build_dependencies def test_get_build_dependencies_without_dependencies_raises_exception( piper_coin_manifest, w3 ): piper_coin_manifest.pop("buildDependencies", None) pkg = Package(piper_coin_manifest, w3) with pytest.raises(EthPMValidationError, match="Manifest doesn't have any build dependencies"): pkg.build_dependencies def test_get_build_dependencies_with_empty_dependencies_raises_exception( dummy_ipfs_backend, piper_coin_manifest, w3 ): piper_coin_manifest["buildDependencies"] = {} pkg = Package(piper_coin_manifest, w3) with pytest.raises(EthPMValidationError, match="Manifest's build dependencies key is empty"): pkg.build_dependencies
569
1,909
<filename>xchange-okcoin/src/test/java/org/knowm/xchange/okcoin/service/MetaDataFileTest.java package org.knowm.xchange.okcoin.service; import java.io.BufferedReader; import java.io.InputStream; import java.io.InputStreamReader; import java.util.stream.Collectors; import org.junit.Test; import org.knowm.xchange.BaseExchange; import org.knowm.xchange.Exchange; import org.knowm.xchange.ExchangeFactory; import org.knowm.xchange.ExchangeSpecification; import org.knowm.xchange.okcoin.OkCoinExchange; import org.knowm.xchange.service.BaseExchangeService; import org.knowm.xchange.utils.Assert; public class MetaDataFileTest { @Test public void metaDataFileNameTest() { ExchangeSpecification exSpec = new ExchangeSpecification(OkCoinExchange.class); Exchange exchange = ExchangeFactory.INSTANCE.createExchange(exSpec); String metaDataFileName = ((BaseExchange) exchange).getMetaDataFileName(exSpec); Assert.isTrue( "okcoin_china".equals(metaDataFileName), "the meta data file name not equal \"okcoin_china\" ???"); System.out.println("metaDataFileName=" + metaDataFileName); exSpec.setExchangeSpecificParametersItem("Use_Intl", true); metaDataFileName = ((BaseExchange) exchange).getMetaDataFileName(exSpec); Assert.isTrue( "okcoin_intl".equals(metaDataFileName), "the meta data file name not equal \"okcoin_intl\" ???"); System.out.println("metaDataFileName=" + metaDataFileName); exSpec.setExchangeSpecificParametersItem("Use_Intl", true); exSpec.setExchangeSpecificParametersItem("Use_Futures", true); metaDataFileName = ((BaseExchange) exchange).getMetaDataFileName(exSpec); Assert.isTrue( "okcoin_futures".equals(metaDataFileName), "the meta data file name not equal \"okcoin_futures\" ???"); System.out.println("metaDataFileName=" + metaDataFileName); } @Test public void loadMetaDataFileNameForChinaTest() { ExchangeSpecification exSpec = new ExchangeSpecification(OkCoinExchange.class); Exchange exchange = ExchangeFactory.INSTANCE.createExchange(exSpec); String metaDataFileName = 
((BaseExchange) exchange).getMetaDataFileName(exSpec); loadMetaDataFileContents(metaDataFileName); exSpec.setExchangeSpecificParametersItem("Use_Intl", true); metaDataFileName = ((BaseExchange) exchange).getMetaDataFileName(exSpec); loadMetaDataFileContents(metaDataFileName); exSpec.setExchangeSpecificParametersItem("Use_Intl", true); exSpec.setExchangeSpecificParametersItem("Use_Futures", true); metaDataFileName = ((BaseExchange) exchange).getMetaDataFileName(exSpec); loadMetaDataFileContents(metaDataFileName); } private void loadMetaDataFileContents(String metaDataFileName) { InputStream inputStream = BaseExchangeService.class.getClassLoader().getResourceAsStream(metaDataFileName + ".json"); String strContents = new BufferedReader(new InputStreamReader(inputStream)) .lines() .collect(Collectors.joining("\n")); System.out.println(strContents); } }
1,049
1,056
<reponame>timfel/netbeans<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.form.fakepeer; import java.awt.*; /** * * @author <NAME> */ public class FakePeerContainer extends Container { public FakePeerContainer() { super(); setFont(FakePeerSupport.getDefaultAWTFont()); } @Override public void addNotify() { FakePeerSupport.attachFakePeerRecursively(this); super.addNotify(); } @Override protected void addImpl(Component comp, Object constraints, int index) { FakePeerSupport.attachFakePeer(comp); super.addImpl(comp, constraints, index); } @Override public void update(Graphics g) { } @Override public void paint(Graphics g) { Dimension sz = getSize(); // Shape oldClip = g.getClip(); // g.setClip(0, 0, sz.width, sz.height); Color c = SystemColor.control; g.setColor(c); g.fillRect(0, 0, sz.width, sz.height); // g.setClip(oldClip); super.paint(g); paintFakePeersRecursively(g, this); } private static void paintFakePeersRecursively(Graphics g, Container container) { if (!container.isVisible()) return; Component components[] = FakePeerSupport.getComponents(container); int ncomponents = components.length; Rectangle clip = g.getClipBounds(); for (int i = 0; i < ncomponents; i++) { Component comp = components[i]; 
if (comp != null && FakePeerSupport.getPeer(comp) instanceof FakePeer && comp.isVisible()) { Rectangle cr = comp.getBounds(); if ((clip == null) || cr.intersects(clip)) { Graphics cg = g.create(cr.x, cr.y, cr.width, cr.height); cg.setFont(comp.getFont()); try { FakePeerSupport.getPeer(comp).paint(cg); } finally { cg.dispose(); } } } if (comp instanceof Container) { Rectangle cr = comp.getBounds(); if ((clip == null) || cr.intersects(clip)) { Graphics cg = g.create(cr.x, cr.y, cr.width, cr.height); paintFakePeersRecursively(cg,(Container) comp); cg.dispose(); } } } } }
1,435
988
<reponame>JoachimRohde/netbeans /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.openide.nodes; import java.lang.ref.Reference; import java.lang.ref.WeakReference; import java.util.List; import javax.swing.Action; import org.openide.util.NbBundle; /** * Factory used to create <a href="Children.html">Children</a> * objects. Children objects supply child Nodes for a * Node. Usage is to write a class that extends ChildFactory and * pass that to Children.create(). When the Node is expanded or its * children are programmatically requested, the * <a href="#createKeys(java.util.List)">createKeys(List &lt;T&gt;)</a> method * will be invoked to create the List of objects to be modelled as Nodes. * Later, on demand, each object from the List will be passed in turn to * <a href="#createNodesForKey(java.lang.Object)">createNodesForKey(T)</a>, * which may return an array of zero or more Nodes for the object. * <p> * A ChildFactory can be used either to create typical Children object, or * one which will be initialized on a background thread (providing * a "Please Wait" Node in the meantime). 
It can be used most simple cases * that Children.Keys has been historically used for, and makes it easy to * change a Children object to compute its keys asynchronously if that is * needed for performance reasons. * <p> * Only one ChildFactory object may be used per Children object; if you wish * to have multiple Nodes modelling children produced by a single * ChildFactory, use @link FilterNode to wrap the Node that owns the * Children object for this ChildFactory. * <p> * To use, simply override * <a href="#createKeys(java.util.List)">createKeys(List &lt;T&gt;) and * <a href="#createNodesForKey(java.lang.Object)">createNodesForKey(T)</a> or * <a href="#createNodeForKey(java.lang.Object)">createNodeForKey(T)</a>. * * @param T The type of objects in the keys collection * @author <NAME> * @see Children#create(ChildFactory, boolean) * @since org.openide.nodes 7.1 */ public abstract class ChildFactory <T> { /** * Create a Node for a given key that was put into the list passed into * createKeys(). Either override this method if there will always be * 0 or 1 nodes per key, or createNodesForKey() if there may be more * than one. * * The default implementation throws an AssertionError. If you override * createNodesForKey() and do not call super, then you do not need to * override this method; but at least one of the two must be overridden. * * @param key An object that was previously put into the list passed * to createKeys() * @return A node, or null if no node should be shown for this object. */ protected Node createNodeForKey(T key) { throw new AssertionError("Neither createNodeForKey() nor " + //NOI18N "createNodesForKey() overridden in " + getClass().getName()); //NOI18N } /** * Create Nodes for a given key object (one from the <code>List</code> * passed to createKeys(List <T>)). The default implementation simply * delegates to <code>createNodeForKey</code> and returns the result of * that call in an array of nodes. 
* <p> * Most Children objects have a 1:1 mapping between keys and nodes. For * convenience in that situation, simply override createNodeForKey(T). * * @param key An object from the list returned by * <code>asynchCreateKeys()</code> * @return null if no nodes, or zero or more Nodes to represent this key */ protected Node[] createNodesForKey(T key) { Node n = createNodeForKey(key); return n == null ? null : new Node[] { n }; } /** * Create a list of keys which can be individually passed to * createNodes() to create child Nodes. Implementations of * this method should regularly check Thread.interrupted(), and * if it returns true (meaning the parent Node was collapsed or * destroyed), stop creating keys immediately and return * true. This method is guaranteed <i>not</i> to be called on the * AWT event thread if this ChildFactory was passed to * Children.create() with the <code>asynchronous</code> parameter * set to true. If not, then no guarantees are made as to what * the calling thread is. * <p>Returning false is tricky since there is no way to tell whether * the loop has been restarted except by examining what is already in * the list. As of 7.27 it is generally unnecessary since calls to * {@link List#add(Object)} will immediately display the new element * as well as checking for interruption. * @param toPopulate A list to add key objects to * @return true if the list of keys has been completely populated, * false if the list has only been partially populated and * this method should be called again to batch more keys */ protected abstract boolean createKeys(List <T> toPopulate); /** * Call this method when the list of objects being modelled by the * has changed and the child Nodes of this Node should be updated. The * boolean argument is a <i>hint</i> to the refresh mechanism (which * will cause createKeys() to be invoked again) that it is safe to * synchronously recreate. 
* * @param immediate If true, the refresh should occur in the calling * thread (be careful not to be holding any locks that might * deadlock with your key/child creation methods if you pass true). * Note that this parameter is only meaningful when using an * asynchronous children instance (i.e. true was passed as the * second parameter to <code>Children.create()</code>). If the * Children object for this ChildFactory is called with <code>immediate</code> * true on the AWT event dispatch thread, and it is an asynchronous * Children object, this parameter will be ignored and computation * will be scheduled on a background thread. */ protected final void refresh(boolean immediate) { Observer obs = observer == null ? null : observer.get(); if (obs != null) { obs.refresh(immediate); } } Node getWaitNode() { Node n = createWaitNode(); return n == null ? null : new WaitFilterNode(n); } /** * Create the Node that should be shown while the keys are being computed * on a background thread. * This method will not be called if this ChildFactory is used for a * synchronous children which does not compute its keys on a background * thread. Whether an instance is synchronous or not is determined by a * parameter to * <a href="Children.html#create(ChildFactory, boolean)">Children.create()</a>. * <p> * To show no node at all when the Children object is initially expanded in * the UI, simply return null. * <p> * The default implementation returns a Node that shows an hourglass cursor * and the localized text &quot;Please Wait...&quot;. * * @return A Node, or null if no wait node should be shown. 
*/ protected Node createWaitNode() { AbstractNode n = new AbstractNode(Children.LEAF) { public @Override Action[] getActions(boolean context) { return new Action[0]; } }; n.setIconBaseWithExtension("org/openide/nodes/wait.gif"); //NOI18N n.setDisplayName(NbBundle.getMessage(ChildFactory.class, "LBL_WAIT")); //NOI18N return n; } private Reference <Observer> observer = null; final void setObserver(Observer observer) { if (this.observer != null) { throw new IllegalStateException("Attempting to create two Children" + //NOI18N " objects for a single ChildFactory " + this + ". Use " + //NOI18N "FilterNode.Children over the existing Children object " + //NOI18N "instead"); //NOI18N } this.observer = new WeakReference <Observer> (observer); } void removeNotify() { //do nothing } void addNotify() { //do nothing } void destroyNodes(Node[] arr) { //do nothing } interface Observer { public void refresh(boolean immediate); } static boolean isWaitNode(Object n) { return n instanceof WaitFilterNode; } /** * This class exists to uniquify/mark any Node returned by createWaitNode * such that AsynchChildren can identify it absolutely as not being an * object that should be passed to createNodeForKey(). */ private static final class WaitFilterNode extends FilterNode { public WaitFilterNode(Node orig) { super(orig); } } /** * Subclass of ChildFactory with lifecycle methods which will be called * on first use and last use. * * @param <T> The key type for this child factory * @since org.openide.nodes 7.7 */ public abstract static class Detachable<T> extends ChildFactory<T>{ /** * Called immediately before the first call to createKeys(). Override * to set up listening for changes, allocating expensive-to-create * resources, etc. */ @Override protected void addNotify() { //do nothing } /** * Called when this child factory is no longer in memory. * Does nothing by default; override if you need notification when not in use anymore. 
* * Note that this is usually not the best place for unregistering listeners, etc., * as listeners usually keep the child factory in memory, preventing it from being collected, thus preventing * this method to be called in the first place. */ @Override protected void removeNotify() { //do nothing } } }
3,647
521
<gh_stars>100-1000 #include <elle/nbd/Server.hh> #include <chrono> #include <fstream> #include <boost/endian/conversion.hpp> #include <boost/range/algorithm/find_if.hpp> #include <elle/bitfield.hh> #include <elle/enum.hh> #include <elle/log.hh> #include <elle/reactor/Scope.hh> #include <elle/reactor/network/Error.hh> #include <elle/reactor/network/TCPSocket.hh> #include <elle/reactor/scheduler.hh> #include <elle/utility/Move.hh> #include <elle/Exit.hh> ELLE_LOG_COMPONENT("elle.nbd.Server"); using boost::endian::endian_reverse; using namespace std::chrono_literals; namespace elle { namespace nbd { ELLE_ENUM( Info, (export_, 0), (name, 1), (description, 2), (block_size, 3)); ELLE_ENUM( Option, // Client wants to select a named export (is followed by name of export). (export_name, 1), // Client wishes to abort negotiation. (abort, 2), // Client request list of supported exports (not followed by data). (list, 3), // Client wishes to initiate TLS. (starttls, 5), // Client wants information about the given export. (info, 6), // Client wants to select the given and move to the transmission phase. (go, 7), // Client wishes to use structured replies during the transmission phase. (structured_replies, 8)); // Replies the server can send during negotiation ELLE_ENUM( Response, // ACK a request. Data: option number to be acked (ack, 1), // Reply to NBD_OPT_LIST (one of these per server; must be followed by // NBD_REP_ACK to signal the end of the list (server, 2), // Reply to NBD_OPT_INFO (info, 3), // If the high bit is set, the reply is an error (flag_error, (1 << 31)), // Client requested an option not understood by this version of the server (err_unsup, (1 | flag_error)), // Client requested an option not allowed by server configuration. 
(e.g., // the option was disabled) (err_policy, (2 | flag_error)), // Client issued an invalid request (err_invalid, (3 | flag_error)), // Option not supported on this platform (err_platform, (4 | flag_error)), // TLS required (err_tls_reqd, (5 | flag_error)), // NBD_OPT_INFO or ..._GO requested on unknown export (err_unknown, (6 | flag_error)), // Server is not willing to serve the export without the block size being // negotiated (err_block_size_reqd, (8 | flag_error))); ELLE_ENUM( HandshakeFlag, // new-style export that actually supports extending. (fixed_newstyle, 1 << 0), // we won't send the 128 bits of zeroes if the client sends // NBD_FLAG_C_NO_ZEROES. (no_zeroes, 1 << 1)); ELLE_ENUM( Command, (read, 0), (write, 1), (disc, 2), (flush, 3), (trim, 4), (cache, 5), (write_zeroes, 6), (block_status, 7), (resize, 8)); ELLE_ENUM( TransmissionFlag, (has_flags, 1 << 0), (read_only, 1 << 1), (send_flush, 1 << 2), (send_fua, 1 << 3), (rotational, 1 << 4), (send_trim, 1 << 5), (send_write_zeroes, 1 << 6), (send_df, 1 << 7), (can_multi_conn, 1 << 8), (send_resize, 1 << 9), (send_cache, 1 << 10)); ELLE_ENUM( Error, (none, 0), (perm, 1), (io, 5), (nomem, 12), (inval, 22), (nospc, 28), (overflow, 75), (shutdown, 108)); } } ELLE_BITFIELD(elle::nbd::HandshakeFlag); ELLE_BITFIELD(elle::nbd::TransmissionFlag); namespace elle { namespace nbd { static uint16_t read_16(reactor::network::Socket& sock) { return endian_reverse( *reinterpret_cast<uint16_t*>(sock.read(2).contents())); } static uint32_t read_32(reactor::network::Socket& sock) { return endian_reverse( *reinterpret_cast<uint32_t*>(sock.read(4).contents())); } static uint64_t read_64(reactor::network::Socket& sock) { return endian_reverse( *reinterpret_cast<uint64_t*>(sock.read(8).contents())); } Server::Server(boost::asio::ip::address host, int port) : _host(host) , _port(port) , _server() {} void Server::run() { this->_server.listen(this->_host, this->_port); this->_listening(this->_server.port()); 
elle::With<elle::reactor::Scope>() << [&] (elle::reactor::Scope& scope) { while (true) { auto sock = std::make_shared(std::move(*this->_server.accept())); scope.run_background( elle::print("{}", sock->peer()), [&, sock] { try { ELLE_TRACE_SCOPE("handle connection from {}", sock->peer()); bool zeroes = true; ELLE_DEBUG("initiate handshake") { sock->write("NBDMAGIC"); sock->write("IHAVEOPT"); sock->write(endian_reverse(uint16_t(HandshakeFlag::fixed_newstyle | HandshakeFlag::no_zeroes))); { auto flags = HandshakeFlag(read_32(*sock)); auto const consume = [&] (HandshakeFlag flag) { auto res = bool(flags & flag); flags = flags & ~flag; return res; }; if (consume(HandshakeFlag::fixed_newstyle)) ELLE_DEBUG("client supports fixed newstyle handshake"); else ELLE_TRACE( "client does not support fixed newstyle handshake"); if (consume(HandshakeFlag::no_zeroes)) { zeroes = false; ELLE_DEBUG("client requires no zeroes"); } if (bool(flags)) { ELLE_WARN("rejecting client with unknown flags: %x", flags); return; } } } auto& device = this->_options_haggling(*sock, zeroes); while (true) { // Consume magic number { static constexpr uint8_t magic_expected[4] = {0x25, 0x60, 0x95, 0x13}; auto magic = sock->read(4); if (magic != elle::ConstWeakBuffer(magic_expected)) ELLE_WARN("invalid request magic: {}", magic); } auto flags = read_16(*sock); auto cmd = Command(read_16(*sock)); auto handle = *reinterpret_cast<uint64_t*>(sock->read(8).contents()); auto offset = read_64(*sock); auto length = read_32(*sock); auto const resp = [&] (Error error = Error::none) { static constexpr uint8_t magic[4] = {0x67, 0x44, 0x66, 0x98}; sock->write(magic); sock->write(uint32_t(error)); sock->write(handle); }; ELLE_DUMP_SCOPE("received command: {}({}, {}, {}, {})", cmd, flags, handle, offset, length); if (offset + length > device.size()) if (cmd == Command::write || cmd == Command::write_zeroes) { ELLE_TRACE("{} is out of bound", cmd); resp(Error::nospc); break; } else if (cmd == Command::read || cmd == 
Command::trim) { ELLE_TRACE("{} is out of bound", cmd); resp(Error::inval); break; } switch (cmd) { case Command::read: { ELLE_TRACE_SCOPE("read {} bytes at {}", length, offset); resp(); auto data = device.read(offset, length); sock->write(data); break; } case Command::write: case Command::write_zeroes: { ELLE_TRACE_SCOPE( "write {} {} at {}", length, cmd == Command::write ? "bytes" : "zeroes", offset); if (cmd == Command::write) // FIXME: use kernel level no-copy device.write(offset, sock->read(length)); else device.write_zeroes(offset, length); resp(); break; } case Command::disc: { ELLE_TRACE_SCOPE("client requests disconnect"); throw elle::Exit(); } case Command::flush: { ELLE_TRACE_SCOPE("flush"); device.sync(); resp(); break; } case Command::trim: { ELLE_TRACE_SCOPE("trim {} bytes at {}", length, offset); device.trim(offset, length); resp(); break; } case Command::cache: { ELLE_TRACE_SCOPE("cache {} bytes at {}", length, offset); device.cache(offset, length); resp(); break; } case Command::block_status: case Command::resize: { ELLE_TRACE_SCOPE("unsupported {} request", cmd); resp(Error::inval); break; } default: { ELLE_WARN("unrecognized client request: {}", int(cmd)); resp(Error::inval); } } } } catch (elle::Exit const&) { ELLE_TRACE("gracefully ending session"); } catch (elle::reactor::network::ConnectionClosed const&) { ELLE_TRACE("client closed connection"); } catch (elle::Error const& e) { ELLE_WARN("client error: {}", e); } }); } }; } Server::Device& Server::_options_haggling(reactor::network::Socket& sock, bool zeroes) { ELLE_DEBUG("initiate options haggling") while (true) { auto magic = sock.read(8); if (magic != elle::ConstWeakBuffer("IHAVEOPT")) elle::err("unknow magic during options haggling: {}", magic); auto option = Option(read_32(sock)); ELLE_DUMP_SCOPE("received option: {}", option); auto data = [&] { auto length = read_32(sock); ELLE_DUMP("option data length: {}", length); auto data = sock.read(length); if (length > 0) ELLE_DUMP("option data: 
{}", data); return data; }(); auto const resp_header = [&] (Option option, Response rep) { sock.write(endian_reverse(uint64_t(0x3e889045565a9))); sock.write(endian_reverse(uint32_t(option))); sock.write(endian_reverse(uint32_t(rep))); }; auto const resp = [&] (Option option, Response rep) { ELLE_DUMP_SCOPE("send response: {}", rep); resp_header(option, rep); sock.write(endian_reverse(uint32_t(0))); }; auto const resp_export = [&] (std::size_t size) { sock.write(endian_reverse(uint64_t(size))); sock.write(endian_reverse( uint16_t( TransmissionFlag::has_flags | TransmissionFlag::send_flush | TransmissionFlag::send_fua | TransmissionFlag::send_trim | TransmissionFlag::send_df | TransmissionFlag::send_cache))); }; switch (option) { case Option::export_name: { auto& device = [&] () -> Device& { if (data == elle::ConstWeakBuffer("") && !this->_devices.empty()) return this->_devices.front().get(); auto res = boost::range::find_if( this->_devices, [&] (Device const& d) { return data == d.name(); }); if (res != this->_devices.end()) return (*res).get(); else { auto& res = this->_device_not_found(data.string()); this->add(res); return res; } }(); ELLE_TRACE_SCOPE("export volume {}", device.name()); resp_export(device.size()); if (zeroes) { uint8_t zeroes[124]; memset(&zeroes, 0, sizeof(zeroes)); sock.write(zeroes); } return device; } case Option::abort: { ELLE_TRACE("abort session upon client request") resp(Option::abort, Response::ack); throw elle::Exit(); } case Option::list: { ELLE_TRACE_SCOPE("list exports"); for (auto const& d: this->_devices) { auto const& name = d.get().name(); resp_header(Option::list, Response::server); sock.write( endian_reverse(uint32_t(4 + name.size()))); sock.write( endian_reverse(uint32_t(name.size()))); sock.write(name); } resp(Option::list, Response::ack); break; } case Option::starttls: { resp(Option::abort, Response::err_unsup); break; } case Option::info: case Option::go: { // nbd-client doesn't use these, so I have no reliable // way of 
testing it for now. ELLE_ERR("OPT_INFO AND OPT_GO NOT IMPLEMENTED"); resp(option, Response::err_unsup); // resp_header(Option::info, Response::info); // sock.write(endian_reverse(uint32_t(12))); // sock.write(endian_reverse(uint16_t(Info::export_))); // resp_export(); // resp_header(Option::info, Response::info); // sock.write(endian_reverse(uint32_t(2 + path.size()))); // sock.write(endian_reverse(uint16_t(Info::name))); // sock.write(path.string()); // resp(Option::info, Response::ack); // if (option == Option::go) // return XXX; break; } case Option::structured_replies: { if (data.size() == 0) // nbd-client doesn't seem to negotiate structured // replies, so I have no reliable way of testing it // for now. resp(Option::structured_replies, Response::err_unsup); else resp(Option::structured_replies, Response::err_invalid); break; } default: { ELLE_WARN("unknown client option: {}", int(option)); resp(option, Response::err_unsup); break; } } } } Server::Device& Server::_device_not_found(std::string name) { elle::err("no such device: {}", name); } void Server::add(Device& d) { this->_devices.emplace_back(d); } Server::Device::Device(std::string name, std::size_t size) : _name(std::move(name)) , _size(size) {} void Server::Device::trim(uint64_t, uint32_t) {} void Server::Device::cache(uint64_t offset, uint32_t length) {} } }
10,279
650
<reponame>radish2012/flask-restful-example
from datetime import datetime

from app.models.model import User
from app.utils.core import db


def my_job():
    # Scheduled-job callback: prints the current wall-clock time so each
    # scheduler tick is visible in the console output.
    print(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))


def db_query():
    # Example of querying the database from a background job.  Jobs run
    # outside any request, so a Flask application context must be pushed
    # explicitly before using the session.
    # NOTE(review): assumes db.app was bound when the SQLAlchemy extension
    # was initialised (init_app alone leaves it unset) -- confirm against
    # the app factory.
    with db.app.app_context():
        data = db.session.query(User).first()
        print(data)
133
416
<filename>src/main/java/com/tencentcloudapi/cwp/v20180228/models/AssetUserKeyInfo.java<gh_stars>100-1000
/*
 * Copyright (c) 2017-2018 THL A29 Limited, a Tencent company. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.tencentcloudapi.cwp.v20180228.models;

import com.tencentcloudapi.common.AbstractModel;
import com.google.gson.annotations.SerializedName;
import com.google.gson.annotations.Expose;
import java.util.HashMap;

/**
 * Model describing one public key configured for a host user account.
 * Serialized to/from JSON by Gson through the {@code @SerializedName} bindings.
 */
public class AssetUserKeyInfo extends AbstractModel{

    /**
    * Public key value
    */
    @SerializedName("Value")
    @Expose
    private String Value;

    /**
    * Public key comment
    */
    @SerializedName("Comment")
    @Expose
    private String Comment;

    /**
    * Encryption type
    */
    @SerializedName("EncryptType")
    @Expose
    private String EncryptType;

    /**
     * Get public key value
     * @return Value Public key value
     */
    public String getValue() {
        return this.Value;
    }

    /**
     * Set public key value
     * @param Value Public key value
     */
    public void setValue(String Value) {
        this.Value = Value;
    }

    /**
     * Get public key comment
     * @return Comment Public key comment
     */
    public String getComment() {
        return this.Comment;
    }

    /**
     * Set public key comment
     * @param Comment Public key comment
     */
    public void setComment(String Comment) {
        this.Comment = Comment;
    }

    /**
     * Get encryption type
     * @return EncryptType Encryption type
     */
    public String getEncryptType() {
        return this.EncryptType;
    }

    /**
     * Set encryption type
     * @param EncryptType Encryption type
     */
    public void setEncryptType(String EncryptType) {
        this.EncryptType = EncryptType;
    }

    public AssetUserKeyInfo() {
    }

    /**
     * NOTE: Any ambiguous key set via .set("AnyKey", "value") will be a shallow copy,
     *       and any explicit key, i.e Foo, set via .setFoo("value") will be a deep copy.
     */
    public AssetUserKeyInfo(AssetUserKeyInfo source) {
        if (source.Value != null) {
            this.Value = new String(source.Value);
        }
        if (source.Comment != null) {
            this.Comment = new String(source.Comment);
        }
        if (source.EncryptType != null) {
            this.EncryptType = new String(source.EncryptType);
        }
    }


    /**
     * Internal implementation, normal users should not use it.
     */
    public void toMap(HashMap<String, String> map, String prefix) {
        this.setParamSimple(map, prefix + "Value", this.Value);
        this.setParamSimple(map, prefix + "Comment", this.Comment);
        this.setParamSimple(map, prefix + "EncryptType", this.EncryptType);
    }
}
1,322
703
#pragma once #include <Core/World/Component.h> /// \brief Base class for settings components, of which only one per type should exist in each world. /// /// Settings components are used to store global scene specific settings, e.g. for physics it would be the scene gravity, /// for rendering it might be the time of day, fog settings, etc. /// /// Components of this type should be managed by an ezSettingsComponentManager, which makes it easy to query for the one instance /// in the world. /// /// class EZ_CORE_DLL ezSettingsComponent : public ezComponent { EZ_ADD_DYNAMIC_REFLECTION(ezSettingsComponent, ezComponent); ////////////////////////////////////////////////////////////////////////// // ezSettingsComponent public: /// \brief The constructor marks the component as modified. ezSettingsComponent(); ~ezSettingsComponent(); /// \brief Marks the component as modified. Individual bits can be used to mark only specific settings (groups) as modified. void SetModified(ezUInt32 uiBits = 0xFFFFFFFF) { m_uiSettingsModified |= uiBits; } /// \brief Checks whether the component (or some settings group) was marked as modified. bool IsModified(ezUInt32 uiBits = 0xFFFFFFFF) const { return (m_uiSettingsModified & uiBits) != 0; } /// \brief Marks the settings as not-modified. void ResetModified(ezUInt32 uiBits = 0xFFFFFFFF) { m_uiSettingsModified &= ~uiBits; } private: ezUInt32 m_uiSettingsModified; };
413
5,903
<gh_stars>1000+
/*! ******************************************************************************
 *
 * Pentaho Data Integration
 *
 * Copyright (C) 2002-2018 by <NAME> : http://www.pentaho.com
 *
 *******************************************************************************
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 ******************************************************************************/

package org.pentaho.di.ui.trans.steps.tableoutput;

import org.eclipse.swt.custom.CCombo;
import org.junit.Before;
import org.junit.Test;
import org.pentaho.di.core.database.DatabaseInterface;
import org.pentaho.di.core.database.DatabaseMeta;
import org.pentaho.di.core.row.RowMeta;
import org.pentaho.di.core.row.RowMetaInterface;
import org.pentaho.di.core.row.value.ValueMetaString;
import org.pentaho.di.trans.TransMeta;
import org.pentaho.di.ui.core.widget.TextVar;

import java.lang.reflect.Method;

import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import static org.powermock.reflect.Whitebox.setInternalState;

/**
 * Unit tests for {@link TableOutputDialog} row-meta validation and
 * connection-support checking.
 */
public class TableOutputDialogTest {

  // Fixtures rebuilt before each test: 'filled' contains only named fields,
  // 'empty' additionally contains one field with an empty name.
  private static RowMetaInterface filled;
  private static RowMetaInterface empty;
  private static String[] sample = { "1", "2", "3" };

  @Before
  public void setup() {
    filled = createRowMeta( sample, false );
    empty = createRowMeta( sample, true );
  }

  /**
   * isValidRowMeta is private and static, so it is invoked via reflection.
   * A row meta is valid only when every value meta carries a non-empty name.
   */
  @Test
  public void validationRowMetaTest() throws Exception {
    Method m = TableOutputDialog.class.getDeclaredMethod( "isValidRowMeta", RowMetaInterface.class );
    m.setAccessible( true );
    Object result1 = m.invoke( null, filled );
    Object result2 = m.invoke( null, empty );
    assertTrue( Boolean.parseBoolean( result1 + "" ) );
    assertFalse( Boolean.parseBoolean( result2 + "" ) );
  }

  /**
   * Builds a row meta from the given field names; when hasEmptyFields is set,
   * an extra value meta with an empty name is prepended.
   */
  private RowMetaInterface createRowMeta( String[] args, boolean hasEmptyFields ) {
    RowMetaInterface result = new RowMeta();
    if ( hasEmptyFields ) {
      result.addValueMeta( new ValueMetaString( "" ) );
    }
    for ( String s : args ) {
      result.addValueMeta( new ValueMetaString( s ) );
    }
    return result;
  }

  /**
   * Shared driver: wires a fully mocked dialog (widgets, trans meta, db meta)
   * via Whitebox, then verifies that isConnectionSupported() pops the
   * "unsupported connection" message box exactly when the database interface
   * reports no standard table-output support.
   */
  private void isConnectionSupportedTest( boolean supported ) {
    TableOutputDialog dialog = mock( TableOutputDialog.class );

    TransMeta transMeta = mock( TransMeta.class );
    DatabaseMeta dbMeta = mock( DatabaseMeta.class );
    TextVar text = mock( TextVar.class );
    CCombo combo = mock( CCombo.class );
    DatabaseInterface dbInterface = mock( DatabaseInterface.class );

    setInternalState( dialog, "wTable", text );
    setInternalState( dialog, "wConnection", combo );
    setInternalState( dialog, "transMeta", transMeta );

    when( text.getText() ).thenReturn( "someTable" );
    when( combo.getText() ).thenReturn( "someConnection" );
    when( transMeta.findDatabase( anyString() ) ).thenReturn( dbMeta );
    when( dbMeta.getDatabaseInterface() ).thenReturn( dbInterface );

    doNothing().when( dialog ).showUnsupportedConnectionMessageBox( dbInterface );
    doCallRealMethod().when( dialog ).isConnectionSupported();

    //Check that if the db interface does not support standard output then showUnsupportedConnection is called
    when( dbInterface.supportsStandardTableOutput() ).thenReturn( supported );
    dialog.isConnectionSupported();
    verify( dialog, times( !supported ? 1 : 0 ) ).showUnsupportedConnectionMessageBox( dbInterface );
  }

  @Test
  public void isConnectionSupportedValidTest() {
    isConnectionSupportedTest( true );
  }

  @Test
  public void isConnectionSupportedInvalidTest() {
    isConnectionSupportedTest( false );
  }
}
1,436
419
#pragma once #include "VisualGraph_StateMachineGraph.h" #include "VisualGraph_FlowGraph.h" //------------------------------------------------------------------------- namespace KRG::VisualGraph { // Helper to unsure we can maintain selection after a undo/redo struct SelectedNode { SelectedNode( BaseNode* pNode ) : m_nodeID( pNode->GetID() ), m_pNode( pNode ) {} bool operator==( SelectedNode const& rhs ) const { return m_nodeID == rhs.m_nodeID; } bool operator==( BaseNode const* pNode ) const { return m_nodeID == pNode->GetID(); } UUID m_nodeID; BaseNode* m_pNode = nullptr; }; //------------------------------------------------------------------------- class KRG_TOOLS_CORE_API GraphView { constexpr static char const* const s_copiedNodesKey = "Copied Visual Graph Nodes"; constexpr static char const* const s_copiedConnectionsKey = "Copied Visual Graph Connections"; protected: enum class DrawChannel { Background = 0, NodeBackground = 1, NodeForeground = 2, Connections = 3 }; enum class DragMode { None, View, Selection, Node, Connection, }; // Drag state //------------------------------------------------------------------------- struct DragState { inline Flow::Node* GetAsFlowNode() const{ return Cast<Flow::Node> ( m_pNode ); } inline SM::Node* GetAsStateMachineNode() const{ return Cast<SM::Node> ( m_pNode ); } void Reset() { m_mode = DragMode::None; m_startValue = m_lastFrameDragDelta = ImVec2( 0, 0 ); m_pNode = nullptr; m_pPin = nullptr; } public: DragMode m_mode = DragMode::None; ImVec2 m_startValue = ImVec2( 0, 0 ); ImVec2 m_lastFrameDragDelta = ImVec2( 0, 0 ); BaseNode* m_pNode = nullptr; Flow::Pin* m_pPin = nullptr; bool m_leftMouseClickDetected = false; bool m_middleMouseClickDetected = false; }; // Context menu state //------------------------------------------------------------------------- struct ContextMenuState { inline bool IsNodeContextMenu() const { return m_pNode != nullptr; } inline Flow::Node* GetAsFlowNode() const{ return Cast<Flow::Node>( m_pNode ); } 
inline SM::Node* GetAsStateMachineNode() const{ return Cast<SM::Node>( m_pNode ); } void Reset() { m_mouseCanvasPos = ImVec2(); m_pNode = nullptr; m_menuOpened = false; m_pPin = nullptr; } public: ImVec2 m_mouseCanvasPos; BaseNode* m_pNode = nullptr; Flow::Pin* m_pPin = nullptr; bool m_menuOpened = false; }; public: bool HasFocus() const { return m_hasFocus; } //------------------------------------------------------------------------- void SetGraphToView( BaseGraph* pGraph, bool tryMaintainSelection = false ); inline BaseGraph* GetViewedGraph() { return m_pGraph; }; inline BaseGraph const* GetViewedGraph() const { return m_pGraph; } inline bool IsViewingFlowGraph() const { return m_pGraph != nullptr && IsOfType<FlowGraph>( m_pGraph ); } inline bool IsViewingStateMachineGraph() const { return m_pGraph != nullptr && IsOfType<StateMachineGraph>( m_pGraph ); } inline FlowGraph* GetFlowGraph() const { return Cast<FlowGraph>( m_pGraph ); } inline StateMachineGraph* GetStateMachineGraph() const { return Cast<StateMachineGraph>( m_pGraph ); } // Drawing and view //------------------------------------------------------------------------- void UpdateAndDraw( TypeSystem::TypeRegistry const& typeRegistry, float childHeightOverride = 0.0f, void* pUserContext = nullptr ); void ResetView(); void CenterView( BaseNode const* pNode ); // Selection //------------------------------------------------------------------------- // This returns whether any selection changes occurred this update, will be cleared on each call to draw inline bool HasSelectionChanged() const { return m_selectionChanged; } inline void SelectNode( BaseNode const* pNode ); inline bool HasSelectedNodes() const { return !m_selectedNodes.empty(); } inline bool IsNodeSelected( BaseNode const* pNode ) const { return eastl::find( m_selectedNodes.begin(), m_selectedNodes.end(), pNode ) != m_selectedNodes.end(); } inline TVector<SelectedNode> const& GetSelectedNodes() const { return m_selectedNodes; } void 
ClearSelection(); protected: void ResetInternalState(); // Node //------------------------------------------------------------------------- inline ImRect GetNodeCanvasRect( BaseNode* pNode ) const { ImVec2 const nodeMargin = pNode->GetNodeMargin(); ImVec2 const rectMin = ImVec2( pNode->GetCanvasPosition() ) - nodeMargin; ImVec2 const rectMax = ImVec2( pNode->GetCanvasPosition() ) + pNode->GetSize() + nodeMargin; return ImRect( rectMin, rectMax ); } inline ImRect GetNodeWindowRect( BaseNode* pNode ) const { ImVec2 const nodeMargin = pNode->GetNodeMargin(); ImVec2 const rectMin = ImVec2( pNode->GetCanvasPosition() ) - nodeMargin - m_viewOffset; ImVec2 const rectMax = ImVec2( pNode->GetCanvasPosition() ) + pNode->GetSize() + nodeMargin - m_viewOffset; return ImRect( rectMin, rectMax ); } void DestroySelectedNodes(); // Visual //------------------------------------------------------------------------- bool BeginDrawCanvas( float childHeightOverride ); void EndDrawCanvas(); // User implementable function to draw any additional information needed in the graph (called after everything is drawn) virtual void DrawExtraInformation( DrawContext const& ctx ) {} // Dragging //------------------------------------------------------------------------- inline DragMode GetDragMode() const { return m_dragState.m_mode; } inline bool IsNotDragging() const { return GetDragMode() == DragMode::None; } inline bool IsDraggingView() const { return GetDragMode() == DragMode::View; } inline bool IsDraggingSelection() const { return GetDragMode() == DragMode::Selection; } inline bool IsDraggingNode() const { return GetDragMode() == DragMode::Node; } inline bool IsDraggingConnection() const { return GetDragMode() == DragMode::Connection; } virtual void StartDraggingView( DrawContext const& ctx ); virtual void OnDragView( DrawContext const& ctx ); virtual void StopDraggingView( DrawContext const& ctx ); virtual void StartDraggingSelection( DrawContext const& ctx ); virtual void OnDragSelection( 
DrawContext const& ctx ); virtual void StopDraggingSelection( DrawContext const& ctx ); virtual void StartDraggingNode( DrawContext const& ctx ); virtual void OnDragNode( DrawContext const& ctx ); virtual void StopDraggingNode( DrawContext const& ctx ); virtual void StartDraggingConnection( DrawContext const& ctx ); virtual void OnDragConnection( DrawContext const& ctx ); virtual void StopDraggingConnection( DrawContext const& ctx ); // Selection //------------------------------------------------------------------------- void UpdateSelection( BaseNode* pNewSelectedNode ); void UpdateSelection( TVector<SelectedNode>&& newSelection ); void AddToSelection( BaseNode* pNodeToAdd ); void RemoveFromSelection( BaseNode* pNodeToRemove ); // User implementable custom selection change handler virtual void OnSelectionChanged( TVector<SelectedNode> const& oldSelection, TVector<SelectedNode> const& newSelection ) {} // Context Menu //------------------------------------------------------------------------- inline bool IsContextMenuOpen() const { return m_contextMenuState.m_menuOpened; } void HandleContextMenu( DrawContext const& ctx ); // Called when we create a context menu allowing derived views to set custom data virtual void FillContextMenuState(); // Called when we actually draw the context menu, is expected to call the specific draw functions for graphs and nodes virtual void DrawContextMenu(); // Custom context menu option for the graph virtual void DrawContextMenuForGraph(); // Custom context menu options for graph nodes virtual void DrawContextMenuForNode(); // Input Handling //------------------------------------------------------------------------- void HandleInput( TypeSystem::TypeRegistry const& typeRegistry, DrawContext const& ctx ); virtual void OnGraphDoubleClick( BaseGraph* pGraph ) {} virtual void OnNodeDoubleClick( BaseNode* pNode ) {} virtual void HandleDragAndDrop( ImVec2 const& mouseCanvasPos ) {} private: KRG_FORCE_INLINE void OnSelectionChangedInternal( 
TVector<SelectedNode> const& oldSelection, TVector<SelectedNode> const& newSelection ) { m_selectionChanged = true; OnSelectionChanged( oldSelection, newSelection ); } // Drawing //------------------------------------------------------------------------- void DrawStateMachineNodeTitle( DrawContext const& ctx, SM::Node* pNode, ImVec2& newNodeSize ); void DrawStateMachineNodeBackground( DrawContext const& ctx, SM::Node* pNode, ImVec2& newNodeSize ); void DrawStateMachineNode( DrawContext const& ctx, SM::Node* pNode ); void DrawStateMachineTransitionConduit( DrawContext const& ctx, SM::TransitionConduit* pTransition ); void DrawFlowNodeTitle( DrawContext const& ctx, Flow::Node* pNode, ImVec2& newNodeSize ); void DrawFlowNodePins( DrawContext const& ctx, Flow::Node* pNode, ImVec2& newNodeSize ); void DrawFlowNodeBackground( DrawContext const& ctx, Flow::Node* pNode, ImVec2& newNodeSize ); void DrawFlowNode( DrawContext const& ctx, Flow::Node* pNode ); // Copy/Paste //------------------------------------------------------------------------- void CopySelectedNodes( TypeSystem::TypeRegistry const& typeRegistry ); void PasteNodes( TypeSystem::TypeRegistry const& typeRegistry, ImVec2 const& canvasPastePosition ); protected: BaseGraph* m_pGraph = nullptr; BaseNode* m_pHoveredNode = nullptr; ImVec2 m_viewOffset = ImVec2( 0, 0 ); ImVec2 m_canvasSize = ImVec2( 0, 0 ); TVector<SelectedNode> m_selectedNodes; bool m_hasFocus = false; bool m_isViewHovered = false; bool m_selectionChanged = false; DragState m_dragState; ContextMenuState m_contextMenuState; // Flow graph state Flow::Pin* m_pHoveredPin = nullptr; UUID m_hoveredConnectionID; }; }
4,707
2,023
optparser = OptionParser() ... optparser.disable_interspersed_args() (opts, argv) = optparser.parse_args() ## argv now has the options to pass to the second program
51
445
#include "multiverso/blob.h"
#include "multiverso/util/allocator.h"
#include "multiverso/util/log.h"

namespace multiverso {

// Allocates an uninitialized, ref-counted buffer of `size` bytes.
Blob::Blob(size_t size) : size_(size) {
  CHECK(size > 0);
  data_ = Allocator::Get()->Alloc(size);
}

// Construct from external memory. Will copy a new piece
Blob::Blob(const void* data, size_t size) : size_(size) {
  data_ = Allocator::Get()->Alloc(size);
  memcpy(data_, data, size_);
}

// Non-const overload: also copies; does NOT take ownership of `data`.
Blob::Blob(void* data, size_t size) : size_(size) {
  data_ = Allocator::Get()->Alloc(size);
  memcpy(data_, data, size_);
}

// Shallow copy: share rhs's buffer and add a reference to it.
Blob::Blob(const Blob& rhs) {
  if (rhs.size() != 0) {
    Allocator::Get()->Refer(rhs.data_);
  }
  this->data_ = rhs.data_;
  this->size_ = rhs.size_;
}

Blob::~Blob() {
  if (data_ != nullptr) {
    Allocator::Get()->Free(data_);
  }
}

// Shallow copy by default. Call \ref CopyFrom for a deep copy
void Blob::operator=(const Blob& rhs) {
  // Self-assignment guard: without it the Free() below could drop our only
  // reference to the very buffer we are about to adopt.
  if (this == &rhs) return;
  if (rhs.size() != 0) {
    Allocator::Get()->Refer(rhs.data_);
  }
  // BUG FIX: release the buffer this blob currently owns before adopting
  // rhs's. The previous code overwrote data_ without a matching Free(),
  // leaking one reference on every reassignment (copy-ctor/dtor show the
  // intended Refer/Free pairing).
  if (data_ != nullptr) {
    Allocator::Get()->Free(data_);
  }
  this->data_ = rhs.data_;
  this->size_ = rhs.size_;
}

}  // namespace multiverso
449
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.git; import org.netbeans.modules.git.client.GitProgressSupport; import java.beans.PropertyChangeListener; import java.beans.PropertyChangeSupport; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.concurrent.Callable; import java.util.logging.Level; import java.util.logging.Logger; import org.netbeans.libs.git.GitException; import org.netbeans.libs.git.GitRepository; import org.netbeans.modules.git.client.CredentialsCallback; import org.netbeans.modules.git.client.GitClient; import org.netbeans.modules.git.ui.shelve.ShelveChangesAction; import org.netbeans.modules.git.utils.GitUtils; import org.netbeans.modules.versioning.shelve.ShelveChangesActionsRegistry; import org.netbeans.modules.versioning.spi.VersioningSupport; import org.netbeans.modules.versioning.util.RootsToFile; import org.netbeans.modules.versioning.util.Utils; import org.netbeans.modules.versioning.util.VCSHyperlinkProvider; import org.openide.util.Lookup; import 
org.openide.util.Lookup.Result;
import org.openide.util.RequestProcessor;

/**
 * Singleton facade of the Git versioning module: owns the file status cache,
 * the annotator, the filesystem interceptor and the per-repository request
 * processors, and caches which folders are (un)versioned.
 *
 * @author ondra
 */
public final class Git {

    private static Git instance;
    private Annotator annotator;
    private FilesystemInterceptor interceptor;
    public static final Logger LOG = Logger.getLogger("org.netbeans.modules.git"); //NOI18N
    public static final Logger STATUS_LOG = Logger.getLogger("org.netbeans.modules.git.status"); //NOI18N;
    private final PropertyChangeSupport support = new PropertyChangeSupport(this);
    private FileStatusCache fileStatusCache;
    // lazily created request processors, one per repository root (null key = shared pool)
    private HashMap<File, RequestProcessor> processorsToUrl;
    public static final String PROP_ANNOTATIONS_CHANGED = "annotationsChanged"; // NOI18N
    static final String PROP_VERSIONED_FILES_CHANGED = "versionedFilesChanged"; // NOI18N
    private RootsToFile rootsToFile;
    private GitVCS gitVCS;
    private Result<? extends VCSHyperlinkProvider> hpResult;
    private HistoryProvider historyProvider;

    private Git () {}

    /** Lazily creates and initializes the singleton instance. */
    public static synchronized Git getInstance () {
        if (instance == null) {
            instance = new Git();
            instance.init();
        }
        return instance;
    }

    // One-time setup: cache, annotator, interceptor and the root lookup helper.
    private void init() {
        fileStatusCache = new FileStatusCache();
        annotator = new Annotator();
        interceptor = new FilesystemInterceptor();
        int statisticsFrequency;
        String s = System.getProperty("git.root.stat.frequency", "0"); //NOI18N
        try {
            statisticsFrequency = Integer.parseInt(s);
        } catch (NumberFormatException ex) {
            statisticsFrequency = 0;
        }
        rootsToFile = new RootsToFile(new RootsToFile.Callback() {
            @Override
            public boolean repositoryExistsFor (File file) {
                return GitUtils.repositoryExistsFor(file);
            }

            @Override
            public File getTopmostManagedAncestor (File file) {
                // skip exceptions - hidden folders, we already think the file is under a repository.
                return Git.this.getTopmostManagedAncestor(file, true);
            }
        }, Logger.getLogger("org.netbeans.modules.git.RootsToFile"), statisticsFrequency); //NOI18N
        ModuleLifecycleManager.getInstance().disableOtherModules();
    }

    // Wires the VCS implementation into the cache/listener infrastructure.
    void registerGitVCS(final GitVCS gitVCS) {
        this.gitVCS = gitVCS;
        fileStatusCache.addPropertyChangeListener(gitVCS);
        addPropertyChangeListener(gitVCS);
        getRequestProcessor().post(new Runnable() {
            @Override
            public void run () {
                ShelveChangesActionsRegistry.getInstance().registerAction(gitVCS, ShelveChangesAction.getProvider());
            }
        });
    }

    public Annotator getVCSAnnotator() {
        return annotator;
    }

    FilesystemInterceptor getVCSInterceptor() {
        return interceptor;
    }

    /**
     * Exports the HEAD revision of {@code workingCopy} into {@code originalFile};
     * deletes {@code originalFile} when the content cannot be retrieved.
     */
    void getOriginalFile (File workingCopy, File originalFile) {
        File repository = getRepositoryRoot(workingCopy);
        if (repository != null) {
            GitClient client = null;
            try {
                client = getClient(repository);
                FileOutputStream fos = new FileOutputStream(originalFile);
                boolean ok;
                try {
                    ok = client.catFile(workingCopy, GitUtils.HEAD, fos, GitUtils.NULL_PROGRESS_MONITOR);
                } finally {
                    fos.close();
                }
                if (!ok) {
                    originalFile.delete();
                }
            } catch (java.io.FileNotFoundException ex) {
                LOG.log(Level.SEVERE, "Parent folder [{0}] does not exist", originalFile.getParentFile()); //NOI18N
                LOG.log(Level.SEVERE, null, ex);
            } catch (GitException.MissingObjectException ex) {
                // file is not tracked in HEAD: not an error, just no original content
                LOG.log(Level.FINE, null, ex); //NOI18N
                originalFile.delete();
            } catch (GitException ex) {
                LOG.log(Level.INFO, "Error retrieving file", ex); //NOI18N
                originalFile.delete();
            } catch (IOException ex) {
                LOG.log(Level.INFO, "IO exception", ex); //NOI18N
            } finally {
                if (client != null) {
                    client.release();
                }
            }
        }
    }

    /**
     * Tests whether a file or directory should receive the STATUS_NOTVERSIONED_NOTMANAGED status.
     * @param file a file or directory
     * @return false if the file should receive the STATUS_NOTVERSIONED_NOTMANAGED status, true otherwise
     */
    public boolean isManaged(File file) {
        return VersioningSupport.getOwner(file) instanceof GitVCS && !GitUtils.isPartOfGitMetadata(file);
    }

    public FileStatusCache getFileStatusCache() {
        return fileStatusCache;
    }

    public File getRepositoryRoot (File file) {
        return rootsToFile.getRepositoryRoot(file);
    }

    public GitClient getClient (File repository) throws GitException {
        return getClient(repository, null);
    }

    public GitClient getClient (File repository, GitProgressSupport progressSupport) throws GitException {
        return getClient(repository, progressSupport, true);
    }

    // Creates a client bound to the canonical repository-root instance so
    // callers can synchronize on it.
    public GitClient getClient (File repository, GitProgressSupport progressSupport, boolean handleAuthenticationIssues) throws GitException {
        GitClient client = new GitClient(singleInstanceRepositoryRoot(repository), progressSupport, handleAuthenticationIssues);
        client.setCallback(new CredentialsCallback());
        return client;
    }

    public GitRepository getRepository (File repository) throws GitException {
        return GitRepository.getInstance(singleInstanceRepositoryRoot(repository));
    }

    public RequestProcessor getRequestProcessor() {
        return getRequestProcessor(null);
    }

    /**
     * @param repositoryRoot repository root or {@code null}
     */
    public RequestProcessor getRequestProcessor (File repositoryRoot) {
        if(processorsToUrl == null) {
            processorsToUrl = new HashMap<File, RequestProcessor>();
        }
        RequestProcessor rp = processorsToUrl.get(repositoryRoot);
        if (rp == null) {
            if(repositoryRoot == null) {
                // shared pool (high throughput) for tasks not bound to one repository
                String rpName = "Git - ANY_KEY";//NOI18N
                rp = new RequestProcessor(rpName, 50, true);
            } else {
                // throughput 1: serializes tasks per repository
                String rpName = "Git - " + repositoryRoot.toString();//NOI18N
                rp = new RequestProcessor(rpName, 1, true);
            }
            processorsToUrl.put(repositoryRoot, rp);
        }
        return rp;
    }

    public void refreshAllAnnotations() {
        support.firePropertyChange(PROP_ANNOTATIONS_CHANGED, null, null);
    }

    public void addPropertyChangeListener(PropertyChangeListener listener) {
        support.addPropertyChangeListener(listener);
    }

    public void removePropertyChangeListener(PropertyChangeListener listener) {
        support.removePropertyChangeListener(listener);
    }

    // Called when HEAD moves; asks the VCS to recompute status of the files.
    public void headChanged (Set<File> files) {
        assert gitVCS != null;
        gitVCS.refreshStatus(files);
    }

    public void versionedFilesChanged () {
        clearAncestorCaches();
        support.firePropertyChange(PROP_VERSIONED_FILES_CHANGED, null, null);
    }

    /**
     * Runs a given callable and disable listening for external repository events for the time the callable is running.
     * Refreshes cached modification timestamp of metadata for the given git repository after.
     * @param callable code to run
     * @param repository
     * @param commandName name of the git command if available
     */
    public <T> T runWithoutExternalEvents(File repository, String commandName, Callable<T> callable) throws Exception {
        return getVCSInterceptor().runWithoutExternalEvents(repository, commandName, callable);
    }

    /**
     * Returns a set of known repository roots (those visible or open in IDE)
     * @param repositoryRoot
     * @return
     */
    public Set<File> getSeenRoots (File repositoryRoot) {
        return getVCSInterceptor().getSeenRoots(repositoryRoot);
    }

    // caches of discovered managed roots and of folders known to be unversioned
    private Set<File> knownRoots = Collections.synchronizedSet(new HashSet<File>());
    private final Set<File> unversionedParents = Collections.synchronizedSet(new HashSet<File>(20));

    public File getTopmostManagedAncestor (File file) {
        return getTopmostManagedAncestor(file, false);
    }

    // Walks up from `file` to find the highest folder owned by a git
    // repository; `noExceptions` skips the VersioningSupport exclusion check.
    // Results are cached in knownRoots / unversionedParents.
    private File getTopmostManagedAncestor (File file, boolean noExceptions) {
        long t = System.currentTimeMillis();
        LOG.log(Level.FINE, "getTopmostManagedParent {0}", new Object[] { file });
        if(unversionedParents.contains(file)) {
            LOG.fine(" cached as unversioned");
            return null;
        }
        LOG.log(Level.FINE, "getTopmostManagedParent {0}", new Object[] { file });
        File parent = getKnownParent(file);
        if(parent != null) {
            LOG.log(Level.FINE, " getTopmostManagedParent returning known parent {0}", parent);
            return parent;
        }
        if (GitUtils.isPartOfGitMetadata(file)) {
            // jump out of the metadata folder to its owning work tree
            for (;file != null; file = file.getParentFile()) {
                if (GitUtils.isAdministrative(file)) {
                    file = file.getParentFile();
                    // the parent folder of .hg metadata cannot be unversioned, it's nonsense
                    unversionedParents.remove(file);
                    break;
                }
            }
        }
        Set<File> done = new HashSet<File>();
        File topmost = null;
        for (;file != null; file = file.getParentFile()) {
            if(unversionedParents.contains(file)) {
                LOG.log(Level.FINE, " already known as unversioned {0}", new Object[] { file });
                break;
            }
            if (!noExceptions && VersioningSupport.isExcluded(file)) break;
            // is the folder a special one where metadata should not be looked for?
            boolean forbiddenFolder = Utils.isForbiddenFolder(file);
            if (!forbiddenFolder && GitUtils.repositoryExistsFor(file)) {
                LOG.log(Level.FINE, " found managed parent {0}", new Object[] { file });
                // all folders added before must be removed, they ARE in fact managed by git
                done.clear();
                topmost = file;
                if (topmost.getParentFile() == null) {
                    LOG.log(Level.WARNING, "found managed root folder {0}", file); //NOI18N
                }
            } else {
                LOG.log(Level.FINE, " found unversioned {0}", new Object[] { file });
                if(file.exists()) { // could be created later ...
                    done.add(file);
                }
            }
        }
        if(done.size() > 0) {
            LOG.log(Level.FINE, " storing unversioned");
            unversionedParents.addAll(done);
        }
        if(LOG.isLoggable(Level.FINE)) {
            LOG.log(Level.FINE, " getTopmostManagedParent returns {0} after {1} millis", new Object[] { topmost, System.currentTimeMillis() - t });
        }
        if(topmost != null) {
            if (knownRoots.add(topmost)) {
                String homeDir = System.getProperty("user.home"); //NOI18N
                if (homeDir != null && homeDir.startsWith(topmost.getAbsolutePath())) {
                    LOG.log(Level.WARNING, "Home folder {0} lies under a git versioned root {1}. " //NOI18N
                            + "Expecting lots of performance issues.", new Object[] { homeDir, topmost }); //NOI18N
                }
            }
        }
        return topmost;
    }

    private File singleInstanceRepositoryRoot (File repository) {
        // get the only instance for the repository folder, so we can synchronize on it
        File repositoryFolder = getRepositoryRoot(repository);
        if (repositoryFolder != null && repository.equals(repositoryFolder)) {
            repository = repositoryFolder;
        }
        return repository;
    }

    // Returns the topmost cached root that is an ancestor of (or equal to) file.
    private File getKnownParent(File file) {
        File[] roots = knownRoots.toArray(new File[knownRoots.size()]);
        File knownParent = null;
        for (File r : roots) {
            if(!VersioningSupport.isExcluded(file) && Utils.isAncestorOrEqual(r, file) && (knownParent == null || Utils.isAncestorOrEqual(knownParent, r))) {
                knownParent = r;
            }
        }
        return knownParent;
    }

    public void clearAncestorCaches() {
        unversionedParents.clear();
        knownRoots.clear();
        rootsToFile.clear();
    }

    /**
     *
     * @return registered hyperlink providers
     */
    public List<VCSHyperlinkProvider> getHyperlinkProviders() {
        if (hpResult == null) {
            hpResult = (Result<? extends VCSHyperlinkProvider>) Lookup.getDefault().lookupResult(VCSHyperlinkProvider.class);
        }
        if (hpResult == null) {
            return Collections.<VCSHyperlinkProvider>emptyList();
        }
        Collection<? extends VCSHyperlinkProvider> providersCol = hpResult.allInstances();
        List<VCSHyperlinkProvider> providersList = new ArrayList<VCSHyperlinkProvider>(providersCol.size());
        providersList.addAll(providersCol);
        return Collections.unmodifiableList(providersList);
    }

    public Collection<File> getCreatedFolders () {
        return getVCSInterceptor().getCreatedFolders();
    }

    // Lazily created; not synchronized — presumably only used from one thread
    // (TODO confirm against callers).
    public HistoryProvider getHistoryProvider () {
        if (historyProvider == null) {
            historyProvider = new HistoryProvider();
        }
        return historyProvider;
    }
}
6,385
721
<gh_stars>100-1000 /* * [The "BSD licence"] * Copyright (c) 2010 <NAME> (JesusFreke) * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

package org.jf.dexlib.Util;

import java.util.HashMap;

/**
 * The dex access-flag bit values, together with which kind of item (class,
 * method or field) each flag may legally be applied to.
 *
 * Some bit values are deliberately reused across item kinds (e.g. 0x40 is
 * "volatile" on fields but "bridge" on methods), so decoding a raw flag value
 * must always be done relative to an item kind.
 */
public enum AccessFlags {
    PUBLIC(0x1, "public", true, true, true),
    PRIVATE(0x2, "private", true, true, true),
    PROTECTED(0x4, "protected", true, true, true),
    STATIC(0x8, "static", true, true, true),
    FINAL(0x10, "final", true, true, true),
    SYNCHRONIZED(0x20, "synchronized", false, true, false),
    VOLATILE(0x40, "volatile", false, false, true),
    BRIDGE(0x40, "bridge", false, true, false),
    TRANSIENT(0x80, "transient", false, false, true),
    VARARGS(0x80, "varargs", false, true, false),
    NATIVE(0x100, "native", false, true, false),
    INTERFACE(0x200, "interface", true, false, false),
    ABSTRACT(0x400, "abstract", true, true, false),
    STRICTFP(0x800, "strictfp", false, true, false),
    SYNTHETIC(0x1000, "synthetic", true, true, true),
    ANNOTATION(0x2000, "annotation", true, false, false),
    ENUM(0x4000, "enum", true, false, true),
    CONSTRUCTOR(0x10000, "constructor", false, true, false),
    DECLARED_SYNCHRONIZED(0x20000, "declared-synchronized", false, true, false);

    // The kind of item a flag value is being decoded for.
    private enum FlagTarget {
        CLASS,
        METHOD,
        FIELD
    }

    private final int value;
    private final String accessFlagName;
    private final boolean validForClass;
    private final boolean validForMethod;
    private final boolean validForField;

    //cache the array of all AccessFlags, because .values() allocates a new array for every call
    private final static AccessFlags[] allFlags;

    private static final HashMap<String, AccessFlags> accessFlagsByName;

    static {
        allFlags = AccessFlags.values();

        accessFlagsByName = new HashMap<String, AccessFlags>();
        for (AccessFlags accessFlag: allFlags) {
            accessFlagsByName.put(accessFlag.accessFlagName, accessFlag);
        }
    }

    private AccessFlags(int value, String accessFlagName, boolean validForClass, boolean validForMethod,
                        boolean validForField) {
        this.value = value;
        this.accessFlagName = accessFlagName;
        this.validForClass = validForClass;
        this.validForMethod = validForMethod;
        this.validForField = validForField;
    }

    // Whether this flag may appear on the given kind of item.
    private boolean isValidFor(FlagTarget target) {
        switch (target) {
            case CLASS:
                return validForClass;
            case METHOD:
                return validForMethod;
            default:
                return validForField;
        }
    }

    /**
     * Decodes a raw access-flag bit mask into the matching flags, in
     * declaration order. Shared implementation behind the three public
     * getAccessFlagsFor* methods, which previously triplicated this loop.
     */
    private static AccessFlags[] getAccessFlagsFor(int accessFlagValue, FlagTarget target) {
        // First pass counts matches so the result array can be sized exactly.
        int size = 0;
        for (AccessFlags accessFlag: allFlags) {
            if (accessFlag.isValidFor(target) && (accessFlagValue & accessFlag.value) != 0) {
                size++;
            }
        }

        AccessFlags[] accessFlags = new AccessFlags[size];
        int accessFlagsPosition = 0;
        for (AccessFlags accessFlag: allFlags) {
            if (accessFlag.isValidFor(target) && (accessFlagValue & accessFlag.value) != 0) {
                accessFlags[accessFlagsPosition++] = accessFlag;
            }
        }
        return accessFlags;
    }

    // Joins the flag names with single spaces, e.g. "public final".
    private static String formatAccessFlags(AccessFlags[] accessFlags) {
        int size = 0;
        for (AccessFlags accessFlag: accessFlags) {
            size += accessFlag.toString().length() + 1;
        }

        StringBuilder sb = new StringBuilder(size);
        for (AccessFlags accessFlag: accessFlags) {
            sb.append(accessFlag.toString());
            sb.append(" ");
        }
        if (accessFlags.length > 0) {
            sb.delete(sb.length() - 1, sb.length());
        }
        return sb.toString();
    }

    /** @return the flags set in {@code accessFlagValue} that are valid on a class. */
    public static AccessFlags[] getAccessFlagsForClass(int accessFlagValue) {
        return getAccessFlagsFor(accessFlagValue, FlagTarget.CLASS);
    }

    public static String formatAccessFlagsForClass(int accessFlagValue) {
        return formatAccessFlags(getAccessFlagsForClass(accessFlagValue));
    }

    /** @return the flags set in {@code accessFlagValue} that are valid on a method. */
    public static AccessFlags[] getAccessFlagsForMethod(int accessFlagValue) {
        return getAccessFlagsFor(accessFlagValue, FlagTarget.METHOD);
    }

    public static String formatAccessFlagsForMethod(int accessFlagValue) {
        return formatAccessFlags(getAccessFlagsForMethod(accessFlagValue));
    }

    /** @return the flags set in {@code accessFlagValue} that are valid on a field. */
    public static AccessFlags[] getAccessFlagsForField(int accessFlagValue) {
        return getAccessFlagsFor(accessFlagValue, FlagTarget.FIELD);
    }

    public static String formatAccessFlagsForField(int accessFlagValue) {
        return formatAccessFlags(getAccessFlagsForField(accessFlagValue));
    }

    /** @return the flag whose textual name is {@code accessFlag}, or null if unknown. */
    public static AccessFlags getAccessFlag(String accessFlag) {
        return accessFlagsByName.get(accessFlag);
    }

    public int getValue() {
        return value;
    }

    public String toString() {
        return accessFlagName;
    }
}
2,574
8,851
<gh_stars>1000+
# -*- coding: utf-8 -*-
from django.db import migrations


def grant_instance_level_collection_management_permissions(apps, schema_editor):
    """
    Give the groups who currently manage all collections permission to manage root
    collections
    """
    # Use historical models so the migration works regardless of current code.
    Collection = apps.get_model('wagtailcore.Collection')
    Group = apps.get_model('auth.Group')
    GroupCollectionPermission = apps.get_model('wagtailcore.GroupCollectionPermission')
    Permission = apps.get_model('auth.Permission')

    # Groups holding any model-level collection management permission; one row
    # per (group, permission) pair.
    groups_w_permissions = Group.objects.filter(
        permissions__content_type__app_label='wagtailcore',
        permissions__content_type__model='collection',
        permissions__codename__in=['add_collection', 'change_collection', 'delete_collection']
    ).values('id', 'name', 'permissions__id', 'permissions__codename')

    # Mirror each model-level permission as an instance-level permission on
    # every root collection.
    for root_collection in Collection.objects.filter(depth=1).all():
        for row in groups_w_permissions:
            GroupCollectionPermission.objects.create(
                group_id=row['id'],
                permission_id=row['permissions__id'],
                collection_id=root_collection.id,
            )
    # Now remove the model-level permissions for collections
    collection_permissions = Permission.objects.filter(
        content_type__app_label='wagtailcore',
        content_type__model='collection',
        codename__in=['add_collection', 'change_collection', 'delete_collection'],
    )
    for perm in collection_permissions.all():
        # Detach the permission from all groups; the Permission rows themselves
        # are kept so the reverse migration can re-attach them.
        perm.group_set.clear()


def revert_to_model_level_collection_management_permissions(apps, schema_editor):
    """
    Give model-level permission to all groups who have that permission on the root
    collection
    """
    Collection = apps.get_model('wagtailcore.Collection')
    GroupCollectionPermission = apps.get_model('wagtailcore.GroupCollectionPermission')

    root_collections = Collection.objects.filter(depth=1).all()
    # Instance-level collection management permissions granted on any root.
    group_collection_permissions = GroupCollectionPermission.objects.filter(
        permission__content_type__app_label='wagtailcore',
        permission__content_type__model='collection',
        permission__codename__in=['add_collection', 'change_collection', 'delete_collection'],
        collection__in=root_collections,
    ).select_related('group', 'permission')

    # Re-grant each permission at the model level.
    for row in group_collection_permissions.all():
        row.group.permissions.add(row.permission)

    # Now delete the instance-level collection management permissions
    group_collection_permissions.all().delete()


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailcore', '0065_log_entry_uuid'),
    ]

    operations = [
        migrations.RunPython(
            grant_instance_level_collection_management_permissions,
            revert_to_model_level_collection_management_permissions
        )
    ]
999
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.apimanagement.models; import com.azure.core.annotation.Fluent; import com.azure.core.util.logging.ClientLogger; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; /** Authorization header information. */ @Fluent public final class BackendAuthorizationHeaderCredentials { @JsonIgnore private final ClientLogger logger = new ClientLogger(BackendAuthorizationHeaderCredentials.class); /* * Authentication Scheme name. */ @JsonProperty(value = "scheme", required = true) private String scheme; /* * Authentication Parameter value. */ @JsonProperty(value = "parameter", required = true) private String parameter; /** * Get the scheme property: Authentication Scheme name. * * @return the scheme value. */ public String scheme() { return this.scheme; } /** * Set the scheme property: Authentication Scheme name. * * @param scheme the scheme value to set. * @return the BackendAuthorizationHeaderCredentials object itself. */ public BackendAuthorizationHeaderCredentials withScheme(String scheme) { this.scheme = scheme; return this; } /** * Get the parameter property: Authentication Parameter value. * * @return the parameter value. */ public String parameter() { return this.parameter; } /** * Set the parameter property: Authentication Parameter value. * * @param parameter the parameter value to set. * @return the BackendAuthorizationHeaderCredentials object itself. */ public BackendAuthorizationHeaderCredentials withParameter(String parameter) { this.parameter = parameter; return this; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. 
*/ public void validate() { if (scheme() == null) { throw logger .logExceptionAsError( new IllegalArgumentException( "Missing required property scheme in model BackendAuthorizationHeaderCredentials")); } if (parameter() == null) { throw logger .logExceptionAsError( new IllegalArgumentException( "Missing required property parameter in model BackendAuthorizationHeaderCredentials")); } } }
983
575
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "chrome/browser/ui/views/extensions/settings_overridden_dialog_view.h"

#include "base/path_service.h"
#include "base/strings/utf_string_conversions.h"
#include "base/time/time.h"
#include "build/build_config.h"
#include "chrome/app/vector_icons/vector_icons.h"
#include "chrome/browser/extensions/chrome_test_extension_loader.h"
#include "chrome/browser/profiles/profile.h"
#include "chrome/browser/search_engines/template_url_service_factory.h"
#include "chrome/browser/ui/browser.h"
#include "chrome/browser/ui/browser_window.h"
#include "chrome/browser/ui/extensions/settings_api_bubble_helpers.h"
#include "chrome/browser/ui/extensions/settings_overridden_dialog_controller.h"
#include "chrome/browser/ui/tabs/tab_strip_model.h"
#include "chrome/browser/ui/test/test_browser_dialog.h"
#include "chrome/common/chrome_paths.h"
#include "chrome/common/webui_url_constants.h"
#include "chrome/test/base/search_test_utils.h"
#include "chrome/test/base/ui_test_utils.h"
#include "components/search_engines/search_engines_test_util.h"
#include "components/search_engines/template_url.h"
#include "components/search_engines/template_url_service.h"
#include "content/public/test/browser_test.h"
#include "content/public/test/browser_test_utils.h"
#include "ui/views/test/widget_test.h"

namespace {

// A stub dialog controller that displays the dialog with the supplied params.
class TestDialogController : public SettingsOverriddenDialogController {
 public:
  TestDialogController(ShowParams show_params,
                       base::Optional<DialogResult>* dialog_result_out)
      : show_params_(std::move(show_params)),
        dialog_result_out_(dialog_result_out) {
    DCHECK(dialog_result_out_);
  }
  TestDialogController(const TestDialogController&) = delete;
  TestDialogController& operator=(const TestDialogController&) = delete;
  ~TestDialogController() override = default;

 private:
  bool ShouldShow() override { return true; }
  ShowParams GetShowParams() override { return show_params_; }
  void OnDialogShown() override {}
  // Records the result exactly once so the test can assert on it later.
  void HandleDialogResult(DialogResult result) override {
    ASSERT_FALSE(dialog_result_out_->has_value());
    *dialog_result_out_ = result;
  }

  const ShowParams show_params_;

  // The result to populate. Must outlive this object.
  base::Optional<DialogResult>* const dialog_result_out_;
};

}  // namespace

class SettingsOverriddenDialogViewBrowserTest : public DialogBrowserTest {
 public:
  // Which default search engine a scenario should start from.
  enum class DefaultSearch {
    kUseDefault,
    kUseNonGoogleFromDefaultList,
    kUseNewSearch,
  };

  SettingsOverriddenDialogViewBrowserTest() = default;
  ~SettingsOverriddenDialogViewBrowserTest() override = default;

  void SetUpOnMainThread() override {
    DialogBrowserTest::SetUpOnMainThread();
    // Scenarios below mutate the default search engine, which requires the
    // TemplateURLService to have finished loading.
    search_test_utils::WaitForTemplateURLServiceToLoad(
        TemplateURLServiceFactory::GetForProfile(browser()->profile()));
  }

  // Dispatches to the scenario identified by |name| (from the test name).
  void ShowUi(const std::string& name) override {
    test_name_ = name;
    if (name == "SimpleDialog") {
      ShowSimpleDialog(false, browser());
    } else if (name == "SimpleDialogWithIcon") {
      ShowSimpleDialog(true, browser());
    } else if (name == "NtpOverriddenDialog_BackToDefault") {
      ShowNtpOverriddenDefaultDialog();
    } else if (name == "NtpOverriddenDialog_Generic") {
      ShowNtpOverriddenGenericDialog();
    } else if (name == "SearchOverriddenDialog_BackToGoogle") {
      ShowSearchOverriddenDialog(DefaultSearch::kUseDefault);
    } else if (name == "SearchOverriddenDialog_BackToOther") {
      ShowSearchOverriddenDialog(DefaultSearch::kUseNonGoogleFromDefaultList);
    } else if (name == "SearchOverriddenDialog_Generic") {
      ShowSearchOverriddenDialog(DefaultSearch::kUseNewSearch);
    } else {
      NOTREACHED() << name;
    }
  }

  // Creates, shows, and returns a dialog anchored to the given |browser|. The
  // dialog is owned by the views framework.
  SettingsOverriddenDialogView* ShowSimpleDialog(bool show_icon,
                                                 Browser* browser) {
    SettingsOverriddenDialogController::ShowParams params{
        u"Settings overridden dialog title",
        base::ASCIIToUTF16(
            "Settings overriden dialog body, which is quite a bit "
            "longer than the title alone")};
    if (show_icon)
      params.icon = &kProductIcon;
    auto* dialog = new SettingsOverriddenDialogView(
        std::make_unique<TestDialogController>(std::move(params),
                                               &dialog_result_));
    dialog->Show(browser->window()->GetNativeWindow());

    return dialog;
  }

  void ShowNtpOverriddenDefaultDialog() {
    // Load an extension overriding the NTP and open a new tab to trigger the
    // dialog.
    LoadExtensionOverridingNewTab();
    NavigateToNewTab();
  }

  void ShowNtpOverriddenGenericDialog() {
    // With a non-Google default search, the generic (non-"back to default")
    // variant of the dialog is shown.
    SetNewSearchProvider(DefaultSearch::kUseNonGoogleFromDefaultList);
    LoadExtensionOverridingNewTab();
    NavigateToNewTab();
  }

  void ShowSearchOverriddenDialog(DefaultSearch search) {
    SetNewSearchProvider(search);
    LoadExtensionOverridingSearch();
    PerformSearchFromOmnibox();
  }

  bool VerifyUi() override {
    if (!DialogBrowserTest::VerifyUi())
      return false;

    if (base::StartsWith(test_name_, "SearchOverriddenDialog",
                         base::CompareCase::SENSITIVE)) {
      // Note: Because this is a test, we don't actually expect this navigation
      // to succeed. But we can still check that the user was sent to
      // example.com (the new search engine).
      EXPECT_EQ("www.example.com", browser()
                                       ->tab_strip_model()
                                       ->GetActiveWebContents()
                                       ->GetLastCommittedURL()
                                       .host_piece());
    }

    return true;
  }

  // The result recorded by the stub controller, if the dialog was resolved.
  base::Optional<SettingsOverriddenDialogController::DialogResult>
  dialog_result() const {
    return dialog_result_;
  }

 private:
  void LoadExtensionOverridingNewTab() {
    base::FilePath test_root_path;
    ASSERT_TRUE(base::PathService::Get(chrome::DIR_TEST_DATA,
                                       &test_root_path));

    Profile* const profile = browser()->profile();
    scoped_refptr<const extensions::Extension> extension =
        extensions::ChromeTestExtensionLoader(profile).LoadExtension(
            test_root_path.AppendASCII("extensions/api_test/override/newtab"));
    ASSERT_TRUE(extension);
  }

  void LoadExtensionOverridingSearch() {
    base::FilePath test_root_path;
    ASSERT_TRUE(base::PathService::Get(chrome::DIR_TEST_DATA,
                                       &test_root_path));

    Profile* const profile = browser()->profile();
    scoped_refptr<const extensions::Extension> extension =
        extensions::ChromeTestExtensionLoader(profile).LoadExtension(
            test_root_path.AppendASCII("extensions/search_provider_override"));
    ASSERT_TRUE(extension);
  }

  void NavigateToNewTab() {
    ui_test_utils::NavigateToURLWithDisposition(
        browser(), GURL(chrome::kChromeUINewTabURL),
        WindowOpenDisposition::NEW_FOREGROUND_TAB,
        ui_test_utils::BROWSER_TEST_WAIT_FOR_LOAD_STOP);
  }

  // Switches the profile's default search engine per |search|.
  void SetNewSearchProvider(DefaultSearch search) {
    if (search == DefaultSearch::kUseDefault)
      return;

    TemplateURLService* const template_url_service =
        TemplateURLServiceFactory::GetForProfile(browser()->profile());

    bool new_search_shows_in_default_list = true;
    // If the test requires a search engine that doesn't show in the default
    // list, we need to add one.
    if (search == DefaultSearch::kUseNewSearch) {
      new_search_shows_in_default_list = false;
      template_url_service->Add(
          std::make_unique<TemplateURL>(*GenerateDummyTemplateURLData("test")));
    }

    TemplateURLService::TemplateURLVector template_urls =
        template_url_service->GetTemplateURLs();
    // Pick the first non-Google engine matching the desired visibility.
    auto iter = std::find_if(
        template_urls.begin(), template_urls.end(),
        [template_url_service,
         new_search_shows_in_default_list](const TemplateURL* turl) {
          return !turl->HasGoogleBaseURLs(
                     template_url_service->search_terms_data()) &&
                 template_url_service->ShowInDefaultList(turl) ==
                     new_search_shows_in_default_list;
        });
    ASSERT_TRUE(iter != template_urls.end());
    template_url_service->SetUserSelectedDefaultSearchProvider(*iter);
  }

  void PerformSearchFromOmnibox() {
    ui_test_utils::SendToOmniboxAndSubmit(browser(), "Penguin",
                                          base::TimeTicks::Now());
    content::WaitForLoadStop(
        browser()->tab_strip_model()->GetActiveWebContents());
  }

  std::string test_name_;
  base::Optional<SettingsOverriddenDialogController::DialogResult>
      dialog_result_;
};

////////////////////////////////////////////////////////////////////////////////
// UI Browser Tests

IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_SimpleDialog) {
  ShowAndVerifyUi();
}

IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_SimpleDialogWithIcon) {
  ShowAndVerifyUi();
}

IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_NtpOverriddenDialog_BackToDefault) {
  // Force the post-install NTP UI to be enabled, so that we can test on all
  // platforms.
  extensions::SetNtpPostInstallUiEnabledForTesting(true);
  ShowAndVerifyUi();
  extensions::SetNtpPostInstallUiEnabledForTesting(false);
}

IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_NtpOverriddenDialog_Generic) {
  // Force the post-install NTP UI to be enabled, so that we can test on all
  // platforms.
  extensions::SetNtpPostInstallUiEnabledForTesting(true);
  ShowAndVerifyUi();
  extensions::SetNtpPostInstallUiEnabledForTesting(false);
}

// The chrome_settings_overrides API that allows extensions to override the
// default search provider is only available on Windows and Mac.
#if defined(OS_WIN) || defined(OS_MAC)
IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_SearchOverriddenDialog_BackToGoogle) {
  ShowAndVerifyUi();
}

IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_SearchOverriddenDialog_BackToOther) {
  ShowAndVerifyUi();
}

IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       InvokeUi_SearchOverriddenDialog_Generic) {
  ShowAndVerifyUi();
}
#endif  // defined(OS_WIN) || defined(OS_MAC)

////////////////////////////////////////////////////////////////////////////////
// Functional Browser Tests

// Verify that if the parent window is closed, the dialog notifies the
// controller that it was closed without any user action.
IN_PROC_BROWSER_TEST_F(SettingsOverriddenDialogViewBrowserTest,
                       DialogWindowClosed) {
  Browser* second_browser = CreateBrowser(browser()->profile());
  ASSERT_TRUE(second_browser);
  SettingsOverriddenDialogView* dialog =
      ShowSimpleDialog(false, second_browser);
  views::test::WidgetDestroyedWaiter widget_destroyed_waiter(
      dialog->GetWidget());
  CloseBrowserSynchronously(second_browser);
  widget_destroyed_waiter.Wait();

  ASSERT_TRUE(dialog_result());
  EXPECT_EQ(SettingsOverriddenDialogController::DialogResult::
                kDialogClosedWithoutUserAction,
            *dialog_result());
}
4,369
839
from typing import Any, Optional

from baserow.core.models import Application, TrashEntry, Group
from baserow.core.registries import application_type_registry
from baserow.core.signals import application_created, group_restored
from baserow.core.trash.registries import TrashableItemType, trash_item_type_registry


class ApplicationTrashableItemType(TrashableItemType):
    """Trash lifecycle hooks for a single application inside a group."""

    type = "application"
    model_class = Application

    def get_parent(self, trashed_item: Any, parent_id: int) -> Optional[Any]:
        # An application always lives directly inside its group.
        return trashed_item.group

    def get_name(self, trashed_item: Application) -> str:
        return trashed_item.name

    def trashed_item_restored(self, trashed_item: Application, trash_entry: TrashEntry):
        # Broadcast the restore exactly like a fresh creation so listeners
        # pick the application up again. No acting user is known here.
        application_created.send(self, application=trashed_item, user=None)

    def permanently_delete_item(
        self, trashed_item: Application, trash_item_lookup_cache=None
    ):
        """
        Permanently deletes the application, letting its specific application
        type clean up related rows first via its ``pre_delete`` hook.

        :return: The specific application instance that was deleted.
        """

        specific_application = trashed_item.specific
        specific_type = application_type_registry.get_by_model(specific_application)
        specific_type.pre_delete(specific_application)
        specific_application.delete()
        return specific_application


class GroupTrashableItemType(TrashableItemType):
    """Trash lifecycle hooks for an entire group."""

    type = "group"
    model_class = Group

    def get_parent(self, trashed_item: Any, parent_id: int) -> Optional[Any]:
        # Groups are top-level items and therefore have no parent.
        return None

    def get_name(self, trashed_item: Group) -> str:
        return trashed_item.name

    def trashed_item_restored(self, trashed_item: Group, trash_entry: TrashEntry):
        """
        Informs any clients that the group exists again.
        """

        for group_user in trashed_item.groupuser_set.all():
            group_restored.send(self, group_user=group_user, user=None)

    def permanently_delete_item(
        self, trashed_group: Group, trash_item_lookup_cache=None
    ):
        """
        Deletes the provided group and all of its applications permanently.
        """

        # Delete the applications through their trashable item type so every
        # application type's pre_delete hook still runs. The trashed manager
        # is used so applications already in the trash are removed as well.
        application_type = trash_item_type_registry.get("application")
        group_applications = (
            trashed_group.application_set(manager="objects_and_trash")
            .all()
            .select_related("group")
        )
        for group_application in group_applications:
            application_type.permanently_delete_item(group_application)

        trashed_group.delete()
997
4,640
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for auto_scheduler SearchTask creation, (de)serialization and
task-input bookkeeping."""

import numpy as np

import tempfile

import tvm
import tvm.testing
from tvm import auto_scheduler
from tvm.auto_scheduler.utils import get_const_tuple
from tvm.testing.auto_scheduler import (
    matmul_auto_scheduler_test,
    zero_rank_compute_auto_scheduler_test,
    zero_rank_reduce_auto_scheduler_test,
)


def test_search_task_add_task_input():
    """A SearchTask should register and remember its named input tensors."""
    # Reset the global input-buffer table so earlier tests cannot leak
    # registered inputs into this one.
    auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
    N = 64
    target = "llvm"
    test_input_0 = tvm.runtime.ndarray.empty((64, 64))
    test_input_1 = tvm.runtime.ndarray.empty((10, 20))
    test_input_2 = tvm.runtime.ndarray.empty((30, 40, 50))
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test",
        args=(N, N, N),
        target=target,
        task_inputs={
            "test_input_0": test_input_0,
            "test_input_1": test_input_1,
            "test_input_2": test_input_2,
        },
        task_inputs_overwrite=True,
    )

    # Input names are kept in registration order.
    assert len(task.task_input_names) == 3
    assert task.task_input_names[0] == "test_input_0"
    assert task.task_input_names[1] == "test_input_1"
    assert task.task_input_names[2] == "test_input_2"


def test_search_task_record():
    """Serializing then deserializing a SearchTask must round-trip its
    workload key, target, layout-rewrite option and task input names."""
    auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()
    N = 64
    target = "llvm"

    # Log with no task input
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test", args=(N, N, N), target=target
    )
    task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
    new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
    # TODO(jcf94): Check the compute dag & hardware parameter
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option

    # Log with 1 task input
    test_input_0 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test",
        args=(N, N, N),
        target=target,
        task_inputs={"test_input_0": test_input_0},
        task_inputs_overwrite=True,
    )
    task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
    new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option
    assert len(new_task.task_input_names) == 1
    assert new_task.task_input_names[0] == "test_input_0"

    # Log with multiple task inputs
    test_input_1 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func="matmul_auto_scheduler_test",
        args=(N, N, N),
        target=target,
        task_inputs={
            "test_input_0": test_input_0,
            "test_input_1": test_input_1,
        },
        task_inputs_overwrite=True,
    )
    task_record = auto_scheduler._ffi_api.SerializeSearchTask(task)
    new_task = auto_scheduler._ffi_api.DeserializeSearchTask(task_record)
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option
    assert len(new_task.task_input_names) == 2
    assert new_task.task_input_names[0] == "test_input_0"
    assert new_task.task_input_names[1] == "test_input_1"

    # Log with version 0.5: the old record format has no task-input section,
    # so deserializing it must yield an empty task_input_names list.
    v5_log = """["[\\\"matmul_auto_scheduler_test\\\", 64, 64, 64]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1]"""
    new_task = auto_scheduler._ffi_api.DeserializeSearchTask(v5_log)
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option
    assert len(new_task.task_input_names) == 0


def test_recover_measure_input_with_task_input():
    """A MeasureInput dumped to a record string and loaded back must recover
    the embedded SearchTask, including its task input names."""
    auto_scheduler.search_task.TASK_INPUT_BUFFER_TABLE.clear()

    # Since this file is tests for search_task, we only check the search_task here

    # Log with no task input
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test, args=(512, 512, 512), target="llvm"
    )
    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
    measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
    measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
    new_task = measure_log[0].task
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option

    # Log with 1 task input
    test_input_0 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(512, 512, 512),
        target="llvm",
        task_inputs={
            "test_input_0": test_input_0,
        },
        task_inputs_overwrite=True,
    )
    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
    measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
    measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
    new_task = measure_log[0].task
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option
    assert len(new_task.task_input_names) == 1
    assert new_task.task_input_names[0] == "test_input_0"

    # Log with multiple task inputs
    test_input_1 = tvm.runtime.ndarray.empty((64, 64))
    task = auto_scheduler.SearchTask(
        func=matmul_auto_scheduler_test,
        args=(512, 512, 512),
        target="llvm",
        task_inputs={
            "test_input_0": test_input_0,
            "test_input_1": test_input_1,
        },
        task_inputs_overwrite=True,
    )
    inp = auto_scheduler.measure.MeasureInput(task, task.compute_dag.init_state)
    res = auto_scheduler.measure.MeasureResult([0.1], 0, "", 0.2, 1)
    measure_record = auto_scheduler.measure_record.dump_record_to_string(inp, res)
    measure_log = auto_scheduler.measure_record.load_record_from_string(measure_record)
    new_task = measure_log[0].task
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option
    assert len(new_task.task_input_names) == 2
    assert new_task.task_input_names[0] == "test_input_0"
    assert new_task.task_input_names[1] == "test_input_1"

    # Log with version 0.5: old measure records carry no task-input section.
    v5_log = """{"i": [["[\\\"matmul_auto_scheduler_test\\\", 512, 512, 512]", "llvm -keys=cpu", [6, 64, 64, 0, 0, 0, 0, 0], "", 1], [[], []]], "r": [[0.1], 0, 0.2, 1], "v": "v0.6"}"""
    measure_log = auto_scheduler.measure_record.load_record_from_string(v5_log)
    new_task = measure_log[0].task
    assert task.workload_key == new_task.workload_key
    assert str(task.target) == str(new_task.target)
    assert str(task.target.host) == str(new_task.target.host)
    assert task.layout_rewrite_option == new_task.layout_rewrite_option
    assert len(new_task.task_input_names) == 0


if __name__ == "__main__":
    test_search_task_add_task_input()
    test_search_task_record()
    test_recover_measure_input_with_task_input()
3,591
2,127
<filename>spring-boot-freemarker/src/main/java/com/lance/freemaker/web/system/SystemLoginController.java package com.lance.freemaker.web.system; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpSession; import org.apache.commons.lang3.StringUtils; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.stereotype.Controller; import org.springframework.web.bind.annotation.RequestMapping; import org.springframework.web.bind.annotation.RequestMethod; import org.springframework.web.servlet.mvc.support.RedirectAttributes; import com.lance.freemaker.domain.UserInfo; import com.lance.freemaker.service.UserService; import com.lance.freemaker.utils.EncryptUtils; @Controller public class SystemLoginController { @Autowired private UserService userService; /** * Login GetMethod * @return */ @RequestMapping(value="system/login", method=RequestMethod.GET) public String login() { return "system/login.jsp"; } /** * Login PostMethod * @return */ @RequestMapping(value="system/login", method=RequestMethod.POST) public String login(HttpServletRequest request, RedirectAttributes rediect) { String account = request.getParameter("account"); String password = request.getParameter("password"); UserInfo info = userService.findByAccount(account); if(info == null) { rediect.addFlashAttribute("errorText", "该用户不存在"); return "redirect:/system/login"; } if(!StringUtils.equals(EncryptUtils.encryptMD5(password), info.getPassword())) { rediect.addFlashAttribute("errorText", "密码错误"); return "redirect:/system/login"; } request.getSession().setAttribute("sys_user_key", info); return "redirect:/system/admin/index"; } /** * Exit * @return */ @RequestMapping("system/logout") public String logout(HttpSession session) { session.invalidate(); return "redirect:/system/login"; } }
657
1,144
/** Generated Model - DO NOT CHANGE */ package org.compiere.model; import java.sql.ResultSet; import java.util.Properties; /** Generated Model for C_Mail * @author Adempiere (generated) */ @SuppressWarnings("javadoc") public class X_C_Mail extends org.compiere.model.PO implements I_C_Mail, org.compiere.model.I_Persistent { /** * */ private static final long serialVersionUID = -794005892L; /** Standard Constructor */ public X_C_Mail (Properties ctx, int C_Mail_ID, String trxName) { super (ctx, C_Mail_ID, trxName); /** if (C_Mail_ID == 0) { setC_Mail_ID (0); setIsInboundEMail (true); // Y } */ } /** Load Constructor */ public X_C_Mail (Properties ctx, ResultSet rs, String trxName) { super (ctx, rs, trxName); } /** Load Meta Data */ @Override protected org.compiere.model.POInfo initPO (Properties ctx) { org.compiere.model.POInfo poi = org.compiere.model.POInfo.getPOInfo (ctx, Table_Name, get_TrxName()); return poi; } /** Set Mail. @param C_Mail_ID Mail */ @Override public void setC_Mail_ID (int C_Mail_ID) { if (C_Mail_ID < 1) set_ValueNoCheck (COLUMNNAME_C_Mail_ID, null); else set_ValueNoCheck (COLUMNNAME_C_Mail_ID, Integer.valueOf(C_Mail_ID)); } /** Get Mail. @return Mail */ @Override public int getC_Mail_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_C_Mail_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Inhalt. @param ContentText Inhalt */ @Override public void setContentText (java.lang.String ContentText) { set_Value (COLUMNNAME_ContentText, ContentText); } /** Get Inhalt. @return Inhalt */ @Override public java.lang.String getContentText () { return (java.lang.String)get_Value(COLUMNNAME_ContentText); } /** Set Content type. @param ContentType Content type */ @Override public void setContentType (java.lang.String ContentType) { set_Value (COLUMNNAME_ContentType, ContentType); } /** Get Content type. 
@return Content type */ @Override public java.lang.String getContentType () { return (java.lang.String)get_Value(COLUMNNAME_ContentType); } /** Set Eingangsdatum. @param DateReceived Datum, zu dem ein Produkt empfangen wurde */ @Override public void setDateReceived (java.sql.Timestamp DateReceived) { set_Value (COLUMNNAME_DateReceived, DateReceived); } /** Get Eingangsdatum. @return Datum, zu dem ein Produkt empfangen wurde */ @Override public java.sql.Timestamp getDateReceived () { return (java.sql.Timestamp)get_Value(COLUMNNAME_DateReceived); } /** Set EMail Bcc. @param EMail_Bcc EMail Bcc */ @Override public void setEMail_Bcc (java.lang.String EMail_Bcc) { set_Value (COLUMNNAME_EMail_Bcc, EMail_Bcc); } /** Get EMail Bcc. @return EMail Bcc */ @Override public java.lang.String getEMail_Bcc () { return (java.lang.String)get_Value(COLUMNNAME_EMail_Bcc); } /** Set EMail Cc. @param EMail_Cc EMail Cc */ @Override public void setEMail_Cc (java.lang.String EMail_Cc) { set_Value (COLUMNNAME_EMail_Cc, EMail_Cc); } /** Get EMail Cc. @return EMail Cc */ @Override public java.lang.String getEMail_Cc () { return (java.lang.String)get_Value(COLUMNNAME_EMail_Cc); } /** Set EMail Absender. @param EMail_From Full EMail address used to send requests - e.g. <EMAIL> */ @Override public void setEMail_From (java.lang.String EMail_From) { set_Value (COLUMNNAME_EMail_From, EMail_From); } /** Get EMail Absender. @return Full EMail address used to send requests - e.g. <EMAIL> */ @Override public java.lang.String getEMail_From () { return (java.lang.String)get_Value(COLUMNNAME_EMail_From); } /** Set EMail Empfänger. @param EMail_To EMail address to send requests to - e.g. <EMAIL> */ @Override public void setEMail_To (java.lang.String EMail_To) { set_Value (COLUMNNAME_EMail_To, EMail_To); } /** Get EMail Empfänger. @return EMail address to send requests to - e.g. 
<EMAIL> */ @Override public java.lang.String getEMail_To () { return (java.lang.String)get_Value(COLUMNNAME_EMail_To); } /** Set EMail Headers. @param EMailHeadersJSON EMail Headers */ @Override public void setEMailHeadersJSON (java.lang.String EMailHeadersJSON) { set_Value (COLUMNNAME_EMailHeadersJSON, EMailHeadersJSON); } /** Get EMail Headers. @return EMail Headers */ @Override public java.lang.String getEMailHeadersJSON () { return (java.lang.String)get_Value(COLUMNNAME_EMailHeadersJSON); } @Override public org.compiere.model.I_AD_User getFrom_User() throws RuntimeException { return get_ValueAsPO(COLUMNNAME_From_User_ID, org.compiere.model.I_AD_User.class); } @Override public void setFrom_User(org.compiere.model.I_AD_User From_User) { set_ValueFromPO(COLUMNNAME_From_User_ID, org.compiere.model.I_AD_User.class, From_User); } /** Set From User. @param From_User_ID From User */ @Override public void setFrom_User_ID (int From_User_ID) { if (From_User_ID < 1) set_Value (COLUMNNAME_From_User_ID, null); else set_Value (COLUMNNAME_From_User_ID, Integer.valueOf(From_User_ID)); } /** Get From User. @return From User */ @Override public int getFrom_User_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_From_User_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Initial Message-ID. @param InitialMessageID EMail Initial Message-ID */ @Override public void setInitialMessageID (java.lang.String InitialMessageID) { set_Value (COLUMNNAME_InitialMessageID, InitialMessageID); } /** Get Initial Message-ID. @return EMail Initial Message-ID */ @Override public java.lang.String getInitialMessageID () { return (java.lang.String)get_Value(COLUMNNAME_InitialMessageID); } /** Set Inbound EMail. @param IsInboundEMail Inbound EMail */ @Override public void setIsInboundEMail (boolean IsInboundEMail) { set_Value (COLUMNNAME_IsInboundEMail, Boolean.valueOf(IsInboundEMail)); } /** Get Inbound EMail. 
@return Inbound EMail */ @Override public boolean isInboundEMail () { Object oo = get_Value(COLUMNNAME_IsInboundEMail); if (oo != null) { if (oo instanceof Boolean) return ((Boolean)oo).booleanValue(); return "Y".equals(oo); } return false; } /** Set Message-ID. @param MessageID EMail Message-ID */ @Override public void setMessageID (java.lang.String MessageID) { set_Value (COLUMNNAME_MessageID, MessageID); } /** Get Message-ID. @return EMail Message-ID */ @Override public java.lang.String getMessageID () { return (java.lang.String)get_Value(COLUMNNAME_MessageID); } @Override public org.compiere.model.I_R_Request getR_Request() throws RuntimeException { return get_ValueAsPO(COLUMNNAME_R_Request_ID, org.compiere.model.I_R_Request.class); } @Override public void setR_Request(org.compiere.model.I_R_Request R_Request) { set_ValueFromPO(COLUMNNAME_R_Request_ID, org.compiere.model.I_R_Request.class, R_Request); } /** Set Aufgabe. @param R_Request_ID Request from a Business Partner or Prospect */ @Override public void setR_Request_ID (int R_Request_ID) { if (R_Request_ID < 1) set_Value (COLUMNNAME_R_Request_ID, null); else set_Value (COLUMNNAME_R_Request_ID, Integer.valueOf(R_Request_ID)); } /** Get Aufgabe. @return Request from a Business Partner or Prospect */ @Override public int getR_Request_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_R_Request_ID); if (ii == null) return 0; return ii.intValue(); } /** Set Betreff. @param Subject Mail Betreff */ @Override public void setSubject (java.lang.String Subject) { set_Value (COLUMNNAME_Subject, Subject); } /** Get Betreff. 
@return Mail Betreff */ @Override public java.lang.String getSubject () { return (java.lang.String)get_Value(COLUMNNAME_Subject); } @Override public org.compiere.model.I_AD_User getTo_User() throws RuntimeException { return get_ValueAsPO(COLUMNNAME_To_User_ID, org.compiere.model.I_AD_User.class); } @Override public void setTo_User(org.compiere.model.I_AD_User To_User) { set_ValueFromPO(COLUMNNAME_To_User_ID, org.compiere.model.I_AD_User.class, To_User); } /** Set To User. @param To_User_ID To User */ @Override public void setTo_User_ID (int To_User_ID) { if (To_User_ID < 1) set_Value (COLUMNNAME_To_User_ID, null); else set_Value (COLUMNNAME_To_User_ID, Integer.valueOf(To_User_ID)); } /** Get To User. @return To User */ @Override public int getTo_User_ID () { Integer ii = (Integer)get_Value(COLUMNNAME_To_User_ID); if (ii == null) return 0; return ii.intValue(); } }
3,620
13,648
// Include MicroPython API. #include "py/runtime.h" // Declare the function we'll make available in Python as cppexample.cppfunc(). extern mp_obj_t cppfunc(mp_obj_t a_obj, mp_obj_t b_obj);
68
5,169
<reponame>Gantios/Specs<filename>Specs/d/3/8/Quicksilver/1.0.5/Quicksilver.podspec.json<gh_stars>1000+ { "name": "Quicksilver", "version": "1.0.5", "summary": "Quicksilver is an iOS/macOS/tvOS/watchOS framework that extends the collection classes and makes them easier to work with.", "description": "Quicksilver is an iOS/macOS/tvOS/watchOS framework that extends the collection classes (`NSArray`,`NSSet`,`NSOrderedSet`,`NSDictionary`, and `NSString`) and makes them easier to work with. The added methods are modeled after the related list functions (`map`,`filter`,`reduce`, etc.) in Haskell.", "homepage": "https://github.com/Kosoku/Quicksilver", "license": { "type": "BSD", "file": "license.txt" }, "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/Kosoku/Quicksilver.git", "tag": "1.0.5" }, "platforms": { "ios": "9.0", "osx": "10.12", "tvos": "10.0", "watchos": "3.0" }, "requires_arc": true, "source_files": "Quicksilver/**/*.{h,m}", "exclude_files": "Quicksilver/Quicksilver-Info.h", "frameworks": [ "Foundation", "CoreGraphics" ] }
455
1,330
<gh_stars>1000+ /* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.struts2.portlet.servlet; import java.io.IOException; import java.io.PrintWriter; import java.util.Locale; import javax.portlet.MimeResponse; import javax.portlet.PortletResponse; import javax.portlet.ResourceResponse; import javax.servlet.ServletOutputStream; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.LogManager; /** * PortletServletResponseJSR286. 
* * @author <NAME> */ public class PortletServletResponseJSR286 extends PortletServletResponse { private static final Logger LOG = LogManager.getLogger(PortletServletResponseJSR286.class); public PortletServletResponseJSR286( PortletResponse portletResponse ) { super(portletResponse); } public void flushBuffer() throws IOException { if(portletResponse instanceof MimeResponse) { ((MimeResponse)portletResponse).flushBuffer(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public int getBufferSize() { if(portletResponse instanceof MimeResponse) { return ((MimeResponse)portletResponse).getBufferSize(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public String getCharacterEncoding() { if(portletResponse instanceof MimeResponse) { return ((MimeResponse)portletResponse).getCharacterEncoding(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public String getContentType() { if(portletResponse instanceof MimeResponse) { return ((MimeResponse)portletResponse).getContentType(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public Locale getLocale() { if(portletResponse instanceof MimeResponse) { return ((MimeResponse)portletResponse).getLocale(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public ServletOutputStream getOutputStream() throws IOException { if(portletResponse instanceof MimeResponse) { return new PortletServletOutputStream(((MimeResponse)portletResponse).getPortletOutputStream()); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public PrintWriter getWriter() throws IOException { if(portletResponse instanceof MimeResponse) { return ((MimeResponse)portletResponse).getWriter(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public boolean isCommitted() { 
if(portletResponse instanceof MimeResponse) { return ((MimeResponse)portletResponse).isCommitted(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public void reset() { if(portletResponse instanceof MimeResponse) { ((MimeResponse)portletResponse).reset(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public void resetBuffer() { if(portletResponse instanceof MimeResponse) { ((MimeResponse)portletResponse).resetBuffer(); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public void setBufferSize(int size) { if(portletResponse instanceof MimeResponse) { ((MimeResponse)portletResponse).setBufferSize(size); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public void setCharacterEncoding(String charset) { if(portletResponse instanceof ResourceResponse) { ((ResourceResponse)portletResponse).setCharacterEncoding(charset); } else { throw new IllegalStateException("Only allowed in resource phase"); } } public void setContentLength(int len) { if(portletResponse instanceof ResourceResponse) { ((ResourceResponse)portletResponse).setContentLength(len); } else { throw new IllegalStateException("Only allowed in resource phase"); } } public void setContentType(String type) { if(portletResponse instanceof MimeResponse) { ((MimeResponse)portletResponse).setContentType(type); } else { throw new IllegalStateException("Only allowed in render or resource phase"); } } public void setLocale(Locale loc) { if(portletResponse instanceof ResourceResponse) { ((ResourceResponse)portletResponse).setLocale(loc); } else { throw new IllegalStateException("Only allowed in resource phase"); } } }
2,212
684
/* Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.activiti.explorer.ui.custom;

import java.util.HashMap;
import java.util.Map;

import org.activiti.explorer.ExplorerApp;
import org.activiti.explorer.I18nManager;
import org.activiti.explorer.Messages;
import org.activiti.explorer.ui.mainlayout.ExplorerLayout;

import com.vaadin.data.Item;
import com.vaadin.data.Property.ValueChangeEvent;
import com.vaadin.data.Property.ValueChangeListener;
import com.vaadin.ui.Alignment;
import com.vaadin.ui.Button;
import com.vaadin.ui.Button.ClickEvent;
import com.vaadin.ui.Button.ClickListener;
import com.vaadin.ui.Component;
import com.vaadin.ui.Embedded;
import com.vaadin.ui.GridLayout;
import com.vaadin.ui.HorizontalLayout;
import com.vaadin.ui.Table;
import com.vaadin.ui.Table.CellStyleGenerator;
import com.vaadin.ui.themes.Reindeer;

/**
 * Generic component for a popup window that displays multiple selection
 * tabs on the left and shows the configured component matching the current
 * selection on the right.
 *
 * Note: For best visual results, the components on the right should be of
 * fixed size, since the window width and height are calculated based on
 * these components.
 *
 * @author <NAME>
 * @author <NAME>
 */
public class TabbedSelectionWindow extends PopupWindow {

  private static final long serialVersionUID = 1L;

  protected I18nManager i18nManager;

  // Root layout: selection table on the left, selected component on the right.
  protected HorizontalLayout windowLayout;
  // Left-hand table acting as the "tab" list.
  protected Table selectionTable;
  // Name of the currently selected item; key into components/listeners maps.
  protected String currentSelection;
  // Component currently displayed on the right-hand side (may be null).
  protected Component currentComponent;
  // Registered right-hand components, keyed by selection-item name.
  protected Map<String, Component> components = new HashMap<String, Component>();
  // OK-button click handlers, keyed by selection-item name.
  protected Map<String, ClickListener> listeners = new HashMap<String, Button.ClickListener>();
  // Right-hand grid: row 0 holds the selected component, row 1 the OK button.
  protected GridLayout selectedComponentLayout;
  protected Button okButton;

  public TabbedSelectionWindow(String title) {
    this.i18nManager = ExplorerApp.get().getI18nManager();

    // Order matters: the selection table's value-change listener touches
    // selectedComponentLayout and okButton, which are created by the two
    // later init calls; items are only added after construction via
    // addSelectionItem(), so no event can fire before they exist.
    initWindow(title);
    initWindowLayout();
    initSelectionTable();
    initComponentLayout();
    initActions();
  }

  // Basic window chrome: caption, centered, modal, light theme.
  protected void initWindow(String title) {
    setCaption(title);
    center();
    setModal(true);
    addStyleName(Reindeer.WINDOW_LIGHT);
  }

  // Creates the root horizontal layout and installs it as window content.
  protected void initWindowLayout() {
    windowLayout = new HorizontalLayout();
    windowLayout.setSpacing(false);
    windowLayout.setMargin(true);
    windowLayout.setSizeFull();
    setContent(windowLayout);
  }

  // Creates the right-hand grid that holds the selected component (row 0)
  // and the OK button (row 1); it takes all remaining horizontal space.
  protected void initComponentLayout() {
    selectedComponentLayout = new GridLayout(1,2);
    selectedComponentLayout.setSizeFull();
    selectedComponentLayout.setMargin(true);
    selectedComponentLayout.setSpacing(true);
    selectedComponentLayout.addStyleName(ExplorerLayout.STYLE_RELATED_CONTENT_CREATE_DETAIL);

    windowLayout.addComponent(selectedComponentLayout);
    windowLayout.setExpandRatio(selectedComponentLayout, 1.0f);

    selectedComponentLayout.setRowExpandRatio(0, 1.0f);
    selectedComponentLayout.setColumnExpandRatio(0, 1.0f);
  }

  // Creates the OK button. It stays disabled until a selection with a
  // registered component is made; clicking delegates to the listener that
  // was registered for the current selection, then closes the window.
  protected void initActions() {
    okButton = new Button(i18nManager.getMessage(Messages.BUTTON_OK));
    selectedComponentLayout.addComponent(okButton, 0, 1);
    okButton.setEnabled(false);
    okButton.addListener(new ClickListener() {

      private static final long serialVersionUID = 1L;

      public void buttonClick(ClickEvent event) {
        listeners.get(currentSelection).buttonClick(event);
        close();
      }
    });
    selectedComponentLayout.setComponentAlignment(okButton, Alignment.BOTTOM_RIGHT);
  }

  // Builds the left-hand selection table: one row per selection item with
  // an icon column ("type") and a name column ("name").
  protected void initSelectionTable() {
    selectionTable = new Table();
    selectionTable.setSizeUndefined();
    selectionTable.setColumnHeaderMode(Table.COLUMN_HEADER_MODE_HIDDEN);
    selectionTable.setSelectable(true);
    selectionTable.setImmediate(true);
    selectionTable.setNullSelectionAllowed(false);
    selectionTable.setWidth(150, UNITS_PIXELS);
    selectionTable.setHeight(100, UNITS_PERCENTAGE);

    selectionTable.setCellStyleGenerator(new CellStyleGenerator() {
      private static final long serialVersionUID = 1L;
      public String getStyle(Object itemId, Object propertyId) {
        // Only the name column gets the "last column" list style.
        if("name".equals(propertyId)) {
          return ExplorerLayout.STYLE_RELATED_CONTENT_CREATE_LIST_LAST_COLUMN;
        }
        return null;
      }
    });

    selectionTable.addStyleName(ExplorerLayout.STYLE_RELATED_CONTENT_CREATE_LIST);

    selectionTable.addContainerProperty("type", Embedded.class, null);
    selectionTable.setColumnWidth("type", 22);
    selectionTable.addContainerProperty("name", String.class, null);

    // Listener to switch to the selected component: swaps the component in
    // grid cell (0,0) and enables the OK button only when a component is
    // registered for the selected name.
    selectionTable.addListener(new ValueChangeListener() {
      private static final long serialVersionUID = 1L;
      public void valueChange(ValueChangeEvent event) {
        String name = (String) event.getProperty().getValue();
        if (name != null) {
          currentSelection = name;
          currentComponent = components.get(name);
          selectedComponentLayout.removeComponent(selectedComponentLayout.getComponent(0, 0));
          if (currentComponent != null) {
            currentComponent.setSizeFull();
            selectedComponentLayout.addComponent(currentComponent, 0, 0);
            okButton.setEnabled(true);
          } else {
            okButton.setEnabled(false);
          }
        }
      }
    });

    windowLayout.addComponent(selectionTable);
  }

  /**
   * @param icon The 16x16 icon that will be displayed on the left in the selection table.
   * @param name The name that will be shown in the selection table
   * @param component The component that is selected when the item in the selection table is clicked.
   * @param clickListener The listener that will be attached to the OK button displayed beneath
   *   the component.
   */
  public void addSelectionItem(Embedded icon, String name, Component component, ClickListener clickListener) {
    Item item = selectionTable.addItem(name);
    item.getItemProperty("type").setValue(icon);
    item.getItemProperty("name").setValue(name);
    components.put(name, component);
    listeners.put(name, clickListener);
  }

}
2,212
540
package com.mooveit.fakeit.viewmodels;

import com.mooveit.fakeit.models.ChuckNorrisFactsData;
import com.mooveit.library.Fakeit;

/**
 * View model backing the Chuck Norris facts screen.
 * <p>
 * On construction (and on every {@link #refresh()}) it asks the Fakeit
 * library for a new fact and pushes it into the observable
 * {@code chuckNorrisFact} field of the supplied data object.
 */
public class ChuckNorrisFactsViewModel extends BaseViewModel {

    /** Observable screen state; its fact field is (re)filled from Fakeit. */
    private final ChuckNorrisFactsData mData;

    /**
     * @param data holder whose {@code chuckNorrisFact} field this view model
     *             populates; an initial fact is generated immediately.
     */
    public ChuckNorrisFactsViewModel(ChuckNorrisFactsData data) {
        mData = data;
        populateFact();
    }

    /** @return the data object this view model populates. */
    public ChuckNorrisFactsData getData() {
        return mData;
    }

    /** Generates a fresh fact and stores it in the observable field. */
    private void populateFact() {
        mData.chuckNorrisFact.set(Fakeit.chuckNorris().fact());
    }

    /** Replaces the currently displayed fact with a newly generated one. */
    @Override
    public void refresh() {
        populateFact();
    }
}
242
3,269
// Time:  O(n)
// Space: O(1)

// Inserts newInterval into a sorted list of disjoint intervals, merging any
// intervals it overlaps. Note: newInterval is also updated in place to the
// fully merged interval (same observable side effect as before).
class Solution {
public:
    vector<vector<int>> insert(vector<vector<int>>& intervals, vector<int>& newInterval) {
        vector<vector<int>> merged;
        const size_t n = intervals.size();
        size_t idx = 0;

        // Phase 1: copy every interval that ends strictly before newInterval starts.
        for (; idx < n && intervals[idx][1] < newInterval[0]; ++idx) {
            merged.emplace_back(intervals[idx]);
        }

        // Phase 2: fold all overlapping intervals into newInterval.
        for (; idx < n && intervals[idx][0] <= newInterval[1]; ++idx) {
            newInterval[0] = min(newInterval[0], intervals[idx][0]);
            newInterval[1] = max(newInterval[1], intervals[idx][1]);
        }
        merged.emplace_back(newInterval);

        // Phase 3: append the untouched tail.
        for (; idx < n; ++idx) {
            merged.emplace_back(intervals[idx]);
        }
        return merged;
    }
};
417
468
<gh_stars>100-1000 """An implementation of ESIM Model.""" import typing import torch import torch.nn as nn from torch.nn import functional as F from matchzoo.engine.param_table import ParamTable from matchzoo.engine.param import Param from matchzoo.engine.base_model import BaseModel from matchzoo.modules import RNNDropout from matchzoo.modules import BidirectionalAttention from matchzoo.modules import StackedBRNN class ESIM(BaseModel): """ ESIM Model. Examples: >>> model = ESIM() >>> model.guess_and_fill_missing_params(verbose=0) >>> model.build() """ @classmethod def get_default_params(cls) -> ParamTable: """:return: model default parameters.""" params = super().get_default_params( with_embedding=True, with_multi_layer_perceptron=False ) params.add(Param(name='mask_value', value=0, desc="The value to be masked from inputs.")) params.add(Param(name='dropout', value=0.2, desc="Dropout rate.")) params.add(Param(name='hidden_size', value=200, desc="Hidden size.")) params.add(Param(name='lstm_layer', value=1, desc="Number of LSTM layers")) params.add(Param(name='drop_lstm', value=False, desc="Whether dropout LSTM.")) params.add(Param(name='concat_lstm', value=True, desc="Whether concat intermediate outputs.")) params.add(Param(name='rnn_type', value='lstm', desc="Choose rnn type, lstm or gru.")) return params def build(self): """Instantiating layers.""" rnn_mapping = {'lstm': nn.LSTM, 'gru': nn.GRU} self.embedding = self._make_default_embedding_layer() self.rnn_dropout = RNNDropout(p=self._params['dropout']) lstm_size = self._params['hidden_size'] if self._params['concat_lstm']: lstm_size /= self._params['lstm_layer'] self.input_encoding = StackedBRNN( self._params['embedding_output_dim'], int(lstm_size / 2), self._params['lstm_layer'], dropout_rate=self._params['dropout'], dropout_output=self._params['drop_lstm'], rnn_type=rnn_mapping[self._params['rnn_type'].lower()], concat_layers=self._params['concat_lstm']) self.attention = BidirectionalAttention() self.projection 
= nn.Sequential( nn.Linear( 4 * self._params['hidden_size'], self._params['hidden_size']), nn.ReLU()) self.composition = StackedBRNN( self._params['hidden_size'], int(lstm_size / 2), self._params['lstm_layer'], dropout_rate=self._params['dropout'], dropout_output=self._params['drop_lstm'], rnn_type=rnn_mapping[self._params['rnn_type'].lower()], concat_layers=self._params['concat_lstm']) self.classification = nn.Sequential( nn.Dropout( p=self._params['dropout']), nn.Linear( 4 * self._params['hidden_size'], self._params['hidden_size']), nn.Tanh(), nn.Dropout( p=self._params['dropout'])) self.out = self._make_output_layer(self._params['hidden_size']) def forward(self, inputs): """Forward.""" # Scalar dimensions referenced here: # B = batch size (number of sequences) # D = embedding size # L = `input_left` sequence length # R = `input_right` sequence length # H = hidden size # [B, L], [B, R] query, doc = inputs['text_left'].long(), inputs['text_right'].long() # [B, L] # [B, R] query_mask = (query == self._params['mask_value']) doc_mask = (doc == self._params['mask_value']) # [B, L, D] # [B, R, D] query = self.embedding(query) doc = self.embedding(doc) # [B, L, D] # [B, R, D] query = self.rnn_dropout(query) doc = self.rnn_dropout(doc) # [B, L, H] # [B, R, H] query = self.input_encoding(query, query_mask) doc = self.input_encoding(doc, doc_mask) # [B, L, H], [B, L, H] attended_query, attended_doc = self.attention( query, query_mask, doc, doc_mask) # [B, L, 4 * H] # [B, L, 4 * H] enhanced_query = torch.cat([query, attended_query, query - attended_query, query * attended_query], dim=-1) enhanced_doc = torch.cat([doc, attended_doc, doc - attended_doc, doc * attended_doc], dim=-1) # [B, L, H] # [B, L, H] projected_query = self.projection(enhanced_query) projected_doc = self.projection(enhanced_doc) # [B, L, H] # [B, L, H] query = self.composition(projected_query, query_mask) doc = self.composition(projected_doc, doc_mask) # [B, L] # [B, R] reverse_query_mask = 1. 
- query_mask.float() reverse_doc_mask = 1. - doc_mask.float() # [B, H] # [B, H] query_avg = torch.sum(query * reverse_query_mask.unsqueeze(2), dim=1)\ / (torch.sum(reverse_query_mask, dim=1, keepdim=True) + 1e-8) doc_avg = torch.sum(doc * reverse_doc_mask.unsqueeze(2), dim=1)\ / (torch.sum(reverse_doc_mask, dim=1, keepdim=True) + 1e-8) # [B, L, H] # [B, L, H] query = query.masked_fill(query_mask.unsqueeze(2), -1e7) doc = doc.masked_fill(doc_mask.unsqueeze(2), -1e7) # [B, H] # [B, H] query_max, _ = query.max(dim=1) doc_max, _ = doc.max(dim=1) # [B, 4 * H] v = torch.cat([query_avg, query_max, doc_avg, doc_max], dim=-1) # [B, H] hidden = self.classification(v) # [B, num_classes] out = self.out(hidden) return out
3,319
331
package am.gaut.android.toolbarbutton; import android.animation.Animator; import android.animation.AnimatorListenerAdapter; import android.content.Context; import android.graphics.Rect; import android.os.Build; import android.support.annotation.Nullable; import android.support.design.widget.AppBarLayout; import android.support.design.widget.CoordinatorLayout; import android.support.v4.view.ViewCompat; import android.support.v4.view.animation.FastOutLinearInInterpolator; import android.support.v4.view.animation.LinearOutSlowInInterpolator; import android.util.AttributeSet; import android.view.View; import android.view.ViewGroup; import android.view.animation.Interpolator; import android.widget.Button; import am.gaut.android.toolbarbutton.helpers.CollapsingToolbarHelper; /** * Toolbar buttons are used for a special type of promoted action. They are used in combination * with a FloatingActionButton anchored to a CollapsingToolbarLayout. * * Requires ICS+ (sdk 14+) */ @CoordinatorLayout.DefaultBehavior(ToolbarButton.Behavior.class) public class ToolbarButton extends Button { private static final String LOG_TAG = "ToolbarButton"; /** * Callback to be invoked when the visibility of a ToolbarButton changes. */ public abstract static class OnVisibilityChangedListener { /** * Called when a ToolbarButton has been * {@link #show(OnVisibilityChangedListener) shown}. * * @param toolbarBtn the ToolbarButton that was shown. */ public void onShown(ToolbarButton toolbarBtn) {} /** * Called when a ToolbarButton has been * {@link #hide(OnVisibilityChangedListener) hidden}. * * @param toolbarBtn the ToolbarButton that was hidden. 
*/ public void onHidden(ToolbarButton toolbarBtn) {} } private static final String XMLNS_ANDROID = "http://schemas.android.com/apk/res/android"; private static final int SHOW_HIDE_ANIM_DURATION = 200; private static final Interpolator FAST_OUT_LINEAR_IN_INTERPOLATOR = new FastOutLinearInInterpolator(); private static final Interpolator LINEAR_OUT_SLOW_IN_INTERPOLATOR = new LinearOutSlowInInterpolator(); private boolean mIsHiding; public ToolbarButton(Context context) { this(context, null); } public ToolbarButton(Context context, AttributeSet attrs) { this(context, attrs, 0); } public ToolbarButton(Context context, AttributeSet attrs, int defStyleAttr) { super(context, attrs, defStyleAttr); // Hide if there's no visibility attribute if (attrs.getAttributeValue(XMLNS_ANDROID, "visibility") == null) { setVisibility(GONE); } // Add elevation if it's not set if (Build.VERSION.SDK_INT >= 21 && attrs.getAttributeValue(XMLNS_ANDROID, "elevation") == null) { setElevation(R.dimen.toolbar_button_elevation); } } /** * Shows the button. * <p>This method will animate the button show if the view has already been laid out.</p> */ public void show() { show(null); } /** * Shows the button. 
* <p>This method will animate the button show if the view has already been laid out.</p> * * @param listener the listener to notify when this view is shown */ public void show(@Nullable final OnVisibilityChangedListener listener) { if (mIsHiding || getVisibility() != View.VISIBLE) { if (ViewCompat.isLaidOut(this) && !isInEditMode()) { animate().cancel(); if (getVisibility() != View.VISIBLE) { // If the view isn't visible currently, we'll animate it from a single pixel setAlpha(0f); setScaleY(0f); setScaleX(0f); } animate() .scaleX(1f) .scaleY(1f) .alpha(1f) .setDuration(SHOW_HIDE_ANIM_DURATION) .setInterpolator(LINEAR_OUT_SLOW_IN_INTERPOLATOR) .setListener(new AnimatorListenerAdapter() { @Override public void onAnimationStart(Animator animation) { setVisibility(View.VISIBLE); } @Override public void onAnimationEnd(Animator animation) { if (listener != null) { listener.onShown(ToolbarButton.this); } } }); } else { setVisibility(View.VISIBLE); setAlpha(1f); setScaleY(1f); setScaleX(1f); if (listener != null) { listener.onShown(this); } } } } /** * Hides the button. * <p>This method will animate the button hide if the view has already been laid out.</p> */ public void hide() { hide(null); } private void hide(@Nullable final OnVisibilityChangedListener listener) { if (mIsHiding || getVisibility() != View.VISIBLE) { // A hide animation is in progress, or we're already hidden. 
Skip the call if (listener != null) { listener.onHidden(this); } return; } if (!ViewCompat.isLaidOut(this) || isInEditMode()) { // If the view isn't laid out, or we're in the editor, don't run the animation setVisibility(View.GONE); if (listener != null) { listener.onHidden(this); } } else { animate().cancel(); animate().scaleX(0.0F) .scaleY(0.0F) .alpha(0.0F) .setDuration(SHOW_HIDE_ANIM_DURATION) .setInterpolator(FAST_OUT_LINEAR_IN_INTERPOLATOR) .setListener(new AnimatorListenerAdapter() { private boolean mCancelled; @Override public void onAnimationStart(Animator animation) { mIsHiding = true; mCancelled = false; setVisibility(View.VISIBLE); } @Override public void onAnimationCancel(Animator animation) { mIsHiding = false; mCancelled = true; } @Override public void onAnimationEnd(Animator animation) { mIsHiding = false; if (!mCancelled) { setVisibility(View.GONE); if (listener != null) { listener.onHidden(ToolbarButton.this); } } } }); } } /** * Behavior designed for use with {@link ToolbarButton} instances. It's main function * is to show/hide {@link ToolbarButton} views based on the layout they are associated with. 
*/ public static class Behavior extends CoordinatorLayout.Behavior<ToolbarButton> { private Rect mTmpRect; public Behavior() { } public Behavior(Context context, AttributeSet attrs) { } public boolean layoutDependsOn(CoordinatorLayout parent, ToolbarButton child, View dependency) { return dependency instanceof AppBarLayout; } public boolean onDependentViewChanged(CoordinatorLayout parent, ToolbarButton child, View dependency) { if (dependency instanceof AppBarLayout) { this.updateButtonVisibility(parent, (AppBarLayout) dependency, child); } return false; } private boolean updateButtonVisibility(CoordinatorLayout parent, AppBarLayout appBarLayout, final ToolbarButton child) { CoordinatorLayout.LayoutParams lp = (CoordinatorLayout.LayoutParams) child.getLayoutParams(); if (lp.getAnchorId() != appBarLayout.getId()) { // The anchor ID doesn't match the dependency, so we won't automatically // show/hide the button return false; } if (mTmpRect == null) { mTmpRect = new Rect(); } final Rect rect = mTmpRect; CollapsingToolbarHelper.getDescendantRect(parent, appBarLayout, rect); // Hide show code logic borrowed from Android Support Library Floating Action Button if (rect.bottom <= CollapsingToolbarHelper.getMinimumHeightForVisibleOverlappingContent(appBarLayout)) { child.show(); // Height should equal toolbar height // If android:fitsSystemWindows="true" is enabled, add appropriate top margin final int inset = CollapsingToolbarHelper.getTopInset(appBarLayout); ViewGroup.MarginLayoutParams params = (ViewGroup.MarginLayoutParams) child.getLayoutParams(); params.topMargin = inset; params.height = rect.bottom - inset; child.setLayoutParams(params); } else { child.hide(); } return true; } } }
4,629
1,511
/* * Advanced Linux Sound Architecture - ALSA - Driver * Copyright (c) 1994-2003 by <NAME> <<EMAIL>>, * <NAME> <<EMAIL>> *
71
60,067
import torch
import functools
import random
import operator
import numpy as np
import time


# Shim for torch.cuda.Event when running on cpu.
class Event(object):
    """CPU stand-in for ``torch.cuda.Event``, timed with ``time.perf_counter``."""

    def __init__(self, enable_timing):
        # ``enable_timing`` is accepted only for signature compatibility
        # with torch.cuda.Event; it has no effect on the CPU shim.
        pass

    def record(self):
        # Timestamp in (fractional) seconds from a monotonic clock.
        self.time = time.perf_counter()

    def elapsed_time(self, end_event):
        """Return the time between the two recorded events in *milliseconds*.

        ``torch.cuda.Event.elapsed_time`` reports milliseconds, so the CPU
        shim converts seconds -> ms to keep CPU and CUDA benchmark numbers
        directly comparable (the previous version returned raw seconds).
        """
        assert isinstance(end_event, Event)
        return (end_event.time - self.time) * 1000.0


def gen_sparse_csr(shape, nnz):
    """Build a random dense tensor with exactly ``nnz`` nonzeros and return
    it converted to sparse CSR layout."""
    fill_value = 0
    total_values = functools.reduce(operator.mul, shape, 1)
    dense = np.random.randn(total_values)
    # Zero out all but ``nnz`` positions, chosen without replacement.
    fills = random.sample(list(range(total_values)), total_values - nnz)
    for f in fills:
        dense[f] = fill_value
    dense = torch.from_numpy(dense.reshape(shape))
    return dense.to_sparse_csr()


def gen_sparse_coo(shape, nnz):
    """Build a random 2-D sparse COO tensor with exactly ``nnz`` distinct entries.

    Linear positions are sampled *without replacement*: the previous
    independent row/col sampling could draw the same coordinate twice,
    silently yielding fewer than ``nnz`` values after coalescing.
    """
    total_values = functools.reduce(operator.mul, shape, 1)
    positions = random.sample(range(total_values), nnz)
    indices = [[], []]
    for pos in positions:
        row, col = divmod(pos, shape[1])
        indices[0].append(row)
        indices[1].append(col)
    values = np.random.randn(nnz)
    return torch.sparse_coo_tensor(indices, values, size=shape)


def gen_sparse_coo_and_csr(shape, nnz):
    """Build one random tensor with exactly ``nnz`` nonzeros and return it in
    both COO and CSR sparse layouts (same underlying values)."""
    total_values = functools.reduce(operator.mul, shape, 1)
    dense = np.random.randn(total_values)
    fills = random.sample(list(range(total_values)), total_values - nnz)
    for f in fills:
        dense[f] = 0
    dense = torch.from_numpy(dense.reshape(shape))
    return dense.to_sparse(), dense.to_sparse_csr()
651
937
package com.oath.cyclops.matching;

import java.util.function.Function;

/**
 * A sealed sum type over exactly two alternatives, {@code T1} and {@code T2}.
 * <p>
 * The only operation is {@link #fold(Function, Function)}, so implementations
 * can only be consumed by handling both cases — the compiler forces callers
 * to supply a handler for each alternative.
 */
public interface Sealed2<T1,T2> {

  /**
   * Collapses this value to a single result of type {@code R}.
   * <p>
   * NOTE(review): by the usual fold convention exactly one of the two
   * functions is applied, depending on which alternative this instance
   * represents — confirm against concrete implementors, which are not
   * visible here.
   *
   * @param fn1 handler invoked for the first alternative ({@code T1})
   * @param fn2 handler invoked for the second alternative ({@code T2})
   * @param <R> result type produced by whichever handler runs
   * @return the result of the applied handler
   */
  public <R> R fold(Function<? super T1, ? extends R> fn1, Function<? super T2, ? extends R> fn2);

}
79
1,199
package org.jd.gui.view.component;

import org.jd.gui.util.io.FileUtils;
import org.jd.gui.util.sys.SystemUtils;

import javax.swing.*;
import java.awt.*;
import java.io.File;
import java.io.FilenameFilter;

/**
 * A {@link JFileChooser} that delegates to the native AWT {@link FileDialog}
 * on macOS (where the Swing chooser looks foreign) and falls back to the
 * standard Swing dialog everywhere else.
 * <p>
 * Created by jianhua.fengjh on 27/11/2015.
 */
public class FileChooser extends JFileChooser {

    /**
     *
     */
    private static final long serialVersionUID = 1L;

    /** Shows an open dialog; uses the native dialog on macOS. */
    public int showOpenDialog(Component parent) {
        if (!SystemUtils.isMacOS()) {
            return super.showOpenDialog(parent);
        } else {
            setDialogType(JFileChooser.OPEN_DIALOG);
            return showNativeFileDialog(this);
        }
    }

    /** Shows a save dialog; uses the native dialog on macOS. */
    public int showSaveDialog(Component parent) {
        if (!SystemUtils.isMacOS()) {
            return super.showSaveDialog(parent);
        } else {
            setDialogType(JFileChooser.SAVE_DIALOG);
            return showNativeFileDialog(this);
        }
    }

    /**
     * Bridges the chooser's configuration (current directory, selected file,
     * file filter, selection mode) onto a native {@link FileDialog}, shows it
     * modally, and copies the user's selection back into the chooser.
     *
     * @return {@link JFileChooser#APPROVE_OPTION}, {@link JFileChooser#CANCEL_OPTION}
     *         or {@link JFileChooser#ERROR_OPTION} (when {@code chooser} is null)
     */
    private static int showNativeFileDialog(final JFileChooser chooser) {
        if (chooser != null) {
            FileDialog fileDialog = new FileDialog((Frame) chooser.getParent());
            fileDialog.setDirectory(chooser.getCurrentDirectory().getPath());

            File file = chooser.getSelectedFile();

            if (chooser.getDialogType() == JFileChooser.SAVE_DIALOG) {
                fileDialog.setFile(file != null ? file.getName() : ""); // save only need name
            } else {
                fileDialog.setFile(file != null ? file.getPath() : "");
            }

            fileDialog.setFilenameFilter(new FilenameFilter() {
                public boolean accept(File dir, String name) {
                    // BUG FIX: the previous code joined dir and name with
                    // File.pathSeparator (the ':'/';' path-LIST separator, not
                    // the name separator) and prefixed the result with an
                    // integer sum of string lengths — a decompiler artifact of
                    // a StringBuilder capacity argument. The filter therefore
                    // always tested a nonexistent path. Build the real
                    // candidate file with the two-argument File constructor.
                    return chooser.getFileFilter().accept(new File(dir, name));
                }
            });

            if (chooser.getDialogType() == JFileChooser.SAVE_DIALOG) {
                fileDialog.setMode(FileDialog.SAVE);
            } else {
                fileDialog.setMode(FileDialog.LOAD);
            }

            // macOS-specific switch: the native dialog picks directories only
            // while this AWT property is "true".
            if (chooser.getFileSelectionMode() == JFileChooser.DIRECTORIES_ONLY) {
                System.setProperty("apple.awt.fileDialogForDirectories", "true");
            } else {
                System.setProperty("apple.awt.fileDialogForDirectories", "false");
            }

            fileDialog.setVisible(true); // modal; blocks until dismissed

            // reset fileDialogForDirectories property
            System.setProperty("apple.awt.fileDialogForDirectories", "false");

            if (fileDialog.getFile() == null) {
                return JFileChooser.CANCEL_OPTION;
            }

            String dir = fileDialog.getDirectory();
            String trailingSlash = FileUtils.ensureTrailingSlash(dir);
            String strFile = fileDialog.getFile();
            chooser.setSelectedFile(new File(strFile.length() != 0 ? trailingSlash.concat(strFile) : trailingSlash));

            return JFileChooser.APPROVE_OPTION;
        }

        return JFileChooser.ERROR_OPTION;
    }
}
1,448
1,367
<reponame>developer-inspur/SwissArmyKnife
package com.wanjian.sak;

import android.app.Application;

import com.wanjian.sak.config.Config;

/**
 * Public entry point of the SwissArmyKnife debugging tool.
 * <p>
 * Every method in this class is intentionally empty — presumably this is the
 * no-op (release) flavor of the library, so shipping apps keep the same API
 * surface without any debug behavior; confirm against the debug-flavor
 * sources, which are not visible here.
 * <p>
 * Created by wanjian on 2017/2/20.
 */
public class SAK {

    /**
     * Installs SwissArmyKnife into the given application.
     * No-op in this build; both parameters are ignored.
     */
    public static void init(Application application, Config config) {

    }

    // Static utility class: no instances.
    private SAK() {

    }

    /**
     * Removes SwissArmyKnife from the application.
     * No-op in this build.
     */
    public static void unInstall() {

    }
}
134
578
/* * Copyright (c) 2003-2021 <NAME> <<EMAIL>>. * All rights reserved. Use of the code is allowed under the * Artistic License 2.0 terms, as specified in the LICENSE file * distributed with this code, or available from * http://www.opensource.org/licenses/artistic-license-2.0.php */ /** \file winutils.cpp * * Contains generic utility functions that should be global and don't fit anywhere else */ #include "stdafx.h" #include "winutils.h" #include <sstream> #include "core/StringX.h" #include "core/SysInfo.h" #include "os/dir.h" #include "GeneralMsgBox.h" #include "core/PWSprefs.h" #include "core/XMLprefs.h" #include "os/env.h" #include "os/file.h" #include "os/lib.h" // typedefs for function pointers: typedef int (WINAPI* FP_GETDPI4SYSTEM) (); typedef int (WINAPI* FP_GETDPI4WINDOW) (HWND); typedef int (WINAPI* FP_GETSYSMETRICS4DPI) (int, UINT); void WinUtil::RelativizePath(std::wstring &curfile) { // If IsUnderPw2go() && exec's drive == curfile's drive, remove // from latter's path. This supports DoK usage if (SysInfo::IsUnderPw2go()) { const std::wstring execDir = pws_os::getexecdir(); std::wstring execDrive, dontCare; pws_os::splitpath(execDir, execDrive, dontCare, dontCare, dontCare); std::wstring fileDrive, fileDir, fileFile, fileExt; pws_os::splitpath(curfile, fileDrive, fileDir, fileFile, fileExt); ToUpper(fileDrive); ToUpper(execDrive); if (fileDrive == execDrive) { curfile = pws_os::makepath(L"", fileDir, fileFile, fileExt); } } } static BOOL CALLBACK EnumScreens(HMONITOR hMonitor, HDC, LPRECT, LPARAM lParam) { MONITORINFO mi; HRGN *phrgn = (HRGN *)lParam; mi.cbSize = sizeof(mi); GetMonitorInfo(hMonitor, &mi); HRGN hrgn2 = CreateRectRgnIndirect(&mi.rcWork); CombineRgn(*phrgn, *phrgn, hrgn2, RGN_OR); ::DeleteObject(hrgn2); return TRUE; } HRGN WinUtil::GetWorkAreaRegion() { HRGN hrgn = CreateRectRgn(0, 0, 0, 0); HDC hdc = ::GetDC(nullptr); EnumDisplayMonitors(hdc, nullptr, EnumScreens, (LPARAM)&hrgn); ::ReleaseDC(nullptr, hdc); return hrgn; } // Following 2 
functions moved from MigratePrefs.cpp /** * PerformConfigMigration() should be called if (a) we detected the preference file * in the old location (exec. dir), and (b) user chose to migrate. * To be more accurate: In addition to being in the exec dir, the current * username/hostname combination should be in the file, since when there are * several u/h prefs, we migrate only the current one. */ bool WinUtil::OfferConfigMigration() { /** * Offer the user the option of migrating config files iff ALL * of the following are true: * 1. Config file is currently in executable directory * 2. This is NOT a U3 installation * 3. The executable directory is on a fixed or network drive * 4. The user did NOT override the config file, user name or host name * via command line (-g, -u, -h). * 5. There isn't a config file already in the APPDATA location * 6. The APPDATA location exists */ const SysInfo *si = SysInfo::GetInstance(); // start with quickest checks if (si->IsUnderU3() || PWSprefs::UserSetCfgFile() || (si->GetRealHost() != si->GetEffectiveHost()) || (si->GetRealUser() != si->GetEffectiveUser())) return false; std::wstring wsExecDir = pws_os::getexecdir(); std::wstring wsExDrive, wsExDir, wsExFileName, wsExExt; pws_os::splitpath(wsExecDir, wsExDrive, wsExDir, wsExFileName, wsExExt); wsExDrive += L"\\"; UINT uiDT = GetDriveType(wsExDrive.c_str()); // Do not touch if not on local or remote (network) disk if (uiDT != DRIVE_FIXED && uiDT != DRIVE_REMOTE) return false; const std::wstring wsUserCfgDir = pws_os::getuserprefsdir(); // empty if couldn't find/create if (wsUserCfgDir.empty()) return false; const std::wstring wsExecDirCfgFile = wsExecDir + PWSprefs::cfgFileName; const std::wstring wsDefaultCfgFile = wsUserCfgDir + PWSprefs::cfgFileName; return (pws_os::FileExists(wsExecDirCfgFile) && !pws_os::FileExists(wsDefaultCfgFile)); } bool WinUtil::PerformConfigMigration() { /** * * We're here after the application's started and the conditions * listed above (in 
OfferConfigMigration) hold. * This constrains what we can assume and what we have to check. */ ASSERT(OfferConfigMigration()); // should not be here otherwise! if (!OfferConfigMigration()) return false; // I mean it! PWSprefs::ConfigOption configoption; // Note value meaningless at this point! std::wstring wsCnfgFile = PWSprefs::GetConfigFile(configoption); const std::wstring wsExecDir = pws_os::getexecdir(); const std::wstring wsUserCfgDir = pws_os::getuserprefsdir(); if (wsUserCfgDir.empty()) // couldn't find or create !? return false; std::wstring wsDefaultCfgFile = wsUserCfgDir + PWSprefs::cfgFileName; std::wstring wsExecDirCfgFile = wsExecDir + PWSprefs::cfgFileName; bool bRetVal(false); bool bExecCFRO(false); pws_os::FileExists(wsExecDirCfgFile, bExecCFRO); /** * MIGRATE **/ bRetVal = false; bool bNoMoreNodes(false); CXMLprefs newXMLConfig(wsExecDirCfgFile.c_str()); // for migrating user/host to new CXMLprefs oldXMLConfig(wsExecDirCfgFile.c_str()); // for removing user/host from old // Create the new one from it just containing our host/user if (!newXMLConfig.XML_Load()) return false; // WTF?!? const SysInfo *si = SysInfo::GetInstance(); stringT hn = si->GetEffectiveHost(); PWSprefs::XMLify(charT('H'), hn); stringT un = si->GetEffectiveUser(); PWSprefs::XMLify(charT('u'), un); stringT csHKCU_PREF = _T("Pwsafe_Settings\\"); csHKCU_PREF += hn.c_str(); csHKCU_PREF += _T("\\"); csHKCU_PREF += un.c_str(); csHKCU_PREF += _T("\\Preferences"); bool rc = newXMLConfig.MigrateSettings(wsDefaultCfgFile, hn, un); if (rc) { // That worked, now remove us from the old one config file // in the Installation directory newXMLConfig.Unlock(); // Since we now have new config file, remove host/user from old. 
if (!oldXMLConfig.XML_Load()) { rc = false; if (!oldXMLConfig.getReason().empty()) { CGeneralMsgBox gmb; gmb.SetMsg(oldXMLConfig.getReason().c_str()); gmb.AddButton(IDS_CONTINUE, IDS_CONTINUE); gmb.AddButton(IDS_EXIT, IDS_EXIT, TRUE, TRUE); if (gmb.DoModal() == IDS_EXIT) { goto exit; } // Problem loading XML file but user says continue rather than Exit PWS! // But we will not remove them from the old file and we will // delete the new file - better luck next time! pws_os::DeleteAFile(wsDefaultCfgFile); } } // Load failed // Now remove this hostname/username from old configuration file in the // installation directory (as long as everything OK and it is not R-O) if (rc && !bExecCFRO) { rc = oldXMLConfig.RemoveHostnameUsername(hn, un, bNoMoreNodes); if (rc) { oldXMLConfig.XML_Store(csHKCU_PREF); // However, if no more host/user nodes in this file - delete the // configuration file from the installation directory! if (bNoMoreNodes) { pws_os::DeleteAFile(wsExecDirCfgFile); } bRetVal = true; } // RemoveHostnameUsername } // rc && !bExecCFRO } // MigrateSettings // If this all worked, now copy autoload_filters.xml if it exists and not // already in the new location. // This is ONLY done when we migrate the user's settings. if (bRetVal == true) { bool bALFRO(false); std::wstring wsOldAutoLoadFilters = wsExecDir + L"autoload_filters.xml"; std::wstring wsNewAutoLoadFilters = wsUserCfgDir + L"autoload_filters.xml"; if (pws_os::FileExists(wsOldAutoLoadFilters, bALFRO) && !pws_os::FileExists(wsNewAutoLoadFilters)) { bool bCopyAutoloadFilters = pws_os::CopyAFile(wsOldAutoLoadFilters, wsNewAutoLoadFilters); // If we have copied it, there are no more nodes in the old configuration file // and it isn't read only - delete it from the installation directory if (bCopyAutoloadFilters && bNoMoreNodes && !bALFRO) pws_os::DeleteAFile(wsOldAutoLoadFilters); } } // Migration all done! 
exit: // Clean up newXMLConfig.Unlock(); oldXMLConfig.Unlock(); // Set config file if (bRetVal) PWSprefs::SetConfigFile(wsDefaultCfgFile); return bRetVal; } /** * Following started out as a way to test hi-resolution support without access to a hires monitor. * Now it's a wrapper to support pre-Windows 10 systems as well. * */ UINT WinUtil::GetDPI(HWND hwnd) { static bool inited = false; static FP_GETDPI4SYSTEM fp_getdpi4_system = nullptr; static FP_GETDPI4WINDOW fp_getdpi4_window = nullptr; if (!inited) { auto hUser32 = static_cast<HMODULE>(pws_os::LoadLibrary(L"User32.dll", pws_os::loadLibraryTypes::SYS)); ASSERT(hUser32 != nullptr); if (hUser32 != nullptr) { fp_getdpi4_system = static_cast<FP_GETDPI4SYSTEM>(pws_os::GetFunction(hUser32, "GetDpiForSystem")); fp_getdpi4_window = static_cast<FP_GETDPI4WINDOW>(pws_os::GetFunction(hUser32, "GetDpiForWindow")); inited = true; } } UINT retval = 96; const stringT dbg_dpi = pws_os::getenv("PWS_DPI", false); if (dbg_dpi.empty()) { if (fp_getdpi4_window != nullptr && fp_getdpi4_system != nullptr) retval = (hwnd == nullptr) ? 
fp_getdpi4_system() : fp_getdpi4_window(hwnd); } else { // !dbg_dpi.empty() std::wistringstream iss(dbg_dpi); iss >> retval; } return retval; } void WinUtil::ResizeBitmap(CBitmap& bmp_src, CBitmap& bmp_dst, int dstW, int dstH) { // from https://stackoverflow.com/questions/2770855/how-do-you-scale-a-cbitmap-object BITMAP bm = { 0 }; bmp_src.GetBitmap(&bm); auto size = CSize(bm.bmWidth, bm.bmHeight); CWindowDC wndDC(nullptr); CDC srcDC; srcDC.CreateCompatibleDC(&wndDC); srcDC.SelectObject(&bmp_src); CDC destDC; destDC.CreateCompatibleDC(&wndDC); bmp_dst.CreateCompatibleBitmap(&wndDC, dstW, dstH); destDC.SelectObject(&bmp_dst); destDC.StretchBlt(0, 0, dstW, dstH, &srcDC, 0, 0, size.cx, size.cy, SRCCOPY); } void WinUtil::FixBitmapBackground(CBitmap& bm) { // Change bitmap's {192,192,192} pixels // to current flavor of the month default background // Get how many pixels in the bitmap const COLORREF crCOLOR_3DFACE = GetSysColor(COLOR_3DFACE); BITMAP bmInfo; int rc = bm.GetBitmap(&bmInfo); if (rc == 0) { ASSERT(0); return; } const UINT numPixels(bmInfo.bmHeight * bmInfo.bmWidth); // get a pointer to the pixels DIBSECTION ds; VERIFY(bm.GetObject(sizeof(DIBSECTION), &ds) == sizeof(DIBSECTION)); RGBTRIPLE* pixels = reinterpret_cast<RGBTRIPLE*>(ds.dsBm.bmBits); if (pixels == nullptr) { ASSERT(0); return; } const RGBTRIPLE newbkgrndColourRGB = { GetBValue(crCOLOR_3DFACE), GetGValue(crCOLOR_3DFACE), GetRValue(crCOLOR_3DFACE) }; for (UINT i = 0; i < numPixels; ++i) { if (pixels[i].rgbtBlue == 192 && pixels[i].rgbtGreen == 192 && pixels[i].rgbtRed == 192) { pixels[i] = newbkgrndColourRGB; } } } BOOL WinUtil::LoadScaledBitmap(CBitmap &bitmap, UINT nID, bool fixBckgrnd, HWND hwnd) { CBitmap tmpBitmap; BITMAP bm; BOOL retval = tmpBitmap.Attach( ::LoadImage(::AfxFindResourceHandle(MAKEINTRESOURCE(nID), RT_BITMAP), MAKEINTRESOURCE(nID), IMAGE_BITMAP, 0, 0, (LR_DEFAULTSIZE | LR_CREATEDIBSECTION | LR_SHARED))); if (retval == FALSE) return retval; if (fixBckgrnd) { 
FixBitmapBackground(tmpBitmap); } UINT dpi = GetDPI(hwnd); tmpBitmap.GetBitmap(&bm); int dpiScaledWidth = MulDiv(bm.bmWidth, dpi, 96); int dpiScaledHeight = MulDiv(bm.bmHeight, dpi, 96); WinUtil::ResizeBitmap(tmpBitmap, bitmap, dpiScaledWidth, dpiScaledHeight); tmpBitmap.DeleteObject(); return TRUE; } int WinUtil::GetSystemMetrics(int nIndex, HWND hwnd) { static FP_GETSYSMETRICS4DPI fp_getsysmetrics_4dpi = nullptr; static bool inited = false; if (!inited) { auto hUser32 = static_cast<HMODULE>(pws_os::LoadLibrary(reinterpret_cast<const TCHAR *>(L"User32.dll"), pws_os::loadLibraryTypes::SYS)); ASSERT(hUser32 != nullptr); if (hUser32 != nullptr) { fp_getsysmetrics_4dpi = static_cast<FP_GETSYSMETRICS4DPI>(pws_os::GetFunction(hUser32, "GetSystemMetricsForDpi")); inited = true; } } if (fp_getsysmetrics_4dpi != nullptr) { // Windows 10 or greater UINT dpi = GetDPI(hwnd); return fp_getsysmetrics_4dpi(nIndex, dpi); } else { // server or older than Win10, punt to older API return ::GetSystemMetrics(nIndex); } } bool WinUtil::HasTouchscreen() // for BR1539 workaround { int value = ::GetSystemMetrics(SM_DIGITIZER); return (value != 0); }
5,183
494
<reponame>MaksTuev/ferro package com.agna.ferro.sample.ui.screen.catalog; import android.os.Bundle; import android.os.Handler; import android.support.v4.widget.SwipeRefreshLayout; import android.support.v7.widget.GridLayoutManager; import android.support.v7.widget.RecyclerView; import android.widget.Toast; import com.agna.ferro.mvp.component.ScreenComponent; import com.agna.ferro.mvp.presenter.MvpPresenter; import com.agna.ferro.sample.R; import com.agna.ferro.sample.domain.Book; import com.agna.ferro.sample.ui.base.BaseActivityView; import com.agna.ferro.sample.ui.common.dagger.ActivityModule; import com.agna.ferro.sample.ui.screen.catalog.grid.BookItemListener; import com.agna.ferro.sample.ui.screen.catalog.grid.BooksAdapter; import java.util.List; import javax.inject.Inject; /** * view for Catalog screen */ public class CatalogActivityView extends BaseActivityView { @Inject CatalogPresenter presenter; private Handler handler = new Handler(); private SwipeRefreshLayout swipeRefreshLayout; private RecyclerView booksRw; private BooksAdapter adapter; @Override protected int getContentView() { return R.layout.activity_catalog; } @Override protected ScreenComponent createScreenComponent() { //creating Screen Component, you needn't call ScreenComponent#inject(this) return DaggerCatalogScreenComponent.builder() .appComponent(getAppComponent()) .activityModule(new ActivityModule(getPersistentScreenScope())) .build(); } @Override public MvpPresenter getPresenter() { return presenter; } @Override public String getName() { // name of the screen return "Catalog"; } @Override protected void onCreate(Bundle savedInstanceState, boolean viewRecreated) { super.onCreate(savedInstanceState, viewRecreated); findViews(); initViews(); initGrid(); } public void showLoading() { handler.post(() -> swipeRefreshLayout.setRefreshing(true)); } public void hideLoading() { swipeRefreshLayout.setRefreshing(false); } private void initViews() { swipeRefreshLayout.setOnRefreshListener(() -> 
presenter.reloadData()); } private void initGrid() { int spanCount = getResources().getInteger(R.integer.books_span_count); GridLayoutManager layoutManager = new GridLayoutManager(this, spanCount); booksRw.setLayoutManager(layoutManager); booksRw.setItemAnimator(null); adapter = new BooksAdapter(bookItemListener); booksRw.setAdapter(adapter); } private void findViews() { booksRw = (RecyclerView) findViewById(R.id.catalog_rw); swipeRefreshLayout = (SwipeRefreshLayout) findViewById(R.id.catalog_swr); } private BookItemListener bookItemListener = new BookItemListener() { @Override public void onDownloadClick(Book book) { presenter.downloadBook(book); } @Override public void onReadClick(Book book) { Toast.makeText(CatalogActivityView.this, "Stub", Toast.LENGTH_SHORT).show(); } @Override public void onClick(Book book) { presenter.openBookScreen(book); } }; public void notifyDataChanged() { adapter.notifyDataSetChanged(); } public void updateBooksData(List<Book> books) { adapter.updateBooksData(books); } public void notifyItemChanged(int position) { adapter.notifyItemChanged(position); } }
1,377
5,119
<filename>tests/codegen/call_system.cpp #include "common.h" namespace bpftrace { namespace test { namespace codegen { TEST(codegen, call_system) { test(" kprobe:f { system(\"echo %d\", 100) }", NAME, false); } } // namespace codegen } // namespace test } // namespace bpftrace
117
379
<filename>datavec-data/datavec-data-image/src/test/java/org/datavec/image/transform/ResizeImageTransformTest.java package org.datavec.image.transform; import org.bytedeco.javacv.Frame; import org.datavec.image.data.ImageWritable; import org.junit.Before; import org.junit.Test; import static org.junit.Assert.assertEquals; /** * Tests for ResizeImage * * @author <EMAIL> */ public class ResizeImageTransformTest { @Before public void setUp() throws Exception { } @Test public void testResizeUpscale1() throws Exception { ImageWritable srcImg = TestImageTransform.makeRandomImage(32, 32, 3); ResizeImageTransform transform = new ResizeImageTransform(200, 200); ImageWritable dstImg = transform.transform(srcImg); Frame f = dstImg.getFrame(); assertEquals(f.imageWidth, 200); assertEquals(f.imageHeight, 200); float[] coordinates = {100, 200}; float[] transformed = transform.query(coordinates); assertEquals(200f * 100 / 32, transformed[0], 0); assertEquals(200f * 200 / 32, transformed[1], 0); } @Test public void testResizeDownscale() throws Exception { ImageWritable srcImg = TestImageTransform.makeRandomImage(571, 443, 3); ResizeImageTransform transform = new ResizeImageTransform(200, 200); ImageWritable dstImg = transform.transform(srcImg); Frame f = dstImg.getFrame(); assertEquals(f.imageWidth, 200); assertEquals(f.imageHeight, 200); float[] coordinates = {300, 400}; float[] transformed = transform.query(coordinates); assertEquals(200f * 300 / 443, transformed[0], 0); assertEquals(200f * 400 / 571, transformed[1], 0); } }
667
729
<reponame>probonopd/imagewriter<filename>powersaveblocker.cpp<gh_stars>100-1000 /* * SPDX-License-Identifier: Apache-2.0 * Copyright (C) 2020 Raspberry Pi (Trading) Limited */ #include "powersaveblocker.h" #include <QDebug> #include <string> PowerSaveBlocker::PowerSaveBlocker(QObject *parent) : QObject(parent), _stayingAwake(false) { } PowerSaveBlocker::~PowerSaveBlocker() { if (_stayingAwake) removeBlock(); } void PowerSaveBlocker::applyBlock(const QString &reason) { if (_stayingAwake) return; #ifdef Q_OS_WIN REASON_CONTEXT rc; std::wstring wreason = reason.toStdWString(); rc.Version = POWER_REQUEST_CONTEXT_VERSION; rc.Flags = POWER_REQUEST_CONTEXT_SIMPLE_STRING; rc.Reason.SimpleReasonString = (wchar_t *) wreason.c_str(); _powerRequest = PowerCreateRequest(&rc); if (_powerRequest == INVALID_HANDLE_VALUE) { qDebug() << "Error creating power request:" << GetLastError(); return; } _stayingAwake = PowerSetRequest(_powerRequest, PowerRequestDisplayRequired); if (!_stayingAwake) { qDebug() << "Error running PowerSetRequest():" << GetLastError(); } #endif } void PowerSaveBlocker::removeBlock() { if (!_stayingAwake) return; #ifdef Q_OS_WIN PowerClearRequest(_powerRequest, PowerRequestDisplayRequired); CloseHandle(_powerRequest); #endif }
597
409
#!/usr/bin/env python2 """Execute the tests for micro_razers. The golden test outputs are generated by the script generate_outputs.sh. You have to give the root paths to the source and the binaries as arguments to the program. These are the paths to the directory that contains the 'projects' directory. Usage: run_tests.py SOURCE_ROOT_PATH BINARY_ROOT_PATH """ import logging import os.path import sys # Automagically add util/py_lib to PYTHONPATH environment variable. path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..', 'util', 'py_lib')) sys.path.insert(0, path) import seqan.app_tests as app_tests def main(source_base, binary_base): """Main entry point of the script.""" print 'Executing test for micro_razers' print '===============================' print ph = app_tests.TestPathHelper( source_base, binary_base, 'apps/micro_razers/tests') # tests dir # ============================================================ # Auto-detect the binary path. # ============================================================ path_to_program = app_tests.autolocateBinary( binary_base, 'apps/micro_razers', 'micro_razers') # ============================================================ # Built TestConf list. # ============================================================ # Build list with TestConf objects, analoguely to how the output # was generated in generate_outputs.sh. conf_list = [] # ============================================================ # First Section. # ============================================================ # Run with default options. 
conf = app_tests.TestConf( program=path_to_program, redir_stdout=ph.outFile('se-adeno-reads36_1_default.stdout'), args=[ph.inFile('adeno-genome.fa'), ph.inFile('adeno-reads36_1.fa'), '-o', ph.outFile('se-adeno-reads36_1_default.razers' )], to_diff=[(ph.inFile('se-adeno-reads36_1_default.razers' ), ph.outFile('se-adeno-reads36_1_default.razers' )), (ph.inFile('se-adeno-reads36_1_default.stdout' ), ph.outFile('se-adeno-reads36_1_default.stdout' ))]) conf_list.append(conf) # Run with different seed lengths for sl in range(14,21): conf = app_tests.TestConf( program=path_to_program, redir_stdout=ph.outFile('se-adeno-reads36_1_sl%d.stdout' % sl), args=['-sL', str(sl), ph.inFile('adeno-genome.fa'), ph.inFile('adeno-reads36_1.fa'), '-o', ph.outFile('se-adeno-reads36_1_sl%d.razers' % sl)], to_diff=[(ph.inFile('se-adeno-reads36_1_sl%d.razers' % sl), ph.outFile('se-adeno-reads36_1_sl%d.razers' % sl)), (ph.inFile('se-adeno-reads36_1_sl%d.stdout' % sl), ph.outFile('se-adeno-reads36_1_sl%d.stdout' % sl))]) conf_list.append(conf) conf = app_tests.TestConf( program=path_to_program, redir_stdout=ph.outFile('se-adeno-reads36_1_sl%d_sam.stdout' % sl), args=['-sL', str(sl), ph.inFile('adeno-genome.fa'), ph.inFile('adeno-reads36_1.fa'), '-o', ph.outFile('se-adeno-reads36_1_sl%d.sam' % sl)], to_diff=[(ph.inFile('se-adeno-reads36_1_sl%d.sam' % sl), ph.outFile('se-adeno-reads36_1_sl%d.sam' % sl)), (ph.inFile('se-adeno-reads36_1_sl%d_sam.stdout' % sl), ph.outFile('se-adeno-reads36_1_sl%d_sam.stdout' % sl))]) conf_list.append(conf) # allow error in seed conf = app_tests.TestConf( program=path_to_program, redir_stdout=ph.outFile('se-adeno-reads36_1_sl%d_se.stdout' % sl), args=['-sL', str(sl), '-sE', ph.inFile('adeno-genome.fa'), ph.inFile('adeno-reads36_1.fa'), '-o', ph.outFile('se-adeno-reads36_1_sl%d_se.razers' % sl)], to_diff=[(ph.inFile('se-adeno-reads36_1_sl%d_se.razers' % sl), ph.outFile('se-adeno-reads36_1_sl%d_se.razers' % sl)), (ph.inFile('se-adeno-reads36_1_sl%d_se.stdout' % sl), 
ph.outFile('se-adeno-reads36_1_sl%d_se.stdout' % sl))]) conf_list.append(conf) # change maxhits parameter conf = app_tests.TestConf( program=path_to_program, redir_stdout=ph.outFile('se-adeno-reads36_1_sl18_m20_pa.stdout' ), args=['-sL', str(18), '-m', str(20), '-pa', ph.inFile('adeno-genome.fa'), ph.inFile('adeno-reads36_1.fa'), '-o', ph.outFile('se-adeno-reads36_1_sl18_m20_pa.razers' )], to_diff=[(ph.inFile('se-adeno-reads36_1_sl18_m20_pa.razers' ), ph.outFile('se-adeno-reads36_1_sl18_m20_pa.razers' )), (ph.inFile('se-adeno-reads36_1_sl18_m20_pa.stdout' ), ph.outFile('se-adeno-reads36_1_sl18_m20_pa.stdout' ))]) conf_list.append(conf) # ============================================================ # Execute the tests. # ============================================================ failures = 0 for conf in conf_list: res = app_tests.runTest(conf) # Output to the user. print ' '.join(['micro_razers'] + conf.args), if res: print 'OK' else: failures += 1 print 'FAILED' # Cleanup. ph.deleteTempDir() print '==============================' print ' total tests: %d' % len(conf_list) print ' failed tests: %d' % failures print 'successful tests: %d' % (len(conf_list) - failures) print '==============================' # Compute and return return code. return failures != 0 if __name__ == '__main__': sys.exit(app_tests.main(main))
2,902
892
{ "schema_version": "1.2.0", "id": "GHSA-2qh3-cx4w-cf3x", "modified": "2022-05-13T01:13:58Z", "published": "2022-05-13T01:13:58Z", "aliases": [ "CVE-2018-18809" ], "details": "The default server implementation of TIBCO Software Inc.'s TIBCO JasperReports Library, TIBCO JasperReports Library Community Edition, TIBCO JasperReports Library for ActiveMatrix BPM, TIBCO JasperReports Server, TIBCO JasperReports Server Community Edition, TIBCO JasperReports Server for ActiveMatrix BPM, TIBCO Jaspersoft for AWS with Multi-Tenancy, and TIBCO Jaspersoft Reporting and Analytics for AWS contains a directory-traversal vulnerability that may theoretically allow web server users to access contents of the host system. Affected releases are TIBCO Software Inc.'s TIBCO JasperReports Library: versions up to and including 6.3.4; 6.4.1; 6.4.2; 6.4.21; 7.1.0; 7.2.0, TIBCO JasperReports Library Community Edition: versions up to and including 6.7.0, TIBCO JasperReports Library for ActiveMatrix BPM: versions up to and including 6.4.21, TIBCO JasperReports Server: versions up to and including 6.3.4; 6.4.0; 6.4.1; 6.4.2; 6.4.3; 7.1.0, TIBCO JasperReports Server Community Edition: versions up to and including 6.4.3; 7.1.0, TIBCO JasperReports Server for ActiveMatrix BPM: versions up to and including 6.4.3, TIBCO Jaspersoft for AWS with Multi-Tenancy: versions up to and including 7.1.0, TIBCO Jaspersoft Reporting and Analytics for AWS: versions up to and including 7.1.0.", "severity": [ { "type": "CVSS_V3", "score": "CVSS:3.0/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:N/A:N" } ], "affected": [ ], "references": [ { "type": "ADVISORY", "url": "https://nvd.nist.gov/vuln/detail/CVE-2018-18809" }, { "type": "WEB", "url": "https://cybersecurityworks.com/zerodays/cve-2018-18809-tibco.html" }, { "type": "WEB", "url": "https://security.elarlang.eu/cve-2018-18809-path-traversal-in-tibco-jaspersoft.html" }, { "type": "WEB", "url": 
"https://www.tibco.com/support/advisories/2019/03/tibco-security-advisory-march-6-2019-tibco-jasperreports-library-2018-18809" }, { "type": "WEB", "url": "http://packetstormsecurity.com/files/154406/Tibco-JasperSoft-Path-Traversal.html" }, { "type": "WEB", "url": "http://seclists.org/fulldisclosure/2019/Sep/17" }, { "type": "WEB", "url": "http://www.securityfocus.com/bid/107351" }, { "type": "WEB", "url": "http://www.tibco.com/services/support/advisories" } ], "database_specific": { "cwe_ids": [ "CWE-22" ], "severity": "MODERATE", "github_reviewed": false } }
1,121
678
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #import "UIFont.h" @interface UIFont (Extend) + (void)loadOldFontSet:(unsigned int)arg1; + (void)copyFontSetToLocalInfo; + (void)setIpadClassic:(_Bool)arg1; + (unsigned int)getWebviewFontScale; + (void)setAppFontSize:(unsigned int)arg1 andBackTab:(int)arg2; + (void)setAppFontSize:(unsigned int)arg1; + (double)getNormalFontSizeByLevel:(unsigned int)arg1; + (double)getNormalFontSizeByLevelForTimeline:(unsigned int)arg1; + (double)getNormalFontSize:(int)arg1; + (double)getNormalFontSize; + (_Bool)useDynamicSize; + (_Bool)useDynamicSize:(int)arg1; + (double)dynamicFontSize:(double)arg1; + (double)dynamicFontSize:(double)arg1 forModule:(int)arg2; + (double)dynamicLength:(double)arg1; + (double)dynamicLength:(double)arg1 forModule:(int)arg2; + (id)dynamicBoldSystemFontOfSize:(double)arg1; + (id)dynamicSystemFontOfSize:(double)arg1; + (id)dynamicBoldSystemFontOfSize:(double)arg1 forModule:(int)arg2; + (id)dynamicSystemFontOfSize:(double)arg1 forModule:(int)arg2; + (id)dynamicSettingFont:(double)arg1; + (id)settingFont:(double)arg1; + (id)systemLittleFont; + (id)systemSmallFont; + (id)systemFont; + (id)systemBoldBigFont; + (id)fontWechatNumOfSize:(double)arg1; + (id)fontMonacoOfSize:(double)arg1; + (id)mediumSystemFontOfSize:(double)arg1; + (id)lightSystemFontOfSize:(double)arg1; - (double)topMargin; @end
554
1,080
<filename>src/protocol/admin/format.c<gh_stars>1000+ #include "format.h" #include <cc_print.h> size_t print_stats(char *buf, size_t cap, struct metric *metrics, unsigned int nmetric) { size_t offset = 0; /* TODO: report error if running out of space in buf */ for (int i = 0; i < nmetric; ++i) { offset += metric_print(buf + offset, cap - offset, METRIC_PRINT_FMT, &metrics[i]); } offset += cc_scnprintf(buf + offset, cap - offset, METRIC_END); return offset; }
215
309
"""Complex type annotations.""" from pathlib import Path from typing import List, Tuple, Union import aria2p PathOrStr = Union[Path, str] OptionsType = Union["aria2p.options.Options", dict] OperationResult = Union[bool, "aria2p.client.ClientException"] CallsType = List[Tuple[str, List[str], Union[str, int]]] Multicalls2Type = List[Tuple[str, List[str]]]
120
2,056
<filename>driver/c++/src/driver_api.cc #include <stdio.h> #include <stdlib.h> #include <string.h> #include <unistd.h> #include <sys/ioctl.h> #include <sys/poll.h> #include <sys/time.h> #include <sys/shm.h> #include <errno.h> #include <iostream> #include <vector> #include <string> #include "qconf_log.h" #include "qconf_shm.h" #include "qconf_msg.h" #include "qconf_errno.h" #include "qconf_format.h" #include "driver_api.h" using namespace std; static qhasharr_t *_qconf_hashtbl = NULL; static key_t _qconf_hashtbl_key = QCONF_DEFAULT_SHM_KEY; static int _qconf_msqid = QCONF_INVALID_SEM_ID; static key_t _qconf_msqid_key = QCONF_DEFAULT_MSG_QUEUE_KEY; static int init_shm(); static int init_msg(); static int send_msg_to_agent(int msqid, const string &idc, const string &path, char data_type); static int qconf_get_(const string &path, string &tblval, char dtype, const string &idc, int flags); int init_qconf_env() { int ret = init_shm(); if (QCONF_OK != ret) return ret; ret = init_msg(); return ret; } static int init_shm() { int ret = QCONF_OK; if (NULL != _qconf_hashtbl) return ret; ret = init_hash_tbl(_qconf_hashtbl, _qconf_hashtbl_key, 0444, SHM_RDONLY); if (QCONF_OK != ret) { LOG_FATAL_ERR("Failed to init hash table! key:%#x", _qconf_hashtbl_key); return ret; } return ret; } static int init_msg() { int ret = QCONF_OK; if (QCONF_INVALID_SEM_ID != _qconf_msqid) return ret; ret = init_msg_queue(_qconf_msqid_key, _qconf_msqid); if (QCONF_OK != ret) { LOG_FATAL_ERR("Failed to init msg queue! 
key:%#x", _qconf_msqid_key); return ret; } return ret; } /** * Get child nodes from hash table(share memory) */ int qconf_get_children(const string &path, string_vector_t &nodes, const string &idc, int flags) { if (path.empty()) return QCONF_ERR_PARAM; string tblval; int ret = qconf_get_(path, tblval, QCONF_DATA_TYPE_SERVICE, idc, flags); if (QCONF_OK != ret) return ret; ret = tblval_to_chdnodeval(tblval, nodes); return ret; } int qconf_get_batchnode(const string &path, qconf_batch_nodes &bnodes, const string &idc, int flags) { if (path.empty()) return QCONF_ERR_PARAM; string buf; string child_path; int ret = QCONF_OK; string_vector_t nodes; memset(&nodes, 0, sizeof(string_vector_t)); ret = qconf_get_batchnode_keys(path, nodes, idc, flags); if (QCONF_OK == ret) { bnodes.count = nodes.count; if (0 == bnodes.count) { bnodes.nodes = NULL; return ret; } bnodes.nodes = (qconf_node*) calloc(bnodes.count, sizeof(qconf_node)); if (NULL == bnodes.nodes) { LOG_ERR("Failed to malloc bnodes->nodes! errno:%d", errno); free_string_vector(nodes, nodes.count); bnodes.count = 0; return QCONF_ERR_MEM; } for (int i = 0; i < nodes.count; i++) { child_path = path + "/" + nodes.data[i]; ret = qconf_get(child_path, buf, idc, flags); if (QCONF_OK == ret) { bnodes.nodes[i].value = strndup(buf.c_str(), buf.size() + 1); if (NULL == bnodes.nodes[i].value) { LOG_ERR("Failed to strdup value of path:%s! errno:%d", child_path.c_str(), errno); free_string_vector(nodes, nodes.count); free_qconf_batch_nodes(&bnodes, i); return QCONF_ERR_MEM; } bnodes.nodes[i].key = nodes.data[i]; nodes.data[i] = NULL; } else { LOG_ERR("Failed to call qconf_get! ret:%d", ret); free_string_vector(nodes, nodes.count); free_qconf_batch_nodes(&bnodes, i); return ret; } } free_string_vector(nodes, 0); } else { LOG_ERR("Failed to get batch node keys! 
path:%s, idc:%s ret:%d", path.c_str(), idc.c_str(), ret); } return ret; } int qconf_get_batchnode_keys(const string &path, string_vector_t &nodes, const string &idc, int flags) { if (path.empty()) return QCONF_ERR_PARAM; string tblval; int ret = qconf_get_(path, tblval, QCONF_DATA_TYPE_BATCH_NODE, idc, flags); if (QCONF_OK != ret) return ret; ret = tblval_to_batchnodeval(tblval, nodes); return ret; } /** * Get child nodes from hash table(share memory) */ int qconf_get(const string &path, string &buf, const string &idc, int flags) { if (path.empty()) return QCONF_ERR_PARAM; string tblval; int ret = qconf_get_(path, tblval, QCONF_DATA_TYPE_NODE, idc, flags); if (QCONF_OK != ret) return ret; ret = tblval_to_nodeval(tblval, buf); return ret; } static int qconf_get_(const string &path, string &tblval, char dtype, const string &idc, int flags) { int count = 0; string tblkey; int ret = QCONF_OK; ret = init_qconf_env(); if (QCONF_OK != ret) return ret; string tmp_idc(idc); if (idc.empty()) { ret = qconf_get_localidc(_qconf_hashtbl, tmp_idc); if (QCONF_OK != ret) { LOG_ERR("Failed to get local idc! ret:%d", ret); return ret; } } ret = serialize_to_tblkey(dtype, tmp_idc, path, tblkey); if (QCONF_OK != ret) return ret; // get value from tbl ret = hash_tbl_get(_qconf_hashtbl, tblkey, tblval); if (QCONF_OK == ret) return ret; // Not get batch keys from share memory, then send message to agent int ret_snd = send_msg_to_agent(_qconf_msqid, tmp_idc, path, dtype); if (QCONF_OK != ret_snd) { LOG_ERR("Failed to send message to agent, ret:%d", ret_snd); return ret_snd; } // If not wait, then return directly if (QCONF_NOWAIT == flags) return ret; while (count < QCONF_MAX_GET_TIMES) { usleep(5000); count++; ret = hash_tbl_get(_qconf_hashtbl, tblkey, tblval); if (QCONF_OK == ret) { LOG_ERR("Wait time:%d*5ms, type:%c, idc:%s, path:%s", count, dtype, tmp_idc.c_str(), path.c_str()); return ret; } } if (count >= QCONF_MAX_GET_TIMES) { LOG_FATAL_ERR("Failed to get value! 
wait time:%d*5ms, type:%c, idc:%s, path:%s, ret:%d", count, dtype, tmp_idc.c_str(), path.c_str(), ret); } return ret; } static int send_msg_to_agent(int msqid, const string &idc, const string &path, char data_type) { string tblkey; serialize_to_tblkey(data_type, idc, path, tblkey); int ret = send_msg(msqid, tblkey); return ret; }
3,427
1,679
/** * \file * * \brief SPI related functionality declaration. * * Copyright (C) 2015 Atmel Corporation. All rights reserved. * * \asf_license_start * * \page License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. The name of Atmel may not be used to endorse or promote products derived * from this software without specific prior written permission. * * 4. This software may only be redistributed and used in connection with an * Atmel microcontroller product. * * THIS SOFTWARE IS PROVIDED BY ATMEL "AS IS" AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT ARE * EXPRESSLY AND SPECIFICALLY DISCLAIMED. IN NO EVENT SHALL ATMEL BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * \asf_license_stop * */ #ifndef _HPL_SPI_S_SYNC_H_INCLUDED #define _HPL_SPI_S_SYNC_H_INCLUDED #include <hpl_spi_sync.h> /** * \addtogroup hpl_spi HPL SPI * *@{ */ #ifdef __cplusplus extern "C" { #endif /** Uses common SPI async device driver. 
*/ #define _spi_s_sync_dev _spi_sync_dev /** * \brief Initialize SPI for access without interrupts * It will load default hardware configuration and software struct. * \param[in, out] dev Pointer to the SPI device instance. * \param[in] hw Pointer to the hardware base. * \return Operation status. * \retval ERR_INVALID_ARG Input parameter problem. * \retval ERR_BUSY SPI hardware not ready (resetting). * \retval ERR_DENIED SPI has been enabled. * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_init(struct _spi_s_sync_dev *dev, void *const hw); /** * \brief Initialize SPI for access with interrupts * Disable, reset the hardware and the software struct. * \param[in, out] dev Pointer to the SPI device instance. * \return Operation status. * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_deinit(struct _spi_s_sync_dev *dev); /** * \brief Enable SPI for access without interrupts * \param[in, out] dev Pointer to the SPI device instance. * \return Operation status. * \retval ERR_BUSY SPI hardware not ready (resetting). * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_enable(struct _spi_s_sync_dev *dev); /** * \brief Disable SPI for access without interrupts * Disable SPI. Deactivate all CS pins if works as master. * \param[in, out] dev Pointer to the SPI device instance. * \return Operation status. * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_disable(struct _spi_s_sync_dev *dev); /** * \brief Set SPI transfer mode * Set SPI transfer mode (\ref spi_transfer_mode), * which controls clock polarity and clock phase. * Mode 0: leading edge is rising edge, data sample on leading edge. * Mode 1: leading edge is rising edge, data sample on trailing edge. * Mode 2: leading edge is falling edge, data sample on leading edge. * Mode 3: leading edge is falling edge, data sample on trailing edge. * \param[in, out] dev Pointer to the SPI device instance. * \param[in] mode The SPI transfer mode. * \return Operation status. 
* \retval ERR_BUSY SPI is not ready to accept new setting. * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_set_mode(struct _spi_s_sync_dev *dev, const enum spi_transfer_mode mode); /** * \brief Set SPI baudrate * \param[in, out] dev Pointer to the SPI device instance. * \param[in] char_size The character size, see \ref spi_char_size. * \return Operation status. * \retval ERR_INVALID_ARG The character size is not supported. * \retval ERR_BUSY SPI is not ready to accept new setting. * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_set_char_size(struct _spi_s_sync_dev *dev, const enum spi_char_size char_size); /** * \brief Set SPI data order * \param[in, out] dev Pointer to the SPI device instance. * \param[in] dord SPI data order (LSB/MSB first). * \return Operation status. * \retval ERR_INVALID_ARG The character size is not supported. * \retval ERR_BUSY SPI is not ready to accept new setting. * \retval 0 Operation done successfully. */ int32_t _spi_s_sync_set_data_order(struct _spi_s_sync_dev *dev, const enum spi_data_order dord); /** * \brief Enable interrupt on character output * * Enable interrupt when a new character can be written * to the SPI device. * * \param[in] dev Pointer to the SPI device instance * \param[in] state true = enable output interrupt * false = disable output interrupt * * \return Status code * \retval 0 Ok status */ int32_t _spi_s_sync_enable_tx(struct _spi_s_sync_dev *dev, bool state); /** * \brief Enable interrupt on character input * * Enable interrupt when a new character is ready to be * read from the SPI device. * * \param[in] dev Pointer to the SPI device instance * \param[in] state true = enable input interrupts * false = disable input interrupt * * \return Status code * \retval 0 OK Status */ int32_t _spi_s_sync_enable_rx(struct _spi_s_sync_dev *dev, bool state); /** * \brief Read one character to SPI device instance * \param[in, out] dev Pointer to the SPI device instance. 
* * \return Character read from SPI module */ uint16_t _spi_s_sync_read_one(struct _spi_s_sync_dev *dev); /** * \brief Write one character to assigned buffer * \param[in, out] dev Pointer to the SPI device instance. * \param[in] data * * \return Status code of write operation * \retval 0 Write operation OK */ int32_t _spi_s_sync_write_one(struct _spi_s_sync_dev *dev, uint16_t data); /** * \brief Check if TX ready * * \param[in] dev Pointer to the SPI device instance * * \return TX ready state * \retval true TX ready * \retval false TX not ready */ bool _spi_s_sync_is_tx_ready(struct _spi_s_sync_dev *dev); /** * \brief Check if RX character ready * * \param[in] dev Pointer to the SPI device instance * * \return RX character ready state * \retval true RX character ready * \retval false RX character not ready */ bool _spi_s_sync_is_rx_ready(struct _spi_s_sync_dev *dev); /** * \brief Check if SS deactiviation detected * * \param[in] dev Pointer to the SPI device instance * * \return SS deactiviation state * \retval true SS deactiviation detected * \retval false SS deactiviation not detected */ bool _spi_s_sync_is_ss_deactivated(struct _spi_s_sync_dev *dev); /** * \brief Check if error is detected * * \param[in] dev Pointer to the SPI device instance * * \return Error detection state * \retval true Error detected * \retval false Error not detected */ bool _spi_s_sync_is_error(struct _spi_s_sync_dev *dev); #ifdef __cplusplus } #endif /**@}*/ #endif /* ifndef _HPL_SPI_S_SYNC_H_INCLUDED */
2,528
1,444
package mage.cards.b; import java.util.UUID; import mage.MageInt; import mage.abilities.Ability; import mage.abilities.common.BeginningOfEndStepTriggeredAbility; import mage.abilities.common.BeginningOfUpkeepTriggeredAbility; import mage.abilities.common.SimpleStaticAbility; import mage.abilities.condition.Condition; import mage.abilities.condition.common.CardsInControllerGraveyardCondition; import mage.abilities.decorator.ConditionalContinuousEffect; import mage.abilities.effects.ContinuousEffect; import mage.abilities.effects.common.ExileCardFromOwnGraveyardControllerEffect; import mage.abilities.effects.common.MillCardsControllerEffect; import mage.abilities.effects.common.continuous.BoostSourceEffect; import mage.abilities.effects.common.continuous.GainAbilitySourceEffect; import mage.abilities.keyword.FlyingAbility; import mage.cards.CardImpl; import mage.cards.CardSetInfo; import mage.constants.*; /** * * @author cbt33, plopman (Immortal Coil) */ public final class Bloodcurdler extends CardImpl { public Bloodcurdler(UUID ownerId, CardSetInfo setInfo) { super(ownerId,setInfo,new CardType[]{CardType.CREATURE},"{1}{B}"); this.subtype.add(SubType.HORROR); this.power = new MageInt(1); this.toughness = new MageInt(1); // Flying this.addAbility(FlyingAbility.getInstance()); // At the beginning of your upkeep, put the top card of your library into your graveyard. this.addAbility(new BeginningOfUpkeepTriggeredAbility(Zone.BATTLEFIELD, new MillCardsControllerEffect(1), TargetController.YOU, false)); Condition thresholdCondition = new CardsInControllerGraveyardCondition(7); // Threshold - As long as seven or more cards are in your graveyard, Bloodcurdler gets +1/+1 and has "At the beginning of your end step, exile two cards from your graveyard." 
Ability thresholdAbility = new SimpleStaticAbility(Zone.BATTLEFIELD, new ConditionalContinuousEffect(new BoostSourceEffect(1, 1, Duration.WhileOnBattlefield), thresholdCondition, "If seven or more cards are in your graveyard, {this} gets +1/+1")); ContinuousEffect effect = new GainAbilitySourceEffect(new BeginningOfEndStepTriggeredAbility(new ExileCardFromOwnGraveyardControllerEffect(2), TargetController.YOU, false)); thresholdAbility.addEffect(new ConditionalContinuousEffect(effect, thresholdCondition, "and has \"At the beginning of your end step, exile two cards from your graveyard.\"")); thresholdAbility.setAbilityWord(AbilityWord.THRESHOLD); this.addAbility(thresholdAbility); } private Bloodcurdler(final Bloodcurdler card) { super(card); } @Override public Bloodcurdler copy() { return new Bloodcurdler(this); } }
895
5,535
//--------------------------------------------------------------------------- // Greenplum Database // Copyright (C) 2013 VMware, Inc. or its affiliates. // // @filename: // CXformJoin2IndexApply.cpp // // @doc: // Implementation of Inner/Outer Join to Apply transform //--------------------------------------------------------------------------- #include "gpopt/xforms/CXformJoin2IndexApply.h" #include "gpos/base.h" #include "gpopt/base/COptCtxt.h" #include "gpopt/base/CUtils.h" #include "gpopt/operators/CLogicalApply.h" #include "gpopt/operators/CLogicalCTEAnchor.h" #include "gpopt/operators/CLogicalDynamicGet.h" #include "gpopt/operators/CLogicalIndexApply.h" #include "gpopt/operators/CLogicalJoin.h" #include "gpopt/operators/CLogicalSelect.h" #include "gpopt/operators/CLogicalUnionAll.h" #include "gpopt/operators/CNormalizer.h" #include "gpopt/operators/CPredicateUtils.h" #include "gpopt/xforms/CSubqueryHandler.h" #include "gpopt/xforms/CXformUtils.h" #include "naucrates/md/IMDIndex.h" using namespace gpmd; using namespace gpopt; //--------------------------------------------------------------------------- // @function: // CXformJoin2IndexApply::Exfp // // @doc: // Compute xform promise for a given expression handle; // //--------------------------------------------------------------------------- CXform::EXformPromise CXformJoin2IndexApply::Exfp(CExpressionHandle &exprhdl) const { if (0 == exprhdl.DeriveUsedColumns(2)->Size() || exprhdl.DeriveHasSubquery(2) || exprhdl.HasOuterRefs()) { return CXform::ExfpNone; } return CXform::ExfpHigh; } //--------------------------------------------------------------------------- // @function: // CXformJoin2IndexApply::ComputeColumnSets // // @doc: // Based on the inner and the scalar expression, it computes scalar expression // columns, outer references and required columns. // Caller does not take ownership of ppcrsScalarExpr. // Caller takes ownership of ppcrsOuterRefs and ppcrsReqd. 
// //--------------------------------------------------------------------------- void CXformJoin2IndexApply::ComputeColumnSets(CMemoryPool *mp, CExpression *pexprInner, CExpression *pexprScalar, CColRefSet **ppcrsScalarExpr, CColRefSet **ppcrsOuterRefs, CColRefSet **ppcrsReqd) { CColRefSet *pcrsInnerOutput = pexprInner->DeriveOutputColumns(); *ppcrsScalarExpr = pexprScalar->DeriveUsedColumns(); *ppcrsOuterRefs = GPOS_NEW(mp) CColRefSet(mp, **ppcrsScalarExpr); (*ppcrsOuterRefs)->Difference(pcrsInnerOutput); *ppcrsReqd = GPOS_NEW(mp) CColRefSet(mp); (*ppcrsReqd)->Include(pcrsInnerOutput); (*ppcrsReqd)->Include(*ppcrsScalarExpr); (*ppcrsReqd)->Difference(*ppcrsOuterRefs); } //--------------------------------------------------------------------------- // @function: // CXformJoin2IndexApply::CreateFullIndexApplyAlternatives // // @doc: // Helper to add IndexApply expression to given xform results container // //--------------------------------------------------------------------------- void CXformJoin2IndexApply::CreateHomogeneousIndexApplyAlternatives( CMemoryPool *mp, COperator *joinOp, CExpression *pexprOuter, CExpression *pexprInner, CExpression *pexprScalar, CExpression *origJoinPred, CExpression *nodesToInsertAboveIndexGet, CExpression *endOfNodesToInsertAboveIndexGet, CTableDescriptor *ptabdescInner, CXformResult *pxfres, IMDIndex::EmdindexType emdtype) const { GPOS_ASSERT(nullptr != pexprOuter); GPOS_ASSERT(nullptr != pexprInner); GPOS_ASSERT(nullptr != pexprScalar); GPOS_ASSERT(nullptr != ptabdescInner); GPOS_ASSERT(nullptr != pxfres); GPOS_ASSERT(IMDIndex::EmdindBtree == emdtype || IMDIndex::EmdindBitmap == emdtype); const ULONG ulIndices = ptabdescInner->IndexCount(); if (0 == ulIndices) { return; } // derive the scalar and relational properties to build set of required columns CColRefSet *pcrsScalarExpr = nullptr; CColRefSet *outer_refs = nullptr; CColRefSet *pcrsReqd = nullptr; ComputeColumnSets(mp, pexprInner, pexprScalar, &pcrsScalarExpr, &outer_refs, 
&pcrsReqd); if (IMDIndex::EmdindBtree == emdtype) { CreateHomogeneousBtreeIndexApplyAlternatives( mp, joinOp, pexprOuter, pexprInner, pexprScalar, origJoinPred, nodesToInsertAboveIndexGet, endOfNodesToInsertAboveIndexGet, ptabdescInner, pcrsScalarExpr, outer_refs, pcrsReqd, ulIndices, pxfres); } else { CreateHomogeneousBitmapIndexApplyAlternatives( mp, joinOp, pexprOuter, pexprInner, pexprScalar, origJoinPred, nodesToInsertAboveIndexGet, endOfNodesToInsertAboveIndexGet, ptabdescInner, outer_refs, pcrsReqd, pxfres); } //clean-up pcrsReqd->Release(); outer_refs->Release(); } //--------------------------------------------------------------------------- // @function: // CXformJoin2IndexApply::CreateHomogeneousBtreeIndexApplyAlternatives // // @doc: // Helper to add IndexApply expression to given xform results container // for homogeneous b-tree indexes // //--------------------------------------------------------------------------- void CXformJoin2IndexApply::CreateHomogeneousBtreeIndexApplyAlternatives( CMemoryPool *mp, COperator *joinOp, CExpression *pexprOuter, CExpression *pexprInner, CExpression *pexprScalar, CExpression *origJoinPred, CExpression *nodesToInsertAboveIndexGet, CExpression *endOfNodesToInsertAboveIndexGet, CTableDescriptor *ptabdescInner, CColRefSet *pcrsScalarExpr, CColRefSet *outer_refs, CColRefSet *pcrsReqd, ULONG ulIndices, CXformResult *pxfres) { // array of expressions in the scalar expression CExpressionArray *pdrgpexpr = CPredicateUtils::PdrgpexprConjuncts(mp, pexprScalar); GPOS_ASSERT(pdrgpexpr->Size() > 0); // find the indexes whose included columns meet the required columns CMDAccessor *md_accessor = COptCtxt::PoctxtFromTLS()->Pmda(); const IMDRelation *pmdrel = md_accessor->RetrieveRel(ptabdescInner->MDId()); for (ULONG ul = 0; ul < ulIndices; ul++) { IMDId *pmdidIndex = pmdrel->IndexMDidAt(ul); const IMDIndex *pmdindex = md_accessor->RetrieveIndex(pmdidIndex); CreateAlternativesForBtreeIndex( mp, joinOp, pexprOuter, pexprInner, 
origJoinPred, nodesToInsertAboveIndexGet, endOfNodesToInsertAboveIndexGet, md_accessor, pdrgpexpr, pcrsScalarExpr, outer_refs, pcrsReqd, pmdrel, pmdindex, pxfres); } //clean-up pdrgpexpr->Release(); } //--------------------------------------------------------------------------- // @function: // CXformJoin2IndexApply::CreateAlternativesForBtreeIndex // // @doc: // Helper to add IndexApply expression to given xform results container // for homogeneous b-tree indexes. // //--------------------------------------------------------------------------- void CXformJoin2IndexApply::CreateAlternativesForBtreeIndex( CMemoryPool *mp, COperator *joinOp, CExpression *pexprOuter, CExpression *pexprInner, CExpression *origJoinPred, CExpression *nodesToInsertAboveIndexGet, CExpression *endOfNodesToInsertAboveIndexGet, CMDAccessor *md_accessor, CExpressionArray *pdrgpexprConjuncts, CColRefSet *pcrsScalarExpr, CColRefSet *outer_refs, CColRefSet *pcrsReqd, const IMDRelation *pmdrel, const IMDIndex *pmdindex, CXformResult *pxfres) { CExpression *pexprLogicalIndexGet = CXformUtils::PexprLogicalIndexGet( mp, md_accessor, pexprInner, joinOp->UlOpId(), pdrgpexprConjuncts, pcrsReqd, pcrsScalarExpr, outer_refs, pmdindex, pmdrel); if (nullptr != pexprLogicalIndexGet) { // second child has residual predicates, create an apply of outer and inner // and add it to xform results CColRefArray *colref_array = outer_refs->Pdrgpcr(mp); CExpression *indexGetWithOptionalSelect = pexprLogicalIndexGet; if (COperator::EopLogicalDynamicGet == pexprInner->Pop()->Eopid()) { indexGetWithOptionalSelect = CXformUtils::PexprRedundantSelectForDynamicIndex( mp, pexprLogicalIndexGet); pexprLogicalIndexGet->Release(); } CExpression *rightChildOfApply = CXformUtils::AddALinearStackOfUnaryExpressions( mp, indexGetWithOptionalSelect, nodesToInsertAboveIndexGet, endOfNodesToInsertAboveIndexGet); BOOL isOuterJoin = false; switch (joinOp->Eopid()) { case COperator::EopLogicalInnerJoin: isOuterJoin = false; break; case 
COperator::EopLogicalLeftOuterJoin: isOuterJoin = true; break; default: // this type of join operator is not supported return; } pexprOuter->AddRef(); CExpression *pexprIndexApply = GPOS_NEW(mp) CExpression( mp, GPOS_NEW(mp) CLogicalIndexApply(mp, colref_array, isOuterJoin, origJoinPred), pexprOuter, rightChildOfApply, CPredicateUtils::PexprConjunction(mp, nullptr /*pdrgpexpr*/)); pxfres->Add(pexprIndexApply); } } //--------------------------------------------------------------------------- // @function: // CXformJoin2IndexApply::CreateHomogeneousBitmapIndexApplyAlternatives // // @doc: // Helper to add IndexApply expression to given xform results container // for homogeneous bitmap indexes. // //--------------------------------------------------------------------------- void CXformJoin2IndexApply::CreateHomogeneousBitmapIndexApplyAlternatives( CMemoryPool *mp, COperator *joinOp, CExpression *pexprOuter, CExpression *pexprInner, CExpression *pexprScalar, CExpression *origJoinPred, CExpression *nodesToInsertAboveIndexGet, CExpression *endOfNodesToInsertAboveIndexGet, CTableDescriptor *ptabdescInner, CColRefSet *outer_refs, CColRefSet *pcrsReqd, CXformResult *pxfres) { CLogical *popGet = CLogical::PopConvert(pexprInner->Pop()); CExpression *pexprLogicalIndexGet = CXformUtils::PexprBitmapTableGet( mp, popGet, joinOp->UlOpId(), ptabdescInner, pexprScalar, outer_refs, pcrsReqd); if (nullptr != pexprLogicalIndexGet) { // second child has residual predicates, create an apply of outer and inner // and add it to xform results CColRefArray *colref_array = outer_refs->Pdrgpcr(mp); CExpression *indexGetWithOptionalSelect = pexprLogicalIndexGet; if (COperator::EopLogicalDynamicGet == popGet->Eopid()) { indexGetWithOptionalSelect = CXformUtils::PexprRedundantSelectForDynamicIndex( mp, pexprLogicalIndexGet); pexprLogicalIndexGet->Release(); } CExpression *rightChildOfApply = CXformUtils::AddALinearStackOfUnaryExpressions( mp, indexGetWithOptionalSelect, 
nodesToInsertAboveIndexGet, endOfNodesToInsertAboveIndexGet); BOOL isOuterJoin = false; switch (joinOp->Eopid()) { case COperator::EopLogicalInnerJoin: isOuterJoin = false; break; case COperator::EopLogicalLeftOuterJoin: isOuterJoin = true; break; default: // this type of join operator is not supported return; } pexprOuter->AddRef(); CExpression *pexprIndexApply = GPOS_NEW(mp) CExpression( mp, GPOS_NEW(mp) CLogicalIndexApply(mp, colref_array, isOuterJoin, origJoinPred), pexprOuter, rightChildOfApply, CPredicateUtils::PexprConjunction(mp, nullptr /*pdrgpexpr*/)); pxfres->Add(pexprIndexApply); } } // EOF
4,025
335
{ "word": "Pinpoint", "definitions": [ "Find or identify with great accuracy or precision." ], "parts-of-speech": "Verb" }
61
852
/*
 L2TauIsolationInfo Class
 Holds output of the Tau L2 IsolationProducer

 Author: <NAME>
 University of Wisconsin-Madison
 e-mail: <EMAIL>
*/

#ifndef L2TAUISOLATION_INFO_H
#define L2TAUISOLATION_INFO_H
#include <vector>

namespace reco {

  // Value type carrying the ECAL and HCAL isolation quantities computed by
  // the Tau L2 IsolationProducer. Defaults encode "no measurement":
  // isolation Et = 0, seed hit Et = -1 (sentinel), cluster shape = three
  // zero-filled bins, hit counts = 0.
  class L2TauIsolationInfo {
  public:
    // Initialize all members via the initializer list (previously the
    // members were left uninitialized and then assigned / push_back'ed in
    // the constructor body); (3, 0.) builds the three zero shape bins.
    L2TauIsolationInfo()
        : ecalIsolEt_(0.),
          seedEcalHitEt_(-1.),
          ecalClusterShape_(3, 0.),
          nEcalHits_(0),
          hcalIsolEt_(0.),
          seedHcalHitEt_(-1.),
          hcalClusterShape_(3, 0.),
          nHcalHits_(0) {}

    ~L2TauIsolationInfo() {}

    // getters (cluster shapes are returned by value, matching the original
    // interface; callers own the returned copy)
    double ecalIsolEt() const { return ecalIsolEt_; }
    double seedEcalHitEt() const { return seedEcalHitEt_; }
    std::vector<double> ecalClusterShape() const { return ecalClusterShape_; }
    int nEcalHits() const { return nEcalHits_; }

    double hcalIsolEt() const { return hcalIsolEt_; }
    double seedHcalHitEt() const { return seedHcalHitEt_; }
    std::vector<double> hcalClusterShape() const { return hcalClusterShape_; }
    int nHcalHits() const { return nHcalHits_; }

    // setters
    void setEcalIsolEt(double et) { ecalIsolEt_ = et; }
    void setSeedEcalHitEt(double et) { seedEcalHitEt_ = et; }
    void setEcalClusterShape(const std::vector<double>& shape) { ecalClusterShape_ = shape; }
    void setNEcalHits(int hits) { nEcalHits_ = hits; }

    void setHcalIsolEt(double et) { hcalIsolEt_ = et; }
    void setSeedHcalHitEt(double et) { seedHcalHitEt_ = et; }
    void setHcalClusterShape(const std::vector<double>& shape) { hcalClusterShape_ = shape; }
    void setNHcalHits(int hits) { nHcalHits_ = hits; }

  private:
    // ECAL Isolation
    double ecalIsolEt_;
    double seedEcalHitEt_;                  // -1 when no seed hit was found
    std::vector<double> ecalClusterShape_;  // three shape bins
    int nEcalHits_;

    // HCAL Isolation
    double hcalIsolEt_;
    double seedHcalHitEt_;                  // -1 when no seed hit was found
    std::vector<double> hcalClusterShape_;  // three shape bins
    int nHcalHits_;
  };
}  // namespace reco
#endif
933
1,178
/*
 * Copyright 2020 Makani Technologies LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Modbus interface to the ground-power inverter bank: per-inverter
// connection state plus the bank container and register-read parameters.
// NOTE(review): uint8_t/uint16_t are used without a direct <stdint.h>
// include here -- presumably pulled in via inverter_types.h; confirm.

#ifndef AVIONICS_GROUND_POWER_Q7_INVERTER_H_
#define AVIONICS_GROUND_POWER_Q7_INVERTER_H_

#include <modbus.h>
#include <stdbool.h>

#include "avionics/ground_power/q7/inverter_types.h"

// A single inverter on the Modbus link.
typedef struct {
  uint8_t id;      // Inverter identifier (presumably the Modbus slave id -- confirm).
  modbus_t *mb;    // libmodbus connection handle for this inverter.
  bool mb_status;  // Whether the Modbus connection is currently usable.
} Inverter;

// Fixed-size collection of all inverters (kNumInverters comes from
// inverter_types.h).
typedef struct {
  int32_t size;                             // Number of valid entries in inverter_array.
  Inverter inverter_array[kNumInverters];
} InverterBank;

// Describes one Modbus read: starting register address and data length.
typedef struct {
  uint16_t modbus_register;
  uint16_t modbus_datalen;
} InverterModbusReadParam;

// Initializes every inverter in the bank.
void InverterBankInit(InverterBank *inverters);

// Thread entry point (void* arg / void* return matches the pthread_create
// signature); presumably runs the Modbus communication loop for one
// inverter -- confirm against the caller.
void *InverterCommunication(void *inverter_arg);

#endif  // AVIONICS_GROUND_POWER_Q7_INVERTER_H_
418
4,054
// Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root. #pragma once // These are used by FAST Web Search for host name anchoring. // NB! Should be changed to uppercase once the functionality is implemented!! static const char *ANCHOR_START_OF_HOST = "StArThOsT"; static const char *ANCHOR_END_OF_HOST = "EnDhOsT"; // These are used in the query parser when parsing fields with parsemode // 'boundaries'. Not used otherwise. Lowercased for performance reasons. #define ANCHOR_LEFT_BOUNDARY "fastpbfast" #define ANCHOR_RIGHT_BOUNDARY "fastpbfast"
185
1,232
<reponame>JunZCn/DBus<gh_stars>1000+ { "admin": [ { "name": "resource-manage", "id": "app.components.navigator.resourceManage","text":"数据源管理","icon": "hdd"}, { "name": "project-manage", "id": "app.components.navigator.projectManage","text":"项目管理","icon": "appstore-o"}, { "name": "config-manage", "id": "app.components.navigator.configManage","text":"配置管理","icon": "setting"}, { "name": "sink-manage", "id": "app.components.navigator.sinkManage","text":"Sink管理","icon": "usb"}, { "name": "user-manage", "id": "app.components.navigator.userManage","text":"用户管理" ,"icon": "user","href":"/user-manage/list"}, { "name": "monitor-manage", "id": "app.components.navigator.sourceMonitor","text":"贴源监控","icon": "area-chart","href":"/monitor-manage/list" }, { "name": "tool-set", "id": "app.components.navigator.toolMonitor","text":"小工具集","icon": "tool" }, { "name": "self-check", "id": "app.components.navigator.selfCheck","text":"集群状态","icon": "safety" } ], "user": [ { "name": "project", "id": "app.components.navigator.projectManage","text":"项目管理","icon": "appstore-o"} ] }
544
568
package com.fincatto.documentofiscal.nfe310.classes.nota;

import com.fincatto.documentofiscal.DFUnidadeFederativa;
import org.junit.Assert;
import org.junit.Test;

/**
 * Unit tests for {@code NFInfoProdutorRuralReferenciada} (referenced rural
 * producer fiscal document info, NF-e 3.10).
 *
 * Naming convention (Portuguese): "naoDevePermitir..." = "must not allow...",
 * "devePermitir..." = "must allow...".
 *
 * Idiom used by the "...ComTamanhoInvalido" (invalid length) tests: the try
 * block probes the too-short value; if that throws, the catch probes the
 * too-long value. The test passes (via {@code expected}) only if the value
 * that ends up being exercised throws {@code IllegalStateException}.
 */
public class NFInfoProdutorRuralReferenciadaTest {

    // CNPJ must be exactly 14 digits: 13 and 15 both rejected.
    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirCNPJComTamanhoInvalido() {
        try {
            new NFInfoProdutorRuralReferenciada().setCnpjEmitente("1234567890123");
        } catch (final IllegalStateException e) {
            new NFInfoProdutorRuralReferenciada().setCnpjEmitente("123456789012345");
        }
    }

    // CPF must be exactly 11 digits: 10 and 12 both rejected.
    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirCPFComTamanhoInvalido() {
        try {
            new NFInfoProdutorRuralReferenciada().setCpfEmitente("1234567890");
        } catch (final IllegalStateException e) {
            new NFInfoProdutorRuralReferenciada().setCpfEmitente("123456789012");
        }
    }

    // Series is limited to 3 digits.
    @Test(expected = NumberFormatException.class)
    public void naoDevePermitirSerieDocumentoFiscalComTamanhoInvalido() {
        new NFInfoProdutorRuralReferenciada().setSerieDocumentoFiscal(1000);
    }

    // Document number is limited to 9 digits.
    @Test(expected = NumberFormatException.class)
    public void naoDevePermitirNumeroDocumentoFiscalComTamanhoInvalido() {
        new NFInfoProdutorRuralReferenciada().setNumeroDocumentoFiscal(1000000000);
    }

    // Fiscal document model must be exactly 2 characters.
    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirModeloDocumentoFiscalComTamanhoInvalido() {
        try {
            new NFInfoProdutorRuralReferenciada().setModeloDocumentoFiscal("I");
        } catch (final IllegalStateException e) {
            new NFInfoProdutorRuralReferenciada().setModeloDocumentoFiscal("IE1");
        }
    }

    // CPF and CNPJ are mutually exclusive (either direction).
    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirCNPJSetadoQuandoCPFEstaSetado() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setCpfEmitente("12345678901");
        referenciada.setCnpjEmitente("12345678901234");
    }

    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirCPFSetadoQuandoCNPJEstaSetado() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setCpfEmitente("12345678901");
    }

    // The "...Nulo" tests below each omit exactly one mandatory field and
    // expect toString() (the XML serialization) to reject the incomplete state.
    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirAnoMesEmissaoReferenciadaNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    // Exactly one of CPF/CNPJ is required; each may be absent when the other is set.
    @Test
    public void devePermitirCpfEmitenteNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    @Test
    public void devePermitirCnpjEmitenteNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCpfEmitente("12345678901");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirIeEmitenteNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirModeloDocumentoFiscalNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirNumeroDocumentoFiscalNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirSerieDocumentoFiscalNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);
        referenciada.toString();
    }

    @Test(expected = IllegalStateException.class)
    public void naoDevePermitirUfEmitenteNulo() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.toString();
    }

    // Golden test: fully-populated object serializes to the expected XML.
    @Test
    public void deveGerarXMLDeAcordoComOPadraoEstabelecido() {
        final NFInfoProdutorRuralReferenciada referenciada = new NFInfoProdutorRuralReferenciada();
        referenciada.setAnoMesEmissao("1402");
        referenciada.setCnpjEmitente("12345678901234");
        referenciada.setIeEmitente("ISENTO");
        referenciada.setModeloDocumentoFiscal("IE");
        referenciada.setNumeroDocumentoFiscal(999999);
        referenciada.setSerieDocumentoFiscal(999);
        referenciada.setUfEmitente(DFUnidadeFederativa.SC);

        final String xmlEsperado = "<NFInfoProdutorRuralReferenciada><cUF>42</cUF><AAMM>1402</AAMM><CNPJ>12345678901234</CNPJ><IE>ISENTO</IE><mod>IE</mod><serie>999</serie><nNF>999999</nNF></NFInfoProdutorRuralReferenciada>";
        Assert.assertEquals(xmlEsperado, referenciada.toString());
    }
}
3,373
11,396
import base64
import os

from awx.main.utils import get_awx_version


def csp(request):
    """Expose a fresh, random Content-Security-Policy nonce to templates.

    The nonce is 32 random bytes, base64-encoded with the trailing
    newline stripped.
    """
    raw_nonce = os.urandom(32)
    encoded = base64.encodebytes(raw_nonce).decode()
    return {'csp_nonce': encoded.rstrip()}


def version(request):
    """Expose AWX version strings and the view's deprecation flag."""
    full_version = get_awx_version()
    parser_context = getattr(request, 'parser_context', {})
    view = parser_context.get('view')
    return {
        'version': full_version,
        'tower_version': full_version,
        'short_tower_version': full_version.split('-')[0],
        'deprecated': getattr(view, 'deprecated', False),
    }
195
2,338
//===-- Definitions of common POSIX types ---------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // This header file does not have a header guard. It is internal to LLVM libc // and intended to be used to pick specific definitions without polluting the // public headers with unnecessary definitions. #if defined(__need_off_t) && !defined(__llvm_libc_off_t_defined) typedef __INT64_TYPE__ off_t; #define __llvm_libc_off_t_defined #endif // __need_off_t #if defined(__need_ssize_t) && !defined(__llvm_libc_ssize_t_defined) typedef __INT64_TYPE__ ssize_t; #define __llvm_libc_ssize_t_defined #endif // __need_ssize_t
275
852
#ifndef FWCore_Framework_IntersectingIOVRecordIntervalFinder_h
#define FWCore_Framework_IntersectingIOVRecordIntervalFinder_h
// -*- C++ -*-
//
// Package:     Framework
// Class  :     IntersectingIOVRecordIntervalFinder
//
/**\class IntersectingIOVRecordIntervalFinder IntersectingIOVRecordIntervalFinder.h FWCore/Framework/interface/IntersectingIOVRecordIntervalFinder.h

 Description: A RecordIntervalFinder which determines IOVs by taking the intersection of IOVs of other RecordIntervalFinders

 Usage:
    Used internally by the framework

*/
//
// Original Author:  <NAME>
//         Created:  Tue Aug 19 13:20:34 EDT 2008
//

// system include files
#include <memory>
#include <vector>

// user include files
#include "FWCore/Framework/interface/EventSetupRecordIntervalFinder.h"
#include "FWCore/Utilities/interface/propagate_const.h"

// forward declarations
namespace edm {
  namespace eventsetup {
    class IntersectingIOVRecordIntervalFinder : public EventSetupRecordIntervalFinder {
    public:
      explicit IntersectingIOVRecordIntervalFinder(const EventSetupRecordKey&);
      // Non-copyable: holds shared ownership of the component finders.
      IntersectingIOVRecordIntervalFinder(const IntersectingIOVRecordIntervalFinder&) = delete;
      const IntersectingIOVRecordIntervalFinder& operator=(const IntersectingIOVRecordIntervalFinder&) = delete;
      ~IntersectingIOVRecordIntervalFinder() override;

      // Exchanges the given vector with finders_, installing the set of
      // component finders whose intervals are intersected.
      void swapFinders(std::vector<edm::propagate_const<std::shared_ptr<EventSetupRecordIntervalFinder>>>&);

    protected:
      // Computes the validity interval for the record as the intersection of
      // the component finders' intervals (see class description).
      void setIntervalFor(const EventSetupRecordKey&, const IOVSyncValue&, ValidityInterval&) override;

    private:
      void doResetInterval(const eventsetup::EventSetupRecordKey&) override;

      // NOTE(review): semantics of these concurrency-related overrides are
      // defined by the EventSetupRecordIntervalFinder base class -- see its
      // documentation for the contract.
      bool isConcurrentFinder() const override;
      bool isNonconcurrentAndIOVNeedsUpdate(const EventSetupRecordKey&, const IOVSyncValue&) const override;

      // ---------- member data --------------------------------
      // The component finders whose IOVs are intersected.
      std::vector<edm::propagate_const<std::shared_ptr<EventSetupRecordIntervalFinder>>> finders_;
    };
  }  // namespace eventsetup
}  // namespace edm
#endif
675
1,079
/**
 * Copyright (C) 2016, Canonical Ltd.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * Author: <NAME> <<EMAIL>>
 *
 */

// React Native (Ubuntu) view manager that exposes the Ubuntu DatePicker
// QML component to the React bridge.

#ifndef UBUNTUDATEPICKERMANAGER_H
#define UBUNTUDATEPICKERMANAGER_H

#include <QString>
#include <QMap>

#include "reactviewmanager.h"

class QQuickItem;

class UbuntuDatePickerManager : public ReactViewManager {
  Q_OBJECT
  // Q_PLUGIN_METADATA(IID ReactModuleInterface_IID)
  Q_INTERFACES(ReactModuleInterface)

public:
  UbuntuDatePickerManager(QObject* parent = 0);
  ~UbuntuDatePickerManager();

  // ReactViewManager / ReactModuleInterface overrides.
  void setBridge(ReactBridge* bridge) override;
  ReactViewManager* viewManager() override;
  ReactPropertyHandler* propertyHandler(QObject* object);
  QString moduleName() override;
  QList<ReactModuleMethod*> methodsToExport() override;
  QVariantMap constantsToExport() override;
  QStringList customBubblingEventTypes() override;

  // Creates the DatePicker QQuickItem configured with the given properties.
  QQuickItem* view(const QVariantMap& properties) const override;

private Q_SLOTS:
  // Invoked when the picker's date changes; presumably forwards the event
  // over the bridge -- confirm in the implementation.
  void onDateChanged();

private:
  void configureView(QQuickItem* view) const;

  // Counter for identifying created views; mutable because view() is const.
  mutable int m_id;
};

#endif // UBUNTUDATEPICKERMANAGER_H
443
1,577
from abc import ABC, abstractmethod


class Evaluator(ABC):
    """Evaluates predictions for a set of scenes."""

    @abstractmethod
    def process(self, scenes):
        """Run evaluation over the given scenes.

        Subclasses must implement this.
        """
64
1,587
package io.reflectoring.staticdata;

import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

import java.util.List;

/**
 * Binds the {@code static.quotes} list from the application configuration
 * (prefix {@code static} + field {@code quotes}) via constructor binding,
 * making the static quote data injectable as a bean.
 */
@Component
@ConfigurationProperties("static")
class QuotesProperties {

  // Immutable after construction; populated by Spring from configuration.
  private final List<Quote> quotes;

  QuotesProperties(List<Quote> quotes) {
    this.quotes = quotes;
  }

  List<Quote> getQuotes() {
    return this.quotes;
  }

}
151
1,738
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
// Original file Copyright Crytek GMBH or its affiliates, used under license.

#ifndef CRYINCLUDE_EDITOR_SURFACEINFOPICKER_H
#define CRYINCLUDE_EDITOR_SURFACEINFOPICKER_H
#pragma once

class CKDTree;

//////////////////////////////////////////////////////////////////////////
// Editor helper that casts rays (from screen points or world-space rays)
// against the scene and reports the nearest hit surface (SRayHitInfo),
// optionally the hit material, and the picked object. Object categories to
// consider are selected via EPickedObjectGroup flags; specific objects can
// be excluded via CExcludedObjects.
class SANDBOX_API CSurfaceInfoPicker
{
public:
    CSurfaceInfoPicker();

    // Set of objects to be ignored by all pick queries.
    class CExcludedObjects
    {
    public:
        CExcludedObjects(){}
        ~CExcludedObjects(){}
        CExcludedObjects(const CExcludedObjects& excluded)
        {
            objects = excluded.objects;
        }
        void Add(CBaseObject* pObject)
        {
            objects.insert(pObject);
        }
        void Clear()
        {
            objects.clear();
        }
        bool Contains(CBaseObject* pObject) const
        {
            return objects.find(pObject) != objects.end();
        }
    private:
        std::set<CBaseObject*> objects;
    };

    // Bit flags selecting which object categories a pick considers.
    enum EPickedObjectGroup
    {
        ePOG_BrushObject = BIT(0),
        ePOG_Prefabs = BIT(1),
        ePOG_Solid = BIT(2),
        ePOG_DesignerObject = BIT(3),
        ePOG_Vegetation = BIT(4),
        ePOG_Entity = BIT(5),
        ePOG_Terrain = BIT(6),
        ePOG_GeneralObjects = ePOG_BrushObject | ePOG_Prefabs | ePOG_Solid | ePOG_DesignerObject | ePOG_Entity,
        ePOG_All = 0xFFFFFFFF,
    };

    // Behavior flags for picking (see SetPickOptionFlag).
    enum EPickOption
    {
        ePickOption_IncludeFrozenObject = BIT(0),
    };

    // Replaces the current pick-option flags (EPickOption bits).
    void SetPickOptionFlag(int nFlag)
    {
        m_PickOption = nFlag;
    }

public:
    // Pick against a single specific object, from a screen point or a world ray.
    bool PickObject(const QPoint& point,
        SRayHitInfo& outHitInfo,
        CBaseObject* pObject);

    bool PickObject(const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        SRayHitInfo& outHitInfo,
        CBaseObject* pObject);

    // Pick overloads: with/without material output, from a world ray or a
    // screen point. All forward to PickImpl.
    bool Pick(const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>& ppOutLastMaterial,
        SRayHitInfo& outHitInfo,
        CExcludedObjects* pExcludedObjects = NULL,
        int nFlag = ePOG_All)
    {
        return PickImpl(vWorldRaySrc, vWorldRayDir, &ppOutLastMaterial, outHitInfo, pExcludedObjects, nFlag);
    }

    bool Pick(const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        SRayHitInfo& outHitInfo,
        CExcludedObjects* pExcludedObjects = NULL,
        int nFlag = ePOG_All)
    {
        return PickImpl(vWorldRaySrc, vWorldRayDir, NULL, outHitInfo, pExcludedObjects, nFlag);
    }

    bool Pick(const QPoint& point,
        SRayHitInfo& outHitInfo,
        CExcludedObjects* pExcludedObjects = NULL,
        int nFlag = ePOG_All)
    {
        return PickImpl(point, NULL, outHitInfo, pExcludedObjects, nFlag);
    }

    bool Pick(const QPoint& point,
        _smart_ptr<IMaterial>& ppOutLastMaterial,
        SRayHitInfo& outHitInfo,
        CExcludedObjects* pExcludedObjects = NULL,
        int nFlag = ePOG_All)
    {
        return PickImpl(point, &ppOutLastMaterial, outHitInfo, pExcludedObjects, nFlag);
    }

    // Collects objects whose AABBs are hit at the given screen point.
    bool PickByAABB(const QPoint& point,
        int nFlag = ePOG_All,
        IDisplayViewport* pView = NULL,
        CExcludedObjects* pExcludedObjects = NULL,
        std::vector<CBaseObjectPtr>* pOutObjects = NULL);

    // Restricts picking to the given object set instead of the whole level.
    void SetObjects(CBaseObjectsArray* pSetObjects)
    {
        m_pSetObjects = pSetObjects;
    }

    // Object hit by the most recent pick (if any).
    CBaseObjectPtr GetPickedObject()
    {
        return m_pPickedObject;
    }

    void SetActiveView(IDisplayViewport* view);

public:
    // Transforms a world-space ray into the local space given by WorldTM.
    static bool RayWorldToLocal(
        const Matrix34A& WorldTM,
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        Vec3& outRaySrc,
        Vec3& outRayDir);

private:
    // Frozen objects are skipped unless ePickOption_IncludeFrozenObject is set.
    bool IsFrozen(CBaseObject* pBaseObject) const
    {
        return !(m_PickOption & ePickOption_IncludeFrozenObject) && pBaseObject->IsFrozen();
    }

    bool PickImpl(const QPoint& point,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo,
        CExcludedObjects* pExcludedObjects = NULL,
        int nFlag = ePOG_All);

    bool PickImpl(const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo,
        CExcludedObjects* pExcludedObjects = NULL,
        int nFlag = ePOG_All);

    // Per-category nearest-hit searches; each updates outHitInfo only when a
    // closer intersection is found.
    void FindNearestInfoFromBrushObjects(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    void FindNearestInfoFromPrefabs(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    void FindNearestInfoFromSolids(int nFlag,
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    void FindNearestInfoFromVegetations(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    void FindNearestInfoFromDecals(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    void FindNearestInfoFromEntities(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    void FindNearestInfoFromTerrain(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo) const;

    /// Detect ray intersection with a IRenderNode, IEntity, IStatObj, or ICharacterInstance.
    /// But only if the intersection is closer than the one already in outHitInfo.
    static bool RayIntersection(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        IRenderNode* pRenderNode,
        IEntity* pEntity,
        IStatObj* pStatObj,
        ICharacterInstance* pCharacterInstance,
        const Matrix34A& WorldTM,
        SRayHitInfo& outHitInfo,
        _smart_ptr<IMaterial>* ppOutLastMaterial);

    /// Detect ray intersection with a IStatObj
    static bool RayIntersection_IStatObj(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        IStatObj* pStatObj,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        const Matrix34A& WorldTM,
        SRayHitInfo& outHitInfo);

    /// Detect ray intersection with a IGeomCacheRenderNode
    static bool RayIntersection_IGeomCacheRenderNode(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        IGeomCacheRenderNode* pGeomCacheRenderNode,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        const Matrix34A& worldTM,
        SRayHitInfo& outHitInfo);

    /// Detect ray intersection with a IRenderNode
    static bool RayIntersection_IRenderNode(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        IRenderNode* pRenderNode,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        const Matrix34A& WorldTM,
        SRayHitInfo& outHitInfo);

    /// Detect ray intersection with a IEntity
    static bool RayIntersection_IEntity(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        IEntity* pEntity,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        const Matrix34A& WorldTM,
        SRayHitInfo& outHitInfo);

    /// Detect ray intersection with a ICharacterInstance
    static bool RayIntersection_ICharacterInstance(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        ICharacterInstance* pCharacterInstance,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        const Matrix34A& WorldTM,
        SRayHitInfo& outHitInfo);

    /// Detect ray intersection with a CBaseObject
    static bool RayIntersection_CBaseObject(
        const Vec3& vWorldRaySrc,
        const Vec3& vWorldRayDir,
        CBaseObject* pBaseObject,
        _smart_ptr<IMaterial>* ppOutLastMaterial,
        SRayHitInfo& outHitInfo);

    // Resolve the material of the hit (from the object or a raw material)
    // into pOutMaterial.
    static void AssignObjectMaterial(CBaseObject* pObject, const SRayHitInfo& outHitInfo, _smart_ptr<IMaterial>* pOutMaterial);
    static void AssignMaterial(_smart_ptr<IMaterial> pObject, const SRayHitInfo& outHitInfo, _smart_ptr<IMaterial>* pOutMaterial);
    static bool IsMaterialValid(CMaterial* pMaterial);

private:
    int m_PickOption;                    // EPickOption flag bits.
    CBaseObjectsArray* m_pObjects;       // Objects considered by the current pick.
    CBaseObjectsArray* m_pSetObjects;    // Optional externally supplied object set (see SetObjects).
    CBaseObjectsArray m_objects;         // Local storage for gathered objects.
    IDisplayViewport* m_pActiveView;     // Viewport used to convert screen points to rays.
    CExcludedObjects m_ExcludedObjects;  // Objects to skip while picking.
    // Result of the last pick; mutable so const query paths can record it.
    mutable CBaseObjectPtr m_pPickedObject;
};

#endif // CRYINCLUDE_EDITOR_SURFACEINFOPICKER_H
3,903
66,985
<gh_stars>1000+
/*
 * Copyright 2012-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.springframework.boot.actuate.health;

import org.springframework.boot.actuate.endpoint.web.WebServerNamespace;
import org.springframework.util.Assert;
import org.springframework.util.StringUtils;

/**
 * Value object that represents an additional path for a {@link HealthEndpointGroup}.
 *
 * @author <NAME>
 * @author <NAME>
 * @since 2.6.0
 */
public final class AdditionalHealthEndpointPath {

	private final WebServerNamespace namespace;

	private final String value;

	// Value normalized to always carry a leading slash; used for equality and
	// hashing so "healthz" and "/healthz" compare equal.
	private final String canonicalValue;

	private AdditionalHealthEndpointPath(WebServerNamespace namespace, String value) {
		this.namespace = namespace;
		this.value = value;
		this.canonicalValue = value.startsWith("/") ? value : "/" + value;
	}

	/**
	 * Returns the {@link WebServerNamespace} associated with this path.
	 * @return the server namespace
	 */
	public WebServerNamespace getNamespace() {
		return this.namespace;
	}

	/**
	 * Returns the value corresponding to this path.
	 * @return the path
	 */
	public String getValue() {
		return this.value;
	}

	/**
	 * Returns {@code true} if this path has the given {@link WebServerNamespace}.
	 * @param webServerNamespace the server namespace
	 * @return the new instance
	 */
	public boolean hasNamespace(WebServerNamespace webServerNamespace) {
		return this.namespace.equals(webServerNamespace);
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj) {
			return true;
		}
		if (obj == null || getClass() != obj.getClass()) {
			return false;
		}
		AdditionalHealthEndpointPath other = (AdditionalHealthEndpointPath) obj;
		// Compare the canonical (slash-prefixed) form, not the raw value.
		return this.namespace.equals(other.namespace) && this.canonicalValue.equals(other.canonicalValue);
	}

	@Override
	public int hashCode() {
		// Equivalent to the conventional 31-based accumulation over namespace
		// and canonicalValue, matching equals().
		int result = 31 + this.namespace.hashCode();
		return 31 * result + this.canonicalValue.hashCode();
	}

	@Override
	public String toString() {
		return this.namespace.getValue() + ":" + this.value;
	}

	/**
	 * Creates an {@link AdditionalHealthEndpointPath} from the given input. The input
	 * must contain a prefix and value separated by a `:`. The value must be limited to
	 * one path segment. For example, `server:/healthz`.
	 * @param value the value to parse
	 * @return the new instance
	 */
	public static AdditionalHealthEndpointPath from(String value) {
		Assert.hasText(value, "Value must not be null");
		String[] parts = value.split(":");
		Assert.isTrue(parts.length == 2, "Value must contain a valid namespace and value separated by ':'.");
		Assert.isTrue(StringUtils.hasText(parts[0]), "Value must contain a valid namespace.");
		WebServerNamespace namespace = WebServerNamespace.from(parts[0]);
		validateValue(parts[1]);
		return new AdditionalHealthEndpointPath(namespace, parts[1]);
	}

	/**
	 * Creates an {@link AdditionalHealthEndpointPath} from the given
	 * {@link WebServerNamespace} and value.
	 * @param webServerNamespace the server namespace
	 * @param value the value
	 * @return the new instance
	 */
	public static AdditionalHealthEndpointPath of(WebServerNamespace webServerNamespace, String value) {
		Assert.notNull(webServerNamespace, "The server namespace must not be null.");
		Assert.notNull(value, "The value must not be null.");
		validateValue(value);
		return new AdditionalHealthEndpointPath(webServerNamespace, value);
	}

	// A value is a single segment when it contains at most one '/' and that
	// slash, if present, is the leading character.
	private static void validateValue(String value) {
		boolean singleSegment = StringUtils.countOccurrencesOf(value, "/") <= 1 && value.indexOf("/") <= 0;
		Assert.isTrue(singleSegment, "Value must contain only one segment.");
	}

}
1,292
367
from utils import utils


def write_rows(rows, domain, base_domain, scanner, csv_writer, meta=None):
    """Write the scan result rows for one domain to an open CSV writer.

    :param rows: list of row lists produced by a scanner, or None when the
        scan failed (a single all-empty row is written so the attempt is
        still recorded).
    :param domain: scanned domain, emitted as the first column of every row.
    :param base_domain: the domain's base domain, emitted as the second column.
    :param scanner: scanner object; only ``scanner.headers`` is read, to size
        the placeholder row on failure.
    :param csv_writer: object exposing ``writerow(list)`` (e.g. csv.writer).
    :param meta: optional dict of local/Lambda scan metadata, appended as
        trailing columns on every row.
    """
    # A failed scan still produces one blank row so the output records
    # that the scan was attempted.
    if rows is None:
        rows = [[None] * len(scanner.headers)]

    # Columns shared by every row for this domain.
    prefix = [domain, base_domain]

    # Optional trailing metadata columns.
    suffix = []
    if meta:
        suffix.append(" ".join(meta.get('errors', [])))
        suffix.append(utils.utc_timestamp(meta.get("start_time")))
        suffix.append(utils.utc_timestamp(meta.get("end_time")))
        suffix.append(utils.just_microseconds(meta.get("duration")))

        lambda_meta = meta.get("lambda")
        if lambda_meta is not None:
            suffix.append(lambda_meta.get('request_id'))
            suffix.append(lambda_meta.get('log_group_name'))
            suffix.append(lambda_meta.get('log_stream_name'))
            suffix.append(utils.utc_timestamp(lambda_meta.get('start_time')))
            suffix.append(utils.utc_timestamp(lambda_meta.get('end_time')))
            suffix.append(lambda_meta.get('memory_limit'))
            suffix.append(utils.just_microseconds(lambda_meta.get('measured_duration')))

    # Emit prefix, scan data, and metadata for each row.
    for row in rows:
        csv_writer.writerow(prefix + row + suffix)
608
2,936
["gli", "dove", "a", "fossero", "stiano", "alle", "avevano", "hanno", "mie", "sar\u00f2", "suoi", "stai", "questo", "un", "nei", "anche", "facessimo", "starebbe", "stemmo", "questa", "stesse", "sua", "dov", "o", "dallo", "ero", "dell", "starei", "stando", "negl", "fossi", "all", "sarai", "di", "suo", "far\u00f2", "tu", "si", "stavate", "facciano", "degli", "vostra", "avreste", "foste", "avranno", "ha", "facevo", "quelli", "sareste", "loro", "in", "degl", "come", "stanno", "ad", "lo", "avremo", "facciate", "avessi", "dalla", "vostro", "coi", "sugl", "con", "una", "quelle", "avuti", "eri", "eravamo", "eravate", "sono", "fanno", "stessero", "abbiamo", "chi", "sia", "alla", "nello", "tra", "nostra", "nostre", "avemmo", "sar\u00e0", "saremmo", "col", "al", "dei", "da", "facevano", "faceste", "mi", "facesse", "i", "avete", "\u00e8", "siate", "dai", "tuoi", "dal", "avevo", "farete", "avute", "allo", "avr\u00e0", "avuto", "farei", "io", "tua", "avevate", "negli", "l", "la", "faremo", "vostri", "saresti", "stette", "stavo", "avendo", "sarete", "stavamo", "fosse", "faranno", "perch\u00e9", "staremo", "voi", "delle", "noi", "stareste", "stava", "dagl", "se", "avrete", "quanto", "della", "nella", "sull", "sulle", "vi", "facesti", "li", "faceva", "facciamo", "miei", "sul", "fui", "avrai", "avessero", "avuta", "stiamo", "del", "stavi", "agl", "avevi", "erano", "uno", "abbiate", "stessi", "quanta", "staresti", "fosti", "sue", "stettero", "faremmo", "vostre", "nostri", "avevamo", "avrei", "abbia", "sulla", "le", "sarebbero", "quale", "quante", "quella", "ed", "nell", "tue", "far\u00e0", "fossimo", "farebbero", "siano", "aveste", "siamo", "saranno", "star\u00e0", "feci", "sugli", "lui", "fummo", "fai", "stetti", "ebbi", "ebbero", "furono", "ne", "non", "farai", "faccio", "pi\u00f9", "dagli", "avrebbe", "mio", "avesse", "era", "stia", "questi", "starai", "su", "il", "ho", "dalle", "nelle", "sui", "tutto", "ti", "star\u00f2", "fareste", "dello", "stesti", "facessero", "tuo", "aveva", 
"avessimo", "siete", "essendo", "staranno", "nostro", "ma", "c", "avresti", "stiate", "per", "queste", "stavano", "ci", "ebbe", "sto", "starete", "starebbero", "cui", "nel", "facevate", "fecero", "facendo", "e", "farebbe", "avr\u00f2", "quello", "avrebbero", "dall", "saremo", "ai", "avremmo", "fu", "fece", "stessimo", "contro", "sarebbe", "facevamo", "steste", "avesti", "faccia", "facessi", "agli", "quanti", "abbiano", "facevi", "sta", "facemmo", "faresti", "hai", "sei", "staremmo", "sullo", "mia", "sarei", "lei", "che", "tutti"]
1,214
657
# Generated by Django 2.1 on 2018-08-24 09:41 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("mainapp", "0084_contributor_contribution_type"), ] operations = [ migrations.RemoveField(model_name="contributor", name="commodities",), migrations.AddField( model_name="contributor", name="contrib_details", field=models.TextField(default="", verbose_name="Details of contribution Eg: 10 shirts"), ), ]
217
382
<reponame>pemmasanikrishna/clouddriver<gh_stars>100-1000
/*
 * Copyright 2021 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.netflix.spinnaker.clouddriver.cloudfoundry.client.retry;

import groovy.util.logging.Slf4j;
import io.github.resilience4j.retry.IntervalFunction;
import io.github.resilience4j.retry.Retry;
import io.github.resilience4j.retry.RetryConfig;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.time.Duration;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import okhttp3.Interceptor;
import okhttp3.Response;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * OkHttp interceptor that retries Cloud Foundry API calls with exponential backoff.
 *
 * <p>Socket timeouts and gateway errors (502/503/504) are retried up to {@code maxAttempts}
 * times. When every attempt fails, the most recent response is returned so the caller can
 * surface its body; if no response was ever received, the failure is rethrown.
 */
@Slf4j
public class RetryInterceptor implements Interceptor {

  private final Logger logger = LoggerFactory.getLogger(RetryInterceptor.class);

  /** Maximum number of attempts (initial call plus retries). */
  private final int maxAttempts;

  public RetryInterceptor(int maxAttempts) {
    this.maxAttempts = maxAttempts;
  }

  @Override
  public Response intercept(Chain chain) throws IOException {
    final String callName = "cf.api.call";
    AtomicInteger attemptsSoFar = new AtomicInteger();
    // Backoff starts at 10s and triples on each retry; both socket timeouts and
    // our own RetryableApiException trigger another attempt.
    Retry retry =
        Retry.of(
            callName,
            RetryConfig.custom()
                .maxAttempts(maxAttempts)
                .intervalFunction(IntervalFunction.ofExponentialBackoff(Duration.ofSeconds(10), 3))
                .retryExceptions(SocketTimeoutException.class, RetryableApiException.class)
                .build());
    logger.trace("cf request: " + chain.request().url());
    AtomicReference<Response> newestResponse = new AtomicReference<>();
    try {
      return retry.executeCallable(
          () -> {
            attemptsSoFar.incrementAndGet();
            Response response = chain.proceed(chain.request());
            newestResponse.set(response);
            int code = response.code();
            if (code == 502 || code == 503 || code == 504) {
              // after retries fail, the response body for these status codes will get wrapped up
              // into a CloudFoundryApiException
              if (attemptsSoFar.get() < maxAttempts) {
                response.close();
              }
              throw new RetryableApiException(
                  "Response Code " + code + ": " + chain.request().url() + " attempting retry");
            }
            return response;
          });
    } catch (Exception e) {
      // Retries exhausted: hand back the last (unclosed) response if we got one,
      // otherwise propagate the failure.
      Response response = newestResponse.get();
      if (response == null) {
        throw new IllegalStateException(e);
      }
      return response;
    }
  }

  /** Marker exception used internally to drive the retry policy. */
  private static class RetryableApiException extends RuntimeException {
    RetryableApiException(String message) {
      super(message);
    }
  }
}
1,336
1,034
__author__ = '<NAME>' import os import sys import fnmatch import matplotlib.pyplot as plt sys.path.append("../../modelzoo/") from generators import * from multiprocessing.dummy import Pool from urllib import urlretrieve def prep_folders(): if not os.path.isdir("data"): os.mkdir("data") if not os.path.isdir("data/validation"): os.mkdir("data/validation") if not os.path.isdir("data/training"): os.mkdir("data/training") if not os.path.isdir("data/test"): os.mkdir("data/test") if not os.path.isdir("data/validation/sat_img"): os.mkdir("data/validation/sat_img") if not os.path.isdir("data/validation/map"): os.mkdir("data/validation/map") if not os.path.isdir("data/training/sat_img"): os.mkdir("data/training/sat_img") if not os.path.isdir("data/training/map"): os.mkdir("data/training/map") if not os.path.isdir("data/test/sat_img"): os.mkdir("data/test/sat_img") if not os.path.isdir("data/test/map"): os.mkdir("data/test/map") def prep_urls(): valid_data_url = valid_target_url = np.loadtxt("mass_roads_validation.txt", dtype=str) valid_data_str = "https://www.cs.toronto.edu/~vmnih/data/mass_roads/valid/sat/" valid_target_str = "https://www.cs.toronto.edu/~vmnih/data/mass_roads/valid/map/" train_data_url = train_target_url = np.loadtxt("mass_roads_train.txt", dtype=str) train_data_str = "https://www.cs.toronto.edu/~vmnih/data/mass_roads/train/sat/" train_target_str = "https://www.cs.toronto.edu/~vmnih/data/mass_roads/train/map/" test_data_url = test_target_url = np.loadtxt("mass_roads_test.txt", dtype=str) test_data_str = "https://www.cs.toronto.edu/~vmnih/data/mass_roads/test/sat/" test_target_str = "https://www.cs.toronto.edu/~vmnih/data/mass_roads/test/map/" all_tasks = [] # save url along with the filename for each file for img_name in train_data_url: all_tasks.append(tuple([train_data_str + img_name + "f", "data/training/sat_img/%sf"%img_name])) all_tasks.append(tuple([train_target_str + img_name, "data/training/map/%s"%img_name])) for img_name in valid_data_url: 
all_tasks.append(tuple([valid_data_str + img_name, "data/validation/sat_img/%s"%img_name])) all_tasks.append(tuple([valid_target_str + img_name[:-1], "data/validation/map/%s"%img_name[:-1]])) for img_name in test_data_url: all_tasks.append(tuple([test_data_str + img_name, "data/test/sat_img/%s"%img_name])) all_tasks.append(tuple([test_target_str + img_name[:-1], "data/test/map/%s"%img_name[:-1]])) return all_tasks def download_dataset(all_tasks, num_workers=4): def urlretrieve_star(args): return urlretrieve(*args) pool = Pool(num_workers) pool.map(urlretrieve_star, all_tasks) pool.close() pool.join() def load_data(folder): images_sat = [img for img in os.listdir(os.path.join(folder, "sat_img")) if fnmatch.fnmatch(img, "*.tif*")] images_map = [img for img in os.listdir(os.path.join(folder, "map")) if fnmatch.fnmatch(img, "*.tif*")] assert(len(images_sat) == len(images_map)) images_sat.sort() images_map.sort() # images are 1500 by 1500 pixels each data = np.zeros((len(images_sat), 3, 1500, 1500), dtype=np.uint8) target = np.zeros((len(images_sat), 1, 1500, 1500), dtype=np.uint8) ctr = 0 for sat_im, map_im in zip(images_sat, images_map): data[ctr] = plt.imread(os.path.join(folder, "sat_img", sat_im)).transpose((2, 0, 1)) # target has values 0 and 255. make that 0 and 1 target[ctr, 0] = plt.imread(os.path.join(folder, "map", map_im))/255 ctr += 1 return data, target def prepare_dataset(): prep_folders() all_tasks = prep_urls() download_dataset(all_tasks) print "download done..." 
try: data_train, target_train = load_data("data/training") data_valid, target_valid = load_data("data/validation") data_test, target_test = load_data("data/test") # loading np arrays is much faster than loading the images one by one every time np.save("data_train.npy", data_train) np.save("target_train.npy", target_train) np.save("data_valid.npy", data_valid) np.save("target_valid.npy", target_valid) np.save("data_test.npy", data_test) np.save("target_test.npy", target_test) except: print "something went wrong, maybe the download?" if __name__ == "__main__": prepare_dataset()
1,970
2,151
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/run_loop.h"
#include "build/build_config.h"
#include "components/signin/core/browser/account_info.h"
#include "components/signin/core/browser/account_tracker_service.h"
#include "components/signin/core/browser/fake_profile_oauth2_token_service.h"
#include "components/signin/core/browser/fake_signin_manager.h"
#include "components/signin/core/browser/test_signin_client.h"
#include "components/sync_preferences/testing_pref_service_syncable.h"
#include "mojo/public/cpp/bindings/binding_set.h"
#include "services/identity/identity_service.h"
#include "services/identity/public/cpp/account_state.h"
#include "services/identity/public/cpp/scope_set.h"
#include "services/identity/public/mojom/constants.mojom.h"
#include "services/identity/public/mojom/identity_manager.mojom.h"
#include "services/service_manager/public/cpp/binder_registry.h"
#include "services/service_manager/public/cpp/service_context.h"
#include "services/service_manager/public/cpp/service_test.h"
#include "services/service_manager/public/mojom/service_factory.mojom.h"

namespace identity {
namespace {

// ChromeOS has no sign-out flow, so it uses the Base fake; other platforms
// use the full FakeSigninManager.
#if defined(OS_CHROMEOS)
using SigninManagerForTest = FakeSigninManagerBase;
#else
using SigninManagerForTest = FakeSigninManager;
#endif  // OS_CHROMEOS

// Fixed account fixtures shared by the tests below. The secondary-account and
// access-token constants are presumably used by tests later in this file.
const char kTestGaiaId[] = "dummyId";
const char kTestEmail[] = "<EMAIL>";
const char kSecondaryTestGaiaId[] = "secondaryDummyId";
const char kSecondaryTestEmail[] = "<EMAIL>";
const char kTestRefreshToken[] = "<PASSWORD>-refresh-token";
const char kTestAccessToken[] = "access_token";

// Test client that instantiates the Identity Service (wired to the fakes
// owned by the fixture) when the Service Manager asks for it by name.
class ServiceTestClient : public service_manager::test::ServiceTestClient,
                          public service_manager::mojom::ServiceFactory {
 public:
  ServiceTestClient(service_manager::test::ServiceTest* test,
                    AccountTrackerService* account_tracker,
                    SigninManagerBase* signin_manager,
                    ProfileOAuth2TokenService* token_service)
      : service_manager::test::ServiceTestClient(test),
        account_tracker_(account_tracker),
        signin_manager_(signin_manager),
        token_service_(token_service) {
    registry_.AddInterface<service_manager::mojom::ServiceFactory>(
        base::BindRepeating(&ServiceTestClient::Create,
                            base::Unretained(this)));
  }

 protected:
  void OnBindInterface(const service_manager::BindSourceInfo& source_info,
                       const std::string& interface_name,
                       mojo::ScopedMessagePipeHandle interface_pipe) override {
    registry_.BindInterface(interface_name, std::move(interface_pipe));
  }

  // Creates the Identity Service instance when requested by name.
  void CreateService(
      service_manager::mojom::ServiceRequest request,
      const std::string& name,
      service_manager::mojom::PIDReceiverPtr pid_receiver) override {
    if (name == mojom::kServiceName) {
      identity_service_context_.reset(new service_manager::ServiceContext(
          std::make_unique<IdentityService>(account_tracker_, signin_manager_,
                                            token_service_),
          std::move(request)));
    }
  }

  void Create(service_manager::mojom::ServiceFactoryRequest request) {
    service_factory_bindings_.AddBinding(this, std::move(request));
  }

 private:
  // All raw pointers below are owned by the enclosing test fixture.
  AccountTrackerService* account_tracker_;
  SigninManagerBase* signin_manager_;
  ProfileOAuth2TokenService* token_service_;
  service_manager::BinderRegistry registry_;
  mojo::BindingSet<service_manager::mojom::ServiceFactory>
      service_factory_bindings_;
  std::unique_ptr<service_manager::ServiceContext> identity_service_context_;
};

class IdentityManagerImplTest : public service_manager::test::ServiceTest {
 public:
  IdentityManagerImplTest()
      : ServiceTest("identity_unittests"),
        signin_client_(&pref_service_),
#if defined(OS_CHROMEOS)
        signin_manager_(&signin_client_, &account_tracker_) {
#else
        signin_manager_(&signin_client_,
                        &token_service_,
                        &account_tracker_,
                        nullptr) {
#endif
    AccountTrackerService::RegisterPrefs(pref_service_.registry());
    SigninManagerBase::RegisterProfilePrefs(pref_service_.registry());
    SigninManagerBase::RegisterPrefs(pref_service_.registry());
    account_tracker_.Initialize(&signin_client_);
  }

  void TearDown() override {
    // Shut down the SigninManager so that the IdentityManagerImpl doesn't end
    // up outliving it.
    signin_manager_.Shutdown();
    ServiceTest::TearDown();
  }

  // Mojo callback: stores the primary account info/state on the fixture.
  void OnReceivedPrimaryAccountInfo(
      base::RepeatingClosure quit_closure,
      const base::Optional<AccountInfo>& account_info,
      const AccountState& account_state) {
    primary_account_info_ = account_info;
    primary_account_state_ = account_state;
    quit_closure.Run();
  }

  // Mojo callback: copies the account info/state into caller-owned slots.
  void OnPrimaryAccountAvailable(base::RepeatingClosure quit_closure,
                                 AccountInfo* caller_account_info,
                                 AccountState* caller_account_state,
                                 const AccountInfo& account_info,
                                 const AccountState& account_state) {
    *caller_account_info = account_info;
    *caller_account_state = account_state;
    quit_closure.Run();
  }

  // Mojo callback: stores the result of a Gaia-ID-based lookup.
  void OnReceivedAccountInfoFromGaiaId(
      base::RepeatingClosure quit_closure,
      const base::Optional<AccountInfo>& account_info,
      const AccountState& account_state) {
    account_info_from_gaia_id_ = account_info;
    account_state_from_gaia_id_ = account_state;
    quit_closure.Run();
  }

  // Mojo callback: moves the received account list into |output|.
  void OnGotAccounts(base::RepeatingClosure quit_closure,
                     std::vector<mojom::AccountPtr>* output,
                     std::vector<mojom::AccountPtr> accounts) {
    *output = std::move(accounts);
    quit_closure.Run();
  }

  // Mojo callback: stores the access token (or error) on the fixture.
  void OnReceivedAccessToken(base::RepeatingClosure quit_closure,
                             const base::Optional<std::string>& access_token,
                             base::Time expiration_time,
                             const GoogleServiceAuthError& error) {
    access_token_ = access_token;
    access_token_error_ = error;
    quit_closure.Run();
  }

 protected:
  void SetUp() override { ServiceTest::SetUp(); }

  // Lazily connects to the Identity Manager via the Service Manager.
  mojom::IdentityManager* GetIdentityManagerImpl() {
    if (!identity_manager_)
      connector()->BindInterface(mojom::kServiceName, &identity_manager_);
    return identity_manager_.get();
  }

  void ResetIdentityManagerImpl() { identity_manager_.reset(); }

  // Ensures all pending messages on the pipe have been processed.
  void FlushIdentityManagerImplForTesting() {
    GetIdentityManagerImpl();
    identity_manager_.FlushForTesting();
  }

  void SetIdentityManagerImplConnectionErrorHandler(
      base::RepeatingClosure handler) {
    identity_manager_.set_connection_error_handler(handler);
  }

  // service_manager::test::ServiceTest:
  std::unique_ptr<service_manager::Service> CreateService() override {
    return std::make_unique<ServiceTestClient>(
        this, &account_tracker_, &signin_manager_, &token_service_);
  }

  // Connection to the service under test plus the results captured by the
  // On*() callbacks above.
  mojom::IdentityManagerPtr identity_manager_;
  base::Optional<AccountInfo> primary_account_info_;
  AccountState primary_account_state_;
  base::Optional<AccountInfo> account_info_from_gaia_id_;
  AccountState account_state_from_gaia_id_;
  base::Optional<std::string> access_token_;
  GoogleServiceAuthError access_token_error_;

  // Accessors to the fakes backing the service under test.
  AccountTrackerService* account_tracker() { return &account_tracker_; }
  SigninManagerBase* signin_manager() { return &signin_manager_; }
  FakeProfileOAuth2TokenService* token_service() { return &token_service_; }

 private:
  sync_preferences::TestingPrefServiceSyncable pref_service_;
  AccountTrackerService account_tracker_;
  TestSigninClient signin_client_;
  SigninManagerForTest signin_manager_;
  FakeProfileOAuth2TokenService token_service_;

  DISALLOW_COPY_AND_ASSIGN(IdentityManagerImplTest);
};

// Tests that it is not possible to connect to the Identity Manager if
// initiated after SigninManager shutdown.
TEST_F(IdentityManagerImplTest, SigninManagerShutdownBeforeConnection) {
  // Seed a sentinel value so we can detect whether the callback ran.
  AccountInfo sentinel;
  sentinel.account_id = "sentinel";
  primary_account_info_ = sentinel;

  // Ensure that the Identity Service has actually been created before
  // invoking SigninManagerBase::Shutdown(), since otherwise this test will
  // spin forever. Then reset the Identity Manager so that the next request
  // makes a fresh connection.
  FlushIdentityManagerImplForTesting();
  ResetIdentityManagerImpl();

  // Make a call to connect to the IdentityManagerImpl *after* SigninManager
  // shutdown; it should get notified of an error when the Identity Service
  // drops the connection.
  signin_manager()->Shutdown();
  base::RunLoop run_loop;
  SetIdentityManagerImplConnectionErrorHandler(run_loop.QuitClosure());
  GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating(
      &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo,
      base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();

  // Verify that the callback to GetPrimaryAccountInfo() was not invoked.
  EXPECT_TRUE(primary_account_info_);
  EXPECT_EQ("sentinel", primary_account_info_->account_id);
}

// Tests that the Identity Manager destroys itself on SigninManager shutdown.
TEST_F(IdentityManagerImplTest, SigninManagerShutdownAfterConnection) {
  base::RunLoop run_loop;
  SetIdentityManagerImplConnectionErrorHandler(run_loop.QuitClosure());

  // Ensure that the IdentityManagerImpl instance has actually been created
  // before invoking SigninManagerBase::Shutdown(), since otherwise this test
  // will spin forever.
  FlushIdentityManagerImplForTesting();
  signin_manager()->Shutdown();
  run_loop.Run();
}

// Tests that the Identity Manager properly handles its own destruction in the
// case where there is an active consumer request (i.e., a pending callback from
// a Mojo call). In particular, this flow should not cause a DCHECK to fire in
// debug mode.
TEST_F(IdentityManagerImplTest, IdentityManagerImplShutdownWithActiveRequest) {
  base::RunLoop run_loop;
  SetIdentityManagerImplConnectionErrorHandler(run_loop.QuitClosure());

  // Call a method on the IdentityManagerImpl that will cause it to store a
  // pending callback. This callback will never be invoked, so just pass dummy
  // arguments to it.
  GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating(
      &IdentityManagerImplTest::OnPrimaryAccountAvailable,
      base::Unretained(this), base::RepeatingClosure(), nullptr, nullptr));

  // Ensure that the IdentityManagerImpl has received the above call before
  // invoking SigninManagerBase::Shutdown(), as otherwise this test is
  // pointless.
  FlushIdentityManagerImplForTesting();

  // This flow is what would cause a DCHECK to fire if IdentityManagerImpl is
  // not properly closing its binding on shutdown.
  signin_manager()->Shutdown();
  run_loop.Run();
}

// Check that the primary account info is null if not signed in.
TEST_F(IdentityManagerImplTest, GetPrimaryAccountInfoNotSignedIn) {
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating(
      &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo,
      base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();
  EXPECT_FALSE(primary_account_info_);
}

// Check that the primary account info has expected values if signed in without
// a refresh token available.
TEST_F(IdentityManagerImplTest, GetPrimaryAccountInfoSignedInNoRefreshToken) {
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating(
      &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo,
      base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();
  EXPECT_TRUE(primary_account_info_);
  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            primary_account_info_->account_id);
  EXPECT_EQ(kTestGaiaId, primary_account_info_->gaia);
  EXPECT_EQ(kTestEmail, primary_account_info_->email);
  EXPECT_FALSE(primary_account_state_.has_refresh_token);
  EXPECT_TRUE(primary_account_state_.is_primary_account);
}

// Check that the primary account info has expected values if signed in with a
// refresh token available.
TEST_F(IdentityManagerImplTest, GetPrimaryAccountInfoSignedInRefreshToken) {
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating(
      &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo,
      base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();
  EXPECT_TRUE(primary_account_info_);
  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            primary_account_info_->account_id);
  EXPECT_EQ(kTestGaiaId, primary_account_info_->gaia);
  EXPECT_EQ(kTestEmail, primary_account_info_->email);
  EXPECT_TRUE(primary_account_state_.has_refresh_token);
  EXPECT_TRUE(primary_account_state_.is_primary_account);
}

// Check that GetPrimaryAccountWhenAvailable() returns immediately in the
// case where the primary account is available when the call is received.
TEST_F(IdentityManagerImplTest, GetPrimaryAccountWhenAvailableSignedIn) {
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);

  AccountInfo account_info;
  AccountState account_state;
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating(
      &IdentityManagerImplTest::OnPrimaryAccountAvailable,
      base::Unretained(this), run_loop.QuitClosure(),
      base::Unretained(&account_info), base::Unretained(&account_state)));
  run_loop.Run();

  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            account_info.account_id);
  EXPECT_EQ(kTestGaiaId, account_info.gaia);
  EXPECT_EQ(kTestEmail, account_info.email);
  EXPECT_TRUE(account_state.has_refresh_token);
  EXPECT_TRUE(account_state.is_primary_account);
}

// Check that GetPrimaryAccountWhenAvailable() returns the expected account
// info in the case where the primary account is made available *after* the
// call is received.
TEST_F(IdentityManagerImplTest, GetPrimaryAccountWhenAvailableSignInLater) { AccountInfo account_info; AccountState account_state; base::RunLoop run_loop; GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating( &IdentityManagerImplTest::OnPrimaryAccountAvailable, base::Unretained(this), run_loop.QuitClosure(), base::Unretained(&account_info), base::Unretained(&account_state))); // Verify that the primary account info is not currently available (this also // serves to ensure that the preceding call has been received by the Identity // Manager before proceeding). base::RunLoop run_loop2; GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating( &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo, base::Unretained(this), run_loop2.QuitClosure())); run_loop2.Run(); EXPECT_FALSE(primary_account_info_); // Make the primary account available and check that the callback is invoked // as expected. signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail); token_service()->UpdateCredentials( signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken); run_loop.Run(); EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(), account_info.account_id); EXPECT_EQ(kTestGaiaId, account_info.gaia); EXPECT_EQ(kTestEmail, account_info.email); EXPECT_TRUE(account_state.has_refresh_token); EXPECT_TRUE(account_state.is_primary_account); } // Check that GetPrimaryAccountWhenAvailable() returns the expected account // info in the case where signin is done before the call is received but the // refresh token is made available only *after* the call is received. TEST_F(IdentityManagerImplTest, GetPrimaryAccountWhenAvailableTokenAvailableLater) { AccountInfo account_info; AccountState account_state; // Sign in, but don't set the refresh token yet. 
signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail); base::RunLoop run_loop; GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating( &IdentityManagerImplTest::OnPrimaryAccountAvailable, base::Unretained(this), run_loop.QuitClosure(), base::Unretained(&account_info), base::Unretained(&account_state))); // Verify that the primary account info is present, but that the primary // account is not yet considered available (this also // serves to ensure that the preceding call has been received by the Identity // Manager before proceeding). base::RunLoop run_loop2; GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating( &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo, base::Unretained(this), run_loop2.QuitClosure())); run_loop2.Run(); EXPECT_TRUE(primary_account_info_); EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(), primary_account_info_->account_id); EXPECT_TRUE(account_info.account_id.empty()); // Set the refresh token and check that the callback is invoked as expected // (i.e., the primary account is now considered available). token_service()->UpdateCredentials( signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken); run_loop.Run(); EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(), account_info.account_id); EXPECT_EQ(kTestGaiaId, account_info.gaia); EXPECT_EQ(kTestEmail, account_info.email); EXPECT_TRUE(account_state.has_refresh_token); EXPECT_TRUE(account_state.is_primary_account); } // Check that GetPrimaryAccountWhenAvailable() returns the expected account info // in the case where the token is available before the call is received but the // account is made authenticated only *after* the call is received. This test is // relevant only on non-ChromeOS platforms, as the flow being tested here is not // possible on ChromeOS. 
#if !defined(OS_CHROMEOS)
TEST_F(IdentityManagerImplTest,
       GetPrimaryAccountWhenAvailableAuthenticationAvailableLater) {
  AccountInfo account_info;
  AccountState account_state;

  // Set the refresh token, but don't sign in yet.
  std::string account_id_to_use =
      account_tracker()->SeedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(account_id_to_use, kTestRefreshToken);

  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating(
      &IdentityManagerImplTest::OnPrimaryAccountAvailable,
      base::Unretained(this), run_loop.QuitClosure(),
      base::Unretained(&account_info), base::Unretained(&account_state)));

  // Verify that the account is present and has a refresh token, but that the
  // primary account is not yet considered available (this also serves to ensure
  // that the preceding call has been received by the Identity Manager before
  // proceeding).
  base::RunLoop run_loop2;
  GetIdentityManagerImpl()->GetAccountInfoFromGaiaId(
      kTestGaiaId,
      base::BindRepeating(
          &IdentityManagerImplTest::OnReceivedAccountInfoFromGaiaId,
          base::Unretained(this), run_loop2.QuitClosure()));
  run_loop2.Run();

  EXPECT_TRUE(account_info_from_gaia_id_);
  EXPECT_EQ(account_id_to_use, account_info_from_gaia_id_->account_id);
  EXPECT_EQ(kTestGaiaId, account_info_from_gaia_id_->gaia);
  EXPECT_EQ(kTestEmail, account_info_from_gaia_id_->email);
  EXPECT_TRUE(account_state_from_gaia_id_.has_refresh_token);
  EXPECT_FALSE(account_state_from_gaia_id_.is_primary_account);
  // The "available" callback must not have fired yet: not authenticated.
  EXPECT_TRUE(account_info.account_id.empty());

  // Sign the user in and check that the callback is invoked as expected (i.e.,
  // the primary account is now considered available). Note that it is necessary
  // to call SignIn() here to ensure that GoogleSigninSucceeded() is fired by
  // the fake signin manager.
  static_cast<FakeSigninManager*>(signin_manager())
      ->SignIn(kTestGaiaId, kTestEmail, "password");
  run_loop.Run();

  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            account_info.account_id);
  EXPECT_EQ(kTestGaiaId, account_info.gaia);
  EXPECT_EQ(kTestEmail, account_info.email);
  EXPECT_TRUE(account_state.has_refresh_token);
  EXPECT_TRUE(account_state.is_primary_account);
}
#endif

// Check that GetPrimaryAccountWhenAvailable() returns the expected account
// info to all callers in the case where the primary account is made available
// after multiple overlapping calls have been received.
TEST_F(IdentityManagerImplTest,
       GetPrimaryAccountWhenAvailableOverlappingCalls) {
  AccountInfo account_info1;
  AccountState account_state1;
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating(
      &IdentityManagerImplTest::OnPrimaryAccountAvailable,
      base::Unretained(this), run_loop.QuitClosure(),
      base::Unretained(&account_info1), base::Unretained(&account_state1)));

  AccountInfo account_info2;
  AccountState account_state2;
  base::RunLoop run_loop2;
  GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating(
      &IdentityManagerImplTest::OnPrimaryAccountAvailable,
      base::Unretained(this), run_loop2.QuitClosure(),
      base::Unretained(&account_info2), base::Unretained(&account_state2)));

  // Verify that the primary account info is not currently available (this also
  // serves to ensure that the preceding call has been received by the Identity
  // Manager before proceeding).
  base::RunLoop run_loop3;
  GetIdentityManagerImpl()->GetPrimaryAccountInfo(base::BindRepeating(
      &IdentityManagerImplTest::OnReceivedPrimaryAccountInfo,
      base::Unretained(this), run_loop3.QuitClosure()));
  run_loop3.Run();
  EXPECT_FALSE(primary_account_info_);

  // Make the primary account available and check that the callbacks are
  // invoked as expected.
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);
  // Both pending callers must be notified with identical information.
  run_loop.Run();
  run_loop2.Run();

  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            account_info1.account_id);
  EXPECT_EQ(kTestGaiaId, account_info1.gaia);
  EXPECT_EQ(kTestEmail, account_info1.email);
  EXPECT_TRUE(account_state1.has_refresh_token);
  EXPECT_TRUE(account_state1.is_primary_account);

  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            account_info2.account_id);
  EXPECT_EQ(kTestGaiaId, account_info2.gaia);
  EXPECT_EQ(kTestEmail, account_info2.email);
  EXPECT_TRUE(account_state2.has_refresh_token);
  EXPECT_TRUE(account_state2.is_primary_account);
}

// Check that GetPrimaryAccountWhenAvailable() doesn't return the account as
// available if the refresh token has an auth error.
TEST_F(IdentityManagerImplTest,
       GetPrimaryAccountWhenAvailableRefreshTokenHasAuthError) {
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);
  // Put the (otherwise valid) refresh token into a persistent-error state.
  token_service()->UpdateAuthErrorForTesting(
      signin_manager()->GetAuthenticatedAccountId(),
      GoogleServiceAuthError(
          GoogleServiceAuthError::State::INVALID_GAIA_CREDENTIALS));

  AccountInfo account_info;
  AccountState account_state;
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetPrimaryAccountWhenAvailable(base::BindRepeating(
      &IdentityManagerImplTest::OnPrimaryAccountAvailable,
      base::Unretained(this), run_loop.QuitClosure(),
      base::Unretained(&account_info), base::Unretained(&account_state)));

  // Flush the Identity Manager and check that the callback didn't fire.
  FlushIdentityManagerImplForTesting();
  EXPECT_TRUE(account_info.account_id.empty());

  // Clear the auth error, update credentials, and check that the callback
  // fires.
  token_service()->UpdateAuthErrorForTesting(
      signin_manager()->GetAuthenticatedAccountId(), GoogleServiceAuthError());
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);
  run_loop.Run();

  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            account_info.account_id);
  EXPECT_EQ(kTestGaiaId, account_info.gaia);
  EXPECT_EQ(kTestEmail, account_info.email);
  EXPECT_TRUE(account_state.has_refresh_token);
  EXPECT_TRUE(account_state.is_primary_account);
}

// Check that the account info for a given GAIA ID is null if that GAIA ID is
// unknown.
TEST_F(IdentityManagerImplTest, GetAccountInfoForUnknownGaiaID) {
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccountInfoFromGaiaId(
      kTestGaiaId,
      base::BindRepeating(
          &IdentityManagerImplTest::OnReceivedAccountInfoFromGaiaId,
          base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();

  EXPECT_FALSE(account_info_from_gaia_id_);
}

// Check that the account info for a given GAIA ID has expected values if that
// GAIA ID is known and there is no refresh token available for it.
TEST_F(IdentityManagerImplTest, GetAccountInfoForKnownGaiaIdNoRefreshToken) {
  std::string account_id =
      account_tracker()->SeedAccountInfo(kTestGaiaId, kTestEmail);

  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccountInfoFromGaiaId(
      kTestGaiaId,
      base::BindRepeating(
          &IdentityManagerImplTest::OnReceivedAccountInfoFromGaiaId,
          base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();

  EXPECT_TRUE(account_info_from_gaia_id_);
  EXPECT_EQ(account_id, account_info_from_gaia_id_->account_id);
  EXPECT_EQ(kTestGaiaId, account_info_from_gaia_id_->gaia);
  EXPECT_EQ(kTestEmail, account_info_from_gaia_id_->email);
  EXPECT_FALSE(account_state_from_gaia_id_.has_refresh_token);
  EXPECT_FALSE(account_state_from_gaia_id_.is_primary_account);
}

// Check that the account info for a given GAIA ID has expected values if that
// GAIA ID is known and has a refresh token available.
TEST_F(IdentityManagerImplTest, GetAccountInfoForKnownGaiaIdRefreshToken) {
  std::string account_id =
      account_tracker()->SeedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(account_id, kTestRefreshToken);

  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccountInfoFromGaiaId(
      kTestGaiaId,
      base::BindRepeating(
          &IdentityManagerImplTest::OnReceivedAccountInfoFromGaiaId,
          base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();

  EXPECT_TRUE(account_info_from_gaia_id_);
  EXPECT_EQ(account_id, account_info_from_gaia_id_->account_id);
  EXPECT_EQ(kTestGaiaId, account_info_from_gaia_id_->gaia);
  EXPECT_EQ(kTestEmail, account_info_from_gaia_id_->email);
  EXPECT_TRUE(account_state_from_gaia_id_.has_refresh_token);
  EXPECT_FALSE(account_state_from_gaia_id_.is_primary_account);
}

// Check the implementation of GetAccounts() when there are no accounts.
TEST_F(IdentityManagerImplTest, GetAccountsNoAccount) {
  token_service()->LoadCredentials("dummy");

  std::vector<mojom::AccountPtr> accounts;

  // Check that an empty list is returned when there are no accounts.
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccounts(base::BindRepeating(
      &IdentityManagerImplTest::OnGotAccounts, base::Unretained(this),
      run_loop.QuitClosure(), &accounts));
  run_loop.Run();
  EXPECT_EQ(0u, accounts.size());
}

// Check the implementation of GetAccounts() when there is a single account,
// which is the primary account.
TEST_F(IdentityManagerImplTest, GetAccountsPrimaryAccount) {
  token_service()->LoadCredentials("dummy");

  std::vector<mojom::AccountPtr> accounts;

  // Add a primary account.
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);

  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccounts(base::BindRepeating(
      &IdentityManagerImplTest::OnGotAccounts, base::Unretained(this),
      run_loop.QuitClosure(), &accounts));
  run_loop.Run();

  // Verify that |accounts| contains the primary account.
  EXPECT_EQ(1u, accounts.size());
  const mojom::AccountPtr& primary_account = accounts[0];
  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            primary_account->info.account_id);
  EXPECT_EQ(kTestGaiaId, primary_account->info.gaia);
  EXPECT_EQ(kTestEmail, primary_account->info.email);
  EXPECT_TRUE(primary_account->state.has_refresh_token);
  EXPECT_TRUE(primary_account->state.is_primary_account);
}

// Check the implementation of GetAccounts() when there are multiple accounts,
// in particular that ProfileOAuth2TokenService is the source of truth for
// whether an account is present.
TEST_F(IdentityManagerImplTest, GetAccountsMultipleAccounts) {
  token_service()->LoadCredentials("dummy");

  std::vector<mojom::AccountPtr> accounts;

  // Add a primary account.
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  token_service()->UpdateCredentials(
      signin_manager()->GetAuthenticatedAccountId(), kTestRefreshToken);

  // Add a secondary account with AccountTrackerService, but don't yet make
  // ProfileOAuth2TokenService aware of it.
  std::string secondary_account_id = account_tracker()->SeedAccountInfo(
      kSecondaryTestGaiaId, kSecondaryTestEmail);

  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccounts(base::BindRepeating(
      &IdentityManagerImplTest::OnGotAccounts, base::Unretained(this),
      run_loop.QuitClosure(), &accounts));
  run_loop.Run();

  // Verify that |accounts| contains only the primary account at this time.
  EXPECT_EQ(1u, accounts.size());
  const mojom::AccountPtr& primary_account = accounts[0];
  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            primary_account->info.account_id);
  EXPECT_EQ(kTestGaiaId, primary_account->info.gaia);
  EXPECT_EQ(kTestEmail, primary_account->info.email);
  EXPECT_TRUE(primary_account->state.has_refresh_token);
  EXPECT_TRUE(primary_account->state.is_primary_account);

  // Make PO2TS aware of the secondary account.
  token_service()->UpdateCredentials(secondary_account_id, kTestRefreshToken);

  base::RunLoop run_loop2;
  GetIdentityManagerImpl()->GetAccounts(base::BindRepeating(
      &IdentityManagerImplTest::OnGotAccounts, base::Unretained(this),
      run_loop2.QuitClosure(), &accounts));
  run_loop2.Run();

  // Verify that |accounts| contains both accounts, with the primary account
  // being first and having the same information as previously.
  EXPECT_EQ(2u, accounts.size());
  const mojom::AccountPtr& primary_account_redux = accounts[0];
  EXPECT_EQ(signin_manager()->GetAuthenticatedAccountId(),
            primary_account_redux->info.account_id);
  EXPECT_EQ(kTestGaiaId, primary_account_redux->info.gaia);
  EXPECT_EQ(kTestEmail, primary_account_redux->info.email);
  EXPECT_TRUE(primary_account_redux->state.has_refresh_token);
  EXPECT_TRUE(primary_account_redux->state.is_primary_account);

  const mojom::AccountPtr& secondary_account = accounts[1];
  EXPECT_EQ(secondary_account_id, secondary_account->info.account_id);
  EXPECT_EQ(kSecondaryTestGaiaId, secondary_account->info.gaia);
  EXPECT_EQ(kSecondaryTestEmail, secondary_account->info.email);
  EXPECT_TRUE(secondary_account->state.has_refresh_token);
  EXPECT_FALSE(secondary_account->state.is_primary_account);
}

// Check that the expected error is received if requesting an access token when
// not signed in.
TEST_F(IdentityManagerImplTest, GetAccessTokenNotSignedIn) {
  base::RunLoop run_loop;
  GetIdentityManagerImpl()->GetAccessToken(
      kTestGaiaId, ScopeSet(), "dummy_consumer",
      base::BindRepeating(&IdentityManagerImplTest::OnReceivedAccessToken,
                          base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();

  // No account -> no token, and the error must identify the missing account.
  EXPECT_FALSE(access_token_);
  EXPECT_EQ(GoogleServiceAuthError::State::USER_NOT_SIGNED_UP,
            access_token_error_.state());
}

// Check that the expected access token is received if requesting an access
// token when signed in.
TEST_F(IdentityManagerImplTest, GetAccessTokenSignedIn) {
  signin_manager()->SetAuthenticatedAccountInfo(kTestGaiaId, kTestEmail);
  std::string account_id = signin_manager()->GetAuthenticatedAccountId();
  token_service()->UpdateCredentials(account_id, kTestRefreshToken);
  // Have the fake token service answer the fetch asynchronously on the
  // message loop, so run_loop.Run() below completes the round trip.
  token_service()->set_auto_post_fetch_response_on_message_loop(true);
  base::RunLoop run_loop;

  GetIdentityManagerImpl()->GetAccessToken(
      account_id, ScopeSet(), "dummy_consumer",
      base::BindRepeating(&IdentityManagerImplTest::OnReceivedAccessToken,
                          base::Unretained(this), run_loop.QuitClosure()));
  run_loop.Run();

  EXPECT_TRUE(access_token_);
  EXPECT_EQ(kTestAccessToken, access_token_.value());
  EXPECT_EQ(GoogleServiceAuthError::State::NONE, access_token_error_.state());
}

}  // namespace
}  // namespace identity
11,393
1,179
/* <reponame>RavenSpear/optee_os<gh_stars>1000+ */
// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright (c) 2014, STMicroelectronics International N.V.
 */

#include <stdlib.h>
#include <string.h>

/*
 * Duplicate the NUL-terminated string @s into freshly allocated storage.
 *
 * Returns a pointer to the copy (including the terminating NUL), or NULL
 * when the allocation fails. The caller owns the returned buffer and must
 * free() it.
 */
char *strdup(const char *s)
{
	size_t size = strlen(s) + 1;
	char *dup = (char *)malloc(size);

	if (!dup)
		return NULL;

	memcpy(dup, s, size);
	return dup;
}
144
355
// <reponame>chefmoensch/TomP2P<gh_stars>100-1000
package net.tomp2p.relay.android.gcm;

import java.util.ArrayList;
import java.util.Collection;

import net.tomp2p.futures.FutureResponse;
import net.tomp2p.message.Message;
import net.tomp2p.message.Message.Type;
import net.tomp2p.p2p.Peer;
import net.tomp2p.peers.PeerAddress;
import net.tomp2p.relay.RelayUtils;
import net.tomp2p.rpc.RPC;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * Send GCM messages to other well-known peers (direct messages) which then send it to the Google Cloud
 * Messaging servers. This is basically used if one of the relay peers serving an Android device does not have
 * the GCM authentication key. This key is needed to send messages over GCM and can be obtained at Google's
 * developer console.
 * 
 * @author <NAME>
 * 
 */
public class RemoteGCMSender implements IGCMSender {

	private static final Logger LOG = LoggerFactory.getLogger(RemoteGCMSender.class);

	/** How long (in ms) to wait for each GCM server to answer before trying the next one. */
	private static final int TIMEOUT_MS = 10000;

	private final Peer peer;

	/**
	 * Guards {@link #gcmServers}. A dedicated final lock object is used instead of synchronizing on
	 * the (mutable) collection reference itself: {@link #gcmServers(Collection)} replaces the field,
	 * so threads that locked the old collection and threads that lock the new one would otherwise
	 * not exclude each other.
	 */
	private final Object gcmServersLock = new Object();
	private Collection<PeerAddress> gcmServers;

	public RemoteGCMSender(Peer peer, Collection<PeerAddress> gcmServers) {
		this.gcmServers = gcmServers;
		this.peer = peer;
	}

	@Override
	public void send(final FutureGCM futureGCM) {
		// Snapshot the current server list so the asynchronous send loop works on a stable copy.
		final Collection<PeerAddress> copy;
		synchronized (gcmServersLock) {
			copy = new ArrayList<PeerAddress>(gcmServers);
		}

		if (copy.isEmpty()) {
			LOG.error("Cannot send GCM messages because no GCM server is known");
			futureGCM.failed("Cannot send GCM messages because no GCM server is known");
			return;
		}

		// send in separate thread to not block the caller
		peer.connectionBean().timer().submit(new Runnable() {
			@Override
			public void run() {
				// send to one of the servers; stop at the first one that accepts the message
				for (PeerAddress gcmServer : copy) {
					LOG.debug("Try sending message to {}", gcmServer);
					Message message = new Message().recipient(gcmServer).sender(peer.peerAddress())
							.command(RPC.Commands.GCM.getNr()).type(Type.REQUEST_1).version(peer.p2pId())
							.buffer(RelayUtils.encodeString(futureGCM.registrationId()));
					FutureResponse futureResponse = RelayUtils.connectAndSend(peer, message);
					if (futureResponse.awaitUninterruptibly(TIMEOUT_MS) && futureResponse.isSuccess()) {
						LOG.debug("GCM server {} sent the message successfully", gcmServer);
						return;
					} else {
						// BUGFIX: the pattern has two placeholders but the server argument was
						// missing, which logged the failure reason in the wrong slot and left
						// the reason placeholder empty.
						LOG.debug("GCM server {} did not accept the message. Reason: {}", gcmServer,
								futureResponse.failedReason());
						// go to next server
					}
				}

				LOG.error("Could not send the message to any of the GCM servers");
				futureGCM.failed("Could not send the message to any of the GCM servers");
			}
		});
	}

	/**
	 * Update the gcm servers
	 */
	public void gcmServers(Collection<PeerAddress> gcmServers) {
		synchronized (gcmServersLock) {
			this.gcmServers = gcmServers;
		}
	}
}
1,025
2,542
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.  All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------

#pragma once

namespace Naming
{
    // Manages the lifecycle (activation/deactivation) of the Fabric
    // application gateway as a hosted service, and keeps its certificate
    // configuration up to date via a monitoring timer.
    class FabricApplicationGatewayManager
        : public Common::RootedObject
        , public Common::TextTraceComponent<Common::TraceTaskCodes::NamingGateway>
        , public Naming::IGatewayManager
        , public std::enable_shared_from_this<FabricApplicationGatewayManager>
    {
    public:
        FabricApplicationGatewayManager(
            Common::FabricNodeConfigSPtr const & nodeConfig,
            Hosting2::IFabricActivatorClientSPtr const & activatorClient,
            Common::ComponentRoot const & componentRoot)
            : RootedObject(componentRoot)
            , nodeConfig_(nodeConfig)
            , activatorClient_(activatorClient)
        {
        }

        // Subscribes to gateway settings changes; returns whether
        // registration succeeded.
        virtual bool RegisterGatewaySettingsUpdateHandler();

        // Async activation of the gateway process listening on the given
        // IPC server address (begin/end pair).
        virtual Common::AsyncOperationSPtr BeginActivateGateway(
            std::wstring const &gatewayIpcServerListenAddress,
            Common::TimeSpan const&,
            Common::AsyncCallback const &,
            Common::AsyncOperationSPtr const &);
        virtual Common::ErrorCode EndActivateGateway(
            Common::AsyncOperationSPtr const &);

        // Async deactivation of the gateway (begin/end pair).
        virtual Common::AsyncOperationSPtr BeginDeactivateGateway(
            Common::TimeSpan const&,
            Common::AsyncCallback const &,
            Common::AsyncOperationSPtr const &);
        virtual Common::ErrorCode EndDeactivateGateway(
            Common::AsyncOperationSPtr const &);

        virtual void AbortGateway();

    private:
        // Builds the parameter block used to launch the gateway as a hosted
        // service.
        Common::ErrorCode GetHostedServiceParameters(__out Hosting2::HostedServiceParameters &);

        // Resolves the gateway server certificate thumbprint, optionally
        // comparing against the currently loaded credential thumbprint.
        Common::ErrorCode GetGatewayCertThumbprints(
            Common::Thumbprint const* loadedCredThumbprint,
            __out Common::Thumbprint & serverCertThumbprint);

        // Callback invoked when gateway settings change; weak_ptr avoids
        // keeping this object alive from the handler registration.
        void SettingsUpdateHandler(std::weak_ptr<FabricApplicationGatewayManager> const &);
        Common::ErrorCode OnSettingsUpdated();
        Common::ErrorCode ActivateOnSettingsUpdate();

        // Certificate monitoring: periodically re-checks the configured
        // certificate and reacts to thumbprint changes.
        void CreateCertMonitorTimerIfNeeded();
        void StopCertMonitorTimer();
        void CertMonitorCallback();

        // Last observed gateway certificate thumbprint.
        Common::Thumbprint certThumbprint_;
        Common::FabricNodeConfigSPtr nodeConfig_;
        Hosting2::IFabricActivatorClientSPtr activatorClient_;
        // Protects certMonitorTimer_.
        Common::RwLock certMonitorTimerLock_;
        Common::TimerSPtr certMonitorTimer_;
    };
}
961
5,718
<reponame>raymondmuller/vue-strap
{
  "name": "vue-strap",
  "homepage": "https://github.com/yuche/vue-strap",
  "authors": [
    "yuche <<EMAIL>>"
  ],
  "description": "Bootstrap components built with Vue.js",
  "main": [
    "dist/vue-strap.js",
    "dist/vue-strap.js.map",
    "dist/vue-strap.min.js"
  ],
  "moduleType": ["amd","globals"],
  "keywords": [
    "bootstrap",
    "vue.js",
    "vue-components"
  ],
  "license": "MIT",
  "ignore": [
    "**/.*",
    "node_modules",
    "bower_components",
    "docs",
    "src",
    "favicon.ico",
    "index.html",
    "webpack*.js"
  ]
}
288
887
<reponame>trajchen/javers
package org.javers.core.graph;

import org.javers.core.metamodel.object.GlobalId;
import org.javers.core.metamodel.object.ValueObjectId;
import org.javers.core.metamodel.object.ValueObjectIdWithHash;
import org.javers.core.metamodel.property.Property;
import org.javers.core.metamodel.type.ManagedType;

import java.util.List;
import java.util.Optional;
import java.util.function.Supplier;
import java.util.stream.Collectors;

import static org.javers.common.validation.Validate.argumentIsNotNull;

/**
 * Wrapper for live client's domain object (aka CDO)
 *
 * @author <NAME>
 */
abstract class LiveCdo extends Cdo {
    // Mutable on purpose: value-object ids can be re-generated once their
    // hash is computable (see enrichHashIfNeeded / reloadHashFromParentIfNeeded).
    private GlobalId globalId;

    LiveCdo(GlobalId globalId, ManagedType managedType) {
        super(managedType);
        this.globalId = globalId;
    }

    /**
     * Re-generates this CDO's ValueObject hash from its descendant
     * value objects and swaps in the resulting id, but only when the
     * current id still requires a hash.
     */
    void enrichHashIfNeeded(LiveCdoFactory liveCdoFactory, Supplier<List<LiveCdo>> descendants) {
        if (requiresObjectHasher()) {
            // Only value-object descendants participate in hash generation.
            List<LiveCdo> descendantVOs = descendants.get().stream()
                    .filter(cdo -> cdo.getGlobalId() instanceof ValueObjectId)
                    .collect(Collectors.toList());

            ValueObjectId newId = liveCdoFactory.regenerateValueObjectHash(this, descendantVOs);
            swapId(newId);
        }
    }

    /**
     * When the id's hash is inherited from the parent, freezes that hash
     * into an immutable id. The factory parameter is currently unused but
     * kept for interface symmetry with enrichHashIfNeeded.
     */
    void reloadHashFromParentIfNeeded(LiveCdoFactory liveCdoFactory) {
        if (hasHashOnParent()) {
            ValueObjectIdWithHash id = (ValueObjectIdWithHash)getGlobalId();
            swapId(id.freeze());
        }
    }

    @Override
    public GlobalId getGlobalId() {
        return globalId;
    }

    @Override
    public Object getPropertyValue(String propertyName) {
        argumentIsNotNull(propertyName);
        Property property = getManagedType().getProperty(propertyName);
        return getPropertyValue(property);
    }

    @Override
    public Object getPropertyValue(Property property) {
        argumentIsNotNull(property);
        return property.get(wrappedCdo());
    }

    /**
     * never returns empty
     */
    @Override
    public Optional<Object> getWrappedCdo() {
        return Optional.of(wrappedCdo());
    }

    @Override
    public boolean isNull(Property property) {
        argumentIsNotNull(property);
        return property.isNull(wrappedCdo());
    }

    // Subclasses supply the actual live domain object being wrapped.
    abstract Object wrappedCdo();

    // True when the id is a hash-bearing ValueObject id whose hash has not
    // been computed yet.
    private boolean requiresObjectHasher() {
        return globalId instanceof ValueObjectIdWithHash &&
                ((ValueObjectIdWithHash) getGlobalId()).requiresHash();
    }

    // True when the id derives its hash from the parent value object.
    private boolean hasHashOnParent() {
        return globalId instanceof ValueObjectIdWithHash &&
                ((ValueObjectIdWithHash) getGlobalId()).hasHashOnParent();
    }

    private void swapId(GlobalId globalId) {
        this.globalId = globalId;
    }
}
485
<reponame>708yamaguchi/MaixPy_scripts
# This file is part of MaixPY
# Copyright (c) sipeed.com
#
# Licensed under the MIT license:
#   http://www.opensource.org/licenses/mit-license.php
#

from network_espat import wifi

# Reset the ESP AT-command module so it starts from a clean state.
wifi.reset()
# Liveness check: a bare "AT" should come back with "OK".
print(wifi.at_cmd("AT\r\n"))
# Query the AT firmware / SDK version information.
print(wifi.at_cmd("AT+GMR\r\n"))
'''
>>>
reset...
b'\r\n\r\nOK\r\n'
b'AT version:1.1.0.0(May 11 2016 18:09:56)\r\nSDK version:1.5.4(baaeaebb)\r\ncompile time:May 20 2016 15:06:44\r\nOK\r\n'
MicroPython v0.5.1-136-g039f72b6c-dirty on 2020-11-18; Sipeed_M1 with kendryte-k210
Type "help()" for more information.
>>>
'''
272
370
<filename>audit/projects/k8s-staging-sp-operator/services/logging/logs.json [ "projects/k8s-staging-sp-operator/logs/cloudaudit.googleapis.com%2Factivity", "projects/k8s-staging-sp-operator/logs/cloudbuild" ]
88
692
<reponame>utdsimmons/ohpc /* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************* NAME: Pipeline PURPOSE: This program tests the efficiency with which point-to-point synchronization can be carried out. It does so by executing a pipelined algorithm on an m*n grid. The first array dimension is distributed among the ranks (stripwise decomposition). 
USAGE: The program takes as input the dimensions of the grid, and the number of times we loop over the grid <progname> <# iterations> <m> <n> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. FUNCTIONS CALLED: Other than SHMEM or standard C functions, the following functions are used in this program: wtime() bail_out() HISTORY: - Written by <NAME>, March 2006. - modified by <NAME>, August 2006: * changed boundary conditions and stencil computation to avoid overflow * introduced multiple iterations over grid and dependency between iterations - modified by <NAME>, March 2015: * adapted for SHMEM **********************************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_shmem.h> #define ARRAY(i,j) vector[i+1+(j)*(segment_size+1)] int main(int argc, char ** argv) { int my_ID; /* MPI rank */ int root; /* ID of master rank */ int m, n; /* grid dimensions */ double *pipeline_time, /* timing parameters */ *local_pipeline_time, avgtime; double epsilon = 1.e-8; /* error tolerance */ double corner_val; /* verification value at top right corner of grid */ int i, j, iter, ID; /* dummies */ int iterations; /* number of times to run the pipeline algorithm */ int *start, *end; /* starts and ends of grid slices */ int segment_size; /* x-dimension of grid slice owned by calling rank */ int error=0; /* error flag */ int Num_procs; /* Number of ranks */ double *vector; /* array holding grid values */ long total_length; /* total required length to store grid values */ int *flag_snd; /* synchronization flags */ double *dst; /* target address of communication */ double *src; /* source address of communication */ long pSync[_SHMEM_BCAST_SYNC_SIZE]; /* work space for SHMEM collectives */ double pWrk [_SHMEM_BCAST_SYNC_SIZE]; /* work space for SHMEM collectives */ /********************************************************************************* ** Initialize the SHMEM environment 
**********************************************************************************/ start_pes (0); my_ID = shmem_my_pe(); Num_procs = shmem_n_pes(); /* we set root equal to the highest rank, because this is also the rank that reports on the verification value */ root = Num_procs-1; /********************************************************************* ** process, test and broadcast input parameter *********************************************************************/ if (argc != 4){ if (my_ID == root) printf("Usage: %s <#iterations> <1st array dimension> <2nd array dimension>\n", *argv); error = 1; goto ENDOFTESTS; } iterations = atoi(*++argv); if (iterations < 1){ if (my_ID==root) printf("ERROR: iterations must be >= 1 : %d \n",iterations); error = 1; goto ENDOFTESTS; } m = atoi(*++argv); n = atoi(*++argv); if (m < 1 || n < 1){ if (my_ID == root) printf("ERROR: grid dimensions must be positive: %d, %d \n", m, n); error = 1; goto ENDOFTESTS; } /* initialize sync variables for error checks */ for (i = 0; i < SHMEM_BCAST_SYNC_SIZE; i += 1) { pSync[i] = _SHMEM_SYNC_VALUE; } if (m<=Num_procs) { if (my_ID == root) printf("ERROR: First grid dimension %d must be > #ranks %d\n", m, Num_procs); error = 1; } ENDOFTESTS:; bail_out (error, pSync); if (my_ID == root) { printf("SHMEM pipeline execution on 2D grid\n"); printf("Number of ranks = %d\n",Num_procs); printf("Grid sizes = %d, %d\n", m, n); printf("Number of iterations = %d\n", iterations); } flag_snd = (int *) shmalloc (sizeof(int) * n); dst = (double *) shmalloc (sizeof(double) * (n)); src = (double *) shmalloc (sizeof(double) * (n)); local_pipeline_time = (double *) shmalloc (sizeof(double)); pipeline_time = (double *) shmalloc (sizeof(double)); if (!flag_snd || !dst || !src || !local_pipeline_time || !pipeline_time) { printf("ERROR: could not allocate flags or communication buffers on rank %d\n", my_ID); error = 1; } bail_out(error, pSync); start = (int *) shmalloc(2*Num_procs*sizeof(int)); if (!start) { 
printf("ERROR: Could not allocate space for array of slice boundaries on rank %d\n", my_ID); error = 1; } bail_out(error,pSync); end = start + Num_procs; start[0] = 0; for (ID=0; ID<Num_procs; ID++) { segment_size = m/Num_procs; if (ID < (m%Num_procs)) segment_size++; if (ID>0) start[ID] = end[ID-1]+1; end[ID] = start[ID]+segment_size-1; } /* now set segment_size to the value needed by the calling rank */ segment_size = end[my_ID] - start[my_ID] + 1; /* total_length takes into account one ghost cell on left side of segment */ total_length = ((end[my_ID]-start[my_ID]+1)+1)*n; vector = (double *) shmalloc(total_length*sizeof(double)); if (vector == NULL) { printf("Could not allocate space for grid slice of %d by %d points", segment_size, n); printf(" on rank %d\n", my_ID); error = 1; } bail_out(error, pSync); /* clear the array */ for (j=0; j<n; j++) for (i=start[my_ID]-1; i<=end[my_ID]; i++) { ARRAY(i-start[my_ID],j) = 0.0; } /* set boundary values (bottom and left side of grid */ if (my_ID==0) for (j=0; j<n; j++) ARRAY(0,j) = (double) j; for (i=start[my_ID]-1; i<=end[my_ID]; i++) ARRAY(i-start[my_ID],0) = (double) i; /* redefine start and end for calling rank to reflect local indices */ if (my_ID==0) start[my_ID] = 1; else start[my_ID] = 0; end[my_ID] = segment_size-1; /* initialize synchronization flags */ for (j=0; j<n; j++) flag_snd[j] = 0; for (iter=0; iter<=iterations; iter++) { if (iter == 1) { shmem_barrier_all (); local_pipeline_time [0] = wtime(); } for (j=1; j<n; j++) { /* if I am not at the left boundary, wait for left neighbor to send data */ if (my_ID > 0) { shmem_int_wait_until (&flag_snd [j], SHMEM_CMP_NE, iter%2); ARRAY(start[my_ID]-1,j) = dst[j]; } for (i=start[my_ID]; i<= end[my_ID]; i++) { ARRAY(i,j) = ARRAY(i-1,j) + ARRAY(i,j-1) - ARRAY(i-1,j-1); } /* if I am not on the right boundary, send data to my right neighbor */ if (my_ID != Num_procs-1) { src[j] = ARRAY (end[my_ID],j); shmem_putmem(&dst[j], &src[j], sizeof(double), my_ID+1); 
shmem_fence(); shmem_int_swap (&flag_snd [j], !(iter%2), my_ID+1); } } corner_val = 0.; /* copy top right corner value to bottom left corner to create dependency */ if (Num_procs >1) { if (my_ID==root) { corner_val = -ARRAY(end[my_ID],n-1); src [0] = corner_val; shmem_putmem(&dst[0], &src[0], sizeof(double), 0); shmem_fence(); shmem_int_swap(&flag_snd[0], !(iter%2), 0); } if (my_ID==0) { shmem_int_wait_until (&flag_snd[0], SHMEM_CMP_NE, iter%2); ARRAY(0,0) = dst[0]; } } else ARRAY(0,0)= -ARRAY(end[my_ID],n-1); } local_pipeline_time [0] = wtime() - local_pipeline_time [0]; shmem_double_max_to_all(pipeline_time, local_pipeline_time, 1, 0, 0, Num_procs, pWrk, pSync); /* verify correctness, using top right value */ corner_val = (double) ((iterations+1)*(m+n-2)); if (my_ID == root) { if (abs(ARRAY(end[my_ID],n-1)-corner_val)/corner_val >= epsilon) { printf("ERROR: checksum %lf does not match verification value %lf\n", ARRAY(end[my_ID],n-1), corner_val); error = 1; } } bail_out(error, pSync); if (my_ID == root) { avgtime = pipeline_time [0]/iterations; #ifdef VERBOSE printf("Solution validates; verification value = %lf\n", corner_val); printf("Point-to-point synchronizations/s: %lf\n", ((float)((n-1)*(Num_procs-1)))/(avgtime)); #else printf("Solution validates\n"); #endif printf("Rate (MFlops/s): %lf, Avg time (s): %lf\n", 1.0E-06 * 2 * ((double)((m-1)*(n-1)))/avgtime, avgtime); } exit(EXIT_SUCCESS); } /* end of main */
4,903
764
<gh_stars>100-1000 { "symbol": "YOO", "address": "0x1D4105534dA120DA243281cfC3f26Aaf038E2D6f", "overview":{ "en": "Yooba is a decentralized e-commerce system based on blockchain technology, where anyone can securely and privately provide services and purchase services", "zh": "一个去中心化的,全球性的,安全的,隐私的,基于区块链的电子商务系统。致力于让所有消费者,安全、隐私、轻易的进行全球性的消费。" }, "email": "<EMAIL>", "website": "https://yooba.org", "whitepaper": "https://yooba.org/assets/YoobaWhitePaper_en.pdf", "state": "NORMAL", "published_on": "2018-03-29", "initial_price":{ "ETH":"0.000002857 ETH", "USD":"0.002 USD", "BTC":"0.0000002 BTC" }, "links": { "blog": "https://blog.yooba.org", "twitter": "https://twitter.com/YoobaProject", "telegram": "https://t.me/yoobaproject", "github": "https://github.com/yooba-team", "facebook": "https://fb.me/YoobaProject", "reddit": "https://www.reddit.com/r/Yooba/" } }
504
1,738
/*
* All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
* its licensors.
*
* For complete copyright and license terms please see the LICENSE at the root of this
* distribution (the "License"). All use of this software is governed by the License,
* or, if provided, by the license below or the license accompanying this file. Do not
* remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
*/
#ifndef GM_REPLICA_INTERESTQUERYRESULT_H
#define GM_REPLICA_INTERESTQUERYRESULT_H

// NOTE(review): the entire body of this header is commented out, so including
// it currently contributes nothing but the include guard.  The aliases below
// (a peer list per replica, and a replica-id -> peer-list map) look like a
// planned interest-query result type -- confirm whether they should be
// re-enabled or the header removed.
/*
#include <AzCore/base.h>
#include <GridMate/containers/vector.h>
#include <GridMate/containers/unordered_map.h>
#include <GridMate/Replica/Interest/InterestDefs.h>
#include <GridMate/Replica/ReplicaCommon.h>

namespace GridMate
{
    using InterestPeerList = vector<PeerId>;
    using InterestQueryResult = unordered_map<ReplicaId, InterestPeerList>;
} // GridMate
*/
#endif // GM_REPLICA_INTERESTQUERYRESULT_H
358
365
// Copyright 2013 Blender Foundation. All rights reserved.
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License
// as published by the Free Software Foundation; either version 2
// of the License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software Foundation,
// Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.

#include "internal/base/util.h"

namespace blender {
namespace opensubdiv {

// Splits `str` into tokens at every character contained in `separators` and
// appends the tokens to `tokens`.  When `skip_empty` is true, zero-length
// tokens (from adjacent separators or separators at the string boundaries)
// are dropped; otherwise they are kept.
void stringSplit(vector<string> *tokens,
                 const string &str,
                 const string &separators,
                 bool skip_empty)
{
  // token_start: index of the first character of the current token.
  // token_length: number of characters accumulated into it so far.
  size_t token_start = 0, token_length = 0;
  for (size_t i = 0; i < str.length(); ++i) {
    const char ch = str[i];
    if (separators.find(ch) == string::npos) {
      // Append non-separator char to a token.
      ++token_length;
    }
    else {
      // Append current token to the list (if any).
      if (token_length > 0 || !skip_empty) {
        string token = str.substr(token_start, token_length);
        tokens->push_back(token);
      }
      // Re-set token pointers.
      token_start = i + 1;
      token_length = 0;
    }
  }
  // Append token which might be at the end of the string.  The second clause
  // keeps a trailing empty token when empties are requested and the string
  // ends with a separator.  (NOTE(review): when token_start > 0 the previous
  // character is a separator by construction, so the find() check here looks
  // redundant -- confirm before simplifying.)
  if ((token_length != 0) ||
      (!skip_empty && token_start > 0 &&
       separators.find(str[token_start - 1]) != string::npos)) {
    string token = str.substr(token_start, token_length);
    tokens->push_back(token);
  }
}

}  // namespace opensubdiv
}  // namespace blender
667
2,728
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import json

SUPPORTED_VERSIONS = {"1.0"}


class AuthenticationRecord(object):
    """Non-secret account information for an authenticated user.

    Instances let :class:`DeviceCodeCredential` and
    :class:`InteractiveBrowserCredential` reuse previously cached
    authentication data.  Applications should not build these directly;
    they should obtain one from a credential's **authenticate** method,
    e.g. :func:`InteractiveBrowserCredential.authenticate`.  See the
    user_authentication sample for details.
    """

    def __init__(self, tenant_id, client_id, authority, home_account_id, username):
        # type: (str, str, str, str, str) -> None
        self._tenant_id = tenant_id
        self._client_id = client_id
        self._authority = authority
        self._home_account_id = home_account_id
        self._username = username

    @property
    def authority(self):
        # type: () -> str
        return self._authority

    @property
    def client_id(self):
        # type: () -> str
        return self._client_id

    @property
    def home_account_id(self):
        # type: () -> str
        return self._home_account_id

    @property
    def tenant_id(self):
        # type: () -> str
        return self._tenant_id

    @property
    def username(self):
        # type: () -> str
        """The authenticated user's username"""
        return self._username

    @classmethod
    def deserialize(cls, data):
        # type: (str) -> AuthenticationRecord
        """Rebuild a record from its serialized form.

        :param str data: a serialized record
        :raises ValueError: when the record's version is unsupported
        """
        payload = json.loads(data)

        version = payload.get("version")
        if version not in SUPPORTED_VERSIONS:
            raise ValueError(
                'Unexpected version "{}". This package supports these versions: {}'.format(version, SUPPORTED_VERSIONS)
            )

        return cls(
            tenant_id=payload["tenantId"],
            client_id=payload["clientId"],
            authority=payload["authority"],
            home_account_id=payload["homeAccountId"],
            username=payload["username"],
        )

    def serialize(self):
        # type: () -> str
        """Serialize the record to a JSON string.

        :rtype: str
        """
        contents = {
            "authority": self._authority,
            "clientId": self._client_id,
            "homeAccountId": self._home_account_id,
            "tenantId": self._tenant_id,
            "username": self._username,
            "version": "1.0",
        }
        return json.dumps(contents)
1,107
1,178
/*
 * Copyright 2020 Makani Technologies LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "avionics/motor/firmware/selftest.h"

#include "avionics/motor/firmware/calib_params.h"
#include "avionics/motor/firmware/config_params.h"
#include "avionics/firmware/serial/motor_serial_params.h"
#include "avionics/firmware/util/selftest.h"
#include "common/macros.h"

// Power-on self test for the motor controller firmware.
//
// Verifies the board is one of the supported motor hardware revisions, then
// checks the type/version of the stored calibration and configuration
// parameter blobs against what this image was built with.  (Failure behavior
// lives inside the SelfTest* helpers, which are not visible here --
// presumably they halt or report on mismatch; confirm in
// avionics/firmware/util/selftest.)
void SelfTest(void) {
  // Hardware revisions this firmware image is allowed to run on.
  const HardwareSpec valid_hardware[] = {
      {kHardwareTypeMotor, kMotorHardwareGinA1},
      {kHardwareTypeMotor, kMotorHardwareGinA2},
      {kHardwareTypeMotor, kMotorHardwareGinA3},
      {kHardwareTypeMotor, kMotorHardwareGinA4Clk16},
      {kHardwareTypeMotor, kMotorHardwareGinA4Clk8},
      {kHardwareTypeMotor, kMotorHardwareOzoneA1}
  };

  SelfTestCommon(ARRAYSIZE(valid_hardware), valid_hardware);
  SelfTestCalibParameters(MotorCalibParamsGetTypeVersion());
  SelfTestConfigParameters(MotorConfigParamsGetTypeVersion());
}
476
7,892
<gh_stars>1000+ /* A program to test real 2d forward and inverse fast fourier transform routines */ #include <stdio.h> #include <stdlib.h> #include <fp.h> #include <math.h> #include "fftlib.h" #include "fftext.h" #include "fft2d.h" #if macintosh #include <timer.h> #endif #define BIPRAND(a) (2.0/(RAND_MAX+1.0)*a-1.0) void main(){ long N2 = 64; /* the number of rows in 2d ffts, must be power of 2 */ long N = 256; /* the number of cols in 2d ffts, must be power of 2 */ float *a; float maxerrfft; long i1; long TheErr; long M; long M2; FILE *fdataout; /* output file */ unsigned int randseed = 777; int rannum; #if macintosh UnsignedWide TheTime1; Microseconds(&TheTime1); randseed = TheTime1.lo; #endif printf(" %6d Byte Floats \n", sizeof(a[0])); printf(" randseed = %10u\n", randseed); srand(randseed); M = roundtol(LOG2(N)); N = POW2(M); M2 = roundtol(LOG2(N2)); N2 = POW2(M2); printf("fft size = %6d X%6d, ", N2, N); TheErr = 0; if(!TheErr){ TheErr = fft2dInit(M2, M); } a = (float *) malloc(N2*N*sizeof(float) ); if (a == 0) TheErr = 2; if(!TheErr){ fdataout = fopen("fftdat.dr2", "wb"); if (fdataout == NULL) TheErr = -50; } if(!TheErr){ /* write sizes to fdataout */ fwrite(&N, sizeof(N), 1, fdataout); fwrite(&N2, sizeof(N2), 1, fdataout); /* set up a simple test case and write to fdataout */ for (i1=0; i1<N2*N; i1++){ rannum = rand(); a[i1] = BIPRAND(rannum); } fwrite(a, N2*N*sizeof(float), 1, fdataout); /* real, 2d fast fourier transform */ rfft2d(a, M2, M); /* write out answer */ fwrite(a, N2*N*sizeof(float), 1, fdataout); fclose(fdataout); /* compute and check inverse transform */ rifft2d(a, M2, M); maxerrfft = 0; srand(randseed); for (i1=0; i1<N2*N; i1++){ rannum = rand(); maxerrfft = fmax(maxerrfft, fabs(BIPRAND(rannum)-a[i1])); } printf("maxerr rfft = %6.4e\n", maxerrfft); free(a); fft2dFree(); } else{ if(TheErr==2) printf(" out of memory \n"); else printf(" error \n"); fft2dFree(); } printf(" Done. \n"); return; }
958
325
// Tencent is pleased to support the open source community by making Mars available.
// Copyright (C) 2016 THL A29 Limited, a Tencent company. All rights reserved.

// Licensed under the MIT License (the "License"); you may not use this file except in
// compliance with the License. You may obtain a copy of the License at
// http://opensource.org/licenses/MIT

// Unless required by applicable law or agreed to in writing, software distributed under the License is
// distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
// either express or implied. See the License for the specific language governing permissions and
// limitations under the License.

/*
 * smart_heartbeat.h
 *
 *  Created on: 2014-1-22
 *      Author: phoenixzuo
 */

#ifndef STN_SRC_SMART_HEARTBEAT_H_
#define STN_SRC_SMART_HEARTBEAT_H_

#include <string>

#include "mars/comm/thread/mutex.h"
#include "mars/comm/singleton.h"
#include "mars/stn/config.h"

#include "special_ini.h"

// Reasons a smart-heartbeat statistics report is generated.
enum HeartbeatReportType {
    kReportTypeCompute = 1,   // report info of compute smart heartbeat
    kReportTypeSuccRate = 2,  // report success rate when smart heartbeat is stabled
};

class SmartHeartbeat;

// Heartbeat state tracked for one network (identified by detail/type below).
// Only SmartHeartbeat manipulates the fields, via the friend declaration.
class NetHeartbeatInfo {
  public:
    NetHeartbeatInfo();
    void Clear();

  public:
    // NetHeartbeatInfo(const NetHeartbeatInfo&);
    // NetHeartbeatInfo& operator=(const NetHeartbeatInfo&);

  private:
    std::string net_detail_;                 // network identifier (presumably SSID/operator -- confirm in .cc)
    int net_type_;
    unsigned int cur_heart_;                 // current heartbeat interval
    bool is_stable_;                         // whether the interval has stabilized
    unsigned int fail_heart_count_;          // accumulated failed counts on curHeart
    time_t last_modify_time_;
    unsigned int success_curr_heart_count_;

    friend class SmartHeartbeat;
};

// Adapts the long-link heartbeat interval based on observed success/failure,
// persisting per-network results in an INI file.
class SmartHeartbeat {
  public:
    SmartHeartbeat();
    ~SmartHeartbeat();

    void OnHeartbeatStart();
    void OnLongLinkEstablished();
    void OnLongLinkDisconnect();
    void OnHeartResult(bool _sucess, bool _fail_of_timeout);

    // bIsUseSmartBeat is add by andrewu for stat
    unsigned int GetNextHeartbeatInterval(bool& _use_smart_heartbeat);

    // MIUI aligns alarm responses at multiples of five minutes; we should
    // handle this case specially.
    void JudgeMIUIStyle();

  private:
    void __DumpHeartInfo();
    bool __IsMIUIStyle();
    void __LimitINISize();
    void __LoadINI();
    void __SaveINI();

  private:
    bool is_wait_heart_response_;
    unsigned int xiaomi_style_count_;
    unsigned int success_heart_count_;  // the total success heartbeat based on single alive TCP, And heartbeat interval can be different.
    unsigned int last_heart_;
    NetHeartbeatInfo current_net_heart_info_;
    Mutex _mutex_;
    SpecialINI ini_;
};

#endif  // STN_SRC_SMART_HEARTBEAT_H_
936
376
import dataclasses

import pytest
import numpy as np

import whynot as wn
from whynot.gym import envs

from spec_list import spec_list

# This runs a smoketest on each official registered env. We may want
# to try also running environments which are not officially registered
# envs.
@pytest.mark.parametrize("spec", spec_list)
def test_env(spec):
    """Smoketest one registered environment: reset, step, render, close."""
    # Capture warnings
    with pytest.warns(None) as warnings:
        env = spec.make()

    # Check that dtype is explicitly declared for gym.Box spaces
    for warning_msg in warnings:
        assert not "autodetected dtype" in str(warning_msg.message)

    ob_space = env.observation_space
    act_space = env.action_space
    ob = env.reset()
    assert ob_space.contains(ob), "Reset observation: {!r} not in space".format(ob)
    a = act_space.sample()
    observation, reward, done, _info = env.step(a)
    assert ob_space.contains(observation), "Step observation: {!r} not in space".format(
        observation
    )
    assert np.isscalar(reward), "{} is not a scalar for {}".format(reward, env)
    assert isinstance(done, bool), "Expected {} to be a boolean".format(done)

    for mode in env.metadata.get("render.modes", []):
        env.render(mode=mode)

    # Make sure we can render the environment after close.
    # NOTE(review): env.close() is called AFTER this second render loop, so
    # the comment above does not match the code -- confirm whether close()
    # should precede the loop (as in upstream gym's smoketest).
    for mode in env.metadata.get("render.modes", []):
        env.render(mode=mode)

    env.close()


# Run a longer rollout on some environments
@pytest.mark.parametrize("spec", ["HIV-v0", "world3-v0", "opioid-v0"])
def test_random_rollout(spec):
    """Roll a random policy for up to 10 steps on three fresh env instances."""
    for env in [envs.make(spec), envs.make(spec), envs.make(spec)]:
        # Random policy: ignores the observation entirely.
        def agent(ob):
            return env.action_space.sample()

        ob = env.reset()
        for _ in range(10):
            assert env.observation_space.contains(ob)
            print("Observation: ", ob)
            a = agent(ob)
            assert env.action_space.contains(a)
            (ob, _reward, done, _info) = env.step(a)
            if done:
                break
        env.close()


@pytest.mark.parametrize("spec", ["HIV-v0", "world3-v0", "opioid-v0", "Zika-v0"])
def test_config(spec):
    """Test setting simulator config via gym.make"""
    base_env = envs.make(spec)
    base_config = base_env.config
    # Replace one field; the base env's config must be unaffected.
    new_config = dataclasses.replace(base_config, delta_t=-100)
    new_env = envs.make(spec, config=new_config)
    assert base_env.config.delta_t == base_config.delta_t
    assert new_env.config.delta_t == new_config.delta_t


def test_credit_config():
    """Set simulator config for Credit sim via gym.make"""
    base_features = wn.credit.Config().changeable_features
    new_features = np.array([0, 1, 2])

    base_env = envs.make("Credit-v0")
    config = wn.credit.Config(changeable_features=new_features)
    env = envs.make("Credit-v0", config=config)

    # The custom config must apply to the new env only.
    assert np.allclose(base_env.config.changeable_features, base_features)
    assert np.allclose(env.config.changeable_features, new_features)
    base_env.close()
    env.close()


def test_credit_initial_state():
    """Test initial state for Credit sim via gym.make"""
    base_env = envs.make("Credit-v0")
    original_state = base_env.reset()
    features, labels = original_state["features"], original_state["labels"]

    # Build a smaller initial state from the first 100 rows.
    subsampled_state = wn.credit.State(features[:100], labels[:100])
    env = envs.make("Credit-v0", initial_state=subsampled_state)
    assert np.allclose(env.initial_state.features, subsampled_state.features)
    assert np.allclose(env.initial_state.labels, subsampled_state.labels)

    # reset() must return the subsampled state, and it must survive stepping.
    ob = env.reset()
    assert np.allclose(ob["features"], subsampled_state.features)
    assert np.allclose(ob["labels"], subsampled_state.labels)
    for idx in range(10):
        print(env.observation_space)
        print(ob["features"].shape, ob["labels"].shape)
        assert env.observation_space.contains(ob)
        a = env.action_space.sample()
        assert env.action_space.contains(a)
        (ob, _reward, done, _info) = env.step(a)
        if done:
            break
    ob = env.reset()
    assert np.allclose(ob["features"], subsampled_state.features)
    assert np.allclose(ob["labels"], subsampled_state.labels)
    env.close()
1,666
809
/**
 * @file
 * @brief Adaptation of the NRF24 radio library to STM32F4 Cube
 *
 * @date 27.06.18
 * @author <NAME>
 */

#ifndef NRF24_STM32_CUBE
#define NRF24_STM32_CUBE

#include <stdint.h>

/* one-time hardware initialization; returns a status code
   (presumably 0 on success -- confirm in the implementation) */
extern int nrf24_hw_init(void);

/* drive the radio's CE (chip enable) pin to the given logic state */
extern void nrf24_ce_digitalWrite(uint8_t state);

/* drive the radio's CSN (SPI chip select) pin to the given logic state */
extern void nrf24_csn_digitalWrite(uint8_t state);

/* send and receive multiple bytes over SPI */
extern void nrf24_spi_transfer(uint8_t *tx, uint8_t *rx, uint8_t len);

/* send multiple bytes over SPI */
extern void nrf24_spi_transmit(uint8_t *tx, uint8_t len);

/* receive multiple bytes over SPI */
extern void nrf24_spi_receive(uint8_t *rx, uint8_t len);

#endif /* NRF24_STM32_CUBE */
278
387
""" ################################################################################################## # Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved. # Filename : east.py # Abstract : The main pipeline definition of EAST model # Current Version: 1.0.0 # Date : 2020-06-08 #################################################################################################### """ from mmdet.models.builder import DETECTORS from .seg_based_det import SegBasedDet @DETECTORS.register_module() class EAST(SegBasedDet): """ Implementation of EAST [1] Ref: [1] An Efficient and Accurate Scene Text Detector. CVPR-2017 """ def __init__(self, backbone, neck=None, mask_head=None, train_cfg=None, test_cfg=None, pretrained=None): """ Args: backbone(dict): network backbone (e.g. ResNet) neck(dict): network neck (e.g., EASTMerge) head(dict): head for loss calculation (e.g., EASTHead) train_cfg(dict): related parameters for training test_cfg(dict): related parameters for test pretrained(dict): pretrained model """ super().__init__(backbone=backbone, neck=neck, mask_head=mask_head, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
611
2,111
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2016
// <NAME> Codeplay Software Ltd.
// <NAME> Codeplay Software Ltd.
// <NAME> Codeplay Software Ltd.
// Contact: <<EMAIL>>
// <NAME> <<EMAIL>>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#define EIGEN_TEST_NO_LONGDOUBLE
#define EIGEN_TEST_NO_COMPLEX
#define EIGEN_DEFAULT_DENSE_INDEX_TYPE int64_t
#define EIGEN_USE_SYCL

#include "main.h"
#include <unsupported/Eigen/CXX11/Tensor>

using Eigen::array;
using Eigen::SyclDevice;
using Eigen::Tensor;
using Eigen::TensorMap;
// NOTE(review): the two using-declarations below duplicate ones above;
// redundant but harmless.
using Eigen::Tensor;
using Eigen::RowMajor;

// Verifies that slice() + elementwise subtraction evaluated on a SYCL device
// produces the same result as the equivalent stridedSlice() computation
// evaluated on the host.
template <typename DataType, int DataLayout, typename IndexType>
static void test_image_op_sycl(const Eigen::SyclDevice &sycl_device) {
  IndexType sizeDim1 = 245;
  IndexType sizeDim2 = 343;
  IndexType sizeDim3 = 577;

  // Full input volume and the (one plane shorter along dim 0) output volume.
  array<IndexType, 3> input_range = {{sizeDim1, sizeDim2, sizeDim3}};
  array<IndexType, 3> slice_range = {{sizeDim1 - 1, sizeDim2, sizeDim3}};

  Tensor<DataType, 3, DataLayout, IndexType> tensor1(input_range);
  Tensor<DataType, 3, DataLayout, IndexType> tensor2(input_range);
  Tensor<DataType, 3, DataLayout, IndexType> tensor3(slice_range);      // device result
  Tensor<DataType, 3, DataLayout, IndexType> tensor3_cpu(slice_range);  // host reference

  typedef Eigen::DSizes<IndexType, 3> Index3;
  // tensor1 is sliced starting at plane 1, tensor2 starting at plane 0;
  // both slices have extent sizeDim1-1 along the first dimension.
  Index3 strides1(1L, 1L, 1L);
  Index3 indicesStart1(1L, 0L, 0L);
  Index3 indicesStop1(sizeDim1, sizeDim2, sizeDim3);
  Index3 strides2(1L, 1L, 1L);
  Index3 indicesStart2(0L, 0L, 0L);
  Index3 indicesStop2(sizeDim1 - 1, sizeDim2, sizeDim3);
  Eigen::DSizes<IndexType, 3> sizes(sizeDim1 - 1, sizeDim2, sizeDim3);

  tensor1.setRandom();
  tensor2.setRandom();

  // Device-side buffers for both inputs and the output.
  DataType* gpu_data1 = static_cast<DataType*>(sycl_device.allocate(tensor1.size()*sizeof(DataType)));
  DataType* gpu_data2 = static_cast<DataType*>(sycl_device.allocate(tensor2.size()*sizeof(DataType)));
  DataType* gpu_data3 = static_cast<DataType*>(sycl_device.allocate(tensor3.size()*sizeof(DataType)));

  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu1(gpu_data1, input_range);
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu2(gpu_data2, input_range);
  TensorMap<Tensor<DataType, 3, DataLayout, IndexType>> gpu3(gpu_data3, slice_range);

  sycl_device.memcpyHostToDevice(gpu_data1, tensor1.data(), (tensor1.size())*sizeof(DataType));
  sycl_device.memcpyHostToDevice(gpu_data2, tensor2.data(), (tensor2.size())*sizeof(DataType));

  // Device computation: tensor1[1:, :, :] - tensor2[:-1, :, :].
  gpu3.device(sycl_device) = gpu1.slice(indicesStart1, sizes) - gpu2.slice(indicesStart2, sizes);
  sycl_device.memcpyDeviceToHost(tensor3.data(), gpu_data3, (tensor3.size())*sizeof(DataType));

  // Host reference using stridedSlice with unit strides.
  tensor3_cpu = tensor1.stridedSlice(indicesStart1, indicesStop1, strides1) -
                tensor2.stridedSlice(indicesStart2, indicesStop2, strides2);

  // Element-by-element comparison of device result against the reference.
  for (IndexType i = 0; i < slice_range[0]; ++i) {
    for (IndexType j = 0; j < slice_range[1]; ++j) {
      for (IndexType k = 0; k < slice_range[2]; ++k) {
        VERIFY_IS_EQUAL(tensor3_cpu(i,j,k), tensor3(i,j,k));
      }
    }
  }

  sycl_device.deallocate(gpu_data1);
  sycl_device.deallocate(gpu_data2);
  sycl_device.deallocate(gpu_data3);
}

// Runs the test above on one SYCL device selector.
template <typename DataType, typename dev_Selector>
void sycl_computing_test_per_device(dev_Selector s) {
  QueueInterface queueInterface(s);
  auto sycl_device = Eigen::SyclDevice(&queueInterface);
  test_image_op_sycl<DataType, RowMajor, int64_t>(sycl_device);
}

EIGEN_DECLARE_TEST(cxx11_tensor_image_op_sycl) {
  // Exercise every SYCL device present; double only where supported.
  for (const auto& device : Eigen::get_sycl_supported_devices()) {
    CALL_SUBTEST(sycl_computing_test_per_device<float>(device));
#ifdef EIGEN_SYCL_DOUBLE_SUPPORT
    CALL_SUBTEST(sycl_computing_test_per_device<double>(device));
#endif
  }
}
1,528
379
# -*- coding: utf-8 -*-

from __future__ import division, print_function

__all__ = ["HODLRSolver"]

import numpy as np

from .basic import BasicSolver
from ._hodlr import HODLRSolver as HODLRSolverInterface


class HODLRSolver(BasicSolver):
    """Solver backed by the compiled HODLR (hierarchical off-diagonal
    low-rank) implementation in ``_hodlr``.

    Exposes the :class:`BasicSolver` interface while delegating all heavy
    computation to the C extension object stored in ``self.solver``.

    :param kernel: the kernel whose matrix will be factorized.
    :param min_size: smallest block size used by the hierarchical solver.
    :param tol: tolerance of the low-rank approximation.
    :param seed: seed for the randomized factorization.
    """

    def __init__(self, kernel, min_size=100, tol=0.1, seed=42):
        self.min_size = min_size
        self.tol = tol
        self.seed = seed
        super(HODLRSolver, self).__init__(kernel)

    def compute(self, x, yerr):
        """Factorize the kernel matrix for inputs ``x`` with noise ``yerr``.

        A fresh interface object is created on every call, discarding any
        previous factorization.
        """
        self.solver = HODLRSolverInterface()
        self.solver.compute(self.kernel, x, yerr,
                            self.min_size, self.tol, self.seed)
        self._log_det = self.solver.log_determinant
        self.computed = self.solver.computed

    def apply_inverse(self, y, in_place=False):
        """Apply the inverse of the factorized matrix to ``y``."""
        return self.solver.apply_inverse(y, in_place=in_place)

    def dot_solve(self, y):
        """Compute ``y^T K^{-1} y`` using the factorization."""
        return self.solver.dot_solve(y)

    def apply_sqrt(self, r):
        """Not supported by the HODLR backend; always raises."""
        raise NotImplementedError("apply_sqrt is not implemented for the "
                                  "HODLRSolver")

    def get_inverse(self):
        """Return the dense inverse of the factorized matrix."""
        return self.solver.get_inverse()

    def __getstate__(self):
        # The compiled solver handle cannot be pickled: drop it and mark the
        # state as not computed so a fresh compute() is required after
        # unpickling.  NOTE(review): this writes "_computed", presumably the
        # attribute backing BasicSolver's ``computed`` flag -- confirm
        # against .basic.
        state = self.__dict__.copy()
        state["_computed"] = False
        del state["solver"]
        return state

    def __setstate__(self, state):
        # Restore everything except the solver handle (removed above).
        self.__dict__.update(state)
648
14,668
// Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/ash/policy/reporting/metrics_reporting/audio/audio_events_observer.h" #include <utility> #include "chromeos/services/cros_healthd/public/cpp/service_connection.h" #include "components/reporting/proto/synced/metric_data.pb.h" namespace reporting { AudioEventsObserver::AudioEventsObserver() : CrosHealthdEventsObserverBase< chromeos::cros_healthd::mojom::CrosHealthdAudioObserver>(this) {} AudioEventsObserver::~AudioEventsObserver() = default; void AudioEventsObserver::OnUnderrun() { // No action done with this type of event } void AudioEventsObserver::OnSevereUnderrun() { MetricData metric_data; metric_data.mutable_event_data()->set_type( reporting::MetricEventType::AUDIO_SEVERE_UNDERRUN); OnEventObserved(std::move(metric_data)); } void AudioEventsObserver::AddObserver() { chromeos::cros_healthd::ServiceConnection::GetInstance()->AddAudioObserver( BindNewPipeAndPassRemote()); } } // namespace reporting
379
1,350
<reponame>billwert/azure-sdk-for-java // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.dataprotection.implementation; import com.azure.core.annotation.BodyParam; import com.azure.core.annotation.Delete; import com.azure.core.annotation.ExpectedResponses; import com.azure.core.annotation.Get; import com.azure.core.annotation.HeaderParam; import com.azure.core.annotation.Headers; import com.azure.core.annotation.Host; import com.azure.core.annotation.HostParam; import com.azure.core.annotation.Patch; import com.azure.core.annotation.PathParam; import com.azure.core.annotation.Post; import com.azure.core.annotation.Put; import com.azure.core.annotation.QueryParam; import com.azure.core.annotation.ReturnType; import com.azure.core.annotation.ServiceInterface; import com.azure.core.annotation.ServiceMethod; import com.azure.core.annotation.UnexpectedResponseExceptionType; import com.azure.core.http.rest.PagedFlux; import com.azure.core.http.rest.PagedIterable; import com.azure.core.http.rest.PagedResponse; import com.azure.core.http.rest.PagedResponseBase; import com.azure.core.http.rest.Response; import com.azure.core.http.rest.RestProxy; import com.azure.core.management.exception.ManagementException; import com.azure.core.management.polling.PollResult; import com.azure.core.util.Context; import com.azure.core.util.FluxUtil; import com.azure.core.util.logging.ClientLogger; import com.azure.core.util.polling.PollerFlux; import com.azure.core.util.polling.SyncPoller; import com.azure.resourcemanager.dataprotection.fluent.BackupVaultsClient; import com.azure.resourcemanager.dataprotection.fluent.models.BackupVaultResourceInner; import com.azure.resourcemanager.dataprotection.fluent.models.CheckNameAvailabilityResultInner; import com.azure.resourcemanager.dataprotection.models.BackupVaultResourceList; import 
com.azure.resourcemanager.dataprotection.models.CheckNameAvailabilityRequest; import com.azure.resourcemanager.dataprotection.models.PatchResourceRequestInput; import java.nio.ByteBuffer; import reactor.core.publisher.Flux; import reactor.core.publisher.Mono; /** An instance of this class provides access to all the operations defined in BackupVaultsClient. */ public final class BackupVaultsClientImpl implements BackupVaultsClient { private final ClientLogger logger = new ClientLogger(BackupVaultsClientImpl.class); /** The proxy service used to perform REST calls. */ private final BackupVaultsService service; /** The service client containing this operation class. */ private final DataProtectionClientImpl client; /** * Initializes an instance of BackupVaultsClientImpl. * * @param client the instance of the service client containing this operation class. */ BackupVaultsClientImpl(DataProtectionClientImpl client) { this.service = RestProxy.create(BackupVaultsService.class, client.getHttpPipeline(), client.getSerializerAdapter()); this.client = client; } /** * The interface defining all the services for DataProtectionClientBackupVaults to be used by the proxy service to * perform REST calls. 
 */
// RestProxy contract for the BackupVaults operations. The implementation is
// generated at runtime from these annotations; bodies below only wire calls
// through this interface. Do not alter routes/params without the service spec.
@Host("{$host}")
@ServiceInterface(name = "DataProtectionClient")
private interface BackupVaultsService {
    // GET: list vaults across the whole subscription (first page).
    @Headers({"Content-Type: application/json"})
    @Get("/subscriptions/{subscriptionId}/providers/Microsoft.DataProtection/backupVaults")
    @ExpectedResponses({200})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<BackupVaultResourceList>> list(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("subscriptionId") String subscriptionId,
        @HeaderParam("Accept") String accept,
        Context context);

    // GET: list vaults within one resource group (first page).
    @Headers({"Content-Type: application/json"})
    @Get(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection"
            + "/backupVaults")
    @ExpectedResponses({200})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<BackupVaultResourceList>> listByResourceGroup(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("resourceGroupName") String resourceGroupName,
        @PathParam("subscriptionId") String subscriptionId,
        @HeaderParam("Accept") String accept,
        Context context);

    // GET: fetch a single vault by resource group + vault name.
    @Headers({"Content-Type: application/json"})
    @Get(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection"
            + "/backupVaults/{vaultName}")
    @ExpectedResponses({200})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<BackupVaultResourceInner>> getByResourceGroup(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("resourceGroupName") String resourceGroupName,
        @PathParam("vaultName") String vaultName,
        @PathParam("subscriptionId") String subscriptionId,
        @HeaderParam("Accept") String accept,
        Context context);

    // PUT: long-running create/update. The raw Flux<ByteBuffer> response body
    // is handed to the LRO machinery (see beginCreateOrUpdateAsync) for polling.
    @Headers({"Content-Type: application/json"})
    @Put(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection"
            + "/backupVaults/{vaultName}")
    @ExpectedResponses({200, 201})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<Flux<ByteBuffer>>> createOrUpdate(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("vaultName") String vaultName,
        @PathParam("resourceGroupName") String resourceGroupName,
        @PathParam("subscriptionId") String subscriptionId,
        @BodyParam("application/json") BackupVaultResourceInner parameters,
        @HeaderParam("Accept") String accept,
        Context context);

    // DELETE: 202 is an accepted response but no poll handle is surfaced here.
    // NOTE(review): on 202 the server-side deletion may still be in progress
    // when the Mono completes — confirm against the service specification.
    @Headers({"Content-Type: application/json"})
    @Delete(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection"
            + "/backupVaults/{vaultName}")
    @ExpectedResponses({200, 202, 204})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<Void>> delete(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("resourceGroupName") String resourceGroupName,
        @PathParam("vaultName") String vaultName,
        @PathParam("subscriptionId") String subscriptionId,
        @HeaderParam("Accept") String accept,
        Context context);

    // PATCH: long-running tag/property update; same raw-body LRO pattern as PUT.
    @Headers({"Content-Type: application/json"})
    @Patch(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection"
            + "/backupVaults/{vaultName}")
    @ExpectedResponses({200, 202})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<Flux<ByteBuffer>>> update(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("vaultName") String vaultName,
        @PathParam("resourceGroupName") String resourceGroupName,
        @PathParam("subscriptionId") String subscriptionId,
        @BodyParam("application/json") PatchResourceRequestInput parameters,
        @HeaderParam("Accept") String accept,
        Context context);

    // POST: name-availability probe scoped to a location.
    @Headers({"Content-Type: application/json"})
    @Post(
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataProtection"
            + "/locations/{location}/checkNameAvailability")
    @ExpectedResponses({200})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<CheckNameAvailabilityResultInner>> checkNameAvailability(
        @HostParam("$host") String endpoint,
        @QueryParam("api-version") String apiVersion,
        @PathParam("resourceGroupName") String resourceGroupName,
        @PathParam("subscriptionId") String subscriptionId,
        @PathParam("location") String location,
        @BodyParam("application/json") CheckNameAvailabilityRequest parameters,
        @HeaderParam("Accept") String accept,
        Context context);

    // GET on an opaque, pre-encoded nextLink URL (subscription-scope paging).
    @Headers({"Content-Type: application/json"})
    @Get("{nextLink}")
    @ExpectedResponses({200})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<BackupVaultResourceList>> getInSubscriptionNext(
        @PathParam(value = "nextLink", encoded = true) String nextLink,
        @HostParam("$host") String endpoint,
        @HeaderParam("Accept") String accept,
        Context context);

    // GET on an opaque, pre-encoded nextLink URL (resource-group-scope paging).
    @Headers({"Content-Type: application/json"})
    @Get("{nextLink}")
    @ExpectedResponses({200})
    @UnexpectedResponseExceptionType(ManagementException.class)
    Mono<Response<BackupVaultResourceList>> getInResourceGroupNext(
        @PathParam(value = "nextLink", encoded = true) String nextLink,
        @HostParam("$host") String endpoint,
        @HeaderParam("Accept") String accept,
        Context context);
}

/**
 * Returns resource collection belonging to a subscription.
 *
 * @throws ManagementException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return backupVaultResourceList.
*/ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<PagedResponse<BackupVaultResourceInner>> listSinglePageAsync() { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; return FluxUtil .withContext( context -> service .list( this.client.getEndpoint(), this.client.getApiVersion(), this.client.getSubscriptionId(), accept, context)) .<PagedResponse<BackupVaultResourceInner>>map( res -> new PagedResponseBase<>( res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); } /** * Returns resource collection belonging to a subscription. * * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<PagedResponse<BackupVaultResourceInner>> listSinglePageAsync(Context context) { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; context = this.client.mergeContext(context); return service .list( this.client.getEndpoint(), this.client.getApiVersion(), this.client.getSubscriptionId(), accept, context) .map( res -> new PagedResponseBase<>( res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)); } /** * Returns resource collection belonging to a subscription. * * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. */ @ServiceMethod(returns = ReturnType.COLLECTION) private PagedFlux<BackupVaultResourceInner> listAsync() { return new PagedFlux<>(() -> listSinglePageAsync(), nextLink -> getInSubscriptionNextSinglePageAsync(nextLink)); } /** * Returns resource collection belonging to a subscription. * * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) private PagedFlux<BackupVaultResourceInner> listAsync(Context context) { return new PagedFlux<>( () -> listSinglePageAsync(context), nextLink -> getInSubscriptionNextSinglePageAsync(nextLink, context)); } /** * Returns resource collection belonging to a subscription. * * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<BackupVaultResourceInner> list() { return new PagedIterable<>(listAsync()); } /** * Returns resource collection belonging to a subscription. * * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<BackupVaultResourceInner> list(Context context) { return new PagedIterable<>(listAsync(context)); } /** * Returns resource collection belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<PagedResponse<BackupVaultResourceInner>> listByResourceGroupSinglePageAsync(String resourceGroupName) { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (resourceGroupName == null) { return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; return FluxUtil .withContext( context -> service .listByResourceGroup( this.client.getEndpoint(), this.client.getApiVersion(), resourceGroupName, this.client.getSubscriptionId(), accept, context)) .<PagedResponse<BackupVaultResourceInner>>map( res -> new PagedResponseBase<>( res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); } /** * Returns resource collection belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<PagedResponse<BackupVaultResourceInner>> listByResourceGroupSinglePageAsync( String resourceGroupName, Context context) { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (resourceGroupName == null) { return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; context = this.client.mergeContext(context); return service .listByResourceGroup( this.client.getEndpoint(), this.client.getApiVersion(), resourceGroupName, this.client.getSubscriptionId(), accept, context) .map( res -> new PagedResponseBase<>( res.getRequest(), res.getStatusCode(), res.getHeaders(), res.getValue().value(), res.getValue().nextLink(), null)); } /** * Returns resource collection belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. */ @ServiceMethod(returns = ReturnType.COLLECTION) private PagedFlux<BackupVaultResourceInner> listByResourceGroupAsync(String resourceGroupName) { return new PagedFlux<>( () -> listByResourceGroupSinglePageAsync(resourceGroupName), nextLink -> getInResourceGroupNextSinglePageAsync(nextLink)); } /** * Returns resource collection belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. 
* @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. */ @ServiceMethod(returns = ReturnType.COLLECTION) private PagedFlux<BackupVaultResourceInner> listByResourceGroupAsync(String resourceGroupName, Context context) { return new PagedFlux<>( () -> listByResourceGroupSinglePageAsync(resourceGroupName, context), nextLink -> getInResourceGroupNextSinglePageAsync(nextLink, context)); } /** * Returns resource collection belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. */ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<BackupVaultResourceInner> listByResourceGroup(String resourceGroupName) { return new PagedIterable<>(listByResourceGroupAsync(resourceGroupName)); } /** * Returns resource collection belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVaultResourceList. 
*/ @ServiceMethod(returns = ReturnType.COLLECTION) public PagedIterable<BackupVaultResourceInner> listByResourceGroup(String resourceGroupName, Context context) { return new PagedIterable<>(listByResourceGroupAsync(resourceGroupName, context)); } /** * Returns a resource belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param vaultName The name of the backup vault. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<Response<BackupVaultResourceInner>> getByResourceGroupWithResponseAsync( String resourceGroupName, String vaultName) { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (resourceGroupName == null) { return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } if (vaultName == null) { return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; return FluxUtil .withContext( context -> service .getByResourceGroup( this.client.getEndpoint(), this.client.getApiVersion(), resourceGroupName, vaultName, this.client.getSubscriptionId(), accept, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); } /** * Returns a resource belonging to a resource group. 
* * @param resourceGroupName The name of the resource group where the backup vault is present. * @param vaultName The name of the backup vault. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<Response<BackupVaultResourceInner>> getByResourceGroupWithResponseAsync( String resourceGroupName, String vaultName, Context context) { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (resourceGroupName == null) { return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } if (vaultName == null) { return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; context = this.client.mergeContext(context); return service .getByResourceGroup( this.client.getEndpoint(), this.client.getApiVersion(), resourceGroupName, vaultName, this.client.getSubscriptionId(), accept, context); } /** * Returns a resource belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param vaultName The name of the backup vault. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. 
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<BackupVaultResourceInner> getByResourceGroupAsync(String resourceGroupName, String vaultName) { return getByResourceGroupWithResponseAsync(resourceGroupName, vaultName) .flatMap( (Response<BackupVaultResourceInner> res) -> { if (res.getValue() != null) { return Mono.just(res.getValue()); } else { return Mono.empty(); } }); } /** * Returns a resource belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param vaultName The name of the backup vault. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) public BackupVaultResourceInner getByResourceGroup(String resourceGroupName, String vaultName) { return getByResourceGroupAsync(resourceGroupName, vaultName).block(); } /** * Returns a resource belonging to a resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param vaultName The name of the backup vault. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. 
 */
@ServiceMethod(returns = ReturnType.SINGLE)
public Response<BackupVaultResourceInner> getByResourceGroupWithResponse(
    String resourceGroupName, String vaultName, Context context) {
    return getByResourceGroupWithResponseAsync(resourceGroupName, vaultName, context).block();
}

/**
 * Creates or updates a BackupVault resource belonging to a resource group.
 *
 * @param vaultName The name of the backup vault.
 * @param resourceGroupName The name of the resource group where the backup vault is present.
 * @param parameters Request body for operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws ManagementException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return backupVault Resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<Flux<ByteBuffer>>> createOrUpdateWithResponseAsync(
    String vaultName, String resourceGroupName, BackupVaultResourceInner parameters) {
    // Fail fast client-side; the body is additionally validated before sending.
    if (this.client.getEndpoint() == null) {
        return Mono
            .error(
                new IllegalArgumentException(
                    "Parameter this.client.getEndpoint() is required and cannot be null."));
    }
    if (vaultName == null) {
        return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null."));
    }
    if (resourceGroupName == null) {
        return Mono
            .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
    }
    if (this.client.getSubscriptionId() == null) {
        return Mono
            .error(
                new IllegalArgumentException(
                    "Parameter this.client.getSubscriptionId() is required and cannot be null."));
    }
    if (parameters == null) {
        return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
    } else {
        parameters.validate();
    }
    final String accept = "application/json";
    // Raw-body response (Flux<ByteBuffer>) is kept un-deserialized so the LRO
    // poller in beginCreateOrUpdateAsync can consume it.
    return FluxUtil
        .withContext(
            context ->
                service
                    .createOrUpdate(
                        this.client.getEndpoint(),
                        this.client.getApiVersion(),
                        vaultName,
                        resourceGroupName,
                        this.client.getSubscriptionId(),
                        parameters,
                        accept,
                        context))
        .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
}

/**
 * Creates or updates a BackupVault resource belonging to a resource group.
 *
 * @param vaultName The name of the backup vault.
 * @param resourceGroupName The name of the resource group where the backup vault is present.
 * @param parameters Request body for operation.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws ManagementException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return backupVault Resource.
 */
@ServiceMethod(returns = ReturnType.SINGLE)
private Mono<Response<Flux<ByteBuffer>>> createOrUpdateWithResponseAsync(
    String vaultName, String resourceGroupName, BackupVaultResourceInner parameters, Context context) {
    // Context-explicit variant; validations mirror the overload above.
    if (this.client.getEndpoint() == null) {
        return Mono
            .error(
                new IllegalArgumentException(
                    "Parameter this.client.getEndpoint() is required and cannot be null."));
    }
    if (vaultName == null) {
        return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null."));
    }
    if (resourceGroupName == null) {
        return Mono
            .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
    }
    if (this.client.getSubscriptionId() == null) {
        return Mono
            .error(
                new IllegalArgumentException(
                    "Parameter this.client.getSubscriptionId() is required and cannot be null."));
    }
    if (parameters == null) {
        return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
    } else {
        parameters.validate();
    }
    final String accept = "application/json";
    context = this.client.mergeContext(context);
    return service
        .createOrUpdate(
            this.client.getEndpoint(),
            this.client.getApiVersion(),
            vaultName,
            resourceGroupName,
            this.client.getSubscriptionId(),
            parameters,
            accept,
            context);
}

/**
 * Creates or updates a BackupVault resource belonging to a resource group.
 *
 * @param vaultName The name of the backup vault.
 * @param resourceGroupName The name of the resource group where the backup vault is present.
 * @param parameters Request body for operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws ManagementException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return backupVault Resource.
 */
@ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
private PollerFlux<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginCreateOrUpdateAsync(
    String vaultName, String resourceGroupName, BackupVaultResourceInner parameters) {
    // Wrap the raw PUT response in the LRO poller; Context.NONE because this is
    // the no-context overload.
    Mono<Response<Flux<ByteBuffer>>> mono =
        createOrUpdateWithResponseAsync(vaultName, resourceGroupName, parameters);
    return this
        .client
        .<BackupVaultResourceInner, BackupVaultResourceInner>getLroResult(
            mono,
            this.client.getHttpPipeline(),
            BackupVaultResourceInner.class,
            BackupVaultResourceInner.class,
            Context.NONE);
}

/**
 * Creates or updates a BackupVault resource belonging to a resource group.
 *
 * @param vaultName The name of the backup vault.
 * @param resourceGroupName The name of the resource group where the backup vault is present.
 * @param parameters Request body for operation.
 * @param context The context to associate with this operation.
 * @throws IllegalArgumentException thrown if parameters fail the validation.
 * @throws ManagementException thrown if the request is rejected by server.
 * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
 * @return backupVault Resource.
*/ @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) private PollerFlux<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginCreateOrUpdateAsync( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters, Context context) { context = this.client.mergeContext(context); Mono<Response<Flux<ByteBuffer>>> mono = createOrUpdateWithResponseAsync(vaultName, resourceGroupName, parameters, context); return this .client .<BackupVaultResourceInner, BackupVaultResourceInner>getLroResult( mono, this.client.getHttpPipeline(), BackupVaultResourceInner.class, BackupVaultResourceInner.class, context); } /** * Creates or updates a BackupVault resource belonging to a resource group. * * @param vaultName The name of the backup vault. * @param resourceGroupName The name of the resource group where the backup vault is present. * @param parameters Request body for operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) public SyncPoller<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginCreateOrUpdate( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters) { return beginCreateOrUpdateAsync(vaultName, resourceGroupName, parameters).getSyncPoller(); } /** * Creates or updates a BackupVault resource belonging to a resource group. * * @param vaultName The name of the backup vault. * @param resourceGroupName The name of the resource group where the backup vault is present. * @param parameters Request body for operation. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. 
* @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION) public SyncPoller<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginCreateOrUpdate( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters, Context context) { return beginCreateOrUpdateAsync(vaultName, resourceGroupName, parameters, context).getSyncPoller(); } /** * Creates or updates a BackupVault resource belonging to a resource group. * * @param vaultName The name of the backup vault. * @param resourceGroupName The name of the resource group where the backup vault is present. * @param parameters Request body for operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<BackupVaultResourceInner> createOrUpdateAsync( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters) { return beginCreateOrUpdateAsync(vaultName, resourceGroupName, parameters) .last() .flatMap(this.client::getLroFinalResultOrError); } /** * Creates or updates a BackupVault resource belonging to a resource group. * * @param vaultName The name of the backup vault. * @param resourceGroupName The name of the resource group where the backup vault is present. * @param parameters Request body for operation. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. 
* @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<BackupVaultResourceInner> createOrUpdateAsync( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters, Context context) { return beginCreateOrUpdateAsync(vaultName, resourceGroupName, parameters, context) .last() .flatMap(this.client::getLroFinalResultOrError); } /** * Creates or updates a BackupVault resource belonging to a resource group. * * @param vaultName The name of the backup vault. * @param resourceGroupName The name of the resource group where the backup vault is present. * @param parameters Request body for operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. */ @ServiceMethod(returns = ReturnType.SINGLE) public BackupVaultResourceInner createOrUpdate( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters) { return createOrUpdateAsync(vaultName, resourceGroupName, parameters).block(); } /** * Creates or updates a BackupVault resource belonging to a resource group. * * @param vaultName The name of the backup vault. * @param resourceGroupName The name of the resource group where the backup vault is present. * @param parameters Request body for operation. * @param context The context to associate with this operation. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return backupVault Resource. 
*/ @ServiceMethod(returns = ReturnType.SINGLE) public BackupVaultResourceInner createOrUpdate( String vaultName, String resourceGroupName, BackupVaultResourceInner parameters, Context context) { return createOrUpdateAsync(vaultName, resourceGroupName, parameters, context).block(); } /** * Deletes a BackupVault resource from the resource group. * * @param resourceGroupName The name of the resource group where the backup vault is present. * @param vaultName The name of the backup vault. * @throws IllegalArgumentException thrown if parameters fail the validation. * @throws ManagementException thrown if the request is rejected by server. * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent. * @return the completion. */ @ServiceMethod(returns = ReturnType.SINGLE) private Mono<Response<Void>> deleteWithResponseAsync(String resourceGroupName, String vaultName) { if (this.client.getEndpoint() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getEndpoint() is required and cannot be null.")); } if (resourceGroupName == null) { return Mono .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null.")); } if (vaultName == null) { return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null.")); } if (this.client.getSubscriptionId() == null) { return Mono .error( new IllegalArgumentException( "Parameter this.client.getSubscriptionId() is required and cannot be null.")); } final String accept = "application/json"; return FluxUtil .withContext( context -> service .delete( this.client.getEndpoint(), this.client.getApiVersion(), resourceGroupName, vaultName, this.client.getSubscriptionId(), accept, context)) .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly())); } /** * Deletes a BackupVault resource from the resource group. 
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param vaultName The name of the backup vault.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link Response} on successful completion of the {@link Mono}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<Response<Void>> deleteWithResponseAsync(String resourceGroupName, String vaultName, Context context) {
        // Client-side validation: fail fast with Mono.error before any network call.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (vaultName == null) {
            return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        final String accept = "application/json";
        // Merge the caller-supplied context with the client's default context.
        context = this.client.mergeContext(context);
        return service
            .delete(
                this.client.getEndpoint(),
                this.client.getApiVersion(),
                resourceGroupName,
                vaultName,
                this.client.getSubscriptionId(),
                accept,
                context);
    }

    /**
     * Deletes a BackupVault resource from the resource group.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param vaultName The name of the backup vault.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return a {@link Mono} that completes when the delete request has finished.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<Void> deleteAsync(String resourceGroupName, String vaultName) {
        // Drop the Response wrapper; callers of this overload only observe completion or error.
        return deleteWithResponseAsync(resourceGroupName, vaultName).flatMap((Response<Void> res) -> Mono.empty());
    }

    /**
     * Deletes a BackupVault resource from the resource group.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param vaultName The name of the backup vault.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public void delete(String resourceGroupName, String vaultName) {
        // Synchronous wrapper: block until the async delete completes.
        deleteAsync(resourceGroupName, vaultName).block();
    }

    /**
     * Deletes a BackupVault resource from the resource group.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param vaultName The name of the backup vault.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the response.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<Void> deleteWithResponse(String resourceGroupName, String vaultName, Context context) {
        // Synchronous wrapper around the context-aware async overload.
        return deleteWithResponseAsync(resourceGroupName, vaultName, context).block();
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the raw streaming {@link Response} used to drive the long-running update operation.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<Response<Flux<ByteBuffer>>> updateWithResponseAsync(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters) {
        // Client-side validation: fail fast with Mono.error before any network call.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (vaultName == null) {
            return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        if (parameters == null) {
            return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
        } else {
            // Validates the request body itself (required nested properties etc.).
            parameters.validate();
        }
        final String accept = "application/json";
        // No explicit Context overload: propagate the client's default context through Reactor.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .update(
                            this.client.getEndpoint(),
                            this.client.getApiVersion(),
                            vaultName,
                            resourceGroupName,
                            this.client.getSubscriptionId(),
                            parameters,
                            accept,
                            context))
            .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the raw streaming {@link Response} used to drive the long-running update operation.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<Response<Flux<ByteBuffer>>> updateWithResponseAsync(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters, Context context) {
        // Client-side validation: fail fast with Mono.error before any network call.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (vaultName == null) {
            return Mono.error(new IllegalArgumentException("Parameter vaultName is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        if (parameters == null) {
            return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
        } else {
            parameters.validate();
        }
        final String accept = "application/json";
        // Merge the caller-supplied context with the client's default context.
        context = this.client.mergeContext(context);
        return service
            .update(
                this.client.getEndpoint(),
                this.client.getApiVersion(),
                vaultName,
                resourceGroupName,
                this.client.getSubscriptionId(),
                parameters,
                accept,
                context);
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link PollerFlux} for polling of the BackupVault resource.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    private PollerFlux<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginUpdateAsync(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters) {
        // Kick off the raw update request, then wrap it in an LRO poller that
        // deserializes both intermediate and final payloads as BackupVaultResourceInner.
        Mono<Response<Flux<ByteBuffer>>> mono = updateWithResponseAsync(vaultName, resourceGroupName, parameters);
        return this
            .client
            .<BackupVaultResourceInner, BackupVaultResourceInner>getLroResult(
                mono,
                this.client.getHttpPipeline(),
                BackupVaultResourceInner.class,
                BackupVaultResourceInner.class,
                Context.NONE);
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link PollerFlux} for polling of the BackupVault resource.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    private PollerFlux<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginUpdateAsync(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters, Context context) {
        // Merge the context up front so both the initial request and all poll requests share it.
        context = this.client.mergeContext(context);
        Mono<Response<Flux<ByteBuffer>>> mono =
            updateWithResponseAsync(vaultName, resourceGroupName, parameters, context);
        return this
            .client
            .<BackupVaultResourceInner, BackupVaultResourceInner>getLroResult(
                mono,
                this.client.getHttpPipeline(),
                BackupVaultResourceInner.class,
                BackupVaultResourceInner.class,
                context);
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link SyncPoller} for polling of the BackupVault resource.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    public SyncPoller<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginUpdate(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters) {
        // Synchronous poller view over the async LRO.
        return beginUpdateAsync(vaultName, resourceGroupName, parameters).getSyncPoller();
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the {@link SyncPoller} for polling of the BackupVault resource.
     */
    @ServiceMethod(returns = ReturnType.LONG_RUNNING_OPERATION)
    public SyncPoller<PollResult<BackupVaultResourceInner>, BackupVaultResourceInner> beginUpdate(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters, Context context) {
        // Synchronous poller view over the context-aware async LRO.
        return beginUpdateAsync(vaultName, resourceGroupName, parameters, context).getSyncPoller();
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the BackupVault resource on successful completion of the {@link Mono}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<BackupVaultResourceInner> updateAsync(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters) {
        // Poll to the last LRO state, then surface either the final resource or the LRO error.
        return beginUpdateAsync(vaultName, resourceGroupName, parameters)
            .last()
            .flatMap(this.client::getLroFinalResultOrError);
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the BackupVault resource on successful completion of the {@link Mono}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<BackupVaultResourceInner> updateAsync(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters, Context context) {
        // Poll to the last LRO state, then surface either the final resource or the LRO error.
        return beginUpdateAsync(vaultName, resourceGroupName, parameters, context)
            .last()
            .flatMap(this.client::getLroFinalResultOrError);
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the BackupVault resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BackupVaultResourceInner update(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters) {
        // Synchronous wrapper: block until the long-running update finishes.
        return updateAsync(vaultName, resourceGroupName, parameters).block();
    }

    /**
     * Updates a BackupVault resource belonging to a resource group. For example, updating tags for a resource.
     *
     * @param vaultName The name of the backup vault.
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param parameters Request body for operation.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the BackupVault resource.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public BackupVaultResourceInner update(
        String vaultName, String resourceGroupName, PatchResourceRequestInput parameters, Context context) {
        // Synchronous wrapper: block until the context-aware long-running update finishes.
        return updateAsync(vaultName, resourceGroupName, parameters, context).block();
    }

    /**
     * API to check for resource name availability.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param location The location in which uniqueness will be verified.
     * @param parameters Check name availability request.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the name-availability result together with the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<Response<CheckNameAvailabilityResultInner>> checkNameAvailabilityWithResponseAsync(
        String resourceGroupName, String location, CheckNameAvailabilityRequest parameters) {
        // Client-side validation: fail fast with Mono.error before any network call.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        if (location == null) {
            return Mono.error(new IllegalArgumentException("Parameter location is required and cannot be null."));
        }
        if (parameters == null) {
            return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
        } else {
            // Validates the request body itself (required nested properties etc.).
            parameters.validate();
        }
        final String accept = "application/json";
        // No explicit Context overload: propagate the client's default context through Reactor.
        return FluxUtil
            .withContext(
                context ->
                    service
                        .checkNameAvailability(
                            this.client.getEndpoint(),
                            this.client.getApiVersion(),
                            resourceGroupName,
                            this.client.getSubscriptionId(),
                            location,
                            parameters,
                            accept,
                            context))
            .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
    }

    /**
     * API to check for resource name availability.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param location The location in which uniqueness will be verified.
     * @param parameters Check name availability request.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the name-availability result together with the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<Response<CheckNameAvailabilityResultInner>> checkNameAvailabilityWithResponseAsync(
        String resourceGroupName, String location, CheckNameAvailabilityRequest parameters, Context context) {
        // Client-side validation: fail fast with Mono.error before any network call.
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        if (resourceGroupName == null) {
            return Mono
                .error(new IllegalArgumentException("Parameter resourceGroupName is required and cannot be null."));
        }
        if (this.client.getSubscriptionId() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getSubscriptionId() is required and cannot be null."));
        }
        if (location == null) {
            return Mono.error(new IllegalArgumentException("Parameter location is required and cannot be null."));
        }
        if (parameters == null) {
            return Mono.error(new IllegalArgumentException("Parameter parameters is required and cannot be null."));
        } else {
            parameters.validate();
        }
        final String accept = "application/json";
        // Merge the caller-supplied context with the client's default context.
        context = this.client.mergeContext(context);
        return service
            .checkNameAvailability(
                this.client.getEndpoint(),
                this.client.getApiVersion(),
                resourceGroupName,
                this.client.getSubscriptionId(),
                location,
                parameters,
                accept,
                context);
    }

    /**
     * API to check for resource name availability.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param location The location in which uniqueness will be verified.
     * @param parameters Check name availability request.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the name-availability result on successful completion of the {@link Mono}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<CheckNameAvailabilityResultInner> checkNameAvailabilityAsync(
        String resourceGroupName, String location, CheckNameAvailabilityRequest parameters) {
        // Unwrap the Response; a null body maps to an empty Mono rather than a null element.
        return checkNameAvailabilityWithResponseAsync(resourceGroupName, location, parameters)
            .flatMap(
                (Response<CheckNameAvailabilityResultInner> res) -> {
                    if (res.getValue() != null) {
                        return Mono.just(res.getValue());
                    } else {
                        return Mono.empty();
                    }
                });
    }

    /**
     * API to check for resource name availability.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param location The location in which uniqueness will be verified.
     * @param parameters Check name availability request.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the name-availability result.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public CheckNameAvailabilityResultInner checkNameAvailability(
        String resourceGroupName, String location, CheckNameAvailabilityRequest parameters) {
        // Synchronous wrapper: block until the async check completes.
        return checkNameAvailabilityAsync(resourceGroupName, location, parameters).block();
    }

    /**
     * API to check for resource name availability.
     *
     * @param resourceGroupName The name of the resource group where the backup vault is present.
     * @param location The location in which uniqueness will be verified.
     * @param parameters Check name availability request.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the name-availability result together with the {@link Response}.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    public Response<CheckNameAvailabilityResultInner> checkNameAvailabilityWithResponse(
        String resourceGroupName, String location, CheckNameAvailabilityRequest parameters, Context context) {
        // Synchronous wrapper around the context-aware async overload.
        return checkNameAvailabilityWithResponseAsync(resourceGroupName, location, parameters, context).block();
    }

    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the next page of BackupVault resources.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<PagedResponse<BackupVaultResourceInner>> getInSubscriptionNextSinglePageAsync(String nextLink) {
        // nextLink comes from the previous page's response; null means the caller paged too far.
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        final String accept = "application/json";
        // Adapt the raw list response into a PagedResponse; nextLink drives subsequent pages.
        return FluxUtil
            .withContext(context -> service.getInSubscriptionNext(nextLink, this.client.getEndpoint(), accept, context))
            .<PagedResponse<BackupVaultResourceInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null))
            .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
    }

    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the next page of BackupVault resources.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<PagedResponse<BackupVaultResourceInner>> getInSubscriptionNextSinglePageAsync(
        String nextLink, Context context) {
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        final String accept = "application/json";
        // Merge the caller-supplied context with the client's default context.
        context = this.client.mergeContext(context);
        return service
            .getInSubscriptionNext(nextLink, this.client.getEndpoint(), accept, context)
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null));
    }

    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the next page of BackupVault resources.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<PagedResponse<BackupVaultResourceInner>> getInResourceGroupNextSinglePageAsync(String nextLink) {
        // nextLink comes from the previous page's response; null means the caller paged too far.
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        final String accept = "application/json";
        // Adapt the raw list response into a PagedResponse; nextLink drives subsequent pages.
        return FluxUtil
            .withContext(
                context -> service.getInResourceGroupNext(nextLink, this.client.getEndpoint(), accept, context))
            .<PagedResponse<BackupVaultResourceInner>>map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null))
            .contextWrite(context -> context.putAll(FluxUtil.toReactorContext(this.client.getContext()).readOnly()));
    }

    /**
     * Get the next page of items.
     *
     * @param nextLink The nextLink parameter.
     * @param context The context to associate with this operation.
     * @throws IllegalArgumentException thrown if parameters fail the validation.
     * @throws ManagementException thrown if the request is rejected by server.
     * @throws RuntimeException all other wrapped checked exceptions if the request fails to be sent.
     * @return the next page of BackupVault resources.
     */
    @ServiceMethod(returns = ReturnType.SINGLE)
    private Mono<PagedResponse<BackupVaultResourceInner>> getInResourceGroupNextSinglePageAsync(
        String nextLink, Context context) {
        if (nextLink == null) {
            return Mono.error(new IllegalArgumentException("Parameter nextLink is required and cannot be null."));
        }
        if (this.client.getEndpoint() == null) {
            return Mono
                .error(
                    new IllegalArgumentException(
                        "Parameter this.client.getEndpoint() is required and cannot be null."));
        }
        final String accept = "application/json";
        // Merge the caller-supplied context with the client's default context.
        context = this.client.mergeContext(context);
        return service
            .getInResourceGroupNext(nextLink, this.client.getEndpoint(), accept, context)
            .map(
                res ->
                    new PagedResponseBase<>(
                        res.getRequest(),
                        res.getStatusCode(),
                        res.getHeaders(),
                        res.getValue().value(),
                        res.getValue().nextLink(),
                        null));
    }
}
30,433
333
# Package entry point for statannot: re-exports the public API from the
# implementation submodules.
#
# NOTE(review): __all__ lists only the 'statannot' submodule, so
# `from statannot import *` will NOT export add_stat_annotation,
# stat_test, or __version__ even though they are imported below --
# confirm whether they should be added to __all__.
__all__ = ['statannot']

# Statistical-annotation helpers implemented in statannot/statannot.py.
from .statannot import add_stat_annotation
from .statannot import stat_test
# Single-source package version string.
from ._version import __version__
39