max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
480
/*
 * Copyright [2013-2021], Alibaba Group Holding Limited
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.alibaba.polardbx.executor.balancer;

import com.alibaba.polardbx.common.utils.TStringUtil;
import org.apache.calcite.sql.SqlRebalance;

/**
 * Options for balance.
 *
 * <p>Instances are created through the static factories ({@link #withDefault()},
 * {@link #withBackground()}, {@link #fromSqlNode(SqlRebalance)}); the constructor
 * is private.
 *
 * @author moyi
 * @since 2021/04
 */
public class BalanceOptions {

    /**
     * Default split threshold for a partition: 512 MiB.
     * NOTE(review): deliberately non-final — mutated globally via
     * {@link #setMaxPartitionSize(long)}.
     */
    public static long DEFAULT_MAX_PARTITION_SIZE = 512 << 20;
    /** Hard upper bound on the number of partitions. */
    public static final int MAX_PARTITION_COUNT = 8192;
    /** Default cap on actions performed by one job. */
    public static final int DEFAULT_MAX_ACTION = 50;
    public static final int DEFAULT_MIN_PARTITIONS = 4;
    /** Lower bound returned by {@link #estimateSplitCount(int, long)}. */
    public static final int SPLIT_PARTITION_MIN_COUNT = 2;
    /** Upper bound returned by {@link #estimateSplitCount(int, long)}. */
    public static final int SPLIT_PARTITION_MAX_COUNT = 16;

    /**
     * Whether this is just an explain
     */
    public boolean explain = false;

    /**
     * Whether manually or automatic rebalance
     */
    public boolean manually = false;

    /**
     * Max actions to perform in a job
     */
    public int maxActions;

    /**
     * The policy to execute
     */
    public String policy;

    /**
     * The threshold of a split-partition policy.
     */
    public long maxPartitionSize;

    /**
     * Options of drain-node
     */
    public String drainNode;

    /**
     * Debug mode, currently for MOVE DATABASE action
     */
    public boolean debug = false;

    /**
     * Async or sync
     */
    public boolean async = false;

    /**
     * Disk info about data-node
     */
    public String diskInfo;

    // Use the static factories instead of direct construction.
    private BalanceOptions() {
    }

    /**
     * Builders
     */
    public static BalanceOptions withDefault() {
        BalanceOptions res = new BalanceOptions();
        res.maxPartitionSize = DEFAULT_MAX_PARTITION_SIZE;
        res.maxActions = DEFAULT_MAX_ACTION;
        return res;
    }

    /** Default options for background rebalance: a single action per job. */
    public static BalanceOptions withBackground() {
        BalanceOptions result = withDefault();
        result.maxActions = 1;
        return result;
    }

    /**
     * Builds options from a REBALANCE SQL node; zero / blank values in the SQL
     * node keep the corresponding defaults.
     */
    public static BalanceOptions fromSqlNode(SqlRebalance sqlNode) {
        BalanceOptions res = withDefault();
        res.manually = true;
        res.policy = sqlNode.getPolicy();
        res.explain = sqlNode.isExplain();
        res.debug = sqlNode.isDebug();
        res.async = sqlNode.isAsync();
        res.diskInfo = sqlNode.getDiskInfo();
        if (sqlNode.getMaxPartitionSize() != 0) {
            res.maxPartitionSize = sqlNode.getMaxPartitionSize();
        }
        if (sqlNode.getMaxActions() != 0) {
            res.maxActions = sqlNode.getMaxActions();
        }
        if (!TStringUtil.isBlank(sqlNode.getDrainNode())) {
            res.drainNode = sqlNode.getDrainNode();
        }
        return res;
    }

    public BalanceOptions withDrainNode(String drainNode) {
        this.drainNode = drainNode;
        return this;
    }

    public BalanceOptions withDiskInfo(String diskInfo) {
        this.diskInfo = diskInfo;
        return this;
    }

    /** Overrides the process-wide default split threshold. */
    public static void setMaxPartitionSize(long value) {
        DEFAULT_MAX_PARTITION_SIZE = value;
    }

    /** True when a drain-node option was supplied. */
    public boolean isDrainNode() {
        return TStringUtil.isNotBlank(this.drainNode);
    }

    /**
     * Calculate split-count based on current number of partitions and partition size.
     * Result is clamped to [SPLIT_PARTITION_MIN_COUNT, SPLIT_PARTITION_MAX_COUNT].
     *
     * @param partitionNum number of partitions in the group
     * @param partitionSize size of current partition
     */
    public long estimateSplitCount(int partitionNum, long partitionSize) {
        // estimateSplitPartitionSize clamps to >= 1, so this division is safe.
        long maxPartitionSize = estimateSplitPartitionSize(partitionNum);
        int numPartitions = (int) (partitionSize / maxPartitionSize);
        numPartitions = Math.max(SPLIT_PARTITION_MIN_COUNT, numPartitions);
        numPartitions = Math.min(SPLIT_PARTITION_MAX_COUNT, numPartitions);
        return numPartitions;
    }

    /**
     * Calculate split-size based on current number of partitions.
     * Groups with few partitions use a smaller threshold so they split sooner.
     *
     * @param partitionNum number of partitions in the group
     */
    public long estimateSplitPartitionSize(int partitionNum) {
        long maxPartitionSize = this.maxPartitionSize;
        // Compound assignment with a double factor: the result is implicitly
        // narrowed back to long (fraction truncated).
        if (partitionNum <= 2) {
            maxPartitionSize *= 0.2;
        } else if (partitionNum <= 8) {
            maxPartitionSize *= 0.4;
        } else if (partitionNum <= 16) {
            maxPartitionSize *= 0.75;
        }
        // Guard against a zero threshold (division by zero in estimateSplitCount).
        maxPartitionSize = Math.max(1, maxPartitionSize);
        return maxPartitionSize;
    }

    @Override
    public String toString() {
        return "BalanceOptions{" +
            "explain=" + explain +
            ", maxActions=" + maxActions +
            ", policy='" + policy + '\'' +
            ", maxPartitionSize=" + maxPartitionSize +
            ", drainNode='" + drainNode + '\'' +
            ", debug=" + debug +
            ", async=" + async +
            ", diskInfo='" + diskInfo + '\'' +
            '}';
    }
}
2,082
1,672
"""Index ROIs of an image repository with a DeepDetect service and run
nearest-neighbor similarity searches over them.

Fixes applied:
- replaced the Python-2 ``print`` statement (``print 'building index...'``)
  with the ``print()`` call used everywhere else in the script; as written
  the file was a SyntaxError under Python 3.
- narrowed the bare ``except:`` around service creation to ``except
  Exception`` (still best-effort: the service may already exist).
"""
import os, sys, argparse
from os import listdir
from os.path import isfile, join
from os import walk
from dd_client import DD
from annoy import AnnoyIndex
import shelve
import cv2

parser = argparse.ArgumentParser()
parser.add_argument("--index", help="repository of images to be indexed")
parser.add_argument("--index-batch-size", type=int, help="size of image batch when indexing", default=1)
parser.add_argument("--search", help="image input file for similarity search")
parser.add_argument("--search-size", help="number of nearest neighbors", type=int, default=10)
parser.add_argument("--confidence-threshold", help="confidence threshold on bounding boxes", type=float, default=0.01)
parser.add_argument("--nclasses", help="number of classes in the model", type=int, default=21)
parser.add_argument("--model-dir", help="model directory", default="model")
args = parser.parse_args()


def batch(iterable, n=1):
    """Yield successive slices of at most ``n`` items from ``iterable``."""
    l = len(iterable)
    for ndx in range(0, l, n):
        yield iterable[ndx:min(ndx + n, l)]


def image_resize(imgfile, width):
    """Load ``imgfile`` and resize it to ``width``, preserving aspect ratio.

    Requires Python 3 true division: ``width / shape[1]`` must yield a float.
    """
    imgquery = cv2.imread(imgfile)
    r = width / imgquery.shape[1]
    dim = (int(width), int(imgquery.shape[0] * r))
    small = cv2.resize(imgquery, dim)
    return small


# DeepDetect service configuration.
host = 'localhost'
port = 8200
sname = 'imageserv'
description = 'image classification'
mllib = 'caffe'
mltype = 'supervised'
extract_layer = 'rois'
nclasses = args.nclasses
width = height = 300
dd = DD(host, port)
dd.set_return_format(dd.RETURN_PYTHON)
ntrees = 1000
metric = 'angular'  # or 'euclidean'

# creating ML service
model_repo = os.getcwd() + '/' + args.model_dir
model = {'repository': model_repo, 'templates': '../templates/caffe/'}
parameters_input = {'connector': 'image', 'width': width, 'height': height}
parameters_mllib = {'nclasses': nclasses}
parameters_output = {}
try:
    dd.put_service(sname, model, description, mllib,
                   parameters_input, parameters_mllib, parameters_output, mltype)
except Exception:
    # Best-effort: the service may already exist on the server.
    pass

# reset call params
parameters_input = {}
parameters_mllib = {'gpu': True}
parameters_output = {'rois': 'rois', 'confidence_threshold': args.confidence_threshold, 'best': 1}

if args.index:
    parameters_output['index'] = True

    # list files in image repository
    c = 0
    d = 0
    onlyfiles = []
    for (dirpath, dirnames, filenames) in walk(args.index):
        nfilenames = []
        for f in filenames:
            nfilenames.append(dirpath + '/' + f)
        onlyfiles.extend(nfilenames)
    for x in batch(onlyfiles, args.index_batch_size):
        classif = dd.post_predict(sname, x, parameters_input, parameters_mllib, parameters_output)
        for p in classif['body']['predictions']:
            c = c + 1
            uri = p['uri']
            rois = p['rois']
            sys.stdout.write('\rIndexing image ' + str(c) + '/' + str(len(onlyfiles)) + ' : '
                             + str(len(rois)) + ' rois total:' + str(d) + '  ')
            sys.stdout.flush()
            d += len(rois)
            if c >= 100:
                # NOTE(review): this only exits the inner predictions loop;
                # the outer batch loop keeps submitting requests — confirm intent.
                break

    # one last dumb predict call to build the index
    print('building index...\n')
    parameters_output['index'] = False
    parameters_output['build_index'] = True
    classif = dd.post_predict(sname, [nfilenames[0]], parameters_input, parameters_mllib, parameters_output)

if args.search:
    parameters_output['search'] = True
    parameters_output['search_nn'] = args.search_size
    data = [args.search]
    classif = dd.post_predict(sname, data, parameters_input, parameters_mllib, parameters_output)

    # search for every roi
    res = classif['body']['predictions'][0]['rois']
    print('number of ROI in query: ' + str(len(res)))
    for roi in res:
        # near = u.get_nns_by_vector(roi['vals'],args.search_size,include_distances=True)
        near = roi['nns']
        print(near)
        # print query bbox
        img = cv2.imread(args.search)
        bbox = roi['bbox']
        cat = roi['cat']
        cv2.rectangle(img, (int(bbox['xmin']), int(bbox['ymax'])), (int(bbox['xmax']), int(bbox['ymin'])), (255, 0, 0), 2)
        cv2.putText(img, cat, (int(bbox['xmin']), int(bbox['ymax'])), cv2.FONT_HERSHEY_PLAIN, 1, 255)
        cv2.imshow('query', img)
        cv2.waitKey(0)
        for n in near:
            resimg = cv2.imread(n['uri'])
            bbox = n['bbox']
            cat = n['cat']
            cv2.rectangle(resimg, (int(bbox['xmin']), int(bbox['ymax'])), (int(bbox['xmax']), int(bbox['ymin'])), (255, 0, 0), 2)
            cv2.putText(resimg, cat, (int(bbox['xmin']), int(bbox['ymax'])), cv2.FONT_HERSHEY_PLAIN, 1, 255)
            cv2.imshow('res', resimg)
            cv2.waitKey(0)

dd.delete_service(sname, clear='')
2,001
1,025
//================================================================================== // Copyright (c) 2016 , Advanced Micro Devices, Inc. All rights reserved. // /// \author AMD Developer Tools Team /// \file afHTMLUtils.h /// //================================================================================== #ifndef __AFHTMLUTILS_H #define __AFHTMLUTILS_H class apDebugProjectSettings; class apDebuggedProcessRunStartedEvent; class apDebuggedProcessTerminatedEvent; class apDebuggedProcessCreatedEvent; // Local: #include <AMDTApplicationFramework/Include/afApplicationFrameworkDLLBuild.h> /// ----------------------------------------------------------------------------------------------- /// \class Name: AF_API afHTMLUtils /// \brief Description: Used for creation of HTML properties strings /// ----------------------------------------------------------------------------------------------- class AF_API afHTMLUtils { public: afHTMLUtils(); void buildProcessRunStartedEventPropertiesString(const apDebugProjectSettings& processStartedData , const apDebuggedProcessRunStartedEvent& processRunStartedEvent, gtString& propertiesHTMLMessage); void buildProcessTerminationEventPropertiesString(const apDebugProjectSettings& processStartedData, const apDebuggedProcessTerminatedEvent& processTerminationEvent, gtString& propertiesHTMLMessage); void buildProcessCreatedEventPropertiesString(const apDebugProjectSettings& processCreationData, const apDebuggedProcessCreatedEvent& processCreatedEvent, gtString& propertiesHTMLMessage); }; #endif //__AFHTMLUTILS_H
359
2,661
// Copyright 2017 Google Inc.
// Copyright 2020 The Open GEE Contributors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "QuadSelector.h"

#include <khProgressMeter.h>
#include <notify.h>

namespace vectorquery {

// ****************************************************************************
// ***  QuadSelectorBase
// ****************************************************************************
// Base state shared by the minified/full-res selectors: the filter list, the
// per-filter use/cleared flag vectors, and the coverage this selector walks.
QuadSelectorBase::QuadSelectorBase(khProgressMeter *progress,
                                   std::vector<FilterGeoIndex> &filters,
                                   std::vector<bool> &use_set,
                                   const khLevelCoverage &cov) :
    progress_(progress),
    num_filters_(filters.size()),
    filters_(filters),
    use_set_(use_set),
    cleared_set_(num_filters_, false),
    something_cleared_(false),
    something_left_(false),
    cov_(cov)
{
  notify(NFY_DEBUG, "Selector lev:%u, row:%u->%u, col:%u->%u, tiles:%llu",
         cov_.level,
         cov_.extents.beginRow(),
         cov_.extents.endRow(),
         cov_.extents.beginCol(),
         cov_.extents.endCol(),
         static_cast<long long unsigned>(cov_.NumTiles()));
}

// Re-enable every filter that was cleared during this quad's descent and
// reset the "something cleared" marker.
void QuadSelectorBase::RestoreCleared(void) {
  for (unsigned int i = 0; i < num_filters_; ++i) {
    if (cleared_set_[i]) {
      cleared_set_[i] = false;
      use_set_[i] = true;
    }
  }
  something_cleared_ = false;
}

// ****************************************************************************
// ***  MinifiedQuadSelector
// ****************************************************************************
// Recursively descends one quadtree level: either prunes the whole subtree
// (crediting the progress meter for the skipped target tiles) or splits the
// geo indexes and recurses into the four child quads.
void MinifiedQuadSelector::SelectQuad(std::uint32_t row, std::uint32_t col) {
  assert(Contains(row, col));

  TryToClearSets(row, col);

  if (!something_left_) {
    // The index tells us we have nothing to contribute. But we have counted
    // the tiles below this as work that needs to be done. Calculate how
    // many tiles we're going to skip and mark them as skipped.
#if 0
    if ((cov_.level == 5) && (row == 20) && (col == 16)) {
      notify(NFY_WARN, "========== (lrc) 5,20,16: Nothing Left");
    }
#endif

    // figure out this tile's coverage at the target level
    khLevelCoverage this_target_cov =
      khTileAddr(cov_.level, row, col).MagnifiedToLevel(target_cov_.level);

    // intersect that with the targetCoverage (what we really want to do)
    khExtents<std::uint32_t> skip_extents =
      khExtents<std::uint32_t>::Intersection(this_target_cov.extents,
                                             target_cov_.extents);

    // now we know how many we're going to skip
    std::uint64_t numSkipped = std::uint64_t(skip_extents.numRows()) *
                               std::uint64_t(skip_extents.numCols());
    progress_->incrementDone(numSkipped);
#if 0
    notify(NFY_VERBOSE, "MinifiedSelector pruning (lrc) %u,%u,%u",
           cov_.level, row, col);
#endif
  } else {
#if 0
    if ((cov_.level == 5) && (row == 20) && (col == 16)) {
      notify(NFY_WARN, "========== (lrc) 5,20,16: Calling TryToSplitSets");
    }
#endif
    TryToSplitSets(row, col);

    // for each of the four source tiles / quadrants
    for (unsigned int quad = 0; quad < 4; ++quad) {
      // magnify the dest row/col/quad to get row/col for the next level
      std::uint32_t nextRow = 0;
      std::uint32_t nextCol = 0;
      QuadtreePath::MagnifyQuadAddr(row, col, quad, nextRow, nextCol);

      // check if quad exists the next row down
      if (next_->Contains(nextRow, nextCol)) {
        next_->SelectQuad(nextRow, nextCol);
#if 0
      } else if ((cov_.level+1 == 5) && (nextRow == 20) && (nextCol == 16)) {
        notify(NFY_WARN, "========== (lrc) 5,20,16: Not in coverage");
#endif
      }
    }

    // Undo any index splits made for this quad before returning to the parent.
    if (something_split_) {
      RestoreSplit();
    }
    something_left_ = false;
  }

  if (something_cleared_) {
    RestoreCleared();
  }
}

// Chains onto the next (deeper) selector, sharing its progress meter,
// filters and use_set; split_set_ holds saved index handles per filter.
MinifiedQuadSelector::MinifiedQuadSelector(QuadSelectorBase *next,
                                           const khLevelCoverage &cov,
                                           const khLevelCoverage &target_cov) :
    QuadSelectorBase(next->progress_, next->filters_, next->use_set_, cov),
    split_set_(num_filters_),
    something_split_(false),
    target_cov_(target_cov),
    next_(next)
{
}

// Saves filter i's current geo index in split_set_ and replaces it with the
// sub-index for cell (row, col); RestoreSplit undoes this.
void MinifiedQuadSelector::SplitSet(unsigned int i,
                                    std::uint32_t row, std::uint32_t col) {
  something_split_ = true;
  split_set_[i] = filters_[i].geo_index_;
  filters_[i].geo_index_ =
    filters_[i].geo_index_->SplitCell(row, col, target_cov_);
}

void MinifiedQuadSelector::RestoreSplit(void) {
  for (unsigned int i = 0; i < num_filters_; ++i) {
    if (split_set_[i]) {
      // restore the one I split
      filters_[i].geo_index_ = split_set_[i];
      // clear my copy so I don't restore it next time
      split_set_[i] = gstGeoIndexHandle();
    }
  }
  something_split_ = false;
}

// ****************************************************************************
// ***  FullResQuadSelector
// ****************************************************************************
// Leaf of the selector chain: emits one output tile per selected quad,
// gathering selected feature ids from each enabled filter's geo index.
void FullResQuadSelector::SelectQuad(std::uint32_t row, std::uint32_t col) {
  assert(Contains(row, col));

  TryToClearSets(row, col);

  if (!something_left_) {
    progress_->incrementDone(1);
#if 0
    notify(NFY_VERBOSE, "FullResSelector pruning (lrc) %u,%u,%u",
           cov_.level, row, col);
#endif
  } else {
    QuadtreePath qpath(cov_.level, row, col);

    // get an empty output tile that we can reuse for this one
    WorkflowOutputTile *out_tile = manager_.GetEmptyOutputTile();
    out_tile->Reset();
    out_tile->path = qpath;

    // populate this output tile
    std::uint32_t numids = 0;
    for (unsigned int i = 0; i < num_filters_; ++i) {
      if (use_set_[i]) {
        FilterGeoIndex &filter = filters_[i];
        // NOTE(review): tmp_bucket appears unused — candidate for removal.
        khDeleteGuard<gstGeoIndexImpl::FeatureBucket> tmp_bucket;
        if (cov_.level == filter.geo_index_->GetCoverage().level) {
          // We are selecting from a single cell from the index
          filter.geo_index_->GetFeatureIdsFromBucket(
              row, col, out_tile->displayRules[i].selectedIds);
        } else if (cov_.level < filter.geo_index_->GetCoverage().level) {
          // this index is deeper than we are exporting, we need to merge
          // from several FeatureBuckets
          // NOTE: This should be very rare
          khLevelCoverage export_cov =
            khTileAddr(cov_.level, row, col).
            MagnifiedToLevel(filter.geo_index_->GetCoverage().level);
          khExtents<std::uint32_t> to_check =
            khExtents<std::uint32_t>::Intersection(
                export_cov.extents,
                filter.geo_index_->GetCoverage().extents);
          filter.geo_index_->GetFeatureIdsFromBuckets(
              to_check, out_tile->displayRules[i].selectedIds);
        } else {
          // A shallower index here means the minified pass failed to split.
          notify(NFY_FATAL,
                 "Internal Error: SelectQuad called on un-split index");
        }

        // increment numids
        numids += out_tile->displayRules[i].selectedIds.size();
      }
    }

    // Hand off non-empty tiles; recycle tiles with no selected features.
    if (numids) {
      manager_.HandleOutputTile(out_tile);
    } else {
      manager_.ReturnEmptyOutputTile(out_tile);
    }
    progress_->incrementDone(1);
    something_left_ = false;
  }

  if (something_cleared_) {
    RestoreCleared();
  }
}

// PreStorage base supplies use_set_storage_ sized to the filter count before
// QuadSelectorBase binds a reference to it.
FullResQuadSelector::FullResQuadSelector(
    khProgressMeter *progress,
    FilterGeoIndexManager &manager,
    std::vector<FilterGeoIndex> &filters,
    const khLevelCoverage &cov) :
    FullResQuadSelectorPreStorage(filters.size()),
    QuadSelectorBase(progress, filters, use_set_storage_, cov),
    manager_(manager)
{
}

} // namespace vectorquery
3,375
743
<gh_stars>100-1000 package io.adaptivecards.adaptivecardssample.CustomObjects.Actions; import android.app.Activity; import android.content.Context; import android.graphics.PorterDuff; import androidx.fragment.app.FragmentManager; import android.view.ViewGroup; import android.widget.Button; import org.json.JSONException; import org.json.JSONObject; import io.adaptivecards.adaptivecardssample.R; import io.adaptivecards.objectmodel.ActionElementParser; import io.adaptivecards.objectmodel.ActionType; import io.adaptivecards.objectmodel.BaseActionElement; import io.adaptivecards.objectmodel.HostConfig; import io.adaptivecards.objectmodel.JsonValue; import io.adaptivecards.objectmodel.ParseContext; import io.adaptivecards.renderer.BaseActionElementRenderer; import io.adaptivecards.renderer.RenderArgs; import io.adaptivecards.renderer.RenderedAdaptiveCard; import io.adaptivecards.renderer.Util; import io.adaptivecards.renderer.actionhandler.ICardActionHandler; public class CustomRedAction extends BaseActionElement { public CustomRedAction(ActionType type) { super(type); } public String getBackwardString() { return m_backwardsString; } public void setBackwardString(String s) { m_backwardsString = new String(); for(int i = s.length() - 1; i >= 0; i--) { m_backwardsString += s.charAt(i); } } public static class CustomRedActionParser extends ActionElementParser { @Override public BaseActionElement Deserialize(ParseContext context, JsonValue value) { CustomRedAction element = new CustomRedAction(ActionType.Custom); Util.deserializeBaseActionProperties(context, value, element); element.SetElementTypeString(CustomRedAction.CustomActionId); element.SetId("backwardActionDeserialize"); String val = value.getString(); try { JSONObject obj = new JSONObject(val); element.setBackwardString(obj.getString("backwardString")); } catch (JSONException e) { e.printStackTrace(); element.setBackwardString("deliaF"); } return element; } @Override public BaseActionElement 
DeserializeFromString(ParseContext context, String jsonString) { CustomRedAction element = new CustomRedAction(ActionType.Custom); Util.deserializeBaseActionPropertiesFromString(context, jsonString, element); element.SetElementTypeString(CustomRedAction.CustomActionId); element.SetId("backwardActionDeserialize"); try { JSONObject obj = new JSONObject(jsonString); element.setBackwardString(obj.getString("backwardString")); } catch (JSONException e) { e.printStackTrace(); element.setBackwardString("deliaF"); } return element; } } public static class CustomRedActionRenderer extends BaseActionElementRenderer { public CustomRedActionRenderer(Activity activity) { m_activity = activity; } @Override public Button render(RenderedAdaptiveCard renderedCard, Context context, FragmentManager fragmentManager, ViewGroup viewGroup, BaseActionElement baseActionElement, ICardActionHandler cardActionHandler, HostConfig hostConfig, RenderArgs renderArgs) { Button backwardActionButton = new Button(context); renderedCard.registerSubmitableAction(backwardActionButton, renderArgs); CustomRedAction customAction = (CustomRedAction) baseActionElement.findImplObj(); backwardActionButton.getBackground().setColorFilter(m_activity.getResources().getColor(R.color.redActionColor), PorterDuff.Mode.SRC_ATOP); backwardActionButton.setText(customAction.getBackwardString()); backwardActionButton.setAllCaps(false); backwardActionButton.setOnClickListener(new BaseActionElementRenderer.ActionOnClickListener(renderedCard, baseActionElement, cardActionHandler)); viewGroup.addView(backwardActionButton); return backwardActionButton; } private Activity m_activity; } private String m_backwardsString; public static final String CustomActionId = "redAction"; }
1,890
331
package org.fordes.subview.entity.DTO;

import lombok.Data;
import lombok.experimental.Accessors;
import org.springframework.beans.factory.annotation.Value;

/**
 * Search result DTO (Lombok generates getters/setters; chained accessors).
 *
 * @author fordes on 2021/3/5
 */
@Data
@Accessors(chain = true)
public class SearchDTO {

    // NOTE(review): Spring's @Value is only processed on Spring-managed beans;
    // on a plain DTO it likely has no effect (boolean already defaults to
    // false) — confirm intent.
    @Value(value = "false")
    private boolean success;

    // Cursor range of the matched span within the content.
    private int cursor_start;

    private int cursor_end;

    // Matched text/content of the search hit.
    private String content;
}
167
1,208
import random

import pytest

from aws_lambda_powertools.shared.cache_dict import LRUDict

MAX_CACHE_ITEMS = 50
PREFILL_CACHE_ITEMS = 50


@pytest.fixture
def populated_cache():
    """An LRUDict pre-filled with key_0..key_49 in insertion order."""
    seed = {f"key_{i}": f"val_{i}" for i in range(PREFILL_CACHE_ITEMS)}
    return LRUDict(max_items=MAX_CACHE_ITEMS, **seed)


def test_cache_order_init(populated_cache):
    # Insertion order is preserved on construction.
    keys = list(populated_cache)
    assert keys[0] == "key_0"
    assert keys[-1] == f"key_{MAX_CACHE_ITEMS - 1}"


def test_cache_order_getitem(populated_cache):
    # Indexing an entry promotes it to most-recently-used.
    picked = random.randrange(0, MAX_CACHE_ITEMS)
    _ = populated_cache[f"key_{picked}"]
    assert list(populated_cache)[-1] == f"key_{picked}"


def test_cache_order_get(populated_cache):
    # .get() promotes just like __getitem__.
    picked = random.randrange(0, MAX_CACHE_ITEMS)
    _ = populated_cache.get(f"key_{picked}")
    assert list(populated_cache)[-1] == f"key_{picked}"


def test_cache_evict_over_max_items(populated_cache):
    # Inserting past capacity evicts the least-recently-used entry (key_0).
    assert "key_0" in populated_cache
    assert len(populated_cache) == MAX_CACHE_ITEMS
    populated_cache["new_item"] = "new_value"
    assert len(populated_cache) == MAX_CACHE_ITEMS
    assert "key_0" not in populated_cache
    assert "key_1" in populated_cache


def test_setitem_moves_to_end(populated_cache):
    # Overwriting an entry updates its value and promotes it.
    picked = random.randrange(0, MAX_CACHE_ITEMS)
    key = f"key_{picked}"
    populated_cache[key] = f"new_val_{picked}"
    assert list(populated_cache)[-1] == key
    assert populated_cache[key] == f"new_val_{picked}"


def test_lru_pop_failing():
    # Regression guard for GitHub #300: pop() currently raises KeyError even
    # with a default; this test starts failing once the bug is fixed.
    cache = LRUDict()
    key = "test"
    cache[key] = "value"
    try:
        cache.pop(key, None)
        pytest.fail("GitHub #300: LRUDict pop bug has been fixed :)")
    except KeyError as e:
        assert e.args[0] == key


def test_lru_del():
    # del removes the entry and shrinks the cache.
    cache = LRUDict()
    key = "test"
    cache[key] = "value"
    assert len(cache) == 1
    if key in cache:
        del cache[key]
    assert key not in cache
    assert len(cache) == 0
890
22,688
/******************************************************************************
 * Copyright 2018 The Apollo Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *****************************************************************************/

#include "modules/perception/inference/utils/util.h"

#include "gtest/gtest.h"

// Loading a nonexistent file must fail cleanly (return false, no crash).
TEST(loadTest, test) {
  ACHECK(!apollo::perception::inference::load_binary_data("unknown.txt"));
}

// The resize tests that used to live here (BGR/gray YOLO-detector resize and
// ROI resize, each comparing inference::resize output against cv::resize on
// images under modules/perception/inference/inference_test_data/images/) were
// disabled and left as ~150 lines of commented-out code. That dead code has
// been removed; recover it from version control if the tests are revived.
TEST(UtilTest, test) {}
3,010
1,405
package com.network.android.monitor.observer;

import android.database.ContentObserver;

import com.network.e.a.a;

// Decompiled (JADX) ContentObserver. Names are obfuscated: "a" is a project
// class, "d" this observer. On any content change it forwards to
// this.b.a(this.f89a), i.e. notifies the first captured object with the
// second as argument. super(null) registers without a Handler.
final class d extends ContentObserver {

    /* renamed from: a reason: collision with root package name */
    final /* synthetic */ a f89a;  // argument passed on each change
    final /* synthetic */ a b;     // callback target

    /* JADX INFO: super call moved to the top of the method (can break code semantics) */
    d(a aVar, a aVar2) {
        super(null);
        this.b = aVar;
        this.f89a = aVar2;
    }

    public final void onChange(boolean z) {
        // The selfChange flag (z) is deliberately ignored.
        this.b.a(this.f89a);
    }
}
222
418
from typing import Dict, Optional, Tuple, Union import torch from torch.distributions import Uniform from kornia.augmentation.random_generator.base import RandomGeneratorBase from kornia.augmentation.utils import _adapted_rsampling, _adapted_uniform, _common_param_check, _joint_range_check from kornia.geometry.bbox import bbox_generator from kornia.utils.helpers import _deprecated, _extract_device_dtype class CropGenerator(RandomGeneratorBase): r"""Get parameters for ```crop``` transformation for crop transform. Args: size (tuple): Desired size of the crop operation, like (h, w). If tensor, it must be (B, 2). resize_to (tuple): Desired output size of the crop, like (h, w). If None, no resize will be performed. Returns: params Dict[str, torch.Tensor]: parameters to be passed for transformation. - src (torch.Tensor): cropping bounding boxes with a shape of (B, 4, 2). - dst (torch.Tensor): output bounding boxes with a shape (B, 4, 2). Note: The generated random numbers are not reproducible across different devices and dtypes. By default, the parameters will be generated on CPU in float32. This can be changed by calling ``self.set_rng_device_and_dtype(device="cuda", dtype=torch.float64)``. 
""" def __init__(self, size: Union[Tuple[int, int], torch.Tensor], resize_to: Optional[Tuple[int, int]] = None) -> None: super().__init__() self.size = size self.resize_to = resize_to def __repr__(self) -> str: repr = f"crop_size={self.size}" if self.resize_to is not None: repr += f", resize_to={self.resize_to}" return repr def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None: self.rand_sampler = Uniform( torch.tensor(0.0, device=device, dtype=dtype), torch.tensor(1.0, device=device, dtype=dtype) ) def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, torch.Tensor]: # type:ignore batch_size = batch_shape[0] _common_param_check(batch_size, same_on_batch) _device, _dtype = _extract_device_dtype([self.size if isinstance(self.size, torch.Tensor) else None]) if batch_size == 0: return dict( src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype), dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype), ) input_size = (batch_shape[-2], batch_shape[-1]) if not isinstance(self.size, torch.Tensor): size = torch.tensor(self.size, device=_device, dtype=_dtype).repeat(batch_size, 1) else: size = self.size.to(device=_device, dtype=_dtype) if size.shape != torch.Size([batch_size, 2]): raise AssertionError( "If `size` is a tensor, it must be shaped as (B, 2). " f"Got {size.shape} while expecting {torch.Size([batch_size, 2])}." ) if not (input_size[0] > 0 and input_size[1] > 0 and (size > 0).all()): raise AssertionError(f"Got non-positive input size or size. {input_size}, {size}.") size = size.floor() x_diff = input_size[1] - size[:, 1] + 1 y_diff = input_size[0] - size[:, 0] + 1 # Start point will be 0 if diff < 0 x_diff = x_diff.clamp(0) y_diff = y_diff.clamp(0) if same_on_batch: # If same_on_batch, select the first then repeat. 
x_start = ( _adapted_rsampling((batch_size,), self.rand_sampler, same_on_batch).to(x_diff) * x_diff[0] ).floor() y_start = ( _adapted_rsampling((batch_size,), self.rand_sampler, same_on_batch).to(y_diff) * y_diff[0] ).floor() else: x_start = (_adapted_rsampling((batch_size,), self.rand_sampler, same_on_batch).to(x_diff) * x_diff).floor() y_start = (_adapted_rsampling((batch_size,), self.rand_sampler, same_on_batch).to(y_diff) * y_diff).floor() crop_src = bbox_generator( x_start.view(-1).to(device=_device, dtype=_dtype), y_start.view(-1).to(device=_device, dtype=_dtype), torch.where(size[:, 1] == 0, torch.tensor(input_size[1], device=_device, dtype=_dtype), size[:, 1]), torch.where(size[:, 0] == 0, torch.tensor(input_size[0], device=_device, dtype=_dtype), size[:, 0]), ) if self.resize_to is None: crop_dst = bbox_generator( torch.tensor([0] * batch_size, device=_device, dtype=_dtype), torch.tensor([0] * batch_size, device=_device, dtype=_dtype), size[:, 1], size[:, 0], ) _output_size = size.to(dtype=torch.long) else: if not ( len(self.resize_to) == 2 and isinstance(self.resize_to[0], (int,)) and isinstance(self.resize_to[1], (int,)) and self.resize_to[0] > 0 and self.resize_to[1] > 0 ): raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. Got {self.resize_to}.") crop_dst = torch.tensor( [ [ [0, 0], [self.resize_to[1] - 1, 0], [self.resize_to[1] - 1, self.resize_to[0] - 1], [0, self.resize_to[0] - 1], ] ], device=_device, dtype=_dtype, ).repeat(batch_size, 1, 1) _output_size = torch.tensor(self.resize_to, device=_device, dtype=torch.long).expand(batch_size, -1) _input_size = torch.tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1) return dict(src=crop_src, dst=crop_dst, input_size=_input_size, output_size=_output_size) class ResizedCropGenerator(CropGenerator): r"""Get cropping heights and widths for ```crop``` transformation for resized crop transform. 
Args: output_size (Tuple[int, int]): expected output size of each edge. scale (torch.Tensor): range of size of the origin size cropped with (2,) shape. ratio (torch.Tensor): range of aspect ratio of the origin aspect ratio cropped with (2,) shape. Returns: params Dict[str, torch.Tensor]: parameters to be passed for transformation. - size (torch.Tensor): element-wise cropping sizes with a shape of (B, 2). Note: The generated random numbers are not reproducible across different devices and dtypes. Examples: >>> _ = torch.manual_seed(42) >>> random_crop_size_generator(3, (30, 30), scale=torch.tensor([.7, 1.3]), ratio=torch.tensor([.9, 1.])) {'size': tensor([[29., 29.], [27., 28.], [26., 29.]])} """ def __init__( self, output_size: Tuple[int, int], scale: Union[torch.Tensor, Tuple[float, float]], ratio: Union[torch.Tensor, Tuple[float, float]], ) -> None: if not ( len(output_size) == 2 and isinstance(output_size[0], (int,)) and isinstance(output_size[1], (int,)) and output_size[0] > 0 and output_size[1] > 0 ): raise AssertionError(f"`output_size` must be a tuple of 2 positive integers. 
Got {output_size}.") super().__init__(size=output_size, resize_to=output_size) # fake an intermedia crop size self.scale = scale self.ratio = ratio self.output_size = output_size def __repr__(self) -> str: repr = f"scale={self.scale}, resize_to={self.ratio}, output_size={self.output_size}" return repr def make_samplers(self, device: torch.device, dtype: torch.dtype) -> None: scale = torch.as_tensor(self.scale, device=device, dtype=dtype) ratio = torch.as_tensor(self.ratio, device=device, dtype=dtype) _joint_range_check(scale, "scale") _joint_range_check(ratio, "ratio") self.rand_sampler = Uniform( torch.tensor(0.0, device=device, dtype=dtype), torch.tensor(1.0, device=device, dtype=dtype) ) self.log_ratio_sampler = Uniform(torch.log(ratio[0]), torch.log(ratio[1]), validate_args=False) def forward(self, batch_shape: torch.Size, same_on_batch: bool = False) -> Dict[str, torch.Tensor]: # type:ignore batch_size = batch_shape[0] size = (batch_shape[-2], batch_shape[-1]) _device, _dtype = _extract_device_dtype([self.scale, self.ratio]) if batch_size == 0: return dict( src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype), dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype), size=torch.zeros([0, 2], device=_device, dtype=_dtype), ) rand = _adapted_rsampling((batch_size, 10), self.rand_sampler, same_on_batch).to(device=_device, dtype=_dtype) area = (rand * (self.scale[1] - self.scale[0]) + self.scale[0]) * size[0] * size[1] log_ratio = _adapted_rsampling((batch_size, 10), self.log_ratio_sampler, same_on_batch).to( device=_device, dtype=_dtype ) aspect_ratio = torch.exp(log_ratio) w = torch.sqrt(area * aspect_ratio).round().floor() h = torch.sqrt(area / aspect_ratio).round().floor() # Element-wise w, h condition cond = ((0 < w) * (w < size[0]) * (0 < h) * (h < size[1])).int() # torch.argmax is not reproducible across devices: https://github.com/pytorch/pytorch/issues/17738 # Here, we will select the first occurrence of the duplicated elements. 
cond_bool, argmax_dim1 = ((cond.cumsum(1) == 1) & cond.bool()).max(1) h_out = w[torch.arange(0, batch_size, device=_device, dtype=torch.long), argmax_dim1] w_out = h[torch.arange(0, batch_size, device=_device, dtype=torch.long), argmax_dim1] if not cond_bool.all(): # Fallback to center crop in_ratio = float(size[0]) / float(size[1]) _min = self.ratio.min() if isinstance(self.ratio, torch.Tensor) else min(self.ratio) if in_ratio < _min: # type:ignore h_ct = torch.tensor(size[0], device=_device, dtype=_dtype) w_ct = torch.round(h_ct / _min) elif in_ratio > _min: # type:ignore w_ct = torch.tensor(size[1], device=_device, dtype=_dtype) h_ct = torch.round(w_ct * _min) else: # whole image h_ct = torch.tensor(size[0], device=_device, dtype=_dtype) w_ct = torch.tensor(size[1], device=_device, dtype=_dtype) h_ct = h_ct.floor() w_ct = w_ct.floor() h_out = h_out.where(cond_bool, h_ct) w_out = w_out.where(cond_bool, w_ct) # Update the crop size. self.size = torch.stack([h_out, w_out], dim=1) return super().forward(batch_shape, same_on_batch) @_deprecated(replace_with=CropGenerator.__name__) def random_crop_generator( batch_size: int, input_size: Tuple[int, int], size: Union[Tuple[int, int], torch.Tensor], resize_to: Optional[Tuple[int, int]] = None, same_on_batch: bool = False, device: torch.device = torch.device('cpu'), dtype: torch.dtype = torch.float32, ) -> Dict[str, torch.Tensor]: r"""Get parameters for ```crop``` transformation for crop transform. Args: batch_size (int): the tensor batch size. input_size (tuple): Input image shape, like (h, w). size (tuple): Desired size of the crop operation, like (h, w). If tensor, it must be (B, 2). resize_to (tuple): Desired output size of the crop, like (h, w). If None, no resize will be performed. same_on_batch (bool): apply the same transformation across the batch. Default: False. device (torch.device): the device on which the random numbers will be generated. Default: cpu. 
dtype (torch.dtype): the data type of the generated random numbers. Default: float32. Returns: params Dict[str, torch.Tensor]: parameters to be passed for transformation. - src (torch.Tensor): cropping bounding boxes with a shape of (B, 4, 2). - dst (torch.Tensor): output bounding boxes with a shape (B, 4, 2). Note: The generated random numbers are not reproducible across different devices and dtypes. Example: >>> _ = torch.manual_seed(0) >>> crop_size = torch.tensor([[25, 28], [27, 29], [26, 28]]) >>> random_crop_generator(3, (30, 30), size=crop_size, same_on_batch=False) {'src': tensor([[[ 1., 0.], [28., 0.], [28., 24.], [ 1., 24.]], <BLANKLINE> [[ 1., 1.], [29., 1.], [29., 27.], [ 1., 27.]], <BLANKLINE> [[ 0., 3.], [27., 3.], [27., 28.], [ 0., 28.]]]), 'dst': tensor([[[ 0., 0.], [27., 0.], [27., 24.], [ 0., 24.]], <BLANKLINE> [[ 0., 0.], [28., 0.], [28., 26.], [ 0., 26.]], <BLANKLINE> [[ 0., 0.], [27., 0.], [27., 25.], [ 0., 25.]]]), 'input_size': tensor([[30, 30], [30, 30], [30, 30]])} """ _common_param_check(batch_size, same_on_batch) _device, _dtype = _extract_device_dtype([size if isinstance(size, torch.Tensor) else None]) # Use float point instead _dtype = _dtype if _dtype in [torch.float16, torch.float32, torch.float64] else dtype if not isinstance(size, torch.Tensor): size = torch.tensor(size, device=_device, dtype=_dtype).repeat(batch_size, 1) else: size = size.to(device=_device, dtype=_dtype) if size.shape != torch.Size([batch_size, 2]): raise AssertionError( "If `size` is a tensor, it must be shaped as (B, 2). " f"Got {size.shape} while expecting {torch.Size([batch_size, 2])}." ) if not (input_size[0] > 0 and input_size[1] > 0 and (size > 0).all()): raise AssertionError(f"Got non-positive input size or size. 
{input_size}, {size}.") size = size.floor() x_diff = input_size[1] - size[:, 1] + 1 y_diff = input_size[0] - size[:, 0] + 1 # Start point will be 0 if diff < 0 x_diff = x_diff.clamp(0) y_diff = y_diff.clamp(0) if batch_size == 0: return dict( src=torch.zeros([0, 4, 2], device=_device, dtype=_dtype), dst=torch.zeros([0, 4, 2], device=_device, dtype=_dtype), ) if same_on_batch: # If same_on_batch, select the first then repeat. x_start = _adapted_uniform((batch_size,), 0, x_diff[0].to(device=device, dtype=dtype), same_on_batch).floor() y_start = _adapted_uniform((batch_size,), 0, y_diff[0].to(device=device, dtype=dtype), same_on_batch).floor() else: x_start = _adapted_uniform((1,), 0, x_diff.to(device=device, dtype=dtype), same_on_batch).floor() y_start = _adapted_uniform((1,), 0, y_diff.to(device=device, dtype=dtype), same_on_batch).floor() crop_src = bbox_generator( x_start.view(-1).to(device=_device, dtype=_dtype), y_start.view(-1).to(device=_device, dtype=_dtype), torch.where(size[:, 1] == 0, torch.tensor(input_size[1], device=_device, dtype=_dtype), size[:, 1]), torch.where(size[:, 0] == 0, torch.tensor(input_size[0], device=_device, dtype=_dtype), size[:, 0]), ) if resize_to is None: crop_dst = bbox_generator( torch.tensor([0] * batch_size, device=_device, dtype=_dtype), torch.tensor([0] * batch_size, device=_device, dtype=_dtype), size[:, 1], size[:, 0], ) else: if not ( len(resize_to) == 2 and isinstance(resize_to[0], (int,)) and isinstance(resize_to[1], (int,)) and resize_to[0] > 0 and resize_to[1] > 0 ): raise AssertionError(f"`resize_to` must be a tuple of 2 positive integers. 
Got {resize_to}.") crop_dst = torch.tensor( [[[0, 0], [resize_to[1] - 1, 0], [resize_to[1] - 1, resize_to[0] - 1], [0, resize_to[0] - 1]]], device=_device, dtype=_dtype, ).repeat(batch_size, 1, 1) _input_size = torch.tensor(input_size, device=_device, dtype=torch.long).expand(batch_size, -1) return dict(src=crop_src, dst=crop_dst, input_size=_input_size) @_deprecated() def random_crop_size_generator( batch_size: int, size: Tuple[int, int], scale: torch.Tensor, ratio: torch.Tensor, same_on_batch: bool = False, device: torch.device = torch.device('cpu'), dtype: torch.dtype = torch.float32, ) -> Dict[str, torch.Tensor]: r"""Get cropping heights and widths for ```crop``` transformation for resized crop transform. Args: batch_size (int): the tensor batch size. size (Tuple[int, int]): expected output size of each edge. scale (torch.Tensor): range of size of the origin size cropped with (2,) shape. ratio (torch.Tensor): range of aspect ratio of the origin aspect ratio cropped with (2,) shape. same_on_batch (bool): apply the same transformation across the batch. Default: False. device (torch.device): the device on which the random numbers will be generated. Default: cpu. dtype (torch.dtype): the data type of the generated random numbers. Default: float32. Returns: params Dict[str, torch.Tensor]: parameters to be passed for transformation. - size (torch.Tensor): element-wise cropping sizes with a shape of (B, 2). Note: The generated random numbers are not reproducible across different devices and dtypes. 
Examples: >>> _ = torch.manual_seed(42) >>> random_crop_size_generator(3, (30, 30), scale=torch.tensor([.7, 1.3]), ratio=torch.tensor([.9, 1.])) {'size': tensor([[29., 29.], [27., 28.], [26., 29.]])} """ _common_param_check(batch_size, same_on_batch) _joint_range_check(scale, "scale") _joint_range_check(ratio, "ratio") if not (len(size) == 2 and type(size[0]) is int and size[1] > 0 and type(size[1]) is int and size[1] > 0): raise AssertionError(f"'height' and 'width' must be integers. Got {size}.") _device, _dtype = _extract_device_dtype([scale, ratio]) if batch_size == 0: return dict(size=torch.zeros([0, 2], device=_device, dtype=_dtype)) scale = scale.to(device=device, dtype=dtype) ratio = ratio.to(device=device, dtype=dtype) # 10 trails for each element area = _adapted_uniform((batch_size, 10), scale[0] * size[0] * size[1], scale[1] * size[0] * size[1], same_on_batch) log_ratio = _adapted_uniform((batch_size, 10), torch.log(ratio[0]), torch.log(ratio[1]), same_on_batch) aspect_ratio = torch.exp(log_ratio) w = torch.sqrt(area * aspect_ratio).round().floor() h = torch.sqrt(area / aspect_ratio).round().floor() # Element-wise w, h condition cond = ((0 < w) * (w < size[0]) * (0 < h) * (h < size[1])).int() # torch.argmax is not reproducible across devices: https://github.com/pytorch/pytorch/issues/17738 # Here, we will select the first occurrence of the duplicated elements. 
cond_bool, argmax_dim1 = ((cond.cumsum(1) == 1) & cond.bool()).max(1) h_out = w[torch.arange(0, batch_size, device=device, dtype=torch.long), argmax_dim1] w_out = h[torch.arange(0, batch_size, device=device, dtype=torch.long), argmax_dim1] if not cond_bool.all(): # Fallback to center crop in_ratio = float(size[0]) / float(size[1]) if in_ratio < ratio.min(): h_ct = torch.tensor(size[0], device=device, dtype=dtype) w_ct = torch.round(h_ct / ratio.min()) elif in_ratio > ratio.min(): w_ct = torch.tensor(size[1], device=device, dtype=dtype) h_ct = torch.round(w_ct * ratio.min()) else: # whole image h_ct = torch.tensor(size[0], device=device, dtype=dtype) w_ct = torch.tensor(size[1], device=device, dtype=dtype) h_ct = h_ct.floor() w_ct = w_ct.floor() h_out = h_out.where(cond_bool, h_ct) w_out = w_out.where(cond_bool, w_ct) return dict(size=torch.stack([h_out, w_out], dim=1).to(device=_device, dtype=_dtype)) def center_crop_generator( batch_size: int, height: int, width: int, size: Tuple[int, int], device: torch.device = torch.device('cpu') ) -> Dict[str, torch.Tensor]: r"""Get parameters for ```center_crop``` transformation for center crop transform. Args: batch_size (int): the tensor batch size. height (int) : height of the image. width (int): width of the image. size (tuple): Desired output size of the crop, like (h, w). device (torch.device): the device on which the random numbers will be generated. Default: cpu. Returns: params Dict[str, torch.Tensor]: parameters to be passed for transformation. - src (torch.Tensor): cropping bounding boxes with a shape of (B, 4, 2). - dst (torch.Tensor): output bounding boxes with a shape (B, 4, 2). Note: No random number will be generated. """ _common_param_check(batch_size) if not isinstance(size, (tuple, list)) and len(size) == 2: raise ValueError(f"Input size must be a tuple/list of length 2. 
Got {size}") if not (type(height) is int and height > 0 and type(width) is int and width > 0): raise AssertionError(f"'height' and 'width' must be integers. Got {height}, {width}.") if not (height >= size[0] and width >= size[1]): raise AssertionError(f"Crop size must be smaller than input size. Got ({height}, {width}) and {size}.") # unpack input sizes dst_h, dst_w = size src_h, src_w = height, width # compute start/end offsets dst_h_half = dst_h / 2 dst_w_half = dst_w / 2 src_h_half = src_h / 2 src_w_half = src_w / 2 start_x = int(src_w_half - dst_w_half) start_y = int(src_h_half - dst_h_half) end_x = start_x + dst_w - 1 end_y = start_y + dst_h - 1 # [y, x] origin # top-left, top-right, bottom-right, bottom-left points_src: torch.Tensor = torch.tensor( [[[start_x, start_y], [end_x, start_y], [end_x, end_y], [start_x, end_y]]], device=device, dtype=torch.long ).expand(batch_size, -1, -1) # [y, x] destination # top-left, top-right, bottom-right, bottom-left points_dst: torch.Tensor = torch.tensor( [[[0, 0], [dst_w - 1, 0], [dst_w - 1, dst_h - 1], [0, dst_h - 1]]], device=device, dtype=torch.long ).expand(batch_size, -1, -1) _input_size = torch.tensor((height, width), device=device, dtype=torch.long).expand(batch_size, -1) _output_size = torch.tensor(size, device=device, dtype=torch.long).expand(batch_size, -1) return dict(src=points_src, dst=points_dst, input_size=_input_size, output_size=_output_size)
10,874
488
<filename>quantlib/termstructures/volatility/api.py<gh_stars>100-1000 from .equityfx.black_constant_vol import BlackConstantVol from .equityfx.black_variance_curve import BlackVarianceCurve from .equityfx.black_variance_surface import BlackVarianceSurface from .volatilitytype import VolatilityType from .swaption.swaption_vol_matrix import SwaptionVolatilityMatrix from .swaption.swaption_constant_vol import ConstantSwaptionVolatility
136
3,527
package com.github.promeg.pinyinhelper; import com.github.prome.tinypinyin.jmh.FullDiffDict; import net.sourceforge.pinyin4j.PinyinHelper; import net.sourceforge.pinyin4j.format.HanyuPinyinCaseType; import net.sourceforge.pinyin4j.format.HanyuPinyinOutputFormat; import net.sourceforge.pinyin4j.format.HanyuPinyinToneType; import net.sourceforge.pinyin4j.format.HanyuPinyinVCharType; import net.sourceforge.pinyin4j.format.exception.BadHanyuPinyinOutputFormatCombination; import org.openjdk.jmh.annotations.Benchmark; import org.openjdk.jmh.annotations.Level; import org.openjdk.jmh.annotations.OutputTimeUnit; import org.openjdk.jmh.annotations.Scope; import org.openjdk.jmh.annotations.Setup; import org.openjdk.jmh.annotations.State; import org.openjdk.jmh.infra.BenchmarkParams; import java.util.concurrent.TimeUnit; /** * Created by guyacong on 2016/12/23. */ //CHECKSTYLE:OFF @State(Scope.Benchmark) @OutputTimeUnit(TimeUnit.MILLISECONDS) public class PinyinDictBenchmark2 { static HanyuPinyinOutputFormat format; static String inputStr; static { format = new HanyuPinyinOutputFormat(); format.setToneType(HanyuPinyinToneType.WITHOUT_TONE); format.setCaseType(HanyuPinyinCaseType.UPPERCASE); format.setVCharType(HanyuPinyinVCharType.WITH_V); } @Setup(Level.Iteration) public void setUp(BenchmarkParams params) { inputStr = BenchmarkUtils.genRandomString(1000); Pinyin.init(Pinyin.newConfig().with(FullDiffDict.getInstance())); } @Benchmark public void Pinyin4j_StringToPinyin() throws BadHanyuPinyinOutputFormatCombination { PinyinHelper.toHanyuPinyinString(inputStr, format, ","); } @Benchmark public void TinyPinyin_StringToPinyin_With_Large_Dict() { Pinyin.toPinyin(inputStr, ","); } } //CHECKSTYLE:ON
764
2,576
/* * Copyright (c) 2010-2018. Axon Framework * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.axonframework.test.deadline; import org.axonframework.deadline.DeadlineMessage; import org.axonframework.messaging.Scope; import org.axonframework.messaging.ScopeDescriptor; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import java.time.Duration; import java.time.Instant; import java.util.ArrayList; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; class StubDeadlineManagerTest { private StubDeadlineManager testSubject; @BeforeEach void setUp() { testSubject = new StubDeadlineManager(); } @Test void testMessagesCarryTriggerTimestamp() throws Exception { Instant triggerTime = Instant.now().plusSeconds(60); MockScope.execute(() -> testSubject.schedule(triggerTime, "gone") ); List<DeadlineMessage<?>> triggered = new ArrayList<>(); testSubject.advanceTimeBy(Duration.ofMinutes(75), (s, message) -> triggered.add(message)); assertEquals(1, triggered.size()); assertEquals(triggerTime, triggered.get(0).getTimestamp()); } private static class MockScope extends Scope { private static final MockScope instance = new MockScope(); public static void execute(Runnable task) throws Exception { instance.executeWithResult(() -> { task.run(); return null; }); } @Override public ScopeDescriptor describeScope() { return (ScopeDescriptor) () -> "Mock"; } } }
786
309
# -*- coding: utf-8 -*- """ The AdBandits bandit algorithm, mixing Thompson Sampling and BayesUCB. - Reference: [AdBandit: A New Algorithm For Multi-Armed Bandits, F.S.Truzzi, <NAME>, A.H.R.Costa, F.G.Cozman](http://sites.poli.usp.br/p/fabio.cozman/Publications/Article/truzzi-silva-costa-cozman-eniac2013.pdf) - Code inspired from: https://github.com/flaviotruzzi/AdBandits/ .. warning:: This policy is very not famous, but for stochastic bandits it works usually VERY WELL! It is not anytime thought. """ from __future__ import division, print_function # Python 2 compatibility __author__ = "<NAME> and <NAME>" __version__ = "0.9" from random import random, choice import numpy as np try: from .Posterior import Beta from .BasePolicy import BasePolicy from .with_proba import with_proba except ImportError: from Posterior import Beta from BasePolicy import BasePolicy from with_proba import with_proba # --- Data #: Default value for the parameter :math:`\alpha` for the :class:`AdBandits` class. ALPHA = 1 # --- Class class AdBandits(BasePolicy): """ The AdBandits bandit algorithm, mixing Thompson Sampling and BayesUCB. - Reference: [AdBandit: A New Algorithm For Multi-Armed Bandits, F.S.Truzzi, <NAME>, A.H.R.Costa, F.G.Cozman](http://sites.poli.usp.br/p/fabio.cozman/Publications/Article/truzzi-silva-costa-cozman-eniac2013.pdf) - Code inspired from: https://github.com/flaviotruzzi/AdBandits/ .. warning:: This policy is very not famous, but for stochastic bandits it works usually VERY WELL! It is not anytime thought. """ def __init__(self, nbArms, horizon=1000, alpha=ALPHA, posterior=Beta, lower=0., amplitude=1.): """ New policy.""" super(AdBandits, self).__init__(nbArms, lower=lower, amplitude=amplitude) self.alpha = alpha #: Parameter alpha self.horizon = int(horizon) #: Parameter :math:`T` = known horizon of the experiment. Default value is 1000. self.posterior = [None] * self.nbArms #: Posterior for each arm. 
List instead of dict, quicker access for arm in range(self.nbArms): self.posterior[arm] = posterior() def __str__(self): # OK, they all have knowledge of T, but it's good to display it to, remember it return r"AdBandits($T={}$, $\alpha={:.3g}$)".format(self.horizon, self.alpha) def startGame(self): """ Reset each posterior.""" super(AdBandits, self).startGame() for arm in range(self.nbArms): self.posterior[arm].reset() def getReward(self, arm, reward): """ Store the reward, and update the posterior for that arm.""" super(AdBandits, self).getReward(arm, reward) reward = (reward - self.lower) / self.amplitude self.posterior[arm].update(reward) # This decorator @property makes this method an attribute, cf. https://docs.python.org/3/library/functions.html#property @property def epsilon(self): r""" Time variating parameter :math:`\varepsilon(t)`.""" # Crop it to [0, 1] return max(0, min(1, float(self.t / (self.horizon * self.alpha)))) def choice(self): r""" With probability :math:`1 - \varepsilon(t)`, use a Thompson Sampling step, otherwise use a UCB-Bayes step, to choose one arm.""" # Thompson Exploration if with_proba(1 - self.epsilon): # with proba 1-epsilon upperbounds = [self.posterior[i].sample() for i in range(self.nbArms)] maxIndex = max(upperbounds) bestArms = [arm for (arm, index) in enumerate(upperbounds) if index == maxIndex] arm = choice(bestArms) # UCB-Bayes else: expectations = (1.0 + self.rewards) / (2.0 + self.pulls) upperbounds = [self.posterior[arm].quantile(1. - 1. 
/ self.t) for arm in range(self.nbArms)] regret = np.max(upperbounds) - expectations admissible = np.nonzero(regret == np.min(regret))[0] arm = choice(admissible) return arm def choiceWithRank(self, rank=1): r""" With probability :math:`1 - \varepsilon(t)`, use a Thompson Sampling step, otherwise use a UCB-Bayes step, to choose one arm of a certain rank.""" if rank == 1: return self.choice() else: assert rank >= 1, "Error: for AdBandits = {}, in choiceWithRank(rank={}) rank has to be >= 1.".format(self, rank) # Thompson Exploration if with_proba(1 - self.epsilon): # with proba 1-epsilon indexes = [self.posterior[i].sample() for i in range(self.nbArms)] # UCB-Bayes else: expectations = (1.0 + self.rewards) / (2.0 + self.pulls) upperbounds = [self.posterior[arm].quantile(1. - 1. / self.t) for arm in range(self.nbArms)] indexes = expectations - np.max(upperbounds) # We computed the indexes, OK let's use them sortedRewards = np.sort(indexes) # XXX What happens here if two arms has the same index, being the max? chosenIndex = sortedRewards[-rank] # Uniform choice among the rank-th best arms return choice(np.nonzero(indexes == chosenIndex)[0])
2,125
791
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <fcntl.h> #include <sys/mman.h> #include <GL/gl.h> #include <zbuffer.h> #include <soso.h> #ifndef M_PI # define M_PI 3.14159265 #endif int __errno = 0; /* * Draw a gear wheel. You'll probably want to call this function when * building a display list since we do a lot of trig here. * * Input: inner_radius - radius of hole at center * outer_radius - radius at center of teeth * width - width of gear * teeth - number of teeth * tooth_depth - depth of tooth */ static void gear( GLfloat inner_radius, GLfloat outer_radius, GLfloat width, GLint teeth, GLfloat tooth_depth ) { GLint i; GLfloat r0, r1, r2; GLfloat angle, da; GLfloat u, v, len; r0 = inner_radius; r1 = outer_radius - tooth_depth/2.0; r2 = outer_radius + tooth_depth/2.0; da = 2.0*M_PI / teeth / 4.0; glShadeModel( GL_FLAT ); glNormal3f( 0.0, 0.0, 1.0 ); /* draw front face */ glBegin( GL_QUAD_STRIP ); for (i=0;i<=teeth;i++) { angle = i * 2.0*M_PI / teeth; glVertex3f( r0*cos(angle), r0*sin(angle), width*0.5 ); glVertex3f( r1*cos(angle), r1*sin(angle), width*0.5 ); glVertex3f( r0*cos(angle), r0*sin(angle), width*0.5 ); glVertex3f( r1*cos(angle+3*da), r1*sin(angle+3*da), width*0.5 ); } glEnd(); /* draw front sides of teeth */ glBegin( GL_QUADS ); da = 2.0*M_PI / teeth / 4.0; for (i=0;i<teeth;i++) { angle = i * 2.0*M_PI / teeth; glVertex3f( r1*cos(angle), r1*sin(angle), width*0.5 ); glVertex3f( r2*cos(angle+da), r2*sin(angle+da), width*0.5 ); glVertex3f( r2*cos(angle+2*da), r2*sin(angle+2*da), width*0.5 ); glVertex3f( r1*cos(angle+3*da), r1*sin(angle+3*da), width*0.5 ); } glEnd(); glNormal3f( 0.0, 0.0, -1.0 ); /* draw back face */ glBegin( GL_QUAD_STRIP ); for (i=0;i<=teeth;i++) { angle = i * 2.0*M_PI / teeth; glVertex3f( r1*cos(angle), r1*sin(angle), -width*0.5 ); glVertex3f( r0*cos(angle), r0*sin(angle), -width*0.5 ); glVertex3f( r1*cos(angle+3*da), r1*sin(angle+3*da), -width*0.5 ); glVertex3f( r0*cos(angle), r0*sin(angle), 
-width*0.5 ); } glEnd(); /* draw back sides of teeth */ glBegin( GL_QUADS ); da = 2.0*M_PI / teeth / 4.0; for (i=0;i<teeth;i++) { angle = i * 2.0*M_PI / teeth; glVertex3f( r1*cos(angle+3*da), r1*sin(angle+3*da), -width*0.5 ); glVertex3f( r2*cos(angle+2*da), r2*sin(angle+2*da), -width*0.5 ); glVertex3f( r2*cos(angle+da), r2*sin(angle+da), -width*0.5 ); glVertex3f( r1*cos(angle), r1*sin(angle), -width*0.5 ); } glEnd(); /* draw outward faces of teeth */ glBegin( GL_QUAD_STRIP ); for (i=0;i<teeth;i++) { angle = i * 2.0*M_PI / teeth; glVertex3f( r1*cos(angle), r1*sin(angle), width*0.5 ); glVertex3f( r1*cos(angle), r1*sin(angle), -width*0.5 ); u = r2*cos(angle+da) - r1*cos(angle); v = r2*sin(angle+da) - r1*sin(angle); len = sqrt( u*u + v*v ); u /= len; v /= len; glNormal3f( v, -u, 0.0 ); glVertex3f( r2*cos(angle+da), r2*sin(angle+da), width*0.5 ); glVertex3f( r2*cos(angle+da), r2*sin(angle+da), -width*0.5 ); glNormal3f( cos(angle), sin(angle), 0.0 ); glVertex3f( r2*cos(angle+2*da), r2*sin(angle+2*da), width*0.5 ); glVertex3f( r2*cos(angle+2*da), r2*sin(angle+2*da), -width*0.5 ); u = r1*cos(angle+3*da) - r2*cos(angle+2*da); v = r1*sin(angle+3*da) - r2*sin(angle+2*da); glNormal3f( v, -u, 0.0 ); glVertex3f( r1*cos(angle+3*da), r1*sin(angle+3*da), width*0.5 ); glVertex3f( r1*cos(angle+3*da), r1*sin(angle+3*da), -width*0.5 ); glNormal3f( cos(angle), sin(angle), 0.0 ); } glVertex3f( r1*cos(0), r1*sin(0), width*0.5 ); glVertex3f( r1*cos(0), r1*sin(0), -width*0.5 ); glEnd(); glShadeModel( GL_SMOOTH ); /* draw inside radius cylinder */ glBegin( GL_QUAD_STRIP ); for (i=0;i<=teeth;i++) { angle = i * 2.0*M_PI / teeth; glNormal3f( -cos(angle), -sin(angle), 0.0 ); glVertex3f( r0*cos(angle), r0*sin(angle), -width*0.5 ); glVertex3f( r0*cos(angle), r0*sin(angle), width*0.5 ); } glEnd(); } static GLfloat view_rotx=20.0, view_roty=30.0; static GLint gear1, gear2, gear3; static GLfloat angle = 0.0; void draw() { angle += 2.0; glPushMatrix(); glRotatef( view_rotx, 1.0, 0.0, 0.0 ); glRotatef( 
view_roty, 0.0, 1.0, 0.0 ); //glRotatef( view_rotz, 0.0, 0.0, 1.0 ); glPushMatrix(); glTranslatef( -3.0, -2.0, 0.0 ); glRotatef( angle, 0.0, 0.0, 1.0 ); glCallList(gear1); glPopMatrix(); glPushMatrix(); glTranslatef( 3.1, -2.0, 0.0 ); glRotatef( -2.0*angle-9.0, 0.0, 0.0, 1.0 ); glCallList(gear2); glPopMatrix(); glPushMatrix(); glTranslatef( -3.1, 4.2, 0.0 ); glRotatef( -2.0*angle-25.0, 0.0, 0.0, 1.0 ); glCallList(gear3); glPopMatrix(); glPopMatrix(); } void initScene() { static GLfloat pos[4] = {5.0, 5.0, 10.0, 0.0 }; static GLfloat red[4] = {0.8, 0.1, 0.0, 1.0 }; static GLfloat green[4] = {0.0, 0.8, 0.2, 1.0 }; static GLfloat blue[4] = {0.2, 0.2, 1.0, 1.0 }; glLightfv( GL_LIGHT0, GL_POSITION, pos ); glEnable( GL_CULL_FACE ); glEnable( GL_LIGHTING ); glEnable( GL_LIGHT0 ); glEnable( GL_DEPTH_TEST ); /* make the gears */ gear1 = glGenLists(1); glNewList(gear1, GL_COMPILE); glMaterialfv( GL_FRONT, GL_AMBIENT_AND_DIFFUSE, red ); gear( 1.0, 4.0, 1.0, 20, 0.7 ); glEndList(); gear2 = glGenLists(1); glNewList(gear2, GL_COMPILE); glMaterialfv( GL_FRONT, GL_AMBIENT_AND_DIFFUSE, green ); gear( 0.5, 2.0, 2.0, 10, 0.7 ); glEndList(); gear3 = glGenLists(1); glNewList(gear3, GL_COMPILE); glMaterialfv( GL_FRONT, GL_AMBIENT_AND_DIFFUSE, blue ); gear( 1.3, 2.0, 0.5, 10, 0.7 ); glEndList(); glEnable( GL_NORMALIZE ); } int main(int argc, char** argv) { int winSizeX = 640; int winSizeY = 480; int screenWidth = 1024; int pitch = screenWidth * 4; int mode = ZB_MODE_RGBA; ZBuffer *zBuffer = ZB_open( winSizeX, winSizeY, mode, 0, 0, 0, 0); glInit( zBuffer ); glClearColor (0.0, 0.0, 0.0, 0.0); glViewport (0, 0, winSizeX, winSizeY); glEnable(GL_DEPTH_TEST); GLfloat h = (GLfloat) winSizeY / (GLfloat) winSizeX; glMatrixMode(GL_PROJECTION); glLoadIdentity(); glFrustum( -1.0, 1.0, -h, h, 5.0, 60.0 ); glMatrixMode(GL_MODELVIEW); glLoadIdentity(); glTranslatef( 0.0, 0.0, -45.0 ); initScene(); int fd = open("/dev/fb0", 0); if (fd >= 0) { int* buffer = mmap(NULL, 1024*768*4, 0, 0, fd, 0); if (buffer 
!= (int*)-1) { unsigned int previousTime = get_uptime_ms(); unsigned int frameCounter = 0; while (1) { glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT ); draw(); ZB_copyFrameBuffer(zBuffer, buffer, pitch); ++frameCounter; unsigned int time = get_uptime_ms(); unsigned int diff = time - previousTime; if (diff >= 1000) { printf("%d frames in %d milliseconds\n", frameCounter, diff); previousTime = time; frameCounter = 0; } } } else { printf("mmap failed\n"); } } else { printf("could not open /dev/fb0\n"); } ZB_close(zBuffer); return 0; }
4,175
2,023
<reponame>tdiprima/code<filename>recipes/Python/577283_Decorator_expose_local_variables_functiafter/recipe-577283.py
# NOTE(review): Python 2-only recipe — it depends on the `new` module, the
# `func_code`/`func_globals`/`func_name` function attributes, and the
# third-party `byteplay` bytecode library; none of these exist in Python 3.
import new
import byteplay as bp
import inspect


def persistent_locals(f):
    """Function decorator to expose local variables after execution.

    Modify the function such that, at the exit of the function
    (regular exit or exceptions), the local dictionary is copied to a
    read-only function property 'locals'.

    This decorator wraps the function in a callable object, and
    modifies its bytecode by adding an external try...finally
    statement equivalent to the following:

    def f(self, *args, **kwargs):
        try:
            ... old code ...
        finally:
            self._locals = locals().copy()
            del self._locals['self']
    """
    # ### disassemble f
    f_code = bp.Code.from_code(f.func_code)

    # ### use bytecode injection to add try...finally statement around code
    finally_label = bp.Label()
    # try:
    code_before = (bp.SETUP_FINALLY, finally_label)
    # [original code here]
    # finally:
    code_after = [(finally_label, None),
                  # self._locals = locals().copy()
                  (bp.LOAD_GLOBAL, 'locals'),
                  (bp.CALL_FUNCTION, 0),
                  (bp.LOAD_ATTR, 'copy'),
                  (bp.CALL_FUNCTION, 0),
                  (bp.LOAD_FAST, 'self'),
                  (bp.STORE_ATTR, '_locals'),
                  # del self._locals['self']
                  (bp.LOAD_FAST, 'self'),
                  (bp.LOAD_ATTR, '_locals'),
                  (bp.LOAD_CONST, 'self'),
                  (bp.DELETE_SUBSCR, None),
                  (bp.END_FINALLY, None),
                  (bp.LOAD_CONST, None),
                  (bp.RETURN_VALUE, None)]
    # Prepend the SETUP_FINALLY and append the handler so the whole original
    # body runs inside the injected try...finally.
    f_code.code.insert(0, code_before)
    f_code.code.extend(code_after)

    # ### re-assemble
    # The injected bytecode loads 'self', so the rebuilt function gains a
    # leading 'self' argument; it is later bound as an instance method of
    # PersistentLocalsFunction, which supplies that 'self'.
    f_code.args = ('self',) + f_code.args
    func = new.function(f_code.to_code(), f.func_globals, f.func_name,
                        f.func_defaults, f.func_closure)
    return PersistentLocalsFunction(func)


_docpostfix = """
This function has been decorated with the 'persistent_locals'
decorator. You can access the dictionary of the variables in the
inner scope of the function via the 'locals' attribute.

For more information about the original function, query the
self._func attribute.
"""


class PersistentLocalsFunction(object):
    """Wrapper class for the 'persistent_locals' decorator.

    Refer to the docstring of instances for help about the wrapped
    function.
    """

    def __init__(self, func):
        # Overwritten by the injected finally-block on every call of the
        # wrapped function (exposed read-only via the 'locals' property).
        self._locals = {}
        # make function an instance method
        self._func = new.instancemethod(func, self, PersistentLocalsFunction)
        # create nice-looking doc string for the class
        signature = inspect.getargspec(func)
        signature[0].pop(0)  # remove 'self' argument
        signature = inspect.formatargspec(*signature)
        docprefix = func.func_name + signature
        default_doc = '<no docstring>'
        self.__doc__ = (docprefix + '\n\n' + (func.__doc__ or default_doc)
                        + _docpostfix)

    def __call__(self, *args, **kwargs):
        return self._func(*args, **kwargs)

    @property
    def locals(self):
        # Locals captured at the most recent call's exit (empty before any call).
        return self._locals
1,535
2,996
<reponame>opl-/Terasology<gh_stars>1000+
// Copyright 2021 The Terasology Foundation
// SPDX-License-Identifier: Apache-2.0
package org.terasology.engine.logic.players.event;

import org.terasology.engine.entitySystem.event.Event;
import org.terasology.engine.network.ServerEvent;

/**
 * Payload-less marker event, annotated {@link ServerEvent} so the network layer
 * routes it to the server for handling.
 * <p>
 * Presumably sent by a client to ask the server to respawn its player
 * (inferred from the name and package — confirm against the handling system).
 */
@ServerEvent
public class RespawnRequestEvent implements Event {
}
111
722
//MIT License // //Copyright (c) 2017 <NAME> // //Permission is hereby granted, free of charge, to any person obtaining a copy //of this software and associated documentation files (the "Software"), to deal //in the Software without restriction, including without limitation the rights //to use, copy, modify, merge, publish, distribute, sublicense, and/or sell //copies of the Software, and to permit persons to whom the Software is //furnished to do so, subject to the following conditions: // //The above copyright notice and this permission notice shall be included in all //copies or substantial portions of the Software. // //THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR //IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, //FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE //AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER //LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, //OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE //SOFTWARE. 
#include <bitsery/ext/inheritance.h> #include <gmock/gmock.h> #include "serialization_test_utils.h" using bitsery::ext::BaseClass; using bitsery::ext::VirtualBaseClass; using SerContext = BasicSerializationContext<bitsery::ext::InheritanceContext>; using testing::Eq; /* * base class */ struct Base { uint8_t x{}; virtual ~Base() = default; }; template <typename S> void serialize(S& s, Base& o) { s.value1b(o.x); } /* * non virtual inheritance from base */ struct Derive1NonVirtually:Base { uint8_t y1{}; }; template <typename S> void serialize(S& s, Derive1NonVirtually& o) { s.ext(o, BaseClass<Base>{}); s.value1b(o.y1); } struct Derive2NonVirtually:Base { uint8_t y2{}; }; template <typename S> void serialize(S& s, Derive2NonVirtually& o) { //use lambda to serialize base s.ext(o, BaseClass<Base>{}, [](S& s, Base& b) { s.object(b); }); s.value1b(o.y2); } struct MultipleInheritanceNonVirtualBase: Derive1NonVirtually, Derive2NonVirtually { uint8_t z{}; }; template <typename S> void serialize(S& s, MultipleInheritanceNonVirtualBase& o) { s.ext(o, BaseClass<Derive1NonVirtually>{}); s.ext(o, BaseClass<Derive2NonVirtually>{}); s.value1b(o.z); } /* * virtual inheritance from base */ struct Derive1Virtually:virtual Base { uint8_t y1{}; }; template <typename S> void serialize(S& s, Derive1Virtually& o) { s.ext(o, VirtualBaseClass<Base>{}); s.value1b(o.y1); } struct Derive2Virtually:virtual Base { uint8_t y2{}; }; template <typename S> void serialize(S& s, Derive2Virtually& o) { s.ext(o, VirtualBaseClass<Base>{}); s.value1b(o.y2); } struct MultipleInheritanceVirtualBase: Derive1Virtually, Derive2Virtually { uint8_t z{}; MultipleInheritanceVirtualBase() = default; MultipleInheritanceVirtualBase(uint8_t x_, uint8_t y1_, uint8_t y2_, uint8_t z_) { x = x_; y1 = y1_; y2 = y2_; z = z_; } template <typename S> void serialize(S& s) { s.ext(*this, BaseClass<Derive1Virtually>{}); s.ext(*this, BaseClass<Derive2Virtually>{}); s.value1b(z); } }; bool operator == (const 
MultipleInheritanceVirtualBase& lhs, const MultipleInheritanceVirtualBase& rhs) { return std::tie(lhs.x, lhs.y1, lhs.y2, lhs.z) == std::tie(rhs.x, rhs.y1, rhs.y2, rhs.z); } TEST(SerializeExtensionInheritance, BaseClass) { Derive1NonVirtually d1{}; d1.x = 187; d1.y1 = 74; Derive1NonVirtually rd1{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).object(d1); ctx.createDeserializer(inherCtxDes).object(rd1); EXPECT_THAT(rd1.x, Eq(d1.x)); EXPECT_THAT(rd1.y1, Eq(d1.y1)); EXPECT_THAT(ctx.getBufferSize(), Eq(2)); } TEST(SerializeExtensionInheritance, VirtualBaseClass) { Derive1Virtually d1{}; d1.x = 15; d1.y1 = 87; Derive1Virtually rd1{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).object(d1); ctx.createDeserializer(inherCtxDes).object(rd1); EXPECT_THAT(rd1.x, Eq(d1.x)); EXPECT_THAT(rd1.y1, Eq(d1.y1)); EXPECT_THAT(ctx.getBufferSize(), Eq(2)); } TEST(SerializeExtensionInheritance, MultipleBasesWithoutVirtualInheritance) { MultipleInheritanceNonVirtualBase md{}; //x is ambiguous because we don't derive virtually static_cast<Derive1NonVirtually&>(md).x = 1; static_cast<Derive2NonVirtually&>(md).x = 2; md.y1 = 4; md.z = 5; md.y2 = 6; MultipleInheritanceNonVirtualBase res{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).object(md); ctx.createDeserializer(inherCtxDes).object(res); EXPECT_THAT(static_cast<Derive1NonVirtually&>(res).x, Eq(static_cast<Derive1NonVirtually&>(md).x)); EXPECT_THAT(static_cast<Derive2NonVirtually&>(res).x, Eq(static_cast<Derive2NonVirtually&>(md).x)); EXPECT_THAT(res.y1, Eq(md.y1)); EXPECT_THAT(res.y2, Eq(md.y2)); EXPECT_THAT(res.z, Eq(md.z)); EXPECT_THAT(ctx.getBufferSize(), Eq(5)); //5 because two bases } TEST(SerializeExtensionInheritance, 
WhenNoVirtualInheritanceExistsThenInheritanceContextIsNotRequired) { MultipleInheritanceNonVirtualBase md{}; //x is ambiguous because we don't derive virtually static_cast<Derive1NonVirtually&>(md).x = 1; static_cast<Derive2NonVirtually&>(md).x = 2; md.y1 = 4; md.z = 5; md.y2 = 6; MultipleInheritanceNonVirtualBase res{}; //without InheritanceContext SerializationContext ctx{}; ctx.createSerializer().object(md); ctx.createDeserializer().object(res); EXPECT_THAT(static_cast<Derive1NonVirtually&>(res).x, Eq(static_cast<Derive1NonVirtually&>(md).x)); EXPECT_THAT(static_cast<Derive2NonVirtually&>(res).x, Eq(static_cast<Derive2NonVirtually&>(md).x)); EXPECT_THAT(res.y1, Eq(md.y1)); EXPECT_THAT(res.y2, Eq(md.y2)); EXPECT_THAT(res.z, Eq(md.z)); EXPECT_THAT(ctx.getBufferSize(), Eq(5)); //5 because two bases } TEST(SerializeExtensionInheritance, MultipleBasesWithVirtualInheritance) { MultipleInheritanceVirtualBase md{3,7,5,15}; MultipleInheritanceVirtualBase res{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).object(md); ctx.createDeserializer(inherCtxDes).object(res); EXPECT_THAT(res, Eq(md)); EXPECT_THAT(ctx.getBufferSize(), Eq(4)); //4 because virtual base } TEST(SerializeExtensionInheritance, MultipleBasesWithVirtualInheritanceMultipleObjects) { std::vector<MultipleInheritanceVirtualBase> data; data.emplace_back(4,8,7,9); data.emplace_back(1,2,3,4); data.emplace_back(8,7,15,97); data.emplace_back(54,132,45,84); data.emplace_back(27,85,41,2); std::vector<MultipleInheritanceVirtualBase> res{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).container(data, 10); ctx.createDeserializer(inherCtxDes).container(res, 10); EXPECT_THAT(res, ::testing::ContainerEq(data)); EXPECT_THAT(ctx.getBufferSize(), Eq(1 + 4 * data.size())); //1 container size + 4 because virtual base * 
elements } // class BasePrivateSerialize { public: explicit BasePrivateSerialize(uint8_t v):_v{v} {} uint8_t getX() const { return _v; } private: uint8_t _v; friend bitsery::Access; template <typename S> void serialize(S& s) { s.value1b(_v); } }; class DerivedPrivateBase: public BasePrivateSerialize { public: explicit DerivedPrivateBase(uint8_t v) : BasePrivateSerialize(v) {} uint8_t z{}; }; template <typename S> void serialize(S& s, DerivedPrivateBase& o) { //use lambda for base serialization s.ext(o, BaseClass<BasePrivateSerialize>{}, [](S& s, BasePrivateSerialize& b) { s.object(b); }); s.value1b(o.z); } struct BaseNonMemberSerialize { uint8_t x{}; }; template <typename S> void serialize(S& s, BaseNonMemberSerialize& o) { s.value1b(o.x); } struct DerivedMemberSerialize: public BaseNonMemberSerialize { uint8_t z{}; template <typename S> void serialize(S& s) { s.ext(*this, BaseClass<BaseNonMemberSerialize>{}); s.value1b(z); } }; //explicitly select serialize functions, for types that has ambiguous serialize functions namespace bitsery { template <> struct SelectSerializeFnc<DerivedPrivateBase>:UseNonMemberFnc {}; template <> struct SelectSerializeFnc<DerivedMemberSerialize>:UseMemberFnc {}; } TEST(SerializeExtensionInheritance, WhenDerivedClassHasAmbiguousSerializeFunctionThenExplicitlySelectSpecialization) { DerivedPrivateBase data1{43}; data1.z = 87; DerivedMemberSerialize data2{}; data2.x = 71; data2.z = 22; DerivedPrivateBase res1{0}; DerivedMemberSerialize res2{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).object(data1); ctx.createSerializer(inherCtxSer).object(data2); ctx.createDeserializer(inherCtxDes).object(res1); ctx.createDeserializer(inherCtxDes).object(res2); EXPECT_THAT(res1.getX(), Eq(data1.getX())); EXPECT_THAT(res1.z, Eq(data1.z)); EXPECT_THAT(res2.x, Eq(data2.x)); EXPECT_THAT(res2.z, Eq(data2.z)); } struct AbstractBase { uint8_t x{}; virtual 
void exec() = 0; virtual ~AbstractBase() = default; template <typename S> void serialize(S& s) { s.value1b(x); } }; struct ImplementedBase:AbstractBase { uint8_t y{}; void exec() override {} template <typename S> void serialize(S& s) { s.ext(*this, BaseClass<AbstractBase>{}); s.value1b(y); } }; TEST(SerializeExtensionInheritance, CanSerializeAbstractClass) { ImplementedBase data{}; data.x = 4; data.y = 2; data.exec(); ImplementedBase res{}; SerContext ctx{}; bitsery::ext::InheritanceContext inherCtxSer{}; bitsery::ext::InheritanceContext inherCtxDes{}; ctx.createSerializer(inherCtxSer).object(data); ctx.createDeserializer(inherCtxDes).object(res); EXPECT_THAT(res.x, Eq(data.x)); EXPECT_THAT(res.y, Eq(data.y)); }
4,429
314
<reponame>kolinkrewinkel/Multiplex // // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #import "CDStructures.h" #import "DVTDocumentLocation-Protocol.h" @interface DVTDocumentLocation (IDESourceControlDocumentLocationAdditions) + (id)fileDataTypeForFileDataType:(id)arg1; - (id)exportDocumentUsingTemplateDocument:(id)arg1 completionBlock:(dispatch_block_t)arg2 primaryBehavior:(BOOL)arg3; - (id)exportDocumentUsingTemplateDocument:(id)arg1 fromWorkspace:(id)arg2 completionBlock:(dispatch_block_t)arg3 primaryBehavior:(BOOL)arg4; @end
215
332
/* * Copyright 2013 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.xd.analytics.metrics.core; import org.springframework.data.annotation.PersistenceConstructor; import org.springframework.util.Assert; /** * Represents the data stored in a valueer of a single value. Operations on it are expected to increment or decrement * the value. The name property is a friendly user assigned name, and should be unique. * * Note: Additional metadata to help in searching for Counters, such as tags and last time updated will be coming. * * @author <NAME> * */ public class Counter implements Metric { private final String name; private long value; /** * Construct a new Counter given a name * * @param name the name of the Counter. */ public Counter(String name) { Assert.notNull(name); this.name = name; this.value = 0L; } /** * Construct a new Counter given a name and a initial value of the value * * @param name the name of the value * @param value initial value. */ @PersistenceConstructor public Counter(String name, long value) { Assert.notNull(name); this.name = name; this.value = value; } /** * @return the value */ public long getValue() { return value; } /** * Increment this counter by a given amount. Stores that manage their own value bookkeepingmay not use this method. */ public long increment(long amount) { return value += amount; } /** * Decrement this counter by a given amount. 
Stores that manage their own value bookkeepingmay not use this method. */ public long decrement(long amount) { return value -= amount; } /** * @return the name */ @Override public String getName() { return name; } @Override public final int hashCode() { return name.hashCode(); } @Override public final boolean equals(Object o) { if (this == o) return true; if (!(o instanceof Counter)) { return false; } Counter counter = (Counter) o; if (!name.equals(counter.name)) return false; return true; } @Override public String toString() { return "Counter [name=" + name + ", value=" + value + "]"; } }
820
678
/** * This header is generated by class-dump-z 0.2b. * * Source: /System/Library/PrivateFrameworks/OfficeImport.framework/OfficeImport */ #import <OfficeImport/OfficeImport-Structs.h> #import <OfficeImport/MFPath.h> @class NSBezierPathStub; __attribute__((visibility("hidden"))) @interface MFCocoaPath : MFPath { @private NSBezierPathStub *m_path; // 4 = 0x4 int m_state; // 8 = 0x8 } @property(readonly, assign) int state; // G=0x2c750d; converted property - (id)init; // 0x96d69 - (id)initWithPath:(id)path state:(int)state; // 0x9cae5 - (void)dealloc; // 0x91e5d - (id)copyWithZone:(NSZone *)zone; // 0x9c87d // converted property getter: - (int)state; // 0x2c750d - (BOOL)isOpen; // 0x9ae35 - (int)begin; // 0x15d979 - (int)end; // 0x15e2a9 - (int)abort; // 0x2c7569 - (CGPoint)currentPoint; // 0x15e205 - (int)closeFigure; // 0x15e1c5 - (int)flatten; // 0x2c7521 - (int)widen:(id)widen; // 0x2c751d - (int)stroke:(id)stroke; // 0x1d6545 - (int)fill:(id)fill; // 0x15e395 - (id)getBezierPath; // 0x1c1705 - (void)appendBezierPath:(id)path dc:(id)dc; // 0x15de29 @end
491
317
//
// refract/dsd/Utils.h
// librefract
//
// Created by <NAME> on 07/11/2017
// Copyright (c) 2017 Apiary Inc. All rights reserved.
//

#ifndef REFRACT_DSD_UTILS_H
#define REFRACT_DSD_UTILS_H

#include <memory>

namespace refract
{
    namespace utils
    {
        // Base case of the variadic recursion below: with no values left
        // there is nothing to append.
        template <typename Sequence>
        void move_back(Sequence&) noexcept
        {
            // intentionally empty
        }

        // Perfect-forward `head` into the back of `seq` via emplace_back,
        // then recurse on the remaining `tail` values in order.
        template <typename Sequence, typename Head, typename... Tail>
        void move_back(Sequence& seq, Head&& head, Tail&&... tail)
        {
            seq.emplace_back(std::forward<Head>(head));
            move_back(seq, std::forward<Tail>(tail)...);
        }
    }
}

#endif
314
1,253
<reponame>avi-pal/al-go-rithms #include<iostream> #include<stack> using namespace std; bool isbalanced(char* a){ stack<char> s; for(int i=0;a[i]!='\0';i++){ char ch=a[i]; switch(ch){ case '(': case '{': case '[': s.push(ch); break; case ')': if(!s.empty() && s.top()=='('){ s.pop(); break; } else{ return false; } case '}': if(!s.empty() && s.top()=='{'){ s.pop(); break; } else{ return false; } case ']': if(!s.empty() && s.top()=='['){ s.pop(); break; } else{ return false; } } } return s.empty(); } int main() { stack<int>s; char a[]="{a+[b+(c+d)]+(e+f)}"; if(isbalanced(a)){ cout<<"Balanced."<<endl; } else{ cout<<"Not balanced."<<endl; } return 0; }
550
485
<reponame>bookdash/BookdashAndroidApp<filename>app/src/main/java/org/bookdash/android/presentation/main/MainActivity.java package org.bookdash.android.presentation.main; import android.content.ActivityNotFoundException; import android.content.Intent; import android.net.Uri; import android.os.Bundle; import android.text.Html; import android.text.method.LinkMovementMethod; import android.view.MenuItem; import android.view.View; import android.widget.TextView; import androidx.appcompat.app.ActionBar; import androidx.appcompat.app.AlertDialog; import androidx.appcompat.widget.Toolbar; import androidx.core.view.GravityCompat; import androidx.drawerlayout.widget.DrawerLayout; import androidx.fragment.app.Fragment; import androidx.fragment.app.FragmentManager; import androidx.fragment.app.FragmentTransaction; import com.google.android.material.navigation.NavigationView; import com.google.android.material.snackbar.Snackbar; import org.bookdash.android.BuildConfig; import org.bookdash.android.Injection; import org.bookdash.android.R; import org.bookdash.android.presentation.about.AboutFragment; import org.bookdash.android.presentation.activity.BaseAppCompatActivity; import org.bookdash.android.presentation.downloads.DownloadsFragment; import org.bookdash.android.presentation.listbooks.ListBooksFragment; import org.bookdash.android.presentation.settings.SettingsFragment; public class MainActivity extends BaseAppCompatActivity implements MainContract.MainView, NavDrawerInterface { private static final int INVITE_REQUEST_CODE = 1; private static final String TAG = "MainActivity"; private static final String GOOGLE_PLAY_STORE_URL = "http://play.google.com/store/apps/details?id="; private static final String GOOGLE_PLAY_MARKET_URL = "market://details?id="; private DrawerLayout drawerLayout; private NavigationView navigationView; private MainContract.MainUserActions mainPresenter; @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); 
setContentView(R.layout.activity_main); drawerLayout = (DrawerLayout) findViewById(R.id.drawer_layout); navigationView = (NavigationView) findViewById(R.id.navigation_view); mainPresenter = new MainPresenter(this, Injection.provideAnalytics()); final ActionBar actionBar = getSupportActionBar(); if (actionBar != null) { actionBar.setHomeAsUpIndicator(R.drawable.ic_menu_24dp); actionBar.setDisplayHomeAsUpEnabled(true); } setUpNavDrawer(); showAllBooks(); } private void setUpNavDrawer() { navigationView.setCheckedItem(R.id.action_all_books); navigationView.setNavigationItemSelectedListener(new NavigationView.OnNavigationItemSelectedListener() { @Override public boolean onNavigationItemSelected(MenuItem menuItem) { switch (menuItem.getItemId()) { case R.id.action_all_books: { mainPresenter.clickViewAllBooks(); break; } case R.id.action_downloads: mainPresenter.clickViewDownloadBooks(); break; case R.id.action_about: showAboutPage(); break; case R.id.action_settings: { mainPresenter.clickShowSettings(); break; } case R.id.action_thanks: { showThanksPopover(); break; } case R.id.action_invite_friends: { mainPresenter.clickInvitePage(); break; } case R.id.action_rate_app: { mainPresenter.clickRateApp(); break; } default: } drawerLayout.closeDrawers(); if (menuItem.getItemId() == R.id.action_thanks || menuItem .getItemId() == R.id.action_invite_friends || menuItem.getItemId() == R.id.action_rate_app) { return false; } else { return true; } } }); } private void showAllBooks() { mainPresenter.clickViewAllBooks(); } public void showSettingsScreen() { FragmentManager fragmentManager = getSupportFragmentManager(); FragmentTransaction ft = fragmentManager.beginTransaction(); Fragment settingsFragment = new SettingsFragment(); ft.replace(R.id.fragment_content, settingsFragment, "SETTINGS"); ft.commit(); } @Override public void showThanksPopover() { AlertDialog.Builder thanksDialog = new AlertDialog.Builder(this); thanksDialog.setTitle(getString(R.string.contributions_to_app)); 
thanksDialog.setMessage(Html.fromHtml(getString(R.string.list_of_contributors))); thanksDialog.setPositiveButton(android.R.string.ok, null); AlertDialog ad = thanksDialog.show(); ((TextView) ad.findViewById(android.R.id.message)).setMovementMethod(LinkMovementMethod.getInstance()); } @Override public void showAboutPage() { FragmentManager fragmentManager = getSupportFragmentManager(); FragmentTransaction ft = fragmentManager.beginTransaction(); Fragment f = AboutFragment.newInstance(); ft.replace(R.id.fragment_content, f, "ABOUT"); ft.commit(); } @Override public void showRatingPlayStore() { Uri uri = Uri.parse(GOOGLE_PLAY_MARKET_URL + BuildConfig.APPLICATION_ID); Intent goToMarket = new Intent(Intent.ACTION_VIEW, uri); goToMarket.addFlags(Intent.FLAG_ACTIVITY_NO_HISTORY | Intent.FLAG_ACTIVITY_MULTIPLE_TASK); try { startActivity(goToMarket); } catch (ActivityNotFoundException e) { try { startActivity( new Intent(Intent.ACTION_VIEW, Uri.parse(GOOGLE_PLAY_STORE_URL + BuildConfig.APPLICATION_ID))); } catch (ActivityNotFoundException anfe) { Snackbar.make(navigationView, R.string.error_opening_app_rating, Snackbar.LENGTH_LONG); } } } @Override public void showAllBooksPage() { FragmentManager fragmentManager = getSupportFragmentManager(); FragmentTransaction ft = fragmentManager.beginTransaction(); Fragment f = ListBooksFragment.newInstance(); ft.replace(R.id.fragment_content, f, "ALLBOOKS"); ft.commit(); } @Override public void showDownloadedBooksPage() { FragmentManager fragmentManager = getSupportFragmentManager(); FragmentTransaction ft = fragmentManager.beginTransaction(); Fragment f = DownloadsFragment.newInstance(); ft.replace(R.id.fragment_content, f, "DOWNLOADED_BOOKS"); ft.commit(); } @Override public void inviteFriends() { try { Intent sendIntent = new Intent(); sendIntent.setAction(Intent.ACTION_SEND); sendIntent.putExtra(Intent.EXTRA_TEXT, getString(R.string.invitation_message)); sendIntent.putExtra(Intent.EXTRA_SUBJECT, 
getString(R.string.invitation_subject)); sendIntent.setType("text/plain"); startActivity(Intent.createChooser(sendIntent, getResources().getText(R.string.invite_using))); } catch (ActivityNotFoundException ac) { Snackbar.make(navigationView, R.string.invite_error_no_apps_found, Snackbar.LENGTH_LONG) .show(); } } @Override public String getScreenName() { return "MainActivity"; } @Override public void openNavDrawer() { drawerLayout.openDrawer(navigationView); } @Override public void closeNavDrawer() { drawerLayout.closeDrawer(navigationView); } @Override public void setToolbar(Toolbar toolbar) { setSupportActionBar(toolbar); toolbar.setNavigationOnClickListener(new View.OnClickListener() { @Override public void onClick(View v) { drawerLayout.openDrawer(GravityCompat.START); } }); } }
3,683
679
/**************************************************************
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 *************************************************************/

/*
 * Created on 2005
 * by <NAME>
 */
package com.sun.star.tooling.DirtyTags;

import java.io.IOException;
import java.util.Map;

import com.sun.star.tooling.converter.ExtMap;

/**
 * A single markup tag, together with helpers that XML-escape its text and
 * wrap the value of its (single) translatable attribute in
 * {@code <sub>...</sub>} markers.
 *
 * @author <NAME> 2005
 */
public class Tag {

    // Currently unused; kept for compatibility with older revisions.
    private static int indent = 0;

    /** Maps tag names to the single attribute whose value is translatable. */
    Map tagNames;

    private String tagType;
    private String tagName;
    private String tagString;

    /** Shared sentinel representing "no tag". */
    public static Tag EMPTYTAG = new Tag("", "", "");

    /**
     * Create a new instance of Tag.
     *
     * @param tagType   one of "Text", "StartTag", "EndTag", "StartAndEndTag"
     * @param tagName   the bare tag name (e.g. "link"); "" for plain text
     * @param tagString the full original tag text
     */
    public Tag(String tagType, String tagName, String tagString) {
        this.tagType = tagType;
        this.tagName = tagName;
        this.tagString = tagString;
        tagNames = new ExtMap();
        // Only these tags carry a translatable attribute value.
        tagNames.put("link", "name");
        tagNames.put("caption", "xml-lang");
        tagNames.put("alt", "xml-lang");
    }

    /**
     * Return the tag text XML-escaped; for tags that can carry translatable
     * content the translatable attribute value is additionally wrapped in
     * {@code <sub>...</sub>}.
     *
     * @return the escaped (and possibly wrapped) tag string
     * @throws IOException kept for caller compatibility; not thrown here
     */
    public String getWrappedTagString() throws IOException {
        if (this.canHaveTranslateableContent()) {
            return this.wrapTagStringIntern();
        } else {
            return xmlString(this.tagString);
        }
    }

    /**
     * XML-escape the five predefined entities (&amp;, &lt;, &gt;, &quot;,
     * &apos;). Implemented as a single StringBuilder pass; the previous
     * version rebuilt the string with substring concatenation per match,
     * which was O(n^2). The output is identical: each original character is
     * replaced independently and inserted entity text is never re-escaped.
     *
     * @param string the text to escape; may be null
     * @return the escaped text, or null if the input was null
     * @throws java.io.IOException kept for caller compatibility; not thrown
     */
    private final String xmlString(final String string) throws java.io.IOException {
        if (string == null)
            return string;
        final StringBuilder escaped = new StringBuilder(string.length());
        for (int i = 0; i < string.length(); i++) {
            final char c = string.charAt(i);
            switch (c) {
            case '&':
                escaped.append("&amp;");
                break;
            case '<':
                escaped.append("&lt;");
                break;
            case '>':
                escaped.append("&gt;");
                break;
            case '"':
                escaped.append("&quot;");
                break;
            case '\'':
                escaped.append("&apos;");
                break;
            default:
                escaped.append(c);
            }
        }
        return escaped.toString();
    }

    /**
     * @return whether this tag's name is one whose attribute value may be
     *         translatable (i.e. it is a key in {@link #tagNames})
     */
    private boolean canHaveTranslateableContent() {
        return (tagNames.containsKey(this.tagName));
    }

    /**
     * Escape the tag string and wrap the value of its translatable attribute
     * in {@code <sub>...</sub>}. The string is split on '=', each part is
     * inspected for the attribute name, and the quoted value following it is
     * wrapped; everything else is XML-escaped.
     *
     * @return the escaped tag string with the translatable value wrapped
     * @throws IOException kept for caller compatibility; not thrown here
     */
    private String wrapTagStringIntern() throws IOException {
        String[] split = this.tagString.split("=");
        int length = split.length;
        // no attribute found
        if (length == 0)
            return xmlString(tagString);
        else {
            int i = 0;
            while (i < length - 1 /* the last part can only contain an attribute value */) {
                String attributeName = split[i].trim();
                // escape this part unless it already contains a closing wrapper
                if (split[i].indexOf("</sub>") < 0)
                    split[i] = xmlString(split[i]);
                i++;
                String value;
                // the attribute name is the last blank-separated word before '='
                // NOTE(review): assumes a space precedes the attribute name;
                // lastIndexOf(" ") == -1 would throw here - confirm the input
                // format always provides one.
                attributeName = (attributeName.substring(attributeName.lastIndexOf(" ")).trim());
                if ((value = translateableAttributeValue(this.tagName)).equals(attributeName)) {
                    int valueStart = 0;
                    int valueEnd = 0;
                    // the value must either be surrounded by '"'...
                    if ((valueStart = split[i].indexOf('"')) >= 0) {
                        valueEnd = split[i].lastIndexOf('"');
                        // ...or surrounded by "'"
                    } else if ((valueStart = split[i].indexOf("'")) >= 0) {
                        valueEnd = split[i].lastIndexOf("'");
                    } else {
                        // there seems to be an error: we split on '=' but found
                        // no quote; we don't check syntax, just continue
                        continue;
                    }
                    // we found the borders of a value that might be translated;
                    // wrap it with <sub> tags, escaping the surrounding text
                    split[i] = xmlString(split[i].substring(0, valueStart + 1))
                            + "<sub>"
                            + xmlString(split[i].substring(valueStart + 1, valueEnd))
                            + "</sub>"
                            + xmlString(split[i].substring(valueEnd));
                }
            }
            // we have the wrapped parts; re-join them with the '=' we split on
            String wrappedString = "";
            int j = 0;
            for (j = 0; j < split.length - 1; j++) {
                wrappedString += (split[j] + "=");
            }
            wrappedString += split[j];
            return wrappedString;
        }
    }

    /**
     * @param tagName the name of the tag to check
     * @return the name of the attribute that can contain a translatable
     *         value, or null if the tag has none
     */
    private String translateableAttributeValue(String tagName) {
        return (String) this.tagNames.get(tagName);
    }

    /**
     * Create a new instance of Tag from the raw tag text; the name and type
     * are derived from the text itself.
     *
     * @param tagString the full original tag text
     */
    public Tag(String tagString) {
        this(extractTagType(extractTagName(tagString)), extractTagName(tagString), tagString);
    }

    /**
     * Extract the bare tag name from the raw tag text; returns "" when no
     * markup is found (plain text).
     */
    private static String extractTagName(String tagString) {
        int start = tagString.indexOf('<') + 1;
        // NOTE(review): the name ends at the last '\' - this tag dialect
        // apparently escapes the closing '>' as '\>'; confirm with the input.
        int end = tagString.lastIndexOf('\\');
        if (start >= 0 && end > 0) {
            tagString = tagString.substring(start, end);
            if (tagString.indexOf(" ") > 0) {
                tagString = tagString.substring(0, tagString.indexOf(" "));
            }
            return tagString;
        } else {
            return "";
        }
    }

    /**
     * Classify a tag name as "Text", "StartTag", "EndTag" or
     * "StartAndEndTag" based on leading/trailing '/'.
     */
    private static String extractTagType(String tagName) {
        if (tagName.equals("")) {
            return "Text";
        } else if (tagName.startsWith("/")) {
            return "EndTag";
        } else if (tagName.endsWith("/")) {
            return "StartAndEndTag";
        } else {
            return "StartTag";
        }
    }

    /**
     * @return Returns the tagName.
     */
    public String getTagName() {
        return this.tagName;
    }

    /**
     * @return Returns the tagString.
     */
    public String getTagString() {
        return this.tagString;
    }

    /**
     * @return Returns the tagType.
     */
    public String getTagType() {
        return this.tagType;
    }
}
3,519
882
<reponame>gigliovale/h2o import unittest, random, sys, time sys.path.extend(['.','..','../..','py']) import h2o, h2o_cmd, h2o_rf, h2o_import as h2i, h2o_util paramDict = { # 2 new 'destination_key': ['model_keyA', '012345', '__hello'], 'cols': [None, None, None, None, None, '0,1,2,3,4,5,6,7,8','C1,C2,C3,C4,C5,C6,C7,C8'], # exclusion handled below, otherwise exception: # ...Arguments 'cols', 'ignored_cols_by_name', and 'ignored_cols' are exclusive 'ignored_cols_by_name': [None, None, None, None, 'C1','C2','C3','C4','C5','C6','C7','C8','C9'], # probably can't deal with mixtures of cols and ignore, so just use cols for now # could handle exclusion below # 'ignored_cols': [None, None, None, None, None, '0,1,2,3,4,5,6,7,8','C1,C2,C3,C4,C5,C6,C7,C8'], 'n_folds': [None, 2, 5], # has to be >= 2? 'keep_cross_validation_splits': [None, 0, 1], # 'classification': [None, 0, 1], # doesn't support regression yet 'classification': [None, 1], 'balance_classes': [None, 0, 1], # never run with unconstrained balance_classes size if random sets balance_classes..too slow 'max_after_balance_size': [.1, 1, 2], 'oobee': [None, 0, 1], 'sampling_strategy': [None, 'RANDOM'], 'select_stat_type': [None, 'ENTROPY', 'GINI'], 'response': [54, 'C55'], # equivalent. None is not legal 'validation': [None, 'covtype.data.hex'], 'ntrees': [1], # just do one tree 'importance': [None, 0, 1], 'max_depth': [None, 1,10,20,100], 'nbins': [None,5,10,100,1000], 'sample_rate': [None,0.20,0.40,0.60,0.80,0.90], 'seed': [None,'0','1','11111','19823134','1231231'], # Can't have more mtries than cols..force to 4 if cols is not None? 
'mtries': [1,3,5,7], } class Basic(unittest.TestCase): def tearDown(self): h2o.check_sandbox_for_errors() @classmethod def setUpClass(cls): global SEED SEED = h2o.setup_random_seed() h2o.init(java_heap_GB=10) @classmethod def tearDownClass(cls): h2o.tear_down_cloud() def test_speedrf_params_rand2_fvec(self): csvPathname = 'standard/covtype.data' hex_key = 'covtype.data.hex' for trial in range(10): # params is mutable. This is default. # response is required for SpeeERF params = { 'response': 'C55', 'ntrees': 1, 'mtries': 7, 'balance_classes': 0, # never run with unconstrained balance_classes size if random sets balance_classes..too slow 'max_after_balance_size': 2, 'importance': 0} colX = h2o_util.pickRandParams(paramDict, params) if 'cols' in params and params['cols']: # exclusion if 'ignored_cols_by_name' in params: params['ignored_cols_by_name'] = None else: if 'ignored_cols_by_name' in params and params['ignored_cols_by_name']: params['mtries'] = random.randint(1,53) else: params['mtries'] = random.randint(1,54) kwargs = params.copy() # adjust timeoutSecs with the number of trees timeoutSecs = 80 + ((kwargs['ntrees']*80) * max(1,kwargs['mtries']/60) ) start = time.time() parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='put', hex_key=hex_key) h2o_cmd.runSpeeDRF(parseResult=parseResult, timeoutSecs=timeoutSecs, retryDelaySecs=1, **kwargs) elapsed = time.time()-start print "Trial #", trial, "completed in", elapsed, "seconds.", "%d pct. of timeout" % ((elapsed*100)/timeoutSecs) if __name__ == '__main__': h2o.unit_main()
1,865
12,278
<reponame>randolphwong/mcsema //===----------------------------------------------------------------------===// // // The LLVM Compiler Infrastructure // // This file is dual licensed under the MIT and the University of Illinois Open // Source Licenses. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // Adaptation to Boost of the libcxx // Copyright 2010 <NAME> // Distributed under the Boost Software License, Version 1.0. // See http://www.boost.org/LICENSE_1_0.txt // ToDuration shall be an instantiation of duration. #include <boost/chrono/chrono.hpp> void test() { typedef boost::chrono::system_clock Clock; typedef boost::chrono::time_point<Clock, boost::chrono::milliseconds> FromTimePoint; typedef boost::chrono::time_point<Clock, boost::chrono::minutes> ToTimePoint; boost::chrono::time_point_cast<ToTimePoint>(FromTimePoint(boost::chrono::milliseconds(3))); }
298
339
<filename>nextgen/bcbio/__init__.py """Blue collar bioinformatics main module. """ __import__('pkg_resources').declare_namespace(__name__)
48
2,816
//===----------------------------------------------------------------------===// // DuckDB // // duckdb/common/types/hash.hpp // // //===----------------------------------------------------------------------===// #pragma once #include "duckdb/common/common.hpp" #include "duckdb/common/types.hpp" namespace duckdb { struct string_t; // efficient hash function that maximizes the avalanche effect and minimizes // bias // see: https://nullprogram.com/blog/2018/07/31/ inline hash_t murmurhash64(uint64_t x) { return x * UINT64_C(0xbf58476d1ce4e5b9); } inline hash_t murmurhash32(uint32_t x) { return murmurhash64(x); } template <class T> hash_t Hash(T value) { return murmurhash32(value); } //! Combine two hashes by XORing them inline hash_t CombineHash(hash_t left, hash_t right) { return left ^ right; } template <> hash_t Hash(uint64_t val); template <> hash_t Hash(int64_t val); template <> hash_t Hash(hugeint_t val); template <> hash_t Hash(float val); template <> hash_t Hash(double val); template <> hash_t Hash(const char *val); template <> hash_t Hash(char *val); template <> hash_t Hash(string_t val); template <> hash_t Hash(interval_t val); hash_t Hash(const char *val, size_t size); hash_t Hash(uint8_t *val, size_t size); } // namespace duckdb
470
2,917
// // DynamicColorTvOs.h // DynamicColorTvOs // // Created by <NAME> on 15/01/2017. // Copyright © 2017 <NAME>. All rights reserved. // #import <UIKit/UIKit.h> //! Project version number for DynamicColorTvOs. FOUNDATION_EXPORT double DynamicColorTvOsVersionNumber; //! Project version string for DynamicColorTvOs. FOUNDATION_EXPORT const unsigned char DynamicColorTvOsVersionString[]; // In this header, you should import all the public headers of your framework using statements like #import <DynamicColorTvOs/PublicHeader.h>
163
575
// Copyright 2015 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "third_party/blink/renderer/core/dom/flat_tree_traversal.h" #include <memory> #include "testing/gtest/include/gtest/gtest.h" #include "third_party/blink/renderer/core/dom/document.h" #include "third_party/blink/renderer/core/dom/element.h" #include "third_party/blink/renderer/core/dom/node.h" #include "third_party/blink/renderer/core/dom/node_traversal.h" #include "third_party/blink/renderer/core/dom/shadow_root.h" #include "third_party/blink/renderer/core/frame/local_frame_view.h" #include "third_party/blink/renderer/core/html/html_element.h" #include "third_party/blink/renderer/core/testing/page_test_base.h" #include "third_party/blink/renderer/platform/bindings/exception_state.h" #include "third_party/blink/renderer/platform/geometry/int_size.h" #include "third_party/blink/renderer/platform/runtime_enabled_features.h" #include "third_party/blink/renderer/platform/testing/runtime_enabled_features_test_helpers.h" #include "third_party/blink/renderer/platform/wtf/std_lib_extras.h" #include "third_party/blink/renderer/platform/wtf/vector.h" namespace blink { // To avoid symbol collisions in jumbo builds. namespace flat_tree_traversal_test { class FlatTreeTraversalTest : public PageTestBase { public: FlatTreeTraversalTest() {} protected: // Sets |mainHTML| to BODY element with |innerHTML| property and attaches // shadow root to child with |shadowHTML|, then update distribution for // calling member functions in |FlatTreeTraversal|. 
void SetupSampleHTML(const char* main_html, const char* shadow_html, unsigned); void SetupDocumentTree(const char* main_html); void AttachOpenShadowRoot(Element& shadow_host, const char* shadow_inner_html); }; void FlatTreeTraversalTest::SetupSampleHTML(const char* main_html, const char* shadow_html, unsigned index) { Element* body = GetDocument().body(); body->setInnerHTML(String::FromUTF8(main_html)); auto* shadow_host = To<Element>(NodeTraversal::ChildAt(*body, index)); AttachOpenShadowRoot(*shadow_host, shadow_html); } void FlatTreeTraversalTest::SetupDocumentTree(const char* main_html) { Element* body = GetDocument().body(); body->setInnerHTML(String::FromUTF8(main_html)); } void FlatTreeTraversalTest::AttachOpenShadowRoot( Element& shadow_host, const char* shadow_inner_html) { ShadowRoot& shadow_root = shadow_host.AttachShadowRootInternal(ShadowRootType::kOpen); shadow_root.setInnerHTML(String::FromUTF8(shadow_inner_html)); } namespace { void TestCommonAncestor(Node* expected_result, const Node& node_a, const Node& node_b) { Node* result1 = FlatTreeTraversal::CommonAncestor(node_a, node_b); EXPECT_EQ(expected_result, result1) << "commonAncestor(" << node_a.textContent() << "," << node_b.textContent() << ")"; Node* result2 = FlatTreeTraversal::CommonAncestor(node_b, node_a); EXPECT_EQ(expected_result, result2) << "commonAncestor(" << node_b.textContent() << "," << node_a.textContent() << ")"; } } // namespace // Test case for // - childAt // - countChildren // - hasChildren // - index // - isDescendantOf TEST_F(FlatTreeTraversalTest, childAt) { const char* main_html = "<div id='m0'>" "<span slot='#m00' id='m00'>m00</span>" "<span slot='#m01' id='m01'>m01</span>" "</div>"; const char* shadow_html = "<a id='s00'>s00</a>" "<slot name='#m01'></slot>" "<a id='s02'>s02</a>" "<a id='s03'><slot name='#m00'></slot></a>" "<a id='s04'>s04</a>"; SetupSampleHTML(main_html, shadow_html, 0); Element* body = GetDocument().body(); Element* m0 = body->QuerySelector("#m0"); 
Element* m00 = m0->QuerySelector("#m00"); Element* m01 = m0->QuerySelector("#m01"); Element* shadow_host = m0; ShadowRoot* shadow_root = shadow_host->OpenShadowRoot(); Element* s00 = shadow_root->QuerySelector("#s00"); Element* s02 = shadow_root->QuerySelector("#s02"); Element* s03 = shadow_root->QuerySelector("#s03"); Element* s04 = shadow_root->QuerySelector("#s04"); const unsigned kNumberOfChildNodes = 5; Node* expected_child_nodes[5] = {s00, m01, s02, s03, s04}; ASSERT_EQ(kNumberOfChildNodes, FlatTreeTraversal::CountChildren(*shadow_host)); EXPECT_TRUE(FlatTreeTraversal::HasChildren(*shadow_host)); for (unsigned index = 0; index < kNumberOfChildNodes; ++index) { Node* child = FlatTreeTraversal::ChildAt(*shadow_host, index); EXPECT_EQ(index, FlatTreeTraversal::Index(*child)) << "FlatTreeTraversal::index(FlatTreeTraversal(*shadowHost, " << index << "))"; EXPECT_TRUE(FlatTreeTraversal::IsDescendantOf(*child, *shadow_host)) << "FlatTreeTraversal::isDescendantOf(*FlatTreeTraversal(*" "shadowHost, " << index << "), *shadowHost)"; bool is_slot_element = IsA<HTMLSlotElement>(child); if (is_slot_element) { child = FlatTreeTraversal::FirstChild(*child); } EXPECT_EQ(expected_child_nodes[index], child) << "FlatTreeTraversal::childAt(*shadowHost, " << index << ")"; EXPECT_EQ(is_slot_element ? 0 : index, FlatTreeTraversal::Index(*child)) << "FlatTreeTraversal::index(FlatTreeTraversal(*shadowHost, " << index << "))"; } EXPECT_EQ(nullptr, FlatTreeTraversal::ChildAt(*shadow_host, kNumberOfChildNodes + 1)) << "Out of bounds childAt() returns nullptr."; // Distributed node |m00| is child of slot in shadow tree |s03|. 
EXPECT_EQ( m00, FlatTreeTraversal::FirstChild(*FlatTreeTraversal::FirstChild(*s03))); } TEST_F(FlatTreeTraversalTest, ChildrenOf) { SetupSampleHTML( "<p id=sample>ZERO<span slot=three>three</b><span " "slot=one>one</b>FOUR</p>", "zero<slot name=one></slot>two<slot name=three></slot>four", 0); Element* const sample = GetDocument().getElementById("sample"); HeapVector<Member<Node>> expected_nodes; for (Node* runner = FlatTreeTraversal::FirstChild(*sample); runner; runner = FlatTreeTraversal::NextSibling(*runner)) { expected_nodes.push_back(runner); } HeapVector<Member<Node>> actual_nodes; for (Node& child : FlatTreeTraversal::ChildrenOf(*sample)) actual_nodes.push_back(&child); EXPECT_EQ(expected_nodes, actual_nodes); } // Test case for // - commonAncestor // - isDescendantOf TEST_F(FlatTreeTraversalTest, commonAncestor) { // We build following flat tree: // ____BODY___ // | | | // m0 m1 m2 m1 is shadow host having m10, m11, m12. // _|_ | __|__ // | | | | | // m00 m01 | m20 m21 // _____|_____________ // | | | | | // s10 s11 s12 s13 s14 // | // __|__ // | | | // m12 m10 m11 <-- distributed // where: each symbol consists with prefix, child index, child-child index. 
// prefix "m" means node in main tree, // prefix "d" means node in main tree and distributed // prefix "s" means node in shadow tree const char* main_html = "<a id='m0'><b id='m00'>m00</b><b id='m01'>m01</b></a>" "<div id='m1'>" "<b slot='#m10' id='m10'>m10</b>" "<b slot='#m11' id='m11'>m11</b>" "<b slot='#m12' id='m12'>m12</b>" "</div>" "<a id='m2'><b id='m20'>m20</b><b id='m21'>m21</b></a>"; const char* shadow_html = "<a id='s10'>s10</a>" "<a id='s11'><slot name='#m12'></slot></a>" "<a id='s12'>s12</a>" "<a id='s13'>" "<slot name='#m10'></slot>" "<slot name='#m11'></slot>" "</a>" "<a id='s14'>s14</a>"; SetupSampleHTML(main_html, shadow_html, 1); Element* body = GetDocument().body(); Element* m0 = body->QuerySelector("#m0"); Element* m1 = body->QuerySelector("#m1"); Element* m2 = body->QuerySelector("#m2"); Element* m00 = body->QuerySelector("#m00"); Element* m01 = body->QuerySelector("#m01"); Element* m10 = body->QuerySelector("#m10"); Element* m11 = body->QuerySelector("#m11"); Element* m12 = body->QuerySelector("#m12"); Element* m20 = body->QuerySelector("#m20"); Element* m21 = body->QuerySelector("#m21"); ShadowRoot* shadow_root = m1->OpenShadowRoot(); Element* s10 = shadow_root->QuerySelector("#s10"); Element* s11 = shadow_root->QuerySelector("#s11"); Element* s12 = shadow_root->QuerySelector("#s12"); Element* s13 = shadow_root->QuerySelector("#s13"); Element* s14 = shadow_root->QuerySelector("#s14"); TestCommonAncestor(body, *m0, *m1); TestCommonAncestor(body, *m1, *m2); TestCommonAncestor(body, *m1, *m20); TestCommonAncestor(body, *s14, *m21); TestCommonAncestor(m0, *m0, *m0); TestCommonAncestor(m0, *m00, *m01); TestCommonAncestor(m1, *m1, *m1); TestCommonAncestor(m1, *s10, *s14); TestCommonAncestor(m1, *s10, *m12); TestCommonAncestor(m1, *s12, *m12); TestCommonAncestor(m1, *m10, *m12); TestCommonAncestor(m01, *m01, *m01); TestCommonAncestor(s11, *s11, *m12); TestCommonAncestor(s13, *m10, *m11); s12->remove(ASSERT_NO_EXCEPTION); TestCommonAncestor(s12, 
*s12, *s12); TestCommonAncestor(nullptr, *s12, *s11); TestCommonAncestor(nullptr, *s12, *m01); TestCommonAncestor(nullptr, *s12, *m20); m20->remove(ASSERT_NO_EXCEPTION); TestCommonAncestor(m20, *m20, *m20); TestCommonAncestor(nullptr, *m20, *s12); TestCommonAncestor(nullptr, *m20, *m1); } // Test case for // - NextSkippingChildren // - PreviousAbsoluteSibling TEST_F(FlatTreeTraversalTest, SkippingChildrenFunctions) { const char* main_html = "<div id='m0'>m0</div>" "<div id='m1'>" "<span slot='#m10' id='m10'>m10</span>" "<span slot='#m11' id='m11'>m11</span>" "</div>" "<div id='m2'>m2</div>"; const char* shadow_html = "<slot name='#m11'></slot>" "<a id='s11'>s11</a>" "<a id='s12'>" "<b id='s120'>s120</b>" "<slot name='#m10'></slot>" "</a>"; SetupSampleHTML(main_html, shadow_html, 1); Element* body = GetDocument().body(); Element* m0 = body->QuerySelector("#m0"); Element* m1 = body->QuerySelector("#m1"); Element* m2 = body->QuerySelector("#m2"); Element* m10 = body->QuerySelector("#m10"); Element* m10_slot_parent = To<Element>(FlatTreeTraversal::Parent(*m10)); Element* m11 = body->QuerySelector("#m11"); Element* m11_slot_parent = To<Element>(FlatTreeTraversal::Parent(*m11)); ShadowRoot* shadow_root = m1->OpenShadowRoot(); Element* s11 = shadow_root->QuerySelector("#s11"); Element* s12 = shadow_root->QuerySelector("#s12"); Element* s120 = shadow_root->QuerySelector("#s120"); // Main tree node to main tree node EXPECT_EQ(*m1, FlatTreeTraversal::NextSkippingChildren(*m0)); EXPECT_EQ(*m0, FlatTreeTraversal::PreviousAbsoluteSibling(*m1)); // Distribute node to main tree node EXPECT_EQ(*m2, FlatTreeTraversal::NextSkippingChildren(*m10)); EXPECT_EQ(*m1, FlatTreeTraversal::PreviousAbsoluteSibling(*m2)); // Distribute node to node in shadow tree EXPECT_EQ(*s11, FlatTreeTraversal::NextSkippingChildren(*m11)); EXPECT_EQ(*m11_slot_parent, FlatTreeTraversal::PreviousAbsoluteSibling(*s11)); // Node in shadow tree to distributed node EXPECT_EQ(*s11, 
FlatTreeTraversal::NextSkippingChildren(*m11)); EXPECT_EQ(*m11_slot_parent, FlatTreeTraversal::PreviousAbsoluteSibling(*s11)); EXPECT_EQ(*m10_slot_parent, FlatTreeTraversal::NextSkippingChildren(*s120)); EXPECT_EQ(*s120, FlatTreeTraversal::PreviousAbsoluteSibling(*m10)); // Node in shadow tree to main tree EXPECT_EQ(*m2, FlatTreeTraversal::NextSkippingChildren(*s12)); EXPECT_EQ(*m1, FlatTreeTraversal::PreviousAbsoluteSibling(*m2)); } TEST_F(FlatTreeTraversalTest, AncestorsOf) { SetupDocumentTree("<div><div><div id=sample></div></div></div>"); Element* const sample = GetDocument().getElementById("sample"); HeapVector<Member<Node>> expected_nodes; for (Node* parent = FlatTreeTraversal::Parent(*sample); parent; parent = FlatTreeTraversal::Parent(*parent)) { expected_nodes.push_back(parent); } HeapVector<Member<Node>> actual_nodes; for (Node& ancestor : FlatTreeTraversal::AncestorsOf(*sample)) actual_nodes.push_back(&ancestor); EXPECT_EQ(expected_nodes, actual_nodes); } TEST_F(FlatTreeTraversalTest, InclusiveAncestorsOf) { SetupDocumentTree("<div><div><div id=sample></div></div></div>"); Element* const sample = GetDocument().getElementById("sample"); HeapVector<Member<Node>> expected_nodes; for (Node* parent = sample; parent; parent = FlatTreeTraversal::Parent(*parent)) { expected_nodes.push_back(parent); } HeapVector<Member<Node>> actual_nodes; for (Node& ancestor : FlatTreeTraversal::InclusiveAncestorsOf(*sample)) actual_nodes.push_back(&ancestor); EXPECT_EQ(expected_nodes, actual_nodes); } // Test case for // - lastWithin // - lastWithinOrSelf TEST_F(FlatTreeTraversalTest, lastWithin) { const char* main_html = "<div id='m0'>m0</div>" "<div id='m1'>" "<span slot='#m10' id='m10'>m10</span>" "<span slot='#m11' id='m11'>m11</span>" "<span id='m12'>m12</span>" // #m12 is not distributed. 
"</div>" "<div id='m2'></div>"; const char* shadow_html = "<slot name='#m11'></slot>" "<a id='s11'>s11</a>" "<a id='s12'>" "<slot name='#m10'></slot>" "</a>"; SetupSampleHTML(main_html, shadow_html, 1); Element* body = GetDocument().body(); Element* m0 = body->QuerySelector("#m0"); Element* m1 = body->QuerySelector("#m1"); Element* m2 = body->QuerySelector("#m2"); Element* m10 = body->QuerySelector("#m10"); ShadowRoot* shadow_root = m1->OpenShadowRoot(); Element* s11 = shadow_root->QuerySelector("#s11"); Element* s12 = shadow_root->QuerySelector("#s12"); EXPECT_EQ(m0->firstChild(), FlatTreeTraversal::LastWithin(*m0)); EXPECT_EQ(*m0->firstChild(), FlatTreeTraversal::LastWithinOrSelf(*m0)); EXPECT_EQ(m10->firstChild(), FlatTreeTraversal::LastWithin(*m1)); EXPECT_EQ(*m10->firstChild(), FlatTreeTraversal::LastWithinOrSelf(*m1)); EXPECT_EQ(nullptr, FlatTreeTraversal::LastWithin(*m2)); EXPECT_EQ(*m2, FlatTreeTraversal::LastWithinOrSelf(*m2)); EXPECT_EQ(s11->firstChild(), FlatTreeTraversal::LastWithin(*s11)); EXPECT_EQ(*s11->firstChild(), FlatTreeTraversal::LastWithinOrSelf(*s11)); EXPECT_EQ(m10->firstChild(), FlatTreeTraversal::LastWithin(*s12)); EXPECT_EQ(*m10->firstChild(), FlatTreeTraversal::LastWithinOrSelf(*s12)); } TEST_F(FlatTreeTraversalTest, previousPostOrder) { const char* main_html = "<div id='m0'>m0</div>" "<div id='m1'>" "<span slot='#m10' id='m10'>m10</span>" "<span slot='#m11' id='m11'>m11</span>" "</div>" "<div id='m2'>m2</div>"; const char* shadow_html = "<slot name='#m11'></slot>" "<a id='s11'>s11</a>" "<a id='s12'>" "<b id='s120'>s120</b>" "<slot name='#m10'></slot>" "</a>"; SetupSampleHTML(main_html, shadow_html, 1); Element* body = GetDocument().body(); Element* m0 = body->QuerySelector("#m0"); Element* m1 = body->QuerySelector("#m1"); Element* m2 = body->QuerySelector("#m2"); Element* m10 = body->QuerySelector("#m10"); Element* m10_slot_parent = To<Element>(FlatTreeTraversal::Parent(*m10)); Element* m11 = body->QuerySelector("#m11"); ShadowRoot* 
shadow_root = m1->OpenShadowRoot(); Element* s11 = shadow_root->QuerySelector("#s11"); Element* s12 = shadow_root->QuerySelector("#s12"); Element* s120 = shadow_root->QuerySelector("#s120"); EXPECT_EQ(*m0->firstChild(), FlatTreeTraversal::PreviousPostOrder(*m0)); EXPECT_EQ(*s12, FlatTreeTraversal::PreviousPostOrder(*m1)); EXPECT_EQ(*m10->firstChild(), FlatTreeTraversal::PreviousPostOrder(*m10)); EXPECT_EQ(*s120, FlatTreeTraversal::PreviousPostOrder(*m10->firstChild())); EXPECT_EQ(*s120, FlatTreeTraversal::PreviousPostOrder(*m10->firstChild(), s12)); EXPECT_EQ(*m11->firstChild(), FlatTreeTraversal::PreviousPostOrder(*m11)); EXPECT_EQ(*m0, FlatTreeTraversal::PreviousPostOrder(*m11->firstChild())); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousPostOrder(*m11->firstChild(), m11)); EXPECT_EQ(*m2->firstChild(), FlatTreeTraversal::PreviousPostOrder(*m2)); EXPECT_EQ(*s11->firstChild(), FlatTreeTraversal::PreviousPostOrder(*s11)); EXPECT_EQ(*m10_slot_parent, FlatTreeTraversal::PreviousPostOrder(*s12)); EXPECT_EQ(*s120->firstChild(), FlatTreeTraversal::PreviousPostOrder(*s120)); EXPECT_EQ(*s11, FlatTreeTraversal::PreviousPostOrder(*s120->firstChild())); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousPostOrder(*s120->firstChild(), s12)); } TEST_F(FlatTreeTraversalTest, nextSiblingNotInDocumentFlatTree) { const char* main_html = "<div id='m0'>m0</div>" "<div id='m1'>" "<span id='m10'>m10</span>" "<span id='m11'>m11</span>" "</div>" "<div id='m2'>m2</div>"; const char* shadow_html = "<content select='#m11'></content>"; SetupSampleHTML(main_html, shadow_html, 1); Element* body = GetDocument().body(); Element* m10 = body->QuerySelector("#m10"); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*m10)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*m10)); } TEST_F(FlatTreeTraversalTest, v1Simple) { const char* main_html = "<div id='host'>" "<div id='child1' slot='slot1'></div>" "<div id='child2' slot='slot2'></div>" "</div>"; const char* shadow_html = "<div 
id='shadow-child1'></div>" "<slot name='slot1'></slot>" "<slot name='slot2'></slot>" "<div id='shadow-child2'></div>"; SetupDocumentTree(main_html); Element* body = GetDocument().body(); Element* host = body->QuerySelector("#host"); Element* child1 = body->QuerySelector("#child1"); Element* child2 = body->QuerySelector("#child2"); AttachOpenShadowRoot(*host, shadow_html); ShadowRoot* shadow_root = host->OpenShadowRoot(); Element* slot1 = shadow_root->QuerySelector("[name=slot1]"); Element* slot2 = shadow_root->QuerySelector("[name=slot2]"); Element* shadow_child1 = shadow_root->QuerySelector("#shadow-child1"); Element* shadow_child2 = shadow_root->QuerySelector("#shadow-child2"); EXPECT_TRUE(slot1); EXPECT_TRUE(slot2); EXPECT_EQ(shadow_child1, FlatTreeTraversal::FirstChild(*host)); EXPECT_EQ(slot1, FlatTreeTraversal::NextSibling(*shadow_child1)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*child1)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*child2)); EXPECT_EQ(slot2, FlatTreeTraversal::NextSibling(*slot1)); EXPECT_EQ(shadow_child2, FlatTreeTraversal::NextSibling(*slot2)); } TEST_F(FlatTreeTraversalTest, v1Redistribution) { // composed tree: // d1 // ├──/shadow-root // │ └── d1-1 // │ ├──/shadow-root // │ │ ├── d1-1-1 // │ │ ├── slot name=d1-1-s1 // │ │ ├── slot name=d1-1-s2 // │ │ └── d1-1-2 // │ ├── d1-2 // │ ├── slot id=d1-s0 // │ ├── slot name=d1-s1 slot=d1-1-s1 // │ ├── slot name=d1-s2 // │ ├── d1-3 // │ └── d1-4 slot=d1-1-s1 // ├── d2 slot=d1-s1 // ├── d3 slot=d1-s2 // ├── d4 slot=nonexistent // └── d5 // flat tree: // d1 // └── d1-1 // ├── d1-1-1 // ├── slot name=d1-1-s1 // │ ├── slot name=d1-s1 slot=d1-1-s1 // │ │ └── d2 slot=d1-s1 // │ └── d1-4 slot=d1-1-s1 // ├── slot name=d1-1-s2 // └── d1-1-2 const char* main_html = "<div id='d1'>" "<div id='d2' slot='d1-s1'></div>" "<div id='d3' slot='d1-s2'></div>" "<div id='d4' slot='nonexistent'></div>" "<div id='d5'></div>" "</div>" "<div id='d6'></div>"; const char* shadow_html1 = "<div id='d1-1'>" 
"<div id='d1-2'></div>" "<slot id='d1-s0'></slot>" "<slot name='d1-s1' slot='d1-1-s1'></slot>" "<slot name='d1-s2'></slot>" "<div id='d1-3'></div>" "<div id='d1-4' slot='d1-1-s1'></div>" "</div>"; const char* shadow_html2 = "<div id='d1-1-1'></div>" "<slot name='d1-1-s1'></slot>" "<slot name='d1-1-s2'></slot>" "<div id='d1-1-2'></div>"; SetupDocumentTree(main_html); Element* body = GetDocument().body(); Element* d1 = body->QuerySelector("#d1"); Element* d2 = body->QuerySelector("#d2"); Element* d3 = body->QuerySelector("#d3"); Element* d4 = body->QuerySelector("#d4"); Element* d5 = body->QuerySelector("#d5"); Element* d6 = body->QuerySelector("#d6"); AttachOpenShadowRoot(*d1, shadow_html1); ShadowRoot* shadow_root1 = d1->OpenShadowRoot(); Element* d11 = shadow_root1->QuerySelector("#d1-1"); Element* d12 = shadow_root1->QuerySelector("#d1-2"); Element* d13 = shadow_root1->QuerySelector("#d1-3"); Element* d14 = shadow_root1->QuerySelector("#d1-4"); Element* d1s0 = shadow_root1->QuerySelector("#d1-s0"); Element* d1s1 = shadow_root1->QuerySelector("[name=d1-s1]"); Element* d1s2 = shadow_root1->QuerySelector("[name=d1-s2]"); AttachOpenShadowRoot(*d11, shadow_html2); ShadowRoot* shadow_root2 = d11->OpenShadowRoot(); Element* d111 = shadow_root2->QuerySelector("#d1-1-1"); Element* d112 = shadow_root2->QuerySelector("#d1-1-2"); Element* d11s1 = shadow_root2->QuerySelector("[name=d1-1-s1]"); Element* d11s2 = shadow_root2->QuerySelector("[name=d1-1-s2]"); EXPECT_TRUE(d5); EXPECT_TRUE(d12); EXPECT_TRUE(d13); EXPECT_TRUE(d1s0); EXPECT_TRUE(d1s1); EXPECT_TRUE(d1s2); EXPECT_TRUE(d11s1); EXPECT_TRUE(d11s2); EXPECT_EQ(d11, FlatTreeTraversal::Next(*d1)); EXPECT_EQ(d111, FlatTreeTraversal::Next(*d11)); EXPECT_EQ(d11s1, FlatTreeTraversal::Next(*d111)); EXPECT_EQ(d1s1, FlatTreeTraversal::Next(*d11s1)); EXPECT_EQ(d2, FlatTreeTraversal::Next(*d1s1)); EXPECT_EQ(d14, FlatTreeTraversal::Next(*d2)); EXPECT_EQ(d11s2, FlatTreeTraversal::Next(*d14)); EXPECT_EQ(d112, 
FlatTreeTraversal::Next(*d11s2)); EXPECT_EQ(d6, FlatTreeTraversal::Next(*d112)); EXPECT_EQ(d112, FlatTreeTraversal::Previous(*d6)); EXPECT_EQ(d11, FlatTreeTraversal::Parent(*d111)); EXPECT_EQ(d11, FlatTreeTraversal::Parent(*d112)); EXPECT_EQ(d1s1, FlatTreeTraversal::Parent(*d2)); EXPECT_EQ(d11s1, FlatTreeTraversal::Parent(*d14)); EXPECT_EQ(d1s2, FlatTreeTraversal::Parent(*d3)); EXPECT_EQ(nullptr, FlatTreeTraversal::Parent(*d4)); } TEST_F(FlatTreeTraversalTest, v1SlotInDocumentTree) { const char* main_html = "<div id='parent'>" "<slot>" "<div id='child1'></div>" "<div id='child2'></div>" "</slot>" "</div>"; SetupDocumentTree(main_html); Element* body = GetDocument().body(); Element* parent = body->QuerySelector("#parent"); Element* slot = body->QuerySelector("slot"); Element* child1 = body->QuerySelector("#child1"); Element* child2 = body->QuerySelector("#child2"); EXPECT_EQ(slot, FlatTreeTraversal::FirstChild(*parent)); EXPECT_EQ(child1, FlatTreeTraversal::FirstChild(*slot)); EXPECT_EQ(child2, FlatTreeTraversal::NextSibling(*child1)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*child2)); EXPECT_EQ(slot, FlatTreeTraversal::Parent(*child1)); EXPECT_EQ(slot, FlatTreeTraversal::Parent(*child2)); EXPECT_EQ(parent, FlatTreeTraversal::Parent(*slot)); } TEST_F(FlatTreeTraversalTest, v1FallbackContent) { const char* main_html = "<div id='d1'></div>"; const char* shadow_html = "<div id='before'></div>" "<slot><p>fallback content</p></slot>" "<div id='after'></div>"; SetupDocumentTree(main_html); Element* body = GetDocument().body(); Element* d1 = body->QuerySelector("#d1"); AttachOpenShadowRoot(*d1, shadow_html); ShadowRoot* shadow_root = d1->OpenShadowRoot(); Element* before = shadow_root->QuerySelector("#before"); Element* after = shadow_root->QuerySelector("#after"); Element* fallback_content = shadow_root->QuerySelector("p"); Element* slot = shadow_root->QuerySelector("slot"); EXPECT_EQ(before, FlatTreeTraversal::FirstChild(*d1)); EXPECT_EQ(after, 
FlatTreeTraversal::LastChild(*d1)); EXPECT_EQ(slot, FlatTreeTraversal::Parent(*fallback_content)); EXPECT_EQ(slot, FlatTreeTraversal::NextSibling(*before)); EXPECT_EQ(after, FlatTreeTraversal::NextSibling(*slot)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*fallback_content)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*after)); EXPECT_EQ(slot, FlatTreeTraversal::PreviousSibling(*after)); EXPECT_EQ(before, FlatTreeTraversal::PreviousSibling(*slot)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*fallback_content)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*before)); } TEST_F(FlatTreeTraversalTest, v1FallbackContentSkippedInTraversal) { const char* main_html = "<div id='d1'><span></span></div>"; const char* shadow_html = "<div id='before'></div>" "<slot><p>fallback content</p></slot>" "<div id='after'></div>"; SetupDocumentTree(main_html); Element* body = GetDocument().body(); Element* d1 = body->QuerySelector("#d1"); Element* span = body->QuerySelector("span"); AttachOpenShadowRoot(*d1, shadow_html); ShadowRoot* shadow_root = d1->OpenShadowRoot(); Element* before = shadow_root->QuerySelector("#before"); Element* after = shadow_root->QuerySelector("#after"); Element* fallback_content = shadow_root->QuerySelector("p"); Element* slot = shadow_root->QuerySelector("slot"); EXPECT_EQ(before, FlatTreeTraversal::FirstChild(*d1)); EXPECT_EQ(after, FlatTreeTraversal::LastChild(*d1)); EXPECT_EQ(slot, FlatTreeTraversal::Parent(*span)); EXPECT_EQ(d1, FlatTreeTraversal::Parent(*slot)); EXPECT_EQ(slot, FlatTreeTraversal::NextSibling(*before)); EXPECT_EQ(after, FlatTreeTraversal::NextSibling(*slot)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*after)); EXPECT_EQ(slot, FlatTreeTraversal::PreviousSibling(*after)); EXPECT_EQ(before, FlatTreeTraversal::PreviousSibling(*slot)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*before)); EXPECT_EQ(nullptr, FlatTreeTraversal::Parent(*fallback_content)); EXPECT_EQ(nullptr, 
FlatTreeTraversal::NextSibling(*fallback_content)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*fallback_content)); } TEST_F(FlatTreeTraversalTest, v1AllFallbackContent) { const char* main_html = "<div id='d1'></div>"; const char* shadow_html = "<slot name='a'><p id='x'>fallback content X</p></slot>" "<slot name='b'><p id='y'>fallback content Y</p></slot>" "<slot name='c'><p id='z'>fallback content Z</p></slot>"; SetupDocumentTree(main_html); Element* body = GetDocument().body(); Element* d1 = body->QuerySelector("#d1"); AttachOpenShadowRoot(*d1, shadow_html); ShadowRoot* shadow_root = d1->OpenShadowRoot(); Element* slot_a = shadow_root->QuerySelector("slot[name=a]"); Element* slot_b = shadow_root->QuerySelector("slot[name=b]"); Element* slot_c = shadow_root->QuerySelector("slot[name=c]"); Element* fallback_x = shadow_root->QuerySelector("#x"); Element* fallback_y = shadow_root->QuerySelector("#y"); Element* fallback_z = shadow_root->QuerySelector("#z"); EXPECT_EQ(slot_a, FlatTreeTraversal::FirstChild(*d1)); EXPECT_EQ(slot_c, FlatTreeTraversal::LastChild(*d1)); EXPECT_EQ(fallback_x, FlatTreeTraversal::FirstChild(*slot_a)); EXPECT_EQ(fallback_y, FlatTreeTraversal::FirstChild(*slot_b)); EXPECT_EQ(fallback_z, FlatTreeTraversal::FirstChild(*slot_c)); EXPECT_EQ(slot_a, FlatTreeTraversal::Parent(*fallback_x)); EXPECT_EQ(slot_b, FlatTreeTraversal::Parent(*fallback_y)); EXPECT_EQ(slot_c, FlatTreeTraversal::Parent(*fallback_z)); EXPECT_EQ(d1, FlatTreeTraversal::Parent(*slot_a)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*fallback_x)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*fallback_y)); EXPECT_EQ(nullptr, FlatTreeTraversal::NextSibling(*fallback_z)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*fallback_z)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*fallback_y)); EXPECT_EQ(nullptr, FlatTreeTraversal::PreviousSibling(*fallback_x)); } } // namespace flat_tree_traversal_test } // namespace blink
11,734
489
<reponame>zjzh/wharfee<gh_stars>100-1000 # -*- coding: utf-8 -*- from __future__ import unicode_literals from __future__ import print_function """ Helpers to connect to docker. """ import sys # make sure docker-py client API class according to docker-py version from docker import version_info as docker_version_info if docker_version_info >= (2, 0, 0): from docker.api import APIClient as DockerAPIClient else: from docker import AutoVersionClient as DockerAPIClient from docker.utils import kwargs_from_env def init_docker_client(timeout=2): """ Init docker-py client. """ if sys.platform.startswith('darwin') \ or sys.platform.startswith('win32'): # mac or win kwargs = kwargs_from_env() if 'tls' in kwargs: kwargs['tls'].assert_hostname = False kwargs['timeout'] = timeout client = DockerAPIClient(**kwargs) else: # unix-based client = DockerAPIClient( timeout=timeout, base_url='unix://var/run/docker.sock') return client def pull_required_images(client): """ Make sure we have busybox image pulled. :param client: AutoVersionClient """ for line in client.pull('busybox:latest', stream=True): print(line)
518
348
<gh_stars>100-1000 {"nom":"Fismes","circ":"2ème circonscription","dpt":"Marne","inscrits":3957,"abs":2556,"votants":1401,"blancs":136,"nuls":73,"exp":1192,"res":[{"nuance":"LR","nom":"<NAME>","voix":605},{"nuance":"REM","nom":"<NAME>","voix":587}]}
101
543
<reponame>texus/TGUI ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // // TGUI - Texus' Graphical User Interface // Copyright (C) 2012-2021 <NAME> (<EMAIL>) // // This software is provided 'as-is', without any express or implied warranty. // In no event will the authors be held liable for any damages arising from the use of this software. // // Permission is granted to anyone to use this software for any purpose, // including commercial applications, and to alter it and redistribute it freely, // subject to the following restrictions: // // 1. The origin of this software must not be misrepresented; // you must not claim that you wrote the original software. // If you use this software in a product, an acknowledgment // in the product documentation would be appreciated but is not required. // // 2. Altered source versions must be plainly marked as such, // and must not be misrepresented as being the original software. // // 3. This notice may not be removed or altered from any source distribution. 
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

#include <TGUI/Backend/Font/BackendFont.hpp>

#include <cmath>

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

namespace tgui
{
    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Reads the whole font file into memory and defers to the memory-based loader.
    // Throws Exception when the file cannot be read.
    bool BackendFont::loadFromFile(const String& filename)
    {
        std::size_t fileSize;
        const auto fileContents = readFileToMemory(filename, fileSize);
        if (!fileContents)
            throw Exception{"Failed to load '" + filename + "'"};

        return loadFromMemory(fileContents.get(), fileSize);
    }

    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Copies the caller's buffer (the font must outlive this object, so we can't
    // borrow it) and defers to the owning-pointer overload of loadFromMemory.
    bool BackendFont::loadFromMemory(const void* data, std::size_t sizeInBytes)
    {
        auto copiedData = MakeUniqueForOverwrite<std::uint8_t[]>(sizeInBytes);
        std::memcpy(copiedData.get(), data, sizeInBytes);
        return loadFromMemory(std::move(copiedData), sizeInBytes);
    }

    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    std::uint64_t BackendFont::constructGlyphKey(char32_t codePoint, unsigned int characterSize, bool bold, float outlineThickness)
    {
        // Create a unique key for every character.
        // Technically it would be possible to specify character sizes and outline thicknesses that can't uniquely be mapped
        // in the limited amount of bits, but this would require unrealistic sizes and would still unlikely cause a conflict
        // with another existing key.
        // NOTE(review): outline thickness is quantized to 1/100 units; character sizes
        // above 8191 would bleed into the outline bits — presumably never hit in practice.
        return (static_cast<std::uint64_t>(bold) << 63) // bit 64 = bold flag
            | (static_cast<std::uint64_t>(outlineThickness < 0) << 62) // bit 63 = sign of outline
            | (static_cast<std::uint64_t>(std::abs(outlineThickness) * 100) << 45) // bits 46-62 = outline
            | (static_cast<std::uint64_t>(characterSize) << 32) // bits 33-45 = character size
            | codePoint; // bits 1-32 = unicode codepoint
    }

    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Enables/disables texture smoothing for subsequently rendered glyphs.
    void BackendFont::setSmooth(bool smooth)
    {
        m_isSmooth = smooth;
    }

    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

    // Returns whether texture smoothing is currently enabled.
    bool BackendFont::isSmooth() const
    {
        return m_isSmooth;
    }

    /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
}

/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
1,036
6,197
package com.kickstarter.libs.utils;

import com.kickstarter.libs.Range;

import junit.framework.TestCase;

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

/**
 * Tests for {@code RangeUtils.consecutiveRanges}: partitioning a list of
 * integers into (startIndex, length) ranges of consecutive runs.
 */
public final class RangeUtilsTest extends TestCase {

  public void testRanges() {
    // Runs: [1] [3 4 5 6] [8] [10 11] -> four ranges.
    final List<Integer> input = Arrays.asList(1, 3, 4, 5, 6, 8, 10, 11);
    final List<Range> want = Arrays.asList(
      Range.create(0, 1),
      Range.create(1, 4),
      Range.create(5, 1),
      Range.create(6, 2)
    );
    assertEquals(want, RangeUtils.consecutiveRanges(input));
  }

  public void testRanges_WithEmptyArray() {
    // No elements -> no ranges.
    final List<Integer> input = Collections.emptyList();
    final List<Range> want = Collections.emptyList();
    assertEquals(want, RangeUtils.consecutiveRanges(input));
  }

  public void testRanges_WithRepeatedEntries() {
    // Equal neighbours count as one consecutive run covering all four indices.
    final List<Integer> input = Arrays.asList(1, 1, 1, 1);
    final List<Range> want = Collections.singletonList(Range.create(0, 4));
    assertEquals(want, RangeUtils.consecutiveRanges(input));
  }

  public void testRanges_WithNonMonotonicArray() {
    // Every descent starts a new run: [1 2] [1 2] [1 2].
    final List<Integer> input = Arrays.asList(1, 2, 1, 2, 1, 2);
    final List<Range> want = Arrays.asList(
      Range.create(0, 2),
      Range.create(2, 2),
      Range.create(4, 2)
    );
    assertEquals(want, RangeUtils.consecutiveRanges(input));
  }

  public void testRanges_WithSingleton() {
    // A single element is a run of length one.
    final List<Integer> input = Collections.singletonList(1);
    final List<Range> want = Collections.singletonList(Range.create(0, 1));
    assertEquals(want, RangeUtils.consecutiveRanges(input));
  }
}
626
13,057
/*
 * Copyright (c) 2017 Mockito contributors
 * This program is made available under the terms of the MIT License.
 */
package org.mockitousage.junitrule;

import static org.junit.Assert.assertEquals;
import static org.mockito.BDDMockito.given;
import static org.mockito.BDDMockito.willReturn;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;
import static org.mockito.Mockito.when;
import static org.mockitoutil.TestBase.filterLineNo;

import org.assertj.core.api.Assertions;
import org.junit.Rule;
import org.junit.Test;
import org.mockito.Mock;
import org.mockito.exceptions.misusing.PotentialStubbingProblem;
import org.mockito.exceptions.misusing.UnfinishedVerificationException;
import org.mockito.exceptions.misusing.UnnecessaryStubbingException;
import org.mockito.junit.MockitoJUnit;
import org.mockito.quality.Strictness;
import org.mockitousage.IMethods;
import org.mockitousage.strictness.ProductionCode;
import org.mockitoutil.SafeJUnitRule;

/**
 * Integration tests for the STRICT_STUBS JUnit rule: unused stubbings fail the
 * test, mismatched stubbing arguments fail fast, and stubbed invocations are
 * ignored by verifyNoMoreInteractions. Failure expectations are registered up
 * front via SafeJUnitRule.expectFailure.
 */
public class StrictJUnitRuleTest {

    // SafeJUnitRule wraps the strict-stubs rule so that expected failures can be asserted.
    @Rule public SafeJUnitRule rule = new SafeJUnitRule(MockitoJUnit.rule().strictness(Strictness.STRICT_STUBS));

    @Mock IMethods mock;
    @Mock IMethods mock2;

    // A test with no stubbing at all passes under strict stubs.
    @Test public void ok_when_no_stubbings() throws Throwable {
        mock.simpleMethod();
        verify(mock).simpleMethod();
    }

    // A stubbing that is exercised does not trigger UnnecessaryStubbingException.
    @Test public void ok_when_all_stubbings_used() throws Throwable {
        given(mock.simpleMethod(10)).willReturn("foo");
        mock.simpleMethod(10);
    }

    // Once a stubbing was used, a later call with a different argument is allowed.
    @Test public void ok_when_used_and_mismatched_argument() throws Throwable {
        given(mock.simpleMethod(10)).willReturn("foo");
        mock.simpleMethod(10);
        mock.simpleMethod(15);
    }

    @Test public void fails_when_unused_stubbings() throws Throwable {
        // expect
        rule.expectFailure(UnnecessaryStubbingException.class);

        // when: stubbing on 'mock' is never exercised (only 'mock2' is called)
        given(mock.simpleMethod(10)).willReturn("foo");
        mock2.simpleMethod(10);
    }

    @Test public void test_failure_trumps_unused_stubbings() throws Throwable {
        // expect: the test's own AssertionError is reported, not the unused stubbing
        rule.expectFailure(AssertionError.class, "x");

        // when
        given(mock.simpleMethod(10)).willReturn("foo");
        mock.otherMethod();
        throw new AssertionError("x");
    }

    @Test public void why_do_return_syntax_is_useful() throws Throwable {
        // Trade-off of Mockito strictness documented in test

        // expect
        rule.expectFailure(PotentialStubbingProblem.class);

        // when: when().thenReturn() invokes the mock, so the mismatched call below fails fast
        when(mock.simpleMethod(10)).thenReturn("10");
        ProductionCode.simpleMethod(mock, 20);
    }

    @Test public void fails_fast_when_stubbing_invoked_with_different_argument() throws Throwable {
        // expect: PotentialStubbingProblem whose message lists the unused stubbings
        // (filterLineNo normalizes line numbers in stack-trace elements to ':0')
        rule.expectFailure(
            new SafeJUnitRule.FailureAssert() {
                public void doAssert(Throwable t) {
                    Assertions.assertThat(t).isInstanceOf(PotentialStubbingProblem.class);
                    assertEquals(
                        filterLineNo(
                            "\n"
                                + "Strict stubbing argument mismatch. Please check:\n"
                                + " - this invocation of 'simpleMethod' method:\n"
                                + " mock.simpleMethod(15);\n"
                                + " -> at org.mockitousage.strictness.ProductionCode.simpleMethod(ProductionCode.java:0)\n"
                                + " - has following stubbing(s) with different arguments:\n"
                                + " 1. mock.simpleMethod(20);\n"
                                + " -> at org.mockitousage.junitrule.StrictJUnitRuleTest.fails_fast_when_stubbing_invoked_with_different_argument(StrictJUnitRuleTest.java:0)\n"
                                + " 2. mock.simpleMethod(30);\n"
                                + " -> at org.mockitousage.junitrule.StrictJUnitRuleTest.fails_fast_when_stubbing_invoked_with_different_argument(StrictJUnitRuleTest.java:0)\n"
                                + "Typically, stubbing argument mismatch indicates user mistake when writing tests.\n"
                                + "Mockito fails early so that you can debug potential problem easily.\n"
                                + "However, there are legit scenarios when this exception generates false negative signal:\n"
                                + " - stubbing the same method multiple times using 'given().will()' or 'when().then()' API\n"
                                + " Please use 'will().given()' or 'doReturn().when()' API for stubbing.\n"
                                + " - stubbed method is intentionally invoked with different arguments by code under test\n"
                                + " Please use default or 'silent' JUnit Rule (equivalent of Strictness.LENIENT).\n"
                                + "For more information see javadoc for PotentialStubbingProblem class."),
                        filterLineNo(t.getMessage()));
                }
            });

        // when stubbings in the test code:
        willReturn("10").given(mock).simpleMethod(10); // used
        willReturn("20").given(mock).simpleMethod(20); // unused
        willReturn("30").given(mock).simpleMethod(30); // unused

        // then
        mock.otherMethod(); // ok, different method
        mock.simpleMethod(10); // ok, stubbed with this argument

        // invocation in the code under test uses different argument and should fail immediately
        // this helps with debugging and is essential for Mockito strictness
        ProductionCode.simpleMethod(mock, 15);
    }

    @Test public void verify_no_more_interactions_ignores_stubs() throws Throwable {
        // when stubbing in test:
        given(mock.simpleMethod(10)).willReturn("foo");

        // and code under test does:
        mock.simpleMethod(10); // implicitly verifies the stubbing
        mock.otherMethod();

        // and in test we:
        verify(mock).otherMethod();
        verifyNoMoreInteractions(mock); // passes: the stubbed call is not an "unverified" interaction
    }

    @Test public void unused_stubs_with_multiple_mocks() throws Throwable {
        // expect: unused stubbings on BOTH mocks are reported; the used one is not
        rule.expectFailure(
            new SafeJUnitRule.FailureAssert() {
                public void doAssert(Throwable t) {
                    assertEquals(
                        filterLineNo(
                            "\n"
                                + "Unnecessary stubbings detected.\n"
                                + "Clean & maintainable test code requires zero unnecessary code.\n"
                                + "Following stubbings are unnecessary (click to navigate to relevant line of code):\n"
                                + " 1. -> at org.mockitousage.junitrule.StrictJUnitRuleTest.unused_stubs_with_multiple_mocks(StrictJUnitRuleTest.java:0)\n"
                                + " 2. -> at org.mockitousage.junitrule.StrictJUnitRuleTest.unused_stubs_with_multiple_mocks(StrictJUnitRuleTest.java:0)\n"
                                + "Please remove unnecessary stubbings or use 'lenient' strictness. More info: javadoc for UnnecessaryStubbingException class."),
                        filterLineNo(t.getMessage()));
                }
            });

        // when test has
        given(mock.simpleMethod(10)).willReturn("foo");
        given(mock2.simpleMethod(20)).willReturn("foo");
        given(mock.otherMethod()).willReturn("foo"); // used and should not be reported

        // and code has
        mock.otherMethod();
        mock2.booleanObjectReturningMethod();
    }

    @SuppressWarnings({"MockitoUsage", "CheckReturnValue"})
    @Test public void rule_validates_mockito_usage() throws Throwable {
        // expect
        rule.expectFailure(UnfinishedVerificationException.class);

        // when test contains unfinished verification (verify() without a method call)
        verify(mock);
    }
}
4,058
1,168
/**
 * The MIT License
 * Copyright © 2010 JmxTrans team
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
package com.googlecode.jmxtrans.model;

import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.annotation.JsonPropertyOrder;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.googlecode.jmxtrans.model.naming.typename.PrependingTypeNameValuesStringBuilder;
import com.googlecode.jmxtrans.model.naming.typename.TypeNameValuesStringBuilder;
import com.googlecode.jmxtrans.model.naming.typename.UseAllTypeNameValuesStringBuilder;
import lombok.EqualsAndHashCode;
import lombok.Getter;
import lombok.Setter;
import lombok.ToString;
import lombok.experimental.Accessors;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.annotation.Nonnull;
import javax.annotation.concurrent.NotThreadSafe;
import javax.annotation.concurrent.ThreadSafe;
import javax.management.AttributeList;
import javax.management.InstanceNotFoundException;
import javax.management.IntrospectionException;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServerConnection;
import javax.management.MalformedObjectNameException;
import javax.management.ObjectInstance;
import javax.management.ObjectName;
import javax.management.ReflectionException;
import java.io.IOException;
import java.rmi.UnmarshalException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Set;

import static com.google.common.base.MoreObjects.firstNonNull;
import static com.google.common.collect.ImmutableList.copyOf;
import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Sets.newLinkedHashSet;
import static java.util.Arrays.asList;

/**
 * Represents a JMX Query to ask for obj, attr and one or more keys.
 *
 * <p>Instances are immutable (all fields final, collections copied into
 * Guava immutable collections), hence the {@code @ThreadSafe} annotation.</p>
 *
 * @author jon
 */
@JsonInclude(JsonInclude.Include.NON_NULL)
@JsonPropertyOrder(value = {"obj", "attr", "typeNames", "resultAlias", "keys", "allowDottedKeys", "useAllTypeNames", "outputWriters"})
@ThreadSafe
@EqualsAndHashCode(exclude = {"outputWriters", "outputWriterInstances"})
@ToString(exclude = {"outputWriters", "typeNameValuesStringBuilder"})
public class Query {

	private static final Logger logger = LoggerFactory.getLogger(Query.class);

	/** The JMX object representation: java.lang:type=Memory */
	@Nonnull @Getter private final ObjectName objectName;

	@Nonnull @Getter private final ImmutableList<String> keys;

	@Nonnull @Getter private final ImmutableList<String> attr;

	/**
	 * The list of type names used in a JMX bean string when querying with a
	 * wildcard which is used to expose the actual type name value to the key
	 * string. e.g. for this JMX name
	 * <p/>
	 * typeName=name=PS Eden Space,type=MemoryPool
	 * <p/>
	 * If you add a typeName("name"), then it'll retrieve 'PS Eden Space' from
	 * the string.
	 * <p>
	 * The order of the elements of this set matches the order provided by the
	 * user.
	 */
	@Getter private final ImmutableSet<String> typeNames;

	/**
	 * The alias allows you to specify what you would like the results of the
	 * query to go into.
	 */
	@Getter private final String resultAlias;

	/**
	 * The useObjDomainAsKey property allows you to specify the use of the Domain portion of the Object Name
	 * as part of the output key instead of using the ClassName of the MBean which is the default behavior.
	 */
	@Getter private final boolean useObjDomainAsKey;

	@Getter private final boolean allowDottedKeys;

	@Getter private final boolean useAllTypeNames;

	@Nonnull @Getter private final ImmutableList<OutputWriterFactory> outputWriters;

	@Nonnull @Getter private final Iterable<OutputWriter> outputWriterInstances;

	// Strategy for rendering typeName values into result keys; chosen once at
	// construction from allowDottedKeys/useAllTypeNames/typeNames.
	private final TypeNameValuesStringBuilder typeNameValuesStringBuilder;

	/**
	 * JSON deserialization entry point; delegates to the private constructor
	 * with an empty list of instantiated output writers.
	 */
	@JsonCreator
	public Query(
			@JsonProperty("obj") String obj,
			@JsonProperty("keys") List<String> keys,
			@JsonProperty("attr") List<String> attr,
			@JsonProperty("typeNames") List<String> typeNames,
			@JsonProperty("resultAlias") String resultAlias,
			@JsonProperty("useObjDomainAsKey") boolean useObjDomainAsKey,
			@JsonProperty("allowDottedKeys") boolean allowDottedKeys,
			@JsonProperty("useAllTypeNames") boolean useAllTypeNames,
			@JsonProperty("outputWriters") List<OutputWriterFactory> outputWriters
	) {
		// For typeName, note the using copyOf does not change the order of
		// the elements.
		this(obj, keys, attr, ImmutableSet.copyOf(firstNonNull(typeNames, Collections.<String>emptySet())), resultAlias, useObjDomainAsKey, allowDottedKeys, useAllTypeNames,
				outputWriters, ImmutableList.<OutputWriter>of());
	}

	/** Convenience constructor taking output-writer factories only. */
	public Query(
			String obj,
			List<String> keys,
			List<String> attr,
			Set<String> typeNames,
			String resultAlias,
			boolean useObjDomainAsKey,
			boolean allowDottedKeys,
			boolean useAllTypeNames,
			List<OutputWriterFactory> outputWriters
	) {
		this(obj, keys, attr, typeNames, resultAlias, useObjDomainAsKey, allowDottedKeys, useAllTypeNames, outputWriters, ImmutableList.<OutputWriter>of());
	}

	/** Convenience constructor taking already-instantiated output writers only. */
	public Query(
			String obj,
			List<String> keys,
			List<String> attr,
			Set<String> typeNames,
			String resultAlias,
			boolean useObjDomainAsKey,
			boolean allowDottedKeys,
			boolean useAllTypeNames,
			ImmutableList<OutputWriter> outputWriters
	) {
		this(obj, keys, attr, typeNames, resultAlias, useObjDomainAsKey, allowDottedKeys, useAllTypeNames, ImmutableList.<OutputWriterFactory>of(), outputWriters);
	}

	/**
	 * Canonical constructor: normalizes all nullable collections to empty
	 * immutable copies and parses {@code obj} into an {@link ObjectName}.
	 *
	 * @throws IllegalArgumentException if {@code obj} is not a valid JMX object name
	 */
	private Query(
			String obj,
			List<String> keys,
			List<String> attr,
			Set<String> typeNames,
			String resultAlias,
			boolean useObjDomainAsKey,
			boolean allowDottedKeys,
			boolean useAllTypeNames,
			List<OutputWriterFactory> outputWriterFactories,
			List<OutputWriter> outputWriters
	) {
		try {
			this.objectName = new ObjectName(obj);
		} catch (MalformedObjectNameException e) {
			throw new IllegalArgumentException("Invalid object name: " + obj, e);
		}
		this.attr = copyOf(firstNonNull(attr, Collections.<String>emptyList()));
		this.resultAlias = resultAlias;
		this.useObjDomainAsKey = firstNonNull(useObjDomainAsKey, false);
		this.keys = copyOf(firstNonNull(keys, Collections.<String>emptyList()));
		this.allowDottedKeys = allowDottedKeys;
		this.useAllTypeNames = useAllTypeNames;
		this.outputWriters = copyOf(firstNonNull(outputWriterFactories, ImmutableList.<OutputWriterFactory>of()));

		// We need to preserve the order of typeNames. So note that copyOf
		// does not mess with the order.
		this.typeNames = ImmutableSet.copyOf(firstNonNull(typeNames, Collections.<String>emptySet()));
		this.typeNameValuesStringBuilder = makeTypeNameValuesStringBuilder();

		this.outputWriterInstances = copyOf(firstNonNull(outputWriters, ImmutableList.<OutputWriter>of()));
	}

	/** Renders the typeName values of {@code typeNameStr} using this query's key-building strategy. */
	public String makeTypeNameValueString(List<String> typeNames, String typeNameStr) {
		return this.typeNameValuesStringBuilder.build(typeNames, typeNameStr);
	}

	/** Queries the MBean server for all object names matching this query's (possibly wildcard) pattern. */
	public Iterable<ObjectName> queryNames(MBeanServerConnection mbeanServer) throws IOException {
		return mbeanServer.queryNames(objectName, null);
	}

	/**
	 * Fetches attribute values for {@code queryName} from the MBean server.
	 * When this query lists no attributes, all attributes reported by the
	 * MBean's metadata are fetched. Returns an empty list when there are no
	 * attributes or when a known-benign RMI unmarshalling problem occurs.
	 */
	public Iterable<Result> fetchResults(MBeanServerConnection mbeanServer, ObjectName queryName) throws InstanceNotFoundException, IntrospectionException, ReflectionException, IOException {
		ObjectInstance oi = mbeanServer.getObjectInstance(queryName);

		List<String> attributes;
		if (attr.isEmpty()) {
			// No explicit attributes configured: discover them all from MBeanInfo.
			attributes = new ArrayList<>();
			MBeanInfo info = mbeanServer.getMBeanInfo(queryName);
			for (MBeanAttributeInfo attrInfo : info.getAttributes()) {
				attributes.add(attrInfo.getName());
			}
		} else {
			attributes = attr;
		}

		try {
			if (!attributes.isEmpty()) {
				logger.debug("Executing queryName [{}] from query [{}]", queryName.getCanonicalName(), this);

				AttributeList al = mbeanServer.getAttributes(queryName, attributes.toArray(new String[attributes.size()]));

				return new JmxResultProcessor(this, oi, al.asList(), oi.getClassName(), queryName.getDomain()).getResults();
			}
		} catch (UnmarshalException ue) {
			// A ClassNotFoundException-caused unmarshal failure is expected for some
			// remote MBeans and is deliberately swallowed (best-effort collection).
			if ((ue.getCause() != null) && (ue.getCause() instanceof ClassNotFoundException)) {
				logger.debug("Bad unmarshall, continuing. This is probably ok and due to something like this: "
						+ "http://ehcache.org/xref/net/sf/ehcache/distribution/RMICacheManagerPeerListener.html#52", ue.getMessage());
			} else {
				throw ue;
			}
		}
		return ImmutableList.of();
	}

	/** Picks the key-building strategy implied by useAllTypeNames / typeNames / allowDottedKeys. */
	private TypeNameValuesStringBuilder makeTypeNameValuesStringBuilder() {
		String separator = isAllowDottedKeys() ? "." : TypeNameValuesStringBuilder.DEFAULT_SEPARATOR;
		Set<String> typeNames = getTypeNames();
		if (isUseAllTypeNames()) {
			return new UseAllTypeNameValuesStringBuilder(separator);
		} else if (typeNames != null && !typeNames.isEmpty()) {
			return new PrependingTypeNameValuesStringBuilder(separator, new ArrayList<>(typeNames));
		} else {
			return new TypeNameValuesStringBuilder(separator);
		}
	}

	public static Builder builder() {
		return new Builder();
	}

	/** Returns a builder pre-populated from {@code query} (output writers are NOT copied). */
	public static Builder builder(Query query) {
		return new Builder(query);
	}

	/** Sends {@code results} to every configured output writer instance, sequentially. */
	public void runOutputWritersForQuery(Server server, Iterable<Result> results) throws Exception {
		for (OutputWriter writer : getOutputWriterInstances()) {
			writer.doWrite(server, this, results);
		}
		logger.debug("Finished running outputWriters for query: {}", this);
	}

	/**
	 * Mutable builder for {@link Query}. Not thread-safe.
	 */
	@NotThreadSafe
	@Accessors(chain = true)
	public static final class Builder {
		@Setter private String obj;
		private final List<String> attr = newArrayList();
		@Setter private String resultAlias;
		private final List<String> keys = newArrayList();
		@Setter private boolean useObjDomainAsKey;
		@Setter private boolean allowDottedKeys;
		@Setter private boolean useAllTypeNames;
		private final List<OutputWriterFactory> outputWriterFactories = newArrayList();
		private final List<OutputWriter> outputWriters = newArrayList();

		// We need to pick an order preserving Set implementation here to
		// avoid unpredictable ordering of typeNames.
		private final Set<String> typeNames = newLinkedHashSet();

		private Builder() {}

		/** This builder does NOT copy output writers from the given query. */
		private Builder(Query query) {
			this.obj = query.objectName.toString();
			this.attr.addAll(query.attr);
			this.resultAlias = query.resultAlias;
			this.keys.addAll(query.keys);
			this.useObjDomainAsKey = query.useObjDomainAsKey;
			this.allowDottedKeys = query.allowDottedKeys;
			this.useAllTypeNames = query.useAllTypeNames;
			this.typeNames.addAll(query.typeNames);
		}

		public Builder addAttr(String... attr) {
			this.attr.addAll(asList(attr));
			return this;
		}

		public Builder addKey(String keys) {
			return addKeys(keys);
		}

		public Builder addKeys(String... keys) {
			this.keys.addAll(asList(keys));
			return this;
		}

		public Builder addOutputWriterFactory(OutputWriterFactory outputWriterFactory) {
			return addOutputWriterFactories(outputWriterFactory);
		}

		public Builder addOutputWriterFactories(OutputWriterFactory... outputWriterFactories) {
			this.outputWriterFactories.addAll(asList(outputWriterFactories));
			return this;
		}

		public Builder addOutputWriters(Collection<OutputWriter> outputWriters) {
			this.outputWriters.addAll(outputWriters);
			return this;
		}

		public Builder setTypeNames(Collection<String> typeNames) {
			this.typeNames.addAll(typeNames);
			return this;
		}

		/**
		 * Builds the Query. When factories were added they take precedence and
		 * any directly-added writer instances are ignored (mirrors the two
		 * disjoint constructor paths of Query).
		 */
		public Query build() {
			if (!outputWriterFactories.isEmpty()) {
				return new Query(
						this.obj,
						this.keys,
						this.attr,
						this.typeNames,
						this.resultAlias,
						this.useObjDomainAsKey,
						this.allowDottedKeys,
						this.useAllTypeNames,
						this.outputWriterFactories
				);
			}
			return new Query(
					this.obj,
					this.keys,
					this.attr,
					this.typeNames,
					this.resultAlias,
					this.useObjDomainAsKey,
					this.allowDottedKeys,
					this.useAllTypeNames,
					copyOf(this.outputWriters)
			);
		}
	}
}
4,313
1,658
/*
 * Copyright (C) 2018-2022 Chatopera Inc, <https://www.chatopera.com>
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.chatopera.cc.plugins.chatbot;

import com.chatopera.cc.basic.MainContext;
import com.chatopera.cc.socketio.SocketIOServing;
import com.corundumstudio.socketio.SocketIONamespace;
import com.corundumstudio.socketio.SocketIOServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import javax.annotation.PostConstruct;

/**
 * Registers the chatbot Socket.IO namespace on the shared Socket.IO server and
 * exposes it as a Spring bean with the chatbot event handler attached.
 */
@Configuration
public class ChatbotSocketIOConfigurer {
    private final static Logger logger = LoggerFactory.getLogger(ChatbotSocketIOConfigurer.class);

    // Namespace created in setup() and published by getSocketIONameSpace().
    private SocketIONamespace socketIONameSpace;

    @Autowired
    private SocketIOServing socketIOServing;

    // Create the namespace once this configuration bean has been constructed,
    // so it exists before the @Bean factory method below is invoked.
    @PostConstruct
    public void setup() {
        socketIONameSpace = socketIOServing.getServer().addNamespace(MainContext.NameSpaceEnum.CHATBOT.getNamespace());
    }

    /**
     * Exposes the chatbot namespace as the "chatbotNamespace" bean, wiring the
     * chatbot event listeners into it.
     */
    @Bean(name = "chatbotNamespace")
    public SocketIONamespace getSocketIONameSpace(SocketIOServer server) {
        socketIONameSpace.addListeners(new ChatbotEventHandler(server));
        return socketIONameSpace;
    }
}
563
1,130
/*
  Test suite for character constants, wide character constants, string
  literals and string-literal concatenation, as implemented by SmallerC.
  Each test_* function compares compile-time constant values against the
  expected values and bumps the global 'errors' counter on mismatch.

  How to compile for DOS (all mode(l)s: tiny/.COM, small/.EXE, huge/.EXE, unreal/.EXE, 32-bit DPMI/.EXE):
    smlrcc -dost chars.c -o charsdt.com
    smlrcc -doss chars.c -o charsds.exe
    smlrcc -dosh chars.c -o charsdh.exe
    smlrcc -dosu chars.c -o charsdu.exe
    smlrcc -dosp chars.c -o charsdp.exe
  How to compile for Windows:
    smlrcc -win chars.c -o charsw.exe
  How to compile for Linux:
    smlrcc -linux chars.c -o charsl
  How to compile for MacOS:
    smlrcc -macos chars.c -o charsm
*/

#include <limits.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TEST_WIDE

/* Compile-time assertion: array size is 0 (illegal) unless CHAR_BIT == 8. */
extern char StAtIcAsSeRt[CHAR_BIT == 8];

enum { WSZ = sizeof(wchar_t) };

int errors;

/* Narrow character constants: escapes, hex/octal, multi-character constants.
   Multi-char behavior is SmallerC-specific (see __SMALLER_C_16__/32__). */
void test_char(void)
{
  static int a[][2] =
  {
    sizeof 'A', sizeof(int),
    sizeof 'AB', sizeof(int),
    'A', 65,
    '"', 34,
    '?', 63,
    '\"', 34,
    '\?', 63,
    '\\', 92,
    '\'', 39,
    '\a', 7,
    '\b', 8,
    '\f', 12,
    '\n', 10,
    '\r', 13,
    '\t', 9,
    '\v', 11,
    '\x0', 0,
    '\x0000000000000000', 0,
    '\x10', 16,
    '\x00000000000000007F', 127,
    '\x00000000000000007f', 127,
    (unsigned char)'\xFf', 255,
    '\0', 0,
    '\10', 8,
    '\101', 65,
    '\177', 127,
    (unsigned char)'\377', 255,
    'AB', 0x4142,
    '\x41\x42', 0x4142
#ifdef __SMALLER_C_16__
    , '\xFF\xff', -1
#endif
#ifdef __SMALLER_C_32__
    , 'ABCD', 0x41424344
    , '\x41\x42\x43\x44', 0x41424344
    , '\xFf\xfF\xFF\xff', -1
#endif
  };
  int i;
  puts("test_char()");
  for (i = 0; i < sizeof a / sizeof a[0]; i++)
    if (a[i][0] != a[i][1])
      printf("a[%d][0]=%d != a[%d][1]=%d\n", i, a[i][0], i, a[i][1]), ++errors;
  /* Signedness of plain char decides how '\xFF' / '\377' read. */
#ifdef __SMALLER_C_SCHAR__
  if ('\xFF' != -1) printf("'\\xFF' != -1\n"), ++errors;
  if ('\377' != -1) printf("'\\377' != -1\n"), ++errors;
#endif
#ifdef __SMALLER_C_UCHAR__
  if ('\xFF' != 255) printf("'\\xFF' != 255\n"), ++errors;
  if ('\377' != 255) printf("'\\377' != 255\n"), ++errors;
#endif
#ifdef __SMALLER_C_16__
  if ('\xFF\xff' >= 0) printf("'\\xFF\\xff' >= 0\n"), ++errors;
#endif
#ifdef __SMALLER_C_32__
  if ('\xFf\xfF\xFF\xff' >= 0) printf("'\\xFf\\xfF\\xFF\\xff' >= 0\n"), ++errors;
#endif
}

#ifdef TEST_WIDE
/* Wide character constants, including wchar_t width/signedness variants. */
void test_wchar(void)
{
  static wchar_t a[][2] =
  {
    sizeof L'A', sizeof(wchar_t),
    L'A', 65,
    L'"', 34,
    L'?', 63,
    L'\"', 34,
    L'\?', 63,
    L'\\', 92,
    L'\'', 39,
    L'\a', 7,
    L'\b', 8,
    L'\f', 12,
    L'\n', 10,
    L'\r', 13,
    L'\t', 9,
    L'\v', 11,
    L'\x0', 0,
    L'\x0000000000000000', 0,
    L'\x10', 16,
    L'\x00000000000000007F', 127,
    L'\x00000000000000007f', 127,
    L'\xFf', 255,
    L'\xfFfF', 0xFFFF,
    L'\0', 0,
    L'\10', 8,
    L'\101', 65,
    L'\177', 127,
    L'\377', 255
#ifdef __SMALLER_C_WCHAR16__
    , L'\xFFff', 0xFFFF
#endif
#ifdef __SMALLER_C_WCHAR32__
    , L'\x12345678', 0x12345678
    , L'\xFFFFffff', 0xFFFFFFFF
#endif
  };
  int i;
  puts("test_wchar()");
  for (i = 0; i < sizeof a / sizeof a[0]; i++)
    if (a[i][0] != a[i][1])
      printf("a[%d][0]=%d != a[%d][1]=%d\n", i, a[i][0], i, a[i][1]), ++errors;
  /* Sign of all-ones wide constants depends on wchar_t width and signedness. */
#ifdef __SMALLER_C_WCHAR16__
#ifdef __SMALLER_C_SWCHAR__
  if (L'\xFFff' >= 0) printf("L'\\xFFff' >= 0\n"), ++errors;
#endif // __SMALLER_C_SWCHAR__
#ifdef __SMALLER_C_UWCHAR__
  if (L'\xFFff' < 0) printf("L'\\xFFff' < 0\n"), ++errors;
#endif // __SMALLER_C_UWCHAR__
#endif // __SMALLER_C_WCHAR16__
#ifdef __SMALLER_C_WCHAR32__
#ifdef __SMALLER_C_SWCHAR__
  if (L'\xFffFFFff' >= 0) printf("L'\\xFffFFFff' >= 0\n"), ++errors;
#endif // __SMALLER_C_SWCHAR__
#ifdef __SMALLER_C_UWCHAR__
  if (L'\xFffFFFff' < 0) printf("L'\\xFffFFFff' < 0\n"), ++errors;
#endif // __SMALLER_C_UWCHAR__
#endif // __SMALLER_C_WCHAR32__
}
#endif

/* Adjacent narrow string literals must concatenate; arrays sized both
   implicitly (with NUL) and explicitly (without room for NUL). */
void test_literal_concat(void)
{
  static char a0[] = "a""b";
  static char a1[] = { "A" "B" };
  static char a2[2] = "a" "b";
  static char a3[2] = { "A""B" };
  char* p0 = "a""b";
  char* p1 = { "A" "B" };
  puts("test_literal_concat()");
  if (sizeof a0 != 3) printf("sizeof a0 = %d != 3\n", (int)sizeof a0), ++errors;
  if (sizeof a1 != 3) printf("sizeof a1 = %d != 3\n", (int)sizeof a1), ++errors;
  if (sizeof a2 != 2) printf("sizeof a2 = %d != 2\n", (int)sizeof a2), ++errors;
  if (sizeof a3 != 2) printf("sizeof a3 = %d != 2\n", (int)sizeof a3), ++errors;
  if (sizeof "a" "b" != 3) printf("sizeof \"a\" \"b\" = %d != 3\n", (int)sizeof "a" "b"), ++errors;
  if (strcmp(a0, "ab")) printf("a0 = \"%.2s\" != \"ab\"\n", a0), ++errors;
  if (strcmp(a1, "AB")) printf("a1 = \"%.2s\" != \"AB\"\n", a1), ++errors;
  if (strcmp(p0, "ab")) printf("p0 = \"%.2s\" != \"ab\"\n", p0), ++errors;
  if (strcmp(p1, "AB")) printf("p1 = \"%.2s\" != \"AB\"\n", p1), ++errors;
  if (strncmp(a2, "ab", 2)) printf("a2 = \"%.2s\" != \"ab\"\n", a2), ++errors;
  if (strncmp(a3, "AB", 2)) printf("a3 = \"%.2s\" != \"AB\"\n", a3), ++errors;
}

#ifdef TEST_WIDE
/* Same concatenation checks for wide string literals (sizes scale by WSZ). */
void test_wide_literal_concat(void)
{
  static wchar_t a0[] = L"a" L"b";
  static wchar_t a1[] = { L"A"L"B" };
  static wchar_t a2[2] = L"a"L"b";
  static wchar_t a3[2] = { L"A" L"B" };
  wchar_t* p0 = L"a" L"b";
  wchar_t* p1 = { L"A"L"B" };
  puts("test_wide_literal_concat()");
  if (sizeof a0 != 3 * WSZ) printf("sizeof a0 = %d != %d\n", (int)sizeof a0, 3 * WSZ), ++errors;
  if (sizeof a1 != 3 * WSZ) printf("sizeof a1 = %d != %d\n", (int)sizeof a1, 3 * WSZ), ++errors;
  if (sizeof a2 != 2 * WSZ) printf("sizeof a2 = %d != %d\n", (int)sizeof a2, 2 * WSZ), ++errors;
  if (sizeof a3 != 2 * WSZ) printf("sizeof a3 = %d != %d\n", (int)sizeof a3, 2 * WSZ), ++errors;
  if (sizeof L"a" L"b" != 3 * WSZ) printf("sizeof L\"a\" L\"b\" = %d != %d\n", (int)sizeof L"a" L"b", 3 * WSZ), ++errors;
  if (memcmp(a0, L"ab", 3 * WSZ)) printf("a0 != L\"ab\"\n"), ++errors;
  if (memcmp(a1, L"AB", 3 * WSZ)) printf("a1 != L\"AB\"\n"), ++errors;
  if (memcmp(p0, L"ab", 3 * WSZ)) printf("p0 != L\"ab\"\n"), ++errors;
  if (memcmp(p1, L"AB", 3 * WSZ)) printf("p1 != L\"AB\"\n"), ++errors;
  if (memcmp(a2, L"ab", 2 * WSZ)) printf("a2 != L\"ab\"\n"), ++errors;
  if (memcmp(a3, L"AB", 2 * WSZ)) printf("a3 != L\"AB\"\n"), ++errors;
}
#endif

/* Escape sequences inside string literals, including escapes followed by
   ordinary characters ("\78" is '\7' then '8', "\xfg" is '\xf' then 'g'). */
void test_literal(void)
{
  static char ac[] =
  {
    "A" "'" "?" "\'" "\?" "\"" "\\" "\n" "\7" "\x0" "\x8" "\x00" "\78" "\7A" "\xfg" "\xFG" "\101101" "\x00000000000000007f"
  };
  static int ai[] =
  {
    65, 39, 63, 39, 63, 34, 92, 10, 7, 0, 8, 0, 7,56, 7,65, 15,103, 15,71, 65,49,48,49, 127, 0
  };
  /* Compile-time check that both arrays describe the same number of chars. */
  extern char StAtIcAsSeRt[sizeof ac / sizeof ac[0] == sizeof ai / sizeof ai[0]];
  int i;
  puts("test_literal()");
  for (i = 0; i < sizeof ac / sizeof ac[0]; i++)
    if (ac[i] != ai[i])
      printf("ac[%d]=%d != ai[%d]=%d\n", i, ac[i], i, ai[i]), ++errors;
}

#ifdef TEST_WIDE
/* Same escape-sequence checks for wide string literals. */
void test_wide_literal(void)
{
  static wchar_t ac[] =
  {
    L"A" L"'" L"?" L"\'" L"\?" L"\"" L"\\" L"\n" L"\7" L"\x0" L"\x8" L"\x00" L"\78" L"\7A" L"\xfg" L"\xFG" L"\101101" L"\x00000000000000007f"
  };
  static wchar_t ai[] =
  {
    65, 39, 63, 39, 63, 34, 92, 10, 7, 0, 8, 0, 7,56, 7,65, 15,103, 15,71, 65,49,48,49, 127, 0
  };
  /* Compile-time check that both arrays describe the same number of chars. */
  extern char StAtIcAsSeRt[sizeof ac / sizeof ac[0] == sizeof ai / sizeof ai[0]];
  int i;
  puts("test_wide_literal()");
  for (i = 0; i < sizeof ac / sizeof ac[0]; i++)
    if (ac[i] != ai[i])
      printf("ac[%d]=%d != ai[%d]=%d\n", i, ac[i], i, ai[i]), ++errors;
}
#endif

/* Runs every test group and reports the total error count;
   exits with failure status if any check failed. */
int main(void)
{
  test_char();
#ifdef TEST_WIDE
  test_wchar();
#endif
  test_literal_concat();
#ifdef TEST_WIDE
  test_wide_literal_concat();
#endif
  test_literal();
#ifdef TEST_WIDE
  test_wide_literal();
#endif
  printf("%d errors\n\n", errors);
  return errors ? EXIT_FAILURE : 0;
}
4,128
9,225
import pytest

from _pytest.stash import Stash
from _pytest.stash import StashKey


def test_stash() -> None:
    """Exercise the full Stash mapping contract in one ordered scenario.

    Covers: emptiness/truthiness, set/get/contains/delete for a single key,
    key isolation, setdefault, attribute immutability of the Stash object,
    and isolation between two Stash instances. The assertions mutate shared
    state step by step, so their order is significant.
    """
    stash = Stash()

    # A fresh stash is empty and falsy.
    assert len(stash) == 0
    assert not stash

    key1 = StashKey[str]()
    key2 = StashKey[int]()

    # Basic functionality - single key.
    assert key1 not in stash
    stash[key1] = "hello"
    assert key1 in stash
    assert stash[key1] == "hello"
    assert stash.get(key1, None) == "hello"
    stash[key1] = "world"
    assert stash[key1] == "world"
    # Has correct type (no mypy error).
    stash[key1] + "string"
    assert len(stash) == 1
    assert stash

    # No interaction with another key.
    assert key2 not in stash
    assert stash.get(key2, None) is None
    with pytest.raises(KeyError):
        stash[key2]
    with pytest.raises(KeyError):
        del stash[key2]

    stash[key2] = 1
    assert stash[key2] == 1
    # Has correct type (no mypy error).
    stash[key2] + 20
    # Deleting a key removes it; a second delete or lookup raises KeyError.
    del stash[key1]
    with pytest.raises(KeyError):
        del stash[key1]
    with pytest.raises(KeyError):
        stash[key1]

    # setdefault: returns the existing value when present, otherwise stores
    # and returns the provided default.
    stash[key1] = "existing"
    assert stash.setdefault(key1, "default") == "existing"
    assert stash[key1] == "existing"
    key_setdefault = StashKey[bytes]()
    assert stash.setdefault(key_setdefault, b"default") == b"default"
    assert stash[key_setdefault] == b"default"
    # key1, key2 and key_setdefault are now stored.
    assert len(stash) == 3
    assert stash

    # Can't accidentally add attributes to stash object itself.
    with pytest.raises(AttributeError):
        stash.foo = "nope"  # type: ignore[attr-defined]

    # No interaction with another stash.
    stash2 = Stash()
    key3 = StashKey[int]()
    assert key2 not in stash2
    stash2[key2] = 100
    stash2[key3] = 200
    assert stash2[key2] + stash2[key3] == 300
    # The first stash still holds its own value for key2 and never saw key3.
    assert stash[key2] == 1
    assert key3 not in stash
742
369
/* * Copyright © 2018 <NAME>, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package io.cdap.cdap.internal.app.runtime.distributed.remote; import com.google.common.util.concurrent.Futures; import com.google.common.util.concurrent.Service; import com.google.common.util.concurrent.Uninterruptibles; import io.cdap.cdap.common.app.RunIds; import io.cdap.cdap.common.conf.CConfiguration; import io.cdap.cdap.common.conf.Constants; import io.cdap.cdap.common.service.Retries; import io.cdap.cdap.common.service.RetryStrategies; import io.cdap.cdap.common.service.RetryStrategy; import io.cdap.cdap.proto.id.ProgramRunId; import org.apache.twill.api.Command; import org.apache.twill.api.ResourceReport; import org.apache.twill.api.RunId; import org.apache.twill.api.ServiceController; import org.apache.twill.api.TwillController; import org.apache.twill.api.logging.LogEntry; import org.apache.twill.api.logging.LogHandler; import org.apache.twill.common.Threads; import org.apache.twill.discovery.ServiceDiscovered; import org.apache.twill.internal.ServiceListenerAdapter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.Map; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CompletionStage; import java.util.concurrent.ExecutionException; import java.util.concurrent.Executor; import java.util.concurrent.Future; import java.util.concurrent.ScheduledExecutorService; import java.util.concurrent.TimeUnit; import 
java.util.concurrent.TimeoutException; import javax.annotation.Nullable; /** * Implementation of {@link TwillController} that uses {@link RemoteProcessController} to control a running program. */ class RemoteExecutionTwillController implements TwillController { private static final Logger LOG = LoggerFactory.getLogger(RemoteExecutionTwillController.class); private final ProgramRunId programRunId; private final RunId runId; private final CompletionStage<TwillController> started; private final CompletableFuture<TwillController> completion; private final ScheduledExecutorService scheduler; private final RemoteProcessController remoteProcessController; private final RemoteExecutionService executionService; private final long gracefulShutdownMillis; private final long pollCompletedMillis; private volatile boolean terminateOnServiceStop; RemoteExecutionTwillController(CConfiguration cConf, ProgramRunId programRunId, CompletionStage<?> startupCompletionStage, RemoteProcessController remoteProcessController, ScheduledExecutorService scheduler, RemoteExecutionService service) { this.programRunId = programRunId; this.runId = RunIds.fromString(programRunId.getRun()); this.gracefulShutdownMillis = cConf.getLong(Constants.RuntimeMonitor.GRACEFUL_SHUTDOWN_MS); this.pollCompletedMillis = cConf.getLong(Constants.RuntimeMonitor.POLL_TIME_MS); // On start up task succeeded, complete the started stage to unblock the onRunning() // On start up task failure, mark this controller as terminated with exception CompletableFuture<TwillController> completion = new CompletableFuture<>(); this.started = startupCompletionStage.thenApply(o -> RemoteExecutionTwillController.this); this.terminateOnServiceStop = true; this.started.exceptionally(throwable -> { completion.completeExceptionally(throwable); return RemoteExecutionTwillController.this; }); service.addListener(new ServiceListenerAdapter() { @Override public void terminated(Service.State from) { if (terminateOnServiceStop) { 
completion.complete(RemoteExecutionTwillController.this); } } @Override public void failed(Service.State from, Throwable failure) { if (terminateOnServiceStop) { completion.completeExceptionally(failure); } } }, Threads.SAME_THREAD_EXECUTOR); this.completion = completion; this.scheduler = scheduler; this.remoteProcessController = remoteProcessController; this.executionService = service; } public void release() { terminateOnServiceStop = false; executionService.stop(); } public void complete() { terminateOnServiceStop = true; executionService.stop(); try { RetryStrategy retryStrategy = RetryStrategies.timeLimit( 5, TimeUnit.SECONDS, RetryStrategies.exponentialDelay(500, 2000, TimeUnit.MILLISECONDS)); // Make sure the remote execution is completed // Give 5 seconds for the remote process to shutdown. After 5 seconds, issues a kill. long startTime = System.currentTimeMillis(); while (Retries.callWithRetries(remoteProcessController::isRunning, retryStrategy, Exception.class::isInstance)) { if (System.currentTimeMillis() - startTime >= 5000) { throw new IllegalStateException("Remote process for " + programRunId + " is still running"); } TimeUnit.SECONDS.sleep(1); } } catch (Exception e) { // If there is exception, use the remote execution controller to try killing the remote process try { LOG.debug("Force termination of remote process for program run {}", programRunId); remoteProcessController.kill(); } catch (Exception ex) { LOG.warn("Failed to terminate remote process for program run {}", programRunId, ex); } } } @Override public Future<? 
extends ServiceController> terminate() { if (completion.isDone()) { return CompletableFuture.completedFuture(this); } CompletableFuture<TwillController> result = completion.thenApply(r -> r); scheduler.execute(() -> { try { remoteProcessController.terminate(); // Poll for completion long killTimeMillis = System.currentTimeMillis() + gracefulShutdownMillis + pollCompletedMillis * 5; scheduler.schedule(new Runnable() { @Override public void run() { try { if (!remoteProcessController.isRunning()) { completion.complete(RemoteExecutionTwillController.this); return; } // If the process is still running, kills it if it reaches the kill time. if (System.currentTimeMillis() >= killTimeMillis) { remoteProcessController.kill(); completion.complete(RemoteExecutionTwillController.this); return; } // Schedule to check again scheduler.schedule(this, pollCompletedMillis, TimeUnit.MILLISECONDS); } catch (Exception e) { result.completeExceptionally(e); } } }, pollCompletedMillis, TimeUnit.MILLISECONDS); } catch (Exception e) { // Only fail the result future. We have to keep the terminationFuture to be not completed so that the // caller can retry termination. 
result.completeExceptionally(e); } }); return result; } @Override public void kill() { try { remoteProcessController.kill(); } catch (Exception e) { throw new RuntimeException("Failed when requesting program " + programRunId + " to stop", e); } try { Uninterruptibles.getUninterruptibly(completion); } catch (ExecutionException e) { // We ignore termination error since we only care about killing the program, but not interested in the final state LOG.debug("Exception raised when terminating program {}", programRunId, e); } } @Override public void addLogHandler(LogHandler handler) { LOG.trace("LogHandler is not supported for {}", getClass().getSimpleName()); } @Override public ServiceDiscovered discoverService(String serviceName) { throw new UnsupportedOperationException(); } @Override public Future<Integer> changeInstances(String runnable, int newCount) { throw new UnsupportedOperationException(); } @Nullable @Override public ResourceReport getResourceReport() { return null; } @Override public Future<String> restartAllInstances(String runnable) { throw new UnsupportedOperationException(); } @Override public Future<Set<String>> restartInstances(Map<String, ? extends Set<Integer>> runnableToInstanceIds) { throw new UnsupportedOperationException(); } @Override public Future<String> restartInstances(String runnable, int instanceId, int... 
moreInstanceIds) { throw new UnsupportedOperationException(); } @Override public Future<String> restartInstances(String runnable, Set<Integer> instanceIds) { throw new UnsupportedOperationException(); } @Override public Future<Map<String, LogEntry.Level>> updateLogLevels(Map<String, LogEntry.Level> logLevels) { return Futures.immediateFailedFuture(new UnsupportedOperationException("updateLogLevels is not supported")); } @Override public Future<Map<String, LogEntry.Level>> updateLogLevels(String runnableName, Map<String, LogEntry.Level> logLevelsForRunnable) { return Futures.immediateFailedFuture(new UnsupportedOperationException("updateLogLevels is not supported")); } @Override public Future<String[]> resetLogLevels(String... loggerNames) { return Futures.immediateFailedFuture(new UnsupportedOperationException("resetLogLevels is not supported")); } @Override public Future<String[]> resetRunnableLogLevels(String runnableName, String... loggerNames) { return Futures.immediateFailedFuture(new UnsupportedOperationException("resetRunnableLogLevels is not supported")); } @Override public RunId getRunId() { return runId; } @Override public Future<Command> sendCommand(Command command) { return Futures.immediateFailedFuture(new UnsupportedOperationException("sendCommand is not supported")); } @Override public Future<Command> sendCommand(String runnableName, Command command) { return Futures.immediateFailedFuture(new UnsupportedOperationException("sendCommand is not supported")); } @Override public void onRunning(Runnable runnable, Executor executor) { started.thenRunAsync(runnable, executor); } @Override public void onTerminated(Runnable runnable, Executor executor) { completion.whenCompleteAsync((remoteExecutionTwillController, throwable) -> runnable.run(), executor); } @Override public void awaitTerminated() throws ExecutionException { Uninterruptibles.getUninterruptibly(completion); } @Override public void awaitTerminated(long timeout, TimeUnit timeoutUnit) throws 
TimeoutException, ExecutionException { Uninterruptibles.getUninterruptibly(completion, timeout, timeoutUnit); } @Nullable @Override public TerminationStatus getTerminationStatus() { if (!completion.isDone()) { return null; } try { awaitTerminated(); return TerminationStatus.SUCCEEDED; } catch (ExecutionException e) { return TerminationStatus.FAILED; } } }
3,896
8,772
package org.apereo.cas.pm;

import org.apereo.cas.pm.impl.history.PasswordHistoryEntity;

import org.springframework.core.Ordered;

import java.util.Collection;

/**
 * This is {@link PasswordHistoryService}: tracks previously used passwords
 * per user so that password-change requests can be checked against
 * password-reuse policies.
 *
 * @author <NAME>
 * @since 6.1.0
 */
public interface PasswordHistoryService extends Ordered {

    @Override
    default int getOrder() {
        // Neutral default ordering among competing implementations.
        return 0;
    }

    /**
     * Determine whether the requested password change clashes with
     * tracked history, i.e. whether this password has been used before.
     *
     * @param changeRequest the change request
     * @return true if a matching entry exists in history, false otherwise
     */
    boolean exists(PasswordChangeRequest changeRequest);

    /**
     * Store the password from this change request in history
     * so future reuse can be detected.
     *
     * @param changeRequest the change request
     * @return true if the entry was stored successfully, false otherwise
     */
    boolean store(PasswordChangeRequest changeRequest);

    /**
     * Fetch all history entries across all users.
     *
     * @return the collection of all tracked history entities
     */
    Collection<? extends PasswordHistoryEntity> fetchAll();

    /**
     * Fetch history entries for a single user.
     *
     * @param username the username
     * @return the collection of that user's history entities
     */
    Collection<? extends PasswordHistoryEntity> fetch(String username);

    /**
     * Remove all history entries for the given user.
     *
     * @param username the username
     */
    void remove(String username);

    /**
     * Remove all history entries for all users.
     */
    void removeAll();
}
470
2,151
<gh_stars>1000+
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "services/preferences/unittest_common.h"

namespace prefs {

PrefStoreObserverMock::PrefStoreObserverMock() {}
PrefStoreObserverMock::~PrefStoreObserverMock() {}

// Blocks until |pref_store| notifies a value change for |key|.
// A temporary mock observer is registered, the gmock expectation quits the
// nested RunLoop when OnPrefValueChanged(key) fires (the change itself is
// triggered elsewhere, asynchronously), and the observer is removed before
// returning so it does not outlive this call.
void ExpectPrefChange(PrefStore* pref_store, base::StringPiece key) {
  PrefStoreObserverMock observer;
  pref_store->AddObserver(&observer);
  base::RunLoop run_loop;
  EXPECT_CALL(observer, OnPrefValueChanged(key.as_string()))
      .WillOnce(testing::WithoutArgs(
          testing::Invoke([&run_loop]() { run_loop.Quit(); })));
  run_loop.Run();
  pref_store->RemoveObserver(&observer);
}

}  // namespace prefs
265
13,648
import bench


def test(num):
    # Benchmark body: repeatedly construct an enumerate object over a small
    # list with a non-default start. The iter() wrapper and the enumerate
    # call are the operations being timed, so both are kept as-is.
    iterations = num // 20
    for _ in iter(range(iterations)):
        enumerate([1, 2], 1)


bench.run(test)
51
832
<filename>CDDStoreDemo/CDDStoreDemo/Classes/BeautyShop(美店)/Main/View/Cell/DCBeautyHotTopicCell.h<gh_stars>100-1000
//
//  DCBeautyHotTopicCell.h
//  CDDStoreDemo
//
//  Created by 陈甸甸 on 2017/12/7.
//  Copyright © 2017年 RocketsChen. All rights reserved.
//

#import <UIKit/UIKit.h>
@class DCBeautyHotItem;

// Collection-view cell for a "hot topic" entry in the beauty-shop screen.
@interface DCBeautyHotTopicCell : UICollectionViewCell

/* Data: the hot-topic model backing this cell
   (the setter presumably refreshes the cell's subviews — confirm in the .m) */
@property (strong , nonatomic)DCBeautyHotItem *hotItem;

@end
182
1,220
package cn.enilu.flash.wrapper;

import cn.enilu.flash.service.system.impl.ConstantFactory;
import cn.enilu.flash.warpper.BaseControllerWrapper;

import java.util.Map;

/**
 * Wrapper that enriches a task record map with the creating user's display
 * name: resolves the numeric {@code createBy} id to {@code userName}.
 *
 * @Author enilu
 * @Date 2021/7/25 2:20
 * @Version 1.0
 */
public class TaskWrapper extends BaseControllerWrapper {

    public TaskWrapper(Object obj) {
        super(obj);
    }

    @Override
    protected void warpTheMap(Map<String, Object> map) {
        // Defect fix: the original called map.get("createBy").toString()
        // unconditionally and threw NullPointerException for records with
        // no creator (e.g. system-generated tasks). Skip enrichment instead.
        Object createBy = map.get("createBy");
        if (createBy == null) {
            return;
        }
        Long userId = Long.valueOf(createBy.toString());
        map.put("userName", ConstantFactory.me().getUserNameById(userId));
    }
}
228
956
<reponame>ajitkhaparde/trex-core
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef __OCTEONTX_POOL_LOGS_H__
#define __OCTEONTX_POOL_LOGS_H__

#include <rte_debug.h>

/* Core logging macro for the OcteonTX FPA-VF mempool driver: routes a
 * printf-style message to DPDK's rte_log under the driver's dynamic log
 * type, prefixing the calling function name and line number. */
#define FPAVF_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, octeontx_logtype_fpavf,\
			"%s() line %u: " fmt "\n", __func__, __LINE__, ## args)

/* Per-severity convenience wrappers around FPAVF_LOG. */
#define fpavf_log_info(fmt, ...) FPAVF_LOG(INFO, fmt, ##__VA_ARGS__)
#define fpavf_log_dbg(fmt, ...) FPAVF_LOG(DEBUG, fmt, ##__VA_ARGS__)
#define fpavf_log_err(fmt, ...) FPAVF_LOG(ERR, fmt, ##__VA_ARGS__)
/* Function-entry tracing reuses the debug level. */
#define fpavf_func_trace fpavf_log_dbg

/* Dynamic log type id, registered elsewhere in the driver. */
extern int octeontx_logtype_fpavf;

#endif /* __OCTEONTX_POOL_LOGS_H__*/
342
66,762
#include <assert.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>

#include <linux/random.h>

#define N 2048

/* Field-for-field layout of struct rand_pool_info, which the
 * RNDADDENTROPY ioctl expects: entropy credit in bits, payload size in
 * bytes, then the payload itself. */
struct entropy {
  int ent_count;
  int size;
  unsigned char data[N];
};

/*
 * Reads arbitrary bytes from stdin and feeds them into the kernel entropy
 * pool via RNDADDENTROPY on /dev/random, crediting 8 bits of entropy per
 * byte. Used to seed QEMU bare-bones guests quickly.
 *
 * Fixes over the original: a read(2) failure (n == -1) was silently
 * treated like EOF, and the /dev/random fd was never closed.
 */
int main() {
  struct entropy buf;
  ssize_t n;

  int random_fd = open("/dev/random", O_RDWR);
  assert(random_fd >= 0);

  while ((n = read(0, &buf.data, N)) > 0) {
    buf.ent_count = n * 8; /* entropy credit, in bits */
    buf.size = n;
    if (ioctl(random_fd, RNDADDENTROPY, &buf) != 0) {
      perror("failed to add entropy");
    }
  }
  if (n < 0) {
    /* Report the read error instead of exiting as if input simply ended. */
    perror("failed to read input");
  }

  close(random_fd);
  return 0;
}
276
5,169
{ "name": "JBLoginDataCommands", "version": "0.1.1", "summary": "Some protocols for formalizing the login process of an login view.", "description": "Some protocols for formalizing the login process of an login view (have a look at JBLoginView).", "homepage": "https://github.com/barteljan/JBLoginDataCommands", "license": "MIT", "authors": { "<NAME>": "<EMAIL>" }, "source": { "git": "https://github.com/barteljan/JBLoginDataCommands.git", "tag": "0.1.1" }, "social_media_url": "https://twitter.com/janbartel", "platforms": { "ios": "7.0" }, "requires_arc": true, "source_files": "Pod/Classes/**/*", "resource_bundles": { "JBLoginDataCommands": [ "Pod/Assets/*.png" ] }, "public_header_files": "Pod/Classes/*.h" }
315
1,056
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.hudson.ui.impl; import java.awt.Toolkit; import java.io.File; import java.io.IOException; import java.net.MalformedURLException; import java.net.URL; import java.util.logging.Level; import java.util.logging.Logger; import java.util.regex.Matcher; import java.util.regex.Pattern; import org.netbeans.api.project.Project; import org.netbeans.modules.hudson.api.HudsonJob; import org.netbeans.modules.hudson.spi.HudsonLogger; import org.netbeans.modules.hudson.spi.HudsonLogger.HudsonLogSession; import org.netbeans.modules.hudson.spi.HudsonSCM; import org.netbeans.modules.hudson.ui.spi.ProjectHudsonProvider; import org.openide.awt.HtmlBrowser.URLDisplayer; import org.openide.awt.StatusDisplayer; import org.openide.filesystems.FileObject; import org.openide.filesystems.FileUtil; import org.openide.util.Lookup; import org.openide.util.NbBundle.Messages; import org.openide.util.RequestProcessor; import org.openide.util.lookup.ServiceProvider; import org.openide.windows.OutputEvent; import org.openide.windows.OutputListener; import org.openide.windows.OutputWriter; @ServiceProvider(service=HudsonLogger.class, position = Integer.MAX_VALUE - 100) public class PlainLogger 
implements HudsonLogger { private static final Logger LOG = Logger.getLogger(PlainLogger.class.getName()); @Override public HudsonLogSession createSession(final HudsonJob job) { return new HudsonLogSession() { final PlainLoggerLogic logic = new PlainLoggerLogic(job, job.getName()); public boolean handle(String line, OutputWriter stream) { OutputListener link = logic.findHyperlink(line); if (link != null) { try { stream.println(line, link); return true; } catch (IOException x) { LOG.log(Level.INFO, null, x); } } stream.println(line); return true; } }; } static class PlainLoggerLogic { private static final Pattern REMOTE_URL = Pattern.compile("\\b(https?://[^\\s)>]+)"); private final HudsonJob job; /** Looks for errors mentioning workspace files. Prefix captures Maven's [WARNING], Ant's [javac], etc. */ private final Pattern hyperlinkable; PlainLoggerLogic(HudsonJob job, String jobName) { this.job = job; // XXX support Windows build servers (using backslashes) String jobNameQ = Pattern.quote(jobName); hyperlinkable = Pattern.compile("\\s*(?:\\[.+\\] )?/.+?/(?:jobs/" + jobNameQ + "/workspace|workspace/" + jobNameQ + // NOI18N ")/([^:]+):(?:\\[?([0-9]+)[:,](?:([0-9]+)[]:])?)? (?:warning: )?(.+)"); // NOI18N } OutputListener findHyperlink(String line) { try { Matcher m = hyperlinkable.matcher(line); if (m.matches()) { final String path = m.group(1); final int row = m.group(2) != null ? Integer.parseInt(m.group(2)) - 1 : -1; final int col = m.group(3) != null ? 
Integer.parseInt(m.group(3)) - 1 : -1; final String message = m.group(4); return new Hyperlink(job, path, message, row, col); } m = REMOTE_URL.matcher(line); if (m.matches()) { return new URLHyperlink(new URL(m.group())); } } catch (MalformedURLException x) { LOG.log(Level.FINE, null, x); } return null; } } private static class Hyperlink implements OutputListener { private static final RequestProcessor RP = new RequestProcessor(Hyperlink.class); private final HudsonJob job; private final String path; private final String message; private final int row; private final int col; public Hyperlink(HudsonJob job, String path, String message, int row, int col) { this.job = job; this.path = path; this.message = message; this.row = row; this.col = col; } @Override public void outputLineAction(OutputEvent ev) { acted(true); } @Override public void outputLineSelected(OutputEvent ev) { acted(false); } @Messages({"# {0} - file path in workspace", "Hyperlinker.looking_for=Looking for {0}...", "# {0} - file path in workspace", "Hyperlinker.not_found=No file {0} found in remote workspace."}) private void acted(final boolean force) { RP.post(new Runnable() { @Override public void run() { FileObject f = null; Project p = ProjectHudsonProvider.getDefault().findAssociatedProject(ProjectHudsonProvider.Association.forJob(job)); if (p != null) { String localPath = null; File localRoot = FileUtil.toFile(p.getProjectDirectory()); if (localRoot != null) { for (HudsonSCM scm : Lookup.getDefault().lookupAll(HudsonSCM.class)) { localPath = scm.translateWorkspacePath(job, path, localRoot); if (localPath != null) { LOG.log(Level.FINE, "Translating remote path {0} to {1} using {2}", new Object[] {path, localPath, scm}); break; } } } if (localPath == null) { LOG.fine("Falling back to guess that remote workspace is a project root"); localPath = path; } // XXX permit localPath to include ../ segments; for Hg this is reasonable f = p.getProjectDirectory().getFileObject(localPath); LOG.log(Level.FINE, 
"Tried to find local file in {0} at {1} using {2}", new Object[] {p, f, localPath}); // XXX #159829: consider aligning local line number with remote line number somehow } if (f == null) { StatusDisplayer.getDefault().setStatusText(Bundle.Hyperlinker_looking_for(path)); f = job.getRemoteWorkspace().findResource(path); LOG.log(Level.FINE, "Tried to find remote file at {0} using {1}", new Object[] {f, path}); } if (f == null) { if (force) { StatusDisplayer.getDefault().setStatusText(Bundle.Hyperlinker_not_found(path)); Toolkit.getDefaultToolkit().beep(); } return; } // XXX could be useful to select this file in the workspace node (see related #159838) StatusDisplayer.getDefault().setStatusText(message); HudsonLoggerHelper.openAt(f, row, col, force); } }); } @Override public void outputLineCleared(OutputEvent ev) {} public @Override String toString() { return path + ":" + row + ":" + col + ":" + message; // NOI18N } } private static class URLHyperlink implements OutputListener { private final URL u; URLHyperlink(URL u) { this.u = u; } @Override public void outputLineAction(OutputEvent ev) { URLDisplayer.getDefault().showURL(u); } @Override public void outputLineSelected(OutputEvent ev) {} @Override public void outputLineCleared(OutputEvent ev) {} public @Override String toString() { return u.toString(); } } }
4,246
3,428
<reponame>ghalimi/stdlib {"id":"00234","group":"spam-2","checksum":{"type":"MD5","value":"64c94421011e896adab852386cd314d8"},"text":"From <EMAIL> Wed Jul 3 12:07:51 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: <EMAIL>inc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix on SuSE Linux 8.0 (i386)) with ESMTP id CD9C814F8B1\n\tfor <jm@localhost>; Wed, 3 Jul 2002 12:04:55 +0100 (IST)\nReceived: from dogma.slashnull.org [212.17.35.15]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Wed, 03 Jul 2002 12:04:55 +0100 (IST)\nReceived: from mandark.labs.netnoteinc.com ([213.105.180.140]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g45NLLu29588 for\n <<EMAIL>>; Mon, 6 May 2002 00:21:21 +0100\nReceived: from george.jokerville.com (george.jokerville.com\n [80.64.131.41]) by mandark.labs.netnoteinc.com (8.11.2/8.11.2) with SMTP\n id g45NL9D29507 for <<EMAIL>>; Mon, 6 May 2002 00:21:15 +0100\nMessage-Id: <<EMAIL>>\nContent-Disposition: inline\nMIME-Version: 1.0\nDate: Sun, 5 May 2002 23:21:15 UT\nX-X: +,3(M-38T-#8V-C0`\nSubject: Joke-Of -The- Day\nX-List-Unsubscribe: <<EMAIL>>\nFrom: \"Joke-Of -The- Day\" <<EMAIL>>\nReply-To: \"Joke-Of -The- Day\" <<EMAIL>>\nX-Stormpost-To: <EMAIL> 56446664 12\nTo: \"<EMAIL>\" <<EMAIL>>\nX-Mailer: StormPost 1.0\nContent-Type: text/html\nContent-Transfer-Encoding: 7bit\n\n<html><body><center><a href=http://www.vitafactory.com/><img src=http://www.vitafactory.com/images/vitafactory-mail-logo.jpg border=0><hr><a href=http://www.vitafactory.com/ad-saturday/1.html><img src=http://www.vitafactory.com/ad-saturday/1-adnew.gif border=0></a><a href=http://www.vitafactory.com/ad-saturday/2.html><img src=http://www.vitafactory.com/ad-saturday/2-adnew.gif border=0><hr></a> </b><p align=left><font color=#616161 face=\"Trebuchet MS\" size=1><br></font><font color=#616161 face=Verdana size=2><b>Unsubscribe:</b><br>Please send a blank mail to:<br><EMAIL> 
</font><br></body></html>\n\n\n"}
877
13,162
#include "Operators.h"

namespace IR
{
	// Returns the human-readable mnemonic for an opcode.
	//
	// The switch body is generated by expanding ENUM_OPERATORS (declared in
	// Operators.h) with a visitor macro that emits one
	// "case Opcode::name: return nameString;" per operator, so the mapping
	// stays in sync with the opcode list automatically. Unknown / invalid
	// opcode values fall through to "unknown" rather than invoking UB.
	const char* getOpcodeName(Opcode opcode)
	{
		switch(opcode)
		{
		#define VISIT_OPCODE(encoding,name,nameString,Imm,...) case Opcode::name: return nameString;
		ENUM_OPERATORS(VISIT_OPCODE)
		#undef VISIT_OPCODE
		default: return "unknown";
		};
	}
}
125
2,151
<reponame>zipated/src { "name": "content_utility", "display_name": "Content (utility process)", "interface_provider_specs": { "service_manager:connector": { "provides": { "browser": [ "content.mojom.Child", "content.mojom.ChildControl", "content.mojom.ChildHistogramFetcher", "content.mojom.ChildHistogramFetcherFactory", "content.mojom.ResourceUsageReporter", "IPC.mojom.ChannelBootstrap", "printing.mojom.PdfToEmfConverterFactory", "printing.mojom.PdfToPwgRasterConverter", "service_manager.mojom.ServiceFactory" ], "service_manager:service_factory": [ "service_manager.mojom.ServiceFactory" ] }, "requires": { "*": [ "app" ], "content_browser": [ "dwrite_font_proxy", "field_trials", "font_cache" ], "device": [ "device:power_monitor", "device:time_zone_monitor" ] } } }, "required_files" : { "v8_natives_data" : [ { "path": "natives_blob.bin", "platform": "linux" }, { "path": "assets/natives_blob.bin", "platform": "android" } ] } }
665
371
{ "name": "relative-deps", "version": "1.0.5", "description": "Installs local dependencies for optimal developer experience", "main": "index.js", "scripts": { "test": "echo \"Error: no test specified\" && exit 1" }, "bin": { "relative-deps": "./cli.js" }, "repository": { "type": "git", "url": "git+https://github.com/mweststrate/relative-deps.git" }, "keywords": [ "yarn", "npm", "link", "relative", "dependencies" ], "author": "<NAME>", "license": "MIT", "bugs": { "url": "https://github.com/mweststrate/relative-deps/issues" }, "homepage": "https://github.com/mweststrate/relative-deps#readme", "dependencies": { "checksum": "^0.1.1", "globby": "^9.2.0", "lodash": "^4.17.15", "read-pkg-up": "^6.0.0", "rimraf": "^2.6.3", "tar": "^6.0.5", "yargs": "^15.0.2", "yarn-or-npm": "^3.0.1" } }
424
312
/*******************************************************************************
 * Copyright (c) 2015 Eclipse RDF4J contributors, Aduna, and others.
 * All rights reserved. This program and the accompanying materials
 * are made available under the terms of the Eclipse Distribution License v1.0
 * which accompanies this distribution, and is available at
 * http://www.eclipse.org/org/documents/edl-v10.php.
 *******************************************************************************/
package org.eclipse.rdf4j.sail.base;

import org.eclipse.rdf4j.sail.SailException;

/**
 * Common interface to objects that throw {@link SailException} on close.
 * <p>
 * Narrows {@link AutoCloseable#close()}'s {@code throws Exception} clause to
 * {@link SailException}, so callers of sail resources only need to handle
 * that one checked type (and try-with-resources blocks stay precise).
 *
 * @author <NAME>
 */
public interface SailClosable extends AutoCloseable {

	/**
	 * Closes this resource, relinquishing any underlying resources.
	 *
	 * @throws SailException if this resource cannot be closed
	 */
	@Override
	void close() throws SailException;
}
228
692
from collections import namedtuple import torch from torch.nn import Conv2d, BatchNorm2d, PReLU, ReLU, Sigmoid, MaxPool2d, AdaptiveAvgPool2d, Sequential, Module, Linear import torch.nn.functional as F """ ArcFace implementation from [TreB1eN](https://github.com/TreB1eN/InsightFace_Pytorch) """ class Flatten(Module): def forward(self, input): return input.view(input.size(0), -1) def l2_norm(input, axis=1): norm = torch.norm(input, 2, axis, True) output = torch.div(input, norm) return output class Bottleneck(namedtuple('Block', ['in_channel', 'depth', 'stride'])): """ A named tuple describing a ResNet block. """ def get_block(in_channel, depth, num_units, stride=2): return [Bottleneck(in_channel, depth, stride)] + [Bottleneck(depth, depth, 1) for i in range(num_units - 1)] def get_blocks(num_layers): if num_layers == 50: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=4), get_block(in_channel=128, depth=256, num_units=14), get_block(in_channel=256, depth=512, num_units=3) ] elif num_layers == 100: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=13), get_block(in_channel=128, depth=256, num_units=30), get_block(in_channel=256, depth=512, num_units=3) ] elif num_layers == 152: blocks = [ get_block(in_channel=64, depth=64, num_units=3), get_block(in_channel=64, depth=128, num_units=8), get_block(in_channel=128, depth=256, num_units=36), get_block(in_channel=256, depth=512, num_units=3) ] else: raise ValueError("Invalid number of layers: {}. 
Must be one of [50, 100, 152]".format(num_layers)) return blocks class SEModule(Module): def __init__(self, channels, reduction): super(SEModule, self).__init__() self.avg_pool = AdaptiveAvgPool2d(1) self.fc1 = Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False) self.relu = ReLU(inplace=True) self.fc2 = Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False) self.sigmoid = Sigmoid() def forward(self, x): module_input = x x = self.avg_pool(x) x = self.fc1(x) x = self.relu(x) x = self.fc2(x) x = self.sigmoid(x) return module_input * x class bottleneck_IR(Module): def __init__(self, in_channel, depth, stride): super(bottleneck_IR, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth) ) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth) ) def forward(self, x): shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut class bottleneck_IR_SE(Module): def __init__(self, in_channel, depth, stride): super(bottleneck_IR_SE, self).__init__() if in_channel == depth: self.shortcut_layer = MaxPool2d(1, stride) else: self.shortcut_layer = Sequential( Conv2d(in_channel, depth, (1, 1), stride, bias=False), BatchNorm2d(depth) ) self.res_layer = Sequential( BatchNorm2d(in_channel), Conv2d(in_channel, depth, (3, 3), (1, 1), 1, bias=False), PReLU(depth), Conv2d(depth, depth, (3, 3), stride, 1, bias=False), BatchNorm2d(depth), SEModule(depth, 16) ) def forward(self, x): shortcut = self.shortcut_layer(x) res = self.res_layer(x) return res + shortcut class SeparableConv2d(torch.nn.Module): def __init__(self, in_channels, out_channels, kernel_size, bias=False): super(SeparableConv2d, self).__init__() self.depthwise = 
Conv2d(in_channels, in_channels, kernel_size=kernel_size, groups=in_channels, bias=bias, padding=1) self.pointwise = Conv2d(in_channels, out_channels, kernel_size=1, bias=bias) def forward(self, x): out = self.depthwise(x) out = self.pointwise(out) return out def _upsample_add(x, y): """Upsample and add two feature maps. Args: x: (Variable) top feature map to be upsampled. y: (Variable) lateral feature map. Returns: (Variable) added feature map. Note in PyTorch, when input size is odd, the upsampled feature map with `F.upsample(..., scale_factor=2, mode='nearest')` maybe not equal to the lateral feature map size. e.g. original input size: [N,_,15,15] -> conv2d feature map size: [N,_,8,8] -> upsampled feature map size: [N,_,16,16] So we choose bilinear upsample which supports arbitrary output sizes. """ _, _, H, W = y.size() return F.interpolate(x, size=(H, W), mode='bilinear', align_corners=True) + y class SeparableBlock(Module): def __init__(self, input_size, kernel_channels_in, kernel_channels_out, kernel_size): super(SeparableBlock, self).__init__() self.input_size = input_size self.kernel_size = kernel_size self.kernel_channels_in = kernel_channels_in self.kernel_channels_out = kernel_channels_out self.make_kernel_in = Linear(input_size, kernel_size * kernel_size * kernel_channels_in) self.make_kernel_out = Linear(input_size, kernel_size * kernel_size * kernel_channels_out) self.kernel_linear_in = Linear(kernel_channels_in, kernel_channels_in) self.kernel_linear_out = Linear(kernel_channels_out, kernel_channels_out) def forward(self, features): features = features.view(-1, self.input_size) kernel_in = self.make_kernel_in(features).view(-1, self.kernel_size, self.kernel_size, 1, self.kernel_channels_in) kernel_out = self.make_kernel_out(features).view(-1, self.kernel_size, self.kernel_size, self.kernel_channels_out, 1) kernel = torch.matmul(kernel_out, kernel_in) kernel = self.kernel_linear_in(kernel).permute(0, 1, 2, 4, 3) kernel = 
self.kernel_linear_out(kernel) kernel = kernel.permute(0, 4, 3, 1, 2) return kernel
2,378
360
import torch import torch.nn.functional as F from lietorch import SE3, Sim3 MIN_DEPTH = 0.1 def extract_intrinsics(intrinsics): return intrinsics[...,None,None,:].unbind(dim=-1) def iproj(disps, intrinsics): """ pinhole camera inverse projection """ ht, wd = disps.shape[2:] fx, fy, cx, cy = extract_intrinsics(intrinsics) y, x = torch.meshgrid( torch.arange(ht).to(disps.device).float(), torch.arange(wd).to(disps.device).float()) i = torch.ones_like(disps) X = (x - cx) / fx Y = (y - cy) / fy return torch.stack([X, Y, i, disps], dim=-1) def proj(Xs, intrinsics, jacobian=False): """ pinhole camera projection """ fx, fy, cx, cy = extract_intrinsics(intrinsics) X, Y, Z, D = Xs.unbind(dim=-1) d = torch.where(Z.abs() < 0.001, torch.zeros_like(Z), 1.0/Z) x = fx * (X * d) + cx y = fy * (Y * d) + cy coords = torch.stack([x,y, D*d], dim=-1) if jacobian: B, N, H, W = d.shape o = torch.zeros_like(d) proj_jac = torch.stack([ fx*d, o, -fx*X*d*d, o, o, fy*d, -fy*Y*d*d, o, o, o, -D*d*d, d, ], dim=-1).view(B, N, H, W, 3, 4) return coords, proj_jac return coords, None def actp(Gij, X0, jacobian=False): """ action on point cloud """ X1 = Gij[:,:,None,None] * X0 if jacobian: X, Y, Z, d = X1.unbind(dim=-1) o = torch.zeros_like(d) B, N, H, W = d.shape if isinstance(Gij, SE3): Ja = torch.stack([ d, o, o, o, Z, -Y, o, d, o, -Z, o, X, o, o, d, Y, -X, o, o, o, o, o, o, o, ], dim=-1).view(B, N, H, W, 4, 6) elif isinstance(Gij, Sim3): Ja = torch.stack([ d, o, o, o, Z, -Y, X, o, d, o, -Z, o, X, Y, o, o, d, Y, -X, o, Z, o, o, o, o, o, o, o ], dim=-1).view(B, N, H, W, 4, 7) return X1, Ja return X1, None def projective_transform(poses, depths, intrinsics, ii, jj, jacobian=False): """ map points from ii->jj """ # inverse project (pinhole) X0 = iproj(depths[:,ii], intrinsics[:,ii]) # transform Gij = poses[:,jj] * poses[:,ii].inv() X1, Ja = actp(Gij, X0, jacobian=jacobian) # project (pinhole) x1, Jp = proj(X1, intrinsics[:,jj], jacobian=jacobian) # exclude points too close to camera valid = 
((X1[...,2] > MIN_DEPTH) & (X0[...,2] > MIN_DEPTH)).float() valid = valid.unsqueeze(-1) if jacobian: Jj = torch.matmul(Jp, Ja) Ji = -Gij[:,:,None,None,None].adjT(Jj) return x1, valid, (Ji, Jj) return x1, valid def induced_flow(poses, disps, intrinsics, ii, jj): """ optical flow induced by camera motion """ ht, wd = disps.shape[2:] y, x = torch.meshgrid( torch.arange(ht).to(disps.device).float(), torch.arange(wd).to(disps.device).float()) coords0 = torch.stack([x, y], dim=-1) coords1, valid = projective_transform(poses, disps, intrinsics, ii, jj) return coords1[...,:2] - coords0, valid
1,733
450
<reponame>OpenHFT/Chronicle-Core /* * Copyright 2016-2020 chronicle.software * * https://chronicle.software * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.openhft.chronicle.core.cleaner.impl.reflect; import net.openhft.chronicle.core.Jvm; import net.openhft.chronicle.core.cleaner.spi.ByteBufferCleanerService; import net.openhft.chronicle.core.internal.util.DirectBufferUtil; import java.lang.invoke.MethodHandle; import java.lang.invoke.MethodHandles; import java.lang.invoke.MethodType; import java.nio.ByteBuffer; import java.util.logging.Logger; public final class ReflectionBasedByteBufferCleanerService implements ByteBufferCleanerService { private static final String JDK8_CLEANER_CLASS_NAME = "sun.misc.Cleaner"; private static final String JDK9_CLEANER_CLASS_NAME = "jdk.internal.ref.Cleaner"; private static final MethodHandle CLEANER_METHOD; private static final MethodHandle CLEAN_METHOD; private static final Impact IMPACT; static { final MethodHandles.Lookup lookup = MethodHandles.lookup(); final String cleanerClassname = Jvm.isJava9Plus() ? 
JDK9_CLEANER_CLASS_NAME : JDK8_CLEANER_CLASS_NAME; MethodHandle cleaner = null; MethodHandle clean = null; Impact impact = Impact.SOME_IMPACT; try { final Class<?> cleanerClass = Class.forName(cleanerClassname); cleaner = lookup.findVirtual(DirectBufferUtil.directBufferClass(), "cleaner", MethodType.methodType(cleanerClass)); clean = lookup.findVirtual(cleanerClass, "clean", MethodType.methodType(void.class)); } catch (NoSuchMethodException | ClassNotFoundException | IllegalAccessException e) { // Don't want to record this in tests so just send to slf4j Logger.getLogger(ReflectionBasedByteBufferCleanerService.class.getName()) .warning("Make sure you have set the command line option " + "\"--illegal-access=permit --add-exports java.base/jdk.internal.ref=ALL-UNNAMED\" " + "to enable " + ReflectionBasedByteBufferCleanerService.class.getSimpleName()); impact = Impact.UNAVAILABLE; } CLEAN_METHOD = clean; CLEANER_METHOD = cleaner; IMPACT = impact; } @Override public void clean(final ByteBuffer buffer) { if (IMPACT == Impact.UNAVAILABLE) { // There might not be a cleaner after all. // See https://github.com/OpenHFT/Chronicle-Core/issues/140 Logger.getLogger(ReflectionBasedByteBufferCleanerService.class.getName()) .warning("Cleaning is not available. The ByteBuffer 0x" + Integer.toHexString(System.identityHashCode(buffer)) + " could not be explicitly cleaned and will thus linger until the next GC."); } else { try { final Object cleaner = CLEANER_METHOD.invoke(DirectBufferUtil.directBufferClass().cast(buffer)); CLEAN_METHOD.invoke(cleaner); } catch (Throwable throwable) { throw Jvm.rethrow(throwable); } } } @Override public Impact impact() { return IMPACT; } }
1,457
964
<gh_stars>100-1000 [ { "queryName": "IAM Audit Not Properly Configured", "severity": "HIGH", "line": 3 }, { "queryName": "IAM Audit Not Properly Configured", "severity": "HIGH", "line": 9 }, { "queryName": "IAM Audit Not Properly Configured", "severity": "HIGH", "line": 19 }, { "queryName": "IAM Audit Not Properly Configured", "severity": "HIGH", "line": 23 } ]
189
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ #ifndef INCLUDED_unotools_OPTIONS_HXX #define INCLUDED_unotools_OPTIONS_HXX #include "sal/config.h" #include "unotools/unotoolsdllapi.h" /* The class utl::detail::Options provides a kind of multiplexer. It implements a ConfigurationListener that is usually registered at a ConfigItem class. At the same time it implements a ConfigurationBroadcaster that allows further ("external") listeners to register. Once the class deriving from Options is notified about configuration changes by the ConfigItem if its content has been changed by calling some of its methods, a call of the Options::NotifyListeners() method will send out notifications to all external listeners. 
*/ namespace utl { class ConfigurationBroadcaster; class IMPL_ConfigurationListenerList; // interface for configuration listener class UNOTOOLS_DLLPUBLIC ConfigurationListener { public: virtual void ConfigurationChanged( ConfigurationBroadcaster* p, sal_uInt32 nHint=0 ) = 0; }; // complete broadcasting implementation class UNOTOOLS_DLLPUBLIC ConfigurationBroadcaster { IMPL_ConfigurationListenerList* mpList; sal_Int32 m_nBroadcastBlocked; // broadcast only if this is 0 sal_uInt32 m_nBlockedHint; public: void AddListener( utl::ConfigurationListener* pListener ); void RemoveListener( utl::ConfigurationListener* pListener ); // notify listeners; nHint is an implementation detail of the particular class deriving from ConfigurationBroadcaster void NotifyListeners( sal_uInt32 nHint ); ConfigurationBroadcaster(); virtual ~ConfigurationBroadcaster(); virtual void BlockBroadcasts( bool bBlock ); }; namespace detail { // A base class for the various option classes supported by // unotools/source/config/itemholderbase.hxx (which must be public, as it is // shared between unotools, svl and svt) // It also provides an implementation for a Configuration Listener and inherits a broadcaster implementation class UNOTOOLS_DLLPUBLIC Options : public utl::ConfigurationBroadcaster, public utl::ConfigurationListener { public: Options(); virtual ~Options() = 0; private: UNOTOOLS_DLLPRIVATE Options(Options &); // not defined UNOTOOLS_DLLPRIVATE void operator =(Options &); // not defined protected: virtual void ConfigurationChanged( ::utl::ConfigurationBroadcaster* p, sal_uInt32 nHint=0 ); }; } } #endif
963
1,900
/* * Copyright Terracotta, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.ehcache.config.units; import org.ehcache.config.ResourceUnit; /** * A {@link ResourceUnit} that designates memory quantity. */ public enum MemoryUnit implements ResourceUnit { /** * Bytes. */ B("B", 0), /** * Kilobytes. */ KB("kB", 10), /** * Megabytes. */ MB("MB", 20), /** * Gigabytes. */ GB("GB", 30), /** * Terabytes. */ TB("TB", 40), /** * Petabytes. */ PB("PB", 50); /** the index of this unit */ private final int index; private final String stringForm; /** Internal constructor */ MemoryUnit(String stringForm, int index) { this.stringForm = stringForm; this.index = index; } /** * Computes <pre>amount * 2^delta</pre>. * * The result is always rounded toward zero. * * @param delta log<sub>2</sub>(divisor) * @param amount dividend * @throws ArithmeticException if the result overflows */ private static long doConvert(int delta, long amount) throws ArithmeticException { if (delta == 0 || amount == 0) { return amount; } else if (delta < 0) { // Hacker's Delight : 10-1 long t = amount >> (-delta - 1); t >>>= 64 + delta; t += amount; return t >> -delta; } else if (delta >= Long.numberOfLeadingZeros(amount < 0 ? ~amount : amount)) { throw new ArithmeticException("Conversion overflows"); } else { return amount << delta; } } /** * Converts {@code quantity} in this unit to bytes. 
* * @param quantity the quantity * @return the quantity in bytes */ public long toBytes(long quantity) { return doConvert(index - B.index, quantity); } /** * Converts {@code quantity} in {@code unit} into this unit. * * @param quantity quantity to convert * @param unit {@code quantity}'s unit * @return the quantity in this unit */ public long convert(long quantity, MemoryUnit unit) { return doConvert(unit.index - index, quantity); } /** * {@inheritDoc} */ @Override public String toString() { return stringForm; } /** * {@inheritDoc} */ @Override public int compareTo(long thisSize, long thatSize, ResourceUnit thatUnit) throws IllegalArgumentException { if (thatUnit instanceof MemoryUnit) { MemoryUnit mThatUnit = (MemoryUnit) thatUnit; if (index < mThatUnit.index) { try { return Long.signum(thisSize - convert(thatSize, mThatUnit)); } catch (ArithmeticException e) { return Long.signum(mThatUnit.convert(thisSize, this) - thatSize); } } else { try { return Long.signum(mThatUnit.convert(thisSize, this) - thatSize); } catch (ArithmeticException e) { return Long.signum(thisSize - convert(thatSize, mThatUnit)); } } } else { throw new IllegalArgumentException(); } } }
1,253
456
#include <ospray/ospray.h> #include <iostream> int main(int argc, char* argv[]) { if (ospInit() != OSP_NO_ERROR) { std::cout << "Could not initialize ospray" << std::endl; return 0; } ospShutdown(); return 0; }
123
1,607
<filename>src/test/java/org/assertj/core/internal/strings/Strings_assertContainsAnyOf_Test.java /* * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * Copyright 2012-2022 the original author or authors. */ package org.assertj.core.internal.strings; import static org.assertj.core.api.Assertions.catchThrowable; import static org.assertj.core.api.BDDAssertions.then; import static org.assertj.core.error.ShouldContainAnyOf.shouldContainAnyOf; import static org.assertj.core.error.ShouldNotBeNull.shouldNotBeNull; import static org.assertj.core.test.TestData.someInfo; import static org.assertj.core.util.Arrays.array; import static org.assertj.core.util.AssertionsUtil.expectAssertionError; import org.assertj.core.internal.StandardComparisonStrategy; import org.assertj.core.internal.StringsBaseTest; import org.junit.jupiter.api.Test; class Strings_assertContainsAnyOf_Test extends StringsBaseTest { @Test void should_fail_if_actual_is_null() { // GIVEN CharSequence actual = null; CharSequence[] values = array("Yoda", "Luke"); // WHEN AssertionError assertionError = expectAssertionError(() -> strings.assertContainsAnyOf(someInfo(), actual, values)); // THEN then(assertionError).hasMessage(shouldNotBeNull().create()); } @Test void should_fail_if_values_is_null() { // GIVEN CharSequence actual = "Master Yoda"; CharSequence[] values = null; // WHEN Throwable thrown = catchThrowable(() -> strings.assertContainsAnyOf(someInfo(), actual, values)); // THEN 
then(thrown).isInstanceOf(NullPointerException.class) .hasMessage("The array of values to look for should not be null"); } @Test void should_fail_if_values_is_empty() { // GIVEN CharSequence actual = "<NAME>"; CharSequence[] values = array(); // WHEN Throwable thrown = catchThrowable(() -> strings.assertContainsAnyOf(someInfo(), actual, values)); // THEN then(thrown).isInstanceOf(IllegalArgumentException.class) .hasMessage("The array of values to look for should not be empty"); } @Test void should_fail_if_values_contains_null() { // GIVEN CharSequence actual = "<NAME>"; CharSequence[] values = array("Yoda", "Luke", null); // WHEN Throwable thrown = catchThrowable(() -> strings.assertContainsAnyOf(someInfo(), actual, values)); // THEN then(thrown).isInstanceOf(NullPointerException.class) .hasMessage("Expecting CharSequence elements not to be null but found one at index 2"); } @Test void should_fail_if_actual_does_not_contain_any_value() { // GIVEN CharSequence actual = "Leia"; CharSequence[] values = array("Yoda", "Luke"); // WHEN AssertionError assertionError = expectAssertionError(() -> strings.assertContainsAnyOf(someInfo(), actual, values)); // THEN then(assertionError).hasMessage(shouldContainAnyOf(actual, values, StandardComparisonStrategy.instance()).create()); } @Test void should_pass_if_actual_contains_any_value() { // GIVEN CharSequence actual = "<NAME>"; CharSequence[] values = array("Yoda", "Luke"); // WHEN/THEN strings.assertContainsAnyOf(someInfo(), actual, values); } @Test void should_pass_if_actual_contains_any_value_according_to_custom_comparison_strategy() { // GIVEN CharSequence actual = "<NAME>"; CharSequence[] values = array("YODA", "LUKE"); // WHEN/THEN stringsWithCaseInsensitiveComparisonStrategy.assertContainsAnyOf(someInfo(), actual, values); } }
1,359
12,711
{"type":"tag","loc":{"start":{"line":1,"column":1},"filename":"/cases/escape-chars.pug","end":{"line":1,"column":7}},"val":"script"} {"type":"dot","loc":{"start":{"line":1,"column":7},"filename":"/cases/escape-chars.pug","end":{"line":1,"column":8}}} {"type":"start-pipeless-text","loc":{"start":{"line":1,"column":8},"filename":"/cases/escape-chars.pug","end":{"line":1,"column":8}}} {"type":"text","loc":{"start":{"line":2,"column":3},"filename":"/cases/escape-chars.pug","end":{"line":2,"column":18}},"val":"var re = /\\d+/;"} {"type":"end-pipeless-text","loc":{"start":{"line":2,"column":18},"filename":"/cases/escape-chars.pug","end":{"line":2,"column":18}}} {"type":"eos","loc":{"start":{"line":2,"column":18},"filename":"/cases/escape-chars.pug","end":{"line":2,"column":18}}}
270
711
<reponame>jingetiema2100/MicroCommunity<gh_stars>100-1000 package com.java110.order.smo.impl; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; import com.java110.core.client.RestTemplate; import com.java110.core.factory.GenerateCodeFactory; import com.java110.dto.order.OrderDto; import com.java110.dto.order.OrderItemDto; import com.java110.order.dao.ICenterServiceDAO; import com.java110.order.smo.IOIdServiceSMO; import com.java110.utils.util.BeanConvertUtil; import com.java110.utils.util.DateUtil; import com.java110.utils.util.StringUtil; import com.java110.vo.ResultVo; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.http.*; import org.springframework.stereotype.Service; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; /** * 事务处理服务类 * Created by wuxw on 2018/4/13. */ @Service("oIdServiceSMOImpl") public class OIdServiceSMOImpl implements IOIdServiceSMO { private static Logger logger = LoggerFactory.getLogger(OIdServiceSMOImpl.class); public static final String FALLBACK_URL = "http://SERVICE_NAME/businessApi/fallBack"; public static final String SERVICE_NAME = "SERVICE_NAME"; @Autowired private ICenterServiceDAO centerServiceDAOImpl; @Autowired private RestTemplate restTemplate; @Override public ResponseEntity<String> createOId(OrderDto orderDto) { orderDto.setoId(GenerateCodeFactory.getOId()); if (StringUtil.isEmpty(orderDto.getAppId())) { throw new IllegalArgumentException("未包含appId"); } if (StringUtil.isEmpty(orderDto.getExtTransactionId())) { throw new IllegalArgumentException("未包含交互日志"); } if (StringUtil.isEmpty(orderDto.getRequestTime())) { throw new IllegalArgumentException("未包含请求时间"); } if (StringUtil.isEmpty(orderDto.getUserId())) { throw new IllegalArgumentException("未包含用户ID"); } //保存订单信息 centerServiceDAOImpl.saveOrder(BeanConvertUtil.beanCovertMap(orderDto)); return new 
ResponseEntity<String>(JSONObject.toJSONString(orderDto), HttpStatus.OK); } @Override public ResponseEntity<String> fallBackOId(OrderDto orderDto) { if (StringUtil.isEmpty(orderDto.getoId())) { throw new IllegalArgumentException("未包含事务ID"); } //判断OID是否存在 orderDto = BeanConvertUtil.covertBean(centerServiceDAOImpl.getOrder(BeanConvertUtil.beanCovertMap(orderDto)), OrderDto.class); if (orderDto == null) { return new ResponseEntity<String>("没有需要回退事务", HttpStatus.NOT_FOUND); } //查询 事务项 Map orderItem = new HashMap(); orderItem.put("oId", orderDto.getoId()); List<Map> orderItemMaps = centerServiceDAOImpl.getOrderItems(orderItem); if (orderItemMaps == null || orderItemMaps.size() < 1) { return new ResponseEntity<String>("没有需要回退事务", HttpStatus.NOT_FOUND); } List<OrderItemDto> orderItemDtos = BeanConvertUtil.covertBeanList(orderItemMaps, OrderItemDto.class); HttpEntity<String> httpEntity = null; HttpHeaders header = new HttpHeaders(); List<OrderItemDto> errorOrderItemDtos = new ArrayList<>(); for (OrderItemDto orderItemDto : orderItemDtos) { try { JSONArray params = generateParam(orderItemDto); httpEntity = new HttpEntity<String>(params.toJSONString(), header); restTemplate.exchange(FALLBACK_URL.replace(SERVICE_NAME, orderItemDto.getServiceName()), HttpMethod.POST, httpEntity, String.class); //标记为订单项失败 Map info = new HashMap(); info.put("finishTime", DateUtil.getNow(DateUtil.DATE_FORMATE_STRING_A)); info.put("statusCd", "E"); info.put("bId", orderItemDto.getbId()); info.put("oId", orderDto.getoId()); centerServiceDAOImpl.updateOrderItem(info); //删除 事务日志 //centerServiceDAOImpl.deleteUnItemLog(info); } catch (Exception e) { logger.error("回退事务失败", e); errorOrderItemDtos.add(orderItemDto); } } //标记为订单失败 Map info = new HashMap(); info.put("finishTime", DateUtil.getNow(DateUtil.DATE_FORMATE_STRING_A)); info.put("statusCd", "E"); info.put("oId", orderDto.getoId()); centerServiceDAOImpl.updateOrder(info); if (errorOrderItemDtos.size() > 0) { return new 
ResponseEntity<String>(JSONArray.toJSONString(errorOrderItemDtos), HttpStatus.BAD_REQUEST); } else { return new ResponseEntity<String>("", HttpStatus.OK); } } /** * 生成回滚sql * * @param orderItemDto * @return */ private JSONArray generateParam(OrderItemDto orderItemDto) { JSONArray params = null; switch (orderItemDto.getAction()) { case "ADD": params = generateDeleteSql(orderItemDto); break; case "MOD": params = generateUpdateSql(orderItemDto); break; case "DEL": params = generateInsertSql(orderItemDto); break; } return params; } /** * 生成insert语句 * * @param orderItemDto * @return */ private JSONArray generateInsertSql(OrderItemDto orderItemDto) { JSONArray params = new JSONArray(); JSONObject param = null; String sql = ""; String logText = orderItemDto.getLogText(); JSONObject logTextObj = JSONObject.parseObject(logText); JSONArray preValues = logTextObj.getJSONArray("preValue"); for (int preValueIndex = 0; preValueIndex < preValues.size(); preValueIndex++) { sql = "insert into " + orderItemDto.getActionObj() + " "; param = new JSONObject(); JSONObject keyValue = preValues.getJSONObject(preValueIndex); if (keyValue.isEmpty()) { continue; } String keySql = "( "; String valueSql = " values ("; for (String key : keyValue.keySet()) { keySql += (key + ","); valueSql += (keyValue.getString(key) + ","); } if (keySql.endsWith(",")) { keySql = keySql.substring(0, keySql.length() - 1); } if (valueSql.endsWith(",")) { valueSql = valueSql.substring(0, valueSql.length() - 1); } sql = sql + keySql + ") " + valueSql + ") "; param.put("fallBackSql", sql); params.add(param); } return params; } private JSONArray generateUpdateSql(OrderItemDto orderItemDto) { JSONArray params = new JSONArray(); JSONObject param = null; String sql = ""; String logText = orderItemDto.getLogText(); JSONObject logTextObj = JSONObject.parseObject(logText); JSONArray preValues = logTextObj.getJSONArray("preValue"); JSONArray afterValues = logTextObj.getJSONArray("afterValue"); for (int preValueIndex = 0; 
preValueIndex < preValues.size(); preValueIndex++) { sql = "update " + orderItemDto.getActionObj() + " set "; param = new JSONObject(); JSONObject keyValue = preValues.getJSONObject(preValueIndex); JSONObject afterKeyValue = afterValues.getJSONObject(preValueIndex); if (keyValue.isEmpty() || afterKeyValue.isEmpty()) { continue; } String whereSql = " where 1=1 "; for (String key : keyValue.keySet()) { sql += (key + "=" + keyValue.getString(key) + ","); if ("''".equals(afterKeyValue.getString(key))) { //条件中不拼写 为空的结果 continue; } whereSql += (" and " + key + " = " + afterKeyValue.getString(key)); } if (sql.endsWith(",")) { sql = sql.substring(0, sql.length() - 1); } if (sql.endsWith(whereSql)) { // 说明没有条件 不做回退 回退整个表是有问题的 continue; } sql += whereSql; param.put("fallBackSql", sql); params.add(param); } return params; } /** * 生成删除语句 * * @param orderItemDto */ private JSONArray generateDeleteSql(OrderItemDto orderItemDto) { JSONArray params = new JSONArray(); JSONObject param = null; String sql = ""; String logText = orderItemDto.getLogText(); JSONObject logTextObj = JSONObject.parseObject(logText); JSONArray afterValues = logTextObj.getJSONArray("afterValue"); String whereSql = " where 1=1 "; for (int preValueIndex = 0; preValueIndex < afterValues.size(); preValueIndex++) { sql = "delete from " + orderItemDto.getActionObj() + whereSql; param = new JSONObject(); JSONObject keyValue = afterValues.getJSONObject(preValueIndex); if (keyValue.isEmpty()) { continue; } for (String key : keyValue.keySet()) { if (!StringUtil.isEmpty(keyValue.getString(key))) { sql += (" and " + key + "=" + keyValue.getString(key)); } } if (sql.endsWith(whereSql)) { // 说明没有条件 不做回退 回退整个表是有问题的 continue; } sql += " limit 1";//防止程序异常删除 尴尬 根据业务场景 没有需要删除多余 1条的场景 param.put("fallBackSql", sql); params.add(param); } return params; } @Override public ResponseEntity<String> createOrderItem(OrderItemDto orderItemDto) { if (StringUtil.isEmpty(orderItemDto.getoId())) { return new 
ResponseEntity<String>("请求报文中未包含事务ID", HttpStatus.NOT_FOUND); } if (StringUtil.isEmpty(orderItemDto.getAction())) { return new ResponseEntity<String>("请求报文中未包含动作", HttpStatus.NOT_FOUND); } if (StringUtil.isEmpty(orderItemDto.getActionObj())) { return new ResponseEntity<String>("请求报文中未包含动作对象", HttpStatus.NOT_FOUND); } if (StringUtil.isEmpty(orderItemDto.getServiceName())) { return new ResponseEntity<String>("请求报文中未包含服务", HttpStatus.NOT_FOUND); } if (StringUtil.isEmpty(orderItemDto.getLogText())) { return new ResponseEntity<String>("请求报文中未包含回滚日志", HttpStatus.NOT_FOUND); } if (StringUtil.isEmpty(orderItemDto.getbId()) || orderItemDto.getbId().startsWith("-")) { orderItemDto.setbId(GenerateCodeFactory.getBId()); } //判断OID是否存在 OrderDto orderDto = BeanConvertUtil.covertBean(centerServiceDAOImpl.getOrder(BeanConvertUtil.beanCovertMap(orderItemDto)), OrderDto.class); if (orderDto == null || "E".equals(orderDto.getStatusCd())) { return new ResponseEntity<String>("当前没有事务或者事务已经回滚", HttpStatus.NOT_FOUND); } centerServiceDAOImpl.saveOrderItem(BeanConvertUtil.beanCovertMap(orderItemDto)); return ResultVo.createResponseEntity(ResultVo.CODE_OK, ResultVo.MSG_OK); } /** * 完成事务 * * @param orderDto * @return */ @Override public ResponseEntity<String> finishOrder(OrderDto orderDto) { if (StringUtil.isEmpty(orderDto.getoId())) { return new ResponseEntity<String>("请求报文中未包含事务ID", HttpStatus.NOT_FOUND); } //完成订单项 Map info = new HashMap(); info.put("finishTime", DateUtil.getNow(DateUtil.DATE_FORMATE_STRING_A)); info.put("statusCd", "C"); info.put("oId", orderDto.getoId()); centerServiceDAOImpl.updateOrderItem(info); //删除 事务日志 //centerServiceDAOImpl.deleteUnItemLog(info); //完成订单 info = new HashMap(); info.put("finishTime", DateUtil.getNow(DateUtil.DATE_FORMATE_STRING_A)); info.put("statusCd", "C"); info.put("oId", orderDto.getoId()); centerServiceDAOImpl.updateOrder(info); return ResultVo.createResponseEntity(ResultVo.CODE_OK, ResultVo.MSG_OK); } }
6,232
369
<reponame>zuxqoj/cdap<filename>cdap-common/src/main/java/io/cdap/cdap/gateway/handlers/ConfigService.java /* * Copyright © 2015 <NAME>, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. */ package io.cdap.cdap.gateway.handlers; import com.google.common.collect.Lists; import com.google.inject.Inject; import io.cdap.cdap.common.conf.CConfiguration; import io.cdap.cdap.proto.ConfigEntry; import org.apache.hadoop.conf.Configuration; import java.io.IOException; import java.io.StringWriter; import java.util.List; import java.util.Map; /** * Exposes {@link CConfiguration} and {@link Configuration}. 
*/ public class ConfigService { private final CConfiguration cConf; private final Configuration hConf; @Inject public ConfigService(CConfiguration cConf, Configuration hConf) { this.cConf = cConf; this.hConf = hConf; } public List<ConfigEntry> getCConf() { return toConfigEntries(cConf); } public String getCConfXMLString() throws IOException { StringWriter stringWriter = new StringWriter(); cConf.writeXml(stringWriter); return stringWriter.toString(); } public List<ConfigEntry> getHConf() { return toConfigEntries(hConf); } public String getHConfXMLString() throws IOException { StringWriter stringWriter = new StringWriter(); hConf.writeXml(stringWriter); return stringWriter.toString(); } private String getFirstElement(String[] array) { if (array != null && array.length >= 1) { return array[0]; } else { return null; } } private List<ConfigEntry> toConfigEntries(Configuration configuration) { List<ConfigEntry> result = Lists.newArrayList(); for (Map.Entry<String, String> entry : configuration) { String source = getFirstElement(configuration.getPropertySources(entry.getKey())); result.add(new ConfigEntry(entry.getKey(), entry.getValue(), source)); } return result; } private List<ConfigEntry> toConfigEntries(CConfiguration configuration) { List<ConfigEntry> result = Lists.newArrayList(); for (Map.Entry<String, String> entry : configuration) { String source = getFirstElement(configuration.getPropertySources(entry.getKey())); result.add(new ConfigEntry(entry.getKey(), entry.getValue(), source)); } return result; } }
892
6,180
import unittest

import pytest

import numpy

import cupy
from cupy import testing


class TestPiecewise(unittest.TestCase):
    """Tests for ``cupy.piecewise`` against ``numpy.piecewise``.

    The ``numpy_cupy_array_equal`` decorator runs each test once with
    ``xp=numpy`` and once with ``xp=cupy`` and asserts equal results.
    """

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise(self, xp, dtype):
        # Overlapping conditions: later conditions take precedence.
        x = xp.linspace(2.5, 12.5, 6, dtype=dtype)
        condlist = [x < 0, x >= 0, x < 5, x >= 1.5]
        funclist = [-1, 1, 2, 5]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_scalar_input(self, xp, dtype):
        # A bare scalar input (not an array) must also be accepted.
        x = dtype(2)
        condlist = [x < 0, x >= 0]
        funclist = [-10, 10]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_scalar_condition(self, xp, dtype):
        # condlist may be a single scalar boolean instead of a list.
        x = testing.shaped_random(shape=(2, 3, 5), xp=xp, dtype=dtype)
        condlist = True
        funclist = [-10, 10]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_signed_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_otherwise_condition1(self, xp, dtype):
        # len(funclist) == len(condlist) + 1: the extra value is the
        # "otherwise" default for elements matching no condition.
        x = xp.linspace(-2, 20, 12, dtype=dtype)
        condlist = [x > 15, x <= 5, x == 0, x == 10]
        funclist = [-1, 0, 2, 3, -5]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_otherwise_condition2(self, xp, dtype):
        # Explicit boolean-array conditions plus an otherwise value.
        x = xp.array([-10, 20, 30, 40], dtype=dtype)
        condlist = [
            xp.array([True, False, False, True]),
            xp.array([True, False, False, True]),
        ]
        funclist = [-1, 1, 2]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_zero_dim_input(self, xp, dtype):
        x = testing.shaped_random(shape=(), xp=xp, dtype=dtype)
        condlist = [x < 0, x > 0]
        funclist = [-1, 1, 2]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_ndim_input(self, xp, dtype):
        x = testing.shaped_random(shape=(2, 3, 5), xp=xp, dtype=dtype)
        condlist = [x < 0, x > 0]
        funclist = [-1, 1, 2]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_zero_dim_condlist(self, xp, dtype):
        x = testing.shaped_random(shape=(), xp=xp, dtype=dtype)
        condlist = [testing.shaped_random(shape=(), xp=xp, dtype=bool)]
        funclist = [-1, 0]
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    @testing.numpy_cupy_array_equal()
    def test_piecewise_ndarray_condlist_funclist(self, xp, dtype):
        # condlist/funclist given as ndarrays rather than Python lists.
        x = xp.linspace(1, 20, 12, dtype=dtype)
        condlist = xp.array([x > 15, x <= 5, x == 0, x == 10])
        funclist = xp.array([-1, 0, 2, 3, -5], dtype=dtype)
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes_combination(
        names=['dtype1', 'dtype2'], no_complex=True)
    @testing.numpy_cupy_array_equal()
    def test_piecewise_diff_types_funclist(self, xp, dtype1, dtype2):
        # Input dtype and funclist dtype may differ.
        x = xp.linspace(1, 20, 12, dtype=dtype1)
        condlist = [x > 15, x <= 5, x == 0, x == 10]
        funclist = xp.array([1, 0, 2, 3, 5], dtype=dtype2)
        return xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    def test_mismatched_lengths(self, dtype):
        # More than len(condlist) + 1 values is an error in both backends.
        funclist = [-1, 0, 2, 4, 5]
        for xp in (numpy, cupy):
            x = xp.linspace(-2, 4, 6, dtype=dtype)
            condlist = [x < 0, x >= 0]
            with pytest.raises(ValueError):
                xp.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    def test_callable_funclist(self, dtype):
        # cupy.piecewise does not support callables in funclist.
        x = cupy.linspace(-2, 4, 6, dtype=dtype)
        condlist = [x < 0, x > 0]
        funclist = [lambda x: -x, lambda x: x]
        with pytest.raises(NotImplementedError):
            cupy.piecewise(x, condlist, funclist)

    @testing.for_all_dtypes()
    def test_mixed_funclist(self, dtype):
        # Even a mix of scalars and callables is rejected.
        x = cupy.linspace(-2, 2, 6, dtype=dtype)
        condlist = [x < 0, x == 0, x > 0]
        funclist = [-10, lambda x: -x, 10, lambda x: x]
        with pytest.raises(NotImplementedError):
            cupy.piecewise(x, condlist, funclist)
2,131
1,433
{ "schemaVersion": 1, "id": "fabric-tool-attribute-api-v1-testmod", "name": "Fabric Tool Attribute API (v1) Test Mod", "version": "1.0.0", "environment": "*", "license": "Apache-2.0", "depends": { "fabric-tool-attribute-api-v1": "*" }, "entrypoints": { "main": [ "net.fabricmc.fabric.test.tool.attribute.ToolAttributeTest" ] } }
165
461
<filename>11-Binary-Tree/2-Importance/0236-lowest-common-ancestor-of-a-binary-tree/src/Trie2.java public class Trie2 { private Node root; private class Node { private Node[] next; private boolean isWord; public Node() { // word 和 prefix 仅由小写英文字母组成 next = new Node[26]; this.isWord = false; } } public Trie2() { root = new Node(); } public void insert(String word) { int len = word.length(); Node curNode = root; for (int i = 0; i < len; i++) { char curChar = word.charAt(i); Node next = curNode.next[curChar - 'a']; if (next == null) { curNode.next[curChar - 'a'] = new Node(); } curNode = curNode.next[curChar - 'a']; } if (!curNode.isWord) { curNode.isWord = true; } } public boolean search(String word) { int len = word.length(); Node curNode = root; for (int i = 0; i < len; i++) { char curC = word.charAt(i); Node next = curNode.next[curC - 'a']; if (next == null) { return false; } else { curNode = next; } } return curNode.isWord; } public boolean startsWith(String prefix) { int len = prefix.length(); Node curNode = root; for (int i = 0; i < len; i++) { char curC = prefix.charAt(i); Node next = curNode.next[curC - 'a']; if (next == null) { return false; } else { curNode = next; } } return true; } }
945
3,428
<gh_stars>1000+ {"id":"00231","group":"easy-ham-1","checksum":{"type":"MD5","value":"b295668c907d5f4d50f8e9db78ae5714"},"text":"From <EMAIL> Wed Aug 28 13:45:01 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: zzzz<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 96C6E43F99\n\tfor <zzzz@localhost>; Wed, 28 Aug 2002 08:45:00 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor zzzz@localhost (single-drop); Wed, 28 Aug 2002 13:45:00 +0100 (IST)\nReceived: from egwn.net (ns2.egwn.net [192.168.127.12]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g7SCbXZ24192 for\n <<EMAIL>>; Wed, 28 Aug 2002 13:37:33 +0100\nReceived: from auth02.nl.egwn.net (localhost [127.0.0.1]) by egwn.net\n (8.11.6/8.11.6/EGWN) with ESMTP id g7SCX2J20905; Wed, 28 Aug 2002 14:33:03\n +0200\nReceived: from bonzo.nirvana (pD9E7EF40.dip.t-dialin.net [217.231.239.64])\n by egwn.net (8.11.6/8.11.6/EGWN) with ESMTP id g7SCWfJ20844 for\n <<EMAIL>>; Wed, 28 Aug 2002 14:32:41 +0200\nFrom: <NAME> <<EMAIL>>\nTo: [email protected]\nSubject: /home/dude\nMessage-Id: <20020828143235.<EMAIL>7<EMAIL>>\nMIME-Version: 1.0\nContent-Type: text/plain; charset=us-ascii\nContent-Disposition: inline\nUser-Agent: Mutt/1.2.5.1i\nX-Mailscanner: Found to be clean, Found to be clean\nSender: [email protected]\nErrors-To: [email protected]\nX-Beenthere: [email protected]\nX-Mailman-Version: 2.0.11\nPrecedence: bulk\nReply-To: [email protected]\nList-Help: <mailto:<EMAIL>?subject=help>\nList-Post: <mailto:<EMAIL>>\nList-Subscribe: <http://lists.freshrpms.net/mailman/listinfo/rpm-zzzlist>,\n <mailto:<EMAIL>?subject=subscribe>\nList-Id: Freshrpms RPM discussion list <rpm-zzzlist.freshrpms.net>\nList-Unsubscribe: <http://lists.freshrpms.net/mailman/listinfo/rpm-zzzlist>,\n <mailto:<EMAIL>?subject=unsubscribe>\nList-Archive: <http://lists.freshrpms.net/pipermail/rpm-zzzlist/>\nX-Original-Date: Wed, 28 Aug 
2002 14:32:35 +0200\nDate: Wed, 28 Aug 2002 14:32:35 +0200\n\nHi,\n\nsome time now the following messages were haunting me:\n\n automount[11593]: attempting to mount entry /home/dude\n\nIt just came to my attention, that only freshrpm benefitting hosts showed this\nup. I grepped through the binaries and found referrences to /home/dude.\n\n# grep /home/dude /usr/bin/*\nBinary file /usr/bin/aaxine matches\nBinary file /usr/bin/gentoo matches\nBinary file /usr/bin/gphoto2 matches\nBinary file /usr/bin/gtkam matches\n...\n\nI am now relaxed again ;), and pass this info on. Probably Matthias Saou\nhimself is \"dude\", and some package has hardwired a path in his build\ndirectory. It would be nice to find out which and fix it, but I am using too\nmany of the freshrpm suite to narrow it down.\n\nRegards, Axel.\n-- \nAxel.Thimm@<EMAIL>.fu-berlin.de\n\n_______________________________________________\nRPM-List mailing list <<EMAIL>>\nhttp://lists.freshrpms.net/mailman/listinfo/rpm-list\n\n"}
1,274
3,804
//------------------------------------------------------------------------------
/*
    This file is part of rippled: https://github.com/ripple/rippled
    Copyright 2020 Ripple Labs Inc.

    Permission to use, copy, modify, and/or distribute this software for any
    purpose with or without fee is hereby granted, provided that the above
    copyright notice and this permission notice appear in all copies.

    THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
    WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
    MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
    ANY SPECIAL , DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
//==============================================================================

#include <ripple/basics/make_SSLContext.h>
#include <ripple/beast/unit_test.h>
#include <ripple/overlay/impl/OverlayImpl.h>
#include <ripple/overlay/impl/PeerImp.h>
#include <ripple/peerfinder/impl/SlotImp.h>
#include <test/jtx/Env.h>

namespace ripple {

namespace test {

// Unit tests for transaction reduce-relay: configuration parsing and the
// relay/queue split performed by Overlay::relay() across enabled/disabled
// peers and skip lists.
class tx_reduce_relay_test : public beast::unit_test::suite
{
public:
    using socket_type = boost::asio::ip::tcp::socket;
    using middle_type = boost::beast::tcp_stream;
    using stream_type = boost::beast::ssl_stream<middle_type>;
    using shared_context = std::shared_ptr<boost::asio::ssl::context>;

private:
    // Small helper: register a named testcase and run the body.
    void
    doTest(const std::string& msg, bool log, std::function<void(bool)> f)
    {
        testcase(msg);
        f(log);
    }

    // Verifies that [reduce_relay] config values are parsed, and that
    // out-of-range percentages/min-peers are rejected.
    void
    testConfig(bool log)
    {
        doTest("Config Test", log, [&](bool log) {
            auto test = [&](bool enable,
                            bool metrics,
                            std::uint16_t min,
                            std::uint16_t pct,
                            bool success = true) {
                // NOTE(review): the stringstream's constructor argument is
                // immediately overwritten by the first insertion below, so
                // the "[reduce_relay]" ctor argument is redundant.
                std::stringstream str("[reduce_relay]");
                str << "[reduce_relay]\n"
                    << "tx_enable=" << static_cast<int>(enable) << "\n"
                    << "tx_metrics=" << static_cast<int>(metrics) << "\n"
                    << "tx_min_peers=" << min << "\n"
                    << "tx_relay_percentage=" << pct << "\n";
                Config c;
                try
                {
                    c.loadFromString(str.str());
                    BEAST_EXPECT(c.TX_REDUCE_RELAY_ENABLE == enable);
                    BEAST_EXPECT(c.TX_REDUCE_RELAY_METRICS == metrics);
                    BEAST_EXPECT(c.TX_REDUCE_RELAY_MIN_PEERS == min);
                    BEAST_EXPECT(c.TX_RELAY_PERCENTAGE == pct);
                    if (success)
                        pass();
                    else
                        fail();
                }
                catch (...)
                {
                    // loadFromString throws on invalid values.
                    if (success)
                        fail();
                    else
                        pass();
                }
            };
            test(true, true, 20, 25);
            test(false, false, 20, 25);
            test(false, false, 20, 0, false);
            test(false, false, 20, 101, false);
            test(false, false, 9, 10, false);
            test(false, false, 10, 9, false);
        });
    }

    // Stub peer: counts direct sends and queued-by-hash transactions in
    // static counters instead of doing any network I/O.
    class PeerTest : public PeerImp
    {
    public:
        PeerTest(
            Application& app,
            std::shared_ptr<PeerFinder::Slot> const& slot,
            http_request_type&& request,
            PublicKey const& publicKey,
            ProtocolVersion protocol,
            Resource::Consumer consumer,
            std::unique_ptr<tx_reduce_relay_test::stream_type>&& stream_ptr,
            OverlayImpl& overlay)
            : PeerImp(
                  app,
                  sid_,
                  slot,
                  std::move(request),
                  publicKey,
                  protocol,
                  consumer,
                  std::move(stream_ptr),
                  overlay)
        {
            sid_++;
        }
        ~PeerTest() = default;

        void
        run() override
        {
        }
        void
        send(std::shared_ptr<Message> const&) override
        {
            sendTx_++;
        }
        void
        addTxQueue(const uint256& hash) override
        {
            queueTx_++;
        }
        // Reset the shared counters between test scenarios.
        static void
        init()
        {
            queueTx_ = 0;
            sendTx_ = 0;
            sid_ = 0;
        }
        inline static std::size_t sid_ = 0;
        inline static std::uint16_t queueTx_ = 0;
        inline static std::uint16_t sendTx_ = 0;
    };

    // Local/remote endpoint counters used to synthesize distinct addresses.
    std::uint16_t lid_{0};
    std::uint16_t rid_{1};
    shared_context context_;
    ProtocolVersion protocolVersion_;
    boost::beast::multi_buffer read_buf_;

public:
    tx_reduce_relay_test()
        : context_(make_SSLContext("")), protocolVersion_{1, 7}
    {
    }

private:
    // Adds one inbound PeerTest to the overlay. While nDisabled > 0 the peer
    // is created WITHOUT the reduce-relay feature header (counted down);
    // afterwards peers advertise the feature via X-Protocol-Ctl.
    void
    addPeer(
        jtx::Env& env,
        std::vector<std::shared_ptr<PeerTest>>& peers,
        std::uint16_t& nDisabled)
    {
        auto& overlay = dynamic_cast<OverlayImpl&>(env.app().overlay());
        boost::beast::http::request<boost::beast::http::dynamic_body> request;
        (nDisabled == 0)
            ? (void)request.insert(
                  "X-Protocol-Ctl",
                  makeFeaturesRequestHeader(false, false, true, false))
            : (void)nDisabled--;
        auto stream_ptr = std::make_unique<stream_type>(
            socket_type(std::forward<boost::asio::io_service&>(
                env.app().getIOService())),
            *context_);
        beast::IP::Endpoint local(
            beast::IP::Address::from_string("172.1.1." + std::to_string(lid_)));
        beast::IP::Endpoint remote(
            beast::IP::Address::from_string("172.1.1." + std::to_string(rid_)));
        PublicKey key(std::get<0>(randomKeyPair(KeyType::ed25519)));
        auto consumer = overlay.resourceManager().newInboundEndpoint(remote);
        auto slot = overlay.peerFinder().new_inbound_slot(local, remote);
        auto const peer = std::make_shared<PeerTest>(
            env.app(),
            slot,
            std::move(request),
            key,
            protocolVersion_,
            consumer,
            std::move(stream_ptr),
            overlay);
        overlay.add_active(peer);
        peers.emplace_back(peer);  // overlay stores week ptr to PeerImp
        lid_ += 2;
        rid_ += 2;
        assert(lid_ <= 254);  // endpoint addresses are synthesized in 172.1.1.0/24
    }

    // Builds nPeers peers (the first nDisabled without the feature), relays
    // one transaction, and checks how many peers got a direct send
    // (expectRelay) versus a queued hash (expectQueue), honoring toSkip.
    void
    testRelay(
        std::string const& test,
        bool txRREnabled,
        std::uint16_t nPeers,
        std::uint16_t nDisabled,
        std::uint16_t minPeers,
        std::uint16_t relayPercentage,
        std::uint16_t expectRelay,
        std::uint16_t expectQueue,
        std::set<Peer::id_t> const& toSkip = {})
    {
        testcase(test);
        jtx::Env env(*this);
        std::vector<std::shared_ptr<PeerTest>> peers;
        env.app().config().TX_REDUCE_RELAY_ENABLE = txRREnabled;
        env.app().config().TX_REDUCE_RELAY_MIN_PEERS = minPeers;
        env.app().config().TX_RELAY_PERCENTAGE = relayPercentage;
        PeerTest::init();
        lid_ = 0;
        rid_ = 0;
        for (int i = 0; i < nPeers; i++)
            addPeer(env, peers, nDisabled);
        protocol::TMTransaction m;
        m.set_rawtransaction("transaction");
        m.set_deferred(false);
        m.set_status(protocol::TransactionStatus::tsNEW);
        env.app().overlay().relay(uint256{0}, m, toSkip);
        BEAST_EXPECT(
            PeerTest::sendTx_ == expectRelay &&
            PeerTest::queueTx_ == expectQueue);
    }

    void
    run() override
    {
        bool log = false;
        std::set<Peer::id_t> skip = {0, 1, 2, 3, 4};
        testConfig(log);
        // relay to all peers, no hash queue
        testRelay("feature disabled", false, 10, 0, 10, 25, 10, 0);
        // relay to nPeers - skip (10-5=5)
        testRelay("feature disabled & skip", false, 10, 0, 10, 25, 5, 0, skip);
        // relay to all peers because min is greater than nPeers
        testRelay("relay all 1", true, 10, 0, 20, 25, 10, 0);
        // relay to all peers because min + disabled is greater thant nPeers
        testRelay("relay all 2", true, 20, 15, 10, 25, 20, 0);
        // relay to minPeers + 25% of nPeers-minPeers (20+0.25*(60-20)=30),
        // queue the rest (30)
        testRelay("relay & queue", true, 60, 0, 20, 25, 30, 30);
        // relay to minPeers + 25% of (nPeers - nPeers) - skip
        // (20+0.25*(60-20)-5=25), queue the rest, skip counts towards relayed
        // (60-25-5=30)
        testRelay("skip", true, 60, 0, 20, 25, 25, 30, skip);
        // relay to minPeers + disabled + 25% of (nPeers - minPeers - disalbed)
        // (20+10+0.25*(70-20-10)=40), queue the rest (30)
        testRelay("disabled", true, 70, 10, 20, 25, 40, 30);
        // relay to minPeers + disabled-not-in-skip + 25% of (nPeers - minPeers
        // - disabled) (20+5+0.25*(70-20-10)=35), queue the rest, skip counts
        // towards relayed (70-35-5=30))
        testRelay("disabled & skip", true, 70, 10, 20, 25, 35, 30, skip);
        // relay to minPeers + disabled + 25% of (nPeers - minPeers - disabled)
        // - skip (10+5+0.25*(15-10-5)-10=5), queue the rest, skip counts
        // towards relayed (15-5-10=0)
        skip = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
        testRelay("disabled & skip, no queue", true, 15, 5, 10, 25, 5, 0, skip);
        // relay to minPeers + disabled + 25% of (nPeers - minPeers - disabled)
        // - skip (10+2+0.25*(20-10-2)-14=0), queue the rest, skip counts
        // towards relayed (20-14=6)
        skip = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13};
        testRelay("disabled & skip, no relay", true, 20, 2, 10, 25, 0, 6, skip);
    }
};

BEAST_DEFINE_TESTSUITE(tx_reduce_relay, ripple_data, ripple);
}  // namespace test
}  // namespace ripple
4,995
1,364
<filename>src/kilim/examples/Group.java /* Copyright (c) 2006, <NAME> * * You may distribute this software under the terms of the license * specified in the file "License" */ package kilim.examples; import kilim.Pausable; import kilim.Task; import kilim.TaskGroup; public class Group { public static void main(String[] args) { TaskGroup tg = new TaskGroup(); tg.add(new GroupTask().start()); tg.add(new GroupTask().start()); tg.joinb(); System.exit(0); } static class GroupTask extends Task { public void execute() throws Pausable { System.out.println("Task #" + id + "sleeping"); Task.sleep(1000); System.out.println("Task #" + id + "done"); } } }
316
14,668
<filename>tools/android/push_apps_to_background/src/org/chromium/push_apps_to_background/PushAppsToBackgroundActivity.java // Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. package org.chromium.push_apps_to_background; import android.app.Activity; import android.os.Bundle; /** * This activity is used in performance tests to push other apps * to the background while running automated user stories. */ public class PushAppsToBackgroundActivity extends Activity { @Override public void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_push_apps_to_background); } }
225
636
# demonstrate how to use PyMOL's atom pick "events" in a Wizard # Note: To pick an atom or bond, you need to use the button mouse actions "PkAt" # or "PkTB" respectively. By default, the "PkTB" is not available in the # default 3-Button Viewing mode, but it can be used in the 3-Button Editing # mode by double clicking the right button. # # Run this file as: # DOS/Unix> pymol pick_wiz.py # or # PyMOL> run pick_wiz.py from pymol.wizard import Wizard from pymol import cmd import pymol class PickWizard(Wizard): def reset(self): self.pk1_st = None self.pk2_st = None self.pk1_xyz = None cmd.refresh_wizard() def __init__(self): Wizard.__init__(self) self.reset() def get_prompt(self): if self.pk2_st!=None: return ["You picked the bond between %s and %s"%( self.pk1_st, self.pk2_st)] elif self.pk1_st!=None: return ["You picked atom %s"%(self.pk1_st), "At X=%1.2f Y=%1.2f Z=%1.2f"%self.pk1_xyz] else: return ["Please pick an atom or a bond..."] def do_pick(self,picked_bond): self.reset() cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk1_st'," "'%s/%s/%s/%s/%s'%(model,segi,chain,resi,name))") if picked_bond: cmd.iterate("pk1","setattr(cmd.get_wizard(),'pk2_st'," "'%s/%s/%s/%s/%s'%(model,segi,chain,resi,name))") else: # for single atom, also get 3D coordinates (EXAMPLE) cmd.iterate_state( cmd.get_state(), "pk1","setattr(cmd.get_wizard(),'pk1_xyz',(x,y,z))") cmd.unpick() cmd.refresh_wizard() def get_panel(self): return [ [ 1, 'Example Wizard',''], [ 2, 'Reset','cmd.get_wizard().reset()'], [ 2, 'Done','cmd.set_wizard()'], ] # create an instane wiz = PickWizard() # make this the active wizard cmd.set_wizard(wiz)
1,018
2,542
// ------------------------------------------------------------
// Copyright (c) Microsoft Corporation.  All rights reserved.
// Licensed under the MIT License (MIT). See License.txt in the repo root for license information.
// ------------------------------------------------------------

#include "stdafx.h"

namespace Naming
{
    using namespace std;
    using namespace Common;

    // Wire format of an escaped token is "<lastEnumeratedName>$<subnamesVersion>".
    const std::wstring EnumerateSubNamesToken::Delimiter = L"$";

    // Default-constructed tokens are invalid placeholders (version -1).
    EnumerateSubNamesToken::EnumerateSubNamesToken()
        : lastEnumeratedName_()
        , subnamesVersion_(-1)
        , isValid_(false)
    {
    }

    EnumerateSubNamesToken::EnumerateSubNamesToken(
        NamingUri const & lastEnumeratedName,
        _int64 subnamesVersion)
        : lastEnumeratedName_(lastEnumeratedName)
        , subnamesVersion_(subnamesVersion)
        , isValid_(true)
    {
    }

    // Parses an escaped continuation token into 'token'.
    //
    // Steps: unescape, split on '$' (exactly two parts expected), parse the
    // version (second part) and then the name (first part). Any failure yields
    // InvalidArgument with the original escaped token in the message.
    //
    // NOTE(review): assumes neither the unescaped name nor the version contains
    // an extra '$'; such input is rejected by the size()==2 check.
    ErrorCode EnumerateSubNamesToken::Create(std::wstring const & escapedToken, __out EnumerateSubNamesToken & token)
    {
        std::wstring unescapedToken;
        auto error = NamingUri::UnescapeString(escapedToken, unescapedToken);
        if (!error.IsSuccess())
        {
            return error;
        }

        StringCollection tokenElements;
        StringUtility::Split<std::wstring>(unescapedToken, tokenElements, Delimiter);
        if (tokenElements.size() != 2)
        {
            return ErrorCode(
                ErrorCodeValue::InvalidArgument,
                wformatString(GET_COMMON_RC(Invalid_Continuation_Token), escapedToken));
        }

        _int64 subNamesVersion;
        bool parseSuccess = StringUtility::TryFromWString<_int64>(tokenElements[1], subNamesVersion);
        if (!parseSuccess)
        {
            return ErrorCode(
                ErrorCodeValue::InvalidArgument,
                wformatString(GET_COMMON_RC(Invalid_Continuation_Token), escapedToken));
        }

        NamingUri lastEnumeratedName;
        parseSuccess = NamingUri::TryParse(tokenElements[0], lastEnumeratedName);
        if (!parseSuccess)
        {
            return ErrorCode(
                ErrorCodeValue::InvalidArgument,
                wformatString(GET_COMMON_RC(Invalid_Continuation_Token), escapedToken));
        }

        token = EnumerateSubNamesToken(lastEnumeratedName, subNamesVersion);
        return ErrorCode::Success();
    }

    // Inverse of Create: formats "<name>$<version>" and escapes it.
    ErrorCode EnumerateSubNamesToken::ToEscapedString(__out std::wstring & escapedToken) const
    {
        std::wstring token;
        token += lastEnumeratedName_.ToString();
        token += Delimiter;
        token += StringUtility::ToWString(subnamesVersion_);
        return NamingUri::EscapeString(token, escapedToken);
    }

    // Diagnostic text output; distinguishes valid from placeholder tokens.
    void EnumerateSubNamesToken::WriteTo(__in Common::TextWriter & w, Common::FormatOptions const &) const
    {
        if (isValid_)
        {
            w << "Token[LastName: " << lastEnumeratedName_<< "(" << subnamesVersion_ << ")]";
        }
        else
        {
            w << "Token[Invalid]";
        }
    }
}
1,246
415
/************************************************
** framebufferObject.cpp                       **
** ---------------------                       **
**                                             **
** This is the frame-work for general purpose  **
** initialization of a framebuffer object,     **
** as specified in the OpenGL extension:       **
** GL_EXT_FRAMEBUFFER_OBJECT                   **
**                                             **
** Since this is an OpenGL extension, not WGL, **
** it should be much more portable (and        **
** supposedly) faster than p-buffers and       **
** render-to-texture.                          **
**                                             **
** <NAME> (4/27/2005)                          **
************************************************/

#include <stdio.h>
#include <string.h>
#include "framebufferObject.h"

// Construct a framebuffer with no attachments and unknown dimensions.
// NOTE(review): width/height stay 0 here, so AttachColorBuffer() (which calls
// glRenderbufferStorageEXT with width/height) would allocate zero-sized
// storage unless dimensions are set elsewhere — confirm against the header.
FrameBuffer::FrameBuffer( char *name )
{
	glGetIntegerv( GL_MAX_COLOR_ATTACHMENTS_EXT, &maxColorBuffers );
	colorIDs = new GLuint[maxColorBuffers];
	depthID = 0;
	stencilID = 0;
	for (int i=0; i<maxColorBuffers; i++)
		colorIDs[i] = 0;
	prevFrameBuf = 0;
	width = height = 0;
	glGenFramebuffersEXT( 1, &ID );
	// NOTE(review): sprintf assumes fbName is large enough for the formatted
	// id, and strncpy(.., 79) leaves fbName unterminated when strlen(name)
	// >= 79 — fbName appears to be an 80-char buffer; verify in the header.
	if (!name)
		sprintf( fbName, "Framebuffer %d", ID );
	else
		strncpy( fbName, name, 79 );
}

// Construct a framebuffer with known dimensions (used for renderbuffer
// attachments, which need width/height for storage allocation).
FrameBuffer::FrameBuffer( int width, int height, char *name ) :
	width( width ), height( height )
{
	glGetIntegerv( GL_MAX_COLOR_ATTACHMENTS_EXT, &maxColorBuffers );
	colorIDs = new GLuint[maxColorBuffers];
	depthID = 0;
	stencilID = 0;
	for (int i=0; i<maxColorBuffers; i++)
		colorIDs[i] = 0;
	prevFrameBuf = 0;
	glGenFramebuffersEXT( 1, &ID );
	if (!name)
		sprintf( fbName, "Framebuffer %d", ID );
	else
		strncpy( fbName, name, 79 );
}

FrameBuffer::~FrameBuffer( )
{
	// unbind this buffer, if bound
	GLint tmpFB;
	glGetIntegerv( GL_FRAMEBUFFER_BINDING_EXT, &tmpFB );
	if (tmpFB == ID)
		glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, prevFrameBuf );

	// delete the stencil & depth renderbuffers
	if (depthID)
		glDeleteRenderbuffersEXT(1, &depthID);
	if (stencilID)
		glDeleteRenderbuffersEXT(1, &stencilID);

	// delete the framebuffer
	glDeleteFramebuffersEXT( 1, &ID );
	delete [] colorIDs;
}

// check to see if the framebuffer 'fb' is complete (i.e., renderable)
//    if fb==NULL, then check the currently bound framebuffer
// Temporarily binds this FBO if another one is current, and restores the
// previous binding before returning. Returns the raw GL status enum.
GLenum FrameBuffer::CheckFramebufferStatus( int printMessage )
{
	GLenum error;
	GLint oldFB = 0;
	glGetIntegerv( GL_FRAMEBUFFER_BINDING_EXT, &oldFB );

	// there may be some other framebuffer currently bound...  if so, save it
	if ( oldFB != ID )
		glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, ID);

	// check the error status of this framebuffer */
	error = glCheckFramebufferStatusEXT(GL_FRAMEBUFFER_EXT);

	// if error != GL_FRAMEBUFFER_COMPLETE_EXT, there's an error of some sort
	if (printMessage)
	{
		switch(error)
		{
			case GL_FRAMEBUFFER_COMPLETE_EXT:
				break;
			case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT_EXT:
				printf("Error!  %s missing a required image/buffer attachment!\n", fbName);
				break;
			case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT_EXT:
				printf("Error!  %s has no images/buffers attached!\n", fbName);
				break;
			case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT:
				printf("Error!  %s has mismatched image/buffer dimensions!\n", fbName);
				break;
			case GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT:
				printf("Error!  %s's colorbuffer attachments have different types!\n", fbName);
				break;
			case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER_EXT:
				printf("Error!  %s trying to draw to non-attached color buffer!\n", fbName);
				break;
			case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER_EXT:
				printf("Error!  %s trying to read from a non-attached color buffer!\n", fbName);
				break;
			case GL_FRAMEBUFFER_UNSUPPORTED_EXT:
				printf("Error!  %s format is not supported by current graphics card/driver!\n", fbName);
				break;
			default:
				printf("*UNKNOWN ERROR* reported from glCheckFramebufferStatusEXT() for %s!\n", fbName);
				break;
		}
	}

	// if this was not the current framebuffer, switch back!
	if ( oldFB != ID )
		glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, oldFB );

	return error;
}

// attach a texture (colorTexID) to one of the color buffer attachment points
//    This function is not completely general, as it does not allow specification
//    of which MIPmap level to draw to (it uses the base, level 0).
// Returns 1 on success, 0 if colorBuffer is out of range.
int FrameBuffer::AttachColorTexture( GLuint colorTexID, int colorBuffer )
{
	// If the colorBuffer value is valid, then bind the texture to the color buffer.
	if (colorBuffer < maxColorBuffers)
	{
		BindBuffer();
		glFramebufferTexture2DEXT( GL_FRAMEBUFFER_EXT,
		                           GL_COLOR_ATTACHMENT0_EXT+colorBuffer,
		                           GL_TEXTURE_2D, colorTexID, 0);
		UnbindBuffer();
	}
	else return 0;
	colorIDs[colorBuffer] = colorTexID;
	return 1;
}

// attach a texture (depthTexID) to the depth buffer attachment point.
int FrameBuffer::AttachDepthTexture( GLuint depthTexID )
{
	BindBuffer();
	glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
	                          GL_TEXTURE_2D, depthTexID, 0);
	depthID = depthTexID;
	UnbindBuffer();
	return 1;
}

// attach a texture (stencilTexID) to the stencil buffer attachment point.
int FrameBuffer::AttachStencilTexture( GLuint stencilTexID )
{
	BindBuffer();
	glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT,
	                          GL_TEXTURE_2D, stencilTexID, 0);
	stencilID = stencilTexID;
	UnbindBuffer();
	return 1;
}

// attach a renderbuffer (colorBufID) to one of the color buffer attachment points
// Unlike the texture variant, this also allocates RGBA storage of the
// framebuffer's width/height for the renderbuffer.
int FrameBuffer::AttachColorBuffer( GLuint colorBufID, int colorBuffer )
{
	// If the colorBuffer value is valid, then bind the texture to the color buffer.
	if (colorBuffer < maxColorBuffers)
	{
		BindBuffer();
		glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, colorBufID);
		glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_RGBA,
		                         width, height);
		glFramebufferRenderbufferEXT( GL_FRAMEBUFFER_EXT,
		                              GL_COLOR_ATTACHMENT0_EXT+colorBuffer,
		                              GL_RENDERBUFFER_EXT, colorBufID);
		UnbindBuffer();
	}
	else return 0;
	colorIDs[colorBuffer] = colorBufID;
	return 1;
}

// attach a renderbuffer (depthBufID) to the depth buffer attachment point.
// NOTE(review): storage allocation is commented out here — the caller is
// apparently expected to have allocated the renderbuffer's storage already.
int FrameBuffer::AttachDepthBuffer( GLuint depthBufID )
{
	BindBuffer();
	//glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, depthBufID);
	//glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_DEPTH_COMPONENT24,
	//                         width, height);
	glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_ATTACHMENT_EXT,
	                             GL_RENDERBUFFER_EXT, depthBufID);
	depthID = depthBufID;
	UnbindBuffer();
	return 1;
}

// attach a renderbuffer (stencilBufID) to the stencil buffer attachment point.
// Same caveat as AttachDepthBuffer: storage allocation is left to the caller.
int FrameBuffer::AttachStencilBuffer( GLuint stencilBufID )
{
	BindBuffer();
	//glBindRenderbufferEXT(GL_RENDERBUFFER_EXT, stencilBufID);
	//glRenderbufferStorageEXT(GL_RENDERBUFFER_EXT, GL_STENCIL_INDEX8_EXT,
	//                         width, height);
	glFramebufferRenderbufferEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT_EXT,
	                             GL_RENDERBUFFER_EXT, stencilBufID);
	stencilID = stencilBufID;
	UnbindBuffer();
	return 1;
}

// Bind this framebuffer as the current one.  Store the old one to reattach
//   when we unbind.  Also return the ID of the previous framebuffer.
GLuint FrameBuffer::BindBuffer( void )
{
	GLint tmp;
	glGetIntegerv( GL_FRAMEBUFFER_BINDING_EXT, &tmp );
	prevFrameBuf = tmp;
	glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, ID );
	return prevFrameBuf;
}

// This function unbinds this framebuffer to whatever buffer was attached
//     previously...  If for some reason the binding have changed so we're
//     no longer the current buffer, DO NOT unbind, return 0.  Else, unbind
//     and return 1.
int FrameBuffer::UnbindBuffer( void )
{
	GLint tmpFB;
	glGetIntegerv( GL_FRAMEBUFFER_BINDING_EXT, &tmpFB );
	if (tmpFB != ID) return 0;
	glBindFramebufferEXT( GL_FRAMEBUFFER_EXT, prevFrameBuf );
	prevFrameBuf = 0;
	return 1;
}

// Redirect rendering of color attachment 'colorBuffer' to MIPmap 'level' of
// its texture, constraining sampling to level-1 so the shader reads the
// previous level while writing the next (MIPmap-generation pattern).
// NOTE(review): level==0 would set BASE/MAX_LEVEL to -1 (wraps as unsigned) —
// callers presumably start at level 1; confirm.
void FrameBuffer::DrawToColorMipmapLevel( GLuint colorBuffer, GLuint level )
{
	glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT,
	                          GL_COLOR_ATTACHMENT0_EXT+colorBuffer,
	                          GL_TEXTURE_2D,
	                          GetColorTextureID( colorBuffer ),
	                          level);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, level-1);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, level-1);
	glBindTexture( GL_TEXTURE_2D, GetColorTextureID( colorBuffer ) );
	glEnable(GL_TEXTURE_2D);
}

// Restore default MIPmap level clamping after DrawToColorMipmapLevel().
void FrameBuffer::DoneDrawingMipmapLevels( void )
{
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0);
	glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 1000);
}
3,475
375
/* * * Copyright 2018 <NAME> * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy of * the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations under * the License. * */ package com.github.wnameless.json.flattener; import org.apache.commons.text.translate.CharSequenceTranslator; /** * * {@link CharSequenceTranslatorFactory} is designed to enhance the * {@link StringEscapePolicy}.<br> * Any method which accepts a {@link StringEscapePolicy} (eg: * {@link JsonFlattener#withStringEscapePolicy(CharSequenceTranslatorFactory) * JsonFlattener#withStringEscapePolicy}) now accepts * {@link CharSequenceTranslatorFactory} as well.<br> * <br> * Furthermore, anyone can provide their own {@link StringEscapePolicy} by * implementing a {@link CharSequenceTranslatorFactory}. * * @author <NAME> * @since v0.5.0 * */ public interface CharSequenceTranslatorFactory { /** * Returns a {@link CharSequenceTranslator} * * @return {@link CharSequenceTranslator} */ CharSequenceTranslator getCharSequenceTranslator(); }
430
348
<gh_stars>100-1000 {"nom":"Verzy","circ":"3ème circonscription","dpt":"Marne","inscrits":802,"abs":425,"votants":377,"blancs":5,"nuls":0,"exp":372,"res":[{"nuance":"REM","nom":"<NAME>","voix":149},{"nuance":"LR","nom":"Mme <NAME>","voix":76},{"nuance":"FN","nom":"<NAME>","voix":60},{"nuance":"FI","nom":"M. <NAME>","voix":41},{"nuance":"COM","nom":"M. <NAME>","voix":12},{"nuance":"DVD","nom":"M. <NAME>","voix":11},{"nuance":"ECO","nom":"<NAME>","voix":8},{"nuance":"DLF","nom":"M. <NAME>","voix":5},{"nuance":"EXG","nom":"<NAME>","voix":4},{"nuance":"ECO","nom":"Mme <NAME>","voix":4},{"nuance":"DIV","nom":"M. <NAME>","voix":2}]}
258
2,087
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *

from MainWindow import Ui_MainWindow

from datetime import datetime
import json
import os
import sys
import requests
from urllib.parse import urlencode

OPENWEATHERMAP_API_KEY = os.environ.get('OPENWEATHERMAP_API_KEY')
"""
Get an API key from https://openweathermap.org/ to use with this
application.
"""


def from_ts_to_time_of_day(ts):
    """Convert a POSIX timestamp to a short local time-of-day label, e.g. "3PM"."""
    dt = datetime.fromtimestamp(ts)
    return dt.strftime("%I%p").lstrip("0")


class WorkerSignals(QObject):
    '''
    Defines the signals available from a running worker thread.

    finished: emitted when the request cycle completes (success or failure).
    error:    emitted with a human-readable message on any failure.
    result:   emitted with (current_weather, forecast) response dicts.
    '''
    finished = pyqtSignal()
    error = pyqtSignal(str)
    result = pyqtSignal(dict, dict)


class WeatherWorker(QRunnable):
    '''
    Worker thread for weather updates.

    Fetches the current weather and the forecast for ``location`` from
    OpenWeatherMap and reports back through ``self.signals``.
    '''

    def __init__(self, location):
        super(WeatherWorker, self).__init__()
        self.location = location
        # BUGFIX: `signals` (and `is_interrupted`) used to be *class*
        # attributes, so every worker shared one WorkerSignals object and
        # slots connected for one request were re-fired by all later
        # requests. Each worker now owns its own signals object.
        self.signals = WorkerSignals()
        self.is_interrupted = False

    @pyqtSlot()
    def run(self):
        try:
            params = dict(
                q=self.location,
                appid=OPENWEATHERMAP_API_KEY
            )
            url = 'http://api.openweathermap.org/data/2.5/weather?%s&units=metric' % urlencode(params)
            r = requests.get(url)
            weather = json.loads(r.text)

            # Check if we had a failure (the forecast will fail in the same way).
            if weather['cod'] != 200:
                raise Exception(weather['message'])

            url = 'http://api.openweathermap.org/data/2.5/forecast?%s&units=metric' % urlencode(params)
            r = requests.get(url)
            forecast = json.loads(r.text)

            self.signals.result.emit(weather, forecast)

        except Exception as e:
            self.signals.error.emit(str(e))

        self.signals.finished.emit()


class MainWindow(QMainWindow, Ui_MainWindow):
    """Main window: accepts a location and shows current weather + forecast."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setupUi(self)

        self.pushButton.pressed.connect(self.update_weather)

        self.threadpool = QThreadPool()

        self.show()

    def alert(self, message):
        """Show a modal warning dialog with the given message."""
        QMessageBox.warning(self, "Warning", message)

    def update_weather(self):
        """Start a background request for the location typed in the line edit."""
        worker = WeatherWorker(self.lineEdit.text())
        worker.signals.result.connect(self.weather_result)
        worker.signals.error.connect(self.alert)
        self.threadpool.start(worker)

    def weather_result(self, weather, forecasts):
        """Populate the UI widgets from the OpenWeatherMap response dicts."""
        self.latitudeLabel.setText("%.2f °" % weather['coord']['lat'])
        self.longitudeLabel.setText("%.2f °" % weather['coord']['lon'])
        self.windLabel.setText("%.2f m/s" % weather['wind']['speed'])
        self.temperatureLabel.setText("%.1f °C" % weather['main']['temp'])
        self.pressureLabel.setText("%d" % weather['main']['pressure'])
        self.humidityLabel.setText("%d" % weather['main']['humidity'])
        self.sunriseLabel.setText(from_ts_to_time_of_day(weather['sys']['sunrise']))
        self.weatherLabel.setText("%s (%s)" % (
            weather['weather'][0]['main'],
            weather['weather'][0]['description']
        )
        )
        self.set_weather_icon(self.weatherIcon, weather['weather'])

        # Show the next five 3-hour forecast slots.
        for n, forecast in enumerate(forecasts['list'][:5], 1):
            getattr(self, 'forecastTime%d' % n).setText(from_ts_to_time_of_day(forecast['dt']))
            self.set_weather_icon(getattr(self, 'forecastIcon%d' % n), forecast['weather'])
            getattr(self, 'forecastTemp%d' % n).setText("%.1f °C" % forecast['main']['temp'])

    def set_weather_icon(self, label, weather):
        """Set label's pixmap to the icon image named by the weather code."""
        label.setPixmap(
            QPixmap(os.path.join('images', "%s.png" % weather[0]['icon']))
        )


if __name__ == '__main__':
    app = QApplication([])
    window = MainWindow()
    app.exec_()
1,815
572
from datetime import datetime

from peewee import (
    SqliteDatabase, Model, TextField, ForeignKeyField, DateTimeField,
    IntegerField
)

# Single SQLite database file shared by every model below.
db = SqliteDatabase('notas.db')


class BaseModel(Model):
    # Base class so all models bind to the same database connection.
    class Meta:
        database = db


class Pessoa(BaseModel):
    # A user: name, unique e-mail, password and age.
    # NOTE(review): `senha` looks like it holds the password as-is — confirm
    # callers hash it before saving.
    nome = TextField()
    email = TextField(unique=True)
    senha = TextField()
    idade = IntegerField()


class Grupo(BaseModel):
    # A note group owned by one person (`dona`).
    nome = TextField()
    dona = ForeignKeyField(Pessoa, backref='grupos')


class Nota(BaseModel):
    # A note, optionally belonging to a group (grupo is nullable).
    dona = ForeignKeyField(Pessoa, backref='notas')
    grupo = ForeignKeyField(Grupo, backref='notas', null=True, default=None)
    titulo = TextField()
    nota = TextField()
    criada_em = DateTimeField(default=datetime.now)
    # NOTE(review): this default is applied only at creation; peewee does not
    # refresh modificada_em on update — callers must set it explicitly.
    modificada_em = DateTimeField(default=datetime.now)


# Import-time side effect: create the tables if they do not already exist.
Pessoa.create_table()
db.create_tables([Grupo, Nota])
323
3,631
<reponame>kostola/drools /* * Copyright (c) 2020. Red Hat, Inc. and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.drools.mvel.asm; import org.kie.internal.utils.ChainedProperties; import static org.kie.memorycompiler.JavaConfiguration.JAVA_LANG_LEVEL_PROPERTY; import static org.mvel2.asm.Opcodes.V10; import static org.mvel2.asm.Opcodes.V11; import static org.mvel2.asm.Opcodes.V12; import static org.mvel2.asm.Opcodes.V1_5; import static org.mvel2.asm.Opcodes.V1_6; import static org.mvel2.asm.Opcodes.V1_7; import static org.mvel2.asm.Opcodes.V1_8; import static org.mvel2.asm.Opcodes.V9; public class ClassLevel { private static volatile int javaVersion = -1; public static int getJavaVersion(ClassLoader classLoader) { if (javaVersion < 0) { synchronized (ClassGenerator.class) { if (javaVersion < 0) { findJavaVersion(classLoader); } } } return javaVersion; } private static void findJavaVersion(ClassLoader classLoader) { ChainedProperties chainedProperties = ChainedProperties.getChainedProperties( classLoader ); if (chainedProperties.getProperty("drools.dialect.java", null) == null) { chainedProperties = ChainedProperties.getChainedProperties( ClassGenerator.class.getClassLoader() ); } javaVersion = findJavaVersion(chainedProperties); } public static int findJavaVersion(ChainedProperties chainedProperties) { String level = chainedProperties.getProperty(JAVA_LANG_LEVEL_PROPERTY, System.getProperty("java.version")); if ( level.startsWith( "1.5" ) ) { return V1_5; } else if ( level.startsWith( 
"1.6" ) ) { return V1_6; } else if ( level.startsWith( "1.7" ) ) { return V1_7; } else if ( level.startsWith( "1.8" ) ) { return V1_8; } else if ( level.startsWith( "9" ) ) { return V9; } else if ( level.startsWith( "10" ) ) { return V10; } else if ( level.startsWith( "11" ) ) { return V11; } else if ( level.startsWith( "12" ) ) { return V12; } else { return V1_8; } } }
1,203
1,026
import math

import numpy as np
import torch

from hivemind.compression.base import CompressionBase, CompressionInfo
from hivemind.proto import runtime_pb2


class Float16Compression(CompressionBase):
    """Lossy compression: clamp values into the float16 range and cast to float16."""

    compression_type = runtime_pb2.CompressionType.FLOAT16
    # Bounds of the representable float16 range; values outside are clamped
    # before the cast so nothing overflows to +-inf.
    FP16_MIN, FP16_MAX = torch.finfo(torch.float16).min, torch.finfo(torch.float16).max

    def compress(self, tensor: torch.Tensor, info: CompressionInfo, allow_inplace: bool = False) -> runtime_pb2.Tensor:
        """Serialize ``tensor`` as float16 bytes, recording its original dtype name."""
        dtype_name = tensor.numpy().dtype.name
        tensor = tensor.detach().cpu().float()
        # Only mutate the caller's tensor when explicitly allowed.
        tensor = tensor if allow_inplace else tensor.clone()
        tensor = tensor.clamp_(self.FP16_MIN, self.FP16_MAX).to(torch.float16)
        return runtime_pb2.Tensor(
            compression=self.compression_type,
            buffer=tensor.numpy().tobytes(),
            size=tensor.shape,
            dtype=dtype_name,
            requires_grad=tensor.requires_grad,
        )

    def extract(self, serialized_tensor: runtime_pb2.Tensor) -> torch.Tensor:
        """Rebuild a tensor from float16 bytes, converting back to the stored dtype."""
        original_dtype = np.dtype(serialized_tensor.dtype)
        array = np.frombuffer(serialized_tensor.buffer, dtype=np.float16)
        return torch.as_tensor(np.asarray(array, dtype=original_dtype)).reshape(tuple(serialized_tensor.size))

    def estimate_compression_ratio(self, info: CompressionInfo) -> float:
        # 16 bits per element after compression vs. the original element width.
        return 16.0 / get_num_bits(info.descriptor.dtype)


class ScaledFloat16Compression(Float16Compression):
    """A compression strategy that applies mean-std scaling over last axis before casting to float16"""

    compression_type = runtime_pb2.CompressionType.MEANSTD_16BIT
    FP32_BYTES = torch.finfo(torch.float32).bits // 8
    FP32_EPS = torch.finfo(torch.float32).eps

    def compress(self, tensor: torch.Tensor, info: CompressionInfo, allow_inplace: bool = False) -> runtime_pb2.Tensor:
        """Normalize over the last axis, then serialize.

        Buffer layout (see extract): [float16 payload | float32 means | float32 stds].
        """
        dtype_name = tensor.numpy().dtype.name
        tensor = tensor.detach().cpu().float()
        tensor = tensor if allow_inplace else tensor.clone()
        means = torch.mean(tensor, dim=-1, keepdim=True)
        tensor.sub_(means)
        # Per-row RMS over the last axis; clamped away from zero so the
        # division below cannot produce inf/nan.
        stds = tensor.norm(dim=-1, keepdim=True) / math.sqrt(tensor.shape[-1])
        stds.clamp_min_(self.FP32_EPS)
        tensor.div_(stds)
        tensor = tensor.clamp_(self.FP16_MIN, self.FP16_MAX).to(torch.float16)
        # Append the float32 stats after the float16 payload.
        data = b"".join((tensor.numpy().tobytes(), means.float().numpy().tobytes(), stds.float().numpy().tobytes()))
        return runtime_pb2.Tensor(
            compression=self.compression_type,
            buffer=data,
            size=tensor.shape,
            dtype=dtype_name,
            requires_grad=tensor.requires_grad,
        )

    def extract(self, serialized_tensor: runtime_pb2.Tensor) -> torch.Tensor:
        """Rebuild a tensor and undo the mean-std normalization from compress()."""
        # Means/stds share the payload shape with the last axis collapsed to 1.
        stats_shape = list(serialized_tensor.size)
        stats_shape[-1] = 1
        stats_count = np.prod(stats_shape)
        # Stats sit at the tail of the buffer (means first, then stds) —
        # compute their offsets backwards from the buffer end.
        means_offset = len(serialized_tensor.buffer) - 2 * stats_count * self.FP32_BYTES
        stds_offset = len(serialized_tensor.buffer) - stats_count * self.FP32_BYTES
        array = np.frombuffer(serialized_tensor.buffer, dtype=np.float16, count=np.prod(serialized_tensor.size))
        means = np.frombuffer(serialized_tensor.buffer, dtype=np.float32, offset=means_offset, count=stats_count)
        stds = np.frombuffer(serialized_tensor.buffer, dtype=np.float32, offset=stds_offset, count=stats_count)
        means = torch.as_tensor(means).reshape(stats_shape)
        stds = torch.as_tensor(stds).reshape(stats_shape)
        tensor = torch.as_tensor(np.asarray(array, dtype=serialized_tensor.dtype)).reshape(
            list(serialized_tensor.size)
        )
        # Invert the normalization: x = x_norm * std + mean (broadcast over last axis).
        return tensor.mul_(stds).add_(means)


def get_num_bits(dtype: torch.dtype) -> int:
    """Return the number of bits used to store one element of ``dtype``."""
    if dtype == torch.bool:
        return 8  # see https://github.com/pytorch/pytorch/issues/41571
    elif dtype.is_floating_point:
        return torch.finfo(dtype).bits
    else:
        try:
            return torch.iinfo(dtype).bits
        except TypeError:
            raise TypeError(f"Could not infer size for tensor type {dtype}")
1,755
1,781
<filename>src/chapter1/section4/Exercise14_4Sum.java package chapter1.section4; import edu.princeton.cs.algs4.StdOut; import java.util.*; /** * Created by <NAME> on 9/29/16. */ public class Exercise14_4Sum { public static void main(String[] args) { // Method 1 StdOut.println("Method 1"); int[] array1 = {5, 2, -2, -5, -2}; StdOut.println("4 sum: " + fourSum(array1)); StdOut.println("Expected: 2"); int[] array2 = {1, 2, 3, 4, -4, -5, -6, 2, 4, -1}; StdOut.println("4 sum: " + fourSum(array2)); StdOut.println("Expected: 13"); // Method 2 StdOut.println("\nMethod 2"); StdOut.println("4 sum: " + fourSum2(array1)); StdOut.println("Expected: 2"); StdOut.println("4 sum: " + fourSum2(array2)); StdOut.println("Expected: 13"); } private static class Pair { int index1; int index2; Pair(int index1, int index2) { this.index1 = index1; this.index2 = index2; } } // O(n^3) private static int fourSum(int[] array) { Map<Integer, List<Pair>> sumMap = new HashMap<>(); for (int i = 0; i < array.length; i++) { for (int j = i + 1; j < array.length; j++) { int sum = array[i] + array[j]; if (!sumMap.containsKey(sum)) { sumMap.put(sum, new ArrayList<>()); } sumMap.get(sum).add(new Pair(i, j)); } } int count = 0; for (int key : sumMap.keySet()) { if (sumMap.containsKey(-key)) { List<Pair> pairs = sumMap.get(key); List<Pair> pairsComplement = sumMap.get(-key); for (Pair pair1 : pairs) { for (Pair pair2 : pairsComplement) { if (pair1.index2 < pair2.index1) { count++; } } } } } return count; } // O(n^3 lg n) private static int fourSum2(int[] array) { Arrays.sort(array); int count = 0; for(int i = 0; i < array.length; i++) { for (int j = i + 1; j < array.length; j++) { for(int k = j + 1; k < array.length; k++) { int searchElement = -1 * (array[i] + array[j] + array[k]); int elementIndexLeft = binarySearch(array, searchElement, 0, array.length - 1, true); if (elementIndexLeft == -1) { continue; } int elementIndexRight = binarySearch(array, searchElement, 0, array.length - 1, false); if 
(elementIndexLeft < k + 1) { if (elementIndexRight >= k +1) { elementIndexLeft = k + 1; } else { continue; } } // Debug // for (int d = elementIndexLeft; d <= elementIndexRight; d++) { // StdOut.println(array[i] + " " + array[j] + " " + array[k] + " " + array[d]); // } count += elementIndexRight - elementIndexLeft + 1; } } } return count; } private static int binarySearch(int[] array, int target, int low, int high, boolean searchLow) { if (low > high) { return -1; } int middle = low + (high - low) / 2; if (array[middle] > target) { return binarySearch(array, target, low, middle - 1, searchLow); } else if (array[middle] < target) { return binarySearch(array, target, middle + 1, high, searchLow); } else { int nextIndex; if (searchLow) { nextIndex = binarySearch(array, target, low, middle - 1, true); } else { nextIndex = binarySearch(array, target, middle + 1, high, false); } if (nextIndex != -1) { return nextIndex; } else { return middle; } } } }
2,341
11,356
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
*  http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/

// NOTE(review): this header follows the AWS SDK code-generator layout —
// presumably generated; prefer regenerating over hand-editing. Confirm
// against the SDK build before modifying.

#pragma once
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/model/JSONType.h>
#include <utility>

namespace Aws
{
namespace Utils
{
namespace Xml
{
  class XmlNode;
} // namespace Xml
} // namespace Utils
namespace S3
{
namespace Model
{

  /**
   * <p>Specifies JSON as object's input serialization format.</p><p><h3>See
   * Also:</h3> <a
   * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONInput">AWS API
   * Reference</a></p>
   */
  class AWS_S3_API JSONInput
  {
  public:
    JSONInput();
    // Construct from / assign from an XML node of a request or response body.
    JSONInput(const Aws::Utils::Xml::XmlNode& xmlNode);
    JSONInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);

    // Serializes this object as a child of parentNode.
    void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;


    /**
     * <p>The type of JSON. Valid values: Document, Lines.</p>
     */
    inline const JSONType& GetType() const{ return m_type; }

    /**
     * <p>True when SetType/WithType has been called. Valid values: Document,
     * Lines.</p>
     */
    inline bool TypeHasBeenSet() const { return m_typeHasBeenSet; }

    /**
     * <p>The type of JSON. Valid values: Document, Lines.</p>
     */
    inline void SetType(const JSONType& value) { m_typeHasBeenSet = true; m_type = value; }

    /**
     * <p>The type of JSON (move overload). Valid values: Document, Lines.</p>
     */
    inline void SetType(JSONType&& value) { m_typeHasBeenSet = true; m_type = std::move(value); }

    /**
     * <p>Fluent setter for the type of JSON. Valid values: Document, Lines.</p>
     */
    inline JSONInput& WithType(const JSONType& value) { SetType(value); return *this;}

    /**
     * <p>Fluent move setter for the type of JSON. Valid values: Document,
     * Lines.</p>
     */
    inline JSONInput& WithType(JSONType&& value) { SetType(std::move(value)); return *this;}

  private:

    JSONType m_type;
    bool m_typeHasBeenSet;
  };

} // namespace Model
} // namespace S3
} // namespace Aws
874
377
#ifndef PRECOMPILED_FOR_PLUGIN_H
#define PRECOMPILED_FOR_PLUGIN_H

// Precompiled header for the plugin: common standard-library includes.
#include <iostream>
#include <vector>
#include <memory>
#include <string>
#include <map>
#include <functional>

// NOTE(review): `using namespace std;` in a header leaks the std namespace
// into every translation unit that includes it. Removing it would require
// qualifying std:: at existing use sites, so it is left unchanged here.
using namespace std;

#endif
82
939
/*Copyright 2020 Google LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    https://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "research/carls/knowledge_bank/initializer_helper.h"

#include "absl/base/const_init.h"
#include "absl/base/thread_annotations.h"
#include "absl/random/random.h"
#include "absl/synchronization/mutex.h"
#include "research/carls/embedding.pb.h"  // proto to pb

namespace carls {

// Checks that `initializer` specifies a supported scheme with parameters
// consistent with `embedding_dimension`. Returns OkStatus on success and
// InvalidArgumentError with a descriptive message otherwise.
absl::Status ValidateInitializer(const int embedding_dimension,
                                 const EmbeddingInitializer& initializer) {
  if (initializer.has_default_embedding()) {
    // A fixed default embedding must match the expected dimension exactly.
    if (embedding_dimension != initializer.default_embedding().value_size()) {
      return absl::InvalidArgumentError(
          absl::StrCat("Inconsistent dimension of default_embedding: ",
                       initializer.default_embedding().value_size(),
                       ", expect ", embedding_dimension));
    }
    return absl::OkStatus();
  }
  if (initializer.has_zero_initializer()) {
    return absl::OkStatus();
  } else if (initializer.has_random_uniform_initializer()) {
    // The uniform range (low, high) must be non-empty.
    const auto& init = initializer.random_uniform_initializer();
    if (init.high() <= init.low()) {
      return absl::InvalidArgumentError(absl::StrCat(
          "Invalid (low, high) pair: (", init.low(), ", ", init.high(), ")"));
    }
    return absl::OkStatus();
  } else if (initializer.has_random_normal_initializer()) {
    const auto& init = initializer.random_normal_initializer();
    if (init.stddev() <= 0) {
      return absl::InvalidArgumentError("stddev should be greater than 0.");
    }
    return absl::OkStatus();
  }
  return absl::InvalidArgumentError(absl::StrCat(
      "Initializer is not supported: ", initializer.DebugString()));
}

// Builds a new embedding of `embedding_dimension` values according to
// `initializer`, drawing randomness from a locally constructed absl::BitGen
// (i.e. non-deterministic across calls). Unsupported initializer kinds are
// fatal — callers are expected to have run ValidateInitializer first.
EmbeddingVectorProto InitializeEmbedding(
    const int embedding_dimension,
    const EmbeddingInitializer& initializer) {
  if (initializer.has_default_embedding()) {
    // NOTE(review): the default embedding is returned as-is; its dimension is
    // only checked by ValidateInitializer — confirm callers validate first.
    return initializer.default_embedding();
  }
  EmbeddingVectorProto result;
  result.mutable_value()->Reserve(embedding_dimension);
  if (initializer.has_zero_initializer()) {
    for (int i = 0; i < embedding_dimension; ++i) {
      result.add_value(0.0f);
    }
    return result;
  }
  if (initializer.has_random_uniform_initializer()) {
    absl::BitGen bit_gen;
    const auto& init = initializer.random_uniform_initializer();
    for (int i = 0; i < embedding_dimension; ++i) {
      result.add_value(absl::Uniform<float>(bit_gen, init.low(), init.high()));
    }
    return result;
  }
  if (initializer.has_random_normal_initializer()) {
    absl::BitGen bit_gen;
    const auto& init = initializer.random_normal_initializer();
    for (int i = 0; i < embedding_dimension; ++i) {
      result.add_value(
          absl::Gaussian<double>(bit_gen, init.mean(), init.stddev()));
    }
    return result;
  }
  LOG(FATAL) << "Initializer is not supported: " << initializer.DebugString();
  return result;
}

// Same as InitializeEmbedding() but draws from the caller-provided `engine`,
// guarding it with `mu` so a shared (possibly deterministically seeded)
// engine can be used from multiple threads. Both pointers must be non-null.
EmbeddingVectorProto InitializeEmbeddingWithSeed(
    const int embedding_dimension, const EmbeddingInitializer& initializer,
    RandomEngine* engine, absl::Mutex* mu) {
  CHECK(engine != nullptr);
  CHECK(mu != nullptr);
  if (initializer.has_default_embedding()) {
    return initializer.default_embedding();
  }
  EmbeddingVectorProto result;
  result.mutable_value()->Reserve(embedding_dimension);
  if (initializer.has_zero_initializer()) {
    for (int i = 0; i < embedding_dimension; ++i) {
      result.add_value(0.0f);
    }
    return result;
  }
  if (initializer.has_random_uniform_initializer()) {
    // Hold the lock for the whole fill so draws from a shared engine are
    // serialized.
    absl::MutexLock l(mu);
    const auto& init = initializer.random_uniform_initializer();
    std::uniform_real_distribution<float> distribution(init.low(),
                                                       init.high());
    for (int i = 0; i < embedding_dimension; ++i) {
      result.add_value(distribution(*engine));
    }
    return result;
  }
  if (initializer.has_random_normal_initializer()) {
    absl::MutexLock l(mu);
    const auto& init = initializer.random_normal_initializer();
    std::normal_distribution<double> distribution(init.mean(), init.stddev());
    for (int i = 0; i < embedding_dimension; ++i) {
      result.add_value(distribution(*engine));
    }
    return result;
  }
  LOG(FATAL) << "Initializer is not supported: " << initializer.DebugString();
  return result;
}

}  // namespace carls
1,753
448
//
// Created by SuperMan on 2020/10/19.
//

#include <utils/Android/FindClass.h>
#include <utils/Android/NewStringUTF.h>
#include <utils/Android/NewByteArray.h>
#include <utils/Android/NewLinkedList.h>
#include "JEncryptionInfo.h"

// Cached global class ref and member IDs for
// com.cicada.player.utils.media.EncryptionInfo (populated by init()).
jclass jEncryptionInfo_class = nullptr;
jmethodID jEncryptionInfo_init = nullptr;
jfieldID jEncryptionInfo_crypt_byte_block = nullptr;
jfieldID jEncryptionInfo_skip_byte_block = nullptr;
jmethodID jEncryptionInfo_setScheme = nullptr;
jmethodID jEncryptionInfo_setKeyId = nullptr;
jmethodID jEncryptionInfo_setIv = nullptr;
jmethodID jEncryptionInfo_setSubsamples = nullptr;

// Cached global class ref and member IDs for
// com.cicada.player.utils.media.SubsampleEncryptionInfo.
jclass jSubsampleEncryptionInfo_class = nullptr;
jmethodID jSubsampleEncryptionInfo_init = nullptr;
jfieldID jSubsampleEncryptionInfo_bytes_of_clear_data = nullptr;
jfieldID jSubsampleEncryptionInfo_bytes_of_protected_data = nullptr;

// Looks up and caches the Java classes, constructors, field IDs and setter
// method IDs used by convert(). Idempotent: lookups run only while the
// cached class refs are null.
void JEncryptionInfo::init(JNIEnv *env) {
    if (jEncryptionInfo_class == nullptr) {
        FindClass infoClass(env, "com/cicada/player/utils/media/EncryptionInfo");
        // Global ref keeps the class alive across JNI calls until unInit().
        jEncryptionInfo_class = (jclass) (env->NewGlobalRef(infoClass.getClass()));
        jEncryptionInfo_init = env->GetMethodID(jEncryptionInfo_class, "<init>", "()V");
        jEncryptionInfo_setScheme = env->GetMethodID(jEncryptionInfo_class, "setScheme",
                                                     "(Ljava/lang/String;)V");
        jEncryptionInfo_crypt_byte_block = env->GetFieldID(jEncryptionInfo_class,
                                                           "crypt_byte_block", "I");
        jEncryptionInfo_skip_byte_block = env->GetFieldID(jEncryptionInfo_class,
                                                          "skip_byte_block", "I");
        jEncryptionInfo_setKeyId = env->GetMethodID(jEncryptionInfo_class, "setKeyId", "([B)V");
        jEncryptionInfo_setIv = env->GetMethodID(jEncryptionInfo_class, "setIv", "([B)V");
        jEncryptionInfo_setSubsamples = env->GetMethodID(jEncryptionInfo_class, "setSubsamples",
                                                         "(Ljava/lang/Object;)V");
    }

    if (jSubsampleEncryptionInfo_class == nullptr) {
        FindClass infoClass(env, "com/cicada/player/utils/media/SubsampleEncryptionInfo");
        jSubsampleEncryptionInfo_class = (jclass) (env->NewGlobalRef(infoClass.getClass()));
        jSubsampleEncryptionInfo_init = env->GetMethodID(jSubsampleEncryptionInfo_class, "<init>",
                                                         "()V");
        jSubsampleEncryptionInfo_bytes_of_clear_data = env->GetFieldID(
                jSubsampleEncryptionInfo_class, "bytes_of_clear_data", "I");
        jSubsampleEncryptionInfo_bytes_of_protected_data = env->GetFieldID(
                jSubsampleEncryptionInfo_class, "bytes_of_protected_data", "I");
    }
}

// Releases the cached global class references; method/field IDs become
// invalid along with them.
void JEncryptionInfo::unInit(JNIEnv *env) {
    if (jEncryptionInfo_class != nullptr) {
        env->DeleteGlobalRef(jEncryptionInfo_class);
        jEncryptionInfo_class = nullptr;
    }

    if (jSubsampleEncryptionInfo_class != nullptr) {
        env->DeleteGlobalRef(jSubsampleEncryptionInfo_class);
        jSubsampleEncryptionInfo_class = nullptr;
    }
}

// Builds a Java EncryptionInfo object mirroring the native `info`, including
// the optional list of subsample ranges. Caller owns the returned local ref.
// Precondition: init() has populated the cached IDs above.
jobject JEncryptionInfo::convert(JNIEnv *env, IAFPacket::EncryptionInfo *info) {
    jobject jEncryptionInfo = env->NewObject(jEncryptionInfo_class, jEncryptionInfo_init);

    NewStringUTF scheme(env, info->scheme.c_str());
    env->CallVoidMethod(jEncryptionInfo, jEncryptionInfo_setScheme, scheme.getString());
    env->SetIntField(jEncryptionInfo, jEncryptionInfo_crypt_byte_block, info->crypt_byte_block);
    env->SetIntField(jEncryptionInfo, jEncryptionInfo_skip_byte_block, info->skip_byte_block);

    NewByteArray key(env, info->key_id, info->key_id_size);
    env->CallVoidMethod(jEncryptionInfo, jEncryptionInfo_setKeyId, key.getArray());

    NewByteArray iv(env, info->iv, info->iv_size);
    env->CallVoidMethod(jEncryptionInfo, jEncryptionInfo_setIv, iv.getArray());

    if (info->subsample_count > 0) {
        NewLinkedList subsmaplesList(env);

        for (auto &subsampeInfo : info->subsamples) {
            jobject jSubSampleInfo = env->NewObject(jSubsampleEncryptionInfo_class,
                                                    jSubsampleEncryptionInfo_init);
            env->SetIntField(jSubSampleInfo, jSubsampleEncryptionInfo_bytes_of_clear_data,
                             subsampeInfo.bytes_of_clear_data);
            env->SetIntField(jSubSampleInfo, jSubsampleEncryptionInfo_bytes_of_protected_data,
                             subsampeInfo.bytes_of_protected_data);
            subsmaplesList.add(jSubSampleInfo);
            env->DeleteLocalRef(jSubSampleInfo);
        }

        // NOTE(review): pJobject (the list local ref) is not deleted after the
        // setter call — presumably fine for a short-lived call, but verify no
        // local-reference-table pressure on hot paths.
        jobject pJobject = subsmaplesList.getList();
        env->CallVoidMethod(jEncryptionInfo, jEncryptionInfo_setSubsamples, pJobject);
    }

    return jEncryptionInfo;
}
2,125
322
/* This file was automatically generated by CasADi.
   The CasADi copyright holders make no ownership claim of its contents. */
#ifdef __cplusplus
extern "C" {
#endif

/* Default scalar and index types used by CasADi-generated code; may be
   overridden by defining casadi_real / casadi_int before including. */
#ifndef casadi_real
#define casadi_real double
#endif

#ifndef casadi_int
#define casadi_int int
#endif

/* Evaluate the generated "position" function: arg/res are arrays of
   input/output buffers, iw/w are integer and real work vectors (sizes from
   position_work), mem is the memory handle. */
int position(const casadi_real** arg, casadi_real** res, casadi_int* iw, casadi_real* w, void* mem);

/* Reference-count management for the function's shared state. */
void position_incref(void);
void position_decref(void);

/* Number of inputs and outputs. */
casadi_int position_n_in(void);
casadi_int position_n_out(void);

/* Name of the i-th input / output. */
const char* position_name_in(casadi_int i);
const char* position_name_out(casadi_int i);

/* Sparsity pattern of the i-th input / output. */
const casadi_int* position_sparsity_in(casadi_int i);
const casadi_int* position_sparsity_out(casadi_int i);

/* Required sizes of the arg/res pointer arrays and iw/w work vectors. */
int position_work(casadi_int *sz_arg, casadi_int* sz_res, casadi_int *sz_iw, casadi_int *sz_w);

#ifdef __cplusplus
} /* extern "C" */
#endif
311
342
/*
 * Copyright 1999-2019 Alibaba Group Holding Ltd.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.alibaba.chaosblade.exec.plugin.jvm.script.base;

import java.util.Map;
import java.util.concurrent.atomic.AtomicBoolean;

import com.alibaba.chaosblade.exec.common.util.ObjectsUtil;
import com.alibaba.chaosblade.exec.plugin.jvm.script.base.cache.LruScriptCache;
import com.alibaba.chaosblade.exec.plugin.jvm.script.base.cache.ScriptCache;
import com.alibaba.chaosblade.exec.plugin.jvm.script.base.finder.ScriptEngineFinder;

/**
 * Base implementation of {@link ScriptEngineService}: compiles scripts via a
 * language-specific {@link ScriptEngine}, caches compiled results by script id
 * (invalidated by signature change), and tracks compilation metrics.
 * {@link #initialize()} must be called before {@link #compile} or
 * {@link #execute}.
 *
 * @author RinaisSuper
 */
public abstract class AbstractScriptEngineService implements ScriptEngineService {

    /** Default capacity of the LRU compiled-script cache. */
    private static final int CACHE_SIZE = 100;

    private ScriptMetrics scriptMetrics;

    private ScriptEngineFinder scriptEngineFinder;

    private ScriptCache<String, CompiledScript> scriptCache;

    /** Guards one-time initialization of metrics, finder and cache. */
    private AtomicBoolean initialized = new AtomicBoolean(false);

    public AbstractScriptEngineService() {
    }

    public void setScriptMetrics(ScriptMetrics scriptMetrics) {
        this.scriptMetrics = scriptMetrics;
    }

    public void setScriptEngineFinder(ScriptEngineFinder scriptEngineFinder) {
        this.scriptEngineFinder = scriptEngineFinder;
    }

    public void setScriptCache(ScriptCache<String, CompiledScript> scriptCache) {
        this.scriptCache = scriptCache;
    }

    /**
     * Compiles the given script, reusing the cached compilation when the
     * script id is cached AND the signature is unchanged; otherwise
     * recompiles (which also refreshes the cache entry).
     *
     * @throws IllegalStateException if {@link #initialize()} was not called
     */
    @Override
    public CompiledScript compile(ClassLoader classLoader, Script script, Map<String, String> config) {
        checkInitialized();
        ObjectsUtil.requireNonNull(script);
        ObjectsUtil.requireNonNull(script.getId());
        ObjectsUtil.requireNonNull(script.getContent());
        ObjectsUtil.requireNonNull(script.getSignature());
        ObjectsUtil.requireNonNull(script.getLanguage());
        CompiledScript compiledScript = scriptCache.get(script.getId());
        if (compiledScript != null && compiledScript.getSignature().equals(script.getSignature())) {
            return compiledScript;
        }
        return doCompile(classLoader, script, config);
    }

    /**
     * Compiles unconditionally via the language's engine, caches the result
     * under the script id and records the compilation in the metrics.
     */
    protected CompiledScript doCompile(ClassLoader classLoader, Script script, Map<String, String> options) {
        String scriptId = script.getId();
        ScriptEngine scriptEngine = getScriptEngineForLang(script.getLanguage());
        Object compiledObject = scriptEngine.compile(script, classLoader, options);
        CompiledScript compiledScript = new CompiledScript(scriptId, script.getLanguage(), compiledObject,
            script.getName(), script.getSignature());
        scriptCache.put(scriptId, compiledScript);
        scriptMetrics.incrCompiledScript();
        return compiledScript;
    }

    /**
     * Executes a previously compiled script with the given parameters.
     *
     * @throws IllegalStateException if {@link #initialize()} was not called
     */
    @Override
    public ExecutableScript execute(CompiledScript compiledScript, Map<String, Object> params) {
        checkInitialized();
        return getScriptEngineForLang(compiledScript.getLanguage()).execute(compiledScript, params);
    }

    /** Resolves the engine for a language or fails with a clear message. */
    private ScriptEngine getScriptEngineForLang(String lang) {
        ScriptEngine scriptEngine = scriptEngineFinder.findByLang(lang);
        if (scriptEngine == null) {
            throw new IllegalArgumentException("Script language not supported [" + lang + "]");
        }
        return scriptEngine;
    }

    /** One-time setup; safe to call more than once. */
    @Override
    public void initialize() {
        if (initialized.compareAndSet(false, true)) {
            initScriptMetrics();
            initScriptEngineFinder();
            initScriptCache();
        }
    }

    // Each init* method only fills in a default when no collaborator was
    // injected via the setters above.

    private void initScriptCache() {
        if (this.scriptCache == null) {
            this.scriptCache = new LruScriptCache<String, CompiledScript>(CACHE_SIZE);
        }
    }

    private void initScriptEngineFinder() {
        if (scriptEngineFinder == null) {
            this.scriptEngineFinder = new ServiceProviderScriptEngineFinder(this);
        }
    }

    private void initScriptMetrics() {
        if (scriptMetrics == null) {
            scriptMetrics = new DefaultScriptMetrics();
        }
    }

    /** Drops a single cached compilation; returns whether an entry existed. */
    @Override
    public boolean cleanCompiledScript(String scriptId) {
        return this.scriptCache.evict(scriptId);
    }

    @Override
    public void cleanAllCompiledScripts() {
        this.scriptCache.clean();
    }

    private void checkInitialized() {
        if (!initialized.get()) {
            throw new IllegalStateException("Script engine service must initialize first");
        }
    }
}
1,794
377
/*******************************************************************************
 * * Copyright 2012 Impetus Infotech.
 * *
 * * Licensed under the Apache License, Version 2.0 (the "License");
 * * you may not use this file except in compliance with the License.
 * * You may obtain a copy of the License at
 * *
 * *      http://www.apache.org/licenses/LICENSE-2.0
 * *
 * * Unless required by applicable law or agreed to in writing, software
 * * distributed under the License is distributed on an "AS IS" BASIS,
 * * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * * See the License for the specific language governing permissions and
 * * limitations under the License.
 ******************************************************************************/
package com.impetus.client.redis;

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.impetus.kundera.configure.AbstractPropertyReader;
import com.impetus.kundera.configure.ClientProperties;
import com.impetus.kundera.configure.ClientProperties.DataStore;
import com.impetus.kundera.configure.PropertyReader;
import com.impetus.kundera.metadata.model.PersistenceUnitMetadata;

/**
 * Property reader responsible for: a) Reads property file (xml or .properties)
 *
 * @author vivek.mishra
 */
public class RedisPropertyReader extends AbstractPropertyReader implements PropertyReader
{
    /** log instance */
    private static Logger log = LoggerFactory.getLogger(RedisPropertyReader.class);

    /** Redis schema metadata instance (comment previously mislabelled this "MongoDB"). */
    public static RedisSchemaMetadata rsmd;

    public RedisPropertyReader(Map externalProperties, final PersistenceUnitMetadata puMetadata)
    {
        super(externalProperties, puMetadata);
        rsmd = new RedisSchemaMetadata();
    }

    /*
     * (non-Javadoc)
     *
     * @see
     * com.impetus.kundera.configure.AbstractPropertyReader#onXml(com.impetus
     * .kundera.configure.ClientProperties)
     */
    @Override
    protected void onXml(ClientProperties cp)
    {
        if (cp != null)
        {
            rsmd.setClientProperties(cp);
        }
    }

    /**
     * Holds the redis connection settings (host, port, password and any extra
     * connection properties) parsed from the client configuration.
     */
    public class RedisSchemaMetadata
    {
        private static final String PORT = "port";

        private static final String HOST = "host";

        private ClientProperties clientProperties;

        private HashMap<String, String> properties = new HashMap<String, String>();

        private String host;

        private String port;

        public RedisSchemaMetadata()
        {
        }

        /**
         * @param clientProperties
         *            the clientProperties to set
         */
        private void setClientProperties(ClientProperties clientProperties)
        {
            this.clientProperties = clientProperties;
            properties = initializeProperties();
            this.host = properties.get(HOST);
            this.port = properties.get(PORT);
        }

        /**
         * Returns the remaining connection properties (host/port removed) as
         * an unmodifiable view.
         */
        public Map<String, String> getProperties()
        {
            properties.remove(HOST);
            properties.remove(PORT);
            return Collections.unmodifiableMap(properties);
        }

        public String getHost()
        {
            return host;
        }

        public String getPort()
        {
            return port;
        }

        public String getPassword()
        {
            return properties.get("requirepass");
        }

        /**
         * Extracts the "redis" datastore's connection properties from the
         * client configuration.
         *
         * BUGFIX: previously returned {@code null} when no "redis" datastore
         * was configured, which caused a NullPointerException in
         * {@link #setClientProperties(ClientProperties)}; an empty map is
         * returned instead.
         */
        private HashMap<String, String> initializeProperties()
        {
            if (clientProperties != null && clientProperties.getDatastores() != null)
            {
                for (DataStore dataStore : clientProperties.getDatastores())
                {
                    if (dataStore.getName() != null && dataStore.getName().trim().equalsIgnoreCase("redis"))
                    {
                        return new HashMap(dataStore.getConnection().getProperties());
                    }
                }
            }
            return new HashMap<String, String>();
        }
    }
}
1,636
1,738
/* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ // Original file Copyright Crytek GMBH or its affiliates, used under license. #pragma once #include "CloudParticle.h" #include "CloudComponentRenderNode.h" namespace CloudsGem { /** * CloudVolumeSprite */ class CloudVolumeSprite : public ICloudVolume { public: CloudVolumeSprite(); virtual ~CloudVolumeSprite(); const AABB& GetBoundingBox() const override { return m_worldBoundingBox; } MaterialPtr GetMaterial() override { return m_material; } void SetBoundingBox(const AABB& boundingBox) override { m_worldBoundingBox = boundingBox; } void SetMaterial(MaterialPtr material) override { m_material = material; } void SetDensity(float density) { /* not used for sprite cloud*/ } void Update(const Matrix34& worldMatrix, const Vec3& offset) override; void Refresh(CloudParticleData& cloudData, const Matrix34& worldMatrix) override; void Render(const struct SRendParams& rParams, const SRenderingPassInfo& passInfo, float alpha, int isAfterWater) override; // Members AABB m_localBoundingBox; AABB m_worldBoundingBox; Vec3 m_origin{ 0.0f, 0.0f, 0.0f }; Matrix34 m_worldMatrix; _smart_ptr<IMaterial> m_material{ nullptr }; CloudRenderElement* m_renderElement{ nullptr }; CloudImposterRenderElement* m_pREImposter{ nullptr }; }; }
670
1,799
<reponame>sidharta/cucumber-jvm<gh_stars>1000+ package io.cucumber.core.runner; import io.cucumber.core.stepexpression.ExpressionArgument; import io.cucumber.plugin.event.Argument; import java.util.ArrayList; import java.util.Collection; import java.util.List; import java.util.stream.Collectors; final class DefinitionArgument implements Argument { private final ExpressionArgument argument; private final io.cucumber.cucumberexpressions.Group group; private DefinitionArgument(ExpressionArgument argument) { this.argument = argument; this.group = argument.getGroup(); } static List<Argument> createArguments(List<io.cucumber.core.stepexpression.Argument> match) { List<Argument> args = new ArrayList<>(); for (io.cucumber.core.stepexpression.Argument argument : match) { if (argument instanceof ExpressionArgument) { ExpressionArgument expressionArgument = (ExpressionArgument) argument; args.add(new DefinitionArgument(expressionArgument)); } } return args; } @Override public String getParameterTypeName() { return argument.getParameterTypeName(); } @Override public String getValue() { return group == null ? null : group.getValue(); } @Override public int getStart() { return group == null ? -1 : group.getStart(); } @Override public int getEnd() { return group == null ? -1 : group.getEnd(); } @Override public io.cucumber.plugin.event.Group getGroup() { return group == null ? 
null : new Group(group); } private static final class Group implements io.cucumber.plugin.event.Group { private final io.cucumber.cucumberexpressions.Group group; private final List<io.cucumber.plugin.event.Group> children; private Group(io.cucumber.cucumberexpressions.Group group) { this.group = group; children = group.getChildren().stream() .map(Group::new) .collect(Collectors.toList()); } @Override public Collection<io.cucumber.plugin.event.Group> getChildren() { return children; } @Override public String getValue() { return group.getValue(); } @Override public int getStart() { return group.getStart(); } @Override public int getEnd() { return group.getEnd(); } } }
1,055
2,644
#pragma once #define __NVIC_PRIO_BITS 3 #define DEFIRQ(x) x##_IRQn, typedef enum { Reset_IRQn = -15, NonMaskableInt_IRQn = -14, HardFault_IRQn = -13, MemoryManagement_IRQn = -12, BusFault_IRQn = -11, UsageFault_IRQn = -10, SVCall_IRQn = -5, DebugMonitor_IRQn = -4, PendSV_IRQn = -2, SysTick_IRQn = -1, #include <platform/defirq.h> } IRQn_Type; #undef DEFIRQ
220
2,360
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RProcessx(RPackage): """Execute and Control System Processes Tools to run system processes in the background. It can check if a background process is running; wait on a background process to finish; get the exit status of finished processes; kill background processes. It can read the standard output and error of the processes, using non-blocking connections. 'processx' can poll a process for standard output or error, with a timeout. It can also poll several processes at once.""" homepage = "https://github.com/r-lib/processx" cran = "processx" version('3.5.2', sha256='ed6f2d1047461c6061e6ed58fb6de65a289b56009867892abad76c6bba46fc2b') version('3.4.5', sha256='e368103aa6a6894bfa8e78b12a25598464bcd2c19a8b6334f24ee397db13bb14') version('3.4.1', sha256='f1abddb48fa78f2b176552e2ec5d808d4d87d79ce72e9b3d25c9a7d715bbd1bc') version('3.3.1', sha256='6123dbdf9f3bb6e5e8678980fb4587dcefb56d2190adf2ef494d7cd199720bae') version('3.2.0', sha256='c4ba602fcbdc032ae9d94701b3e6b83a2dab1b53d0b4f9937b07a84eae22fddf') version('3.1.0', sha256='11ac120ab4e4aa0e99c9b2eda87d07bc683bab735f1761e95e5ddacd311b5972') version('3.0.3', sha256='53781dba3c538605a02e28b3b577e7de79e2064bfc502025f7ec0e5945e302bf') version('2.0.0.1', sha256='8f61b2952d0f2d13c74465bfba174ce11eee559475c2f7b9be6bcb9e2e1d827b') version('2.0.0', sha256='8325b56a60a276909228756281523cda9256bc754c5f3ca03b41c5c17cc398ad') depends_on('[email protected]:', when='@3.2.0:', type=('build', 'run')) depends_on('r-r6', type=('build', 'run')) depends_on('r-assertthat', when='@:3.2.9', type=('build', 'run')) depends_on('r-crayon', when='@:3.2.9', type=('build', 'run')) depends_on('r-debugme', when='@:3.0.9', type=('build', 'run'))
903
465
<gh_stars>100-1000 /* * Copyright (c) 2018 Uber Technologies, Inc. * Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated * documentation files (the "Software"), to deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO * THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ package com.uber.marmaray.common.configuration; import com.google.common.base.Preconditions; import com.uber.hoodie.config.HoodieIndexConfig; import com.uber.hoodie.index.HoodieIndex; import com.uber.hoodie.index.HoodieIndex.IndexType; import com.uber.marmaray.common.exceptions.JobRuntimeException; import com.uber.marmaray.utilities.StringTypes; import lombok.Getter; import lombok.NonNull; import lombok.extern.slf4j.Slf4j; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HTableDescriptor; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.io.compress.Compression; import org.apache.hadoop.hbase.regionserver.BloomType; import org.apache.hadoop.hbase.util.Bytes; import org.hibernate.validator.constraints.NotEmpty; import java.io.IOException; @Slf4j public class HoodieIndexConfiguration extends HoodieConfiguration { // Hoodie Index config public static final String HOODIE_INDEX_PROPERTY_PREFIX = HoodieConfiguration.HOODIE_COMMON_PROPERTY_PREFIX + "index."; /** * Hoodie index types */ public static final String HOODIE_BLOOM_INDEX = "bloom"; public static final String HOODIE_HBASE_INDEX = "hbase"; public static final String HOODIE_IN_MEMORY_INDEX = "in_memory"; public static final String HOODIE_HBASE_INDEX_PREFIX = "hbase."; public static final String HOODIE_INDEX_TYPE = HOODIE_INDEX_PROPERTY_PREFIX + "type"; public static final String HOODIE_INDEX_ZKNODE = "zknode."; public static final String DEFAULT_HOODIE_INDEX_TYPE = HOODIE_BLOOM_INDEX; /** * Hoodie index zookeeper */ public static final String HOODIE_INDEX_ZOOKEEPER_QUORUM = HOODIE_INDEX_PROPERTY_PREFIX + "zookeeper_quorum"; public static final String HOODIE_INDEX_ZOKEEPER_PORT = HOODIE_INDEX_PROPERTY_PREFIX + "zookeeper_port"; public static final String HOODIE_INDEX_HBASE_ZK_ZNODEPARENT = HOODIE_INDEX_PROPERTY_PREFIX + HOODIE_HBASE_INDEX_PREFIX + 
HOODIE_INDEX_ZKNODE + "path"; /** * Hoodie index get batch size */ public static final String HOODIE_INDEX_GET_BATCH_SIZE = HOODIE_INDEX_PROPERTY_PREFIX + "get_batch_size"; public static final int DEFAULT_HOODIE_INDEX_GET_BATCH_SIZE = 1000; /** * Hoodie index QPS fraction */ public static final String HOODIE_INDEX_QPS_FRACTION = HOODIE_INDEX_PROPERTY_PREFIX + "qps_fraction"; public static final double DEFAULT_HOODIE_INDEX_QPS_FRACTION = 0.002f; /** * Hoodie index max QPS per region server */ public static final String HOODIE_INDEX_MAX_QPS_PER_REGION_SERVER = HOODIE_INDEX_PROPERTY_PREFIX + "max_qps_per_region_server"; public static final int DEFAULT_HOODIE_INDEX_MAX_QPS_PER_REGION_SERVER = 32000; public static final String DEFAULT_VERSION = ""; /** * Hoodie HBase index table name. Required if the index type is hbase. */ public static final String HOODIE_HBASE_INDEX_TABLE_NAME = HOODIE_INDEX_PROPERTY_PREFIX + "hbase_index_table"; @Getter private final Configuration conf; @Getter private final String tableKey; public HoodieIndexConfiguration(@NonNull final Configuration conf, @NotEmpty final String tableKey) { super(conf, tableKey); this.conf = conf; this.tableKey = tableKey; } public HoodieIndex.IndexType getHoodieIndexType() { final String indexName = getProperty(HOODIE_INDEX_TYPE, DEFAULT_HOODIE_INDEX_TYPE); if (HOODIE_BLOOM_INDEX.equals(indexName.toLowerCase())) { return HoodieIndex.IndexType.BLOOM; } else if (HOODIE_HBASE_INDEX.equals(indexName.toLowerCase())) { return HoodieIndex.IndexType.HBASE; } else if (HOODIE_IN_MEMORY_INDEX.equals(indexName.toLowerCase())) { return IndexType.INMEMORY; } else { throw new IllegalStateException("Unsupported index type " + indexName); } } public String getHoodieIndexZookeeperQuorum() { final String value = getProperty(HOODIE_INDEX_ZOOKEEPER_QUORUM, StringTypes.EMPTY); Preconditions.checkState(!value.isEmpty(), "%s must not be empty", HOODIE_INDEX_ZOOKEEPER_QUORUM); return value; } public String getHoodieHbaseIndexTableName() { 
final String value = getProperty(HOODIE_HBASE_INDEX_TABLE_NAME, StringTypes.EMPTY); Preconditions.checkState(!value.isEmpty(), "%s must not be empty", HOODIE_HBASE_INDEX_TABLE_NAME); return value; } public int getHoodieIndexZookeeperPort() { final int value = getProperty(HOODIE_INDEX_ZOKEEPER_PORT, 0); Preconditions.checkState(value > 0, "%s must be greater than zero", HOODIE_INDEX_ZOKEEPER_PORT); return value; } public String getZkZnodeParent() { final String value = getProperty(HOODIE_INDEX_HBASE_ZK_ZNODEPARENT, StringTypes.EMPTY); Preconditions.checkState(!value.isEmpty(), "%s must always be set", HOODIE_INDEX_HBASE_ZK_ZNODEPARENT); return value; } public int getHoodieIndexMaxQpsPerRegionServer() { final int value = getProperty(HOODIE_INDEX_MAX_QPS_PER_REGION_SERVER, DEFAULT_HOODIE_INDEX_MAX_QPS_PER_REGION_SERVER); Preconditions.checkState(value > 0, "%s must be greater than zero", HOODIE_INDEX_MAX_QPS_PER_REGION_SERVER); return value; } public double getHoodieIndexQPSFraction() { final double value = getProperty(HOODIE_INDEX_QPS_FRACTION, DEFAULT_HOODIE_INDEX_QPS_FRACTION); Preconditions.checkState(value > 0 && value <= 1, "%s must be between 0 and 1", HOODIE_INDEX_QPS_FRACTION); return value; } public int getHoodieIndexGetBatchSize() { final int value = getProperty(HOODIE_INDEX_GET_BATCH_SIZE, DEFAULT_HOODIE_INDEX_GET_BATCH_SIZE); Preconditions.checkState(value > 0, "%s must be greater than zero", HOODIE_INDEX_GET_BATCH_SIZE); return value; } /** * Configure the Hoodie HBase index. 
*/ public HoodieIndexConfig configureHoodieIndex() { final String version; if (getVersion().isPresent()) { version = getVersion().get(); } else { version = DEFAULT_VERSION; } final String topicName = getTableName(); final HoodieIndexConfig.Builder builder = HoodieIndexConfig.newBuilder() .withIndexType(getHoodieIndexType()); if (HoodieIndex.IndexType.HBASE.equals(getHoodieIndexType())) { final String quorum = getHoodieIndexZookeeperQuorum(); final Integer port = getHoodieIndexZookeeperPort(); final String zkZnodeParent = getZkZnodeParent(); createHbaseIndexTableIfNotExists(topicName, quorum, port.toString(), zkZnodeParent, version); builder .hbaseIndexGetBatchSize(getHoodieIndexGetBatchSize()) .hbaseTableName(getHoodieHbaseIndexTableName()) .hbaseZkPort(port) .hbaseZkQuorum(quorum); } return builder.build(); } public void createHbaseIndexTableIfNotExists(@NotEmpty final String dataFeed, @NotEmpty final String zkQuorum, @NotEmpty final String zkPort, @NotEmpty final String zkZnodeParent, @NotEmpty final String version) { final String tableName = getHoodieHbaseIndexTableName(); final String family = "_s"; final org.apache.hadoop.conf.Configuration hbaseConfig = new org.apache.hadoop.conf.Configuration(); hbaseConfig.set("hbase.zookeeper.quorum", zkQuorum); hbaseConfig.set("hbase.zookeeper.property.clientPort", zkPort); hbaseConfig.set("zookeeper.znode.parent", zkZnodeParent); try { try (final Connection connection = ConnectionFactory.createConnection(hbaseConfig)) { if (!connection.getAdmin().tableExists(TableName.valueOf(tableName))) { final HTableDescriptor tableDesc = new HTableDescriptor(TableName.valueOf(tableName)); final HColumnDescriptor familyDesc = new HColumnDescriptor(Bytes.toBytes(family)); familyDesc.setBloomFilterType(BloomType.ROW); familyDesc.setCompressionType(Compression.Algorithm.SNAPPY); tableDesc.addFamily(familyDesc); connection.getAdmin().createTable(tableDesc); log.info("Created HBase table {} with family {}", tableName, family); } else { 
log.debug("HBase table {} exists", tableName); } } } catch (IOException e) { //todo: better handle try catch log.error("Error creating HBase table {} ", tableName, e); throw new JobRuntimeException(e); } } }
4,104
319
<reponame>Celebrate-future/openimaj /** * Copyright (c) 2011, The University of Southampton and the individual contributors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of the University of Southampton nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package org.openimaj.rdf; import java.io.File; import java.net.URL; import org.openimaj.rdf.owl2java.Generator; import org.openimaj.rdf.owl2java.Generator.GeneratorOptions; /** * Generates the SIOC ontology java classes * @author <NAME> (<EMAIL>) * */ public class SIOCPlay { private static final String OPENIMAJ_HOME = "/Users/ss/Development/java/openimaj/trunk"; /** * @param args * @throws Exception */ public static void main(String[] args) throws Exception { GeneratorOptions opts = new GeneratorOptions(); opts.mavenArtifactId = "sioc"; opts.mavenProject = "org.openimaj"; opts.targetDirectory = new File(OPENIMAJ_HOME,"knowledge/ontologies/sioc").toString(); opts.mavenParent = "org.openimaj:openimaj-ontologies:1.0.6-SNAPSHOT"; opts.mavenVersionNumber = "1.0.6-SNAPSHOT"; opts.separateImplementations = false; Generator.generate(new URL("http://rdfs.org/sioc/ns#").openStream(), opts); } }
813
3,428
<reponame>ghalimi/stdlib {"id":"00220","group":"easy-ham-2","checksum":{"type":"MD5","value":"c07529e1ed4db87e5f59922634b6c2ec"},"text":"From <EMAIL> Fri Aug 2 09:46:04 2002\nReturn-Path: <<EMAIL>>\nDelivered-To: y<EMAIL>.netnoteinc.com\nReceived: from localhost (localhost [127.0.0.1])\n\tby phobos.labs.netnoteinc.com (Postfix) with ESMTP id 0C8FF440F3\n\tfor <jm@localhost>; Fri, 2 Aug 2002 04:45:59 -0400 (EDT)\nReceived: from phobos [127.0.0.1]\n\tby localhost with IMAP (fetchmail-5.9.0)\n\tfor jm@localhost (single-drop); Fri, 02 Aug 2002 09:45:59 +0100 (IST)\nReceived: from lugh.tuatha.org (<EMAIL> [194.125.145.45]) by\n dogma.slashnull.org (8.11.6/8.11.6) with ESMTP id g728hu213075 for\n <<EMAIL>>; Fri, 2 Aug 2002 09:43:56 +0100\nReceived: from lugh (root@localhost [127.0.0.1]) by lugh.tuatha.org\n (8.9.3/8.9.3) with ESMTP id JAA26897; Fri, 2 Aug 2002 09:42:15 +0100\nReceived: from gatekeeper.fineos.com ([217.173.101.209]) by\n lugh.tuatha.org (8.9.3/8.9.3) with ESMTP id JAA26841 for <<EMAIL>>;\n Fri, 2 Aug 2002 09:42:06 +0100\nX-Authentication-Warning: lugh.tuatha.org: Host [172.16.58.3] claimed\n to be gatekeeper.fineos.com\nReceived: from oasis003.msc.ie (oasis003.msc.ie [192.168.125.248]) by\n gatekeeper.fineos.com (8.11.0/8.8.5) with ESMTP id g728hcU00405 for\n <<EMAIL>>; Fri, 2 Aug 2002 09:43:38 +0100\nReceived: from oasis010.msc.ie (oasis010.msc.ie) by oasis003.msc.ie\n (Content Technologies SMTPRS 4.2.5) with ESMTP id\n <<EMAIL>> for <<EMAIL>>;\n Fri, 2 Aug 2002 09:50:11 +0100\nReceived: from oasis006.msc.ie (oasis006.msc.ie [192.168.125.245]) by\n oasis010.msc.ie (8.11.0/8.9.1) with ESMTP id g728S3f29091 for\n <<EMAIL>>; Fri, 2 Aug 2002 09:28:03 +0100\nReceived: by oasis006.msc.ie with Internet Mail Service (5.5.2653.19) id\n <PP38BQQV>; Fri, 2 Aug 2002 09:41:53 +0100\nMessage-Id: <<EMAIL>>\nFrom: \"<NAME> (ext 722)\" <<EMAIL>>\nTo: \"Irish Linux User's Group (E-mail)\" <<EMAIL>>\nDate: Fri, 2 Aug 2002 09:41:52 +0100\nMIME-Version: 1.0\nX-Mailer: 
Internet Mail Service (5.5.2653.19)\nContent-Type: text/plain\nSubject: [ILUG] Quake 3\nSender: [email protected]\nErrors-To: [email protected]\nX-Mailman-Version: 1.1\nPrecedence: bulk\nList-Id: Irish Linux Users' Group <ilug.linux.ie>\nX-Beenthere: [email protected]\n\n\nHi,\n\nI've installed Quake 3 onto my computer and it starts to run up ok but as\nsoon as the graphics start it bails out. It complains that there is an\nerror in sis_drv.o, the line before this says that it is starting Quake in\n24 bit depth, now from http://www.winischhofer.net/linuxsis630.shtml I\nunderstand that the sis630 won't do 24 bit depth. So is there a way to make\nQuake start up in 16 bit depth? I've done some \"google\"ing but drawn a\nblank.....\n\nThanks\n\nGreg\n\n\n**************************************************************************\nThe information contained in this e-mail is confidential,\nmay be privileged and is intended only for the use of the\nrecipient named above. If you are not the intended\nrecipient or a representative of the intended recipient,\nyou have received this e-mail in error and must not copy,\nuse or disclose the contents of this email to anybody\nelse. If you have received this e-mail in error, please\nnotify the sender immediately by return e-mail and\npermanently delete the copy you received. This email has\nbeen swept for computer viruses. However, you should\ncarry out your own virus checks.\n\n\nRegistered in Ireland, No. 205721. http://www.FINEOS.com\n**************************************************************************\n\n\n-- \nIrish Linux Users' Group: <EMAIL>\nhttp://www.linux.ie/mailman/listinfo/ilug for (un)subscription information.\nList maintainer: <EMAIL>\n\n\n"}
1,449
3,084
<reponame>ixjf/Windows-driver-samples /***************************************************************************** * * imagefilter.cpp * * Copyright (c) 2003 Microsoft Corporation. All Rights Reserved. * * DESCRIPTION: * * Contains implementation of Image Processing Filer with "filtering stream". * The implementation uses GDI+ for cutting out images, for deskewing as well * as for implementing brightness and contrast. * *******************************************************************************/ #include "stdafx.h" #include <gdiplus.h> #include <math.h> #include <objidl.h> #include "imagefilter.h" #include "wiaitem.h" #include "gphelper.h" using namespace Gdiplus; /***************************************************************************** * * @func STDMETHODIMP | DoFiltering | Reads unfiltered data from input stream, cuts, deskews, rotates and filters the image data * and then writes fitlered data to output stream. * * @parm LONG | lBrightness | * The brightness set into the region we are filtering. Should be between -1000 and 1000 * * @parm LONG | lContrast | * The contrast set into the region we are filtering. Should be between -1000 and 1000 * * @parm LONG | regionRotate | * How much we should rotate the reion (note rotate happens after deskew!) * * @parm LONG | regionDeskewX | * WIA_IPS_DESKEW_X for region to deskew (note 0 means no deskew) * * @parm LONG | regionDeskewY | * WIA_IPS_DESKEW_Y for region to deskew (note 0 means no deskew) * * @parm IStream* | pInputStream | * Unfiltered image data, either directly from driver of from WIA Preview Component * * * @parm IStream* | pOutputStream | * Application stream where we write image data * * @parm LONG | inputXPOS | * X-position of upper left corner of region to "cut-out" from image in pInputStream. * Note that this parameter is relative to image in pInputStream which is not necessarily * its X-position on the flatbed. 
* * @parm LONG | inputYPOS | * Y-position of upper left corner of region to "cut-out" from image in pInputStream. * Note that this parameter is relative to image in pInputStream which is not necessarily * its Y-position on the flatbed. * * @parm LONG | boundingRegionWidth | * Width of bounding area to "cut-out" from pInputStream. A value of 0 means that we should not perform * any cutting, but instead filter the whole image. * boundingRegionWidth will be set to 0 when we receive the image data from the driver since the driver * will only send us the bounding rectangle of the selected region and not the entire flatbed. * Note: if there is not deskewing being performed this is the "actual" width of the region. * * @parm LONG | boundingRegionHeight | * Height of bounding area to "cut-out" from pInputStream. A value of 0 means that we should not perform * any cutting, but instead filter the whole image. * boundingRegionHeight will be set to 0 when we receive the image data from the driver since the driver * will only send us the bounding rectangle of the selected region and not the entire flatbed. * Note: if there is not deskewing being performed this is the "actual" height of the region. * * @comm * Note, our simple implementation of DoFiltering always write all the data * in one chunk to the application. An actual image processing filter should * be able to work on bands of data in the case where there is no rotation * or deskewing being performed. * This implementation also does not send callbacks (TransferCallback) messages * to the application indicating progress. A "real" implementation should do that! * * @rvalue S_OK | * The function succeeded. 
* @rvalue E_XXX | * The function failed * *****************************************************************************/ static HRESULT DoFiltering( LONG lBrightness, LONG lContrast, LONG regionRotate, LONG regionDeskewX, LONG regionDeskewY, _In_ IStream *pInputStream, _In_ IStream *pOutputStream, _Inout_ ULONG64 *pulBytesWrittenToOutputStream, LONG inputXPOS = 0, LONG inputYPOS = 0, LONG boundingRegionWidth = 0, LONG boundingRegionHeight = 0 ) { HRESULT hr = S_OK; Bitmap *pOriginalBitmap = NULL; Bitmap *pTargetBitmap = NULL; CLSID formatEncoder = {0}; GdiplusStartupInput gdiplusStartupInput; ULONG_PTR ulImageLibraryToken = 0; if (SUCCEEDED(hr)) { hr = GDISTATUS_TO_HRESULT(GdiplusStartup(&ulImageLibraryToken, &gdiplusStartupInput, NULL)); } // // Create a Bitmap object on the unfiltered input stream // if (SUCCEEDED(hr)) { #pragma prefast(suppress:__WARNING_ALIASED_MEMORY_LEAK_EXCEPTION, "Sample code only. Production code should handle exceptions.") pOriginalBitmap = new Bitmap(pInputStream, TRUE); if (pOriginalBitmap) { hr = GDISTATUS_TO_HRESULT(pOriginalBitmap->GetLastStatus()); } else { hr = E_OUTOFMEMORY; } if (SUCCEEDED(hr)) { hr = GDISTATUS_TO_HRESULT(GetEncoderGUIDFromImage(pOriginalBitmap, &formatEncoder)); } } // // If boundingRegionWidth or boundingRegionHeight is 0, this means that we should not perform any // "cutting" but instead just filter the whole input image. // if (SUCCEEDED(hr)) { if ((0 == boundingRegionWidth) || (0 == boundingRegionHeight)) { inputXPOS = 0; inputYPOS = 0; boundingRegionWidth = pOriginalBitmap->GetWidth(); boundingRegionHeight = pOriginalBitmap->GetHeight(); } } // // Perform filtering. This is done in 3 steps: // 1. Create a new bitmap with the dimensions of the final, filtered image. // 2. "Cut-out" and deskew final image from full image. This is done by a translate // followed by a rotate transformtation. // 3. Apply color matrix to to perform brightness and contrast modifications. 
// if (SUCCEEDED(hr)) { PixelFormat originalPixelFormat = pOriginalBitmap->GetPixelFormat(); double dblDeskewAngle = 0.0; LONG lXdelta = 0; LONG lYdelta = 0; LONG lActualRegionWidth = 0; LONG lActualRegionHeight = 0; // // No deskew, just cut out a rectangular area! // if ((regionDeskewX) == 0 || (regionDeskewY == 0)) { lActualRegionWidth = boundingRegionWidth; lActualRegionHeight = boundingRegionHeight; dblDeskewAngle = 0.0; } else { if (regionDeskewX > regionDeskewY) { lYdelta = regionDeskewY; dblDeskewAngle = atan2((double)regionDeskewY, (double)regionDeskewX); lActualRegionWidth = (LONG) sqrt((double) (regionDeskewX * regionDeskewX + regionDeskewY * regionDeskewY)); lActualRegionHeight = (LONG) (((double) (boundingRegionHeight - regionDeskewY)) / cos(dblDeskewAngle)); } else { lXdelta = regionDeskewX; dblDeskewAngle = atan2((double)regionDeskewX, (double)regionDeskewY); lActualRegionWidth = (LONG) (((double) (boundingRegionWidth - regionDeskewX)) / cos(dblDeskewAngle)); lActualRegionHeight = (LONG) sqrt((double) (regionDeskewX * regionDeskewX + regionDeskewY * regionDeskewY)); dblDeskewAngle = -dblDeskewAngle; } } pTargetBitmap = new Bitmap(lActualRegionWidth, lActualRegionHeight, originalPixelFormat); if (pTargetBitmap) { hr = GDISTATUS_TO_HRESULT(pTargetBitmap->GetLastStatus()); } else { hr = E_OUTOFMEMORY; } if (SUCCEEDED(hr)) { Graphics graphics(pTargetBitmap); ImageAttributes imageAttributes; hr = GDISTATUS_TO_HRESULT(graphics.GetLastStatus()); if (SUCCEEDED(hr)) { graphics.TranslateTransform((REAL)-(inputXPOS + lXdelta), (REAL)-(inputYPOS + lYdelta)); hr = GDISTATUS_TO_HRESULT(graphics.GetLastStatus()); } if (dblDeskewAngle != 0.0) { if (SUCCEEDED(hr)) { graphics.RotateTransform((REAL)(dblDeskewAngle * 180.0 / PI), MatrixOrderAppend); hr = GDISTATUS_TO_HRESULT(graphics.GetLastStatus()); } } if (SUCCEEDED(hr)) { // // Calculate the values needed for the matrix // REAL scale = 0.0; REAL trans = 0.0; // // Normalize brightness and contrast to 0 to 1000. 
// This assumes valid settings are - 1000 to 1000. // CalculateBrightnessAndContrastParams( (lBrightness + 1000) /2, (lContrast + 1000) / 2, &scale, &trans ); // // Prepare the matrix for brightness and contrast transforms // ColorMatrix brightnessAndContrast = {scale, 0, 0, 0, 0, 0, scale, 0, 0, 0, 0, 0, scale, 0, 0, 0, 0, 0, 1, 0, trans, trans, trans, 0, 1}; hr = imageAttributes.SetColorMatrix(&brightnessAndContrast); } if (SUCCEEDED(hr)) { UINT uWidth = pOriginalBitmap->GetWidth(); UINT uHeight = pOriginalBitmap->GetHeight(); Rect rect( 0, 0, uWidth, uHeight ); hr = GDISTATUS_TO_HRESULT(graphics.DrawImage(pOriginalBitmap,rect,0,0,uWidth, uHeight,UnitPixel,&imageAttributes)); } } } // // The last step for us to perform is rotating the region // if (SUCCEEDED(hr) && (regionRotate != PORTRAIT)) { RotateFlipType rotateFlipType; switch (regionRotate) { case LANSCAPE: rotateFlipType = Rotate270FlipNone; break; case ROT180: rotateFlipType = Rotate180FlipNone; break; case ROT270: rotateFlipType = Rotate90FlipNone; break; default: // // We should never get here! // rotateFlipType = RotateNoneFlipNone; } hr = GDISTATUS_TO_HRESULT(pTargetBitmap->RotateFlip(rotateFlipType)); } // // The GDI+ Bitmap::Save method does not work very well for images that // an application displays band by band since it results in a large number // of small Write calls. Instead we do a LockBits to read the bits from // the bitmap and then write them to the application's stream. 
// if (SUCCEEDED(hr)) { hr = WriteBitmapToStream(pTargetBitmap, pOutputStream, pulBytesWrittenToOutputStream); } if (pOriginalBitmap) { delete pOriginalBitmap; } if (pTargetBitmap) { delete pTargetBitmap; } if(ulImageLibraryToken) { GdiplusShutdown(ulImageLibraryToken); ulImageLibraryToken = 0; } return hr; } /******************************************************************************* Routine Name: ConvertBMPImageToRaw Routine Description: Converts an uncompressed 24-bpp RGB BMP image Stream to a RAW Stream Arguments: input Stream Return Value: Output Stream if conversion successful HRESULT (S_OK in case the operation succeeds) *******************************************************************************/ HRESULT ConvertBMPImageToRaw(IStream * pStreamIn, IStream *pStreamOut, ULONG64 * pcbWritten = NULL) { HRESULT hr = S_OK; ULONG ulRead = 0, ulWrite = 0; WIA_RAW_HEADER RawHeader = {0}; BITMAPFILEHEADER bmfh = {0}; BITMAPINFOHEADER bmih = {0}; if (!pStreamIn || !pStreamOut) { hr = E_POINTER; } // // Seek to the beginning of Input Stream // if (SUCCEEDED(hr)) { if (pcbWritten) { *pcbWritten = (ULONG64)0; } LARGE_INTEGER li = {0}; hr = pStreamIn->Seek(li, STREAM_SEEK_SET, NULL); } // // Attempt to read the Bitmap File Header // if (SUCCEEDED(hr)) { hr = pStreamIn->Read(&bmfh, sizeof(bmfh), &ulRead); if (SUCCEEDED(hr)) { if (ulRead != sizeof(bmfh)) { hr = E_FAIL; } } } // // Attempt to read the Bitmap File Header // if (SUCCEEDED(hr)) { hr = pStreamIn->Read(&bmih, sizeof(bmih), &ulRead); if (SUCCEEDED(hr)) { if (ulRead != sizeof(bmih)) { hr = E_FAIL; } } } // // todo: check the bitmap info header and bitmap file header for validity // if (SUCCEEDED(hr)) { // // The 'WRAW' 4 ASCII character signature is required at the begining of all WIA Raw transfers: // const char szSignature[] = "WRAW"; memcpy(&RawHeader.Tag, szSignature, sizeof(DWORD)); // // Fill in the fields describing version identity for this header: // RawHeader.Version = 0x00010000; 
RawHeader.HeaderSize = sizeof(WIA_RAW_HEADER); // // Fill in all the fields that we can retrieve directly from the current MINIDRV_TRANSFER_CONTEXT: // // // Resolution values must be converted to DPI (pixels per inch) from pixels per meter: // // (1" = 25.4 mm, 1 m ~ 39.37") // RawHeader.XRes = (LONG)((float)bmih.biXPelsPerMeter / 39.37f); RawHeader.YRes = (LONG)((float)bmih.biYPelsPerMeter / 39.37f); RawHeader.XExtent = bmih.biWidth; RawHeader.YExtent = bmih.biHeight; RawHeader.BytesPerLine = bmih.biWidth * 3; RawHeader.BitsPerPixel = bmih.biBitCount; RawHeader.ChannelsPerPixel = 3; RawHeader.DataType = WIA_DATA_RAW_RGB; ZeroMemory(RawHeader.BitsPerChannel, sizeof(RawHeader.BitsPerChannel)); RawHeader.BitsPerChannel[0] = 8; RawHeader.BitsPerChannel[1] = 8; RawHeader.BitsPerChannel[2] = 8; RawHeader.Compression = WIA_COMPRESSION_NONE; RawHeader.PhotometricInterp = bmih.biSizeImage; RawHeader.LineOrder = WIA_LINE_ORDER_BOTTOM_TO_TOP; // // Raw data: the offset is the size of the header (we don't have a color palette in this case): // RawHeader.RawDataOffset = RawHeader.HeaderSize; RawHeader.RawDataSize = bmih.biSizeImage; RawHeader.PaletteSize = 0; RawHeader.PaletteOffset = 0; } // // Save the RawHeader: Dont Seek // if (SUCCEEDED(hr)) { hr = pStreamOut->Write(&RawHeader, sizeof(RawHeader), &ulWrite); if (SUCCEEDED(hr)) { if (pcbWritten) { (*pcbWritten)+= (ULONG64)ulWrite; } if (ulWrite != sizeof(RawHeader)) { hr = E_FAIL; } } } // // Save the DIB data: Read from in stream and write to out stream // ULONG ulBufferSize = 100000; // approx 100 KB BYTE *pbImageData = NULL; if (SUCCEEDED(hr)) { pbImageData = (BYTE *)malloc(ulBufferSize); if (!pbImageData) { hr = E_OUTOFMEMORY; } // // memory allocated. 
now copy // while(S_OK == hr) { hr = pStreamIn->Read(pbImageData, ulBufferSize, &ulRead); if (SUCCEEDED(hr)) { hr = pStreamOut->Write(pbImageData, ulRead, &ulWrite); if (SUCCEEDED(hr)) { if (pcbWritten) { (*pcbWritten)+= (ULONG64)ulWrite; } if (ulRead != ulWrite) { hr = E_FAIL; } } } if (ulRead != ulBufferSize) { break; } } } // // Clean-up: // if (pbImageData) { free(pbImageData); pbImageData = NULL; } return hr; } /******************************************************************************* Routine Name: ConvertRawImageToBMP Routine Description: Converts an uncompressed 24-bpp RGB raw image Stream to a DIB Stream Arguments: input Stream Return Value: Output Stream if conversion successful HRESULT (S_OK in case the operation succeeds) *******************************************************************************/ HRESULT ConvertRawImageToBMP(IStream * pStreamIn, IStream **ppStreamOut, ULONG64 * pcbWritten = NULL) { HRESULT hr = S_OK; ULONG ulRead = 0, ulWrite = 0; IStream *pStreamOut = NULL; WIA_RAW_HEADER RawHeader = {0}; BITMAPFILEHEADER bmfh = {0}; BITMAPINFOHEADER bmih = {0}; if (!pStreamIn || !ppStreamOut) { hr = E_POINTER; } if (SUCCEEDED(hr)) { if(pcbWritten) { *pcbWritten = (ULONG64)0; } (*ppStreamOut) = NULL; // // Seek to the beginning of Input Stream // LARGE_INTEGER li = {0}; hr = pStreamIn->Seek(li, STREAM_SEEK_SET, NULL); } // // Attempt to read the WIA_RAW_HEADER: // if (SUCCEEDED(hr)) { hr = pStreamIn->Read(&RawHeader, sizeof(WIA_RAW_HEADER), &ulRead); if (SUCCEEDED(hr)) { if (ulRead != sizeof(WIA_RAW_HEADER)) { hr = E_FAIL; } } } // // Verify the WIA raw header signature: // if (SUCCEEDED(hr)) { const char szSignature[] = "WRAW"; if (memcmp(&RawHeader.Tag, szSignature, sizeof(DWORD))) { hr = E_FAIL; } } // // Verify the WIA raw header reported size and version number: // if (SUCCEEDED(hr)) { if ((0x00010000 != RawHeader.Version) || (sizeof(WIA_RAW_HEADER) != RawHeader.HeaderSize)) { hr = E_FAIL; } } // // Verify if the raw image format - this 
sample supports only uncompressed 24-bpp RGB data: // if (SUCCEEDED(hr)) { if ((WIA_COMPRESSION_NONE != RawHeader.Compression) || (24 != RawHeader.BitsPerPixel) || (3 != RawHeader.ChannelsPerPixel) || (RawHeader.PaletteSize) || (8 != RawHeader.BitsPerChannel[0]) || (8 != RawHeader.BitsPerChannel[1]) || (8 != RawHeader.BitsPerChannel[2])) { hr = E_FAIL; } } // // Build the BITMAPFILEHEADER and the BITMAPINFOHEADER structures needed // to convert the raw uncompressed 24-bpp RGB image to a DIB: // if (SUCCEEDED(hr)) { // // BITMAPFILEHEADER: // const char szBM[] = "BM"; memcpy(&bmfh.bfType, szBM, sizeof(WORD)); bmfh.bfSize = sizeof(BITMAPFILEHEADER); bmfh.bfOffBits = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER); // // BITMAPINFOHEADER: // bmih.biSize = sizeof(BITMAPINFOHEADER); bmih.biWidth = RawHeader.XExtent; bmih.biHeight = (WIA_LINE_ORDER_BOTTOM_TO_TOP == RawHeader.LineOrder) ? ((LONG)RawHeader.YExtent) : (-(LONG)RawHeader.YExtent); bmih.biPlanes = 1; bmih.biBitCount = (WORD)RawHeader.BitsPerPixel; bmih.biCompression = BI_RGB; bmih.biSizeImage = RawHeader.RawDataSize; bmih.biClrUsed = 0; bmih.biClrImportant = 0; // // Resolution values must be converted from DPI (pixels per inch) to pixels per meter: // // (1" = 25.4 mm, 1 m ~ 39.37") // bmih.biXPelsPerMeter = (LONG)((float)RawHeader.XRes * 39.37f); bmih.biYPelsPerMeter = (LONG)((float)RawHeader.YRes * 39.37f); } if (SUCCEEDED(hr)) { hr = CreateStreamOnHGlobal(0, TRUE, &pStreamOut); } // // Save the BITMAPFILEHEADER: // if (SUCCEEDED(hr)) { hr = pStreamOut->Write(&bmfh, bmfh.bfSize, &ulWrite); if (SUCCEEDED(hr)) { if (pcbWritten) { (*pcbWritten)+= (ULONG64)ulWrite; } if (ulWrite != bmfh.bfSize) { hr = E_FAIL; } } } // // Save the BITMAPINFOHEADER: // if (SUCCEEDED(hr)) { hr = pStreamOut->Write(&bmih, bmih.biSize, &ulWrite); if (SUCCEEDED(hr)) { if (pcbWritten) { (*pcbWritten)+= (ULONG64)ulWrite; } if (ulWrite != bmih.biSize) { hr = E_FAIL; } } } // // Save the DIB data: Read from in stream and write to out 
stream // ULONG ulBufferSize = 100000; // approx 100 KB BYTE *pbImageData = NULL; if (SUCCEEDED(hr)) { pbImageData = (BYTE *)malloc(ulBufferSize); if (!pbImageData) { hr = E_OUTOFMEMORY; } // // memory allocated. now copy // while(S_OK == hr) { hr = pStreamIn->Read(pbImageData, ulBufferSize, &ulRead); if (SUCCEEDED(hr)) { hr = pStreamOut->Write(pbImageData, ulRead, &ulWrite); if (SUCCEEDED(hr)) { if (pcbWritten) { (*pcbWritten)+= (ULONG64)ulWrite; } if (ulRead != ulWrite) { hr = E_FAIL; } } } if (ulRead != ulBufferSize) { break; } } } // // Clean-up: // if (pbImageData) { free(pbImageData); pbImageData = NULL; } if (SUCCEEDED(hr)) { // // Seek to the beginning of Output Stream (We can do this since we created the stream) // LARGE_INTEGER li = {0}; hr = pStreamOut->Seek(li, STREAM_SEEK_SET, NULL); } if (pStreamOut) { if (SUCCEEDED(hr)) { *ppStreamOut = pStreamOut; } else { pStreamOut->Release(); } } return hr; } /***************************************************************************** * * CMyFilterStream is our implemetation of the filtering stream. * The only IStream method that it implements is Write(). * * The stream keeps a reference to the applications stream into which it writes * the filtered data (the header is not modified however). 
 *
 *******************************************************************************/

///
/// Constructor - note sets reference count to 1 (via AddRef, so module
/// locking stays balanced).
///
CMyFilterStream::CMyFilterStream(
    VOID) :
        m_pAppStream(NULL) ,
        m_pCachingStream(NULL),
        m_nRefCount(0),
        m_cBytesWritten(0),
        m_lBrightness(0),
        m_lContrast(0),
        m_lRotation(0),
        m_lDeskewX(0),
        m_lDeskewY(0)
{
    //
    // Note: Do not initialize refcount to 1 as it will break module locking, instead call AddRef()
    // (AddRef takes the module lock when the count goes 0 -> 1).
    //
    AddRef();
}

///
/// Destructor: releases both stream references if ReleaseStreams was not
/// called beforehand.
///
CMyFilterStream::~CMyFilterStream(
    VOID)
{
    if (m_pAppStream)
    {
        m_pAppStream->Release();
        m_pAppStream = NULL;
    }

    if (m_pCachingStream)
    {
        m_pCachingStream->Release();
        m_pCachingStream = NULL;
    }
}

///
/// Initilize stores a reference to the application's stream. It also creates
/// its own stream with CreateStreamOnHGlobal into which it stores all the
/// unfiltered image data before it performs its filtering (in Flush).
///
/// pAppStream must be non-NULL; the filter parameters (brightness, contrast,
/// rotation, deskew) and the image format GUID are cached for use by Flush.
/// lXExtent/lYExtent/lBitDepth are currently unused.
///
HRESULT CMyFilterStream::Initialize(
    _In_ IStream *pAppStream,
    LONG lBrightness,
    LONG lContrast,
    LONG lRotation,
    LONG lDeskewX,
    LONG lDeskewY,
    LONG lXExtent,
    LONG lYExtent,
    LONG lBitDepth,
    GUID guidFormat)
{
    UNREFERENCED_PARAMETER(lXExtent);
    UNREFERENCED_PARAMETER(lYExtent);
    UNREFERENCED_PARAMETER(lBitDepth);

    HRESULT hr = S_OK;

    hr = pAppStream ? S_OK : E_INVALIDARG;

    if (SUCCEEDED(hr))
    {
        m_pAppStream = pAppStream;
        m_pAppStream->AddRef();
    }

    if (SUCCEEDED(hr))
    {
        //
        // The caching stream accumulates the unfiltered data written by the
        // driver until Flush performs the actual filtering.
        //
        hr = CreateStreamOnHGlobal(0, TRUE, &m_pCachingStream);
    }

    if (SUCCEEDED(hr))
    {
        m_lBrightness = lBrightness;
        m_lContrast = lContrast;
        m_lRotation = lRotation;
        m_lDeskewX = lDeskewX;
        m_lDeskewY = lDeskewY;
        m_guidFormat = guidFormat;
    }

    return hr;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CMyFilterStream::Flush | Reads unfiltered data, performs filtering and writes
 * data to application stream
 *
 * @comm
 *
 * Flush is called when the image processing filter receives a WIA_TRANSFER_MSG_END_OF_STREAM message.
 * Flush calls DoFiltering where the actual filtering is done.
 *
 * Note that this simple implementation performs all its filtering only after it has received all
 * unfiltered image data and stored it in m_pCachingStream. A "real" implementation should be able
 * to work on bands of data (at least if no deskew and rotation has to be performed).
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
HRESULT
CMyFilterStream::Flush(
    VOID)
{
    HRESULT hr = S_OK;
    IStream * tempStream = NULL;
    IStream * tempOutStream = NULL;

    if(IsEqualGUID(m_guidFormat, WiaImgFmt_RAW))
    {
        //
        // RAW data cannot be filtered directly: convert RAW -> BMP, filter the
        // BMP into a temporary stream, then convert back BMP -> RAW into the
        // application's stream.
        //
        if (SUCCEEDED(hr))
        {
            hr = ConvertRawImageToBMP(m_pCachingStream, &tempStream);
            if (SUCCEEDED(hr) && tempStream)
            {
                hr = CreateStreamOnHGlobal(0, TRUE, &tempOutStream);
                if (SUCCEEDED(hr) && tempOutStream)
                {
                    //
                    // ulDummy: byte count of the intermediate BMP, not what the
                    // app receives; m_cBytesWritten is set by the final RAW
                    // conversion below.
                    //
                    ULONG64 ulDummy = 0;
                    hr = DoFiltering(m_lBrightness,
                                     m_lContrast,
                                     m_lRotation,
                                     m_lDeskewX,
                                     m_lDeskewY,
                                     tempStream,
                                     tempOutStream,
                                     &ulDummy);
                    if (SUCCEEDED(hr))
                    {
                        hr = ConvertBMPImageToRaw(tempOutStream, m_pAppStream, &m_cBytesWritten);
                    }
                    tempOutStream->Release();
                }
                tempStream->Release();
            }
        }
    }
    else
    {
        //
        // Non-RAW formats are filtered straight from the caching stream into
        // the application's stream.
        //
        hr = DoFiltering( m_lBrightness,
                          m_lContrast,
                          m_lRotation,
                          m_lDeskewX,
                          m_lDeskewY,
                          m_pCachingStream,
                          m_pAppStream,
                          &m_cBytesWritten);
    }

    //
    // Note: m_pAppStream and m_pCachingStream are released by ReleaseStreams which must be always called after Flush
    //
    return hr;
}

///
/// Query Interface: supports IID_IUnknown and IID_IStream only.
///
STDMETHODIMP CMyFilterStream::QueryInterface(_In_ const IID& iid_requested, _Out_ void** ppInterfaceOut)
{
    HRESULT hr = S_OK;

    hr = ppInterfaceOut ? S_OK : E_POINTER;
    if (SUCCEEDED(hr))
    {
        *ppInterfaceOut = NULL;
    }

    //
    // We support IID_IUnknown and IID_IStream
    //
    if (SUCCEEDED(hr))
    {
        if (IID_IUnknown == iid_requested)
        {
            *ppInterfaceOut = static_cast<IUnknown*>(this);
        }
        else if (IID_IStream == iid_requested)
        {
            *ppInterfaceOut = static_cast<IStream*>(this);
        }
        else
        {
            hr = E_NOINTERFACE;
        }
    }

    if (SUCCEEDED(hr))
    {
        reinterpret_cast<IUnknown*>(*ppInterfaceOut)->AddRef();
    }

    return hr;
}

///
/// AddRef: takes the module lock on the 0 -> 1 transition.
///
STDMETHODIMP_(ULONG) CMyFilterStream::AddRef(void)
{
    if (m_nRefCount == 0)
    {
        LockModule();
    }
    return InterlockedIncrement(&m_nRefCount);
}

///
/// Release: deletes the object and releases the module lock when the
/// reference count reaches zero.
///
STDMETHODIMP_(ULONG) CMyFilterStream::Release(void)
{
    ULONG nRetval = InterlockedDecrement(&m_nRefCount);
    if (0 == nRetval)
    {
        delete this;
        UnlockModule();
    }
    return nRetval;
}

//
// All IStream methods other than Write simply delegate to the internal
// caching stream:
//

STDMETHODIMP CMyFilterStream::Seek(LARGE_INTEGER dlibMove, DWORD dwOrigin, _Out_ ULARGE_INTEGER *plibNewPosition)
{
    return m_pCachingStream->Seek(dlibMove,dwOrigin,plibNewPosition);
}

STDMETHODIMP CMyFilterStream::SetSize(ULARGE_INTEGER libNewSize)
{
    return m_pCachingStream->SetSize(libNewSize);
}

STDMETHODIMP CMyFilterStream::LockRegion(ULARGE_INTEGER libOffset, ULARGE_INTEGER cb, DWORD dwLockType)
{
    return m_pCachingStream->LockRegion(libOffset,cb,dwLockType);
}

STDMETHODIMP CMyFilterStream::CopyTo(_In_ IStream *pstm, ULARGE_INTEGER cb, _Out_ ULARGE_INTEGER *pcbRead, _Out_ ULARGE_INTEGER *pcbWritten)
{
    return m_pCachingStream->CopyTo(pstm,cb,pcbRead,pcbWritten);
}

STDMETHODIMP CMyFilterStream::Commit(DWORD grfCommitFlags)
{
    return m_pCachingStream->Commit(grfCommitFlags);
}

STDMETHODIMP CMyFilterStream::Revert(void)
{
    return m_pCachingStream->Revert();
}

STDMETHODIMP CMyFilterStream::UnlockRegion(ULARGE_INTEGER libOffset, ULARGE_INTEGER cb, DWORD dwLockType)
{
    return m_pCachingStream->UnlockRegion(libOffset,cb,dwLockType);
}

STDMETHODIMP CMyFilterStream::Stat(_Out_ STATSTG *pstatstg, DWORD grfStatFlag)
{
    return m_pCachingStream->Stat(pstatstg, grfStatFlag);
}

STDMETHODIMP
CMyFilterStream::Clone(_Out_ IStream **ppstm)
{
    return m_pCachingStream->Clone(ppstm);
}

STDMETHODIMP CMyFilterStream::Read(_Out_ void *pv, ULONG cb, _Out_ ULONG *pcbRead)
{
    return m_pCachingStream->Read(pv,cb,pcbRead);
}

///
/// ReleaseStreams: drops both the application stream and the caching stream.
/// Must always be called after Flush (the destructor is the fallback).
///
STDMETHODIMP
CMyFilterStream::ReleaseStreams()
{
    if (m_pAppStream)
    {
        m_pAppStream->Release();
        m_pAppStream = NULL;
    }

    if (m_pCachingStream)
    {
        m_pCachingStream->Release();
        m_pCachingStream = NULL;
    }
    return S_OK;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CMyFilterStream::Write | Filtering streams implementation of Write
 *
 * @parm const void * | pv |
 *   Pointer to the memory buffer.
 *
 * @parm ULONG | cb |
 *   Specifies the number of bytes of data to write from the stream object.
 *
 * @parm ULONG | pcbWritten |
 *   Pointer to a ULONG variable that receives the actual number of bytes written from the stream object.
 *
 * @comm
 * Write simply writes unfiltered data from the driver into its internal caching stream.
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
STDMETHODIMP CMyFilterStream::Write(_In_ const void *pv, ULONG cb, _Out_ ULONG *pcbWritten)
{
    return m_pCachingStream->Write(pv, cb, pcbWritten);
}

///
/// Constructor
///
CImageFilter::CImageFilter(
    VOID) :
        m_pWiaItem(NULL),
        m_pAppWiaTransferCallback(NULL),
        m_nRefCount(0),
        m_pCurrentStream(NULL)
{
    //
    // Nothing
    //
}

///
/// Destructor: releases the WIA item and the application callback if held.
///
CImageFilter::~CImageFilter(
    VOID)
{
    if (m_pWiaItem)
    {
        m_pWiaItem->Release();
        m_pWiaItem = NULL;
    }

    if (m_pAppWiaTransferCallback)
    {
        m_pAppWiaTransferCallback->Release();
        m_pAppWiaTransferCallback = NULL;
    }
}

///
/// QueryInterface: supports IID_IUnknown, IID_IWiaImageFilter and
/// IID_IWiaTransferCallback.
///
STDMETHODIMP CImageFilter::QueryInterface(_In_ const IID& iid_requested, _Out_ void** ppInterfaceOut)
{
    HRESULT hr = S_OK;

    hr = ppInterfaceOut ? S_OK : E_POINTER;
    if (SUCCEEDED(hr))
    {
        *ppInterfaceOut = NULL;
    }

    //
    // We support IID_IUnknown, IID_IWiaImageFilter and IID_IWiaTransferCallback
    //
    if (SUCCEEDED(hr))
    {
        if (IID_IUnknown == iid_requested)
        {
            *ppInterfaceOut = static_cast<IWiaImageFilter*>(this);
        }
        else if (IID_IWiaImageFilter == iid_requested)
        {
            *ppInterfaceOut = static_cast<IWiaImageFilter*>(this);
        }
        else if (IID_IWiaTransferCallback == iid_requested)
        {
            *ppInterfaceOut = static_cast<IWiaTransferCallback*>(this);
        }
        else
        {
            hr = E_NOINTERFACE;
        }
    }

    if (SUCCEEDED(hr))
    {
        reinterpret_cast<IUnknown*>(*ppInterfaceOut)->AddRef();
    }

    return hr;
}

///
/// AddRef: takes the module lock on the 0 -> 1 transition.
///
STDMETHODIMP_(ULONG) CImageFilter::AddRef(void)
{
    if (m_nRefCount == 0)
    {
        LockModule();
    }
    return InterlockedIncrement(&m_nRefCount);
}

///
/// Release
///
STDMETHODIMP_(ULONG) CImageFilter::Release(void)
{
    ULONG nRetval = InterlockedDecrement(&m_nRefCount);
    if (0 == nRetval)
    {
        delete this;
        UnlockModule();
    }
    return nRetval;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CImageFilter::InitializeFilter | Initializes image processing filter
 *
 * @parm IWiaItem2 | pWiaItem |
 *   The WIA item we are doing the download for. This will actually be the parent item
 *   for some of the item we acquire the image for. See implementation of GetNextStream
 *   for more details.
 *
 * @parm IWiaTransferCallback | pWiaTransferCallback |
 *   Application's callback function
 *
 * @comm
 * Initializes image processing filter. Stores references to applications callback interface
 * and IWiaItem2
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
STDMETHODIMP
CImageFilter::InitializeFilter(
    _In_ IN IWiaItem2             *pWiaItem,
    __callback IN IWiaTransferCallback *pWiaTransferCallback)
{
    HRESULT hr = S_OK;

    m_bTransferCancelled = FALSE;

    hr = (pWiaItem && pWiaTransferCallback) ? S_OK : E_INVALIDARG;

    //
    // Image processing filters supplied with WIA drivers do not
    // support storage items.
    //
    if (SUCCEEDED(hr))
    {
        GUID guidItemCategory = {0};
        hr = pWiaItem->GetItemCategory(&guidItemCategory);
        if (SUCCEEDED(hr))
        {
            if ((WIA_CATEGORY_FINISHED_FILE == guidItemCategory) ||
                (WIA_CATEGORY_FOLDER == guidItemCategory) ||
                (WIA_CATEGORY_ROOT == guidItemCategory))
            {
                hr = E_NOTIMPL;
            }
        }
    }

    //
    // InitializeFilter should only be called once ... but we still Release
    // any resources we might reference
    //
    if (SUCCEEDED(hr))
    {
        if (m_pWiaItem)
        {
            m_pWiaItem->Release();
            m_pWiaItem = NULL;
        }

        if (m_pAppWiaTransferCallback)
        {
            m_pAppWiaTransferCallback->Release();
            m_pAppWiaTransferCallback = NULL;
        }
    }

    if (SUCCEEDED(hr))
    {
        m_pWiaItem = pWiaItem;
        m_pWiaItem->AddRef();

        m_pAppWiaTransferCallback = pWiaTransferCallback;
        m_pAppWiaTransferCallback->AddRef();
    }

    return hr;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CImageFilter::SetNewCallback | Sets new callback for image processing filter to use
 *
 * @parm IWiaTransferCallback | pWiaTransferCallback |
 *   The new application callback which the filter should use.
 *
 * @comm
 * Since an application can change the callback to use in the IWiaPreview::UpdatePreview call the image
 * processing filter must "get notified" of this.
 * Note, the image processing filter is always required to release its current callback even if it is
 * passed NULL for the callback.
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
STDMETHODIMP
CImageFilter::SetNewCallback(
    _In_opt_ __callback IN IWiaTransferCallback *pWiaTransferCallback)
{
    //
    // Always drop the current callback, even when the replacement is NULL.
    //
    if (m_pAppWiaTransferCallback)
    {
        m_pAppWiaTransferCallback->Release();
        m_pAppWiaTransferCallback = NULL;
    }

    if (pWiaTransferCallback)
    {
        m_pAppWiaTransferCallback = pWiaTransferCallback;
        m_pAppWiaTransferCallback->AddRef();
    }

    return S_OK;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CImageFilter::FilterPreviewImage | FilterPreviewImage implementation
 *
 * @parm IWiaItem2 | pWiaChildItem |
 *   pWiaChildItem2 is the item which the image process is to process.
 *   This item must be a child item of the item, m_pWiaItem, that was passed into InitializeFilter.
 *
 * @parm RECT | InputImageExtents |
 *   The coordinates (on the flatbed scanner) of the image that the preview component caches internally,
 *   which is also the image that is passed into the pInputStream parameter.
 *   We need this parameter since it is possible that the cached image (pInputStream) was not captured
 *   with XPOS=YPOS=0.
 *
 * @parm IStream | pInputStream |
 *   Unfiltered image that is stored by WIA Preview Component.
 *
 * @comm
 * FilterPreviewImage is called by the preview component, when an application calls UpdatePreview.
 * We simply read all the properties from pWiaChildItem that are required for us to do the filtering
 * and then retrieve the application stream. The actual filtering is then performed in DoFiltering.
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
STDMETHODIMP
CImageFilter::FilterPreviewImage(
    IN LONG              lFlags,
    _In_ IN IWiaItem2   *pWiaChildItem,
    IN RECT              InputImageExtents,
    _In_ IN IStream     *pInputStream)
{
    UNREFERENCED_PARAMETER(lFlags);

    IStream *pAppStream = NULL;
    // NOTE(review): bstrItemName/bstrFullItemName are never SysFreeString'd in
    // this method - looks like a leak; confirm ownership before changing.
    BSTR bstrItemName = NULL;
    BSTR bstrFullItemName = NULL;
    GUID guidItemCategory = {0};
    LONG xpos = 0, ypos = 0, width = 0, height = 0;
    LONG lBrightness = 0;
    LONG lContrast = 0;
    LONG lDeskewX = 0;
    LONG lDeskewY = 0;
    LONG lRotation = PORTRAIT;
    HRESULT hr = S_OK;
    IStream * tempStream = NULL;
    IStream * tempOutStream = NULL;
    BOOL bConvertedRawImage = FALSE;
    ULONG64 ulBytesWrittenToOutputStream = 0;

    //
    // Parameter validation
    //
    hr = (pWiaChildItem && pInputStream) ? S_OK : E_INVALIDARG;

    if (SUCCEEDED(hr))
    {
        //
        // Check whether the image extents are correct.
        // Error if the right or bottom coordinate is zero.
        // Or Left >= Right or Top >= Bottom.
        //
        if ((0 == InputImageExtents.right) ||
            (0 == InputImageExtents.bottom) ||
            (InputImageExtents.left >= InputImageExtents.right) ||
            (InputImageExtents.top >= InputImageExtents.bottom))
        {
            hr = E_INVALIDARG;
        }
    }

    if (SUCCEEDED(hr))
    {
        hr = m_pAppWiaTransferCallback ? S_OK : E_UNEXPECTED;
    }

    //
    // Read all properties we need
    //
    if (SUCCEEDED(hr))
    {
        CWiaItem *pWiaItemWrapper = new CWiaItem();

        hr = pWiaItemWrapper ? S_OK : E_OUTOFMEMORY;

        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->SetIWiaItem(pWiaChildItem);
        }

        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyGUID(WIA_IPA_ITEM_CATEGORY, &guidItemCategory);
            if (SUCCEEDED(hr) && ((guidItemCategory == WIA_CATEGORY_ROOT) ||
                                  (guidItemCategory == WIA_CATEGORY_FINISHED_FILE) ||
                                  (WIA_CATEGORY_FOLDER == guidItemCategory)))
            {
                //
                // We should never get here for storage items!
                //
                hr = E_INVALIDARG;
            }
        }

        //
        // Error if the following is not satisfied:
        // WIA_IPS_MIN_HORIZONTAL_SIZE <= right - left <= WIA_IPS_MAX_HORIZONTAL_SIZE
        // WIA_IPS_MIN_VERTICAL_SIZE   <= bottom - top <= WIA_IPS_MAX_VERTICAL_SIZE
        //
        if (SUCCEEDED(hr))
        {
            LONG lHorMin = 0, lHorMax = 0, lHorExtent = InputImageExtents.right - InputImageExtents.left;
            LONG lVerMin = 0, lVerMax = 0, lVerExtent = InputImageExtents.bottom - InputImageExtents.top;

            if (SUCCEEDED(hr))
            {
                hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_MIN_HORIZONTAL_SIZE, &lHorMin);
            }
            if (SUCCEEDED(hr))
            {
                hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_MAX_HORIZONTAL_SIZE, &lHorMax);
            }
            if (SUCCEEDED(hr))
            {
                hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_MIN_VERTICAL_SIZE, &lVerMin);
            }
            if (SUCCEEDED(hr))
            {
                hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_MAX_VERTICAL_SIZE, &lVerMax);
            }
            if (SUCCEEDED(hr))
            {
                if ((lHorExtent < lHorMin) ||
                    (lHorExtent > lHorMax) ||
                    (lVerExtent < lVerMin) ||
                    (lVerExtent > lVerMax))
                {
                    hr = E_INVALIDARG;
                }
            }
        }

        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_XPOS, &xpos);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_YPOS, &ypos);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_XEXTENT, &width);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_YEXTENT, &height);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyBSTR(WIA_IPA_ITEM_NAME, &bstrItemName);
            if (SUCCEEDED(hr) && !bstrItemName)
            {
                hr = E_UNEXPECTED;
            }
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyBSTR(WIA_IPA_FULL_ITEM_NAME, &bstrFullItemName);
            if (SUCCEEDED(hr) && !bstrFullItemName)
            {
                hr = E_UNEXPECTED;
            }
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_BRIGHTNESS, &lBrightness);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_CONTRAST, &lContrast);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_ROTATION, &lRotation);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_DESKEW_X, &lDeskewX);
        }
        if (SUCCEEDED(hr))
        {
            hr = pWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_DESKEW_Y, &lDeskewY);
        }

        if(SUCCEEDED(hr))
        {
            //
            // RAW input must be converted to BMP before DoFiltering can
            // operate on it; remember that we did so for the output path.
            //
            GUID guidItemFormat = {0};
            hr = pWiaItemWrapper->ReadRequiredPropertyGUID(WIA_IPA_FORMAT, &guidItemFormat);
            if((SUCCEEDED(hr)) && (IsEqualGUID(guidItemFormat, WiaImgFmt_RAW)))
            {
                hr = ConvertRawImageToBMP(pInputStream, &tempStream);
                if (SUCCEEDED(hr) && tempStream)
                {
                    bConvertedRawImage = TRUE;
                    pInputStream = tempStream;
                }
            }
        }

        if (pWiaItemWrapper)
        {
            delete pWiaItemWrapper;
        }
    }

    //
    // If the upper left corner of the passed image does not correspond to (0,0)
    // on the flatbed we have to adjust xpos and ypos accordingly in order for us
    // to "cut out" the correct region represented by pWiaChildItem
    //
    if (SUCCEEDED(hr))
    {
        xpos = xpos - InputImageExtents.left;
        ypos = ypos - InputImageExtents.top;
    }

    //
    // Now get the application stream and write to it
    //
    if (SUCCEEDED(hr))
    {
        hr = m_pAppWiaTransferCallback->GetNextStream(0,
                                                      bstrItemName,
                                                      bstrFullItemName,
                                                      &pAppStream);
        if (SUCCEEDED(hr) && !pAppStream)
        {
            hr = E_UNEXPECTED;
        }
    }

    if (SUCCEEDED(hr))
    {
        if (bConvertedRawImage)
        {
            //
            // Filter the BMP into a temporary stream, then convert the result
            // back to RAW into the application's stream.
            //
            hr = CreateStreamOnHGlobal(0, TRUE, &tempOutStream);
            if (SUCCEEDED(hr))
            {
                ULONG64 ulDummy = 0;
                hr = DoFiltering(lBrightness,
                                 lContrast,
                                 lRotation,
                                 lDeskewX,
                                 lDeskewY,
                                 pInputStream,
                                 tempOutStream,
                                 &ulDummy,
                                 xpos,
                                 ypos,
                                 width,
                                 height
                                 );
            }
            if (SUCCEEDED(hr))
            {
                hr = ConvertBMPImageToRaw(tempOutStream, pAppStream,&ulBytesWrittenToOutputStream);
            }
        }
        else
        {
            hr = DoFiltering(lBrightness,
                             lContrast,
                             lRotation,
                             lDeskewX,
                             lDeskewY,
                             pInputStream,
                             pAppStream,
                             &ulBytesWrittenToOutputStream,
                             xpos,
                             ypos,
                             width,
                             height
                             );
        }
    }

    if (pAppStream)
    {
        pAppStream->Release();
    }

    if (tempStream)
    {
        tempStream->Release();
    }

    if (tempOutStream)
    {
        tempOutStream->Release();
    }

    return hr;
}
/*****************************************************************************
 *
 * @func STDMETHODIMP | CImageFilter::ApplyProperties | Apply properties after filtering.
 *
 * @parm IWiaPropertyStorage | pWiaPropertyStorage |
 *   Pointer to property storage that the image processing filter can write properties to.
 *
 * @comm
 * ApplyProperties is called by the WIA service after the image processing filter has processed
 * the raw data. This method allows the image processing filter to write data back to the driver and device.
 * This may be necessary for filters that implement things such as auto-exposure.
 * Note, an image processing filter should only use the WriteMultiple method to write properties into
 * the provided storage.
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
STDMETHODIMP
CImageFilter::ApplyProperties(
    _Inout_ IN  IWiaPropertyStorage *pWiaPropertyStorage)
{
    HRESULT hr = S_OK;

    hr = pWiaPropertyStorage ? S_OK : E_INVALIDARG;

    //
    // This filter only writes the MY_TEST_FILTER_PROP property for
    // illustrational purposes.
    // In general if a filter does not need to write any properties it
    // should just return S_OK.
    //
    if (SUCCEEDED(hr))
    {
        PROPSPEC    PropSpec[1] = {0};
        PROPVARIANT PropVariant[1] = {0};

        PropVariantInit(PropVariant);

        PropSpec[0].ulKind = PRSPEC_PROPID;
        PropSpec[0].propid = MY_TEST_FILTER_PROP;
        PropVariant[0].vt = VT_I4;
        PropVariant[0].lVal = 1;

        //
        // Set the properties
        //
        hr = pWiaPropertyStorage->WriteMultiple(
            1,
            PropSpec,
            PropVariant,
            WIA_IPA_FIRST
            );
    }

    return hr;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CImageFilter::TransferCallback | TransferCallback implementation
 *
 * @parm LONG | lFlags |
 *   Flags
 *
 * @parm WiaTransferParams | pWiaTransferParams |
 *   Contains transfer status
 *
 * @comm
 * TransferCallback delegates to the application's callback. It changes the
 * number of bytes written since we always cache all the data before writing to
 * the application's stream. We do however not change the percentage since this
 * represents percentage of total transfer time (a "real" implementation probably
 * would take the filtering into account here).
 * We do not write the data to the application's stream until when we receive
 * a WIA_TRANSFER_MSG_END_OF_STREAM message.
 *
 * @rvalue S_OK |
 *   The function succeeded.
 * @rvalue E_XXX |
 *   The function failed
 *
 *****************************************************************************/
STDMETHODIMP
CImageFilter::TransferCallback(
    IN LONG                    lFlags,
    _In_ IN WiaTransferParams *pWiaTransferParams)
{
    HRESULT hr = S_OK;

    if (!m_pAppWiaTransferCallback)
    {
        hr = E_UNEXPECTED;
    }

    if ((SUCCEEDED(hr)) && (!pWiaTransferParams))
    {
        hr = E_INVALIDARG;
    }

    if (SUCCEEDED(hr))
    {
        //
        // Report the number of bytes actually delivered to the application's
        // stream so far, not what the driver has produced.
        //
        if (m_pCurrentStream)
        {
            pWiaTransferParams->ulTransferredBytes = m_pCurrentStream->m_cBytesWritten;
        }

        //
        // Note the percent reflects the amount of scanning the driver reports
        // whereas the "BytesWritten" member is the actual number of bytes
        // that we have sent to the application stream.
        //
        if (m_pCurrentStream && (pWiaTransferParams->lMessage == WIA_TRANSFER_MSG_END_OF_STREAM))
        {
            //
            // End of stream: perform the deferred filtering now (unless the
            // transfer was cancelled earlier), then drop the stream references.
            //
            if (!m_bTransferCancelled)
            {
                hr = m_pCurrentStream->Flush();
                pWiaTransferParams->ulTransferredBytes = m_pCurrentStream->m_cBytesWritten;
            }
            m_pCurrentStream -> ReleaseStreams();
        }

        //
        // Call this regardless of hr because applications need termination messages
        //
        HRESULT hrInner = m_pAppWiaTransferCallback->TransferCallback(lFlags, pWiaTransferParams);

        //
        // Don't overwrite the original error if there was one
        //
        if (SUCCEEDED(hr))
        {
            hr = hrInner;
        }

        if (m_pCurrentStream && (pWiaTransferParams->lMessage == WIA_TRANSFER_MSG_END_OF_STREAM))
        {
            m_pCurrentStream->Release();
            m_pCurrentStream = NULL;
        }

        //
        // To indicate not to write to the stream later
        // (S_FALSE from the application means "cancel" as well, hence != S_OK)
        //
        if ( S_OK != hr)
        {
            m_bTransferCancelled = TRUE;
        }
    }

    return hr;
}

/*****************************************************************************
 *
 * @func STDMETHODIMP | CImageFilter::GetNextStream | Implementation of GetNextStream
 *
 * @parm LONG | lFlags |
 *   Flags
 *
 * @parm BSTR | bstrItemName |
 *   Name of item
 *
 * @parm BSTR | bstrFullItemName |
 *   Full name of item
 *
 * @parm IStream | ppDestination |
 *   Upon successful return this will contain the filtering stream
 *
 * @comm
 * GetNextStream creates a filtering stream. Since the item represented by
 * bstrFullItemName may be a child item of the item passed into InitializeFilter
 * we have to call FindItemByName to retrieve the actual item.
 *
 * @rvalue S_OK |
 *   The function succeeded.
* @rvalue E_XXXXXX | * Failure * *****************************************************************************/ STDMETHODIMP #pragma warning(suppress: 6101) CImageFilter::GetNextStream( LONG lFlags, _In_z_ BSTR bstrItemName, _In_z_ BSTR bstrFullItemName, _Outptr_result_maybenull_ _At_(*ppDestination, _When_(return == S_OK, _Post_notnull_)) IStream **ppDestination) { HRESULT hr; IStream *pAppStream = NULL; IWiaItem2 *pCurrentWiaItem = NULL; BOOL bStorageItem = FALSE; LONG lBrightness = 0; LONG lContrast = 0; LONG lDeskewX = 0; LONG lDeskewY = 0; LONG lRotation = PORTRAIT; LONG lXExtent = 0; LONG lYExtent = 0; LONG lBitDepth = 0; GUID guidItemFormat = {0}; hr = (bstrItemName && bstrFullItemName && ppDestination) ? S_OK : E_INVALIDARG; if (SUCCEEDED(hr)) { *ppDestination = NULL; hr = m_pAppWiaTransferCallback ? S_OK : E_UNEXPECTED; } if (m_pCurrentStream) { m_pCurrentStream->Release(); m_pCurrentStream = NULL; } if (SUCCEEDED(hr)) { hr = m_pAppWiaTransferCallback->GetNextStream(lFlags, bstrItemName, bstrFullItemName, &pAppStream); if (SUCCEEDED(hr) && !pAppStream) { hr = E_UNEXPECTED; } } // // Return immediately following cancellations or skips // if ((S_FALSE == hr) || (WIA_STATUS_SKIP_ITEM == hr)) { return hr; } if (SUCCEEDED(hr)) { hr = m_pWiaItem->FindItemByName(0, bstrFullItemName, &pCurrentWiaItem); } // // Here we read all properties from pCurrentWiaItem that we need in order to // do the the filtering - in this specific case only brightness. // if (SUCCEEDED(hr)) { CWiaItem *pIWiaItemWrapper = NULL; pIWiaItemWrapper = new CWiaItem(); hr = pIWiaItemWrapper ? 
S_OK : E_OUTOFMEMORY; if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->SetIWiaItem(pCurrentWiaItem); } if (SUCCEEDED(hr)) { GUID guidItemCategory = {0}; hr = pIWiaItemWrapper->ReadRequiredPropertyGUID(WIA_IPA_ITEM_CATEGORY,&guidItemCategory); bStorageItem = ((guidItemCategory == WIA_CATEGORY_FINISHED_FILE) || (WIA_CATEGORY_FOLDER == guidItemCategory)); } if(SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyGUID(WIA_IPA_FORMAT, &guidItemFormat); } if (!bStorageItem) { if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_BRIGHTNESS,&lBrightness); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_CONTRAST,&lContrast); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_ROTATION, &lRotation); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_DESKEW_X, &lDeskewX); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_DESKEW_Y, &lDeskewY); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_XEXTENT, &lXExtent); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPS_YEXTENT, &lYExtent); } if (SUCCEEDED(hr)) { hr = pIWiaItemWrapper->ReadRequiredPropertyLong(WIA_IPA_DEPTH, &lBitDepth); } } if (pIWiaItemWrapper) { delete pIWiaItemWrapper; } } if (SUCCEEDED(hr)) { if (!bStorageItem) { // // We could easily improve the performace by creating a separate filtering stream // which simply delegates all calls directly to the application's stream in case // Rotation, DeskewX, DeskewY, Brightness and Contrast are all set to 0. 
// m_pCurrentStream = new CMyFilterStream(); if (m_pCurrentStream) { hr = m_pCurrentStream->Initialize(pAppStream, lBrightness, lContrast, lRotation, lDeskewX, lDeskewY, lXExtent, lYExtent, lBitDepth, guidItemFormat); } else { hr = E_OUTOFMEMORY; } } else { (*ppDestination) = pAppStream; (*ppDestination)->AddRef(); } } if (SUCCEEDED(hr) && m_pCurrentStream) { hr = m_pCurrentStream->QueryInterface(IID_IStream, (void**)ppDestination); } if (pAppStream) { pAppStream->Release(); } if (pCurrentWiaItem) { pCurrentWiaItem->Release(); } return hr; } /***************************************************************************** * * Class Object * *******************************************************************************/ class CFilterClass : public IClassFactory { public: STDMETHODIMP QueryInterface(_In_ const IID& iid_requested, _Out_ void** ppInterfaceOut) { HRESULT hr = S_OK; hr = ppInterfaceOut ? S_OK : E_POINTER; if (SUCCEEDED(hr)) { *ppInterfaceOut = NULL; } // // We support IID_IUnknown and IID_IClassFactory // if (SUCCEEDED(hr)) { if (IID_IUnknown == iid_requested) { *ppInterfaceOut = static_cast<IUnknown*>(this); } else if (IID_IClassFactory == iid_requested) { *ppInterfaceOut = static_cast<IClassFactory*>(this); } else { hr = E_NOINTERFACE; } } if (SUCCEEDED(hr)) { reinterpret_cast<IUnknown*>(*ppInterfaceOut)->AddRef(); } return hr; } STDMETHODIMP_(ULONG) AddRef(void) { LockModule(); return 2; } STDMETHODIMP_(ULONG) Release(void) { UnlockModule(); return 1; } STDMETHODIMP CreateInstance(_In_ IUnknown *pUnkOuter, _In_ REFIID riid, _Out_ void **ppv) { CImageFilter *pImageFilter = NULL; HRESULT hr; hr = ppv ? S_OK : E_POINTER; if (SUCCEEDED(hr)) { *ppv = 0; } if (SUCCEEDED(hr)) { if (pUnkOuter) { hr = CLASS_E_NOAGGREGATION; } } if (SUCCEEDED(hr)) { pImageFilter = new CImageFilter(); hr = pImageFilter ? 
S_OK : E_OUTOFMEMORY; } if (SUCCEEDED(hr)) { pImageFilter->AddRef(); hr = pImageFilter->QueryInterface(riid, ppv); pImageFilter->Release(); } return hr; } STDMETHODIMP LockServer(BOOL bLock) { if (bLock) { LockModule(); } else { UnlockModule(); } return S_OK; } }; STDAPI DllCanUnloadNow(void) { return (g_cLocks == 0) ? S_OK : S_FALSE; } STDAPI DllGetClassObject(_In_ REFCLSID rclsid, _In_ REFIID riid, _Outptr_ void **ppv) { static CFilterClass s_FilterClass; if (rclsid == CLSID_WiaImageFilter) { return s_FilterClass.QueryInterface(riid, ppv); } *ppv = 0; return CLASS_E_CLASSNOTAVAILABLE; } // // Registered in driver INF file - what about un-regestering? // STDAPI DllUnregisterServer() { return S_OK; } STDAPI DllRegisterServer() { return S_OK; }
33,167
2,805
<filename>ckan/views/dashboard.py # encoding: utf-8 import logging from flask import Blueprint import ckan.lib.base as base import ckan.lib.helpers as h import ckan.logic as logic import ckan.model as model from ckan.common import _, g, request from ckan.views.user import _extra_template_variables log = logging.getLogger(__name__) dashboard = Blueprint(u'dashboard', __name__, url_prefix=u'/dashboard') @dashboard.before_request def before_request(): if not g.userobj: h.flash_error(_(u'Not authorized to see this page')) return h.redirect_to(u'user.login') try: context = dict(model=model, user=g.user, auth_user_obj=g.userobj) logic.check_access(u'site_read', context) except logic.NotAuthorized: base.abort(403, _(u'Not authorized to see this page')) def _get_dashboard_context(filter_type=None, filter_id=None, q=None): u'''Return a dict needed by the dashboard view to determine context.''' def display_name(followee): u'''Return a display name for a user, group or dataset dict.''' display_name = followee.get(u'display_name') fullname = followee.get(u'fullname') title = followee.get(u'title') name = followee.get(u'name') return display_name or fullname or title or name if (filter_type and filter_id): context = { u'model': model, u'session': model.Session, u'user': g.user, u'auth_user_obj': g.userobj, u'for_view': True } data_dict = {u'id': filter_id, u'include_num_followers': True} followee = None action_functions = { u'dataset': u'package_show', u'user': u'user_show', u'group': u'group_show', u'organization': u'organization_show', } action_function = logic.get_action(action_functions.get(filter_type)) # Is this a valid type? 
if action_function is None: base.abort(404, _(u'Follow item not found')) try: followee = action_function(context, data_dict) except (logic.NotFound, logic.NotAuthorized): base.abort(404, _(u'{0} not found').format(filter_type)) if followee is not None: return { u'filter_type': filter_type, u'q': q, u'context': display_name(followee), u'selected_id': followee.get(u'id'), u'dict': followee, } return { u'filter_type': filter_type, u'q': q, u'context': _(u'Everything'), u'selected_id': False, u'dict': None, } def index(offset=0): context = { u'model': model, u'session': model.Session, u'user': g.user, u'auth_user_obj': g.userobj, u'for_view': True } data_dict = {u'user_obj': g.userobj, u'offset': offset} extra_vars = _extra_template_variables(context, data_dict) q = request.params.get(u'q', u'') filter_type = request.params.get(u'type', u'') filter_id = request.params.get(u'name', u'') extra_vars[u'followee_list'] = logic.get_action(u'followee_list')( context, { u'id': g.userobj.id, u'q': q }) extra_vars[u'dashboard_activity_stream_context'] = _get_dashboard_context( filter_type, filter_id, q) extra_vars[u'dashboard_activity_stream'] = h.dashboard_activity_stream( g.userobj.id, filter_type, filter_id, offset) # Mark the user's new activities as old whenever they view their # dashboard page. 
logic.get_action(u'dashboard_mark_activities_old')(context, {}) return base.render(u'user/dashboard.html', extra_vars) def datasets(): context = {u'for_view': True, u'user': g.user, u'auth_user_obj': g.userobj} data_dict = {u'user_obj': g.userobj, u'include_datasets': True} extra_vars = _extra_template_variables(context, data_dict) return base.render(u'user/dashboard_datasets.html', extra_vars) def organizations(): context = {u'for_view': True, u'user': g.user, u'auth_user_obj': g.userobj} data_dict = {u'user_obj': g.userobj} extra_vars = _extra_template_variables(context, data_dict) return base.render(u'user/dashboard_organizations.html', extra_vars) def groups(): context = {u'for_view': True, u'user': g.user, u'auth_user_obj': g.userobj} data_dict = {u'user_obj': g.userobj} extra_vars = _extra_template_variables(context, data_dict) return base.render(u'user/dashboard_groups.html', extra_vars) dashboard.add_url_rule( u'/', view_func=index, strict_slashes=False, defaults={ u'offset': 0 }) dashboard.add_url_rule(u'/<int:offset>', view_func=index) dashboard.add_url_rule(u'/datasets', view_func=datasets) dashboard.add_url_rule(u'/groups', view_func=groups) dashboard.add_url_rule(u'/organizations', view_func=organizations)
2,194
584
/****************************************************************************** * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * The full GNU General Public License is included in this distribution * in the file called COPYING. * * Contact Information: * Intel Linux Wireless <<EMAIL>> * Intel Corporation, 5200 N.E. <NAME>, Hillsboro, OR 97124-6497 * * BSD LICENSE * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. 
* * Neither the name Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *****************************************************************************/ #ifndef __iwl_fw_api_phy_ctxt_h__ #define __iwl_fw_api_phy_ctxt_h__ /* Supported bands */ #define PHY_BAND_5 (0) #define PHY_BAND_24 (1) /* Supported channel width, vary if there is VHT support */ #define PHY_VHT_CHANNEL_MODE20 (0x0) #define PHY_VHT_CHANNEL_MODE40 (0x1) #define PHY_VHT_CHANNEL_MODE80 (0x2) #define PHY_VHT_CHANNEL_MODE160 (0x3) /* * Control channel position: * For legacy set bit means upper channel, otherwise lower. * For VHT - bit-2 marks if the control is lower/upper relative to center-freq * bits-1:0 mark the distance from the center freq. for 20Mhz, offset is 0. 
* center_freq * | * 40Mhz |_______|_______| * 80Mhz |_______|_______|_______|_______| * 160Mhz |_______|_______|_______|_______|_______|_______|_______|_______| * code 011 010 001 000 | 100 101 110 111 */ #define PHY_VHT_CTRL_POS_1_BELOW (0x0) #define PHY_VHT_CTRL_POS_2_BELOW (0x1) #define PHY_VHT_CTRL_POS_3_BELOW (0x2) #define PHY_VHT_CTRL_POS_4_BELOW (0x3) #define PHY_VHT_CTRL_POS_1_ABOVE (0x4) #define PHY_VHT_CTRL_POS_2_ABOVE (0x5) #define PHY_VHT_CTRL_POS_3_ABOVE (0x6) #define PHY_VHT_CTRL_POS_4_ABOVE (0x7) /* * struct iwl_fw_channel_info_v1 - channel information * * @band: PHY_BAND_* * @channel: channel number * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* */ struct iwl_fw_channel_info_v1 { u8 band; u8 channel; u8 width; u8 ctrl_pos; } __packed; /* CHANNEL_CONFIG_API_S_VER_1 */ /* * struct iwl_fw_channel_info - channel information * * @channel: channel number * @band: PHY_BAND_* * @width: PHY_[VHT|LEGACY]_CHANNEL_* * @ctrl channel: PHY_[VHT|LEGACY]_CTRL_* * @reserved: for future use and alignment */ struct iwl_fw_channel_info { __le32 channel; u8 band; u8 width; u8 ctrl_pos; u8 reserved; } __packed; /*CHANNEL_CONFIG_API_S_VER_2 */ #define PHY_RX_CHAIN_DRIVER_FORCE_POS (0) #define PHY_RX_CHAIN_DRIVER_FORCE_MSK \ (0x1 << PHY_RX_CHAIN_DRIVER_FORCE_POS) #define PHY_RX_CHAIN_VALID_POS (1) #define PHY_RX_CHAIN_VALID_MSK \ (0x7 << PHY_RX_CHAIN_VALID_POS) #define PHY_RX_CHAIN_FORCE_SEL_POS (4) #define PHY_RX_CHAIN_FORCE_SEL_MSK \ (0x7 << PHY_RX_CHAIN_FORCE_SEL_POS) #define PHY_RX_CHAIN_FORCE_MIMO_SEL_POS (7) #define PHY_RX_CHAIN_FORCE_MIMO_SEL_MSK \ (0x7 << PHY_RX_CHAIN_FORCE_MIMO_SEL_POS) #define PHY_RX_CHAIN_CNT_POS (10) #define PHY_RX_CHAIN_CNT_MSK \ (0x3 << PHY_RX_CHAIN_CNT_POS) #define PHY_RX_CHAIN_MIMO_CNT_POS (12) #define PHY_RX_CHAIN_MIMO_CNT_MSK \ (0x3 << PHY_RX_CHAIN_MIMO_CNT_POS) #define PHY_RX_CHAIN_MIMO_FORCE_POS (14) #define PHY_RX_CHAIN_MIMO_FORCE_MSK \ (0x1 << PHY_RX_CHAIN_MIMO_FORCE_POS) /* TODO: fix the value, make it 
depend on firmware at runtime? */ #define NUM_PHY_CTX 3 /* TODO: complete missing documentation */ /** * struct iwl_phy_context_cmd_tail - tail of iwl_phy_ctx_cmd for alignment with * various channel structures. * * @txchain_info: ??? * @rxchain_info: ??? * @acquisition_data: ??? * @dsp_cfg_flags: set to 0 */ struct iwl_phy_context_cmd_tail { __le32 txchain_info; __le32 rxchain_info; __le32 acquisition_data; __le32 dsp_cfg_flags; } __packed; /** * struct iwl_phy_context_cmd - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding * @action: action to perform, one of FW_CTXT_ACTION_* * @apply_time: 0 means immediate apply and context switch. * other value means apply new params after X usecs * @tx_param_color: ??? * @ci: channel info * @tail: command tail */ struct iwl_phy_context_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ __le32 id_and_color; __le32 action; /* PHY_CONTEXT_DATA_API_S_VER_1 */ __le32 apply_time; __le32 tx_param_color; struct iwl_fw_channel_info ci; struct iwl_phy_context_cmd_tail tail; } __packed; /* PHY_CONTEXT_CMD_API_VER_1 */ struct iwl_phy_ctx { uint16_t id; uint16_t color; uint32_t ref; struct apple80211_channel *channel; }; #endif /* __iwl_fw_api_phy_ctxt_h__ */
2,970