max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
319
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://raw.githubusercontent.com/todogroup/repolinter/master/rules/script-passes-config.json", "type": "object", "properties": { "schema": { "$ref": "http://json-schema.org/draft-07/schema" }, "glob": { "type": "string" }, "nocase": { "type": "boolean", "default": false }, "succeed-on-non-existent": { "type": "boolean", "default": false }, "human-readable-message": { "type": "string" } }, "required": ["glob", "schema"] }
251
739
import pyjd # this is dummy in pyjs from pyjamas import logging from pyjamas.ui.Button import Button from pyjamas.ui.RootPanel import RootPanel from pyjamas.ui.HTML import HTML from pyjamas.ui.DockPanel import DockPanel from pyjamas.ui import HasAlignment from pyjamas.ui.Hyperlink import Hyperlink from pyjamas.ui.VerticalPanel import VerticalPanel from pyjamas.ui.Sink import SinkList from pyjamas import History from pyjamas import Window import sink.Info as Info import sink.Buttons as Buttons import sink.Layouts as Layouts import sink.Images as Images import sink.Menus as Menus import sink.Lists as Lists import sink.Popups as Popups import sink.Tables as Tables import sink.Text as Text import sink.Trees as Trees import sink.Frames as Frames import sink.Tabs as Tabs from sink.Logger import Logger log = logging.getAppendLogger(__name__, logging.DEBUG, logging.PLAIN_FORMAT) class KitchenSink: def onHistoryChanged(self, token): log.debug("onHistoryChanged: %s", token) info = self.sink_list.find(token) if info is not None: self.show(info, False) else: self.showInfo() def onModuleLoad(self): self.curInfo='' self.curSink=None self.description=HTML() self.sink_list=SinkList() self.panel=DockPanel() self.loadSinks() self.sinkContainer = DockPanel() self.sinkContainer.setStyleName("ks-Sink") vp=VerticalPanel() vp.setWidth("100%") vp.add(self.description) vp.add(self.sinkContainer) self.description.setStyleName("ks-Info") self.panel.add(self.sink_list, DockPanel.WEST) self.panel.add(vp, DockPanel.CENTER) self.panel.setCellVerticalAlignment(self.sink_list, HasAlignment.ALIGN_TOP) self.panel.setCellWidth(vp, "100%") History.addHistoryListener(self) RootPanel().add(self.panel) RootPanel().add(Logger()) #Show the initial screen. 
initToken = History.getToken() if len(initToken): self.onHistoryChanged(initToken) else: self.showInfo() def show(self, info, affectHistory): if info == self.curInfo: return self.curInfo = info #log.debug("showing " + info.getName()) if self.curSink is not None: #log.debug("removing " + str(self.curSink)) self.curSink.onHide() self.sinkContainer.remove(self.curSink) self.curSink = info.getInstance() self.sink_list.setSinkSelection(info.getName()) self.description.setHTML(info.getDescription()) if (affectHistory): History.newItem(info.getName()) self.sinkContainer.add(self.curSink, DockPanel.CENTER) self.sinkContainer.setCellWidth(self.curSink, "100%") self.sinkContainer.setCellHeight(self.curSink, "100%") self.sinkContainer.setCellVerticalAlignment(self.curSink, HasAlignment.ALIGN_TOP) self.curSink.onShow() def loadSinks(self): self.sink_list.add(Info.init()) self.sink_list.add(Buttons.init()) self.sink_list.add(Menus.init()) self.sink_list.add(Images.init()) self.sink_list.add(Layouts.init()) self.sink_list.add(Lists.init()) self.sink_list.add(Popups.init()) self.sink_list.add(Tables.init()) self.sink_list.add(Text.init()) self.sink_list.add(Trees.init()) self.sink_list.add(Frames.init()) self.sink_list.add(Tabs.init()) def showInfo(self): self.show(self.sink_list.find("Info"), False) if __name__ == '__main__': pyjd.setup("public/KitchenSink.html") app = KitchenSink() app.onModuleLoad() pyjd.run()
1,636
342
import toppra import pytest @pytest.fixture(name='path') def setup_geometric_path(): yield toppra.SplineInterpolator([0, 1, 2], [(0, 0), (1, 2), (2, 0)]) def test_initialzie(path): gridpoints = [0, 0.5, 1, 1.5, 2] velocities = [1, 2, 2, 1, 0] # xd = [1, 4, 4, 1, 0] # ud = [6.0, 0, -6.0, -2.0] path_new = toppra.ParametrizeSpline(path, gridpoints, velocities) assert path_new.path_interval[0] == 0 assert path_new.path_interval[-1] > 0
225
435
{ "copyright_text": "Creative Commons Attribution license (reuse allowed)", "description": "<NAME>\nhttps://kiwi.pycon.org/schedule/presentation/124/\nWhat would happen to all the cat videos if YouTube were to disappear? It would be a cat-atstrophe! GNU MediaGoblin is Python-based media publishing system for artists \u2014 an alternative to centralised, censored and surveilled systems like Flickr, YouTube and SoundCloud. MediaGoblin gives people privacy, choice and control of their own media, something we need now more than ever.", "duration": 1795, "language": "eng", "recorded": "2016-09-11", "related_urls": [ "https://kiwi.pycon.org/schedule/presentation/124/" ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/RLBaoMYnSuY/maxresdefault.jpg", "title": "Preventing Cat-astrophes with GNU MediaGoblin", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=RLBaoMYnSuY" } ] }
340
424
<filename>app/src/main/java/com/j256/ormlite/dao/RawRowObjectMapper.java package com.j256.ormlite.dao; import java.sql.SQLException; import com.j256.ormlite.field.DataType; import com.j256.ormlite.stmt.QueryBuilder; /** * Parameterized row mapper that takes output from the {@link GenericRawResults} and returns a T. Is used in the * {@link Dao#queryRaw(String, DataType[], RawRowObjectMapper, String...)} method. * * <p> * <b> NOTE: </b> If you need to map Strings instead then consider using the {@link RawRowMapper} with the * {@link Dao#queryRaw(String, RawRowMapper, String...)} method which allows you to iterate over the raw results as * String[]. * </p> * * @param <T> * Type that the mapRow returns. * @author graywatson */ public interface RawRowObjectMapper<T> { /** * Used to convert a raw results row to an object. * * <p> * <b>NOTE:</b> If you are using the {@link QueryBuilder#prepareStatementString()} to build your query, it may have * added the id column to the selected column list if the Dao object has an id you did not include it in the columns * you selected. So the results might have one more column than you are expecting. * </p> * * @return The created object with all of the fields set from the results. Return null if there is no object * generated from these results. * @param columnNames * Array of names of columns. * @param dataTypes * Array of the DataTypes of each of the columns as passed into the * {@link Dao#queryRaw(String, DataType[], RawRowObjectMapper, String...)} * @param resultColumns * Array of result columns. * @throws SQLException * If there is any critical error with the data and you want to stop the paging. */ public T mapRow(String[] columnNames, DataType[] dataTypes, Object[] resultColumns) throws SQLException; }
648
1,052
<filename>app/src/test/java/com/zegoggles/smssync/contacts/ContactGroupsTest.java package com.zegoggles.smssync.contacts; import com.zegoggles.smssync.mail.PersonRecord; import org.junit.Test; import org.junit.runner.RunWith; import org.robolectric.RobolectricTestRunner; import static com.google.common.truth.Truth.assertThat; @RunWith(RobolectricTestRunner.class) public class ContactGroupsTest { @Test public void shouldAddIds() throws Exception { ContactGroupIds ids = new ContactGroupIds(); assertThat(ids.getIds()).isEmpty(); assertThat(ids.getRawIds()).isEmpty(); ids.add(1, 4); ids.add(3, 4); assertThat(ids.getIds()).containsExactly(1L, 3L); assertThat(ids.getRawIds()).containsExactly(4L); } @Test public void shouldCheckForPerson() throws Exception { ContactGroupIds ids = new ContactGroupIds(); PersonRecord record = new PersonRecord(22, "Test", "<EMAIL>", "123"); assertThat(ids.contains(record)).isFalse(); ids.add(22L, 44L); assertThat(ids.contains(record)).isTrue(); } }
456
34,359
#pragma once #include "SampleMonarch.h" #include "SamplePeasant.h" #include "../../types/inc/utils.hpp" class AppState { public: bool areWeTheKing(const bool logPIDs = false); void initializeState(); static winrt::MonarchPeasantSample::Monarch instantiateMonarch(); void createMonarch(); bool processCommandline(); void remindKingWhoTheyAre(const winrt::MonarchPeasantSample::IPeasant& peasant); HANDLE hInput{ INVALID_HANDLE_VALUE }; HANDLE hOutput{ INVALID_HANDLE_VALUE }; winrt::MonarchPeasantSample::IPeasant peasant{ nullptr }; winrt::MonarchPeasantSample::Monarch monarch{ nullptr }; std::vector<winrt::hstring> args; private: void _setupConsole(); int _appLoop(); winrt::MonarchPeasantSample::IPeasant _createOurPeasant(); }; bool monarchAppLoop(AppState& state); // Defined in MonarchMain.cpp bool peasantAppLoop(AppState& state); // Defined in PeasantMain.cpp
362
335
<reponame>Safal08/Hacktoberfest-1 { "word": "Olfactometer", "definitions": [ "An instrument for measuring the intensity of an odour or the sensitivity of someone or something to an odour." ], "parts-of-speech": "Noun" }
93
669
<gh_stars>100-1000 /* * Copyright 2020 The TensorFlow Runtime Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // Unit test for DenseHostTensor. #include "tfrt/tensor/dense_host_tensor.h" #include <complex> #include "gmock/gmock.h" #include "gtest/gtest.h" #include "tfrt/cpp_tests/test_util.h" #include "tfrt/tensor/dense_host_tensor_view.h" #include "tfrt/tensor/dense_tensor_utils.h" namespace tfrt { namespace { TEST(DenseHostTensorTest, DefaultConstructible) { tfrt::DenseHostTensor dht; tfrt::DenseHostTensor other = dht.CopyRef(); EXPECT_EQ(dht.dtype(), DType::Invalid); EXPECT_EQ(other.dtype(), DType::Invalid); } TEST(DenseHostTensorTest, FillWithComplex64Type) { auto host = CreateHostContext(); auto dht_create_res_a = tfrt::DenseHostTensor::CreateUninitialized<std::complex<float>>( TensorShape({1, 2}), host.get()); ASSERT_TRUE(dht_create_res_a.hasValue()); DenseHostTensor dht_a(std::move(*dht_create_res_a)); MutableDHTArrayView<std::complex<float>> tensor_view_a(&dht_a); tensor_view_a.Fill({1.0, -2.0}); auto dht_create_res_b = tfrt::DenseHostTensor::CreateUninitialized<std::complex<float>>( TensorShape({1, 2}), host.get()); ASSERT_TRUE(dht_create_res_b.hasValue()); DenseHostTensor dht_b(std::move(*dht_create_res_b)); MutableDHTArrayView<std::complex<float>> tensor_view_b(&dht_b); tensor_view_b.Fill({1.0, -2.0}); EXPECT_TRUE(TensorApproxEqual<std::complex<float>>(dht_a, dht_b)); } TEST(DenseHostTensorTest, FillWithComplex128Type) { auto host = CreateHostContext(); auto 
dht_create_res_a = tfrt::DenseHostTensor::CreateUninitialized<std::complex<double>>( TensorShape({1, 2}), host.get()); ASSERT_TRUE(dht_create_res_a.hasValue()); DenseHostTensor dht_a(std::move(*dht_create_res_a)); MutableDHTArrayView<std::complex<double>> tensor_view_a(&dht_a); tensor_view_a.Fill({1.0, -2.0}); auto dht_create_res_b = tfrt::DenseHostTensor::CreateUninitialized<std::complex<double>>( TensorShape({1, 2}), host.get()); ASSERT_TRUE(dht_create_res_b.hasValue()); DenseHostTensor dht_b(std::move(*dht_create_res_b)); MutableDHTArrayView<std::complex<double>> tensor_view_b(&dht_b); tensor_view_b.Fill({1.0, -2.0}); EXPECT_TRUE(TensorApproxEqual<std::complex<double>>(dht_a, dht_b)); } TEST(DenseHostTensorTest, FillWithBF16Type) { // Test DType::BF16. The bf16 is a placeholder but not the actual // implementation of brain float 16. auto host = CreateHostContext(); auto dht_create_res_a = tfrt::DenseHostTensor::CreateUninitialized<bf16>( TensorShape({1, 1}), host.get()); ASSERT_TRUE(dht_create_res_a.hasValue()); DenseHostTensor dht_a(std::move(*dht_create_res_a)); MutableDHTArrayView<bf16> tensor_view_a(&dht_a); tensor_view_a.Fill(bf16{static_cast<uint16_t>(1.0)}); auto dht_create_res_b = tfrt::DenseHostTensor::CreateUninitialized<bf16>( TensorShape({1, 1}), host.get()); ASSERT_TRUE(dht_create_res_b.hasValue()); DenseHostTensor dht_b(std::move(*dht_create_res_b)); MutableDHTArrayView<bf16> tensor_view_b(&dht_b); tensor_view_b.Fill(bf16{static_cast<uint16_t>(1.0)}); // Compare the buffer value, which is uint_16. EXPECT_TRUE(tensor_view_a[0].value == tensor_view_b[0].value); } TEST(DenseHostTensorSharedTest, FillWithComplex64Type) { // Creates a HostBuffer that is shared between 2 distinct DenseHostTensors. // Validates the DenseHostTensor values against the parent HostBuffer. 
auto host = CreateHostContext(); auto parent_buffer = tfrt::HostBuffer::CreateUninitialized( /*size=*/16, /*alignment=*/sizeof(std::complex<float>), host->allocator()); // Create dht_a from 0 bytes to 7 bytes in the buffer. auto host_buffer_a = tfrt::HostBuffer::CreateFromExternal(parent_buffer, /*offset=*/0, /*size=*/8); auto dht_a = tfrt::DenseHostTensor( TensorMetadata(GetDType<std::complex<float>>(), TensorShape({1, 1})), std::move(host_buffer_a)); MutableDHTArrayView<std::complex<float>> tensor_view_a(&dht_a); tensor_view_a.Fill({1.0, -2.0}); // Create dht_b from 8 bytes to 15 bytes in the buffer. auto host_buffer_b = tfrt::HostBuffer::CreateFromExternal(parent_buffer, /*offset=*/8, /*size=*/8); auto dht_b = tfrt::DenseHostTensor( TensorMetadata(GetDType<std::complex<float>>(), TensorShape({1, 1})), std::move(host_buffer_b)); MutableDHTArrayView<std::complex<float>> tensor_view_b(&dht_b); tensor_view_b.Fill({3.0, -4.0}); // Compare the values of the parent buffer with the slices. ASSERT_EQ(parent_buffer->size(), 16); ASSERT_EQ(dht_a.DataSizeInBytes(), 8); ASSERT_EQ(dht_b.DataSizeInBytes(), 8); auto parent_data = static_cast<std::complex<float> *>(parent_buffer->data()); auto dht_a_data = static_cast<std::complex<float> *>(dht_a.data()); auto dht_b_data = static_cast<std::complex<float> *>(dht_b.data()); ASSERT_EQ(parent_data[0], dht_a_data[0]); ASSERT_EQ(parent_data[1], dht_b_data[0]); } TEST(DenseHostTensorSharedTest, FillWithInt32Type) { // Creates a HostBuffer that is shared between 3 overlapping DenseHostTensors. // A is 0-7 bytes, B is 8-15 bytes, C is 8-11 bytes. auto host = CreateHostContext(); auto parent_buffer = tfrt::HostBuffer::CreateUninitialized( /*size=*/16, /*alignment=*/sizeof(int), host->allocator()); // Create dht_a from 0 bytes to 7 bytes in the buffer. 
auto host_buffer_a = tfrt::HostBuffer::CreateFromExternal(parent_buffer, /*offset=*/0, /*size=*/8); auto dht_a = tfrt::DenseHostTensor( TensorMetadata(GetDType<int>(), TensorShape({1, 2})), std::move(host_buffer_a)); MutableDHTArrayView<int> tensor_view_a(&dht_a); tensor_view_a.Fill(1.0); auto dht_a_data = static_cast<int *>(dht_a.data()); ASSERT_EQ(dht_a_data[0], 1.0); ASSERT_EQ(dht_a_data[1], 1.0); // Create dht_b from 8 bytes to 15 bytes in the buffer. auto host_buffer_b = tfrt::HostBuffer::CreateFromExternal(parent_buffer, /*offset=*/8, /*size=*/8); auto dht_b = tfrt::DenseHostTensor( TensorMetadata(GetDType<int>(), TensorShape({1, 2})), std::move(host_buffer_b)); MutableDHTArrayView<int> tensor_view_b(&dht_b); tensor_view_b.Fill(3.0); auto dht_b_data = static_cast<int *>(dht_b.data()); ASSERT_EQ(dht_b_data[0], 3.0); ASSERT_EQ(dht_b_data[1], 3.0); // Create dht_c from 8 bytes to 11 bytes. auto host_buffer_c = tfrt::HostBuffer::CreateFromExternal(parent_buffer, /*offset=*/8, /*size=*/4); auto dht_c = tfrt::DenseHostTensor( TensorMetadata(GetDType<int>(), TensorShape({1, 1})), std::move(host_buffer_c)); MutableDHTArrayView<int> tensor_view_c(&dht_c); tensor_view_c.Fill(-1.0); // Check the values of all of the buffers. ASSERT_EQ(parent_buffer->size(), 16); ASSERT_EQ(dht_a.DataSizeInBytes(), 8); ASSERT_EQ(dht_b.DataSizeInBytes(), 8); ASSERT_EQ(dht_c.DataSizeInBytes(), 4); dht_a_data = static_cast<int *>(dht_a.data()); dht_b_data = static_cast<int *>(dht_b.data()); auto dht_c_data = static_cast<int *>(dht_c.data()); ASSERT_EQ(dht_a_data[0], 1.0); ASSERT_EQ(dht_a_data[1], 1.0); ASSERT_EQ(dht_c_data[0], -1.0); ASSERT_EQ(dht_b_data[0], -1.0); ASSERT_EQ(dht_b_data[1], 3.0); } } // namespace } // namespace tfrt
3,785
2,843
package com.ycbjie.gank.presenter; import android.graphics.Color; import android.text.TextUtils; import com.yc.httpserver.ExceptionUtils; import com.ycbjie.gank.api.GanKModel; import com.ycbjie.gank.bean.bean.SearchResult; import com.ycbjie.gank.contract.GanKSearchContract; import io.reactivex.Observer; import io.reactivex.android.schedulers.AndroidSchedulers; import io.reactivex.disposables.Disposable; import io.reactivex.schedulers.Schedulers; import rx.subscriptions.CompositeSubscription; /** * <pre> * @author yangchong * blog : https://github.com/yangchong211 * time : 2017/5/14 * desc : 干货集中营 * revise: * </pre> */ public class GanKSearchPresenter implements GanKSearchContract.Presenter { private GanKSearchContract.View mView; private CompositeSubscription mSubscriptions; public GanKSearchPresenter(GanKSearchContract.View androidView) { this.mView = androidView; mSubscriptions = new CompositeSubscription(); } @Override public void subscribe() { mView.setEditTextCursorColor(Color.WHITE); } @Override public void unSubscribe() { mSubscriptions.clear(); } /** * 开始搜索 */ @Override public void search(String searchText, boolean isLoadMore) { if (TextUtils.isEmpty(searchText)) { mView.showTip("搜索内容不能为空。"); return; } mView.showSearchResult(); startSearch(searchText,isLoadMore); } /** * 开始删除 */ @Override public void deleteAllHistory() { } private int mPage = 1; private void startSearch(String searchText, final boolean isLoadMore) { if (!isLoadMore) { mPage = 1; mView.setLoading(); } else { mPage += 1; } GanKModel model = GanKModel.getInstance(); model.getSearchResult(searchText, 10, mPage) .subscribeOn(Schedulers.io()) .observeOn(AndroidSchedulers.mainThread()) .subscribe(new Observer<SearchResult>() { @Override public void onSubscribe(Disposable d) { } @Override public void onNext(SearchResult searchResult) { if (!isLoadMore) { if (searchResult == null || searchResult.count == 0) { mView.showTip("没有搜索到结果"); mView.hideLoading(); mView.showSearchHistory(); mView.setEmpty(); 
return; } mView.setSearchItems(searchResult); mView.showSearchResult(); } else { mView.addSearchItems(searchResult); mView.showSearchResult(); } } @Override public void onError(Throwable e) { mView.showTip("搜索出错了。"); mView.hideLoading(); ExceptionUtils.handleException(e); } @Override public void onComplete() { } }); } }
1,863
903
<gh_stars>100-1000 package org.develnext.jphp.core.compiler.jvm.statement.expr.value; import org.develnext.jphp.core.compiler.jvm.statement.ExpressionStmtCompiler; import org.develnext.jphp.core.compiler.jvm.statement.expr.BaseExprCompiler; import org.develnext.jphp.core.tokenizer.token.expr.value.IntegerExprToken; import php.runtime.memory.LongMemory; public class IntValueCompiler extends BaseExprCompiler<IntegerExprToken> { public IntValueCompiler(ExpressionStmtCompiler exprCompiler) { super(exprCompiler); } @Override public void write(IntegerExprToken token, boolean returnValue) { expr.writePushMemory(new LongMemory(token.getValue())); } }
247
461
<gh_stars>100-1000 class MapSum: class Node: def __init__(self): self.val = 0 self.next = dict() def __init__(self): """ Initialize your data structure here. """ self.root = MapSum.Node() def insert(self, key: str, val: int) -> None: cur_node = self.root for alpha in key: if alpha not in cur_node.next: cur_node.next[alpha] = MapSum.Node() cur_node = cur_node.next[alpha] cur_node.val = val def sum(self, prefix: str) -> int: cur_node = self.root for pre in prefix: if pre not in cur_node.next: return 0 cur_node = cur_node.next[pre] return self.__presum(cur_node) def __presum(self, node): s = node.val for next in node.next: s += self.__presum(node.next[next]) return s # Your MapSum object will be instantiated and called as such: # obj = MapSum() # obj.insert(key,val) # param_2 = obj.sum(prefix)
520
513
import os BASE_DIR = os.path.dirname(__file__) SAML_PATH = os.path.join(BASE_DIR, 'saml') TEMPLATE_PATH = os.path.join(BASE_DIR, 'templates')
66
636
import pytest from indy import libindy # noinspection PyUnusedLocal @pytest.mark.sync def test_set_runtime_config(): libindy.set_runtime_config('{"crypto_thread_pool_size": 2}')
68
2,542
<reponame>vishnuk007/service-fabric<filename>src/prod/test/FabricTest/FabricTestRuntimeManager.cpp // ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #include "stdafx.h" using namespace Api; using namespace FabricTest; using namespace Federation; using namespace std; using namespace Transport; using namespace Common; using namespace ServiceModel; using namespace Fabric; using namespace TestCommon; using namespace FederationTestCommon; using namespace Reliability; using namespace Naming; using namespace Hosting2; const StringLiteral TraceSource("FabricTest.RuntimeManager"); const int FabricTestRuntimeManager::MaxRetryCount = 5; FabricTestRuntimeManager::FabricTestRuntimeManager( NodeId nodeId, wstring const & runtimeServiceAddress, wstring const& namingServiceListenAddress, bool retryCreateHost, SecuritySettings const & clientSecuritySettings) : nodeId_(nodeId), runtimeServiceAddress_(runtimeServiceAddress), namingServiceListenAddress_(namingServiceListenAddress), clientFactory_(), statelessFabricRuntime_(), statefulFabricRuntime_(), statelessHost_(), statefulHost_(), serviceFactory_(), serviceGroupFactory_(), statelessServiceTypeMap_(), statefulServiceTypeMap_(), retryCreateHost_(retryCreateHost) { vector<wstring> gatewayAddresses; gatewayAddresses.push_back(namingServiceListenAddress_); auto error = Client::ClientFactory::CreateClientFactory(move(gatewayAddresses), clientFactory_); TestSession::FailTestIfNot(error.IsSuccess(), "CreateClientFactory failed: {0}", error); if (clientSecuritySettings.SecurityProvider() != SecurityProvider::None) { IClientSettingsPtr settings; error = clientFactory_->CreateSettingsClient(settings); TestSession::FailTestIfNot(error.IsSuccess(), "CreateSettingsClient failed: {0}", error); error = 
settings->SetSecurity(SecuritySettings(clientSecuritySettings)); TestSession::FailTestIfNot(error.IsSuccess(), "SetSecurity failed: {0}", error); } serviceFactory_ = make_shared<TestServiceFactory>(nodeId_, clientFactory_); serviceGroupFactory_ = make_shared<SGServiceFactory>(nodeId_); } FabricTestRuntimeManager::~FabricTestRuntimeManager() { } void FabricTestRuntimeManager::CreateAllRuntimes() { vector<wstring> statelessServiceTypes; vector<wstring> statefulServiceTypes; AddStatelessFabricRuntime(statelessServiceTypes); AddStatefulFabricRuntime(statefulServiceTypes); } void FabricTestRuntimeManager::AddStatelessHost() { this->PerformRetryableHostOperation( L"StatelessHost.Open", [&, this] (TimeSpan timeout, Common::AsyncCallback const& callback) { auto error = Hosting2::NonActivatedApplicationHost::Create( nullptr, // KtlSystemBase runtimeServiceAddress_, this->statelessHost_); TestSession::FailTestIfNot(error.IsSuccess(), "NonActivatedApplicationHost::Create failed with error {0}", error); this->statelessHost_->BeginOpen(timeout, callback); }, [&, this] (Common::AsyncOperationSPtr const& operation) -> ErrorCode { return this->statelessHost_->EndOpen(operation); }); } void FabricTestRuntimeManager::AddStatefulHost() { this->PerformRetryableHostOperation( L"StatefulHost.Open", [&, this] (TimeSpan timeout, Common::AsyncCallback const& callback) { auto error = Hosting2::NonActivatedApplicationHost::Create( nullptr, // KtlSystemBase runtimeServiceAddress_, this->statefulHost_); TestSession::FailTestIfNot(error.IsSuccess(), "NonActivatedApplicationHost::Create failed with error {0}", error); this->statefulHost_->BeginOpen(timeout, callback); }, [&, this] (Common::AsyncOperationSPtr const& operation) -> ErrorCode { return this->statefulHost_->EndOpen(operation); }); } void FabricTestRuntimeManager::AddStatelessFabricRuntime(vector<wstring> serviceTypes) { if(!statelessFabricRuntime_) { AddStatelessHost(); this->PerformRetryableHostOperation( 
L"StatelessHost.CreateRuntime", [&, this] (TimeSpan timeout, Common::AsyncCallback const& callback) { this->statelessHost_->BeginCreateComFabricRuntime(timeout, callback, AsyncOperationSPtr()); }, [&, this] (Common::AsyncOperationSPtr const& operation) -> ErrorCode { return this->statelessHost_->EndCreateComFabricRuntime(operation, this->statelessFabricRuntime_); }); TestSession::WriteNoise( TraceSource, "ComFabricRuntime::FabricCreateRuntime for fabric node {0} completed with success.", nodeId_); if(serviceTypes.size() == 0) { serviceTypes = serviceFactory_->SupportedStatelessServiceTypes; } RegisterStatelessServiceType(serviceTypes); } } void FabricTestRuntimeManager::AddStatefulFabricRuntime(vector<wstring> serviceTypes) { if(!statefulFabricRuntime_) { AddStatefulHost(); this->PerformRetryableHostOperation( L"StatefulHost.CreateRuntime", [&, this] (TimeSpan timeout, Common::AsyncCallback const& callback) { this->statefulHost_->BeginCreateComFabricRuntime(timeout, callback, AsyncOperationSPtr()); }, [&, this] (Common::AsyncOperationSPtr const& operation) -> ErrorCode { return this->statefulHost_->EndCreateComFabricRuntime(operation, this->statefulFabricRuntime_); }); TestSession::WriteNoise( TraceSource, "ComFabricRuntime::FabricCreateRuntime for fabric node {0} completed with success.", nodeId_); if(serviceTypes.size() == 0) { serviceTypes = serviceFactory_->SupportedStatefulServiceTypes; } RegisterStatefulServiceType(serviceTypes); } } void FabricTestRuntimeManager::PerformRetryableHostOperation( std::wstring const & operationName, BeginHostOperationCallback const & begin, EndHostOperationCallback const & end) { TestSession::WriteNoise( TraceSource, "Performing FabricTestRuntimeManager::{0}()", operationName); ErrorCode error; int retryCount = 0; bool done = false; while(!done) { TimeSpan timeout = retryCreateHost_ ? 
TimeSpan::FromSeconds(10) : FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout; TimeSpan waitTimeSpan = timeout + timeout; // Double the timeout to decide how long to wait auto waiter = make_shared<AsyncOperationWaiter>(); begin( timeout, [this, waiter, &end] (AsyncOperationSPtr const & operation) { ErrorCode endError = end(operation); waiter->SetError(endError); waiter->Set(); }); bool waitResult = waiter->WaitOne(waitTimeSpan); TestSession::FailTestIfNot(waitResult, "WaitOne failed due to timeout in {0}", operationName); error = waiter->GetError(); if(!error.IsSuccess() && retryCreateHost_ && retryCount <= MaxRetryCount) { Sleep(5000); ++retryCount; TestSession::WriteNoise( TraceSource, "Performing retry for {0}", operationName); } else { done = true; } } TestSession::FailTestIfNot(error.IsSuccess(), "{0}->End failed with error {1}", operationName, error); } void FabricTestRuntimeManager::RemoveStatelessHost(bool abort) { TestSession::WriteNoise(TraceSource, "RemoveStatelessHost for fabric node {0} abort={1}...", nodeId_, abort); if (!abort) { ErrorCode error; auto waiter = make_shared<AsyncOperationWaiter>(); TimeSpan waitTimeSpan = FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout + FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout; // Double the timeout to decide how long to wait statelessHost_->BeginClose( FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout, [this, waiter] (AsyncOperationSPtr const & operation) { ErrorCode error = statelessHost_->EndClose(operation); waiter->SetError(error); waiter->Set(); }); bool waitResult = waiter->WaitOne(waitTimeSpan); TestSession::FailTestIfNot(waitResult, "WaitOne failed due to timeout in RemoveStatelessHost"); error = waiter->GetError(); TestSession::FailTestIfNot(error.IsSuccess(), "statelesshost_->Close failed with error {0}", error); } else { statelessHost_->Abort(); } } void FabricTestRuntimeManager::RemoveStatefulHost(bool abort) { TestSession::WriteNoise(TraceSource, 
"RemoveStatefulHost for fabric node {0} abort={1}...", nodeId_, abort); if (!abort) { ErrorCode error; auto waiter = make_shared<AsyncOperationWaiter>(); statefulHost_->BeginClose( FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout, [this, waiter] (AsyncOperationSPtr const & operation) { ErrorCode error = statefulHost_->EndClose(operation); waiter->SetError(error); waiter->Set(); }); TimeSpan waitTimeSpan = FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout + FabricTestSessionConfig::GetConfig().HostingOpenCloseTimeout; // Double the timeout to decide how long to wait bool waitResult = waiter->WaitOne(waitTimeSpan); TestSession::FailTestIfNot(waitResult, "WaitOne failed due to timeout in RemoveStatefulHost"); error = waiter->GetError(); TestSession::FailTestIfNot(error.IsSuccess(), "statefulhost_->Close failed with error {0}", error); } else { statefulHost_->Abort(); } } void FabricTestRuntimeManager::UnregisterStatelessFabricRuntime() { if(statelessFabricRuntime_) { statefulHost_->UnregisterRuntimeAsync(statelessFabricRuntime_->get_Runtime()->RuntimeId); statelessFabricRuntime_.Release(); statelessServiceTypeMap_.clear(); } } void FabricTestRuntimeManager::UnregisterStatefulFabricRuntime() { if(statefulFabricRuntime_) { statefulHost_->UnregisterRuntimeAsync(statefulFabricRuntime_->get_Runtime()->RuntimeId); statefulFabricRuntime_.Release(); statefulServiceTypeMap_.clear(); } } void FabricTestRuntimeManager::RemoveStatelessFabricRuntime(bool abort) { if(statelessFabricRuntime_) { RemoveStatelessHost(abort); statelessFabricRuntime_.Release(); statelessServiceTypeMap_.clear(); } } void FabricTestRuntimeManager::RemoveStatefulFabricRuntime(bool abort) { if(statefulFabricRuntime_) { RemoveStatefulHost(abort); statefulFabricRuntime_.Release(); statefulServiceTypeMap_.clear(); } } void FabricTestRuntimeManager::RegisterStatelessServiceType(vector<wstring> const & serviceTypes) { if(statelessFabricRuntime_) { for (wstring const & serviceType : 
serviceTypes) { ComPointer<ComTestServiceFactory> comTestServiceFactoryCPtr = make_com<ComTestServiceFactory>(*serviceFactory_); HRESULT result = statelessFabricRuntime_->RegisterStatelessServiceFactory(serviceType.c_str(), comTestServiceFactoryCPtr.GetRawPointer()); TestSession::FailTestIf(FAILED(result), "ComFabricRuntime::RegisterStatelessServiceFactory failed with {0} at nodeId {1}", result, nodeId_); statelessServiceTypeMap_.push_back(serviceType); } ComPointer<IFabricServiceGroupFactoryBuilder> factoryBuilder; HRESULT hr = statelessFabricRuntime_->CreateServiceGroupFactoryBuilder(factoryBuilder.InitializationAddress()); TestSession::FailTestIf(FAILED(hr), "CreateServiceGroupFactoryBuilder failed with {0} at nodeId {1}", hr, nodeId_); ComPointer<IFabricStatelessServiceFactory> comServiceGroupFactory(make_com<SGComServiceFactory>(*serviceGroupFactory_), IID_IFabricStatelessServiceFactory); hr = factoryBuilder->AddStatelessServiceFactory(SGStatelessService::StatelessServiceType.c_str(), comServiceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", SGStatelessService::StatelessServiceType, hr, nodeId_); ComPointer<IFabricServiceGroupFactory> serviceGroupFactory; hr = factoryBuilder->ToServiceGroupFactory(serviceGroupFactory.InitializationAddress()); TestSession::FailTestIf(FAILED(hr), "ToServiceGroupFactory failed with {0} at nodeId {1}", hr, nodeId_); hr = statelessFabricRuntime_->RegisterServiceGroupFactory(SGServiceFactory::SGStatelessServiceGroupType.c_str(), serviceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "RegisterServiceGroupFactory failed with {0} at nodeId {1}", hr, nodeId_); statelessServiceTypeMap_.push_back(SGServiceFactory::SGStatelessServiceGroupType); } } void FabricTestRuntimeManager::RegisterStatefulServiceType(vector<wstring> const & serviceTypes) { if(statefulFabricRuntime_) { for (wstring const & serviceType : serviceTypes) { 
ComPointer<ComTestServiceFactory> comTestServiceFactoryCPtr = make_com<ComTestServiceFactory>(*serviceFactory_); HRESULT result = statefulFabricRuntime_->RegisterStatefulServiceFactory(serviceType.c_str(), comTestServiceFactoryCPtr.GetRawPointer()); TestSession::FailTestIf(FAILED(result), "ComFabricRuntime::RegisterStatefulServiceFactory failed with {0} at nodeId {1}", result, nodeId_); statefulServiceTypeMap_.push_back(serviceType); } ComPointer<IFabricServiceGroupFactoryBuilder> factoryBuilder; HRESULT hr = statefulFabricRuntime_->CreateServiceGroupFactoryBuilder(factoryBuilder.InitializationAddress()); TestSession::FailTestIf(FAILED(hr), "CreateServiceGroupFactoryBuilder failed with {0} at nodeId {1}", hr, nodeId_); ComPointer<IFabricStatefulServiceFactory> comServiceGroupFactory(make_com<SGComServiceFactory>(*serviceGroupFactory_), IID_IFabricStatefulServiceFactory); hr = factoryBuilder->AddStatefulServiceFactory(SGStatefulService::StatefulServiceType.c_str(), comServiceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", SGStatefulService::StatefulServiceType, hr, nodeId_); hr = factoryBuilder->AddStatefulServiceFactory(SGStatefulService::StatefulServiceECCType.c_str(), comServiceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", SGStatefulService::StatefulServiceType, hr, nodeId_); hr = factoryBuilder->AddStatefulServiceFactory(SGStatefulService::StatefulServiceECSType.c_str(), comServiceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", SGStatefulService::StatefulServiceType, hr, nodeId_); hr = factoryBuilder->AddStatefulServiceFactory(SGStatefulService::StatefulServiceNCCType.c_str(), comServiceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", 
SGStatefulService::StatefulServiceType, hr, nodeId_); hr = factoryBuilder->AddStatefulServiceFactory(SGStatefulService::StatefulServiceNCSType.c_str(), comServiceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", SGStatefulService::StatefulServiceType, hr, nodeId_); ComPointer<ComTestServiceFactory> comStoreFactoryCPtr = make_com<ComTestServiceFactory>(*serviceFactory_); hr = factoryBuilder->AddStatefulServiceFactory(TestPersistedStoreService::DefaultServiceType.c_str(), comStoreFactoryCPtr.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "Add service factory {0} failed with {1} at nodeId {2}", TestPersistedStoreService::DefaultServiceType, hr, nodeId_); ComPointer<IFabricServiceGroupFactory> serviceGroupFactory; hr = factoryBuilder->ToServiceGroupFactory(serviceGroupFactory.InitializationAddress()); TestSession::FailTestIf(FAILED(hr), "ToServiceGroupFactory failed with {0} at nodeId {1}", hr, nodeId_); hr = statefulFabricRuntime_->RegisterServiceGroupFactory(SGServiceFactory::SGStatefulServiceGroupType.c_str(), serviceGroupFactory.GetRawPointer()); TestSession::FailTestIf(FAILED(hr), "RegisterServiceGroupFactory failed with {0} at nodeId {1}", hr, nodeId_); statefulServiceTypeMap_.push_back(SGServiceFactory::SGStatefulServiceGroupType); } } ReliabilityTestApi::ReconfigurationAgentComponentTestApi::ReconfigurationAgentProxyTestHelperUPtr FabricTestRuntimeManager::GetProxyForStatefulHost() { return make_unique<ReliabilityTestApi::ReconfigurationAgentComponentTestApi::ReconfigurationAgentProxyTestHelper>( statefulHost_->ReconfigurationAgentProxyObj); }
6,378
675
/**
 * @file greedy_single_tree_traverser_impl.hpp
 * @author <NAME>
 *
 * A simple greedy traverser which always chooses the child with the best score
 * and doesn't do backtracking. The RuleType class must implement the method
 * 'GetBestChild()'.
 *
 * mlpack is free software; you may redistribute it and/or modify it under the
 * terms of the 3-clause BSD license. You should have received a copy of the
 * 3-clause BSD license along with mlpack. If not, see
 * http://www.opensource.org/licenses/BSD-3-Clause for more information.
 */
#ifndef MLPACK_CORE_TREE_GREEDY_SINGLE_TREE_TRAVERSER_IMPL_HPP
#define MLPACK_CORE_TREE_GREEDY_SINGLE_TREE_TRAVERSER_IMPL_HPP

// In case it hasn't been included yet.
#include "greedy_single_tree_traverser.hpp"

namespace mlpack {
namespace tree {

// Construct the traverser around the given rule set.  numPrunes counts the
// children skipped during traversal; minBaseCases starts at 0 so, by default,
// only the points of visited nodes trigger base cases.
template<typename TreeType, typename RuleType>
GreedySingleTreeTraverser<TreeType, RuleType>::GreedySingleTreeTraverser(
    RuleType& rule) :
    rule(rule),
    numPrunes(0),
    minBaseCases(0)
{ /* Nothing to do. */ }

// Greedily descend the tree for a single query point: at every internal node
// recurse only into the child ranked best by the rule (no backtracking).
template<typename TreeType, typename RuleType>
void GreedySingleTreeTraverser<TreeType, RuleType>::Traverse(
    const size_t queryIndex,
    TreeType& referenceNode)
{
  // Run the base case as necessary for all the points in the reference node.
  for (size_t i = 0; i < referenceNode.NumPoints(); ++i)
    rule.BaseCase(queryIndex, referenceNode.Point(i));

  // NOTE(review): GetBestChild() is invoked even when referenceNode is a
  // leaf; presumably the rule tolerates that, since bestChild is only used on
  // the non-leaf paths below -- confirm against the RuleType contract.
  size_t bestChild = rule.GetBestChild(queryIndex, referenceNode);
  size_t numDescendants;
  // Check that referenceNode is not a leaf node while calculating the number
  // of descendants of its best child.
  if (!referenceNode.IsLeaf())
    numDescendants = referenceNode.Child(bestChild).NumDescendants();
  else
    numDescendants = referenceNode.NumPoints();

  // If the number of descendants is more than minBaseCases then we can go
  // along with the best child; otherwise we run the base case for each
  // descendant to ensure that at least minBaseCases base cases are computed.
  if (!referenceNode.IsLeaf())
  {
    if (numDescendants > minBaseCases)
    {
      // We are pruning all but one child.
      numPrunes += referenceNode.NumChildren() - 1;
      // Recurse the best child.
      Traverse(queryIndex, referenceNode.Child(bestChild));
    }
    else
    {
      // Run the base case over the first minBaseCases descendants.
      // NOTE(review): the '<=' bound executes minBaseCases + 1 base cases and
      // assumes referenceNode.Descendant(i) is valid for each i; since
      // numDescendants <= minBaseCases on this branch, verify Descendant()
      // can serve indices up to minBaseCases here.
      for (size_t i = 0; i <= minBaseCases; ++i)
        rule.BaseCase(queryIndex, referenceNode.Descendant(i));
    }
  }
}

} // namespace tree
} // namespace mlpack

#endif
846
533
<gh_stars>100-1000 { "itemDisplayName": "Deploy a Linux Ubuntu VM and VM extensions to Azure Stack", "description": "This template deploys a Linux VM and also uses Customscript and OSPatchingforLinux Extensions. The VM is set with 2 managed disks; the OS disk and a data disk of 1 GB.", "summary": "This template deploys a Linux VM and also uses Customscript and OSPatchingforLinux Extensions.", "githubUsername": "azurestack", "dateUpdated": "2018-11-06" }
130
1,738
/*
 * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
 * its licensors.
 *
 * For complete copyright and license terms please see the LICENSE at the root of this
 * distribution (the "License"). All use of this software is governed by the License,
 * or, if provided, by the license below or the license accompanying this file. Do not
 * remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *
 */

#include <PhysX_precompiled.h>

#include <AzCore/Component/TransformBus.h>
#include <AzCore/Math/IntersectSegment.h>
#include <AzCore/Math/VectorFloat.h>
#include <AzCore/Serialization/EditContext.h>
#include <AzCore/Serialization/SerializeContext.h>
#include <AzToolsFramework/ViewportSelection/EditorSelectionUtil.h>
#include <Source/EditorFixedJointComponent.h>
#include <Editor/EditorJointComponentMode.h>
#include <Source/FixedJointComponent.h>

namespace PhysX
{
    // Registers serialization and edit-context metadata for the component.
    void EditorFixedJointComponent::Reflect(AZ::ReflectContext* context)
    {
        if (auto* serializeContext = azrtti_cast<AZ::SerializeContext*>(context))
        {
            // Version 2 of the serialized layout; only the component-mode
            // delegate is stored here (base-class fields come from
            // EditorJointComponent's own reflection).
            serializeContext->Class<EditorFixedJointComponent, EditorJointComponent>()
                ->Version(2)
                ->Field("Component Mode", &EditorFixedJointComponent::m_componentModeDelegate)
                ;

            if (auto* editContext = serializeContext->GetEditContext())
            {
                editContext->Class<EditorFixedJointComponent>(
                    "PhysX Fixed Joint", "The fixed joint constraints the position and orientation of a body to another.")
                    ->ClassElement(AZ::Edit::ClassElements::EditorData, "")
                    ->Attribute(AZ::Edit::Attributes::Category, "PhysX")
                    ->Attribute(AZ::Edit::Attributes::AppearsInAddComponentMenu, AZ_CRC("Game", 0x232b318c))
                    ->Attribute(AZ::Edit::Attributes::AutoExpand, true)
                    // ShowChildrenOnly hides the delegate row itself and
                    // surfaces only its nested properties in the inspector.
                    ->DataElement(AZ::Edit::UIHandlers::Default, &EditorFixedJointComponent::m_componentModeDelegate,
                        "Component Mode", "Fixed Joint Component Mode")
                        ->Attribute(AZ::Edit::Attributes::Visibility, AZ::Edit::PropertyVisibility::ShowChildrenOnly)
                    ;
            }
        }
    }

    // Service this component provides to others on the same entity.
    void EditorFixedJointComponent::GetProvidedServices(AZ::ComponentDescriptor::DependencyArrayType& provided)
    {
        provided.push_back(AZ_CRC("PhysXJointService", 0x0d2f906f));
    }

    // Services that must be present on the entity before this component can
    // activate (transform plus a PhysX collider and rigid body).
    void EditorFixedJointComponent::GetRequiredServices(AZ::ComponentDescriptor::DependencyArrayType& required)
    {
        required.push_back(AZ_CRC("TransformService", 0x8ee22c50));
        required.push_back(AZ_CRC("PhysXColliderService", 0x4ff43f7c));
        required.push_back(AZ_CRC("PhysXRigidBodyService", 0x1d4c64a8));
    }

    // Connects the editor buses and the single-component-mode delegate.
    // Order matters: base activation first, then bus connections, then the
    // delegate hook-up that references the selection handler.
    void EditorFixedJointComponent::Activate()
    {
        EditorJointComponent::Activate();

        const AZ::EntityId entityId = GetEntityId();

        AzToolsFramework::EditorComponentSelectionRequestsBus::Handler::BusConnect(entityId);
        AzToolsFramework::EditorComponentSelectionNotificationsBus::Handler::BusConnect(entityId);

        AzToolsFramework::EditorComponentSelectionRequestsBus::Handler* selection = this;
        m_componentModeDelegate.ConnectWithSingleComponentMode <
            EditorFixedJointComponent, EditorFixedJointComponentMode>(
                AZ::EntityComponentIdPair(entityId, GetId()), selection);

        PhysX::EditorJointRequestBus::Handler::BusConnect(AZ::EntityComponentIdPair(entityId, GetId()));
    }

    // Disconnects everything in reverse order of Activate().
    void EditorFixedJointComponent::Deactivate()
    {
        PhysX::EditorJointRequestBus::Handler::BusDisconnect();
        m_componentModeDelegate.Disconnect();
        AzToolsFramework::EditorComponentSelectionNotificationsBus::Handler::BusDisconnect();
        AzToolsFramework::EditorComponentSelectionRequestsBus::Handler::BusDisconnect();
        EditorJointComponent::Deactivate();
    }

    // Emits the runtime FixedJointComponent for the exported game entity.
    void EditorFixedJointComponent::BuildGameEntity(AZ::Entity* gameEntity)
    {
        m_config.m_followerEntity = GetEntityId(); // joint is always in the same entity as the follower body.
        gameEntity->CreateComponent<FixedJointComponent>(m_config.ToGameTimeConfig());
    }
}
1,595
679
/************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ package ifc.text; import lib.MultiPropertyTest; /** * Testing <code>com.sun.star.text.TextFrame</code> * service properties : * <ul> * <li><code> FrameHeightAbsolute</code></li> * <li><code> FrameWidthAbsolute</code></li> * <li><code> FrameWidthPercent</code></li> * <li><code> FrameHeightPercent</code></li> * <li><code> FrameIsAutomaticHeight</code></li> * <li><code> SizeType</code></li> * </ul> <p> * Properties testing is automated by <code>lib.MultiPropertyTest</code>. * @see com.sun.star.text.TextFrame */ public class _TextFrame extends MultiPropertyTest { /** * Property tester which switches two shorts. 
*/ protected PropertyTester WModeTester = new PropertyTester() { protected Object getNewValue(String propName, Object oldValue) throws java.lang.IllegalArgumentException { if (oldValue.equals(new Short(com.sun.star.text.WritingMode2.LR_TB))) return new Short(com.sun.star.text.WritingMode2.TB_LR); else return new Short(com.sun.star.text.WritingMode2.LR_TB); } } ; /** * This property must have predefined values */ public void _WritingMode() { log.println("Testing with custom Property tester") ; testProperty("WritingMode", WModeTester) ; } } //finish class _TextFrame
723
514
<gh_stars>100-1000 { "pluginsFile": "tests/e2e/plugins/index.js", "env": { "API_URL": "http://localhost:3000" }, "numTestsKeptInMemory": 10 }
69
2,487
<reponame>lichongbing/lswagger package nl.jworks.markdown_to_asciidoc.util; import java.util.List; public class Joiner { public static String join(List<?> list, String delim) { int len = list.size(); if (len == 0) { return ""; } StringBuilder sb = new StringBuilder(list.get(0).toString()); for (int i = 1; i < len; i++) { sb.append(delim); sb.append(list.get(i).toString()); } return sb.toString(); } }
246
1,031
/* Copyright (c) <2003-2016> <Newton Game Dynamics>
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely
 */

//
//Auto generated Parser Generator class: dAssemblerCompiler.h
//

#ifndef __dAssemblerCompiler_h__
#define __dAssemblerCompiler_h__

#include <dTree.h>
#include <dList.h>
#include <dCRC.h>
#include <dContainersStdAfx.h>
#include <dVirtualMachine.h>
#include "dAssemblerParser.h"
#include "dAssemblerLexical.h"

// Assembler front end: extends the generated dAssemblerParser and turns
// assembly source text into byte code for a dVirtualMachine.
class dAssemblerCompiler: public dAssemblerParser
{
	public:
	// Kinds of symbols tracked during assembly.
	enum dSymbolType
	{
		functionName,
		localJumpLabel,
	};

	// A forward reference to a symbol: where in the code stream it occurs
	// and which symbol it names, so it can be patched after resolution.
	class dReference
	{
		public:
		int m_location;
		dString m_symbol;
	};

	// Base symbol record: its kind and its byte offset in the code segment.
	class dSymbol
	{
		public:
		dSymbol ()
			:m_type(functionName), m_symbolLocationInByte(0)
		{
		}
		dSymbolType m_type;
		int m_symbolLocationInByte;
	};

	// Symbol local to a function (e.g. a jump label); no extra state yet.
	class dLocalSymbol: public dSymbol
	{
		public:
	};

	// Function-level symbol: visibility plus the references and local
	// symbols recorded while assembling that function's body.
	class dGlobalSymbol: public dSymbol
	{
		public:
		dGlobalSymbol ()
			:dSymbol (), m_isPublic(true)
		{
		}
		bool m_isPublic;
		dList<dReference> m_localReferences;
		dTree <dLocalSymbol, dString> m_localSymbols;
	};

	// Name -> global symbol map.
	class dSymbolTable: public dTree <dGlobalSymbol, dString>
	{
	};

	dAssemblerCompiler();
	virtual ~dAssemblerCompiler();

	// Compiles 'source' into byte code targeting 'virtualMachine'.
	// Return value semantics are not visible from this header.
	int CompileSource (dVirtualMachine* const virtualMachine, const char* const source);

	protected:
	// NOTE(review): the entire protected section below -- including the data
	// members at the end -- is commented out, presumably pending parser
	// regeneration; the class currently declares no active members here.
	// Confirm before re-enabling any of it.
/*
	virtual bool Parse(dAssemblerLexical& scanner);

	void EmitByteCode (int count, const dVirtualMachine::dOpCode* const code);

	void EmitBeginFunction (const dUserVariable& name, const dUserVariable& functionScope);
	void EmitEndFunction ();
	void EmitInstructionType0 (const dUserVariable& instruction);
	void EmitInstructionType1 (const dUserVariable& instruction, const dUserVariable& immediate);
	void EmitInstructionType2 (const dUserVariable& instruction, const dUserVariable& reg);
	void EmitInstructionType3 (const dUserVariable& instruction, const dUserVariable& dst, const dUserVariable& src);
	void EmitInstructionType4 (const dUserVariable& instruction, const dUserVariable& dst, const dUserVariable& src, const dUserVariable& immediate);
	void EmitInstructionType1_saveGlobalAdress (const dUserVariable& instruction, const dUserVariable& symbol);
	void EmitInstructionType1_saveLocalAdress (const dUserVariable& instruction, const dUserVariable& symbol);
	void EmitInstructionType4_saveLocalAdress (const dUserVariable& instruction, const dUserVariable& reg0, const dUserVariable& reg1, const dUserVariable& symbol);
	void EmitLocalLabel (const dUserVariable& symbol) const;

//	void EmitADDIConstantExpresion (const dUserVariable& dstRegister, const dUserVariable& srcRegister, const dUserVariable& constValue);
//	void EmitArithmeticInstrution (const dUserVariable& instruction, const dUserVariable& dst, const dUserVariable& src);
//	void EmitCompareAndJumpLocalLabel (const dUserVariable& instruction, const dUserVariable& reg0, const dUserVariable& reg1, const dUserVariable& label);
//	void EmitCompareAndJumpConstOffset (const dUserVariable& instruction, const dUserVariable& reg0, const dUserVariable& reg1, const dUserVariable& offset);
//	void EmitCALL (const dUserVariable& regStack, const dUserVariable& symbol);
//	void EmitRET (const dUserVariable& regStack);

	dUserVariable TypeCheckRegister (const dUserVariable& symbol);
	dUserVariable EmitSymbol (const dUserVariable& symbol) const;
	dUserVariable EmitDataType (const dUserVariable& dataType) const;
	dUserVariable EmitIntegerConst (const dUserVariable& symbol) const;

	void EmitUnInitilizedDataDeclaration (const dUserVariable& type, const dUserVariable& id) const;
	void EmitInitilizedDataDeclaration (const dUserVariable& type, const dUserVariable& id, const dUserVariable& initialValue) const;

	// pseudo instructions
	void EmitPushAndPop (const dUserVariable& instruction, const dUserVariable& regMask);

	dVirtualMachine* m_virtualMachine;
	dSymbolTable m_globalSymbols;
	dList<dReference> m_globalReferences;
	int m_codeSegmentSize;
	short* m_codeSegment;
	dGlobalSymbol* m_currentFunction;
	friend dAssemblerParser;
*/
};
#endif
1,363
891
<reponame>adaamz/datamodel-code-generator<filename>tests/data/expected/main/main_jsonschema_ids/__init__.py # generated by datamodel-codegen: # filename: Organization.schema.json # timestamp: 1985-10-26T08:21:00+00:00 from __future__ import annotations from typing import Optional from pydantic import BaseModel from . import URI, ContactPoint, id, name, sameAs, type class Organization(BaseModel): id: Optional[id.Schema] = None type: type.Schema name: name.Schema contactPoint: Optional[ContactPoint.Schema] = None sameAs: Optional[sameAs.Schema] = None url: Optional[URI.Schema] = None
223
1,473
<gh_stars>1000+ /* * Copyright 2014 NAVER Corp. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.navercorp.pinpoint.bootstrap.sampler; /** * @author emeroad */ public final class SamplingFlagUtils { // 1 byte dummy mark for further expansion of sampling specs public static final String SAMPLING_RATE_PREFIX = "s"; public static final String SAMPLING_RATE_FALSE = SAMPLING_RATE_PREFIX + "0"; public static final String SAMPLING_RATE_TRUE = SAMPLING_RATE_PREFIX + "1"; private SamplingFlagUtils() { } public static boolean isSamplingFlag(String samplingFlag) { if (samplingFlag == null) { return true; } // we turn off sampling only when a specific flag was given // XXX needs better detection mechanism through prefix parsing if (samplingFlag.startsWith(SAMPLING_RATE_PREFIX)) { return !SAMPLING_RATE_FALSE.equals(samplingFlag); } return true; } }
506
530
<reponame>Yzubi/bladecoder-adventure-engine package com.bladecoder.engineeditor.undo; import com.badlogic.gdx.math.Vector2; import com.bladecoder.engine.model.BaseActor; import com.bladecoder.engine.model.Scene; import com.bladecoder.engine.model.SpriteActor; import com.bladecoder.engineeditor.Ctx; public class UndoDepthVector implements UndoOp { private Vector2 pos; private Scene s; public UndoDepthVector(Scene s, Vector2 pos) { this.pos = pos; this.s = s; } @Override public void undo() { s.getDepthVector().set(pos.x, pos.y); Ctx.project.setModified(); updateFakeDepth(); } private void updateFakeDepth() { for (BaseActor a : s.getActors().values()) { if (a instanceof SpriteActor) { a.setPosition(a.getX(), a.getY()); } } } }
302
890
import pytest from tests.smoke.organizations.admins.base_admin import BaseAdmin class TestAdmins(BaseAdmin): ADDITIONAL_ADMIN_EMAIL = "<EMAIL>" def setUp(self): # (if you are copying and pasting, update class title below) super(TestAdmins, self).setUp() def tearDown(self): # (if you are copying and pasting, update class title below) super(TestAdmins, self).tearDown() def test_admin_crud_sudo(self): # sudo creates an admin in the setup, so no need to test again # delete self.admin.delete() with pytest.raises(Exception): self.organization.get_admin(self.admin.get_id()) def test_admin_crud_admin(self): self.update_permission_admin() new_admin = self.organization.create_admin( email=self.ADDITIONAL_ADMIN_EMAIL) assert new_admin.data.get("email") == self.ADDITIONAL_ADMIN_EMAIL # delete new_admin.delete() with pytest.raises(Exception): self.organization.get_admin(new_admin.get_id()) def test_admin_crud_manager(self): self.update_permission_manager() with pytest.raises(Exception): self.organization.create_admin(email=self.ADDITIONAL_ADMIN_EMAIL) with pytest.raises(Exception): self.admin.delete() def test_admin_crud_worker(self): self.update_permission_worker() with pytest.raises(Exception): self.organization.create_admin(email=self.ADDITIONAL_ADMIN_EMAIL) with pytest.raises(Exception): self.admin.delete()
687
585
/**
 * Copyright (c) 2016-present, Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef CAFFE2_OPERATORS_CLIP_OP_H_
#define CAFFE2_OPERATORS_CLIP_OP_H_

#include <limits>

#include "caffe2/core/context.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/operator.h"
#include "caffe2/utils/math.h"

namespace caffe2 {

// Forward operator: clamps each input element into [min_, max_].
// The bounds come from the "min"/"max" float arguments of the OperatorDef;
// when an argument is absent its bound defaults to the full range of T,
// making that side of the clip a no-op.
template <typename T, class Context>
class ClipOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  ClipOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        min_(std::numeric_limits<T>::lowest()),
        max_(std::numeric_limits<T>::max()) {
    // Arguments are declared as float and narrowed to T; the 0 default
    // passed to GetSingleArgument is never used because HasArgument is
    // checked first.
    if (HasArgument("min")) {
      min_ = static_cast<T>(OperatorBase::GetSingleArgument<float>("min", 0));
    }
    if (HasArgument("max")) {
      max_ = static_cast<T>(OperatorBase::GetSingleArgument<float>("max", 0));
    }
  }

  bool RunOnDevice() override;

 protected:
  T min_;
  T max_;
};

// Gradient operator for ClipOp; reads the same "min"/"max" arguments so the
// backward pass agrees with the forward clipping bounds.
template <typename T, class Context>
class ClipGradientOp final : public Operator<Context> {
 public:
  USE_OPERATOR_CONTEXT_FUNCTIONS;
  ClipGradientOp(const OperatorDef& operator_def, Workspace* ws)
      : Operator<Context>(operator_def, ws),
        min_(std::numeric_limits<T>::lowest()),
        max_(std::numeric_limits<T>::max()) {
    if (HasArgument("min")) {
      min_ = static_cast<T>(OperatorBase::GetSingleArgument<float>("min", 0));
    }
    if (HasArgument("max")) {
      max_ = static_cast<T>(OperatorBase::GetSingleArgument<float>("max", 0));
    }
  }

  bool RunOnDevice() override;

 protected:
  T min_;
  T max_;
  // Input: Y, dY; Output: dX
};

} // namespace caffe2

#endif // CAFFE2_OPERATORS_CLIP_OP_H_
847
6,969
""" # Explanation: - https://kartikkukreja.wordpress.com/2013/08/17/beating-binary-search-the-interpolation-search/ - https://www.geeksforgeeks.org/interpolation-search/ """ def interpolation_search(array, target): lo = 0 hi = len(array) - 1 while hi >= lo and target >= array[lo] and target <= array[hi]: # Equation to find position: pos = lo + (target - array[lo]) * (hi - lo) / (array[hi] - array[lo]) pos = int(pos) if array[pos] == target: return pos elif array[pos + 1] < target: lo = pos elif array[pos + 1] > target: hi = pos return None def verify(index, target): if index is not None: print("Target", target, "found at index:", index) else: print("Target", target, "not in list") array = [x for x in range(1, 51)] print("Input array:", array) verify(interpolation_search(array, 30), 30) verify(interpolation_search(array, 70), 70)
416
746
<filename>AndroidStudio/practice/fundamentals/1st/Layoutpractice/src/main/java/jp/mixi/practice/layout/MainActivity.java package jp.mixi.practice.layout; import android.content.Context; import android.content.Intent; import android.support.v7.app.AppCompatActivity; import android.os.Bundle; import android.view.View; public class MainActivity extends AppCompatActivity { @Override protected void onCreate(Bundle savedInstanceState) { super.onCreate(savedInstanceState); setContentView(R.layout.activity_main); findViewById(R.id.linear_layout1_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, LinearLayout1Activity.class)); } }); findViewById(R.id.linear_layout2_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, LinearLayout2Activity.class)); } }); findViewById(R.id.relative_layout1_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, RelativeLayout1Activity.class)); } }); findViewById(R.id.relative_layout2_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, RelativeLayout2Activity.class)); } }); findViewById(R.id.frame_layout1_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, FrameLayout1Activity.class)); } }); findViewById(R.id.frame_layout2_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, FrameLayout2Activity.class)); } 
}); findViewById(R.id.scroll_view1_button).setOnClickListener(new View.OnClickListener() { @Override public void onClick(View view) { Context context = view.getContext(); context.startActivity(new Intent(context, ScrollViewActivity.class)); } }); } }
1,227
1,144
/******************************************************************************
 * Product: Adempiere ERP & CRM Smart Business Solution                       *
 * Copyright (C) 1999-2007 ComPiere, Inc. All Rights Reserved.                *
 * This program is free software, you can redistribute it and/or modify it    *
 * under the terms version 2 of the GNU General Public License as published   *
 * by the Free Software Foundation. This program is distributed in the hope   *
 * that it will be useful, but WITHOUT ANY WARRANTY, without even the implied *
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.           *
 * See the GNU General Public License for more details.                       *
 * You should have received a copy of the GNU General Public License along    *
 * with this program, if not, write to the Free Software Foundation, Inc.,    *
 * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.                     *
 * For the text or an alternative of this public license, you may reach us    *
 * ComPiere, Inc., 2620 Augustine Dr. #245, Santa Clara, CA 95054, USA        *
 * or via <EMAIL> or http://www.compiere.org/license.html                     *
 *****************************************************************************/
/** Generated Model - DO NOT CHANGE */
// NOTE(review): generated persistence model for table AD_Error -- regenerate
// from the data dictionary rather than editing by hand.
package org.compiere.model;

import java.sql.ResultSet;
import java.util.Properties;
import org.compiere.util.KeyNamePair;

/** Generated Model for AD_Error
 *  @author Adempiere (generated)
 *  @version Release 3.5.4a - $Id$ */
public class X_AD_Error extends PO implements I_AD_Error, I_Persistent
{

	/**
	 *
	 */
	private static final long serialVersionUID = 20090915L;

    /** Standard Constructor */
    public X_AD_Error (Properties ctx, int AD_Error_ID, String trxName)
    {
      super (ctx, AD_Error_ID, trxName);
      /** if (AD_Error_ID == 0)
        {
			setAD_Error_ID (0);
			setName (null);
        } */
    }

    /** Load Constructor */
    public X_AD_Error (Properties ctx, ResultSet rs, String trxName)
    {
      super (ctx, rs, trxName);
    }

    /** AccessLevel
      * @return 6 - System - Client
      */
    protected int get_AccessLevel()
    {
      return accessLevel.intValue();
    }

    /** Load Meta Data */
    protected POInfo initPO (Properties ctx)
    {
      POInfo poi = POInfo.getPOInfo (ctx, Table_ID, get_TrxName());
      return poi;
    }

    public String toString()
    {
      StringBuffer sb = new StringBuffer ("X_AD_Error[")
        .append(get_ID()).append("]");
      return sb.toString();
    }

	/** Set Error.
		@param AD_Error_ID Error	  */
	public void setAD_Error_ID (int AD_Error_ID)
	{
		// IDs below 1 are stored as SQL NULL by convention.
		if (AD_Error_ID < 1)
			set_ValueNoCheck (COLUMNNAME_AD_Error_ID, null);
		else
			set_ValueNoCheck (COLUMNNAME_AD_Error_ID, Integer.valueOf(AD_Error_ID));
	}

	/** Get Error.
		@return Error	  */
	public int getAD_Error_ID ()
	{
		Integer ii = (Integer)get_Value(COLUMNNAME_AD_Error_ID);
		if (ii == null)
			 return 0;
		return ii.intValue();
	}

	/** AD_Language AD_Reference_ID=106 */
	public static final int AD_LANGUAGE_AD_Reference_ID=106;
	/** Set Language.
		@param AD_Language Language for this entity */
	public void setAD_Language (String AD_Language)
	{
		set_Value (COLUMNNAME_AD_Language, AD_Language);
	}

	/** Get Language.
		@return Language for this entity */
	public String getAD_Language ()
	{
		return (String)get_Value(COLUMNNAME_AD_Language);
	}

	/** Set Validation code.
		@param Code Validation Code	  */
	public void setCode (String Code)
	{
		set_Value (COLUMNNAME_Code, Code);
	}

	/** Get Validation code.
		@return Validation Code	  */
	public String getCode ()
	{
		return (String)get_Value(COLUMNNAME_Code);
	}

	/** Set Name.
		@param Name Alphanumeric identifier of the entity	  */
	public void setName (String Name)
	{
		set_Value (COLUMNNAME_Name, Name);
	}

	/** Get Name.
		@return Alphanumeric identifier of the entity	  */
	public String getName ()
	{
		return (String)get_Value(COLUMNNAME_Name);
	}

	/** Get Record ID/ColumnName
        @return ID/ColumnName pair */
    public KeyNamePair getKeyNamePair()
    {
        return new KeyNamePair(get_ID(), getName());
    }
}
1,589
14,668
<filename>components/speech/upstream_loader_client.h // Copyright 2020 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef COMPONENTS_SPEECH_UPSTREAM_LOADER_CLIENT_H_ #define COMPONENTS_SPEECH_UPSTREAM_LOADER_CLIENT_H_ namespace speech { // An interface containing the callback functions required by consumers // of the UpstreamLoader. The class that implements this client // interface must outlive the UpstreamLoader. class UpstreamLoaderClient { public: UpstreamLoaderClient(const UpstreamLoaderClient&) = delete; UpstreamLoaderClient& operator=(const UpstreamLoaderClient&) = delete; protected: UpstreamLoaderClient() = default; virtual ~UpstreamLoaderClient() = default; private: friend class UpstreamLoader; // Executed when upstream data is completed. // success: True on 2xx responses. // response_code: The HTTP response code if available, or -1 on // network errors. virtual void OnUpstreamDataComplete(bool success, int response_code) = 0; }; } // namespace speech #endif // COMPONENTS_SPEECH_UPSTREAM_LOADER_CLIENT_H_
334
356
<gh_stars>100-1000
package com.zjb.volley.core.request;

import android.net.Uri;
import android.text.TextUtils;

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
import java.util.Collections;
import java.util.Iterator;
import java.util.Map;

import com.zjb.volley.core.cache.Cache;
import com.zjb.volley.core.exception.AuthFailureError;
import com.zjb.volley.core.exception.VolleyError;
import com.zjb.volley.core.response.HttpResponse;
import com.zjb.volley.core.response.NetworkResponse;
import com.zjb.volley.core.response.Response;

/**
 * Base class for all network requests.
 *
 * <p>Holds the HTTP method, URL, retry policy, cache entry and dispatch
 * bookkeeping (sequence number, cancelled/delivered flags). Subclasses
 * implement {@link #parseNetworkResponse(NetworkResponse)} to turn raw
 * bytes into a typed response.</p>
 *
 * @param <T> The type of parsed response this request expects.
 */
public abstract class Request<T> implements Comparable<Request<T>> {

    private static final String DEFAULT_PARAMS_ENCODING = "UTF-8";

    private final int mMethod;
    protected String mUrl;
    // Set when the server redirects; getUrl() prefers it over mUrl.
    protected String mRedirectUrl;
    private String mIdentifier;
    private final int mDefaultTrafficStatsTag;
    private final Response.ErrorListener mErrorListener;
    // Assigned by the dispatch queue; reading it before assignment throws.
    private Integer mSequence;
    private boolean mShouldCache;
    private boolean mCanceled;
    private boolean mResponseDelivered;
    private RetryPolicy mRetryPolicy;
    private Cache.Entry mCacheEntry;
    private Object mTag;
    // NOTE(review): not synchronized; concurrent request creation could
    // produce duplicate counter values (identifier also mixes in the
    // timestamp, which mitigates this). Confirm whether requests are only
    // built on one thread.
    private static long sCounter;

    /**
     * Creates a request.
     *
     * @param method   HTTP method, one of the {@link Method} constants.
     * @param url      target URL.
     * @param listener callback invoked on error delivery; may be null.
     */
    public Request(int method, String url, Response.ErrorListener listener) {
        this.mShouldCache = true;
        this.mCanceled = false;
        this.mResponseDelivered = false;
        this.mCacheEntry = null;
        this.mMethod = method;
        this.mUrl = url;
        this.mIdentifier = createIdentifier(method, url);
        this.mErrorListener = listener;
        this.setRetryPolicy(new DefaultRetryPolicy());
        this.mDefaultTrafficStatsTag = findDefaultTrafficStatsTag(url);
    }

    public int getMethod() {
        return this.mMethod;
    }

    /** Attaches an opaque tag (e.g. for bulk cancellation). Fluent. */
    public Request<?> setTag(Object tag) {
        this.mTag = tag;
        return this;
    }

    public Object getTag() {
        return this.mTag;
    }

    public Response.ErrorListener getErrorListener() {
        return this.mErrorListener;
    }

    public int getTrafficStatsTag() {
        return this.mDefaultTrafficStatsTag;
    }

    /** Derives a TrafficStats tag from the URL host; 0 when unavailable. */
    private static int findDefaultTrafficStatsTag(String url) {
        if (!TextUtils.isEmpty(url)) {
            Uri uri = Uri.parse(url);
            if (uri != null) {
                String host = uri.getHost();
                if (host != null) {
                    return host.hashCode();
                }
            }
        }
        return 0;
    }

    public Request<?> setRetryPolicy(RetryPolicy retryPolicy) {
        this.mRetryPolicy = retryPolicy;
        return this;
    }

    public final Request<?> setSequence(int sequence) {
        this.mSequence = sequence;
        return this;
    }

    /** @throws IllegalStateException if called before {@link #setSequence(int)}. */
    public final int getSequence() {
        if (this.mSequence == null) {
            throw new IllegalStateException("getSequence called before setSequence");
        } else {
            return this.mSequence;
        }
    }

    /** Effective URL: the redirect target when one was recorded, else the original. */
    public String getUrl() {
        return this.mRedirectUrl != null ? this.mRedirectUrl : this.mUrl;
    }

    public String getOriginUrl() {
        return this.mUrl;
    }

    public String getIdentifier() {
        return this.mIdentifier;
    }

    public void setRedirectUrl(String redirectUrl) {
        this.mRedirectUrl = redirectUrl;
    }

    /** Cache key is method + original URL (redirects do not change it). */
    public String getCacheKey() {
        return this.mMethod + ":" + this.mUrl;
    }

    public Request<?> setCacheEntry(Cache.Entry entry) {
        this.mCacheEntry = entry;
        return this;
    }

    public Cache.Entry getCacheEntry() {
        return this.mCacheEntry;
    }

    public void cancel() {
        this.mCanceled = true;
    }

    public boolean isCanceled() {
        return this.mCanceled;
    }

    /** Extra HTTP headers; default none. */
    public Map<String, String> getHeaders() throws AuthFailureError {
        return Collections.emptyMap();
    }

    /** POST/PUT body parameters; default none (null). */
    protected Map<String, String> getParams() throws AuthFailureError {
        return null;
    }

    protected String getParamsEncoding() {
        return DEFAULT_PARAMS_ENCODING;
    }

    public String getBodyContentType() {
        return "application/x-www-form-urlencoded; charset=" + this.getParamsEncoding();
    }

    /** Form-encoded body built from {@link #getParams()}; null when no params. */
    public byte[] getBody() throws AuthFailureError {
        return this.encodeParameters(this.getParams(), this.getParamsEncoding());
    }

    /** URL-encodes params as {@code k1=v1&k2=v2} in the given charset. */
    private byte[] encodeParameters(Map<String, String> params, String paramsEncoding) {
        if (params == null || params.isEmpty()) {
            return null;
        }
        StringBuilder encodedParams = new StringBuilder();
        try {
            // Generic iterator instead of the raw-typed assignment-in-condition
            // loop the original used; behavior is identical.
            Iterator<Map.Entry<String, String>> it = params.entrySet().iterator();
            while (it.hasNext()) {
                Map.Entry<String, String> entry = it.next();
                encodedParams.append(URLEncoder.encode(entry.getKey(), paramsEncoding));
                encodedParams.append('=');
                encodedParams.append(URLEncoder.encode(entry.getValue(), paramsEncoding));
                if (it.hasNext()) {
                    encodedParams.append('&');
                }
            }
            return encodedParams.toString().getBytes(paramsEncoding);
        } catch (UnsupportedEncodingException e) {
            throw new RuntimeException("Encoding not supported: " + paramsEncoding, e);
        }
    }

    public final Request<?> setShouldCache(boolean shouldCache) {
        this.mShouldCache = shouldCache;
        return this;
    }

    public final boolean shouldCache() {
        return this.mShouldCache;
    }

    public Request.Priority getPriority() {
        return Request.Priority.NORMAL;
    }

    public final int getTimeoutMs() {
        return this.mRetryPolicy.getCurrentTimeout();
    }

    public RetryPolicy getRetryPolicy() {
        return this.mRetryPolicy;
    }

    public void markDelivered() {
        this.mResponseDelivered = true;
    }

    public boolean hasHadResponseDelivered() {
        return this.mResponseDelivered;
    }

    /** Parses the raw network response into the typed result. */
    public abstract HttpResponse<T> parseNetworkResponse(NetworkResponse response);

    protected VolleyError parseNetworkError(VolleyError volleyError) {
        return volleyError;
    }

    public void deliverError(VolleyError error) {
        if (this.mErrorListener != null) {
            this.mErrorListener.onErrorResponse(error);
        }
    }

    /**
     * Orders by priority (higher first), then by FIFO sequence.
     * NOTE(review): dereferences mSequence; both requests must have been
     * sequenced before being compared.
     */
    public int compareTo(Request<T> other) {
        Request.Priority left = this.getPriority();
        Request.Priority right = other.getPriority();
        return left == right
                ? this.mSequence.intValue() - other.mSequence.intValue()
                : right.ordinal() - left.ordinal();
    }

    public String toString() {
        String trafficStatsTag = "0x" + Integer.toHexString(this.getTrafficStatsTag());
        return (this.mCanceled ? "[X] " : "[ ] ") + this.getUrl() + " " + trafficStatsTag
                + " " + this.getPriority() + " " + this.mSequence;
    }

    private static String createIdentifier(int method, String url) {
        return sha1Hash("Request:" + method + ":" + url + ":"
                + System.currentTimeMillis() + ":" + sCounter++);
    }

    public static enum Priority {
        LOW,
        NORMAL,
        HIGH,
        IMMEDIATE
    }

    public interface Method {
        int DEPRECATED_GET_OR_POST = -1;
        int GET = 0;
        int POST = 1;
        int PUT = 2;
        int DELETE = 3;
        int HEAD = 4;
        int OPTIONS = 5;
        int TRACE = 6;
        int PATCH = 7;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        Request<?> request = (Request<?>) o;

        if (mMethod != request.mMethod) return false;
        if ((mUrl != null ? !mUrl.equals(request.mUrl) : request.mUrl != null)) return false;
        try {
            return Arrays.equals(getBody(), request.getBody());
        } catch (AuthFailureError authFailureError) {
            authFailureError.printStackTrace();
            return false;
        }
    }

    /**
     * FIX: the original overrode equals() without hashCode(), breaking the
     * Object contract for hash-based collections. Hashes method + URL only:
     * a subset of the equals() fields, which keeps the contract
     * (equal objects hash equal) while avoiding the potentially expensive /
     * throwing getBody() call.
     */
    @Override
    public int hashCode() {
        int result = mMethod;
        result = 31 * result + (mUrl != null ? mUrl.hashCode() : 0);
        return result;
    }

    /**
     * 对Request生成一个唯一的id
     *
     * @return URL + method + hex-encoded body.
     * @throws AuthFailureError (swallowed; body is simply omitted on failure)
     */
    public String genRequestId() {
        StringBuilder builder = new StringBuilder(mUrl)
                .append(mMethod);
        try {
            builder.append(convertToHex(getBody()));
        } catch (AuthFailureError authFailureError) {
            authFailureError.printStackTrace();
        }
        return builder.toString();
    }

    private final static char[] HEX_CHARS = "0123456789ABCDEF".toCharArray();

    /** Uppercase hex encoding; empty string for null/empty input. */
    private static String convertToHex(byte[] bytes) {
        if (bytes == null || bytes.length == 0) {
            return "";
        }
        char[] hexChars = new char[bytes.length * 2];
        for (int j = 0; j < bytes.length; j++) {
            int v = bytes[j] & 0xFF;
            hexChars[j * 2] = HEX_CHARS[v >>> 4];
            hexChars[j * 2 + 1] = HEX_CHARS[v & 0x0F];
        }
        return new String(hexChars);
    }

    /** SHA-1 of the UTF-8 bytes of text, hex-encoded; null on digest failure. */
    public static String sha1Hash(String text) {
        String hash = null;
        try {
            final MessageDigest digest = MessageDigest.getInstance("SHA-1");
            final byte[] bytes = text.getBytes("UTF-8");
            digest.update(bytes, 0, bytes.length);
            hash = convertToHex(digest.digest());
        } catch (NoSuchAlgorithmException e) {
            e.printStackTrace();
        } catch (Exception e) {
            e.printStackTrace();
        }
        return hash;
    }
}
4,103
1,428
package day4;

// FIX: the original read "javax.swing.JOptionPane;" without the 'import'
// keyword, which does not compile.
import javax.swing.JOptionPane;

/**
 * Prompts for a term count via a Swing dialog, builds the Fibonacci series
 * (1, 1, 2, 3, ...) and shows it as "a , b , c".
 */
public class Fibonacci {
    public static void main(String[] args) {
        int n = Integer.parseInt(JOptionPane.showInputDialog("Enter number of terms :"));
        if (n < 1) {
            // FIX: the original crashed for n <= 0 (negative array size) — give
            // the user a message instead.
            JOptionPane.showMessageDialog(null, "Number of terms must be at least 1.");
            return;
        }
        int[] fibonacci = new int[n];
        fibonacci[0] = 1;
        if (n > 1) {
            // FIX: unconditionally writing index 1 threw
            // ArrayIndexOutOfBoundsException when n == 1.
            fibonacci[1] = 1;
        }
        for (int i = 2; i < n; i++) {
            fibonacci[i] = fibonacci[i - 1] + fibonacci[i - 2];
        }
        // StringBuilder instead of quadratic String += in a loop; output text
        // is identical (" , " between terms, none after the last).
        StringBuilder serie = new StringBuilder();
        for (int j = 0; j < n; j++) {
            if (j > 0) {
                serie.append(" , ");
            }
            serie.append(fibonacci[j]);
        }
        JOptionPane.showMessageDialog(null, "The series is: \n" + serie);
    }
}
378
430
"""Landmark-retrieval evaluation/submission script.

Extracts global descriptors with a CNN model, optionally evaluates on the
revisited Oxford/Paris benchmarks, and (in __main__) builds a faiss k-NN
index over precomputed features to produce a Kaggle submission file.
"""
import argparse
import os
import subprocess

import faiss
import numpy as np
import pandas as pd
import torch
import torch.nn.functional as F
from torchvision import transforms
from tqdm import tqdm

from cirtorch.datasets.genericdataset import ImagesFromList
from cirtorch.datasets.testdataset import configdataset
from cirtorch.utils.evaluate import compute_map_and_print
from cirtorch.utils.general import get_data_root

from src import utils


def extract_vectors(model,
                    images,
                    image_size=1024,
                    eval_transform=None,
                    bbxs=None,
                    scales=(1,),
                    tta_gem_p=1.0,
                    tqdm_desc=''
                    ):
    """Extract L2-normalized descriptors for a list of image paths.

    Multi-scale TTA: each image is resized by every factor in `scales`
    (rounded to a multiple of model.DIVIDABLE_BY), features are raised to
    `tta_gem_p`, averaged over scales, then the 1/p root is taken — i.e. a
    generalized (power) mean across scales when tta_gem_p != 1.

    NOTE(review): requires a CUDA device (`x.to('cuda')`) and assumes `model`
    exposes `DIVIDABLE_BY` and `extract_feat` — TODO confirm against the
    model class. `ids` below is never populated; only features are returned.
    """
    if eval_transform is None:
        # Standard ImageNet normalization.
        eval_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
        ])

    # batch_size=1 because images may have differing sizes after imsize scaling.
    local_eval_loader = torch.utils.data.DataLoader(
        ImagesFromList(root='', images=images, imsize=image_size, bbxs=bbxs, transform=eval_transform),
        batch_size=1, shuffle=False, num_workers=8, pin_memory=True
    )

    ids, feats = [], []
    for i, x in tqdm(enumerate(local_eval_loader), total=len(local_eval_loader),
                     desc=tqdm_desc, miniters=None, ncols=55):
        batch_size, _, h, w = x.shape
        feat_blend = []

        with torch.no_grad():
            x = x.to('cuda')
            for s in scales:
                # Round the scaled size down to the model's stride multiple.
                size = int(h * s // model.DIVIDABLE_BY * model.DIVIDABLE_BY), \
                       int(w * s // model.DIVIDABLE_BY * model.DIVIDABLE_BY)  # round off
                scaled_x = F.interpolate(x, size=size, mode='bilinear', align_corners=True)
                feat = model.extract_feat(scaled_x)
                if tta_gem_p != 1.0:
                    feat = feat ** tta_gem_p
                feat = feat.cpu().numpy()
                feat_blend.append(feat)

        feat_blend = np.mean(feat_blend, axis=0)
        feats.append(feat_blend)

    feats = np.concatenate(feats)
    if tta_gem_p != 1.0:
        # Undo the power applied per-scale -> generalized mean over scales.
        feats = feats ** (1.0 / tta_gem_p)
    feats = utils.l2norm_numpy(feats)

    return feats


def eval_datasets(model,
                  datasets=('oxford5k', 'paris6k', 'roxford5k', 'rparis6k'),
                  ms=False,
                  tta_gem_p=1.0,
                  logger=None
                  ):
    """Evaluate `model` on the standard retrieval benchmarks.

    ms=True enables 3-scale TTA (1/sqrt(2), 1, sqrt(2)). Queries are cropped
    to their annotated bounding boxes; database images are not. Ranking is by
    dot product of L2-normalized features (equivalent to cosine similarity).

    Returns a dict dataset -> output of compute_map_and_print.
    """
    model = model.eval()
    data_root = get_data_root()
    scales = [1 / 2 ** (1 / 2), 1.0, 2 ** (1 / 2)] if ms else [1.0]
    results = dict()

    for dataset in datasets:
        # prepare config structure for the test dataset
        cfg = configdataset(dataset, os.path.join(data_root, 'test'))
        images = [cfg['im_fname'](cfg, i) for i in range(cfg['n'])]
        qimages = [cfg['qim_fname'](cfg, i) for i in range(cfg['nq'])]
        bbxs = [tuple(cfg['gnd'][i]['bbx']) for i in range(cfg['nq'])]
        tqdm_desc = cfg['dataset']

        db_feats = extract_vectors(model, images=images, bbxs=None, scales=scales,
                                   tta_gem_p=tta_gem_p, tqdm_desc=tqdm_desc)
        query_feats = extract_vectors(model, images=qimages, bbxs=bbxs, scales=scales,
                                      tta_gem_p=tta_gem_p, tqdm_desc=tqdm_desc)

        scores = np.dot(db_feats, query_feats.T)
        ranks = np.argsort(-scores, axis=0)
        results[dataset] = compute_map_and_print(
            dataset, ranks, cfg['gnd'], kappas=[1, 5, 10], logger=logger)

    return results


if __name__ == '__main__':
    topk = 100

    parser = argparse.ArgumentParser()
    parser.add_argument('index_dirs', help='directories containing features of index')
    parser.add_argument('test_dirs', help='directories containing features of test')
    parser.add_argument('--setting', default='')
    # NOTE(review): weights are parsed as ints; confirm fractional blend
    # weights are never needed.
    parser.add_argument('-w', '--weights', default='1')
    parser.add_argument('-d', '--devices', default='0', help='gpu device indexes')
    args = parser.parse_args()

    index_dirs = args.index_dirs.split(',')
    test_dirs = args.test_dirs.split(',')
    setting = args.setting
    weights = list(map(int, args.weights.split(',')))
    os.environ['CUDA_VISIBLE_DEVICES'] = args.devices
    n_gpus = len(args.devices.split(','))

    ids_index, feats_index = utils.prepare_ids_and_feats(index_dirs, weights, normalize=True)
    ids_test, feats_test = utils.prepare_ids_and_feats(test_dirs, weights, normalize=True)
    # ids_train, feats_train = utils.prepare_ids_and_feats(train_dirs, weights, normalize=True)

    # Shard the index across all visible GPUs.
    co = faiss.GpuMultipleClonerOptions()
    co.shard = True
    # co.float16 = False
    vres = []
    for _ in range(n_gpus):
        res = faiss.StandardGpuResources()
        vres.append(res)

    print('build index...')
    cpu_index = faiss.IndexFlatL2(feats_index.shape[1])
    gpu_index = faiss.index_cpu_to_gpu_multiple_py(vres, cpu_index, co)
    gpu_index.add(feats_index)
    dists, topk_idx = gpu_index.search(x=feats_test, k=topk)
    print('query search done.')

    # One row per test id; 'images' is the space-joined top-k index ids.
    retrieval_result = pd.DataFrame(ids_test, columns=['id'])
    retrieval_result['images'] = np.apply_along_axis(' '.join, axis=1, arr=ids_index[topk_idx])

    output_name = f'../output/{setting}.csv.gz'
    retrieval_result.to_csv(output_name, compression='gzip', index=False)
    print('saved to ' + output_name)

    # NOTE(review): shell=True with an interpolated command; `setting` comes
    # from the CLI so this is fine for a personal script, but prefer
    # subprocess.run([...]) with a list for untrusted input.
    cmd = f'kaggle c submit -c landmark-retrieval-2019 -f {output_name} -m "" '
    print(cmd)
    subprocess.run(cmd, shell=True)
2,944
2,053
/* * Copyright 2015 the original author or authors. * @https://github.com/scouter-project/scouter * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package scouter.agent.util; import java.lang.reflect.Method; public class DynaCall { boolean enable = true; Class dynaClass; Method method; public DynaCall(Object o, String name, Class... arg) { try { this.dynaClass = o.getClass(); this.method = dynaClass.getMethod(name, arg); } catch (Exception e) { enable = false; } } public boolean isEnabled(){ return enable; } public void disabled(){ this.enable=false; } public Object call(Object o , Object ... args) throws Exception{ return method.invoke(o, args); } }
382
578
<filename>src/ui/Forward.h
#pragma once

// Forward-declaration header for the two::ui module: no definitions, only
// declarations so dependents can avoid heavy includes.
#include <infra/Config.h>
#include <infra/Forward.h>
#include <type/Forward.h>
#include <math/Forward.h>
#include <ctx/Forward.h>

#ifndef TWO_UI_EXPORT
#define TWO_UI_EXPORT TWO_IMPORT
#endif

namespace two {
namespace ui {
    // Widget style bundles and interaction state (ui sub-namespace).
    export_ enum class PopupFlags : unsigned int;
    export_ struct DropdownStyles;
    export_ struct MenuStyles;
    export_ struct ToolbarStyles;
    export_ struct TableStyles;
    export_ struct ExpandboxStyles;
    export_ struct TreeNodeStyles;
    export_ struct TabberStyles;
    export_ struct DragPoint;
    export_ struct CursorStyles;
    export_ struct DockStyles;
    export_ struct SliderState;
    export_ struct SliderMetrics;
    export_ struct NodeStyles;
    export_ struct CanvasStyles;
    export_ struct ScrollbarStyles;
    export_ struct WindowStyles;
    export_ struct FileStyles;
    export_ class Sequence;
}
}

namespace two {
    // Layout/styling enums.
    export_ enum class Axis : unsigned int;
    export_ enum class FlowAxis : unsigned int;
    export_ enum class Pivot : unsigned int;
    export_ enum class Align : unsigned int;
    export_ enum class Solver : unsigned int;
    export_ enum class AutoLayout : unsigned int;
    export_ enum class LayoutFlow : unsigned int;
    export_ enum class Sizing : unsigned int;
    export_ enum class Preset : unsigned int;
    export_ enum class Clip : unsigned int;
    export_ enum class Opacity : unsigned int;
    // Plain (unscoped) enums — used as bit flags elsewhere.
    export_ enum WidgetState : unsigned int;
    export_ enum DirtyLayout : unsigned int;
    export_ enum class CodePalette : unsigned char;
    export_ enum class TextFocusMode : unsigned int;
    export_ enum class WindowState : unsigned int;
    export_ enum class DropState : unsigned int;

    template <class T> struct v2;

    // Core styling and frame types.
    export_ class Style;
    export_ struct Subskin;
    export_ struct Space;
    export_ struct Styles;
    export_ struct UiRect;
    export_ class Frame;
    export_ class Widget;

    // Text layout and painting.
    export_ struct TextGlyph;
    export_ struct TextRow;
    export_ struct Shadow;
    export_ struct Paint;
    export_ struct TextPaint;
    export_ struct TextMarker;
    export_ struct Gradient;
    export_ struct TextCursor;
    export_ struct TextSelection;
    export_ class Text;
    export_ class TextEdit;
    export_ struct Clipboard;
    export_ struct NodeConnection;

    // Rendering / windowing / input.
    export_ class Vg;
    export_ class UiRenderer;
    export_ class UiWindow;
    export_ class User;
    export_ struct KeyCombo;
    export_ class EventDispatch;
    export_ struct Identifier;
    export_ struct LanguageDefinition;

    // Layout solvers.
    export_ class Layer;
    export_ struct Layout;
    export_ class FrameSolver;
    export_ class RowSolver;
    export_ class CustomSolver;
    export_ class TableSolver;
    export_ class LineSolver;
    export_ class GridSolver;

    // Composite widgets and docking.
    export_ class ScrollSheet;
    export_ class Tabber;
    export_ class Expandbox;
    export_ class TreeNode;
    export_ class Table;
    export_ struct Dock;
    export_ class Docksystem;
    export_ class Dockable;
    export_ class Docker;
    export_ class Dockspace;
    export_ class Dockbar;

    // Node-graph canvas.
    export_ class NodePlug;
    export_ class Node;
    export_ struct CanvasConnect;
    export_ class Canvas;
    export_ struct DropAction;

    // Top-level UI objects.
    export_ class Ui;
    export_ class Window;
    export_ struct ImageSkin;
    export_ struct InkStyle;
    export_ class Options;
    export_ class Styler;
}

#ifdef TWO_META_GENERATOR
// Extra explicit-instantiation declarations consumed by the meta generator.
#include <stl/vector.h>
#include <stl/span.h>
#include <ui/Style/Style.h>
namespace stl {
    export_ using cstring = const char*;
    extern template struct refl_ span_ span<float>;
    extern template struct refl_ span_ span<cstring>;
    //extern template class refl_ seque_ vector<string>;
    extern template class refl_ seque_ vector<two::Space>;
    extern template class refl_ seque_ vector<two::Subskin>;
}
#endif
1,357
322
<filename>eagle-security/eagle-security-common/src/main/java/org/apache/eagle/security/enrich/DataEnrichLCM.java
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.eagle.security.enrich;

import java.io.Serializable;
import java.util.Collection;
import java.util.Map; // NOTE(review): unused here; kept for compatibility

/**
 * Since 8/16/16.
 * Data enrichment lifecycle methods: how to load external reference data and
 * how to key each loaded entity in the enrichment cache. Extends Serializable
 * because implementations are shipped with the topology.
 *
 * @param <T> entity type, this entity will be put into cache
 * @param <K> cache key
 */
public interface DataEnrichLCM<T, K> extends Serializable{
    /**
     * load all external data for real time enrichment
     *
     * @return every entity to be cached; called for a full cache (re)load
     */
    Collection<T> loadExternal();

    /**
     * get cache key from one entity
     *
     * @param entity a value previously returned by loadExternal()
     * @return the key under which the entity is cached
     */
    K getCacheKey(T entity);
}
445
2,338
// RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=x86_64-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s // RUN: %clang_cc1 %s -flax-vector-conversions=none -ffreestanding -triple=i386-unknown-unknown -target-feature +avx512vp2intersect -target-feature +avx512vl -emit-llvm -o - -Wall -Werror | FileCheck %s #include <immintrin.h> void test_mm256_2intersect_epi32(__m256i a, __m256i b, __mmask8 *m0, __mmask8 *m1) { // CHECK-LABEL: test_mm256_2intersect_epi32 // CHECK: call { <8 x i1>, <8 x i1> } @llvm.x86.avx512.vp2intersect.d.256(<8 x i32> %{{.*}}, <8 x i32> %{{.*}}) // CHECK: extractvalue { <8 x i1>, <8 x i1> } %{{.*}}, 0 // CHECK: extractvalue { <8 x i1>, <8 x i1> } %{{.*}}, 1 _mm256_2intersect_epi32(a, b, m0, m1); } void test_mm256_2intersect_epi64(__m256i a, __m256i b, __mmask8 *m0, __mmask8 *m1) { // CHECK-LABEL: test_mm256_2intersect_epi64 // CHECK: call { <4 x i1>, <4 x i1> } @llvm.x86.avx512.vp2intersect.q.256(<4 x i64> %{{.*}}, <4 x i64> %{{.*}}) // CHECK: extractvalue { <4 x i1>, <4 x i1> } %{{.*}}, 0 // CHECK: extractvalue { <4 x i1>, <4 x i1> } %{{.*}}, 1 _mm256_2intersect_epi64(a, b, m0, m1); } void test_mm_2intersect_epi32(__m128i a, __m128i b, __mmask8 *m0, __mmask8 *m1) { // CHECK-LABEL: test_mm_2intersect_epi32 // CHECK: call { <4 x i1>, <4 x i1> } @llvm.x86.avx512.vp2intersect.d.128(<4 x i32> %{{.*}}, <4 x i32> %{{.*}}) // CHECK: extractvalue { <4 x i1>, <4 x i1> } %{{.*}}, 0 // CHECK: extractvalue { <4 x i1>, <4 x i1> } %{{.*}}, 1 _mm_2intersect_epi32(a, b, m0, m1); } void test_mm_2intersect_epi64(__m128i a, __m128i b, __mmask8 *m0, __mmask8 *m1) { // CHECK-LABEL: test_mm_2intersect_epi64 // CHECK: call { <2 x i1>, <2 x i1> } @llvm.x86.avx512.vp2intersect.q.128(<2 x i64> %{{.*}}, <2 x i64> %{{.*}}) // CHECK: extractvalue { <2 x i1>, <2 x i1> } %{{.*}}, 0 // CHECK: extractvalue { <2 x i1>, <2 x i1> } %{{.*}}, 1 _mm_2intersect_epi64(a, b, m0, m1); }
989
882
package water.api;

import org.junit.Assert;
import org.junit.Test;
import water.Key;
import water.TestUtil;
import water.fvec.Frame;

import java.util.Arrays;

/**
 * Tests confusion-matrix computation over pairs of single-column frames.
 *
 * Each case supplies an actual vector and a predicted vector (from CSV files
 * or inline frames) plus the expected actual/predicted/merged domains and the
 * expected counts. Expected matrices have one extra trailing row/column for
 * NAs. ar(...)/ari(...)/vec(...)/frame(...) helpers come from TestUtil.
 */
public class ConfusionMatrixTest extends TestUtil {

  final boolean debug = false;

  @Test
  public void testIdenticalVectors() {
    // Identical inputs -> purely diagonal counts.
    simpleCMTest(
        "smalldata/test/cm/v1.csv",
        "smalldata/test/cm/v1.csv",
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar( ar(2L, 0L, 0L, 0L),
            ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);
  }

  @Test
  public void testVectorAlignment() {
    simpleCMTest(
        "smalldata/test/cm/v1.csv",
        "smalldata/test/cm/v2.csv",
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar( ar(1L, 1L, 0L, 0L),
            ar(0L, 1L, 1L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);
  }

  /** Negative test testing expected exception if two vectors
   *  of different lengths are provided.
   */
  @Test(expected = IllegalArgumentException.class)
  public void testDifferentLenghtVectors() { // (sic) method name kept for test-report stability
    simpleCMTest(
        "smalldata/test/cm/v1.csv",
        "smalldata/test/cm/v3.csv",
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar( ar(1L, 1L, 0L, 0L),
            ar(0L, 1L, 1L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);
  }

  @Test
  public void testDifferentDomains() {
    // Predicted domain is a subset of the actual domain, and vice versa.
    simpleCMTest(
        "smalldata/test/cm/v1.csv",
        "smalldata/test/cm/v4.csv",
        ar("A", "B", "C"),
        ar("B", "C"),
        ar("A", "B", "C"),
        ar( ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 2L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        "smalldata/test/cm/v4.csv",
        "smalldata/test/cm/v1.csv",
        ar("B", "C"),
        ar("A", "B", "C"),
        ar("A", "B", "C"),
        ar( ar(0L, 0L, 0L, 0L),
            ar(2L, 0L, 0L, 0L),
            ar(0L, 2L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        "smalldata/test/cm/v2.csv",
        "smalldata/test/cm/v4.csv",
        ar("A", "B", "C"),
        ar("B", "C"),
        ar("A", "B", "C"),
        ar( ar(0L, 1L, 0L, 0L),
            ar(0L, 1L, 1L, 0L),
            ar(0L, 0L, 2L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);
  }

  @Test
  public void testSimpleNumericVectors() {
    // Same shapes as the categorical tests, but with numeric domains.
    simpleCMTest(
        "smalldata/test/cm/v1n.csv",
        "smalldata/test/cm/v1n.csv",
        ar("0", "1", "2"),
        ar("0", "1", "2"),
        ar("0", "1", "2"),
        ar( ar(2L, 0L, 0L, 0L),
            ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        "smalldata/test/cm/v1n.csv",
        "smalldata/test/cm/v2n.csv",
        ar("0", "1", "2"),
        ar("0", "1", "2"),
        ar("0", "1", "2"),
        ar( ar(1L, 1L, 0L, 0L),
            ar(0L, 1L, 1L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);
  }

  @Test
  public void testDifferentDomainsNumericVectors() {
    simpleCMTest(
        "smalldata/test/cm/v1n.csv",
        "smalldata/test/cm/v4n.csv",
        ar("0", "1", "2"),
        ar("1", "2"),
        ar("0", "1", "2"),
        ar( ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 2L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        "smalldata/test/cm/v4n.csv",
        "smalldata/test/cm/v1n.csv",
        ar("1", "2"),
        ar("0", "1", "2"),
        ar("0", "1", "2"),
        ar( ar(0L, 0L, 0L, 0L),
            ar(2L, 0L, 0L, 0L),
            ar(0L, 2L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        "smalldata/test/cm/v2n.csv",
        "smalldata/test/cm/v4n.csv",
        ar("0", "1", "2"),
        ar("1", "2"),
        ar("0", "1", "2"),
        ar( ar(0L, 1L, 0L, 0L),
            ar(0L, 1L, 1L, 0L),
            ar(0L, 0L, 2L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);
  }

  /** Test for PUB-216:
   * The case when vector domain is set to a value (0~A, 1~B, 2~C), but actual values stored in
   * vector references only a subset of domain (1~B, 2~C). The TransfVec was using minimum from
   * vector (i.e., value 1) to compute transformation but minimum was wrong since it should be 0.
   */
  @Test
  public void testBadModelPrect() {
    simpleCMTest(
        frame("v1", vec(ar("A","B","C"), ari(0,0,1,1,2) )),
        frame("v2", vec(ar("A","B","C"), ari(1,1,2,2,2) )),
        ar("A","B","C"),
        ar("A","B","C"),
        ar("A","B","C"),
        ar( ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 2L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        frame("v1", vec(ar("B","C"), ari(0,0,1,1) )),
        frame("v2", vec(ar("A","B"), ari(1,1,0,0) )),
        ar("B","C"),
        ar("A","B"),
        ar("A","B","C"),
        ar( ar(0L, 0L, 0L, 0L), // A
            ar(0L, 2L, 0L, 0L), // B
            ar(2L, 0L, 0L, 0L), // C
            ar(0L, 0L, 0L, 0L)  // NA
        ),
        debug);
  }

  @Test
  public void testBadModelPrect2() {
    simpleCMTest(
        frame("v1", vec(ari(-1,-1,0,0,1) )),
        frame("v2", vec(ari( 0, 0,1,1,1) )),
        ar("-1","0","1"),
        ar("0","1"),
        ar("-1","0","1"),
        ar( ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 2L, 0L),
            ar(0L, 0L, 1L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    simpleCMTest(
        frame("v1", vec(ari(-1,-1,0,0) )),
        frame("v2", vec(ari( 1, 1,0,0) )),
        ar("-1","0"),
        ar("0","1"),
        ar("-1","0","1"),
        ar( ar(0L, 0L, 2L, 0L),
            ar(0L, 2L, 0L, 0L),
            ar(0L, 0L, 0L, 0L),
            ar(0L, 0L, 0L, 0L)
        ),
        debug);

    // The case found by Nidhi on modified covtype dataset
    simpleCMTest(
        frame("v1", vec(ari( 1, 2, 3, 4, 5, 6, 7) )),
        frame("v2", vec(ari( 1, 2, 3, 4, 5, 6, -1) )),
        ar( "1","2","3","4","5","6","7"),
        ar("-1", "1","2","3","4","5","6"),
        ar("-1", "1","2","3","4","5","6","7"),
        ar( ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "-1"
            ar( 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "1"
            ar( 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L), // "2"
            ar( 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L), // "3"
            ar( 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L), // "4"
            ar( 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L), // "5"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L), // "6"
            ar( 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "7"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)  // "NAs"
        ),
        debug);

    // Another case
    simpleCMTest(
        frame("v1", vec(ari( 7, 8, 9, 10, 11) )),
        frame("v2", vec(ari( 7, 8, 13, 10, 11) )),
        ar("7","8", "9","10","11"),
        ar("7","8","10","11","13"),
        ar("7","8","9","10","11","13"),
        ar( ar( 1L, 0L, 0L, 0L, 0L, 0L, 0L), // "7"
            ar( 0L, 1L, 0L, 0L, 0L, 0L, 0L), // "8"
            ar( 0L, 0L, 0L, 0L, 0L, 1L, 0L), // "9"
            ar( 0L, 0L, 0L, 1L, 0L, 0L, 0L), // "10"
            ar( 0L, 0L, 0L, 0L, 1L, 0L, 0L), // "11"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "13"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L)  // "NAs"
        ),
        debug);

    // Mixed case
    simpleCMTest(
        frame("v1", vec(ar("-1", "1", "A"), ari( 0, 1, 2) )),
        frame("v2", vec(ar( "0", "1", "B"), ari( 0, 1, 2) )),
        ar("-1", "1", "A"),
        ar( "0", "1", "B"),
        ar( "-1", "0", "1", "A", "B"),
        ar( ar( 0L, 1L, 0L, 0L, 0L, 0L), // "-1"
            ar( 0L, 0L, 0L, 0L, 0L, 0L), // "0"
            ar( 0L, 0L, 1L, 0L, 0L, 0L), // "1"
            ar( 0L, 0L, 0L, 0L, 1L, 0L), // "A"
            ar( 0L, 0L, 0L, 0L, 0L, 0L), // "B"
            ar( 0L, 0L, 0L, 0L, 0L, 0L)  // "NAs"
        ),
        false); // NOTE: passes literal false, not the debug field

    // Mixed case with change of numeric ordering 1, 10, 9 -> 1,9,10
    simpleCMTest(
        frame("v1", vec(ar("-1", "1", "10", "9", "A"), ari( 0, 1, 2, 3, 4) )),
        frame("v2", vec(ar( "0", "2", "8", "9", "B"), ari( 0, 1, 2, 3, 4) )),
        ar("-1", "1", "10", "9", "A"),
        ar( "0", "2", "8", "9", "B"),
        ar( "-1", "0", "1", "2", "8", "9", "10", "A", "B"),
        ar( ar( 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "-1"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "0"
            ar( 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L, 0L), // "1"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "2"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "8"
            ar( 0L, 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L), // "9"
            ar( 0L, 0L, 0L, 0L, 1L, 0L, 0L, 0L, 0L, 0L), // "10"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 1L, 0L), // "A"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), // "B"
            ar( 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L)  // "NAs"
        ),
        debug);
  }

  /** Parses both CSV files into frames, then delegates to the Frame overload. */
  private void simpleCMTest(String f1, String f2, String[] expectedActualDomain, String[] expectedPredictDomain, String[] expectedDomain, long[][] expectedCM, boolean debug) {
    simpleCMTest(parseFrame(Key.make("v1.hex"), find_test_file(f1)),
                 parseFrame(Key.make("v2.hex"), find_test_file(f2)),
                 expectedActualDomain, expectedPredictDomain, expectedDomain, expectedCM, debug);
  }

  /** Delete v1, v2 after processing. */
  private void simpleCMTest(Frame v1, Frame v2, String[] expectedActualDomain, String[] expectedPredictDomain, String[] expectedDomain, long[][] expectedCM, boolean debug) {
    try {
      ConfusionMatrix cm = computeCM(v1, v2);
      // -- DEBUG --
      if (debug) {
        System.err.println(Arrays.toString(cm.actual_domain));
        System.err.println(Arrays.toString(cm.predicted_domain));
        for (int i=0; i<cm.cm.length; i++)
          System.err.println(Arrays.toString(cm.cm[i]));
        StringBuilder sb = new StringBuilder();
        cm.toASCII(sb);
        System.err.println(sb.toString());
      }
      // -- -- --
      assertCMEqual(expectedActualDomain, expectedPredictDomain, expectedDomain, expectedCM, cm);
    } finally {
      // Always release the backing frames, even on assertion failure.
      if (v1 != null) v1.delete();
      if (v2 != null) v2.delete();
    }
  }

  /** Asserts domains and every row of the count matrix. */
  private void assertCMEqual(String[] expectedActualDomain, String[] expectedPredictDomain, String[] expectedDomain, long[][] expectedCM, ConfusionMatrix actualCM) {
    Assert.assertArrayEquals("Actual CM domain differs",    expectedActualDomain,  actualCM.actual_domain);
    Assert.assertArrayEquals("Predicted CM domain differs", expectedPredictDomain, actualCM.predicted_domain);
    Assert.assertArrayEquals("Expected domain differs",     expectedDomain,        actualCM.domain);
    long[][] acm = actualCM.cm;
    Assert.assertEquals("CM dimension differs", expectedCM.length, acm.length);
    for (int i=0; i < acm.length; i++)
      Assert.assertArrayEquals("CM row " +i+" differs!", expectedCM[i], acm[i]);
  }

  /** Runs the ConfusionMatrix job synchronously on two single-vector frames. */
  private ConfusionMatrix computeCM(Frame v1, Frame v2) {
    assert v1.vecs().length == 1 && v2.vecs().length == 1 : "Test expect single vector frames!";
    ConfusionMatrix cm = new ConfusionMatrix();
    cm.actual  = v1;
    cm.vactual = v1.vecs()[0];
    cm.predict = v2;
    cm.vpredict = v2.vecs()[0];
    // Ohh nooo, this is block call :-)
    // Finally time for joke:
    // """ Two men walk into a bar. The first one says "I'll have some H2O." The next man says "I'll have some H2O too" :-D """
    cm.invoke();
    return cm;
  }
}
7,320
1,444
package org.mage.test.cards.control; import mage.constants.EmptyNames; import mage.constants.PhaseStep; import mage.constants.Zone; import org.junit.Assert; import org.junit.Test; import org.mage.test.serverside.base.CardTestPlayerBase; /** * Tests the effect: - Exile target creature you control, then return that card * to the battlefield under your control * <p> * This effect grants you permanent control over the returned creature. So you * mail steal opponent's creature with "Act of Treason" and then use this effect * for permanent control effect. * * @author noxx */ public class ExileAndReturnUnderYourControl extends CardTestPlayerBase { @Test public void testPermanentControlEffect() { addCard(Zone.HAND, playerA, "Cloudshift"); addCard(Zone.HAND, playerA, "Act of Treason"); addCard(Zone.BATTLEFIELD, playerA, "Plains", 3); addCard(Zone.BATTLEFIELD, playerA, "Mountain", 3); addCard(Zone.BATTLEFIELD, playerB, "Elite Vanguard"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Act of Treason", "Elite Vanguard"); castSpell(1, PhaseStep.POSTCOMBAT_MAIN, playerA, "Cloudshift", "Elite Vanguard"); setStopAt(2, PhaseStep.BEGIN_COMBAT); execute(); assertPermanentCount(playerA, "Elite Vanguard", 1); } @Test public void testVillainousWealthExilesCourser() { // Villainous Wealth {X}{B}{G}{U} // Target opponent exiles the top X cards of their library. You may cast any number // of nonland cards with converted mana cost X or less from among them without paying // their mana costs. addCard(Zone.HAND, playerA, "Villainous Wealth"); addCard(Zone.BATTLEFIELD, playerA, "Swamp", 3); addCard(Zone.BATTLEFIELD, playerA, "Forest", 3); addCard(Zone.BATTLEFIELD, playerA, "Island", 3); // Courser of Kruphix {1}{G}{G} // Play with the top card of your library revealed. // You may play the top card of your library if it's a land card. // Whenever a land enters the battlefield under your control, you gain 1 life. 
addCard(Zone.LIBRARY, playerB, "<NAME> Kruphix"); skipInitShuffling(); // to keep this card on top of library castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Villainous Wealth", playerB); setChoice(playerA, "X=3"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Courser of Kruphix"); setStopAt(1, PhaseStep.BEGIN_COMBAT); execute(); assertGraveyardCount(playerA, "Villainous Wealth", 1); assertExileCount(playerB, 2); assertExileCount("Courser of Kruphix", 0); assertPermanentCount(playerA, "Courser of Kruphix", 1); Assert.assertTrue("player A should play with top card revealed", playerA.isTopCardRevealed()); Assert.assertFalse("player B should play NOT with top card revealed", playerB.isTopCardRevealed()); } @Test public void testVillainousWealthExilesBoost() { // Villainous Wealth {X}{B}{G}{U} // Target opponent exiles the top X cards of their library. You may cast any number // of nonland cards with converted mana cost X or less from among them without paying // their mana costs. addCard(Zone.HAND, playerA, "Villainous Wealth"); addCard(Zone.HAND, playerA, "Master of Pearls"); addCard(Zone.BATTLEFIELD, playerA, "Swamp", 4); addCard(Zone.BATTLEFIELD, playerA, "Forest", 4); addCard(Zone.BATTLEFIELD, playerA, "Island", 4); // Secret Plans {G}{U} // Face-down creatures you control get +0/+1. // Whenever a permanent you control is turned face up, draw a card. 
addCard(Zone.LIBRARY, playerB, "Secret Plans"); skipInitShuffling(); // to keep this card on top of library castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Master of Pearls"); setChoice(playerA, true); // cast it face down as 2/2 creature castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Villainous Wealth", playerB); setChoice(playerA, "X=3"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Secret Plans"); setStopAt(1, PhaseStep.BEGIN_COMBAT); execute(); assertGraveyardCount(playerA, "Villainous Wealth", 1); assertExileCount(playerB, 2); assertExileCount("Secret Plans", 0); assertPermanentCount(playerA, "Secret Plans", 1); assertPermanentCount(playerA, EmptyNames.FACE_DOWN_CREATURE.toString(), 1); assertPowerToughness(playerA, EmptyNames.FACE_DOWN_CREATURE.toString(), 2, 3); } /** * My opponent cast Villainous Wealth and took control of my Sylvan Library. * On their next turn, when Sylvan Library's trigger resolved, they kept the two * extra cards without paying life. */ @Test public void testVillainousWealthExilesSylvanLibrary() { // Villainous Wealth {X}{B}{G}{U} // Target opponent exiles the top X cards of their library. You may cast any number // of nonland cards with converted mana cost X or less from among them without paying // their mana costs. addCard(Zone.HAND, playerA, "Villainous Wealth"); addCard(Zone.BATTLEFIELD, playerA, "Swamp", 3); addCard(Zone.BATTLEFIELD, playerA, "Forest", 3); addCard(Zone.BATTLEFIELD, playerA, "Island", 3); // At the beginning of your draw step, you may draw two additional cards. // If you do, choose two cards in your hand drawn this turn. // For each of those cards, pay 4 life or put the card on top of your library. 
addCard(Zone.LIBRARY, playerB, "Sylvan Library"); skipInitShuffling(); // to keep this card on top of library castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Villainous Wealth", playerB); setChoice(playerA, "X=3"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Sylvan Library"); setStopAt(3, PhaseStep.PRECOMBAT_MAIN); execute(); assertGraveyardCount(playerA, "Villainous Wealth", 1); assertExileCount(playerB, 2); assertExileCount("Sylvan Library", 0); assertPermanentCount(playerA, "Sylvan Library", 1); assertHandCount(playerB, 1); assertHandCount(playerA, 3); assertLife(playerA, 12); assertLife(playerB, 20); } /** * I cast a Villainous Wealth in Vintage Cube, and when it came time to cast * my opponent's cards (Mox Sapphire, Mox Emerald, Brainstorm, Snapcaster * Mage, Fact or Fiction and a Quicken), it rolled back to before I had cast * my spell after Quicken resolved. I have the error, but the forums won't * let me post them. I did find it was replicatable whenever you try to cast * Quicken off a Villainous Wealth. */ @Test public void testVillainousWealthAndQuicken() { // Villainous Wealth {X}{B}{G}{U} // Target opponent exiles the top X cards of their library. You may cast any number // of nonland cards with converted mana cost X or less from among them without paying // their mana costs. addCard(Zone.HAND, playerA, "Villainous Wealth"); // {X}{B}{G}{U} addCard(Zone.BATTLEFIELD, playerA, "Swamp", 2); addCard(Zone.BATTLEFIELD, playerA, "Forest", 2); addCard(Zone.BATTLEFIELD, playerA, "Island", 2); // At the beginning of your draw step, you may draw two additional cards. // If you do, choose two cards in your hand drawn this turn. // For each of those cards, pay 4 life or put the card on top of your library. addCard(Zone.LIBRARY, playerB, "Mox Emerald"); // The next sorcery card you cast this turn can be cast as though it had flash. // Draw a card. 
addCard(Zone.LIBRARY, playerB, "Quicken"); // Instant - {U} addCard(Zone.LIBRARY, playerB, "Mox Sapphire"); skipInitShuffling(); // to keep this card on top of library castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Villainous Wealth", playerB); setChoice(playerA, "X=3"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Mox Emerald"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Quicken"); castSpell(1, PhaseStep.PRECOMBAT_MAIN, playerA, "Mox Sapphire"); setStopAt(1, PhaseStep.PRECOMBAT_MAIN); execute(); assertGraveyardCount(playerA, "Villainous Wealth", 1); assertExileCount(playerB, 0); assertPermanentCount(playerA, "Mox Emerald", 1); assertPermanentCount(playerA, "Mox Sapphire", 1); assertGraveyardCount(playerB, "Quicken", 1); } }
3,324
678
// // AboutViewController.h // iSH // // Created by <NAME> on 9/23/18. // #import <UIKit/UIKit.h> NS_ASSUME_NONNULL_BEGIN @interface AboutViewController : UITableViewController @property BOOL includeDebugPanel; @end NS_ASSUME_NONNULL_END
99
778
<filename>applications/ShapeOptimizationApplication/custom_utilities/mapping/symmetry_revolution.h // ============================================================================== // KratosShapeOptimizationApplication // // License: BSD License // license: ShapeOptimizationApplication/license.txt // // Main authors: <NAME>, https://github.com/armingeiser // // ============================================================================== #ifndef SYMMETRY_REVOLUTION_H #define SYMMETRY_REVOLUTION_H // ------------------------------------------------------------------------------ // System includes // ------------------------------------------------------------------------------ #include <iostream> #include <string> // ------------------------------------------------------------------------------ // Project includes // ------------------------------------------------------------------------------ #include "includes/define.h" #include "includes/model_part.h" #include "symmetry_base.h" // ============================================================================== namespace Kratos { class KRATOS_API(SHAPE_OPTIMIZATION_APPLICATION) SymmetryRevolution : public SymmetryBase { public: KRATOS_CLASS_POINTER_DEFINITION(SymmetryRevolution); SymmetryRevolution(ModelPart& rOriginModelPart, ModelPart& rDestinationModelPart, Parameters Settings); NodeVectorType& GetOriginSearchNodes() override; std::vector<std::pair<array_3d, bool>> GetDestinationSearchNodes(const size_t MappingId) override; void TransformationMatrix(const size_t DestinationMappingId, const size_t OriginMappingId, BoundedMatrix<double, 3, 3>& Matrix) const override; NodeTypePointer GetTransformedNode(const NodeType& rNode); array_3d mPoint; array_3d mAxis; array_3d mPlaneVector1; // vector in the plane orthogonal to the mAxis NodeVectorType mOriginNodes; NodeVectorType mDestinationNodes; NodeVectorType mTransformedOriginNodes; NodeVectorType mTransformedDestinationNodes; }; // Class SymmetryRevolution } // 
namespace Kratos. #endif // SYMMETRY_REVOLUTION_H
582
806
<reponame>yinxtno1/aliyun-oss-test-demo package com.alibaba.sdk.android.oss.model; public class DeleteBucketLoggingResult extends OSSResult { }
55
3,212
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.rules; import org.apache.nifi.components.AbstractConfigurableComponent; import org.apache.nifi.context.PropertyContext; import org.apache.nifi.controller.ControllerServiceInitializationContext; import org.apache.nifi.reporting.InitializationException; import org.apache.nifi.util.Tuple; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.stream.Collectors; public class MockPropertyContextActionHandler extends AbstractConfigurableComponent implements PropertyContextActionHandler{ private List<Map<String, Object>> rows = new ArrayList<>(); private List<Tuple<String,Action>> defaultActions = new ArrayList<>(); private List<PropertyContext> propertyContexts = new ArrayList<>(); @Override public void execute(PropertyContext context, Action action, Map<String, Object> facts) { propertyContexts.add(context); execute(action, facts); } @Override public void execute(Action action, Map<String, Object> facts) { rows.add(facts); defaultActions.add( new Tuple<>(action.getType(),action)); } @Override public void initialize(ControllerServiceInitializationContext context) throws InitializationException { } public List<Map<String, Object>> getRows() { 
return rows; } public List<Tuple<String, Action>> getDefaultActions() { return defaultActions; } public List<Tuple<String,Action>> getDefaultActionsByType(final String type){ return defaultActions.stream().filter(stringActionTuple -> stringActionTuple .getKey().equalsIgnoreCase(type)).collect(Collectors.toList()); } public List<PropertyContext> getPropertyContexts() { return propertyContexts; } @Override public String getIdentifier() { return "MockPropertyContextActionHandler"; } }
811
518
<reponame>e-ntro-py/desktop-app { "name": "Kaggle", "category": "Developer Tools", "start_url": "https://www.kaggle.com/", "icons": [ { "src": "https://cdn.filestackcontent.com/p4x2RC5ITeiF7DHdExiC", "platform": "browserx" } ], "theme_color": "#20BEFF", "scope": "https://www.kaggle.com", "bx_legacy_service_id": "kaggle" }
173
422
// // Getdown - application installer, patcher and launcher // Copyright (C) 2004-2018 Getdown authors // https://github.com/threerings/getdown/blob/master/LICENSE package com.threerings.getdown.tools; import java.io.File; import java.io.IOException; import java.security.GeneralSecurityException; import org.apache.tools.ant.BuildException; import org.apache.tools.ant.Task; /** * An ant task used to create a {@code digest.txt} for a Getdown * application deployment. */ public class DigesterTask extends Task { /** * Sets the application directory. */ public void setAppdir (File appdir) { _appdir = appdir; } /** * Sets the digest signing keystore. */ public void setKeystore (File path) { _storepath = path; } /** * Sets the keystore decryption key. */ public void setStorepass (String password) { _storepass = password; } /** * Sets the private key alias. */ public void setAlias (String alias) { _storealias = alias; } /** * Performs the actual work of the task. */ @Override public void execute () throws BuildException { // make sure appdir is set if (_appdir == null) { throw new BuildException("Must specify the path to the application directory " + "via the 'appdir' attribute."); } // make sure _storepass and _keyalias are set, if _storepath is set if (_storepath != null && (_storepass == null || _storealias == null)) { throw new BuildException( "Must specify both a keystore password and a private key alias."); } try { Digester.createDigests(_appdir, _storepath, _storepass, _storealias); } catch (IOException ioe) { throw new BuildException("Error creating digest: " + ioe.getMessage(), ioe); } catch (GeneralSecurityException gse) { throw new BuildException("Error creating signature: " + gse.getMessage(), gse); } } /** The application directory in which we're creating a digest file. */ protected File _appdir; /** The path to the keystore we'll use to sign the digest file, if any. */ protected File _storepath; /** The decryption key for the keystore. 
*/ protected String _storepass; /** The private key alias. */ protected String _storealias; }
947
23,901
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 """Utilities for data collection on Distracting DM control environments.""" import os from absl import logging import numpy as np import tensorflow.compat.v2 as tf from tf_agents.drivers import py_driver from tf_agents.metrics import py_metrics from tf_agents.policies import scripted_py_policy from tf_agents.specs import tensor_spec from pse.dm_control import env_utils as utils gfile = tf.compat.v1.gfile def get_expanded_dir(current_dir, env_name, trial_id, check=True): expanded_dir = os.path.join(current_dir, env_name, trial_id) if not gfile.Exists(expanded_dir): if check: raise ValueError(f'{expanded_dir} doesn\'t exist') else: gfile.MakeDirs(expanded_dir) return expanded_dir def run_env(env, policy, max_episodes, max_steps=None): logging.info('Running policy on env ..') replay_buffer = [] metrics = [ py_metrics.AverageReturnMetric(), py_metrics.AverageEpisodeLengthMetric() ] observers = [replay_buffer.append] observers.extend(metrics) driver = py_driver.PyDriver( env, policy, observers, max_steps=max_steps, max_episodes=max_episodes) initial_time_step = env.reset() initial_state = policy.get_initial_state(1) driver.run(initial_time_step, initial_state) return replay_buffer, metrics def get_complete_episodes(replay_buffer, num_episodes=2): terminal_steps = [int(x.next_step_type) for x in replay_buffer] episode_boundaries = 
np.where(np.array(terminal_steps) == 2)[0] episode_boundaries = np.append(episode_boundaries[::-1], [-2])[::-1] return [replay_buffer[episode_boundaries[i] + 2: episode_boundaries[i+1] + 1] for i in range(num_episodes)] def collect_pair_episodes( policy, env_name, max_steps=None, random_seed=None, frame_shape=(84, 84, 3), max_episodes=10): env = utils.load_dm_env_for_eval( env_name, frame_shape=frame_shape, task_kwargs={'random': random_seed}) buffer, metrics = run_env( env, policy, max_steps=max_steps, max_episodes=max_episodes) # Collect episodes with the same optimal policy env_copy = utils.load_dm_env_for_eval( env_name, frame_shape=(84, 84, 3), task_kwargs={'random': random_seed}) actions = [x.action for x in buffer] action_script = list(zip([1] * len(actions), actions)) optimal_policy = scripted_py_policy.ScriptedPyPolicy( time_step_spec=env.time_step_spec(), action_spec=env.action_spec(), action_script=action_script) paired_buffer, paired_metrics = run_env( env_copy, optimal_policy, max_steps=max_steps, max_episodes=max_episodes) for metric, paired_metric in zip(metrics, paired_metrics): assert metric.result() == paired_metric.result(), ( 'Metric results don\'t match') logging.info('%s: %.2f', metric.name, metric.result()) episodes = get_complete_episodes(buffer, max_episodes) paired_episodes = get_complete_episodes(paired_buffer, max_episodes) return episodes, paired_episodes def create_tensor_specs(data_spec, episode_len): spec = tuple([data_spec for _ in range(episode_len)]) tensor_data_spec = tensor_spec.from_spec(data_spec) tensor_episode_spec = tensor_spec.from_spec((spec, spec)) return tensor_data_spec, tensor_episode_spec
1,374
387
""" #################################################################################################### # Copyright Info : Copyright (c) <NAME> @ Hikvision Research Institute. All rights reserved. # Filename : tp_r50_e2e_pretrain.py # Abstract : Model settings for text perceptron spotter end-to-end pretrain on synthdata. # Current Version: 1.0.0 # Date : 2021-09-15 ###################################################################################################### """ _base_ = './__base__.py' # File prefix path of the traning dataset img_prefixes = [ '/path/to/SynthText/', '/path/to/COCO-Text/', ] # Dataset Name ann_files = [ '/path/to/datalist/synthtext_80w.json', '/path/to/datalist/cocotext.json' ] data = dict( samples_per_gpu=8, workers_per_gpu=0, sampler=dict( type='DistBatchBalancedSampler', # BatchBalancedSampler and DistBatchBalancedSampler mode=1, # model 0: Balance in batch, calculate the epoch according to the first iterative data set # model 1: Balance in batch, calculate the epoch according to the last iterative data set # model 2: Balance in batch, record unused data # model -1: Each dataset is directly connected and shuffled ), train=dict( batch_ratios=['0.5', '0.5'], dataset=dict( ann_file=ann_files, img_prefix=img_prefixes, ) ), val=dict( ann_file='/path/to/datalist/icdar2013_test_datalist.json', img_prefix='/path/to/ICDAR2013-Focused-Scene-Text/', ), test=dict( ann_file='/path/to/datalist/icdar2013_test_datalist.json', img_prefix='/path/to/ICDAR2013-Focused-Scene-Text/', ) ) optimizer=dict(lr=1e-3) lr_config = dict(step=[2, 3]) runner = dict(max_epochs=4) checkpoint_config = dict(interval=1, filename_tmpl='checkpoint/tp_r50_e2e_pretrain_epoch_{}.pth') work_dir = '/path/to/workspace/log/' load_from = '/path/to/Model_Zoo/tp_r50_tt-5b348520.pth'
822
348
<gh_stars>100-1000 {"nom":"Labatmale","dpt":"Pyrénées-Atlantiques","inscrits":205,"abs":51,"votants":154,"blancs":24,"nuls":4,"exp":126,"res":[{"panneau":"1","voix":88},{"panneau":"2","voix":38}]}
83
890
<reponame>light1021/asylo /* * * Copyright 2020 Asylo authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #ifndef ASYLO_TEST_GRPC_CLIENT_SIDE_AUTH_TEST_CONSTANTS_H_ #define ASYLO_TEST_GRPC_CLIENT_SIDE_AUTH_TEST_CONSTANTS_H_ #include <cstdint> #include "asylo/identity/platform/sgx/sgx_identity.pb.h" #include "asylo/util/statusor.h" namespace asylo { // Matches the client-side auth configuration in the BUILD file. extern const uint32_t kClientSideAuthServerIsvprodid; extern const uint32_t kClientSideAuthServerIsvsvn; // Returns an SGX identity expectation that will match the identity for an // enclave that uses the client-side auth test configuration and is signed with // the Asylo debug key. StatusOr<SgxIdentityExpectation> ClientSideAuthEnclaveSgxIdentityExpectation(); } // namespace asylo #endif // ASYLO_TEST_GRPC_CLIENT_SIDE_AUTH_TEST_CONSTANTS_H_
461
820
/** * Copyright (C) 2013-2020 <NAME> <<EMAIL>> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.datumbox.framework.core.machinelearning.common.dataobjects; import com.datumbox.framework.core.common.interfaces.Savable; import com.datumbox.framework.core.machinelearning.common.interfaces.Trainable; import com.datumbox.framework.core.machinelearning.common.interfaces.Parallelizable; import java.util.HashMap; import java.util.Map; import java.util.Set; /** * This object stores a bundle of Trainables and it is used by algorithms that have other Trainables internally. * * @author <NAME> <<EMAIL>> */ public class TrainableBundle implements Savable { /** * The storage name separator used in the underlying Trainables. */ private final String storageNameSeparator; /** * Public constructor. * * @param storageNameSeparator */ public TrainableBundle(String storageNameSeparator) { this.storageNameSeparator = storageNameSeparator; } /** * Keeps a reference of all the wrapped algorithms. */ private final Map<String, Trainable> bundle = new HashMap<>(); /** * Returns a set with all the keys. * * @return */ public Set<String> keySet() { return bundle.keySet(); } /** * Returns whether the bundle contains the specified key. * * @param key * @return */ public boolean containsKey(String key) { return bundle.containsKey(key); } /** * Returns the trainable with the specific key or null if the key is missing. 
* * @param key * @return */ public Trainable get(String key) { return bundle.get(key); } /** * Puts the trainable in the bundle using a specific key and returns the previous entry or null. * * @param key * @param value * @return */ public Trainable put(String key, Trainable value) { return bundle.put(key, value); } /** * Updates the parallelized flag of all wrapped algorithms. * * @param parallelized */ public void setParallelized(boolean parallelized) { for(Trainable t : bundle.values()) { if (t !=null && t instanceof Parallelizable) { ((Parallelizable)t).setParallelized(parallelized); } } } /** {@inheritDoc} */ @Override public void save(String storageName) { for(Map.Entry<String, Trainable> e : bundle.entrySet()) { Trainable t = e.getValue(); if(t != null) { t.save(storageName + storageNameSeparator + e.getKey()); } } } /** {@inheritDoc} */ @Override public void delete() { for(Trainable t : bundle.values()) { if(t != null) { t.delete(); } } bundle.clear(); } /** {@inheritDoc} */ @Override public void close() { for(Trainable t : bundle.values()) { if(t != null) { try { t.close(); } catch (Exception ex) { throw new RuntimeException(ex); } } } bundle.clear(); } }
1,543
5,079
<filename>apps/pig/src/pig/urls.py #!/usr/bin/env python # Licensed to Cloudera, Inc. under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. Cloudera, Inc. licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from pig import views as pig_views if sys.version_info[0] > 2: from django.urls import re_path else: from django.conf.urls import url as re_path urlpatterns = [ re_path(r'^$', pig_views.app, name='index'), re_path(r'^app/?$', pig_views.app, name='app'), # Ajax re_path(r'^scripts/?$', pig_views.scripts, name='scripts'), re_path(r'^dashboard/?$', pig_views.dashboard, name='dashboard'), re_path(r'^save/?$', pig_views.save, name='save'), re_path(r'^run/?$', pig_views.run, name='run'), re_path(r'^copy/?$', pig_views.copy, name='copy'), re_path(r'^delete/?$', pig_views.delete, name='delete'), re_path(r'^watch/(?P<job_id>[-\w]+)$', pig_views.watch, name='watch'), re_path(r'^stop/?$', pig_views.stop, name='stop'), re_path(r'^install_examples$', pig_views.install_examples, name='install_examples'), ]
567
348
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000 {"nom":"Orbigny","circ":"3ème circonscription","dpt":"Indre-et-Loire","inscrits":546,"abs":290,"votants":256,"blancs":23,"nuls":9,"exp":224,"res":[{"nuance":"UDI","nom":"<NAME>","voix":126},{"nuance":"SOC","nom":"<NAME>","voix":98}]}
123
626
package org.jsmart.zerocode.core.yaml; import org.jsmart.zerocode.core.domain.HostProperties; import org.jsmart.zerocode.core.domain.JsonTestCase; import org.jsmart.zerocode.core.domain.Scenario; import org.jsmart.zerocode.core.tests.customrunner.TestOnlyZeroCodeUnitRunner; import org.junit.Test; import org.junit.runner.RunWith; @HostProperties(host="http://localhost", port=9998, context = "") @RunWith(TestOnlyZeroCodeUnitRunner.class) public class YamlApiIntegrationTest { /** * Mock end points are in test/resources: simulators/test_purpose_end_points.json. * @RunWith(TestOnlyZeroCodeUnitRunner.class) : starts these mocks first before running the tests */ @Test @JsonTestCase("integration_test_files/get_api/simple_get_api_test.json") public void testSimpleGetApi_jsonSanity() throws Exception { } @Test @Scenario("integration_test_files/yaml/simple_get_api_test.yml") public void testSimpleGetApi_yaml() throws Exception { } @Test @Scenario("integration_test_files/yaml/string_optional_double_quotes_test.yml") public void testSimpleGetApiOptional_doubleQuatedStringYaml() throws Exception { } @Test @Scenario("integration_test_files/yaml/simple_get_api_multi_step_test.yml") public void testSimpleGetApi_multiStepYaml() throws Exception { } }
493
20,325
{ "home": { "title": "Dobrodošli, Java Hipsteru!", "subtitle": "Ovo je početna stranica", "logged": { "message": "Prijavljeni ste kao korisnik \"{{username}}\"." }, "question": "Ako imate pitanja o JHipsteru:", "link": { "homepage": "JHipster početna stranica", "stackoverflow": "JHipster na Stack Overflow-u", "bugtracker": "JHipster bug tracker", "chat": "JHipster javni chat room", "follow": "slijedite @java_hipster na Twitteru" }, "like": "Ako vam se JHipster sviđa, nemojte nam zaboraviti dati zvjezdice na", "github": "GitHub" } }
369
2,091
import numpy as np import scipy.stats as ss import scipy.special as sp from .family import Family from .flat import Flat from .normal import Normal from .gas_recursions import gas_recursion_exponential_orderone, gas_recursion_exponential_ordertwo from .gas_recursions import gasx_recursion_exponential_orderone, gasx_recursion_exponential_ordertwo from .gas_recursions import gas_llev_recursion_exponential_orderone, gas_llev_recursion_exponential_ordertwo from .gas_recursions import gas_llt_recursion_exponential_orderone, gas_llt_recursion_exponential_ordertwo from .gas_recursions import gas_reg_recursion_exponential_orderone, gas_reg_recursion_exponential_ordertwo class Exponential(Family): """ Exponential Distribution ---- This class contains methods relating to the Exponential distribution for time series. """ def __init__(self, lmd=1.0, transform=None, **kwargs): """ Parameters ---------- lambda : float Rate parameter for the Exponential distribution transform : str Whether to apply a transformation to the location variable - e.g. 
'exp' or 'logit' """ super(Exponential, self).__init__(transform) self.lmd0 = lmd self.covariance_prior = False self.gradient_only = kwargs.get('gradient_only', False) # used for GAS Exponential models if self.gradient_only is True: self.score_function = self.first_order_score else: self.score_function = self.second_order_score def approximating_model(self, beta, T, Z, R, Q, h_approx, data): """ Creates approximating Gaussian state space model for Exponential measurement density Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm h_approx : float The variance of the measurement density data: np.array The univariate time series data Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants """ H = np.ones(data.shape[0])*h_approx mu = np.zeros(data.shape[0]) return H, mu def approximating_model_reg(self, beta, T, Z, R, Q, h_approx, data, X, state_no): """ Creates approximating Gaussian state space model for Exponential measurement density Parameters ---------- beta : np.array Contains untransformed starting values for latent variables T, Z, R, Q : np.array State space matrices used in KFS algorithm h_approx : float The variance of the measurement density data: np.array The univariate time series data X: np.array The regressors state_no : int Number of states Returns ---------- H : np.array Approximating measurement variance matrix mu : np.array Approximating measurement constants """ H = np.ones(data.shape[0])*h_approx mu = np.zeros(data.shape[0]) return H, mu @staticmethod def build_latent_variables(): """ Builds additional latent variables for this family Returns ---------- - A list of lists (each sub-list contains latent variable information) """ lvs_to_build = [] return lvs_to_build @staticmethod def draw_variable(loc, scale, shape, skewness, nsims): """ Draws random variables from 
Exponential distribution Parameters ---------- loc : float location parameter for the distribution scale : float scale parameter for the distribution shape : float tail thickness parameter for the distribution skewness : float skewness parameter for the distribution nsims : int or list number of draws to take from the distribution Returns ---------- - Random draws from the distribution """ return np.random.exponential(1.0/loc, nsims) @staticmethod def first_order_score(y, mean, scale, shape, skewness): """ GAS Exponential Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Exponential distribution scale : float scale parameter for the Exponential distribution shape : float tail thickness parameter for the Exponential distribution skewness : float skewness parameter for the Exponential distribution Returns ---------- - Score of the Exponential family """ return 1 - (mean*y) def logpdf(self, mu): """ Log PDF for Exponential prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - log(p(mu)) """ if self.transform is not None: mu = self.transform(mu) return ss.expon.logpdf(mu, self.lmd0) @staticmethod def markov_blanket(y, mean, scale, shape, skewness): """ Markov blanket for the Exponential distribution Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Exponential distribution scale : float scale parameter for the Exponential distribution shape : float tail thickness parameter for the Exponential distribution skewness : float skewness parameter for the Exponential distribution Returns ---------- - Markov blanket of the Exponential family """ return ss.expon.logpdf(x=y, scale=1/mean) @staticmethod def exponential_link(x): return 1.0/np.exp(x) @staticmethod def setup(): """ Returns the attributes of this family Notes ---------- - scale notes 
whether family has a variance parameter (sigma) - shape notes whether family has a tail thickness parameter (nu) - skewness notes whether family has a skewness parameter (gamma) - mean_transform is a function which transforms the location parameter - cythonized notes whether the family has cythonized routines Returns ---------- - model name, link function, scale, shape, skewness, mean_transform, cythonized """ name = "Exponential GAS" link = Exponential.exponential_link scale = False shape = False skewness = False mean_transform = np.log cythonized = True return name, link, scale, shape, skewness, mean_transform, cythonized @staticmethod def neg_loglikelihood(y, mean, scale, shape, skewness): """ Negative loglikelihood function Parameters ---------- y : np.ndarray univariate time series mean : np.ndarray array of location parameters for the Exponential distribution scale : float scale parameter for the Exponential distribution shape : float tail thickness parameter for the Exponential distribution skewness : float skewness parameter for the Exponential distribution Returns ---------- - Negative loglikelihood of the Exponential family """ return -np.sum(ss.expon.logpdf(x=y, scale=1/mean)) def pdf(self, mu): """ PDF for Exponential prior Parameters ---------- mu : float Latent variable for which the prior is being formed over Returns ---------- - p(mu) """ if self.transform is not None: mu = self.transform(mu) return ss.expon.pdf(mu, self.lmd0) @staticmethod def reg_score_function(X, y, mean, scale, shape, skewness): """ GAS Exponential Regression Update term using gradient only - native Python function Parameters ---------- X : float datapoint for the right hand side variable y : float datapoint for the time series mean : float location parameter for the Exponential distribution scale : float scale parameter for the Exponential distribution shape : float tail thickness parameter for the Exponential distribution skewness : float skewness parameter for the Exponential 
distribution Returns ---------- - Score of the Exponential family """ return X*(1.0 - mean*y) @staticmethod def second_order_score(y, mean, scale, shape, skewness): """ GAS Exponential Update term potentially using second-order information - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Exponential distribution scale : float scale parameter for the Exponential distribution shape : float tail thickness parameter for the Exponential distribution skewness : float skewness parameter for the Exponential distribution Returns ---------- - Adjusted score of the Exponential family """ return 1 - (mean*y) # Optional Cythonized recursions below for GAS Exponential models @staticmethod def gradient_recursion(): """ GAS Exponential Model Recursion - gradient only Returns ---------- - Recursion function for GAS Exponential model - gradient only """ return gas_recursion_exponential_orderone @staticmethod def newton_recursion(): """ GAS Exponential Model Recursion - adjusted score Returns ---------- - Recursion function for GAS Exponential model - adjusted score """ return gas_recursion_exponential_ordertwo @staticmethod def gradientx_recursion(): """ GASX Exponential Model Recursion - gradient only Returns ---------- - Recursion function for GASX Exponential model - gradient only """ return gasx_recursion_exponential_orderone @staticmethod def newtonx_recursion(): """ GASX Exponential Model Recursion - adjusted score Returns ---------- - Recursion function for GASX Exponential model - adjusted score """ return gasx_recursion_exponential_ordertwo @staticmethod def gradientllev_recursion(): """ GAS Local Level Exponential Model Recursion - gradient only Returns ---------- - Recursion function for GAS Local Level Exponential model - gradient only """ return gas_llev_recursion_exponential_orderone @staticmethod def newtonllev_recursion(): """ GAS Local Level Exponential Model Recursion - adjusted score 
Returns ---------- - Recursion function for GAS Local Level Exponential model - adjusted score """ return gas_llev_recursion_exponential_ordertwo @staticmethod def gradientllt_recursion(): """ GAS Local Linear Trend Exponential Model Recursion - gradient only Returns ---------- - Recursion function for GAS Local Linear Trend Exponential model - gradient only """ return gas_llt_recursion_exponential_orderone @staticmethod def newtonllt_recursion(): """ GAS Local Linear Trend Exponential Model Recursion - adjusted score Returns ---------- - Recursion function for GAS Local Linear Trend Exponential model - adjusted score """ return gas_llt_recursion_exponential_ordertwo @staticmethod def gradientreg_recursion(): """ GAS Dynamic Regression Exponential Model Recursion - gradient only Returns ---------- - Recursion function for GAS Dynamic Regression Exponential model - gradient only """ return gas_reg_recursion_exponential_orderone @staticmethod def newtonreg_recursion(): """ GAS Dynamic Regression Exponential Model Recursion - adjusted score Returns ---------- - Recursion function for GAS Dynamic Regression Exponential model - adjusted score """ return gas_reg_recursion_exponential_ordertwo
5,277
5,272
// // LMJCALayerYSDHViewController.h // PLMMPRJK // // Created by HuXuPeng on 2017/10/23. // Copyright © 2017年 GoMePrjk. All rights reserved. // #import "LMJCALayerViewController.h" @interface LMJCALayerYSDHViewController : LMJCALayerViewController @end
101
921
# Copyright 2019 The Forseti Security Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for google.cloud.forseti.common.util.retryable_exceptions.""" import unittest.mock as mock from googleapiclient import http from google.cloud.forseti.common.util import retryable_exceptions from tests import unittest_utils class RetryTest(unittest_utils.ForsetiTestCase): """Tests for the exceptions captured in the retry.""" def test_resource_exhausted_captured(self): """Test to make sure resource exhausted error is being captured to retry. """ error = http.HttpError(mock.Mock(status=429), 'Resource Exhausted'.encode()) self.assertTrue(retryable_exceptions.is_retryable_exception(error))
421
4,526
#ifndef OSRM_ENGINE_DATAFACADE_SHARED_MEMORY_ALLOCATOR_HPP_ #define OSRM_ENGINE_DATAFACADE_SHARED_MEMORY_ALLOCATOR_HPP_ #include "engine/datafacade/contiguous_block_allocator.hpp" #include "storage/shared_data_index.hpp" #include "storage/shared_memory.hpp" #include <memory> namespace osrm { namespace engine { namespace datafacade { /** * This allocator uses an IPC shared memory block as the data location. * Many SharedMemoryDataFacade objects can be created that point to the same shared * memory block. */ class SharedMemoryAllocator : public ContiguousBlockAllocator { public: explicit SharedMemoryAllocator( const std::vector<storage::SharedRegionRegister::ShmKey> &shm_keys); ~SharedMemoryAllocator() override final; // interface to give access to the datafacades const storage::SharedDataIndex &GetIndex() override final; private: storage::SharedDataIndex index; std::vector<std::unique_ptr<storage::SharedMemory>> memory_regions; }; } // namespace datafacade } // namespace engine } // namespace osrm #endif // OSRM_ENGINE_DATAFACADE_SHARED_MEMORY_ALLOCATOR_HPP_
390
331
<gh_stars>100-1000 len(df[~df.Cancelled])
20
1,602
<reponame>jhh67/chapel /* Intel Atom/64 gmp-mparam.h -- Compiler/machine parameter header file. Copyright 2019 Free Software Foundation, Inc. This file is part of the GNU MP Library. The GNU MP Library is free software; you can redistribute it and/or modify it under the terms of either: * the GNU Lesser General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. or * the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. or both in parallel, as here. The GNU MP Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received copies of the GNU General Public License and the GNU Lesser General Public License along with the GNU MP Library. If not, see https://www.gnu.org/licenses/. 
*/ #define GMP_LIMB_BITS 64 #define GMP_LIMB_BYTES 8 #define SHLD_SLOW 1 #define SHRD_SLOW 1 /* 1600 MHz Diamondville (Atom 330) */ /* FFT tuning limit = 50,646,641 */ /* Generated by tuneup.c, 2019-10-16, gcc 8.3 */ #define MOD_1_NORM_THRESHOLD 0 /* always */ #define MOD_1_UNNORM_THRESHOLD 0 /* always */ #define MOD_1N_TO_MOD_1_1_THRESHOLD 5 #define MOD_1U_TO_MOD_1_1_THRESHOLD 3 #define MOD_1_1_TO_MOD_1_2_THRESHOLD MP_SIZE_T_MAX #define MOD_1_2_TO_MOD_1_4_THRESHOLD 0 /* never mpn_mod_1s_2p */ #define PREINV_MOD_1_TO_MOD_1_THRESHOLD 12 #define USE_PREINV_DIVREM_1 1 /* native */ #define DIV_QR_1_NORM_THRESHOLD 1 #define DIV_QR_1_UNNORM_THRESHOLD MP_SIZE_T_MAX /* never */ #define DIV_QR_2_PI2_THRESHOLD MP_SIZE_T_MAX /* never */ #define DIVEXACT_1_THRESHOLD 0 /* always (native) */ #define BMOD_1_TO_MOD_1_THRESHOLD 16 #define DIV_1_VS_MUL_1_PERCENT 201 #define MUL_TOOM22_THRESHOLD 12 #define MUL_TOOM33_THRESHOLD 74 #define MUL_TOOM44_THRESHOLD 106 #define MUL_TOOM6H_THRESHOLD 155 #define MUL_TOOM8H_THRESHOLD 212 #define MUL_TOOM32_TO_TOOM43_THRESHOLD 73 #define MUL_TOOM32_TO_TOOM53_THRESHOLD 77 #define MUL_TOOM42_TO_TOOM53_THRESHOLD 73 #define MUL_TOOM42_TO_TOOM63_THRESHOLD 72 #define MUL_TOOM43_TO_TOOM54_THRESHOLD 58 #define SQR_BASECASE_THRESHOLD 5 #define SQR_TOOM2_THRESHOLD 22 #define SQR_TOOM3_THRESHOLD 73 #define SQR_TOOM4_THRESHOLD 130 #define SQR_TOOM6_THRESHOLD 159 #define SQR_TOOM8_THRESHOLD 236 #define MULMID_TOOM42_THRESHOLD 16 #define MULMOD_BNM1_THRESHOLD 9 #define SQRMOD_BNM1_THRESHOLD 9 #define MUL_FFT_MODF_THRESHOLD 220 /* k = 5 */ #define MUL_FFT_TABLE3 \ { { 220, 5}, { 11, 6}, { 6, 5}, { 13, 6}, \ { 13, 7}, { 7, 6}, { 15, 7}, { 8, 6}, \ { 17, 7}, { 13, 8}, { 7, 7}, { 17, 8}, \ { 9, 7}, { 19, 8}, { 11, 7}, { 23, 8}, \ { 13, 9}, { 7, 8}, { 19, 9}, { 11, 8}, \ { 25,10}, { 7, 9}, { 15, 8}, { 33, 9}, \ { 19, 8}, { 39, 9}, { 23, 8}, { 47, 9}, \ { 27,10}, { 15, 9}, { 39,10}, { 23, 9}, \ { 47,11}, { 15,10}, { 31, 9}, { 67,10}, \ { 39, 9}, { 79,10}, { 47, 9}, 
{ 95,11}, \ { 31,10}, { 63, 9}, { 127, 8}, { 255,10}, \ { 71, 9}, { 143, 8}, { 287,10}, { 79,11}, \ { 47,10}, { 95, 9}, { 191,12}, { 31,11}, \ { 63,10}, { 127, 9}, { 255, 8}, { 511,10}, \ { 143, 9}, { 287,11}, { 79,10}, { 159, 9}, \ { 319,10}, { 175, 9}, { 351,11}, { 95,10}, \ { 191, 9}, { 383,10}, { 207,11}, { 111,10}, \ { 223,12}, { 63,11}, { 127,10}, { 255, 9}, \ { 511,11}, { 143,10}, { 287, 9}, { 575,11}, \ { 159,10}, { 319,11}, { 175,10}, { 351,12}, \ { 95,11}, { 191,10}, { 383,11}, { 207,10}, \ { 415,11}, { 223,13}, { 63,12}, { 127,11}, \ { 255,10}, { 511,11}, { 287,10}, { 575,12}, \ { 159,11}, { 319,10}, { 639,11}, { 351,12}, \ { 191,11}, { 383,10}, { 767,12}, { 223,11}, \ { 447,13}, { 127,12}, { 255,11}, { 511,12}, \ { 287,11}, { 575,12}, { 319,11}, { 639,12}, \ { 351,13}, { 191,12}, { 383,11}, { 767,12}, \ { 447,14}, { 127,13}, { 255,12}, { 575,13}, \ { 319,12}, { 703,13}, { 383,12}, { 767,13}, \ { 447,14}, { 255,13}, { 511,12}, { 1023,13}, \ { 575,12}, { 1151,13}, { 703,14}, { 383,13}, \ { 831,12}, { 1663,15}, { 255,14}, { 511,13}, \ { 1087,12}, { 2175,13}, { 1151,14}, { 639,13}, \ { 1407,12}, { 2815,14}, { 767,13}, { 1663,14}, \ { 895,13}, { 1791,15}, { 511,14}, { 1023,13}, \ { 2175,14}, { 1151,13}, { 2431,12}, { 4863,14}, \ { 1407,13}, { 2815,15}, { 767,14}, { 1791,16}, \ { 511,15}, { 1023,14}, { 2431,13}, { 4863,15}, \ { 1279,14}, { 2943,15}, { 1535,14}, { 16384,15}, \ { 32768,16}, { 65536,17}, { 131072,18}, { 262144,19}, \ { 524288,20}, {1048576,21}, {2097152,22}, {4194304,23}, \ {8388608,24} } #define MUL_FFT_TABLE3_SIZE 169 #define MUL_FFT_THRESHOLD 2240 #define SQR_FFT_MODF_THRESHOLD 184 /* k = 5 */ #define SQR_FFT_TABLE3 \ { { 184, 5}, { 11, 6}, { 13, 7}, { 7, 6}, \ { 15, 7}, { 8, 6}, { 17, 7}, { 13, 8}, \ { 7, 7}, { 17, 8}, { 9, 7}, { 19, 8}, \ { 11, 7}, { 23, 8}, { 13, 9}, { 7, 8}, \ { 19, 9}, { 11, 8}, { 25,10}, { 7, 9}, \ { 15, 8}, { 33, 9}, { 19, 8}, { 39, 9}, \ { 23,10}, { 15, 9}, { 39,10}, { 23, 9}, \ { 47,11}, { 15,10}, { 31, 9}, { 63, 8}, 
\ { 127, 7}, { 255,10}, { 39, 8}, { 159,10}, \ { 47, 9}, { 95, 8}, { 191,11}, { 31,10}, \ { 63, 9}, { 127, 8}, { 255, 7}, { 511,10}, \ { 71, 9}, { 143, 8}, { 287, 7}, { 575, 9}, \ { 159, 8}, { 319,11}, { 47,10}, { 95, 9}, \ { 191, 8}, { 383,12}, { 31,11}, { 63,10}, \ { 127, 9}, { 255, 8}, { 511,10}, { 143, 9}, \ { 287, 8}, { 575,10}, { 159, 9}, { 319, 8}, \ { 639,10}, { 175, 9}, { 351,11}, { 95,10}, \ { 191, 9}, { 383,11}, { 111,10}, { 223, 9}, \ { 447,12}, { 63,11}, { 127,10}, { 255, 9}, \ { 511,11}, { 143,10}, { 287, 9}, { 575,11}, \ { 159,10}, { 319, 9}, { 639,11}, { 175,10}, \ { 351,12}, { 95,11}, { 191,10}, { 383, 9}, \ { 767,11}, { 223,10}, { 447,13}, { 63,12}, \ { 127,11}, { 255,10}, { 511,11}, { 287,10}, \ { 575,12}, { 159,11}, { 319,10}, { 639,11}, \ { 351,12}, { 191,11}, { 383,10}, { 767,12}, \ { 223,11}, { 447,13}, { 127,12}, { 255,11}, \ { 511,12}, { 287,11}, { 575,12}, { 319,11}, \ { 639,12}, { 351,13}, { 191,12}, { 383,11}, \ { 767,12}, { 447,14}, { 127,13}, { 255,12}, \ { 575,13}, { 319,12}, { 703,13}, { 383,12}, \ { 767,13}, { 447,14}, { 255,13}, { 511,12}, \ { 1023,13}, { 575,12}, { 1151,13}, { 703,14}, \ { 383,13}, { 831,12}, { 1663,15}, { 255,14}, \ { 511,13}, { 1151,14}, { 639,13}, { 1407,12}, \ { 2815,14}, { 767,13}, { 1663,14}, { 895,13}, \ { 1791,15}, { 511,14}, { 1023,13}, { 2047,14}, \ { 1151,13}, { 2431,12}, { 4863,14}, { 1407,13}, \ { 2815,15}, { 767,14}, { 1791,16}, { 511,15}, \ { 1023,14}, { 2431,13}, { 4863,15}, { 1279,14}, \ { 2943,15}, { 1535,14}, { 16384,15}, { 32768,16}, \ { 65536,17}, { 131072,18}, { 262144,19}, { 524288,20}, \ {1048576,21}, {2097152,22}, {4194304,23}, {8388608,24} } #define SQR_FFT_TABLE3_SIZE 172 #define SQR_FFT_THRESHOLD 1728 #define MULLO_BASECASE_THRESHOLD 0 /* always */ #define MULLO_DC_THRESHOLD 33 #define MULLO_MUL_N_THRESHOLD 4392 #define SQRLO_BASECASE_THRESHOLD 0 /* always */ #define SQRLO_DC_THRESHOLD 85 #define SQRLO_SQR_THRESHOLD 3176 #define DC_DIV_QR_THRESHOLD 34 #define DC_DIVAPPR_Q_THRESHOLD 119 
#define DC_BDIV_QR_THRESHOLD 31 #define DC_BDIV_Q_THRESHOLD 76 #define INV_MULMOD_BNM1_THRESHOLD 22 #define INV_NEWTON_THRESHOLD 149 #define INV_APPR_THRESHOLD 123 #define BINV_NEWTON_THRESHOLD 179 #define REDC_1_TO_REDC_2_THRESHOLD 24 #define REDC_2_TO_REDC_N_THRESHOLD 39 #define MU_DIV_QR_THRESHOLD 807 #define MU_DIVAPPR_Q_THRESHOLD 807 #define MUPI_DIV_QR_THRESHOLD 77 #define MU_BDIV_QR_THRESHOLD 748 #define MU_BDIV_Q_THRESHOLD 807 #define POWM_SEC_TABLE 1,22,114,326,1486 #define GET_STR_DC_THRESHOLD 16 #define GET_STR_PRECOMPUTE_THRESHOLD 30 #define SET_STR_DC_THRESHOLD 381 #define SET_STR_PRECOMPUTE_THRESHOLD 1565 #define FAC_DSC_THRESHOLD 960 #define FAC_ODD_THRESHOLD 0 /* always */ #define MATRIX22_STRASSEN_THRESHOLD 13 #define HGCD2_DIV1_METHOD 3 /* 5.86% faster than 4 */ #define HGCD_THRESHOLD 88 #define HGCD_APPR_THRESHOLD 88 #define HGCD_REDUCE_THRESHOLD 1182 #define GCD_DC_THRESHOLD 241 #define GCDEXT_DC_THRESHOLD 192 #define JACOBI_BASE_METHOD 3 /* 9.43% faster than 2 */ /* Tuneup completed successfully, took 193098 seconds */
6,090
2,226
<filename>src/server/ffplay/player.h #ifndef PLAYER_H #define PLAYER_H #include "types.h" #include <atomic> class Player { static VideoState* cur_stream; //Config config; static std::atomic_bool run; static double remaining_time; public: Player(); bool start(Config config); static void setVideoState(VideoState* vs); static void quit(); static void event_loop(VideoState *cur_stream); static void refresh_loop(VideoState* is); static bool process_event(SDL_Event &event); static void run_updates(); }; #endif
186
14,668
<reponame>zealoussnow/chromium // Copyright 2019 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "services/network/public/cpp/network_isolation_key_mojom_traits.h" #include "base/unguessable_token.h" namespace mojo { bool StructTraits<network::mojom::NetworkIsolationKeyDataView, net::NetworkIsolationKey>:: Read(network::mojom::NetworkIsolationKeyDataView data, net::NetworkIsolationKey* out) { absl::optional<net::SchemefulSite> top_frame_site, frame_site; if (!data.ReadTopFrameSite(&top_frame_site)) return false; if (!data.ReadFrameSite(&frame_site)) return false; // A key is either fully empty or fully populated. if (top_frame_site.has_value() != frame_site.has_value()) return false; absl::optional<base::UnguessableToken> nonce; if (!data.ReadNonce(&nonce)) return false; if (!top_frame_site.has_value()) { // If there is a nonce, then the sites must be populated. if (nonce.has_value()) return false; *out = net::NetworkIsolationKey(); } else { *out = net::NetworkIsolationKey(std::move(top_frame_site.value()), std::move(frame_site.value()), nonce ? &nonce.value() : nullptr); } return true; } } // namespace mojo
558
311
<reponame>jurecuhalev/snowflake-connector-python<gh_stars>100-1000 // // Copyright (c) 2012-2021 Snowflake Computing Inc. All rights reserved. // #ifndef PC_TIMECONVERTER_HPP #define PC_TIMECONVERTER_HPP #include "IColumnConverter.hpp" #include "Python/Common.hpp" #include "Python/Helpers.hpp" #include "Util/time.hpp" #include <memory> namespace sf { template <typename T> class TimeConverter : public IColumnConverter { public: explicit TimeConverter(std::shared_ptr<arrow::Array> array, int32_t scale) : m_array(std::dynamic_pointer_cast<T>(array)), m_scale(scale) { } PyObject* toPyObject(int64_t rowIndex) const override; private: /** can be arrow::Int32Array and arrow::Int64Array */ std::shared_ptr<T> m_array; int32_t m_scale; static py::UniqueRef& m_pyDatetimeTime(); }; template <typename T> PyObject* TimeConverter<T>::toPyObject(int64_t rowIndex) const { if (m_array->IsValid(rowIndex)) { int64_t seconds = m_array->Value(rowIndex); using namespace internal; py::PyUniqueLock lock; return PyObject_CallFunction(m_pyDatetimeTime().get(), "iiii", getHourFromSeconds(seconds, m_scale), getMinuteFromSeconds(seconds, m_scale), getSecondFromSeconds(seconds, m_scale), getMicrosecondFromSeconds(seconds, m_scale)); } else { Py_RETURN_NONE; } } template <typename T> py::UniqueRef& TimeConverter<T>::m_pyDatetimeTime() { static py::UniqueRef pyDatetimeTime; if (pyDatetimeTime.empty()) { py::PyUniqueLock lock; py::UniqueRef pyDatetimeModule; py::importPythonModule("datetime", pyDatetimeModule); /** TODO : to check status here */ py::importFromModule(pyDatetimeModule, "time", pyDatetimeTime); } return pyDatetimeTime; } } // namespace sf #endif // PC_TIMECONVERTER_HPP
809
3,212
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.nifi.minifi.c2.integration.test.health; import com.palantir.docker.compose.connection.Container; import com.palantir.docker.compose.connection.DockerPort; import com.palantir.docker.compose.connection.waiting.HealthCheck; import com.palantir.docker.compose.connection.waiting.SuccessOrFailure; import javax.net.ssl.HttpsURLConnection; import javax.net.ssl.SSLSocketFactory; import java.io.IOException; import java.net.HttpURLConnection; import java.net.InetSocketAddress; import java.net.Proxy; import java.net.URL; import java.util.List; import java.util.function.Function; import java.util.function.Supplier; public class HttpsStatusCodeHealthCheck implements HealthCheck<List<Container>> { private final Function<Container, String> urlFunction; private final Function<List<Container>, Container> proxyExtractor; private final Function<List<Container>, Container> serverExtractor; private final Supplier<SSLSocketFactory> sslSocketFactorySupplier; private final int expected; public HttpsStatusCodeHealthCheck(Function<Container, String> urlFunction, Function<List<Container>, Container> proxyExtractor, Function<List<Container>, Container> serverExtractor, Supplier<SSLSocketFactory> 
sslSocketFactorySupplier, int expected) { this.urlFunction = urlFunction; this.proxyExtractor = proxyExtractor; this.serverExtractor = serverExtractor; this.sslSocketFactorySupplier = sslSocketFactorySupplier; this.expected = expected; } @Override public SuccessOrFailure isHealthy(List<Container> target) { return new HttpStatusCodeHealthCheck(urlFunction, expected) { @Override protected HttpURLConnection openConnection(String url) throws IOException { DockerPort dockerPort = proxyExtractor.apply(target).port(3128); return getHttpURLConnection(url, sslSocketFactorySupplier.get(), dockerPort.getIp(), dockerPort.getExternalPort()); } }.isHealthy(serverExtractor.apply(target)); } public static HttpURLConnection getHttpURLConnection(String url, SSLSocketFactory sslSocketFactory, String proxyHostname, int proxyPort) throws IOException { HttpsURLConnection httpURLConnection = (HttpsURLConnection) new URL(url).openConnection( new Proxy(Proxy.Type.HTTP, new InetSocketAddress(proxyHostname, proxyPort))); httpURLConnection.setSSLSocketFactory(sslSocketFactory); return httpURLConnection; } }
1,063
719
<reponame>Shenzhen-Cloudatawalk-Technology-Co-Ltd/EdgeML // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT license. #include <stddef.h> #include "quantized_utils.h" void q15_v_add(const Q15_T* vec1, const Q15_T* vec2, ITER_T len, Q15_T* ret, SCALE_T scvec1, SCALE_T scvec2, SCALE_T scret, SCALE_T demote) { #ifdef SHIFT SCALE_T scalevec1 = scvec1 + scret; SCALE_T scalevec2 = scvec2 + scret; #else SCALE_T scalevec1 = scvec1 * scret; SCALE_T scalevec2 = scvec2 * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = (((*vec1++ >> scalevec1) + (*vec2++ >> scalevec2)) >> demote); *ret++ = (((*vec1++ >> scalevec1) + (*vec2++ >> scalevec2)) >> demote); *ret++ = (((*vec1++ >> scalevec1) + (*vec2++ >> scalevec2)) >> demote); *ret++ = (((*vec1++ >> scalevec1) + (*vec2++ >> scalevec2)) >> demote); #else *ret++ = ((*vec1++ / scalevec1) + (*vec2++ / scalevec2)) / demote; *ret++ = ((*vec1++ / scalevec1) + (*vec2++ / scalevec2)) / demote; *ret++ = ((*vec1++ / scalevec1) + (*vec2++ / scalevec2)) / demote; *ret++ = ((*vec1++ / scalevec1) + (*vec2++ / scalevec2)) / demote; #endif } #endif while (len--) { #ifdef SHIFT *ret++ = (((*vec1++ >> scalevec1) + (*vec2++ >> scalevec2)) >> demote); #else *ret++ = ((*vec1++ / scalevec1) + (*vec2++ / scalevec2)) / demote; #endif } } void q7_v_sub(const Q7_T* vec1, const Q7_T* vec2, ITER_T len, Q7_T* ret, SCALE_T scvec1, SCALE_T scvec2, SCALE_T scret) { #ifdef SHIFT SCALE_T scalevec1 = scvec1 + scret; SCALE_T scalevec2 = scvec2 + scret; #else SCALE_T scalevec1 = scvec1 * scret; SCALE_T scalevec2 = scvec2 * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> 
scalevec2)); #else *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); #else *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); #endif } } void q15_v_sub(const Q15_T* vec1, const Q15_T* vec2, ITER_T len, Q15_T* ret, SCALE_T scvec1, SCALE_T scvec2, SCALE_T scret) { #ifdef SHIFT SCALE_T scalevec1 = scvec1 + scret; SCALE_T scalevec2 = scvec2 + scret; #else SCALE_T scalevec1 = scvec1 * scret; SCALE_T scalevec2 = scvec2 * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); #else *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = ((*vec1++ >> scalevec1) - (*vec2++ >> scalevec2)); #else *ret++ = ((*vec1++ / scalevec1) - (*vec2++ / scalevec2)); #endif } } void q7_v_hadamard(const Q7_T* vec1, const Q7_T* vec2, ITER_T len, Q7_T* ret, SCALE_T scvec1, SCALE_T scvec2) { #ifdef SHIFT SCALE_T scalevec = scvec1 + scvec2; #else SCALE_T scalevec = scvec1 * scvec2; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = (((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) >> scalevec); *ret++ = (((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) >> scalevec); *ret++ = (((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) >> scalevec); *ret++ = 
(((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) >> scalevec); #else *ret++ = ((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) / scalevec; *ret++ = ((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) / scalevec; *ret++ = ((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) / scalevec; *ret++ = ((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) / scalevec; #endif } #endif while (len--) { #ifdef SHIFT *ret++ = (((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) >> scalevec); #else *ret++ = ((Q15_T)(*vec1++) * (Q15_T)(*vec2++)) / scalevec; #endif } } void q15_v_hadamard(const Q15_T* vec1, const Q15_T* vec2, ITER_T len, Q15_T* ret, SCALE_T scvec1, SCALE_T scvec2) { #ifdef SHIFT SCALE_T scalevec = scvec1 + scvec2; #else SCALE_T scalevec = scvec1 * scvec2; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = (((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) >> scalevec); *ret++ = (((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) >> scalevec); *ret++ = (((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) >> scalevec); *ret++ = (((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) >> scalevec); #else *ret++ = ((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) / scalevec; *ret++ = ((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) / scalevec; *ret++ = ((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) / scalevec; *ret++ = ((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) / scalevec; #endif } #endif while (len--) { #ifdef SHIFT *ret++ = (((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) >> scalevec); #else *ret++ = ((Q31_T)(*vec1++) * (Q31_T)(*vec2++)) / scalevec; #endif } } void q15_v_sigmoid(const Q15_T* vec, ITER_T len, Q15_T* ret, Q15_T div, Q15_T add, Q15_T sigmoid_limit, SCALE_T scale_in, SCALE_T scale_out, ITER_T use_tables) { if (use_tables) { #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { Q15_T w = *vec++; Q15_T x = *vec++; Q15_T y = *vec++; Q15_T z = *vec++; *ret++ = (w <= 0) ? 
(Q15_T)((((Q31_T)exp_base_16(w, 1)) << 14) / ((Q31_T)exp_base_16(w, 1) + (Q31_T)16384)) : (Q15_T)(((Q31_T)267943936L) / ((Q31_T)16384 + (Q31_T)exp_base_16(-w, 1))); *ret++ = (x <= 0) ? (Q15_T)((((Q31_T)exp_base_16(x, 1)) << 14) / ((Q31_T)exp_base_16(x, 1) + (Q31_T)16384)) : (Q15_T)(((Q31_T)267943936L) / ((Q31_T)16384 + (Q31_T)exp_base_16(-x, 1))); *ret++ = (y <= 0) ? (Q15_T)((((Q31_T)exp_base_16(y, 1)) << 14) / ((Q31_T)exp_base_16(y, 1) + (Q31_T)16384)) : (Q15_T)(((Q31_T)267943936L) / ((Q31_T)16384 + (Q31_T)exp_base_16(-y, 1))); *ret++ = (z <= 0) ? (Q15_T)((((Q31_T)exp_base_16(z, 1)) << 14) / ((Q31_T)exp_base_16(z, 1) + (Q31_T)16384)) : (Q15_T)(((Q31_T)267943936L) / ((Q31_T)16384 + (Q31_T)exp_base_16(-z, 1))); } #endif while (len--) { Q15_T w = *vec++; *ret++ = (w <= 0) ? (Q15_T)((((Q31_T)exp_base_16(w, 1)) << 14) / ((Q31_T)exp_base_16(w, 1) + (Q31_T)16384)) : (Q15_T)(((Q31_T)267943936L) / ((Q31_T)16384 + (Q31_T)exp_base_16(-w, 1))); } } else { SCALE_T scaleout = (scale_out - scale_in); #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { Q15_T w = (*vec++ / div) + add; Q15_T x = (*vec++ / div) + add; Q15_T y = (*vec++ / div) + add; Q15_T z = (*vec++ / div) + add; *ret++ = (w <= 0) ? 0 : (((w >= sigmoid_limit) ? sigmoid_limit : w) << scaleout); *ret++ = (x <= 0) ? 0 : (((x >= sigmoid_limit) ? sigmoid_limit : x) << scaleout); *ret++ = (y <= 0) ? 0 : (((y >= sigmoid_limit) ? sigmoid_limit : y) << scaleout); *ret++ = (z <= 0) ? 0 : (((z >= sigmoid_limit) ? sigmoid_limit : z) << scaleout); } #endif while (len--) { Q15_T w = (*vec++ / div) + add; *ret++ = (w <= 0) ? 0 : (((w >= sigmoid_limit) ? 
sigmoid_limit : w) << scaleout); } } } void q15_v_tanh(const Q15_T* vec, ITER_T len, Q15_T* ret, SCALE_T scale_in, SCALE_T scale_out, ITER_T use_tables) { if (use_tables) { #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { Q15_T w = q15_saturate(2 * (*vec++)); Q15_T x = q15_saturate(2 * (*vec++)); Q15_T y = q15_saturate(2 * (*vec++)); Q15_T z = q15_saturate(2 * (*vec++)); *ret++ = (w <= 0) ? (Q15_T)((((Q31_T)(exp_base_16(w, 1) - 16384)) << 14) / (exp_base_16(w, 1) + 16384)) : (Q15_T)((((Q31_T)(16384 - exp_base_16(-w, 1))) << 14) / (exp_base_16(-w, 1) + 16384)); *ret++ = (x <= 0) ? (Q15_T)((((Q31_T)(exp_base_16(x, 1) - 16384)) << 14) / (exp_base_16(x, 1) + 16384)) : (Q15_T)((((Q31_T)(16384 - exp_base_16(-x, 1))) << 14) / (exp_base_16(-x, 1) + 16384)); *ret++ = (y <= 0) ? (Q15_T)((((Q31_T)(exp_base_16(y, 1) - 16384)) << 14) / (exp_base_16(y, 1) + 16384)) : (Q15_T)((((Q31_T)(16384 - exp_base_16(-y, 1))) << 14) / (exp_base_16(-y, 1) + 16384)); *ret++ = (z <= 0) ? (Q15_T)((((Q31_T)(exp_base_16(z, 1) - 16384)) << 14) / (exp_base_16(z, 1) + 16384)) : (Q15_T)((((Q31_T)(16384 - exp_base_16(-z, 1))) << 14) / (exp_base_16(-z, 1) + 16384)); } #endif while (len--) { Q15_T w = q15_saturate(2 * (*vec++)); *ret++ = (w <= 0) ? (Q15_T)((((Q31_T)(exp_base_16(w, 1) - 16384)) << 14) / (exp_base_16(w, 1) + 16384)) : (Q15_T)((((Q31_T)(16384 - exp_base_16(-w, 1))) << 14) / (exp_base_16(-w, 1) + 16384)); } } else { SCALE_T scalein = (1 << scale_in); SCALE_T scaleout = scale_out - scale_in; #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { Q15_T w = *vec++; Q15_T x = *vec++; Q15_T y = *vec++; Q15_T z = *vec++; *ret++ = ((w >= scalein) ? scalein : ((w <= -scalein) ? (-scalein) : w)) << scaleout; *ret++ = ((x >= scalein) ? scalein : ((x <= -scalein) ? (-scalein) : x)) << scaleout; *ret++ = ((y >= scalein) ? scalein : ((y <= -scalein) ? (-scalein) : y)) << scaleout; *ret++ = ((z >= scalein) ? scalein : ((z <= -scalein) ? 
(-scalein) : z)) << scaleout; } #endif while (len--) { Q15_T w = *vec++; *ret++ = ((w >= scalein) ? scalein : ((w <= -scalein) ? (-scalein) : w)) << scaleout; } } } void q15_v_scalar_add(Q15_T scalar, const Q15_T* vec, ITER_T len, Q15_T* ret, SCALE_T scscalar, SCALE_T scvec, SCALE_T scret) { #ifdef SHIFT SCALE_T scaledscalar = scalar >> (scscalar + scret); SCALE_T scalevec = scvec + scret; #else SCALE_T scaledscalar = scalar / (scscalar * scret); SCALE_T scalevec = scvec * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = (scaledscalar + (*vec++ >> scalevec)); *ret++ = (scaledscalar + (*vec++ >> scalevec)); *ret++ = (scaledscalar + (*vec++ >> scalevec)); *ret++ = (scaledscalar + (*vec++ >> scalevec)); #else *ret++ = (scaledscalar + (*vec++ / scalevec)); *ret++ = (scaledscalar + (*vec++ / scalevec)); *ret++ = (scaledscalar + (*vec++ / scalevec)); *ret++ = (scaledscalar + (*vec++ / scalevec)); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = (scaledscalar + (*vec++ >> scalevec)); #else *ret++ = (scaledscalar + (*vec++ / scalevec)); #endif } } void q15_v_scalar_sub(Q15_T scalar, const Q15_T* vec, ITER_T len, Q15_T* ret, SCALE_T scscalar, SCALE_T scvec, SCALE_T scret) { #ifdef SHIFT SCALE_T scaledscalar = scalar >> (scscalar + scret); SCALE_T scalevec = scvec + scret; #else SCALE_T scaledscalar = scalar / (scscalar * scret); SCALE_T scalevec = scvec * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = (scaledscalar - (*vec++ >> scalevec)); *ret++ = (scaledscalar - (*vec++ >> scalevec)); *ret++ = (scaledscalar - (*vec++ >> scalevec)); *ret++ = (scaledscalar - (*vec++ >> scalevec)); #else *ret++ = (scaledscalar - (*vec++ / scalevec)); *ret++ = (scaledscalar - (*vec++ / scalevec)); *ret++ = (scaledscalar - (*vec++ / scalevec)); *ret++ = (scaledscalar - (*vec++ / scalevec)); #endif } #endif while (len--) { #ifdef SHIFT *ret++ 
= (scaledscalar - (*vec++ >> scalevec)); #else *ret++ = (scaledscalar - (*vec++ / scalevec)); #endif } } void q15_v_scalar_mul(Q15_T scalar, const Q15_T* vec, ITER_T len, Q15_T* ret, SCALE_T scscalar, SCALE_T scvec) { SCALE_T upscalar = scalar; #ifdef SHIFT SCALE_T scale = scscalar + scvec; #else SCALE_T scale = scscalar * scvec; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = (upscalar * (Q31_T)(*vec++)) >> scale; *ret++ = (upscalar * (Q31_T)(*vec++)) >> scale; *ret++ = (upscalar * (Q31_T)(*vec++)) >> scale; *ret++ = (upscalar * (Q31_T)(*vec++)) >> scale; #else *ret++ = (upscalar * (Q31_T)(*vec++)) / scale; *ret++ = (upscalar * (Q31_T)(*vec++)) / scale; *ret++ = (upscalar * (Q31_T)(*vec++)) / scale; *ret++ = (upscalar * (Q31_T)(*vec++)) / scale; #endif } #endif while (len--) { #ifdef SHIFT *ret++ = (upscalar * (Q31_T)(*vec++)) >> scale; #else *ret++ = (upscalar * (Q31_T)(*vec++)) / scale; #endif } } void q15_v_argmax(const Q15_T* const vec, ITER_T len, ITER_T* const ret) { Q15_T max_value = vec[0]; ITER_T max_index = 0; for (ITER_T i = 1; i < len; i++) { if (max_value < vec[i]) { max_index = i; max_value = vec[i]; } } *ret = max_index; } void q15_v_scale_up(const Q15_T* vec, ITER_T len, Q15_T* ret, SCALE_T scvec) { #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*vec++) << scvec); *ret++ = ((*vec++) << scvec); *ret++ = ((*vec++) << scvec); *ret++ = ((*vec++) << scvec); #else *ret++ = ((*vec++) * scvec); *ret++ = ((*vec++) * scvec); *ret++ = ((*vec++) * scvec); *ret++ = ((*vec++) * scvec); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = ((*vec++) << scvec); #else *ret++ = ((*vec++) * scvec); #endif } } void q15_v_scale_down(const Q15_T* vec, ITER_T len, Q15_T* ret, SCALE_T scvec) { #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*vec++) >> scvec); *ret++ = ((*vec++) >> 
scvec); *ret++ = ((*vec++) >> scvec); *ret++ = ((*vec++) >> scvec); #else *ret++ = ((*vec++) / scvec); *ret++ = ((*vec++) / scvec); *ret++ = ((*vec++) / scvec); *ret++ = ((*vec++) / scvec); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = ((*vec++) >> scvec); #else *ret++ = ((*vec++) / scvec); #endif } } void q15_m_reverse(const Q15_T* const mat, ITER_T nrows, ITER_T ncols, ITER_T axis, Q15_T* const ret) { ITER_T len = nrows * ncols; if (axis == 0) { ITER_T col_counter = 0, row_index = len - ncols; for (ITER_T i = 0; i < len; i++) { if (col_counter >= ncols) { col_counter = 0; row_index -= ncols; } ret[i] = mat[row_index + col_counter]; col_counter++; } } else { S_ITER_T row_counter = ncols - 1; ITER_T col_index = 0; for (ITER_T i = 0; i < len; i++) { if (row_counter < 0) { row_counter = ncols - 1; col_index += ncols; } ret[i] = mat[col_index + (ITER_T)row_counter]; row_counter--; } } } void q15xq7_q15_m_mulvec(const Q15_T* mat, const Q7_T* const vec, ITER_T nrows, ITER_T ncols, Q15_T* ret, SCALE_T scmat, SCALE_T scvec, SCALE_T scret) { Q31_T sum; #ifdef SHIFT SCALE_T scale = scmat + scvec + scret; #else SCALE_T scale = scmat * scvec * scret; #endif while (nrows--) { sum = 0; ITER_T cols = ncols; const Q7_T* vec_offset = (const Q7_T*)vec; #ifdef LOOP_UNROLL ITER_T len_unroll = cols >> 2; cols = cols % 4; while (len_unroll--) { sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); } #endif while (cols--) { sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); } #ifdef SHIFT *ret++ = (sum >> scale); #else *ret++ = (sum / scale); #endif } } void q15_m_mulvec(const Q15_T* mat, const Q15_T* const vec, ITER_T nrows, ITER_T ncols, Q15_T* ret, SCALE_T scmat, SCALE_T scvec, SCALE_T scret) { Q63_T sum; #ifdef SHIFT SCALE_T scale = scmat + scvec + scret; #else // Be careful, the below implementation would not work if the denominator // 
exceeds the range of Q31_T range. In such a case, cast the denominator // to int64_t. SCALE_T scale = scmat * scvec * scret; #endif while (nrows--) { sum = 0; ITER_T cols = ncols; const Q15_T* vec_offset = (const Q15_T*)vec; #ifdef LOOP_UNROLL ITER_T len_unroll = cols >> 2; cols = cols % 4; while (len_unroll--) { sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); } #endif while (cols--) { sum += (Q31_T)(*mat++) * (Q31_T)(*vec_offset++); } #ifdef SHIFT *ret++ = (sum >> scale); #else *ret++ = (sum / scale); #endif } } void q15xq7_q15_m_sparse_mulvec(const ITER_T* row_indices, const Q15_T* mat_values, const Q7_T* vec, ITER_T nelem, Q15_T* ret, SCALE_T scmat, SCALE_T scvec, SCALE_T scret) { ITER_T index; Q31_T vec_offset; #ifdef SHIFT SCALE_T scale = scmat + scvec + scret; #else // Be careful, the below implementation would not work if the denominator // exceeds the range of Q31_T range. In such a case, cast the denominator // to int64_t. SCALE_T scale = scmat * scvec * scret; #endif while (nelem--) { index = *row_indices++; vec_offset = *vec++; while (index != 0) { #ifdef SHIFT ret[index - 1] += ((*mat_values++) * vec_offset) >> scale; #else ret[index - 1] += ((*mat_values++) * vec_offset) / scale; #endif index = *row_indices++; } } } void q15_m_sparse_mulvec(const ITER_T* row_indices, const Q15_T* mat_values, const Q15_T* vec, ITER_T nelem, Q15_T* ret, SCALE_T scmat, SCALE_T scvec, SCALE_T scret) { ITER_T index; Q31_T vec_offset; #ifdef SHIFT SCALE_T scale = scmat + scvec + scret; #else // Be careful, the below implementation would not work if the denominator // exceeds the range of Q31_T range. In such a case, cast the denominator // to int64_t. 
SCALE_T scale = scmat * scvec * scret; #endif while (nelem--) { index = *row_indices++; vec_offset = *vec++; while (index != 0) { #ifdef SHIFT ret[index - 1] += ((*mat_values++) * vec_offset) >> scale; #else ret[index - 1] += ((*mat_values++) * vec_offset) / scale; #endif index = *row_indices++; } } } void q7_t_add(const Q7_T* ten1, const Q7_T* ten2, ITER_T nbatches, ITER_T nrows, ITER_T ncols, ITER_T nchannels, Q7_T* ret, SCALE_T scten1, SCALE_T scten2, SCALE_T scret) { ITER_T len = nbatches * nrows * ncols * nchannels; #ifdef SHIFT SCALE_T scaleten1 = scten1 + scret; SCALE_T scaleten2 = scten2 + scret; #else SCALE_T scaleten1 = scten1 * scret; SCALE_T scaleten2 = scten2 * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); #else *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); #else *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); #endif } } void q15_t_add(const Q15_T* ten1, const Q15_T* ten2, ITER_T nbatches, ITER_T nrows, ITER_T ncols, ITER_T nchannels, Q15_T* ret, SCALE_T scten1, SCALE_T scten2, SCALE_T scret) { ITER_T len = nbatches * nrows * ncols * nchannels; #ifdef SHIFT SCALE_T scaleten1 = scten1 + scret; SCALE_T scaleten2 = scten2 + scret; #else SCALE_T scaleten1 = scten1 * scret; SCALE_T scaleten2 = scten2 * scret; #endif #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> 
scaleten2)); *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); #else *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); #endif } #endif while (len--) { #ifdef SHIFT *ret++ = ((*ten1++ >> scaleten1) + (*ten2++ >> scaleten2)); #else *ret++ = ((*ten1++ / scaleten1) + (*ten2++ / scaleten2)); #endif } } void q7xq15_q7_t_add_vec(const Q7_T* ten, const Q15_T* const vec, ITER_T nbatches, ITER_T nrows, ITER_T ncols, ITER_T nchannels, Q7_T* ret, SCALE_T scten, SCALE_T scvec, SCALE_T scret) { ITER_T len = nbatches * nrows * ncols; #ifdef SHIFT SCALE_T scaleten = scten + scret; SCALE_T scalevec = scvec + scret; #else SCALE_T scaleten = scten * scret; SCALE_T scalevec = scvec * scret; #endif while (len--) { ITER_T channels = nchannels; const Q15_T* vec_offset = (const Q15_T*)vec; #ifdef LOOP_UNROLL ITER_T len_unroll = channels >> 2; channels = channels % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); #else *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); #endif } #endif while (channels--) { #ifdef SHIFT *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); #else *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); #endif } } } void q15_t_add_vec(const Q15_T* ten, const Q15_T* const vec, ITER_T nbatches, ITER_T nrows, ITER_T 
ncols, ITER_T nchannels, Q15_T* ret, SCALE_T scten, SCALE_T scvec, SCALE_T scret) { ITER_T len = nbatches * nrows * ncols; #ifdef SHIFT SCALE_T scaleten = scten + scret; SCALE_T scalevec = scvec + scret; #else SCALE_T scaleten = scten * scret; SCALE_T scalevec = scvec * scret; #endif while (len--) { ITER_T channels = nchannels; const Q15_T* vec_offset = (const Q15_T*)vec; #ifdef LOOP_UNROLL ITER_T len_unroll = channels >> 2; channels = channels % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); #else *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); #endif } #endif while (channels--) { #ifdef SHIFT *ret++ = ((*ten++ >> scaleten) + (*vec_offset++ >> scalevec)); #else *ret++ = ((*ten++ / scaleten) + (*vec_offset++ / scalevec)); #endif } } } void q7_t_relu(const Q7_T* ten, ITER_T nbatches, ITER_T nrows, ITER_T ncols, ITER_T nchannels, Q7_T* ret, Q7_T limit, Q7_T div) { ITER_T len = nbatches * nrows * ncols * nchannels; #ifdef LOOP_UNROLL ITER_T len_unroll = len >> 2; len = len % 4; while (len_unroll--) { *ret++ = q7_relu(*ten++, limit) / div; *ret++ = q7_relu(*ten++, limit) / div; *ret++ = q7_relu(*ten++, limit) / div; *ret++ = q7_relu(*ten++, limit) / div; } #endif while (len--) { *ret++ = q7_relu(*ten++, limit) / div; } } void q15_t_l2_norm(const Q15_T* ten, ITER_T nbatches, ITER_T nrows, ITER_T ncols, ITER_T nchannels, Q15_T* ret, SCALE_T scale_in, SCALE_T scale_out) { ITER_T len = nbatches * nrows * ncols; #ifndef SHIFT SCALE_T scdiv = (1 << scale_out); #endif for (ITER_T i = 0; i < len; i++) { Q31_T sum_square = 0; ITER_T channels = 
nchannels; const Q15_T* ten_offset = ten; #ifdef LOOP_UNROLL ITER_T len_unroll = channels >> 2; channels = channels % 4; while (len_unroll--) { Q31_T w = *ten_offset++; Q31_T x = *ten_offset++; Q31_T y = *ten_offset++; Q31_T z = *ten_offset++; sum_square += ((w * w) >> (2 * scale_out)); sum_square += ((x * x) >> (2 * scale_out)); sum_square += ((y * y) >> (2 * scale_out)); sum_square += ((z * z) >> (2 * scale_out)); } #endif while (channels--) { Q31_T w = *ten_offset++; sum_square += ((w * w) >> (2 * scale_out)); } Q15_T inverse_norm_low = 1; Q15_T inverse_norm_high = (1 << (scale_out - 1)); Q31_T one = (1 << (-(2 * scale_in + 2))); while (inverse_norm_low + 1 < inverse_norm_high) { Q15_T mid = ((inverse_norm_high + inverse_norm_low) >> 1); if ((Q63_T)sum_square * mid * mid > one) { inverse_norm_high = mid; } else { inverse_norm_low = mid; } } channels = nchannels; #ifdef LOOP_UNROLL len_unroll = channels >> 2; channels = channels % 4; while (len_unroll--) { #ifdef SHIFT *ret++ = ((*ten++) >> scale_out) * inverse_norm_low; *ret++ = ((*ten++) >> scale_out) * inverse_norm_low; *ret++ = ((*ten++) >> scale_out) * inverse_norm_low; *ret++ = ((*ten++) >> scale_out) * inverse_norm_low; #else *ret++ = ((*ten++) / scdiv) * inverse_norm_low; *ret++ = ((*ten++) / scdiv) * inverse_norm_low; *ret++ = ((*ten++) / scdiv) * inverse_norm_low; *ret++ = ((*ten++) / scdiv) * inverse_norm_low; #endif } #endif while (channels--) { #ifdef SHIFT *ret++ = ((*ten++) >> scale_out) * inverse_norm_low; #else *ret++ = ((*ten++) / scdiv) * inverse_norm_low; #endif } } } void q7xq15_q7_convolution(const Q7_T* const input, const Q15_T* const filter, Q7_T* const output, ITER_T N, ITER_T H, ITER_T W, ITER_T CIn, ITER_T HF, ITER_T WF, ITER_T CF, ITER_T COut, ITER_T HOut, ITER_T WOut, ITER_T G, S_ITER_T HPadU, S_ITER_T HPadD, S_ITER_T WPadL, S_ITER_T WPadR, ITER_T HStride, ITER_T WStride, ITER_T HDilation, ITER_T WDilation, SCALE_T scinput, SCALE_T scoutput, SCALE_T demote) { S_ITER_T HOffsetFL = ((HF 
- 1) >> 1); S_ITER_T HOffsetFR = (HF >> 1); S_ITER_T WOffsetFL = ((WF - 1) >> 1); S_ITER_T WOffsetFR = (WF >> 1); S_ITER_T HOffsetL = ((S_ITER_T)HDilation * HOffsetFL) - HPadU; S_ITER_T WOffsetL = ((S_ITER_T)WDilation * WOffsetFL) - WPadL; S_ITER_T HOffsetR = ((S_ITER_T)HDilation * HOffsetFR) - HPadD; S_ITER_T WOffsetR = ((S_ITER_T)WDilation * WOffsetFR) - WPadR; ITER_T HOffsetIn = W * CIn; ITER_T NOffsetIn = H * HOffsetIn; ITER_T WOffsetF = CF * COut; ITER_T HOffsetF = WF * WOffsetF; ITER_T GOffsetF = HF * HOffsetF; ITER_T WOffsetOut = (COut * G); ITER_T HOffsetOut = WOut * WOffsetOut; ITER_T NOffsetOut = HOut * HOffsetOut; Q31_T sum; #ifdef SHIFT SCALE_T scale = scinput + scoutput + demote; #else SCALE_T scale = scinput * scoutput * demote; #endif for (ITER_T n = 0; n < N; n++) { ITER_T hout = 0; ITER_T NIndexIn = n * NOffsetIn; ITER_T NIndexOut = n * NOffsetOut; for (S_ITER_T h = HOffsetL; h < (S_ITER_T)H - HOffsetR; h += (S_ITER_T)HStride, hout++) { ITER_T wout = 0; ITER_T HIndexOut = hout * HOffsetOut + NIndexOut; for (S_ITER_T w = WOffsetL; w < (S_ITER_T)W - WOffsetR; w += (S_ITER_T)WStride, wout++) { ITER_T WIndexOut = wout * WOffsetOut + HIndexOut; for (ITER_T g = 0; g < G; g++) { ITER_T CIndexIn = g * CF + NIndexIn; ITER_T GIndexF = g * GOffsetF; Q7_T* output_offset = ((Q7_T*)output) + g * COut + WIndexOut; for (ITER_T c = 0; c < COut; c++) { sum = 0; for (S_ITER_T hf = -HOffsetFL; hf <= HOffsetFR; hf++) { S_ITER_T hoffset = h + ((S_ITER_T)HDilation * hf); if ((hoffset < 0) || (hoffset >= (S_ITER_T)H)) { continue; } ITER_T HIndexIn = ((ITER_T)hoffset) * HOffsetIn + CIndexIn; ITER_T HIndexF = ((ITER_T)(hf + HOffsetFL)) * HOffsetF + GIndexF + c; for (S_ITER_T wf = -WOffsetFL; wf <= WOffsetFR; wf++) { S_ITER_T woffset = w + ((S_ITER_T)WDilation * wf); if ((woffset < 0) || (woffset >= (S_ITER_T)W)) { continue; } const Q7_T* input_offset = ((const Q7_T*)input) + ((ITER_T)woffset) * CIn + HIndexIn; const Q15_T* filter_offset = ((const Q15_T*)filter) + 
((ITER_T)(wf + WOffsetFL)) * WOffsetF + HIndexF; ITER_T channels = CF; #ifdef LOOP_UNROLL ITER_T len_unroll = CF >> 2; channels = CF % 4; while (len_unroll--) { sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; } #endif while (channels--) { sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; } } } #ifdef SHIFT *output_offset++ = (sum >> scale); #else *output_offset++ = (sum / scale); #endif } } } } } } void q7xq15_q15_convolution(const Q7_T* const input, const Q15_T* const filter, Q15_T* const output, ITER_T N, ITER_T H, ITER_T W, ITER_T CIn, ITER_T HF, ITER_T WF, ITER_T CF, ITER_T COut, ITER_T HOut, ITER_T WOut, ITER_T G, S_ITER_T HPadU, S_ITER_T HPadD, S_ITER_T WPadL, S_ITER_T WPadR, ITER_T HStride, ITER_T WStride, ITER_T HDilation, ITER_T WDilation, SCALE_T scinput, SCALE_T scoutput, SCALE_T demote) { S_ITER_T HOffsetFL = ((HF - 1) >> 1); S_ITER_T HOffsetFR = (HF >> 1); S_ITER_T WOffsetFL = ((WF - 1) >> 1); S_ITER_T WOffsetFR = (WF >> 1); S_ITER_T HOffsetL = ((S_ITER_T)HDilation * HOffsetFL) - HPadU; S_ITER_T WOffsetL = ((S_ITER_T)WDilation * WOffsetFL) - WPadL; S_ITER_T HOffsetR = ((S_ITER_T)HDilation * HOffsetFR) - HPadD; S_ITER_T WOffsetR = ((S_ITER_T)WDilation * WOffsetFR) - WPadR; ITER_T HOffsetIn = W * CIn; ITER_T NOffsetIn = H * HOffsetIn; ITER_T WOffsetF = CF * COut; ITER_T HOffsetF = WF * WOffsetF; ITER_T GOffsetF = HF * HOffsetF; ITER_T WOffsetOut = (COut * G); ITER_T HOffsetOut = WOut * WOffsetOut; ITER_T NOffsetOut = HOut * HOffsetOut; Q31_T sum; #ifdef SHIFT SCALE_T scale = scinput + scoutput + demote; #else SCALE_T scale = scinput * scoutput * demote; #endif for (ITER_T n = 0; n < N; n++) { ITER_T hout = 0; ITER_T NIndexIn = n 
* NOffsetIn; ITER_T NIndexOut = n * NOffsetOut; for (S_ITER_T h = HOffsetL; h < (S_ITER_T)H - HOffsetR; h += (S_ITER_T)HStride, hout++) { ITER_T wout = 0; ITER_T HIndexOut = hout * HOffsetOut + NIndexOut; for (S_ITER_T w = WOffsetL; w < (S_ITER_T)W - WOffsetR; w += (S_ITER_T)WStride, wout++) { ITER_T WIndexOut = wout * WOffsetOut + HIndexOut; for (ITER_T g = 0; g < G; g++) { ITER_T CIndexIn = g * CF + NIndexIn; ITER_T GIndexF = g * GOffsetF; Q15_T* output_offset = ((Q15_T*)output) + g * COut + WIndexOut; for (ITER_T c = 0; c < COut; c++) { sum = 0; for (S_ITER_T hf = -HOffsetFL; hf <= HOffsetFR; hf++) { S_ITER_T hoffset = h + ((S_ITER_T)HDilation * hf); if ((hoffset < 0) || (hoffset >= (S_ITER_T)H)) { continue; } ITER_T HIndexIn = ((ITER_T)hoffset) * HOffsetIn + CIndexIn; ITER_T HIndexF = ((ITER_T)(hf + HOffsetFL)) * HOffsetF + GIndexF + c; for (S_ITER_T wf = -WOffsetFL; wf <= WOffsetFR; wf++) { S_ITER_T woffset = w + ((S_ITER_T)WDilation * wf); if ((woffset < 0) || (woffset >= (S_ITER_T)W)) { continue; } const Q7_T* input_offset = ((const Q7_T*)input) + ((ITER_T)woffset) * CIn + HIndexIn; const Q15_T* filter_offset = ((const Q15_T*)filter) + ((ITER_T)(wf + WOffsetFL)) * WOffsetF + HIndexF; ITER_T channels = CF; #ifdef LOOP_UNROLL ITER_T len_unroll = CF >> 2; channels = CF % 4; while (len_unroll--) { sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; } #endif while (channels--) { sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; } } } #ifdef SHIFT *output_offset++ = (sum >> scale); #else *output_offset++ = (sum / scale); #endif } } } } } } void q15_convolution(const Q15_T* const input, const Q15_T* const filter, Q15_T* const output, ITER_T N, 
ITER_T H, ITER_T W, ITER_T CIn, ITER_T HF, ITER_T WF, ITER_T CF, ITER_T COut, ITER_T HOut, ITER_T WOut, ITER_T G, S_ITER_T HPadU, S_ITER_T HPadD, S_ITER_T WPadL, S_ITER_T WPadR, ITER_T HStride, ITER_T WStride, ITER_T HDilation, ITER_T WDilation, SCALE_T scinput, SCALE_T scoutput, SCALE_T demote) { S_ITER_T HOffsetFL = ((HF - 1) >> 1); S_ITER_T HOffsetFR = (HF >> 1); S_ITER_T WOffsetFL = ((WF - 1) >> 1); S_ITER_T WOffsetFR = (WF >> 1); S_ITER_T HOffsetL = ((S_ITER_T)HDilation * HOffsetFL) - HPadU; S_ITER_T WOffsetL = ((S_ITER_T)WDilation * WOffsetFL) - WPadL; S_ITER_T HOffsetR = ((S_ITER_T)HDilation * HOffsetFR) - HPadD; S_ITER_T WOffsetR = ((S_ITER_T)WDilation * WOffsetFR) - WPadR; ITER_T HOffsetIn = W * CIn; ITER_T NOffsetIn = H * HOffsetIn; ITER_T WOffsetF = CF * COut; ITER_T HOffsetF = WF * WOffsetF; ITER_T GOffsetF = HF * HOffsetF; ITER_T WOffsetOut = (COut * G); ITER_T HOffsetOut = WOut * WOffsetOut; ITER_T NOffsetOut = HOut * HOffsetOut; Q63_T sum; #ifdef SHIFT SCALE_T scale = scinput + scoutput + demote; #else SCALE_T scale = scinput * scoutput * demote; #endif for (ITER_T n = 0; n < N; n++) { ITER_T hout = 0; ITER_T NIndexIn = n * NOffsetIn; ITER_T NIndexOut = n * NOffsetOut; for (S_ITER_T h = HOffsetL; h < (S_ITER_T)H - HOffsetR; h += (S_ITER_T)HStride, hout++) { ITER_T wout = 0; ITER_T HIndexOut = hout * HOffsetOut + NIndexOut; for (S_ITER_T w = WOffsetL; w < (S_ITER_T)W - WOffsetR; w += (S_ITER_T)WStride, wout++) { ITER_T WIndexOut = wout * WOffsetOut + HIndexOut; for (ITER_T g = 0; g < G; g++) { ITER_T CIndexIn = g * CF + NIndexIn; ITER_T GIndexF = g * GOffsetF; Q15_T* output_offset = ((Q15_T*)output) + g * COut + WIndexOut; for (ITER_T c = 0; c < COut; c++) { sum = 0; for (S_ITER_T hf = -HOffsetFL; hf <= HOffsetFR; hf++) { S_ITER_T hoffset = h + ((S_ITER_T)HDilation * hf); if ((hoffset < 0) || (hoffset >= (S_ITER_T)H)) { continue; } ITER_T HIndexIn = ((ITER_T)hoffset) * HOffsetIn + CIndexIn; ITER_T HIndexF = ((ITER_T)(hf + HOffsetFL)) * HOffsetF + 
GIndexF + c; for (S_ITER_T wf = -WOffsetFL; wf <= WOffsetFR; wf++) { S_ITER_T woffset = w + ((S_ITER_T)WDilation * wf); if ((woffset < 0) || (woffset >= (S_ITER_T)W)) { continue; } const Q15_T* input_offset = ((const Q15_T*)input) + ((ITER_T)woffset) * CIn + HIndexIn; const Q15_T* filter_offset = ((const Q15_T*)filter) + ((ITER_T)(wf + WOffsetFL)) * WOffsetF + HIndexF; ITER_T channels = CF; #ifdef LOOP_UNROLL ITER_T len_unroll = CF >> 2; channels = CF % 4; while (len_unroll--) { sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; } #endif while (channels--) { sum += ((Q31_T)(*input_offset++)) * ((Q31_T)(*filter_offset)); filter_offset += COut; } } } #ifdef SHIFT *output_offset++ = (sum >> scale); #else *output_offset++ = (sum / scale); #endif } } } } } }
22,897
799
{ "associatedToAll": false, "associatedTypes": [ "Shadow IT" ], "caseInsensitive": true, "cliName": "shadowitcloudaccounttype", "closeForm": false, "content": true, "editForm": true, "group": 0, "hidden": false, "id": "incident_shadowitcloudaccounttype", "isReadOnly": false, "locked": false, "name": "Shadow IT Cloud Account Type", "neverSetAsRequired": false, "ownerOnly": false, "required": false, "selectValues": [ "Unknown", "AWS", "Google", "Azure", "Oracle", "Alibaba", "Other" ], "sla": 0, "system": false, "threshold": 72, "type": "singleSelect", "unmapped": false, "unsearchable": false, "useAsKpi": true, "version": -1, "fromVersion": "6.0.0" }
400
517
package ro.isdc.wro; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertSame; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import ro.isdc.wro.config.Context; /** * @author <NAME> */ public class TestWroRuntimeException { @BeforeClass public static void onBeforeClass() { assertEquals(0, Context.countActive()); } @AfterClass public static void onAfterClass() { assertEquals(0, Context.countActive()); } @Test public void shouldPreserveOriginalExceptionMessageWhenWrap() { final String message = "someMessage"; Exception e = new IllegalArgumentException(message); Exception result = WroRuntimeException.wrap(e); assertEquals(e.getMessage(), result.getMessage()); } @Test public void shouldNotWrapWhenExceptionIsAWroRuntimeException() { final String message = "someMessage"; Exception e = new WroRuntimeException(message); Exception result = WroRuntimeException.wrap(e); assertSame(e, result); } }
344
348
{"nom":"Larroque","circ":"8ème circonscription","dpt":"Haute-Garonne","inscrits":309,"abs":70,"votants":239,"blancs":32,"nuls":0,"exp":207,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":117},{"nuance":"SOC","nom":"<NAME>","voix":90}]}
95
1,127
<reponame>ryanloney/openvino-1 // Copyright (C) 2018-2022 Intel Corporation // SPDX-License-Identifier: Apache-2.0 // #pragma once #include <gtest/gtest.h> #include <tests_common.hpp> #include <legacy/ie_layers_internal.hpp> #include <legacy/details/ie_cnn_network_iterator.hpp> #include <functional_test_utils/plugin_cache.hpp> #include "single_layer_common.hpp" #include "conv_ref.hpp" #include "deconv_ref.hpp" #include "def_conv_ref.hpp" #include "pool_ref.hpp" #include "single_layer_common.hpp" #include "common_layers_params.hpp" #include <xml_net_builder.hpp> using namespace InferenceEngine; struct PluginDependentParam { std::string deviceName; InferenceEngine::Layout layout; InferenceEngine::Precision precision; float tolerance; }; class LayerTestHelper { protected: std::string type; public: using Ptr = std::shared_ptr<LayerTestHelper>; explicit LayerTestHelper(const std::string &_type) : type(_type) {} virtual ~LayerTestHelper() = default; LayerTestHelper() = default; virtual void updatePaddingValues(const InferenceEngine::CNNNetwork &network) = 0; virtual std::map<std::string, std::string> getMapParams() const = 0; virtual size_t getWeightByteSize(size_t elementSize, size_t numChannels) const = 0; virtual size_t getBiasByteSize(size_t elementSize) const = 0; std::string getType() const { return type; } virtual void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const = 0; virtual void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data, size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const = 0; InferenceEngine::Blob::Ptr getRefBlob(size_t weightSize, size_t biasSize, const InferenceEngine::TBlob<uint8_t>::Ptr &weights, const std::vector<InferenceEngine::Blob::Ptr> srcs, const InferenceEngine::TensorDesc 
&dstTensorDesc, const InferenceEngine::Precision &precision) const; static std::string propertyToString(const InferenceEngine::PropertyVector<unsigned int> &propertyVector); }; class ConvolutionTestHelper : public LayerTestHelper { protected: CommonTestUtils::conv_common_params convParams; public: explicit ConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams); void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override; std::map<std::string, std::string> getMapParams() const override; size_t getWeightByteSize(size_t elementSize, size_t numChannels) const override; size_t getBiasByteSize(size_t elementSize) const override; void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const override; void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data, size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override; }; class DeconvolutionTestHelper : public ConvolutionTestHelper { public: explicit DeconvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams); void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const override; void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data, size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override; }; class DeformableConvolutionTestHelper : public ConvolutionTestHelper { protected: CommonTestUtils::def_conv_common_params defConvParams; public: explicit DeformableConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams, const int deformable_group); void 
updatePaddingValues(const InferenceEngine::CNNNetwork &network) override; std::map<std::string, std::string> getMapParams() const override; void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const override; void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data, size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override; }; class PoolingTestHelper : public LayerTestHelper { protected: CommonTestUtils::pool_common_params poolParams; public: explicit PoolingTestHelper(const CommonTestUtils::pool_common_params &_poolParams); void ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const override; void ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, InferenceEngine::Blob &dst, const InferenceEngine::ie_fp16 *weights_data, size_t weights_size, const InferenceEngine::ie_fp16 *bias_data, size_t bias_size) const override; std::map<std::string, std::string> getMapParams() const override; void updatePaddingValues(const InferenceEngine::CNNNetwork &network) override; size_t getWeightByteSize(size_t elementSize, size_t numChannels) const override; size_t getBiasByteSize(size_t elementSize) const override; }; PRETTY_PARAM(InitialShapes, CommonTestUtils::InOutShapes) PRETTY_PARAM(NewShapes, CommonTestUtils::InOutShapes) PRETTY_PARAM(ConvParams, CommonTestUtils::conv_common_params) PRETTY_PARAM(PluginParams, PluginDependentParam) PRETTY_PARAM(Helper, LayerTestHelper::Ptr) Blob::Ptr LayerTestHelper::getRefBlob(size_t weightSize, size_t biasSize, const TBlob<uint8_t>::Ptr &weights, const std::vector<InferenceEngine::Blob::Ptr> srcs, const TensorDesc &dstTensorDesc, const Precision &precision) 
const { Blob::Ptr dst_ref; if (precision == Precision::FP32) { dst_ref = make_shared_blob<float>(dstTensorDesc); dst_ref->allocate(); const auto *weights_data = weights->buffer().as<const float *>(); ref_fp32(srcs, *dst_ref.get(), weights_data, weightSize, weights_data + weightSize, biasSize); } else { dst_ref = make_shared_blob<ie_fp16>(dstTensorDesc); dst_ref->allocate(); const auto *weights_data = weights->buffer().as<const ie_fp16 *>(); ref_fp16(srcs, *dst_ref.get(), weights_data, weightSize, weights_data + weightSize, biasSize); } return dst_ref; } std::string LayerTestHelper::propertyToString(const PropertyVector<unsigned int> &propertyVector) { if (!propertyVector.size()) return ""; std::string result = std::to_string(propertyVector[0]); for (int i = 1; i < propertyVector.size(); i++) { result += "," + std::to_string(propertyVector[i]); } return result; } ConvolutionTestHelper::ConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams) : LayerTestHelper("Convolution"), convParams(_convParams) {} void ConvolutionTestHelper::updatePaddingValues(const CNNNetwork &network) { details::CNNNetworkIterator i(network), end; auto found = std::find_if(i, end, [this](const CNNLayer::Ptr &layer) { return layer->type == type; }); ASSERT_NE(found, end); auto castedLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found); auto allPad = getPaddings(*castedLayer.get()); convParams.pads_end = allPad.end; convParams.pads_begin = allPad.begin; } std::map<std::string, std::string> ConvolutionTestHelper::getMapParams() const { std::map<std::string, std::string> params; if (!convParams.auto_pad.empty()) { params["auto_pad"] = convParams.auto_pad; } params["group"] = std::to_string(convParams.group); params["output"] = std::to_string(convParams.out_c); auto propertyToString = [](const PropertyVector<unsigned int> &propertyVector) -> std::string { if (!propertyVector.size()) return ""; std::string result = std::to_string(propertyVector[0]); for (int i = 1; i < 
propertyVector.size(); i++) { result += "," + std::to_string(propertyVector[i]); } return result; }; params["kernel"] = propertyToString(convParams.kernel); params["strides"] = propertyToString(convParams.stride); params["pads_begin"] = propertyToString(convParams.pads_begin); params["pads_end"] = propertyToString(convParams.pads_end); params["dilations"] = propertyToString(convParams.dilation); return params; } size_t ConvolutionTestHelper::getWeightByteSize(size_t elementSize, size_t numChannels) const { return (convParams.kernel[X_AXIS] * convParams.kernel[Y_AXIS] * convParams.out_c * numChannels * elementSize) / convParams.group; } size_t ConvolutionTestHelper::getBiasByteSize(size_t elementSize) const { return convParams.out_c * elementSize; } void ConvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const { ref_conv_common<>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams); } void ConvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const ie_fp16 *weights_data, size_t weights_size, const ie_fp16 *bias_data, size_t bias_size) const { ref_conv_common<>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams); } DeconvolutionTestHelper::DeconvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams) : ConvolutionTestHelper( _convParams) { type = "Deconvolution"; } void DeconvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const { ref_deconv_common<float>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams); } void DeconvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const ie_fp16 *weights_data, size_t weights_size, const ie_fp16 *bias_data, 
size_t bias_size) const { ref_deconv_common<ie_fp16>(srcs, dst, weights_data, weights_size, bias_data, bias_size, convParams); } DeformableConvolutionTestHelper::DeformableConvolutionTestHelper(const CommonTestUtils::conv_common_params &_convParams, const int deformable_group) : defConvParams(convParams), ConvolutionTestHelper( _convParams) { defConvParams.deformable_group = deformable_group; type = "DeformableConvolution"; } void DeformableConvolutionTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const { ref_def_conv_common<float>(srcs, dst, weights_data, weights_size, bias_data, bias_size, defConvParams); } void DeformableConvolutionTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const ie_fp16 *weights_data, size_t weights_size, const ie_fp16 *bias_data, size_t bias_size) const { ref_def_conv_common<ie_fp16>(srcs, dst, weights_data, weights_size, bias_data, bias_size, defConvParams); } void DeformableConvolutionTestHelper::updatePaddingValues(const CNNNetwork &network) { details::CNNNetworkIterator i(network), end; auto found = std::find_if(i, end, [this](const CNNLayer::Ptr &layer) { return layer->type == type; }); ASSERT_NE(found, end); auto castedLayer = std::dynamic_pointer_cast<ConvolutionLayer>(*found); auto allPad = getPaddings(*castedLayer.get()); defConvParams.pads_end = allPad.end; defConvParams.pads_begin = allPad.begin; } std::map<std::string, std::string> DeformableConvolutionTestHelper::getMapParams() const { std::map<std::string, std::string> params; if (!defConvParams.auto_pad.empty()) { params["auto_pad"] = defConvParams.auto_pad; } params["group"] = std::to_string(defConvParams.group); params["output"] = std::to_string(defConvParams.out_c); params["deformable_group"] = std::to_string(defConvParams.deformable_group); auto propertyToString = [](const PropertyVector<unsigned int> 
&propertyVector) -> std::string { if (!propertyVector.size()) return ""; std::string result = std::to_string(propertyVector[0]); for (int i = 1; i < propertyVector.size(); i++) { result += "," + std::to_string(propertyVector[i]); } return result; }; params["kernel"] = propertyToString(defConvParams.kernel); params["strides"] = propertyToString(defConvParams.stride); params["pads_begin"] = propertyToString(defConvParams.pads_begin); params["pads_end"] = propertyToString(defConvParams.pads_end); params["dilations"] = propertyToString(defConvParams.dilation); return params; } PoolingTestHelper::PoolingTestHelper(const CommonTestUtils::pool_common_params &_poolParams) : LayerTestHelper("Pooling"), poolParams(_poolParams) { } std::map<std::string, std::string> PoolingTestHelper::getMapParams() const { std::map<std::string, std::string> params; if (!poolParams.auto_pad.empty()) { params["auto_pad"] = poolParams.auto_pad; } params["kernel"] = propertyToString(poolParams.kernel); params["strides"] = propertyToString(poolParams.stride); auto padStr = propertyToString(poolParams.pads_begin); if (!padStr.empty()) params["pads_begin"] = padStr; padStr = propertyToString(poolParams.pads_end); if (!padStr.empty()) params["pads_end"] = padStr; params["exclude-pad"] = poolParams.exclude_pad ? "true" : "false"; params["pool-method"] = poolParams.avg ? 
"avg" : "max"; return params; } void PoolingTestHelper::ref_fp32(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const float *weights_data, size_t weights_size, const float *bias_data, size_t bias_size) const { ref_pool_common<float>(srcs, dst, poolParams); } void PoolingTestHelper::ref_fp16(const std::vector<InferenceEngine::Blob::Ptr> srcs, Blob &dst, const ie_fp16 *weights_data, size_t weights_size, const ie_fp16 *bias_data, size_t bias_size) const { ref_pool_common<ie_fp16>(srcs, dst, poolParams); } void PoolingTestHelper::updatePaddingValues(const InferenceEngine::CNNNetwork &network) { details::CNNNetworkIterator i(network), end; auto found = std::find_if(i, end, [this](const CNNLayer::Ptr &layer) { return layer->type == type; }); ASSERT_NE(found, end); auto castedLayer = std::dynamic_pointer_cast<PoolingLayer>(*found); auto allPad = getPaddings(*castedLayer.get()); poolParams.pads_end = allPad.end; poolParams.pads_begin = allPad.begin; } size_t PoolingTestHelper::getWeightByteSize(size_t elementSize, size_t numChannels) const { return 0; } size_t PoolingTestHelper::getBiasByteSize(size_t elementSize) const { return 0; } class CommonSingleLayerTest : public testing::WithParamInterface<std::tuple<InitialShapes, NewShapes, PluginParams, Helper>>, public ::testing::Test { protected: void SetUp() override { auto params = GetParam(); initialShapes = std::get<0>(params); newShapes = std::get<1>(params); pluginParams = std::get<2>(params); layerHelper = std::get<3>(params); PluginCache::get().reset(); } ICNNNetwork::InputShapes setInputShapes(CNNNetwork &network, const std::vector<SizeVector> &dims) { auto inputShapes = network.getInputShapes(); int i = 0; IE_ASSERT(inputShapes.size() == dims.size()); for (auto &pair : inputShapes) { pair.second = dims[i++]; } return inputShapes; } TBlob<uint8_t>::Ptr createWeights(size_t elementSize, size_t weightByteSize, size_t biasByteSize) const { TBlob<uint8_t>::Ptr weights = 
make_shared_blob<uint8_t>({Precision::U8, {weightByteSize + biasByteSize}, Layout::C}); weights->allocate(); BufferWrapper wrappedWeights(weights, this->pluginParams.precision); fill_data_common(wrappedWeights, weights->size() / elementSize); return weights; } template<int Version = 3> static InferenceEngine::CNNNetwork buildSingleLayerNetwork(const std::string &layerType, const CommonTestUtils::InOutShapes &inOutShapes, std::map<std::string, std::string> *params, const std::string &layerDataName = "data", const Precision &precision = Precision::FP32, size_t weightsSize = 0, size_t biasesSize = 0, const TBlob<uint8_t>::Ptr &weights = nullptr) { return buildSingleLayerNetworkCommon<Version>(layerType, inOutShapes, params, layerDataName, precision, weightsSize, biasesSize, weights); } protected: CommonTestUtils::InOutShapes initialShapes; CommonTestUtils::InOutShapes newShapes; PluginDependentParam pluginParams; LayerTestHelper::Ptr layerHelper; InputInfo::Ptr inputData; std::string inputName; InputInfo::Ptr transData; std::string transName; DataPtr outputData; std::string outputName; }; TEST_P(CommonSingleLayerTest, inferAfterReshape) { Core ie; auto params = layerHelper->getMapParams(); size_t elementSize = Precision(pluginParams.precision).size(); ASSERT_EQ(initialShapes.inDims[0][1], newShapes.inDims[0][1]); size_t numChannels = initialShapes.inDims[0][1]; size_t weightByteSize = layerHelper->getWeightByteSize(elementSize, numChannels); size_t biasByteSize = layerHelper->getBiasByteSize(elementSize); auto weights = createWeights(elementSize, weightByteSize, biasByteSize); auto network = buildSingleLayerNetwork<3>(layerHelper->getType(), initialShapes, &params, "data", pluginParams.precision, weightByteSize, biasByteSize, weights); std::tie(inputName, inputData) = (*network.getInputsInfo().begin()); inputData->setPrecision(pluginParams.precision); inputData->setLayout(pluginParams.layout); std::tie(outputName, outputData) = (*network.getOutputsInfo().begin()); 
outputData->setPrecision(pluginParams.precision); outputData->setLayout(pluginParams.layout); if (layerHelper->getType() == "DeformableConvolution") { std::tie(transName, transData) = (*network.getInputsInfo().find("Input1")); transData->setPrecision(pluginParams.precision); transData->setLayout(pluginParams.layout); } auto inputShapes = setInputShapes(network, newShapes.inDims); network.reshape(inputShapes); layerHelper->updatePaddingValues(network); auto exeNetwork = ie.LoadNetwork(network, pluginParams.deviceName); auto request = exeNetwork.CreateInferRequest(); auto src = request.GetBlob(inputName); GenRandomDataCommon(src); size_t weights_size = weightByteSize / elementSize; size_t biases_size = biasByteSize / elementSize; if (layerHelper->getType() == "DeformableConvolution") { auto trans = request.GetBlob(transName); GenRandomDataCommon(trans); request.Infer(); auto dst = request.GetBlob(outputName); Blob::Ptr dst_ref = layerHelper->getRefBlob(weights_size, biases_size, weights, { src, trans }, dst->getTensorDesc(), pluginParams.precision); CompareCommonAbsolute(dst, dst_ref, pluginParams.tolerance); BufferWrapper src_ptr(src); BufferWrapper trans_ptr(trans); BufferWrapper dst_ptr(dst_ref); } else { request.Infer(); auto dst = request.GetBlob(outputName); Blob::Ptr dst_ref = layerHelper->getRefBlob(weights_size, biases_size, weights, { src }, dst->getTensorDesc(), pluginParams.precision); CompareCommonAbsolute(dst, dst_ref, pluginParams.tolerance); } }
9,129
999
<gh_stars>100-1000 # Copyright 2020 Netflix, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime from unittest.mock import patch import pytest from dateutil.parser import parse as ts_parse from repokid.exceptions import IntegrityError from repokid.exceptions import MissingRepoableServices from repokid.exceptions import ModelError from repokid.exceptions import RoleNotFoundError from repokid.role import Role from tests import vars def test_create_role(role_dict): r = Role(**role_dict) assert r assert r.aa_data == vars.aa_data assert r.active == vars.active assert r.arn == vars.arn assert r.assume_role_policy_document == vars.assume_role_policy_document assert r.create_date == vars.create_date assert r.disqualified_by == vars.disqualified_by assert r.last_updated == vars.last_updated assert r.no_repo_permissions == vars.no_repo_permissions assert r.opt_out == vars.opt_out assert r.policies == vars.policies assert r.refreshed == vars.refreshed assert r.repoable_permissions == vars.repoable_permissions assert r.repoable_services == vars.repoable_services assert r.repoed == vars.repoed assert r.repo_scheduled == vars.repo_scheduled assert r.role_id == vars.role_id assert r.role_name == vars.role_name assert r.scheduled_perms == vars.scheduled_perms assert r.stats == vars.stats assert r.tags == vars.tags assert r.total_permissions == vars.total_permissions def test_create_role_from_aliases(role_dict_with_aliases): r = Role(**role_dict_with_aliases) assert r 
assert r.aa_data == vars.aa_data assert r.active == vars.active assert r.arn == vars.arn assert r.assume_role_policy_document == vars.assume_role_policy_document assert r.create_date == vars.create_date assert r.disqualified_by == vars.disqualified_by assert r.last_updated == vars.last_updated assert r.no_repo_permissions == vars.no_repo_permissions assert r.opt_out == vars.opt_out assert r.policies == vars.policies assert r.refreshed == vars.refreshed assert r.repoable_permissions == vars.repoable_permissions assert r.repoable_services == vars.repoable_services assert r.repoed == vars.repoed assert r.repo_scheduled == vars.repo_scheduled assert r.role_id == vars.role_id assert r.role_name == vars.role_name assert r.scheduled_perms == vars.scheduled_perms assert r.stats == vars.stats assert r.tags == vars.tags assert r.total_permissions == vars.total_permissions @patch("repokid.role.Role._calculate_no_repo_permissions") @patch("repokid.role.Role.store") def test_role_add_policy_version( mock_store, mock_calculate_no_repo_permissions, role_dict ): r = Role(**role_dict) source = "Test" fake_policy = {"what_do": "everything"} assert len(r.policies) == 1 r.add_policy_version(fake_policy, source=source, store=True) assert len(r.policies) == 2 assert r.policies[1]["Source"] == source assert r.policies[1]["Policy"] == fake_policy mock_calculate_no_repo_permissions.assert_called_once() mock_store.assert_called_once() assert mock_store.call_args[1]["fields"] == ["Policies", "NoRepoPermissions"] @patch("repokid.role.Role._calculate_no_repo_permissions") @patch("repokid.role.Role.store") def test_role_add_policy_version_duplicate( mock_store, mock_calculate_no_repo_permissions, role_dict ): r = Role(**role_dict) source = "Fixture" fake_policy = vars.policies[0]["Policy"] assert len(r.policies) == 1 r.add_policy_version(fake_policy, source=source, store=True) assert len(r.policies) == 1 assert r.policies[0]["Policy"] == fake_policy 
mock_calculate_no_repo_permissions.assert_not_called() mock_store.assert_not_called() @patch("repokid.role.convert_repoable_perms_to_perms_and_services") @patch("repokid.role.get_repoable_permissions") @patch("repokid.role.Role.get_permissions_for_policy_version") def test_role_calculate_repo_scores( mock_get_permissions_for_policy_version, mock_get_repoable_permissions, mock_convert_repoable_perms_to_perms_and_services, role_dict, ): mock_get_permissions_for_policy_version.return_value = ( {"service1:action1", "service1:action2", "service2", "service3:action3"}, {"service1:action2", "service2", "service3:action3"}, ) mock_get_repoable_permissions.return_value = {"service1:action2", "service2"} mock_convert_repoable_perms_to_perms_and_services.return_value = ( {"service1:action2"}, {"service2"}, ) r = Role(**role_dict) r.calculate_repo_scores(0, {}) mock_get_permissions_for_policy_version.assert_called_once() mock_get_repoable_permissions.assert_called_once() mock_convert_repoable_perms_to_perms_and_services.assert_called_once() assert r.total_permissions == 4 assert r.repoable_services == ["service1:action2", "service2"] assert r.repoable_permissions == 2 @patch("repokid.role.convert_repoable_perms_to_perms_and_services") @patch("repokid.role.get_repoable_permissions") @patch("repokid.role.Role.get_permissions_for_policy_version") def test_role_calculate_repo_scores_disqualified( mock_get_permissions_for_policy_version, mock_get_repoable_permissions, mock_convert_repoable_perms_to_perms_and_services, role_dict, ): mock_get_permissions_for_policy_version.return_value = ( {"service1:action1", "service1:action2", "service2", "service3:action3"}, {"service1:action2", "service2", "service3:action3"}, ) r = Role(**role_dict) r.disqualified_by = ["a filter"] r.calculate_repo_scores(0, {}) mock_get_permissions_for_policy_version.assert_called_once() mock_get_repoable_permissions.assert_not_called() mock_convert_repoable_perms_to_perms_and_services.assert_not_called() 
assert r.total_permissions == 4 assert r.repoable_services == [] assert r.repoable_permissions == 0 @patch("repokid.role.convert_repoable_perms_to_perms_and_services") @patch("repokid.role.get_repoable_permissions") @patch("repokid.role.Role.get_permissions_for_policy_version") def test_role_calculate_repo_scores_no_aa_data( mock_get_permissions_for_policy_version, mock_get_repoable_permissions, mock_convert_repoable_perms_to_perms_and_services, role_dict, ): mock_get_permissions_for_policy_version.return_value = ( {"service1:action1", "service1:action2", "service2", "service3:action3"}, {"service1:action2", "service2", "service3:action3"}, ) r = Role(**role_dict) r.aa_data = [] r.calculate_repo_scores(0, {}) mock_get_permissions_for_policy_version.assert_called_once() mock_get_repoable_permissions.assert_not_called() mock_convert_repoable_perms_to_perms_and_services.assert_not_called() assert r.total_permissions == 4 assert r.repoable_services == [] assert r.repoable_permissions == 0 @patch("repokid.role.get_permissions_in_policy") def test_role_get_permissions_for_policy_version( mock_get_permissions_in_policy, role_dict ): r = Role(**role_dict) r.get_permissions_for_policy_version() mock_get_permissions_in_policy.assert_called_once() assert mock_get_permissions_in_policy.call_args[0][0] == vars.policies[-1]["Policy"] assert not mock_get_permissions_in_policy.call_args[1]["warn_unknown_perms"] @patch("repokid.role.get_permissions_in_policy") def test_role_get_permissions_for_policy_version_no_policies( mock_get_permissions_in_policy, role_dict ): r = Role(**role_dict) r.policies = {} r.get_permissions_for_policy_version() mock_get_permissions_in_policy.assert_not_called() @patch("repokid.role.find_newly_added_permissions") def test_role_calculate_no_repo_permissions( mock_find_newly_added_permissions, role_dict ): mock_find_newly_added_permissions.return_value = { "service1:action1", "service1:action2", "service2:action3", } r = Role(**role_dict) 
r._calculate_no_repo_permissions() mock_find_newly_added_permissions.assert_called_once() assert mock_find_newly_added_permissions.call_args[0][0] == {} assert ( mock_find_newly_added_permissions.call_args[0][1] == vars.policies[-1]["Policy"] ) assert "service3:action4" not in r.no_repo_permissions assert "service1:action1" in r.no_repo_permissions assert "service1:action2" in r.no_repo_permissions assert "service2:action3" in r.no_repo_permissions assert r.no_repo_permissions["service1:action1"] > 0 assert r.no_repo_permissions["service1:action2"] > 0 assert r.no_repo_permissions["service2:action3"] > 0 @patch("repokid.role.get_repoed_policy") @patch("repokid.role.get_services_and_permissions_from_repoable") def test_role_get_repoed_policy( mock_get_services_and_permissions_from_repoable, mock_get_repoed_policy, role_dict ): mock_get_repoed_policy.return_value = ({"repoed": "woohoo"}, ["old_policy_name"]) r = Role(**role_dict) repoed_policies, deleted_policy_names = r.get_repoed_policy(scheduled=False) mock_get_repoed_policy.assert_called_once() mock_get_services_and_permissions_from_repoable.assert_not_called() assert mock_get_repoed_policy.call_args[0][0] == vars.policies[-1]["Policy"] assert mock_get_repoed_policy.call_args[0][1] == set(vars.repoable_services) assert repoed_policies == {"repoed": "woohoo"} assert deleted_policy_names == ["old_policy_name"] @patch("repokid.role.get_repoed_policy") @patch("repokid.role.get_services_and_permissions_from_repoable") def test_role_get_repoed_policy_scheduled( mock_get_services_and_permissions_from_repoable, mock_get_repoed_policy, role_dict ): mock_get_repoed_policy.return_value = ({"repoed": "woohoo"}, ["old_policy_name"]) mock_get_services_and_permissions_from_repoable.return_value = ( {"service1:action1", "service1:action2", "service2:action3"}, {"service3"}, ) r = Role(**role_dict) r.scheduled_perms = ["service1:action1"] repoed_policies, deleted_policy_names = r.get_repoed_policy(scheduled=True) 
mock_get_services_and_permissions_from_repoable.assert_called_once() mock_get_repoed_policy.assert_called_once() assert mock_get_repoed_policy.call_args[0][0] == vars.policies[-1]["Policy"] assert mock_get_repoed_policy.call_args[0][1] == {"service3", "service1:action1"} assert repoed_policies == {"repoed": "woohoo"} assert deleted_policy_names == ["old_policy_name"] @patch("repokid.role.get_repoed_policy") @patch("repokid.role.get_services_and_permissions_from_repoable") def test_role_get_repoed_policy_no_repoable_services( mock_get_services_and_permissions_from_repoable, mock_get_repoed_policy, role_dict ): r = Role(**role_dict) r.repoable_services = [] with pytest.raises(MissingRepoableServices): r.get_repoed_policy() mock_get_repoed_policy.assert_not_called() mock_get_services_and_permissions_from_repoable.assert_not_called() @patch("repokid.role.Role._stale_aa_services") def test_role_is_eligible_for_repo(mock_stale_aa_services, role_dict): mock_stale_aa_services.return_value = [] r = Role(**role_dict) eligible, reason = r.is_eligible_for_repo() mock_stale_aa_services.assert_called_once() assert eligible assert not reason @patch("repokid.role.Role._stale_aa_services") def test_role_is_eligible_for_repo_disqualified(mock_stale_aa_services, role_dict): r = Role(**role_dict) r.disqualified_by = ["filter1", "filter2"] eligible, reason = r.is_eligible_for_repo() mock_stale_aa_services.assert_not_called() assert not eligible assert reason == "disqualified by filter1, filter2" @patch("repokid.role.Role._stale_aa_services") def test_role_is_eligible_for_repo_no_aa_data(mock_stale_aa_services, role_dict): r = Role(**role_dict) r.aa_data = [] eligible, reason = r.is_eligible_for_repo() mock_stale_aa_services.assert_not_called() assert not eligible assert reason == "no Access Advisor data available" @patch("repokid.role.Role._stale_aa_services") def test_role_is_eligible_for_repo_no_repoable_permissions( mock_stale_aa_services, role_dict ): r = Role(**role_dict) 
r.repoable_permissions = [] r.scheduled_perms = [] eligible, reason = r.is_eligible_for_repo() mock_stale_aa_services.assert_not_called() assert not eligible assert reason == "no repoable permissions" @patch("repokid.role.Role._stale_aa_services") def test_role_is_eligible_for_repo_stale_aa_data(mock_stale_aa_services, role_dict): mock_stale_aa_services.return_value = ["service1", "service2"] r = Role(**role_dict) eligible, reason = r.is_eligible_for_repo() mock_stale_aa_services.assert_called_once() assert not eligible assert reason == "stale Access Advisor data for service1, service2" def test_role_stale_aa_services(role_dict): r = Role(**role_dict) r.config["repo_requirements"] = {"oldest_aa_data_days": 5} recent_dt = datetime.datetime.now() - datetime.timedelta(days=1) older_dt = datetime.datetime.now() - datetime.timedelta(days=14) r.aa_data = [ {"serviceName": "service1", "lastUpdated": recent_dt.isoformat()}, {"serviceName": "service2", "lastUpdated": recent_dt.isoformat()}, {"serviceName": "service3", "lastUpdated": older_dt.isoformat()}, {"serviceName": "service4", "lastUpdated": older_dt.isoformat()}, ] stale = r._stale_aa_services() assert "service1" not in stale assert "service2" not in stale assert "service3" in stale assert "service4" in stale def test_role_stale_aa_services_no_aa_data(role_dict): r = Role(**role_dict) r.config["repo_requirements"] = {"oldest_aa_data_days": 5} r.aa_data = [] stale = r._stale_aa_services() assert len(stale) == 0 def test_role_update_opt_out(role_dict): r = Role(**role_dict) recent_dt = datetime.datetime.now() - datetime.timedelta(days=1) r.opt_out = {"expire": recent_dt.timestamp()} r._update_opt_out() assert r.opt_out == {} def test_role_update_opt_out_future(role_dict): r = Role(**role_dict) future_dt = datetime.datetime.now() + datetime.timedelta(days=1) r.opt_out = {"expire": future_dt.timestamp()} r._update_opt_out() # opt out should not have been touched since it is not expired assert r.opt_out == {"expire": 
future_dt.timestamp()} @patch("repokid.role.Role.store") def test_role_mark_inactive(mock_store, role_dict): r = Role(**role_dict) r.active = True r.mark_inactive(store=True) assert not r.active mock_store.assert_called_once() assert mock_store.call_args[1]["fields"] == ["active"] @patch("repokid.role.Role.store") def test_role_mark_inactive_no_store(mock_store, role_dict): r = Role(**role_dict) r.active = True r.mark_inactive() assert not r.active mock_store.assert_not_called() def test_role_update(role_dict): r = Role(**role_dict) updates = {"repoable_permissions": 20} r.update(updates, store=False) assert r.repoable_permissions == 20 @patch("repokid.role.get_role_by_id") @patch("repokid.role.set_role_data") def test_role_update_store(mock_set_role_data, mock_get_role_by_id, role_dict): expected = {"RepoablePermissions": 20, "LastUpdated": vars.last_updated} mock_get_role_by_id.return_value = { "LastUpdated": vars.last_updated.strftime("%Y-%m-%d %H:%M") } r = Role(**role_dict) updates = {"repoable_permissions": 20} r.update(updates, store=True) assert r.repoable_permissions == 20 mock_set_role_data.assert_called_once() assert mock_set_role_data.call_args[0][0] == r.role_id # LastUpdated gets set when we store, so we just need to make sure it's different now assert mock_set_role_data.call_args[0][1]["LastUpdated"] > expected["LastUpdated"] # Remove LastUpdated from the fn call and expected dict so we can compare the rest mock_set_role_data.call_args[0][1].pop("LastUpdated") expected.pop("LastUpdated") assert mock_set_role_data.call_args[0][1] == expected def test_role_update_by_alias(role_dict): r = Role(**role_dict) updates = {"RepoablePermissions": 20} r.update(updates, store=False) assert r.repoable_permissions == 20 @patch("repokid.role.AccessAdvisorDatasource.get") @patch("repokid.role.AccessAdvisorDatasource.seed") def test_role_fetch_aa_data(mock_seed_aardvark_data, mock_get_aardvark_data, role_dict): mock_get_aardvark_data.return_value = [{"a": "b"}] r = 
Role(**role_dict) r.fetch_aa_data() assert r.aa_data[0] def test_role_fetch_aa_data_no_arn(role_dict): role_data = copy.deepcopy(role_dict) role_data.pop("arn") role_data.pop("account") r = Role(**role_data) with pytest.raises(ModelError): r.fetch_aa_data() @patch("repokid.role.get_role_by_id") def test_role_fetch(mock_get_role_by_id, role_dict): stored_role_data = copy.deepcopy(role_dict) stored_role_data["repoable_permissions"] = 20 mock_get_role_by_id.return_value = stored_role_data r = Role(**role_dict) assert r.repoable_permissions == 5 r.fetch() assert r.repoable_permissions == 20 @patch("repokid.role.get_role_by_arn") def test_role_fetch_no_id(mock_get_role_by_arn, role_dict): stored_role_data = copy.deepcopy(role_dict) stored_role_data["repoable_permissions"] = 20 mock_get_role_by_arn.return_value = stored_role_data local_role_data = copy.deepcopy(role_dict) local_role_data.pop("role_id") r = Role(**local_role_data) assert r.repoable_permissions == 5 r.fetch() assert r.repoable_permissions == 20 @patch("repokid.role.get_role_by_arn") def test_role_fetch_not_found(mock_get_role_by_arn, role_dict): mock_get_role_by_arn.side_effect = RoleNotFoundError local_role_data = copy.deepcopy(role_dict) local_role_data.pop("role_id") local_role_data.pop("role_name") local_role_data.pop("account") r = Role(**local_role_data) with pytest.raises(RoleNotFoundError): r.fetch() def test_role_fetch_dirty(role_dict): r = Role(**role_dict) r._dirty = True with pytest.raises(IntegrityError): r.fetch() @patch("repokid.role.get_role_by_id") @patch("repokid.role.set_role_data") def test_role_store( mock_set_role_data, mock_get_role_by_id, role_dict, role_dict_with_aliases ): expected = copy.deepcopy(role_dict_with_aliases) expected.pop("RoleId") expected.pop("Account") mock_get_role_by_id.return_value = { "LastUpdated": vars.last_updated.strftime("%Y-%m-%d %H:%M") } r = Role(**role_dict) r.store() mock_set_role_data.assert_called_once() assert mock_set_role_data.call_args[0][0] == 
r.role_id # LastUpdated gets set when we store, so we just need to make sure it's different now assert mock_set_role_data.call_args[0][1]["LastUpdated"] > expected["LastUpdated"] # Remove LastUpdated from the fn call and expected dict so we can compare the rest mock_set_role_data.call_args[0][1].pop("LastUpdated") expected.pop("LastUpdated") assert mock_set_role_data.call_args[0][1] == expected @patch("repokid.role.get_role_by_id") @patch("repokid.role.set_role_data") def test_role_store_fields( mock_set_role_data, mock_get_role_by_id, role_dict, role_dict_with_aliases ): expected = {"RepoablePermissions": 5, "LastUpdated": vars.last_updated} mock_get_role_by_id.return_value = { "LastUpdated": vars.last_updated.strftime("%Y-%m-%d %H:%M") } r = Role(**role_dict) r.store(fields=["repoable_permissions"]) mock_set_role_data.assert_called_once() assert mock_set_role_data.call_args[0][0] == r.role_id # LastUpdated gets set when we store, so we just need to make sure it's different now assert mock_set_role_data.call_args[0][1]["LastUpdated"] > expected["LastUpdated"] # Remove LastUpdated from the fn call and expected dict so we can compare the rest mock_set_role_data.call_args[0][1].pop("LastUpdated") expected.pop("LastUpdated") assert mock_set_role_data.call_args[0][1] == expected @patch("repokid.role.get_role_by_id") def test_role_store_remote_updated( mock_get_role_by_id, role_dict, role_dict_with_aliases ): expected = copy.deepcopy(role_dict_with_aliases) expected.pop("RoleId") expected.pop("RoleName") expected.pop("Account") # simulate the record having been updated in DynamoDB since we last fetched it last_updated = (vars.last_updated + datetime.timedelta(hours=2)).strftime( "%Y-%m-%d %H:%M" ) mock_get_role_by_id.return_value = {"LastUpdated": last_updated} r = Role(**role_dict) with pytest.raises(IntegrityError): r.store() @patch("repokid.role.get_role_by_id") @patch("repokid.utils.dynamo.create_dynamodb_entry") def test_role_store_create( 
mock_create_dynamodb_entry, mock_get_role_by_id, role_dict, role_dict_with_aliases ): expected = copy.deepcopy(role_dict_with_aliases) mock_get_role_by_id.side_effect = RoleNotFoundError r = Role(**role_dict) r.store() mock_create_dynamodb_entry.assert_called_once() # Remove LastUpdated from the fn call and expected dict so we can compare the rest mock_create_dynamodb_entry.call_args[0][0].pop("LastUpdated") expected.pop("LastUpdated") assert mock_create_dynamodb_entry.call_args[0][0] == expected def test_role_update_refreshed(role_dict): r = Role(**role_dict) old_refreshed = ts_parse(r.refreshed) r._update_refreshed() new_refreshed = ts_parse(r.refreshed) assert new_refreshed > old_refreshed
8,996
365
from core.redis import rds from core.triage import Triage from core.parser import ScanParser from db.db_paths import COMMON_WEB_PATHS from core.logging import logger class Rule: def __init__(self): self.rule = 'VLN_92F9' self.rule_severity = 4 self.rule_description = 'This rule checks for open Git Repositories' self.rule_confirm = 'Remote Server Exposes Git Repository' self.rule_details = '' self.rule_mitigation = '''Git repository was found to be accessible. \ Configure the server in a way that makes git repository unreachable to untrusted clients''' self.intensity = 3 self.uris = COMMON_WEB_PATHS def check_rule(self, ip, port, values, conf): t = Triage() p = ScanParser(port, values) domain = p.get_domain() module = p.get_module() if 'http' not in module: return resp = None for uri in self.uris: resp = t.http_request(ip, port, uri=uri + '/.git/HEAD') if resp and resp.text.startswith('ref:'): self.rule_details = 'Identified a git repository at {}'.format(resp.url) rds.store_vuln({ 'ip':ip, 'port':port, 'domain':domain, 'rule_id':self.rule, 'rule_sev':self.rule_severity, 'rule_desc':self.rule_description, 'rule_confirm':self.rule_confirm, 'rule_details':self.rule_details, 'rule_mitigation':self.rule_mitigation }) return
638
681
{ "id":"google", "factoryAlias":"oauth2", "title":"Google", "subtitle":"", "factoryData":"type: google | userEndpoint: NONE | clientId: FIXME | clientSecret: FIXME", "enabled":true }
84
3,200
/** * Copyright 2021 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NN_RESIZE_BILINEAR_GRAD_GPU_KERNEL_H_ #define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NN_RESIZE_BILINEAR_GRAD_GPU_KERNEL_H_ #include <vector> #include "backend/kernel_compiler/gpu/gpu_kernel.h" #include "backend/kernel_compiler/gpu/gpu_kernel_factory.h" #include "backend/kernel_compiler/gpu/cuda_impl/resize_bilinear_impl.cuh" namespace mindspore { namespace kernel { template <typename T> class ResizeBilinearGradGpuKernel : public GpuKernel { public: ResizeBilinearGradGpuKernel() { ResetResource(); } ~ResizeBilinearGradGpuKernel() override = default; const std::vector<size_t> &GetInputSizeList() const override { return input_size_list_; } const std::vector<size_t> &GetOutputSizeList() const override { return output_size_list_; } const std::vector<size_t> &GetWorkspaceSizeList() const override { return workspace_size_list_; } bool Launch(const std::vector<AddressPtr> &inputs, const std::vector<AddressPtr> &workspace, const std::vector<AddressPtr> &outputs, void *stream_ptr) override { if (is_null_input_) { return true; } T *dy = GetDeviceAddress<T>(inputs, 0); float *interim = GetDeviceAddress<float>(workspace, 0); T *dx = GetDeviceAddress<T>(outputs, 0); float h_scale = Scaling(dx_h_, dy_h_, align_corners_); float w_scale = Scaling(dx_w_, dy_w_, align_corners_); CHECK_CUDA_RET_WITH_EXCEPT(kernel_node_, cudaMemsetAsync(dx, 0, 
dx_size_, reinterpret_cast<cudaStream_t>(stream_ptr)), "cudaMemsetAsync dx failed"); CHECK_CUDA_RET_WITH_EXCEPT(kernel_node_, cudaMemsetAsync(interim, 0, workspace_size_, reinterpret_cast<cudaStream_t>(stream_ptr)), "cudaMemsetAsync dx_interim failed"); CalResizeBilinearGrad(dy, n_, c_, dy_h_, dy_w_, dx_h_, dx_w_, h_scale, w_scale, dx, interim, reinterpret_cast<cudaStream_t>(stream_ptr)); return true; } bool Init(const CNodePtr &kernel_node) override { kernel_node_ = kernel_node; size_t input_num = AnfAlgo::GetInputTensorNum(kernel_node); if (input_num != 2) { MS_LOG(ERROR) << "Input number is " << input_num << ", but ResizeBilinearGrad needs 1 input."; return false; } size_t output_num = AnfAlgo::GetOutputTensorNum(kernel_node); if (output_num != 1) { MS_LOG(ERROR) << "Output number is " << output_num << ", but ResizeBilinearGrad has 1 output."; return false; } std::vector<size_t> dy_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 0); std::vector<size_t> x_shape = AnfAlgo::GetPrevNodeOutputInferShape(kernel_node, 1); std::vector<size_t> dx_shape = AnfAlgo::GetOutputInferShape(kernel_node, 0); is_null_input_ = CHECK_NULL_INPUT(dy_shape) || CHECK_NULL_INPUT(x_shape) || CHECK_NULL_INPUT(dx_shape); if (is_null_input_) { MS_LOG(WARNING) << "For 'ResizeBilinearGradGpuKernel', input or output is null."; InitSizeLists(); return true; } if (dy_shape.size() != 4) { MS_LOG(ERROR) << "Input is " << dy_shape.size() << "-D, but ResizeBilinearGrad supports only 4-D inputs."; return false; } if (x_shape.size() != 4) { MS_LOG(ERROR) << "Input is " << x_shape.size() << "-D, but ResizeBilinearGrad supports only 4-D inputs."; return false; } if (dx_shape.size() != 4) { MS_LOG(ERROR) << "For 'ResizeBilinearGradGpuKernel', the rank of output must be 4, but got " << dx_shape.size(); return false; } n_ = SizeToInt(dy_shape[0]); c_ = SizeToInt(dy_shape[1]); dy_h_ = SizeToInt(dy_shape[2]); dy_w_ = SizeToInt(dy_shape[3]); dx_h_ = SizeToInt(dx_shape[2]); dx_w_ = 
SizeToInt(dx_shape[3]); dy_size_ = sizeof(T); for (auto x : dy_shape) { dy_size_ *= x; } dx_size_ = sizeof(T); for (auto x : dx_shape) { dx_size_ *= x; } workspace_size_ = (dx_size_ / sizeof(T)) * sizeof(float); align_corners_ = GetAttr<bool>(kernel_node, "align_corners"); InitSizeLists(); return true; } void ResetResource() noexcept override { align_corners_ = false; is_null_input_ = false; n_ = 0; c_ = 0; dy_h_ = 0; dy_w_ = 0; dx_h_ = 0; dx_w_ = 0; dy_size_ = 0; dx_size_ = 0; workspace_size_ = 0; input_size_list_.clear(); output_size_list_.clear(); workspace_size_list_.clear(); } protected: void InitSizeLists() override { input_size_list_.push_back(dy_size_); workspace_size_list_.push_back(workspace_size_); output_size_list_.push_back(dx_size_); } private: float Scaling(const int in_size, const int out_size, bool align_corners) { return (align_corners && out_size > 1) ? (in_size - 1) / static_cast<float>(out_size - 1) : in_size / static_cast<float>(out_size); } bool align_corners_; bool is_null_input_; int n_; int c_; int dy_h_; int dy_w_; int dx_h_; int dx_w_; size_t dy_size_; size_t dx_size_; size_t workspace_size_; std::vector<size_t> input_size_list_; std::vector<size_t> output_size_list_; std::vector<size_t> workspace_size_list_; }; } // namespace kernel } // namespace mindspore #endif // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NN_RESIZE_BILINEAR_GRAD_GPU_KERNEL_H_
2,566
1,449
# -*- coding: utf-8 -*- # ------------------------------------------------------------------------------ # Name: common/misc.py # Purpose: Everything that doesn't fit into anything else. # # Authors: <NAME> # <NAME> # # Copyright: Copyright © 2009-2020 <NAME> and the music21 Project # License: BSD, see license.txt # ------------------------------------------------------------------------------ ''' If it doesn't fit anywhere else in the common directory, you'll find it here... ''' from typing import Tuple, List, Iterable, Optional, Callable import platform import re __all__ = [ 'flattenList', 'getMissingImportStr', 'getPlatform', 'macOSVersion', 'sortModules', 'pitchList', 'unique', 'runningUnderIPython', 'defaultDeepcopy', 'cleanedFlatNotation', ] import copy import os import sys import textwrap import time # ----------------------------------------------------------------------------- def flattenList(originalList: List) -> List: ''' Flatten a list of lists into a flat list but not a list of lists of lists... >>> l = [[1, 2, 3], [4, 5], [6]] >>> common.flattenList(l) [1, 2, 3, 4, 5, 6] ''' return [item for sublist in originalList for item in sublist] def unique(originalList: Iterable, *, key: Optional[Callable] = None) -> List: ''' Return a List of unique items from an iterable, preserving order. (unlike casting to a set and back) (And why is this not already in Python?) >>> common.misc.unique([3, 2, 4, 3, 2, 5]) [3, 2, 4, 5] Works on any iterable, but order might not be preserved for sets, etc. 
>>> common.misc.unique(range(5)) [0, 1, 2, 3, 4] If key is a function then use that to get the value: >>> s = converter.parse('tinyNotation: c4 E d C f# e a') >>> common.misc.unique(s.recurse().notes, key=lambda n: n.name) [<music21.note.Note C>, <music21.note.Note E>, <music21.note.Note D>, <music21.note.Note F#>, <music21.note.Note A>] ''' seen = set() out = [] for el in originalList: if key: elKey = key(el) else: elKey = el if elKey in seen: continue seen.add(elKey) out.append(el) return out # ------------------------------------------------------------------------------ # provide warning strings to users for use in conditional imports def getMissingImportStr(modNameList): ''' Given a list of missing module names, returns a nicely-formatted message to the user that gives instructions on how to expand music21 with optional packages. >>> print(common.getMissingImportStr(['matplotlib'])) Certain music21 functions might need the optional package matplotlib; if you run into errors, install it by following the instructions at http://mit.edu/music21/doc/installing/installAdditional.html >>> print(common.getMissingImportStr(['matplotlib', 'numpy'])) Certain music21 functions might need these optional packages: matplotlib, numpy; if you run into errors, install them by following the instructions at http://mit.edu/music21/doc/installing/installAdditional.html ''' if not modNameList: return None elif len(modNameList) == 1: m = modNameList[0] return textwrap.dedent(f'''Certain music21 functions might need the optional package {m}; if you run into errors, install it by following the instructions at http://mit.edu/music21/doc/installing/installAdditional.html''') else: m = ', '.join(modNameList) return textwrap.dedent( f'''Certain music21 functions might need these optional packages: {m}; if you run into errors, install them by following the instructions at http://mit.edu/music21/doc/installing/installAdditional.html''') def getPlatform() -> str: ''' Return the name of the 
platform, where platforms are divided between 'win' (for Windows), 'darwin' (for MacOS X), and 'nix' for (GNU/Linux and other variants). Does not discern between Linux/FreeBSD, etc. Lowercase names are for backwards compatibility -- this existed before the platform module. ''' # possible os.name values: 'posix', 'nt', 'os2', 'ce', 'java'. if platform.system() == 'Windows': return 'win' elif platform.system() == 'Darwin': return 'darwin' elif os.name == 'posix': # catch all other nix platforms return 'nix' # this must be after the Mac Darwin check, b/c Darwin is also posix else: return os.name def macOSVersion() -> Tuple[int, int, int]: # pragma: no cover ''' On a Mac returns the current version as a tuple of (currently 3) ints, such as: (10, 5, 6) for 10.5.6. On other systems, returns (0, 0, 0) ''' if getPlatform() != 'darwin': return (0, 0, 0) # Catch minor and maintenance as they could be missing, # e.g., macOS Big Sur 11.0.1 (20B28) corresponds to "10.16". major, *minor_and_maintenance = tuple(int(v) for v in platform.mac_ver()[0].split('.')) minor = minor_and_maintenance[0] if minor_and_maintenance else 0 maintenance = minor_and_maintenance[1] if len(minor_and_maintenance) > 1 else 0 return (major, minor, maintenance) def sortModules(moduleList) -> List[str]: ''' Sort a lost of imported module names such that most recently modified is first. 
In ties, last access time is used then module name Will return a different order each time depending on the last mod time ''' sort = [] modNameToMod = {} for mod in moduleList: modNameToMod[mod.__name__] = mod fp = mod.__file__ # returns the py or pyc file stat = os.stat(fp) lastmod = time.localtime(stat[8]) asctime = time.asctime(lastmod) sort.append((lastmod, asctime, mod.__name__)) sort.sort() sort.reverse() # just return module list outMods = [modNameToMod[modName] for lastmod, asctime, modName in sort] return outMods # ---------------------------- def pitchList(pitchL): ''' utility method that replicates the previous behavior of lists of pitches ''' return '[' + ', '.join([x.nameWithOctave for x in pitchL]) + ']' def runningUnderIPython() -> bool: ''' return bool if we are running under iPython Notebook (not iPython) (no tests, since will be different) This post: https://stackoverflow.com/questions/15411967/how-can-i-check-if-code-is-executed-in-the-ipython-notebook says not to do this, but really, I can't think of another way to have different output as default. Returns True also for Google Colab ''' if sys.stderr.__class__.__name__ == 'OutStream': return True else: return False # ---------------------------- # match collections, defaultdict() # NB -- temp files (tempFile) etc. are in environment.py # ------------------------------------------------------------------------------ def defaultDeepcopy(obj, memo, callInit=True): ''' Unfortunately, it is not possible to do something like:: def __deepcopy__(self, memo): if self._noDeepcopy: return self.__class__() else: copy.deepcopy(self, memo, ignore__deepcopy__=True) Or, else: return NotImplemented so that's what this is for:: def __deepcopy__(self, memo): if self._noDeepcopy: return self.__class__() else: return common.defaultDeepcopy(self, memo) looks through both __slots__ and __dict__ and does a deepcopy of anything in each of them and returns the new object. 
If callInit is False, then only __new__() is called. This is much faster if you're just going to overload every instance variable. ''' if callInit is False: new = obj.__class__.__new__(obj.__class__) else: new = obj.__class__() dictState = getattr(obj, '__dict__', None) if dictState is not None: for k in dictState: # noinspection PyArgumentList setattr(new, k, copy.deepcopy(dictState[k], memo=memo)) slots = set() for cls in obj.__class__.mro(): # it is okay that it's in reverse order, since it's just names slots.update(getattr(cls, '__slots__', ())) for slot in slots: slotValue = getattr(obj, slot, None) # might be none if slot was deleted; it will be recreated here setattr(new, slot, copy.deepcopy(slotValue)) return new def cleanedFlatNotation(music_str: str) -> str: ''' Returns a copy of the given string where each occurrence of a flat note specified with a 'b' is replaced by a '-'. music_str is a string containing a note specified (for example in a chord) Returns a new string with flats only specified with '-'. >>> common.cleanedFlatNotation('Cb') 'C-' ''' return re.sub('([A-Ga-g])b', r'\1-', music_str) if __name__ == '__main__': import music21 music21.mainTest()
3,390
527
<gh_stars>100-1000 """yt-dlg __main__ file. __main__ file is a python 'executable' file which calls the youtube_dl_gui.app main() function in order to start the app. It can be used to start the app from the package directory OR it can be used to start the app from a different directory after you have installed the youtube_dl_gui package. Example: In order to run the app from the package directory. $ cd <package directory> $ python __main__.py In order to run the app AFTER you have installed the package using setup.py. $ yt-dlg """ import sys from pathlib import Path if __package__ is None and not hasattr(sys, "frozen"): # direct call of __main__.py PATH = Path(__file__).resolve().parent sys.path.insert(0, str(PATH)) from youtube_dl_gui.app import main # type: ignore[attr-defined] sys.exit(main())
290
324
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.jclouds.docker.features; import static org.testng.Assert.assertFalse; import static org.testng.Assert.assertNotNull; import static org.testng.Assert.assertNull; import static org.testng.Assert.assertTrue; import java.io.IOException; import java.io.InputStream; import java.util.List; import org.jclouds.docker.compute.BaseDockerApiLiveTest; import org.jclouds.docker.domain.Config; import org.jclouds.docker.domain.Container; import org.jclouds.docker.domain.Image; import org.jclouds.docker.domain.Network; import org.jclouds.docker.options.CreateImageOptions; import org.testng.annotations.AfterClass; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; import com.google.common.base.Predicates; import com.google.common.collect.ImmutableList; import com.google.common.collect.ImmutableMap; import com.google.common.collect.Iterables; @Test(groups = "live", testName = "NetworkApiLiveTest", singleThreaded = true) public class NetworkApiLiveTest extends BaseDockerApiLiveTest { private static final String NETWORK_NAME = "JCLOUDS_NETWORK"; private Network network = null; protected Image image = null; private Container container; @BeforeClass protected void init() { if 
(api.getImageApi().inspectImage(ALPINE_IMAGE_TAG) == null) { CreateImageOptions options = CreateImageOptions.Builder.fromImage(ALPINE_IMAGE_TAG); InputStream createImageStream = api.getImageApi().createImage(options); consumeStream(createImageStream); } image = api.getImageApi().inspectImage(ALPINE_IMAGE_TAG); assertNotNull(image); Config containerConfig = Config.builder().image(image.id()) .cmd(ImmutableList.of("sh", "-c", "touch hello; while true; do echo hello world; sleep 1; done")) .build(); container = api.getContainerApi().createContainer("jclouds-test-network", containerConfig); api.getContainerApi().startContainer(container.id()); container = api.getContainerApi().inspectContainer(container.id()); } @AfterClass(alwaysRun = true) protected void tearDown() { if (container != null) { api.getContainerApi().stopContainer(container.id()); api.getContainerApi().removeContainer(container.id()); } if (network != null) { api().removeNetwork(network.id()); } } public void testCreateNetwork() throws IOException, InterruptedException { network = api().createNetwork(Network.create(NETWORK_NAME, null, null, null, null, ImmutableMap.<String, Network.Details> of(), ImmutableMap.<String, String> of())); assertNotNull(network); assertNotNull(network.id()); } @Test(dependsOnMethods = "testCreateNetwork") public void testGetNetwork() { network = api().inspectNetwork(network.id()); assertNotNull(network); } @Test(dependsOnMethods = "testGetNetwork") public void testAttachContainerToNetwork() { api().connectContainerToNetwork(network.id(), container.id()); container = api.getContainerApi().inspectContainer(container.id()); assertTrue(Iterables.any(container.networkSettings().networks().keySet(), Predicates.equalTo(network.name()))); } @Test(dependsOnMethods = "testAttachContainerToNetwork") public void testDisconnectContainerFromNetwork() { api().disconnectContainerFromNetwork(network.id(), container.id()); container = api.getContainerApi().inspectContainer(container.id()); 
assertFalse(Iterables.any(container.networkSettings().networks().keySet(), Predicates.equalTo(network.name()))); } @Test(dependsOnMethods = "testCreateNetwork") public void testListNetworks() { List<Network> networks = api().listNetworks(); for (Network network : networks) { assertNotNull(network.id()); } } @Test(dependsOnMethods = "testDisconnectContainerFromNetwork") public void testRemoveNetwork() { api().removeNetwork(network.id()); assertNull(api().inspectNetwork(network.id())); network = null; } private NetworkApi api() { return api.getNetworkApi(); } }
1,603
4,538
/** ****************************************************************************** * @file rtl8721d_otf.h * @author * @version V1.0.0 * @date 2016-05-17 * @brief This file contains all the functions prototypes for the flash run time decrypt firmware * library. ****************************************************************************** * @attention * * This module is a confidential and proprietary property of RealTek and * possession or use of this module requires written permission of RealTek. * * Copyright(c) 2015, Realtek Semiconductor Corporation. All rights reserved. ****************************************************************************** */ #ifndef _RTL8721D_RSIP_H_ #define _RTL8721D_RSIP_H_ /** @addtogroup AmebaD_Platform * @{ */ /** @defgroup PROTECTION * @brief PROTECTION driver modules * @{ */ /** @addtogroup PROTECTION * @verbatim ***************************************************************************************** * RSIP(OTF) Introduction ***************************************************************************************** * -used for flash firmware protection, and flash firmware will be encrypted use AES. * -16B KEY shoud be written to EFUSE OTP KEY area use EFUSE_OTF_KEY. * -Enable should be write to EFUSE 0x19[5]. 
***************************************************************************************** * @endverbatim */ /* Exported constants --------------------------------------------------------*/ /* Exported functions --------------------------------------------------------*/ /** @defgroup PROTECTION_Exported_Functions OTF Exported Functions * @{ */ _LONG_CALL_ void RSIP_Cmd(u32 NewStatus); _LONG_CALL_ void RSIP_OTF_init(u8* IV); _LONG_CALL_ void RSIP_OTF_Cmd(u32 NewStatus); _LONG_CALL_ void RSIP_OTF_Mask(u32 MaskIdx, u32 Addr, u32 Len, u32 NewStatus); _LONG_CALL_ u32 RSIP_KEY_Request(u32 KeyTypeBit); _LONG_CALL_ void RSIP_MMU_Config(u32 MMUIdx, u32 AddrStart, u32 AddrEnd, u32 IsMinus, u32 AddrOffset); _LONG_CALL_ void RSIP_MMU_Cmd(u32 MMUIdx, u32 NewStatus); /** * @} */ /* Registers Definitions --------------------------------------------------------*/ /**************************************************************************//** * @defgroup RSIP_Register_Definitions OTF Register Definitions * @{ *****************************************************************************/ /**************************************************************************//** * @defgroup OTF_DEC * @{ *****************************************************************************/ #define REG_SYS_OTF_DEC_CTRL 0x02D8 #define REG_SYS_OTF_DEC_ADDR_MASK0 0x02DC #define REG_SYS_OTF_DEC_ADDR_MASK1 0x02E4 #define REG_SYS_OTF_DEC_ADDR_MASK2 0x02E8 #define REG_SYS_OTF_DEC_ADDR_MASK3 0x02EC #define REG_SYS_OTF_DEC_IV_EXT 0x02F0 /** @} */ /**************************************************************************//** * @defgroup REG_OTF_DEC_CTRL * @{ *****************************************************************************/ #define OTF_FEN_OTFDEC ((u32)0x00000001) /*!<function enable of OTF decoder */ #define OTF_DEC_IV_BYTE_SWAP ((u32)0x00000002) /*!<Big/little endian conversion for input OTF IV */ #define OTF_DEC_KEY_BYTE_SWAP ((u32)0x00000004) /*!<Big/little endian conversion for input OTF KEY*/ 
#define OTF_DEC_CIPHER_BYTE_SWAP ((u32)0x00000008) /*!Big/little endian conversion for calculated cipher*/ /** @} */ /**************************************************************************//** * @defgroup OTF_MASK_ENTRYx_CTRL * @{ *****************************************************************************/ #define OTF_DEC_BIT_MASK_EN ((u32)0x00000001) /*!<Decoder mask enable for address~address+length */ #define OTF_DEC_BIT_MASK_SIZE ((u32)0x000000FF) /*!<Address range for decoder mask, unit is 4KB */ #define OTF_DEC_BIT_SHIFT_SIZE 8 #define IS_OTF_MASK_SIZE(SIZE) ((((SIZE) & ~OTF_DEC_BIT_MASK_SIZE) == 0x00) && (((SIZE) & OTF_DEC_BIT_MASK_SIZE) != 0x00)) /** @} */ /**************************************************************************//** * @defgroup MMU_ENTRYx_CTRL * @{ *****************************************************************************/ #define MMU_BIT_ENTRY_VALID ((u32)0x00000001) /*!< MMU entry_x valid */ #define MMU_BIT_ENTRY_OFFSET_MINUS ((u32)0x00000002) /*!< MMU_ENTRYx_OFFSET flag, 0 Plus, 1 Minus. */ /** @} */ /**************************************************************************//** * @defgroup RDP_ERROR_STATUS * @{ *****************************************************************************/ #define RDP_SYSTEMBIN_WRONG ((u32)0x00000001) /*!<system.bin not load to flash */ #define RDP_RDPBIN_WRONG ((u32)0x00000002) /*!<rdp.bin not load to flash */ #define RDP_KEY_REQUEST_TIMEOUT ((u32)0x00000003) /*!<Key request timeout */ #define RDP_NOT_ENABLE ((u32)0x00000004) /*!<RDP not enable in efuse */ #define RDP_CHECKSUM_ERROR ((u32)0x00000005) /*!<Check sum error */ /** @} */ /** @} */ /** * @} */ /** * @} */ /* Other definations --------------------------------------------------------*/ #define KEY_REQ_POLL_TIMES 0xFF #endif /******************* (C) COPYRIGHT 2016 Realtek Semiconductor *****END OF FILE****/
1,738
6,717
#include "RequestData.h" using namespace ApplicationInsights::core; RequestData::RequestData() : RequestData(L"Microsoft.ApplicationInsights.Request", L"RequestData") { } RequestData::RequestData(std::wstring envelopeName, std::wstring baseType) : Domain(envelopeName, baseType), m_ver(2) { } RequestData::~RequestData() { } void RequestData::Serialize(Serializer& serializer) const { Domain::Serialize(serializer); serializer.WritePropertyName(L"ver"); serializer.WriteIntegerValue(m_ver); serializer.WritePropertyName(L"id"); serializer.WriteStringValue(m_id); if (!m_name.empty()) { serializer.WritePropertyName(L"name"); serializer.WriteStringValue(m_name); } serializer.WritePropertyName(L"startTime"); serializer.WriteStringValue(m_startTime); serializer.WritePropertyName(L"duration"); serializer.WriteStringValue(m_duration); serializer.WritePropertyName(L"responseCode"); serializer.WriteStringValue(m_responseCode); serializer.WritePropertyName(L"success"); serializer.WriteBoolValue(m_success); if (!m_httpMethod.empty()) { serializer.WritePropertyName(L"httpMethod"); serializer.WriteStringValue(m_httpMethod); } if (!m_url.empty()) { serializer.WritePropertyName(L"url"); serializer.WriteStringValue(m_url); } if (m_properties.size() > 0) { serializer.WritePropertyName(L"properties"); serializer.BeginDictionaryValue(); for (auto &it : m_properties) { serializer.WritePropertyName(it.first); serializer.WriteStringValue(it.second); } serializer.EndDictionaryValue(); } if (m_measurements.size() > 0) { serializer.WritePropertyName(L"measurements"); serializer.BeginDictionaryValue(); for (auto &it : m_measurements) { serializer.WritePropertyName(it.first); serializer.WriteDoubleValue(it.second); } serializer.EndDictionaryValue(); } }
890
3,799
/* * Copyright 2017 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package androidx.recyclerview.selection.testing; import static org.junit.Assert.assertEquals; import androidx.recyclerview.selection.FocusDelegate; import androidx.recyclerview.selection.ItemDetailsLookup.ItemDetails; import androidx.recyclerview.widget.RecyclerView; public final class TestFocusDelegate<K> extends FocusDelegate<K> { private K mFocusItemId; private int mFocusPosition; @Override public void clearFocus() { mFocusPosition = RecyclerView.NO_POSITION; mFocusItemId = null; } @Override public void focusItem(ItemDetails<K> item) { mFocusItemId = item.getSelectionKey(); mFocusPosition = item.getPosition(); } @Override public int getFocusedPosition() { return mFocusPosition; } @Override public boolean hasFocusedItem() { return mFocusItemId != null; } public void assertHasFocus(boolean focused) { assertEquals(focused, hasFocusedItem()); } public void assertFocused(String expectedId) { assertEquals(expectedId, mFocusItemId); } }
561
3,861
#ifndef __PCCTS_ISTREAM_H__ #define __PCCTS_ISTREAM_H__ #ifdef PCCTS_USE_NAMESPACE_STD #include <istream> #else #include <istream.h> #endif #endif
81
779
<reponame>it-at-m/digiwf-json-schema<gh_stars>100-1000 package org.everit.json.schema.regexp; import static java.util.Objects.requireNonNull; import java.util.Optional; public interface Regexp { Optional<RegexpMatchingFailure> patternMatchingFailure(String input); } abstract class AbstractRegexp implements Regexp { private final String asString; AbstractRegexp(String asString) { this.asString = requireNonNull(asString, "asString cannot be null"); } @Override public String toString() { return asString; } }
201
1,837
<reponame>xincao9/Zebra<filename>zebra-client/src/main/java/com/dianping/zebra/group/config/DefaultSystemConfigManager.java /* * Copyright (c) 2011-2018, <NAME>. All Rights Reserved. * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * *    http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.dianping.zebra.group.config; import java.beans.PropertyChangeEvent; import java.beans.PropertyChangeListener; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; import com.dianping.zebra.Constants; import com.dianping.zebra.config.ConfigService; import com.dianping.zebra.exception.ZebraConfigException; import com.dianping.zebra.group.config.system.entity.SqlFlowControl; import com.dianping.zebra.group.config.system.entity.SystemConfig; import com.dianping.zebra.group.config.system.transform.DefaultSaxParser; import com.dianping.zebra.util.AppPropertiesUtils; import com.dianping.zebra.util.StringUtils; public class DefaultSystemConfigManager extends AbstractConfigManager implements SystemConfigManager { public static final String DEFAULT_LOCAL_CONFIG = "zebra.system"; public static final int DEFAULT_BUCKET_NUMBER = 108; private SystemConfig systemConfig = new SystemConfig(); private volatile Map<String, SqlFlowControl> sqlFlowControlMap = new HashMap<String, 
SqlFlowControl>(); public DefaultSystemConfigManager(ConfigService configService) { super(configService); } @Override public void addListerner(PropertyChangeListener listener) { listeners.add(listener); } private String getKey(String namespace, String key) { return String.format("%s.%s.%s", namespace, "system", key); } @Override public SystemConfig getSystemConfig() { return this.systemConfig; } @Override public Map<String, SqlFlowControl> getSqlFlowControlMap() { return sqlFlowControlMap; } @Override public void init() { try { this.systemConfig = initSystemConfig(); } catch (Exception e) { throw new ZebraConfigException(String.format( "Fail to initialize DefaultSystemConfigManager with config file[%s].", DEFAULT_LOCAL_CONFIG), e); } } public SystemConfig initSystemConfig() { SystemConfig config = new SystemConfig(); buildRetryTimes(config); buildFlowControl(config); return config; } private void buildRetryTimes(SystemConfig config) { String appName = AppPropertiesUtils.getAppName(); if (!Constants.APP_NO_NAME.equals(appName)) { config.setRetryTimes(getProperty(getKey(appName + ".zebra", Constants.ELEMENT_RETRY_TIMES), config.getRetryTimes())); } } private void buildFlowControl(SystemConfig config) { String appName = AppPropertiesUtils.getAppName(); int bucketId = Math.abs(appName.hashCode()) % DEFAULT_BUCKET_NUMBER; String flowControlConfig = getProperty( getKey(Constants.DEFAULT_DATASOURCE_ZEBRA_SQL_BLACKLIST_PRFIX, Constants.ELEMENT_FLOW_CONTROL + "." 
+ bucketId), null); if (StringUtils.isNotBlank(flowControlConfig)) { logger.info("start to build flow control..."); try { SystemConfig flowControl = DefaultSaxParser.parse(flowControlConfig); List<SqlFlowControl> tempConfig = new ArrayList<SqlFlowControl>(); if (!Constants.APP_NO_NAME.equals(appName)) { for (SqlFlowControl sqlFlowControl : flowControl.getSqlFlowControls()) { if (sqlFlowControl != null) { String app = sqlFlowControl.getApp(); if ("_global_".equalsIgnoreCase(app) || appName.equalsIgnoreCase(app)) { tempConfig.add(sqlFlowControl); logger.info(String.format("get new flow control [ %s : %d ]", sqlFlowControl.getSqlId(), sqlFlowControl.getAllowPercent())); } } } } else { tempConfig.addAll(flowControl.getSqlFlowControls()); } config.getSqlFlowControls().clear(); config.getSqlFlowControls().addAll(tempConfig); Map<String, SqlFlowControl> newSqlFlowControlMap = new HashMap<String, SqlFlowControl>(); for (SqlFlowControl sqlFlowControl : tempConfig) { newSqlFlowControlMap.put(sqlFlowControl.getSqlId(), sqlFlowControl); } this.sqlFlowControlMap = newSqlFlowControlMap; } catch (Exception ignore) { } } } protected void onPropertyUpdated(PropertyChangeEvent evt) { String key = evt.getPropertyName(); synchronized (this.systemConfig) { SystemConfig config = this.systemConfig; String appName = AppPropertiesUtils.getAppName(); int bucketId = Math.abs(appName.hashCode()) % DEFAULT_BUCKET_NUMBER; if (key.equals(getKey(appName + ".zebra", Constants.ELEMENT_RETRY_TIMES))) { config.setRetryTimes(getProperty(getKey(appName + ".zebra", Constants.ELEMENT_RETRY_TIMES), config.getRetryTimes())); } else if (key.equals(getKey(Constants.DEFAULT_DATASOURCE_ZEBRA_SQL_BLACKLIST_PRFIX, Constants.ELEMENT_FLOW_CONTROL + "." + bucketId))) { buildFlowControl(config); } } } }
1,936
765
/***************************************************************************** Licensed to Accellera Systems Initiative Inc. (Accellera) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. Accellera licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. *****************************************************************************/ /***************************************************************************** sc_length_param.h - Original Author: <NAME>, Synopsys, Inc., 2002-03-19 *****************************************************************************/ /***************************************************************************** MODIFICATION LOG - modifiers, enter your name, affiliation, date and changes you are making here. Name, Affiliation, Date: Description of Modification: *****************************************************************************/ // $Log: sc_length_param.h,v $ // Revision 1.3 2011/08/24 22:05:46 acg // <NAME>: initialization changes to remove warnings. // // Revision 1.2 2011/02/18 20:19:15 acg // <NAME>: updating Copyright notice. // // Revision 1.1.1.1 2006/12/15 20:20:05 acg // SystemC 2.3 // // Revision 1.4 2006/05/08 17:50:01 acg // <NAME>: Added David Long's declarations for friend operators, // functions, and methods, to keep the Microsoft compiler happy. 
// // Revision 1.3 2006/01/13 18:49:32 acg // Added $Log command so that CVS check in comments are reproduced in the // source. // #ifndef __SYSTEMC_EXT_DT_INT_SC_LENGTH_PARAM_HH__ #define __SYSTEMC_EXT_DT_INT_SC_LENGTH_PARAM_HH__ #include <iostream> #include "../fx/sc_context.hh" #include "../fx/sc_fxdefs.hh" namespace sc_dt { // classes defined in this module class sc_length_param; // friend operator declarations bool operator == (const sc_length_param &, const sc_length_param &); bool operator != (const sc_length_param &, const sc_length_param &); // ---------------------------------------------------------------------------- // CLASS : sc_length_param // // Length parameter type. // ---------------------------------------------------------------------------- class sc_length_param { public: sc_length_param(); sc_length_param(int); sc_length_param(const sc_length_param &); explicit sc_length_param(sc_without_context); sc_length_param &operator = (const sc_length_param &); friend bool operator == (const sc_length_param &, const sc_length_param &); friend bool operator != (const sc_length_param &, const sc_length_param &); int len() const; void len(int); const std::string to_string() const; void print(::std::ostream & =::std::cout) const; void dump(::std::ostream & =::std::cout) const; private: int m_len; }; } // namespace sc_dt // ---------------------------------------------------------------------------- // TYPEDEF : sc_length_context // // Context type for the length parameter type. 
// ---------------------------------------------------------------------------- namespace sc_dt { extern template class sc_global<sc_length_param>; extern template class sc_context<sc_length_param>; typedef sc_context<sc_length_param> sc_length_context; // IIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIIII inline sc_length_param::sc_length_param() : m_len() { *this = sc_length_context::default_value(); } inline sc_length_param::sc_length_param(int len_) : m_len(len_) { SC_CHECK_WL_(len_); } inline sc_length_param::sc_length_param(const sc_length_param &a) : m_len(a.m_len) {} inline sc_length_param::sc_length_param(sc_without_context) : m_len(SC_DEFAULT_WL_) {} inline sc_length_param & sc_length_param::operator = (const sc_length_param &a) { if (&a != this) { m_len = a.m_len; } return *this; } inline bool operator == (const sc_length_param &a, const sc_length_param &b) { return (a.m_len == b.m_len); } inline bool operator != (const sc_length_param &a, const sc_length_param &b) { return (a.m_len != b.m_len); } inline int sc_length_param::len() const { return m_len; } inline void sc_length_param::len(int len_) { SC_CHECK_WL_(len_); m_len = len_; } inline ::std::ostream & operator << (::std::ostream &os, const sc_length_param &a) { a.print(os); return os; } } // namespace sc_dt #endif // __SYSTEMC_EXT_DT_INT_SC_LENGTH_PARAM_HH__
1,584
521
/* $Id: RTPathSplit.cpp $ */ /** @file * IPRT - RTPathSplit */ /* * Copyright (C) 2013-2017 Oracle Corporation * * This file is part of VirtualBox Open Source Edition (OSE), as * available from http://www.virtualbox.org. This file is free software; * you can redistribute it and/or modify it under the terms of the GNU * General Public License (GPL) as published by the Free Software * Foundation, in version 2 as it comes in the "COPYING" file of the * VirtualBox OSE distribution. VirtualBox OSE is distributed in the * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind. * * The contents of this file may alternatively be used under the terms * of the Common Development and Distribution License Version 1.0 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the * VirtualBox OSE distribution, in which case the provisions of the * CDDL are applicable instead of those of the GPL. * * You may elect to license modified versions of this file under the * terms and conditions of either the GPL or the CDDL or both. */ /********************************************************************************************************************************* * Header Files * *********************************************************************************************************************************/ #include "internal/iprt.h" #include <iprt/path.h> #include <iprt/assert.h> #include <iprt/err.h> #include <iprt/string.h> RTDECL(int) RTPathSplit(const char *pszPath, PRTPATHSPLIT pSplit, size_t cbSplit, uint32_t fFlags) { /* * Input validation. */ AssertReturn(cbSplit >= RT_UOFFSETOF(RTPATHSPLIT, apszComps), VERR_INVALID_PARAMETER); AssertPtrReturn(pSplit, VERR_INVALID_POINTER); AssertPtrReturn(pszPath, VERR_INVALID_POINTER); AssertReturn(*pszPath, VERR_PATH_ZERO_LENGTH); AssertReturn(RTPATH_STR_F_IS_VALID(fFlags, 0), VERR_INVALID_FLAGS); /* * Use RTPathParse to do the parsing. 
* - This makes the ASSUMPTION that the output of this function is greater * or equal to that of RTPathParsed. * - We're aliasing the buffer here, so use volatile to avoid issues due to * compiler optimizations. */ RTPATHPARSED volatile *pParsedVolatile = (RTPATHPARSED volatile *)pSplit; RTPATHSPLIT volatile *pSplitVolatile = (RTPATHSPLIT volatile *)pSplit; AssertCompile(sizeof(*pParsedVolatile) <= sizeof(*pSplitVolatile)); AssertCompile(sizeof(pParsedVolatile->aComps[0]) <= sizeof(pSplitVolatile->apszComps[0])); int rc = RTPathParse(pszPath, (PRTPATHPARSED)pParsedVolatile, cbSplit, fFlags); if (RT_FAILURE(rc) && rc != VERR_BUFFER_OVERFLOW) return rc; /* * Calculate the required buffer space. */ uint16_t const cComps = pParsedVolatile->cComps; uint16_t const fProps = pParsedVolatile->fProps; uint16_t const cchPath = pParsedVolatile->cchPath; uint16_t const offSuffix = pParsedVolatile->offSuffix; uint32_t cbNeeded = RT_UOFFSETOF_DYN(RTPATHSPLIT, apszComps[cComps]) + cchPath + RTPATH_PROP_FIRST_NEEDS_NO_SLASH(fProps) /* zero terminator for root spec. */ - RT_BOOL(fProps & RTPATH_PROP_DIR_SLASH) /* counted by cchPath, not included in the comp str. */ + 1; /* zero terminator. */ if (cbNeeded > cbSplit) { pSplitVolatile->cbNeeded = cbNeeded; return VERR_BUFFER_OVERFLOW; } Assert(RT_SUCCESS(rc)); /* * Convert the array and copy the strings, both backwards. */ char *psz = (char *)pSplit + cbNeeded; uint32_t idxComp = cComps - 1; /* the final component first (because of suffix handling). 
*/ uint16_t offComp = pParsedVolatile->aComps[idxComp].off; uint16_t cchComp = pParsedVolatile->aComps[idxComp].cch; *--psz = '\0'; psz -= cchComp; memcpy(psz, &pszPath[offComp], cchComp); pSplitVolatile->apszComps[idxComp] = psz; char *pszSuffix; if (offSuffix >= offComp + cchComp) pszSuffix = &psz[cchComp]; else pszSuffix = &psz[offSuffix - offComp]; /* the remainder */ while (idxComp-- > 0) { offComp = pParsedVolatile->aComps[idxComp].off; cchComp = pParsedVolatile->aComps[idxComp].cch; *--psz = '\0'; psz -= cchComp; memcpy(psz, &pszPath[offComp], cchComp); pSplitVolatile->apszComps[idxComp] = psz; } /* * Store / reshuffle the non-array bits. This MUST be done after finishing * the array processing because there may be members in RTPATHSPLIT * overlapping the array of RTPATHPARSED. */ AssertCompileMembersSameSizeAndOffset(RTPATHPARSED, cComps, RTPATHSPLIT, cComps); Assert(pSplitVolatile->cComps == cComps); AssertCompileMembersSameSizeAndOffset(RTPATHPARSED, fProps, RTPATHSPLIT, fProps); Assert(pSplitVolatile->fProps == fProps); AssertCompileMembersSameSizeAndOffset(RTPATHPARSED, cchPath, RTPATHSPLIT, cchPath); Assert(pSplitVolatile->cchPath == cchPath); pSplitVolatile->u16Reserved = 0; pSplitVolatile->cbNeeded = cbNeeded; pSplitVolatile->pszSuffix = pszSuffix; return rc; }
2,236
368
<gh_stars>100-1000 /* * Copyright (c) 2016-2019 <NAME> * * This software is provided 'as-is', without any express or implied * warranty. In no event will the authors be held liable for any damages * arising from the use of this software. * Permission is granted to anyone to use this software for any purpose, * including commercial applications, and to alter it and redistribute it * freely, subject to the following restrictions: * 1. The origin of this software must not be misrepresented; you must not * claim that you wrote the original software. If you use this software * in a product, an acknowledgment in the product documentation would be * appreciated but is not required. * 2. Altered source versions must be plainly marked as such, and must not be * misrepresented as being the original software. * 3. This notice may not be removed or altered from any source distribution. */ #include <bounce/collision/shapes/shape.h> #include <bounce/collision/shapes/sphere_shape.h> #include <bounce/collision/shapes/capsule_shape.h> #include <bounce/collision/shapes/triangle_shape.h> #include <bounce/collision/shapes/hull_shape.h> #include <bounce/collision/shapes/mesh_shape.h> #include <bounce/collision/geometry/hull.h> #include <bounce/collision/geometry/mesh.h> #include <bounce/common/memory/block_allocator.h> #include <bounce/common/draw.h> void b3Shape::Destroy(b3Shape* shape, b3BlockAllocator* allocator) { switch (shape->GetType()) { case e_sphere: { b3SphereShape* sphere = (b3SphereShape*)shape; sphere->~b3SphereShape(); allocator->Free(shape, sizeof(b3SphereShape)); break; } case e_capsule: { b3CapsuleShape* capsule = (b3CapsuleShape*)shape; capsule->~b3CapsuleShape(); allocator->Free(shape, sizeof(b3CapsuleShape)); break; } case e_triangle: { b3TriangleShape* triangle = (b3TriangleShape*)shape; triangle->~b3TriangleShape(); allocator->Free(shape, sizeof(b3TriangleShape)); break; } case e_hull: { b3HullShape* hull = (b3HullShape*)shape; hull->~b3HullShape(); 
allocator->Free(shape, sizeof(b3HullShape)); break; } case e_mesh: { b3MeshShape* mesh = (b3MeshShape*)shape; mesh->~b3MeshShape(); allocator->Free(shape, sizeof(b3MeshShape)); break; } default: { B3_ASSERT(false); } } } void b3Shape::Draw(b3Draw* draw, const b3Transform& xf, const b3Color& color) const { switch (m_type) { case b3Shape::e_sphere: { const b3SphereShape* sphere = (b3SphereShape*)this; b3Vec3 p = xf * sphere->m_center; draw->DrawPoint(p, scalar(4), color); break; } case b3Shape::e_capsule: { const b3CapsuleShape* capsule = (b3CapsuleShape*)this; b3Vec3 p1 = xf * capsule->m_vertex1; b3Vec3 p2 = xf * capsule->m_vertex2; draw->DrawPoint(p1, scalar(4), color); draw->DrawPoint(p2, scalar(4), color); draw->DrawSegment(p1, p2, color); break; } case b3Shape::e_triangle: { const b3TriangleShape* triangle = (b3TriangleShape*)this; b3Vec3 v1 = xf * triangle->m_vertex1; b3Vec3 v2 = xf * triangle->m_vertex2; b3Vec3 v3 = xf * triangle->m_vertex3; b3Vec3 n = b3Cross(v2 - v1, v3 - v1); n.Normalize(); draw->DrawTriangle(v1, v2, v3, color); break; } case b3Shape::e_hull: { const b3HullShape* hs = (b3HullShape*)this; const b3Hull* hull = hs->m_hull; for (u32 i = 0; i < hull->edgeCount; i += 2) { const b3HalfEdge* edge = hull->GetEdge(i); const b3HalfEdge* twin = hull->GetEdge(i + 1); b3Vec3 p1 = xf * hull->vertices[edge->origin]; b3Vec3 p2 = xf * hull->vertices[twin->origin]; draw->DrawSegment(p1, p2, color); } break; } case b3Shape::e_mesh: { const b3MeshShape* ms = (b3MeshShape*)this; const b3Mesh* mesh = ms->m_mesh; for (u32 i = 0; i < mesh->triangleCount; ++i) { const b3MeshTriangle* t = mesh->triangles + i; b3Vec3 p1 = xf * b3Mul(ms->m_scale, mesh->vertices[t->v1]); b3Vec3 p2 = xf * b3Mul(ms->m_scale, mesh->vertices[t->v2]); b3Vec3 p3 = xf * b3Mul(ms->m_scale, mesh->vertices[t->v3]); draw->DrawTriangle(p1, p2, p3, color); } break; } default: { break; } }; } void b3Shape::DrawSolid(b3Draw* draw, const b3Transform& xf, const b3Color& color) const { switch (m_type) { 
case b3Shape::e_sphere: { const b3SphereShape* sphere = (b3SphereShape*)this; b3Vec3 center = xf * sphere->m_center; draw->DrawSolidSphere(xf.rotation.GetYAxis(), center, sphere->m_radius, color); break; } case b3Shape::e_capsule: { const b3CapsuleShape* capsule = (b3CapsuleShape*)this; b3Vec3 c1 = xf * capsule->m_vertex1; b3Vec3 c2 = xf * capsule->m_vertex2; draw->DrawSolidCapsule(xf.rotation.GetYAxis(), c1, c2, capsule->m_radius, color); break; } case b3Shape::e_triangle: { const b3TriangleShape* triangle = (b3TriangleShape*)this; b3Vec3 v1 = xf * triangle->m_vertex1; b3Vec3 v2 = xf * triangle->m_vertex2; b3Vec3 v3 = xf * triangle->m_vertex3; b3Vec3 n = b3Cross(v2 - v1, v3 - v1); n.Normalize(); draw->DrawSolidTriangle(-n, v3, v2, v1, color); draw->DrawSolidTriangle(n, v1, v2, v3, color); break; } case b3Shape::e_hull: { const b3HullShape* hullShape = (b3HullShape*)this; const b3Hull* hull = hullShape->m_hull; for (u32 i = 0; i < hull->faceCount; ++i) { const b3Face* face = hull->GetFace(i); const b3HalfEdge* begin = hull->GetEdge(face->edge); b3Vec3 n = b3Mul(xf.rotation, hull->planes[i].normal); const b3HalfEdge* edge = hull->GetEdge(begin->next); do { u32 i1 = begin->origin; u32 i2 = edge->origin; const b3HalfEdge* next = hull->GetEdge(edge->next); u32 i3 = next->origin; b3Vec3 p1 = xf * hull->vertices[i1]; b3Vec3 p2 = xf * hull->vertices[i2]; b3Vec3 p3 = xf * hull->vertices[i3]; draw->DrawSolidTriangle(n, p1, p2, p3, color); edge = next; } while (hull->GetEdge(edge->next) != begin); } break; } case b3Shape::e_mesh: { const b3MeshShape* meshShape = (b3MeshShape*)this; const b3Mesh* mesh = meshShape->m_mesh; for (u32 i = 0; i < mesh->triangleCount; ++i) { const b3MeshTriangle* t = mesh->triangles + i; b3Vec3 p1 = xf * b3Mul(meshShape->m_scale, mesh->vertices[t->v1]); b3Vec3 p2 = xf * b3Mul(meshShape->m_scale, mesh->vertices[t->v2]); b3Vec3 p3 = xf * b3Mul(meshShape->m_scale, mesh->vertices[t->v3]); b3Vec3 n1 = b3Cross(p2 - p1, p3 - p1); n1.Normalize(); 
draw->DrawSolidTriangle(n1, p1, p2, p3, color); b3Vec3 n2 = -n1; draw->DrawSolidTriangle(n2, p3, p2, p1, color); } break; } default: { break; } }; }
2,877
2,073
<filename>Hadoop/apache-mahout-0.10.2-compile/math/src/main/java/org/apache/mahout/math/random/Missing.java /* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.mahout.math.random; import java.util.Random; import org.apache.mahout.common.RandomUtils; /** * Models data with missing values. Note that all variables with the same fraction of missing * values will have the same sequence of missing values. Similarly, if two variables have * missing probabilities of p1 > p2, then all of the p2 missing values will also be missing for * p1. */ public final class Missing<T> implements Sampler<T> { private final Random gen; private final double p; private final Sampler<T> delegate; private final T missingMarker; public Missing(int seed, double p, Sampler<T> delegate, T missingMarker) { this.p = p; this.delegate = delegate; this.missingMarker = missingMarker; gen = RandomUtils.getRandom(seed); } public Missing(double p, Sampler<T> delegate, T missingMarker) { this(1, p, delegate, missingMarker); } public Missing(double p, Sampler<T> delegate) { this(1, p, delegate, null); } @Override public T sample() { if (gen.nextDouble() >= p) { return delegate.sample(); } else { return missingMarker; } } }
615
1,006
/**************************************************************************** * boards/arm/cxd56xx/common/src/cxd56_spisd.c * * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. The * ASF licenses this file to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * ****************************************************************************/ /**************************************************************************** * Included Files ****************************************************************************/ #include <nuttx/config.h> #include <debug.h> #include <nuttx/mmcsd.h> #include <nuttx/board.h> #include <nuttx/fs/fs.h> #include "cxd56_spi.h" #include "cxd56_gpio.h" /**************************************************************************** * Pre-processor Definitions ****************************************************************************/ /* Configuration ************************************************************/ #ifndef CONFIG_CXD56_SPISD_SLOT_NO # define CONFIG_CXD56_SPISD_SLOT_NO 0 #endif /* Please configure the pin assignment for your board */ #ifndef MMCSD_DETECT # define MMCSD_DETECT PIN_I2S0_DATA_OUT #endif /**************************************************************************** * Public Functions ****************************************************************************/ 
/**************************************************************************** * Name: board_spisd_initialize * * Description: * Initialize the SPI-based SD card. * ****************************************************************************/ int board_spisd_initialize(int minor, int bus) { int ret; FAR struct spi_dev_s *spi; /* Enable input of detect pin */ cxd56_gpio_config(MMCSD_DETECT, true); /* Initialize spi deivce */ spi = cxd56_spibus_initialize(bus); if (!spi) { ferr("ERROR: Failed to initialize spi%d.\n", bus); return -ENODEV; } /* Get the SPI driver instance for the SD chip select */ finfo("Initializing SPI for the MMC/SD slot\n"); ret = mmcsd_spislotinitialize(minor, CONFIG_CXD56_SPISD_SLOT_NO, spi); if (ret < 0) { ferr("ERROR: Failed to bind SPI device to MMC/SD slot %d: %d\n", CONFIG_CXD56_SPISD_SLOT_NO, ret); return ret; } /* Mount filesystem */ ret = nx_mount("/dev/mmcsd0", "/mnt/sd0", "vfat", 0, NULL); if (ret < 0) { _err("ERROR: Failed to mount the SDCARD. %d\n", ret); } return OK; } /**************************************************************************** * Name: board_spisd_status * * Description: * Get the status whether SD Card is present or not. * This function is called only from cxd56_spi.c. * * Returned Value: * Return SPI_STATUS_PRESENT if SD Card is present. Otherwise, return 0. * ****************************************************************************/ uint8_t board_spisd_status(FAR struct spi_dev_s *dev, uint32_t devid) { uint8_t ret = 0; if (devid == SPIDEV_MMCSD(0)) { /* MMCSD_DETECT is mapping to SD Card detect pin * MMCSD_DETECT = 0: Inserted * MMCSD_DETECT = 1: Removed */ ret = cxd56_gpio_read(MMCSD_DETECT) ? 0 : SPI_STATUS_PRESENT; } return ret; }
1,168
568
package com.novoda.stickystaggered; import android.content.Context; import android.util.AttributeSet; import com.etsy.android.grid.StaggeredGridView; public class NovoStaggeredGridView extends StaggeredGridView { public NovoStaggeredGridView(Context context) { super(context); } public NovoStaggeredGridView(Context context, AttributeSet attrs) { super(context, attrs); } public NovoStaggeredGridView(Context context, AttributeSet attrs, int defStyle) { super(context, attrs, defStyle); } public void setOffsetY(int offset) { offsetChildrenTopAndBottom(offset); } }
230
2,039
package org.nd4j.linalg.function; /** * BiConsumer is an operation that accepts two arguments and returns no result. * * @param <T> Type of first argument * @param <U> Type of second argument */ public interface BiConsumer<T, U> { /** * Perform the operation on the given arguments * * @param t First input * @param u Second input */ void accept(T t, U u); }
138
7,482
/* * Copyright (c) 2020 Raspberry Pi (Trading) Ltd. * * SPDX-License-Identifier: BSD-3-Clause */ #ifndef _HARDWARE_STRUCTS_PIO_H #define _HARDWARE_STRUCTS_PIO_H #include "hardware/address_mapped.h" #include "hardware/platform_defs.h" #include "hardware/regs/pio.h" typedef struct { io_rw_32 ctrl; io_ro_32 fstat; io_rw_32 fdebug; io_ro_32 flevel; io_wo_32 txf[NUM_PIO_STATE_MACHINES]; io_ro_32 rxf[NUM_PIO_STATE_MACHINES]; io_rw_32 irq; io_wo_32 irq_force; io_rw_32 input_sync_bypass; io_rw_32 dbg_padout; io_rw_32 dbg_padoe; io_rw_32 dbg_cfginfo; io_wo_32 instr_mem[32]; struct pio_sm_hw { io_rw_32 clkdiv; io_rw_32 execctrl; io_rw_32 shiftctrl; io_ro_32 addr; io_rw_32 instr; io_rw_32 pinctrl; } sm[NUM_PIO_STATE_MACHINES]; io_rw_32 intr; io_rw_32 inte0; io_rw_32 intf0; io_ro_32 ints0; io_rw_32 inte1; io_rw_32 intf1; io_ro_32 ints1; } pio_hw_t; #define pio0_hw ((pio_hw_t *const)PIO0_BASE) #define pio1_hw ((pio_hw_t *const)PIO1_BASE) #endif
601
6,140
import shutil, os, datetime import numpy as np import tensorflow as tf BUCKET = None # set from task.py PATTERN = 'of' # gets all files # Determine CSV, label, and key columns CSV_COLUMNS = 'weight_pounds,is_male,mother_age,plurality,gestation_weeks,key'.split(',') LABEL_COLUMN = 'weight_pounds' KEY_COLUMN = 'key' # Set default values for each CSV column DEFAULTS = [[0.0], ['null'], [0.0], ['null'], [0.0], ['nokey']] # Define some hyperparameters TRAIN_EXAMPLES = 1000 * 1000 EVAL_STEPS = None NUM_EVALS = 10 BATCH_SIZE = 512 NEMBEDS = 3 NNSIZE = [64, 16, 4] # Create an input function reading a file using the Dataset API def features_and_labels(row_data): for unwanted_col in ['key']: row_data.pop(unwanted_col) label = row_data.pop(LABEL_COLUMN) return row_data, label # features, label # load the training data def load_dataset(pattern, batch_size=1, mode=tf.estimator.ModeKeys.EVAL): dataset = (tf.data.experimental.make_csv_dataset(pattern, batch_size, CSV_COLUMNS, DEFAULTS) .map(features_and_labels) # features, label ) if mode == tf.estimator.ModeKeys.TRAIN: dataset = dataset.shuffle(1000).repeat() dataset = dataset.prefetch(1) # take advantage of multi-threading; 1=AUTOTUNE return dataset ## Build a Keras wide-and-deep model using its Functional API def rmse(y_true, y_pred): return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true))) # Helper function to handle categorical columns def categorical_fc(name, values): orig = tf.feature_column.categorical_column_with_vocabulary_list(name, values) wrapped = tf.feature_column.indicator_column(orig) return orig, wrapped def build_wd_model(dnn_hidden_units = [64, 32], nembeds = 3): # input layer deep_inputs = { colname : tf.keras.layers.Input(name=colname, shape=(), dtype='float32') for colname in ['mother_age', 'gestation_weeks'] } wide_inputs = { colname : tf.keras.layers.Input(name=colname, shape=(), dtype='string') for colname in ['is_male', 'plurality'] } inputs = {**wide_inputs, **deep_inputs} # feature columns from 
inputs deep_fc = { colname : tf.feature_column.numeric_column(colname) for colname in ['mother_age', 'gestation_weeks'] } wide_fc = {} is_male, wide_fc['is_male'] = categorical_fc('is_male', ['True', 'False', 'Unknown']) plurality, wide_fc['plurality'] = categorical_fc('plurality', ['Single(1)', 'Twins(2)', 'Triplets(3)', 'Quadruplets(4)', 'Quintuplets(5)','Multiple(2+)']) # bucketize the float fields. This makes them wide age_buckets = tf.feature_column.bucketized_column(deep_fc['mother_age'], boundaries=np.arange(15,45,1).tolist()) wide_fc['age_buckets'] = tf.feature_column.indicator_column(age_buckets) gestation_buckets = tf.feature_column.bucketized_column(deep_fc['gestation_weeks'], boundaries=np.arange(17,47,1).tolist()) wide_fc['gestation_buckets'] = tf.feature_column.indicator_column(gestation_buckets) # cross all the wide columns. We have to do the crossing before we one-hot encode crossed = tf.feature_column.crossed_column( [is_male, plurality, age_buckets, gestation_buckets], hash_bucket_size=20000) deep_fc['crossed_embeds'] = tf.feature_column.embedding_column(crossed, nembeds) # the constructor for DenseFeatures takes a list of numeric columns # The Functional API in Keras requires that you specify: LayerConstructor()(inputs) wide_inputs = tf.keras.layers.DenseFeatures(wide_fc.values(), name='wide_inputs')(inputs) deep_inputs = tf.keras.layers.DenseFeatures(deep_fc.values(), name='deep_inputs')(inputs) # hidden layers for the deep side layers = [int(x) for x in dnn_hidden_units] deep = deep_inputs for layerno, numnodes in enumerate(layers): deep = tf.keras.layers.Dense(numnodes, activation='relu', name='dnn_{}'.format(layerno+1))(deep) deep_out = deep # linear model for the wide side wide_out = tf.keras.layers.Dense(10, activation='relu', name='linear')(wide_inputs) # concatenate the two sides both = tf.keras.layers.concatenate([deep_out, wide_out], name='both') # final output is a linear activation because this is regression output = 
tf.keras.layers.Dense(1, activation='linear', name='weight')(both) model = tf.keras.models.Model(inputs, output) model.compile(optimizer='adam', loss='mse', metrics=[rmse, 'mse']) return model # The main function def train_and_evaluate(output_dir): model = build_wd_model(NNSIZE, NEMBEDS) print("Here is our Wide-and-Deep architecture so far:\n") print(model.summary()) train_file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, 'train', PATTERN) eval_file_path = 'gs://{}/babyweight/preproc/{}*{}*'.format(BUCKET, 'eval', PATTERN) trainds = load_dataset('train*', BATCH_SIZE, tf.estimator.ModeKeys.TRAIN) evalds = load_dataset('eval*', 1000, tf.estimator.ModeKeys.EVAL) if EVAL_STEPS: evalds = evalds.take(EVAL_STEPS) steps_per_epoch = TRAIN_EXAMPLES // (BATCH_SIZE * NUM_EVALS) checkpoint_path = os.path.join(output_dir, 'checkpoints/babyweight') cp_callback = tf.keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1) history = model.fit(trainds, validation_data=evalds, epochs=NUM_EVALS, steps_per_epoch=steps_per_epoch, verbose=2, # 0=silent, 1=progress bar, 2=one line per epoch callbacks=[cp_callback]) EXPORT_PATH = os.path.join(output_dir, datetime.datetime.now().strftime('%Y%m%d%H%M%S')) tf.saved_model.save(model, EXPORT_PATH) # with default serving function print("Exported trained model to {}".format(EXPORT_PATH))
2,583
3,508
package com.fishercoder.solutions; import java.util.HashMap; import java.util.Map; public class _705 { public static class Solution1 { class MyHashSet { Map<Integer, Integer> map; /** * Initialize your data structure here. */ public MyHashSet() { map = new HashMap<>(); } public void add(int key) { map.put(key, 0); } public void remove(int key) { if (map.containsKey(key)) { map.remove(key); } } /** * Returns true if this set contains the specified element */ public boolean contains(int key) { return map.containsKey(key); } } } }
467
435
<gh_stars>100-1000 { "description": "MVP: Minimum Viable Product. We all follow agile product development\nprocess, iterate fast, fail fast and in the disguise of a MVP we cut\ncorners to release a product to the market ASAP. In this process, their\nare several compromises we make in the tech set up and the codebase to\njust get it out there. These compromises more often than not always\nbites us back.\n\nAnd this is what the talk is about. The assumptions which never hold,\nthe implementation which is never revisited, and the MVP which never\nends up being just a MVP. By the end of talk, you would know the\ncautionary tale of the most common mistakes everyone makes while\ndeveloping a new product and some measures on how you can avoid them.\n", "duration": 1416, "language": "eng", "published_at": "2019-08-06T12:14:16.000Z", "recorded": "2019-03-24", "speakers": [ "<NAME>" ], "thumbnail_url": "https://i.ytimg.com/vi/LtheyJas4As/hqdefault.jpg", "title": "MVP, is never just a MVP", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=LtheyJas4As" } ] }
373
340
<reponame>leha-bot/eve /** EVE - Expressive Vector Engine Copyright : EVE Contributors & Maintainers SPDX-License-Identifier: MIT **/ //================================================================================================== #include "test.hpp" #include <eve/constant/valmax.hpp> #include <eve/constant/valmin.hpp> #include <eve/constant/mone.hpp> #include <eve/function/abs.hpp> #include <eve/function/min.hpp> #include <eve/function/negabsmin.hpp> #include <eve/function/pedantic/negabsmin.hpp> #include <eve/function/numeric/negabsmin.hpp> #include <eve/function/saturated/negabsmin.hpp> #include <eve/function/diff/negabsmin.hpp> #include <eve/function/sign.hpp> #include <eve/function/is_negative.hpp> #include <algorithm> //================================================================================================== // Types tests //================================================================================================== EVE_TEST_TYPES( "Check return types of negabsmin" , eve::test::simd::all_types ) <typename T>(eve::as<T>) { using v_t = eve::element_type_t<T>; TTS_EXPR_IS( eve::negabsmin(T(), T(), T() ) , T); TTS_EXPR_IS( eve::negabsmin(T(), v_t(), T()) , T); TTS_EXPR_IS( eve::negabsmin(v_t(), T(), T()) , T); TTS_EXPR_IS( eve::negabsmin(T(), T(), v_t() ) , T); TTS_EXPR_IS( eve::negabsmin(v_t(), v_t(), T()) , T); TTS_EXPR_IS( eve::negabsmin(v_t(), T(), v_t()) , T); TTS_EXPR_IS( eve::negabsmin(v_t(), v_t(), v_t()) , v_t); TTS_EXPR_IS(eve::negabsmin( T(), T(), T()), T ); TTS_EXPR_IS(eve::negabsmin( T(), T(), v_t()), T ); TTS_EXPR_IS(eve::negabsmin( T(), v_t(), T()), T ); TTS_EXPR_IS(eve::negabsmin( T(), v_t(), v_t()), T ); TTS_EXPR_IS(eve::negabsmin(v_t(), T(), T()), T ); TTS_EXPR_IS(eve::negabsmin(v_t(), T(), v_t()), T ); TTS_EXPR_IS(eve::negabsmin(v_t(), v_t(), T()), T ); TTS_EXPR_IS(eve::negabsmin(v_t(), v_t(), v_t()), v_t); }; //================================================================================================== //== negabsmin 
tests //================================================================================================== EVE_TEST( "Check behavior of negabsmin on all types full range" , eve::test::simd::all_types , eve::test::generate ( eve::test::randoms(eve::valmin, eve::valmax) , eve::test::randoms(eve::valmin, eve::valmax) , eve::test::randoms(eve::valmin, eve::valmax) , eve::test::logicals(0, 3) ) ) <typename T, typename M>( T const& a0, T const& a1, T const& a2, M const & t) { using eve::negabsmin; using eve::detail::map; using eve::abs; using v_t = eve::element_type_t<T>; auto m = [](auto a, auto b, auto c)-> v_t {return -eve::abs(eve::min(a, b, c)); }; auto dm1 = [](auto a, auto b, auto c)-> v_t {return eve::min(a, b, c) == a? -eve::sign(a) : 0; }; auto dm2 = [](auto a, auto b, auto c)-> v_t {return eve::min(a, b, c) == b? -eve::sign(b) : 0; }; auto dm3 = [](auto a, auto b, auto c)-> v_t {return eve::min(a, b, c) == c? -eve::sign(c) : 0; }; TTS_ULP_EQUAL(negabsmin((a0), (a1), (a2)), map(m, a0, a1, a2), 2); TTS_ULP_EQUAL(eve::pedantic(negabsmin)((a0), (a1), (a2)), map(m, a0, a1, a2), 2); TTS_ULP_EQUAL(eve::numeric (negabsmin)((a0), (a1), (a2)), map(m, a0, a1, a2), 2); TTS_ULP_EQUAL(eve::saturated(negabsmin)((a0), (a1), (a2)), map(m, a0, a1, a2), 2); TTS_ULP_EQUAL(eve::diff_1st(negabsmin)((a0), (a1), (a2)), map(dm1, a0, a1, a2), 2); TTS_ULP_EQUAL(eve::diff_2nd(negabsmin)((a0), (a1), (a2)), map(dm2, a0, a1, a2), 2); TTS_ULP_EQUAL(eve::diff_3rd(negabsmin)((a0), (a1), (a2)), map(dm3, a0, a1, a2), 2); TTS_IEEE_EQUAL(negabsmin[t](a0, a1), eve::if_else(t, negabsmin(a0, a1), a0)); }; EVE_TEST_TYPES( "Check values of negabsmin" , eve::test::simd::ieee_reals ) <typename T>(eve::as<T>) { using v_t = eve::element_type_t<T>; TTS_IEEE_EQUAL(eve::pedantic(eve::negabsmin)(eve::nan(eve::as<T>()) , T(1) ) , eve::nan(eve::as<T>()) ); TTS_IEEE_EQUAL(eve::pedantic(eve::negabsmin)(eve::nan(eve::as<v_t>()) , T(1) ) , eve::nan(eve::as<T>()) ); 
TTS_IEEE_EQUAL(eve::pedantic(eve::negabsmin)(eve::nan(eve::as<T>()) , v_t(1)) , eve::nan(eve::as<T>()) ); TTS_IEEE_EQUAL(eve::pedantic(eve::negabsmin)(T(1) , eve::nan(eve::as<T>()) ), T(-1) ); TTS_IEEE_EQUAL(eve::pedantic(eve::negabsmin)(v_t(1), eve::nan(eve::as<T>()) ), T(-1) ); TTS_IEEE_EQUAL(eve::pedantic(eve::negabsmin)(T(1) , eve::nan(eve::as<v_t>()) ), T(-1) ); TTS_EXPECT(eve::all(eve::is_negative(eve::pedantic(eve::negabsmin)(T(-0.), T( 0 ))))); TTS_EXPECT(eve::all(eve::is_negative(eve::pedantic(eve::negabsmin)(T( 0 ), T(-0.))))); TTS_IEEE_EQUAL(eve::numeric(eve::negabsmin)((eve::nan(eve::as<T>()) ) , T(1)) , T(-1) ); TTS_IEEE_EQUAL(eve::numeric(eve::negabsmin)((eve::nan(eve::as<v_t>())), T(1)) , T(-1) ); TTS_IEEE_EQUAL(eve::numeric(eve::negabsmin)((eve::nan(eve::as<T>()) ) , v_t(1)) , T(-1) ); TTS_IEEE_EQUAL(eve::numeric(eve::negabsmin)(T(1) , eve::nan(eve::as<T>()) ), T(-1) ); TTS_IEEE_EQUAL(eve::numeric(eve::negabsmin)(v_t(1) , eve::nan(eve::as<T>()) ), T(-1) ); TTS_IEEE_EQUAL(eve::numeric(eve::negabsmin)(T(1) , eve::nan(eve::as<v_t>()) ), T(-1) ); TTS_EXPECT(eve::all(eve::is_negative(eve::numeric(eve::negabsmin)(T(-0.), T( 0 ))))); TTS_EXPECT(eve::all(eve::is_negative(eve::numeric(eve::negabsmin)(T( 0 ), T(-0.))))); };
2,598
897
<reponame>zhcet19/NeoAlgo-1 // C program to find the Length of Longest Increasing Subsequence /* In this problem, given an array we have to find the length of the longest increasing subsequence that array can make. The problem can be solved using Dynamic Programming */ #include <stdio.h> #include <string.h> int length_longest_increasing_subsequence(int arr[], int n) { int dp[n], max_len = 0; /* Initialize the dp array with the 1 as value, as the maximum length at each point is atleast 1, by including that value in the sequence */ for (int i = 0; i < n; ++i) dp[i] = 1; /* Now Lets Fill the dp array in Bottom-Up manner Compare Each i'th element to its previous elements from 0 to i-1, If arr[i] > arr[j](where j = 0 to i-1), then it qualifies for increasing subsequence and If dp[i] < dp[j] + 1, then that subsequence qualifies for being the longest one */ for (int i = 1; i < n; i++) { for (int j = 0; j < i; j++) { if (arr[i] > arr[j] && dp[i] < dp[j] + 1) dp[i] = dp[j] + 1; } } //Now Find the maximum element in the 'dp' array for (int i = 0; i < n; i++) { if (dp[i] > max_len) max_len = dp[i]; } return max_len; } int main() { int n, max_len; printf("\nWhat is the length of the array? "); scanf("%d", &n); int arr[n]; printf("Enter the numbers: "); for (int i = 0; i < n; i++) { scanf("%d", &arr[i]); } max_len = length_longest_increasing_subsequence(arr, n); printf("The length of the longest increasing subsequence of the given array is %d", max_len); return 0; } /* Time Complexity: O(num ^ 2), where 'num' is the size of the given array Space Complexity: O(num) SAMPLE INPUT AND OUTPUT SAMPLE 1 What is the length of the array? 5 Enter the numbers: 1 2 3 4 3 The length of the longest increasing subsequence of the given array is 4 SAMPLE 2 What is the length of the array? 5 Enter the numbers: 5 4 3 2 1 The maximum sum of an increasing subsequence of the given array is 1 */
840
318
/* * This file is part of RebornCore, licensed under the MIT License (MIT). * * Copyright (c) 2021 TeamReborn * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ package reborncore.api.items; import net.minecraft.entity.player.PlayerEntity; import net.minecraft.inventory.Inventories; import net.minecraft.inventory.Inventory; import net.minecraft.item.ItemStack; import net.minecraft.nbt.NbtCompound; import net.minecraft.nbt.NbtElement; import net.minecraft.util.collection.DefaultedList; public abstract class InventoryBase implements Inventory { private final int size; private DefaultedList<ItemStack> stacks; public InventoryBase(int size) { this.size = size; stacks = DefaultedList.ofSize(size, ItemStack.EMPTY); } public NbtElement serializeNBT() { NbtCompound tag = new NbtCompound(); Inventories.writeNbt(tag, stacks); return tag; } public void deserializeNBT(NbtCompound tag) { stacks = DefaultedList.ofSize(size, ItemStack.EMPTY); Inventories.readNbt(tag, stacks); } @Override public int size() { return size; } @Override public boolean isEmpty() { return stacks.stream().allMatch(ItemStack::isEmpty); } @Override public ItemStack getStack(int i) { return stacks.get(i); } @Override public ItemStack removeStack(int i, int i1) { ItemStack stack = Inventories.splitStack(stacks, i, i1); if (!stack.isEmpty()) { this.markDirty(); } return stack; } @Override public ItemStack removeStack(int i) { return Inventories.removeStack(stacks, i); } @Override public void setStack(int i, ItemStack itemStack) { stacks.set(i, itemStack); if (itemStack.getCount() > this.getMaxCountPerStack()) { itemStack.setCount(this.getMaxCountPerStack()); } this.markDirty(); } @Override public void markDirty() { //Stuff happens in the super methods } @Override public boolean canPlayerUse(PlayerEntity playerEntity) { return true; } @Override public void clear() { stacks.clear(); } public DefaultedList<ItemStack> getStacks() { return stacks; } }
966
327
#ifndef RCTConvert_GCKMediaQueueItem_h #define RCTConvert_GCKMediaQueueItem_h #import <GoogleCast/GoogleCast.h> #import <React/RCTConvert.h> @interface RCTConvert (GCKMediaQueueItem) + (NSArray<GCKMediaQueueItem *> *)GCKMediaQueueItemArray:(id)json; + (GCKMediaQueueItem *)GCKMediaQueueItem:(id)json; + (nonnull id)fromGCKMediaQueueItem:(nullable GCKMediaQueueItem *)item; @end #endif /* RCTConvert_GCKMediaQueueItem_h */
165
453
<gh_stars>100-1000 /* Oki bug report [OKI002](gcc008_2) The following program is not executed. Error message is as follow. illegal trap: 0x12 pc=d000d954 d000d954 08000240 NOP */ #include <stdio.h> #include <stdarg.h> int func (int, ...); void main () { func (2, 1., 2., 3.); pass ("func [OKI002]"); fflush (stdout); } int func (int i, ...) { return (i); }
185
368
<filename>plugin_III/game_III/CPathFind.h /* Plugin-SDK (Grand Theft Auto 3) header file Authors: GTA Community. See more here https://github.com/DK22Pac/plugin-sdk Do not delete this comment block. Respect others' work! */ #pragma once #include "PluginBase.h" #include "CVector2D.h" #include "CPathNode.h" #include "CTreadable.h" #include "CVector.h" #include "CTempNode.h" #include "CMatrix.h" class CVehicle; union PLUGIN_API CConnectionFlags { unsigned char flags; struct { unsigned char bCrossesRoad : 1; unsigned char bTrafficLight : 1; }; }; enum PLUGIN_API ePathType { PATH_CAR = 0, PATH_PED = 1 }; struct PLUGIN_API CTempDetachedNode { unsigned char foo[20]; }; struct PLUGIN_API CPathInfoNode { short x; short y; short z; char m_nType; char m_nNext; char m_nNumLeftLanes; char m_nNumRightLanes; unsigned char crossing : 1; }; struct PLUGIN_API CPathInfoForObject { CPathInfoNode m_aNodes[12]; }; class PLUGIN_API CCarPathLink { public: CVector2D m_vec2dPos; CVector2D m_vec2dDir; short m_nPathNodeIndex; char m_nNumLeftLanes; char m_nNumRightLanes; unsigned char m_nTrafficLightType; struct { unsigned char bBridgeLights : 1; } m_nFlags; }; class PLUGIN_API CPathFind { PLUGIN_NO_DEFAULT_CONSTRUCTION(CPathFind) public: CPathNode m_aPathNodes[4930]; CCarPathLink m_aCarPathLinks[2076]; CTreadable *m_apMapObjects[1250]; unsigned char m_anMapObjectFlags[1250]; short m_anConnections[10260]; short m_anDistances[10260]; CConnectionFlags m_anConnectionFlags[10260]; short m_anCarPathConnections[10260]; int m_nNumPathNodes; int m_nNumCarPathNodes; int m_nNumPedPathNodes; short m_nNumMapObjects; short m_nNumConnections; int m_nNumCarPathLinks; int field_45BEC; unsigned char m_nNumGroups[2]; CPathNode m_aSearchNodes[512]; SUPPORTED_10EN_11EN_STEAM static CVector &CoorsXFormed; SUPPORTED_10EN_11EN_STEAM void AddNodeToList(CPathNode *node, int listId); SUPPORTED_10EN_11EN_STEAM void AllocatePathFindInfoMem(short numPathGroups); SUPPORTED_10EN_11EN_STEAM void CalcNodeCoors(short x, short 
y, short z, int id, CVector *out); SUPPORTED_10EN_11EN_STEAM float CalcRoadDensity(float x, float y); SUPPORTED_10EN_11EN_STEAM void CountFloodFillGroups(unsigned char type); SUPPORTED_10EN_11EN_STEAM void DoPathSearch(unsigned char type, CVector start, int startNodeId, CVector target, CPathNode **nodes, short *numNodes, short maxNumNodes, CVehicle *vehicle, float *dist, float distLimit, int forcedTargetNode); SUPPORTED_10EN_11EN_STEAM void FindNextNodeWandering(unsigned char type, CVector coors, CPathNode **lastNode, CPathNode **nextNode, unsigned char curDir, unsigned char *nextDir); SUPPORTED_10EN_11EN_STEAM int FindNodeClosestToCoors(CVector coors, unsigned char type, float distLimit, bool ignoreDisabled, bool ignoreBetweenLevels); SUPPORTED_10EN_11EN_STEAM int FindNodeClosestToCoorsFavourDirection(CVector coors, unsigned char type, float dirX, float dirY); SUPPORTED_10EN_11EN_STEAM float FindNodeOrientationForCarPlacement(unsigned int nodeId); SUPPORTED_10EN_11EN_STEAM float FindNodeOrientationForCarPlacementFacingDestination(unsigned int nodeId, float x, float y, bool towards); SUPPORTED_10EN_11EN_STEAM CTreadable *FindRoadObjectClosestToCoors(CVector coors, unsigned char type); SUPPORTED_10EN_11EN_STEAM bool GeneratePedCreationCoors(float x, float y, float minDist, float maxDist, float minDistOffScreen, float maxDistOffScreen, CVector *posn, int *pNode1, int *pNode2, float *positionBetweenNodes, CMatrix *camMatrix); SUPPORTED_10EN_11EN_STEAM void Init(); SUPPORTED_10EN_11EN_STEAM void Load(unsigned char *buf, unsigned int size); SUPPORTED_10EN_11EN_STEAM void MarkRoadsBetweenLevelsInArea(float x1, float x2, float y1, float y2, float z1, float z2); SUPPORTED_10EN_11EN_STEAM void MarkRoadsBetweenLevelsNodeAndNeighbours(int nodeId); SUPPORTED_10EN_11EN_STEAM bool NewGenerateCarCreationCoors(float x, float y, float dirX, float dirY, float spawnDist, float angleLimit, bool forward, CVector *posn, int *pNode1, int *pNode2, float *positionBetweenNodes, bool 
ignoreDisabled); SUPPORTED_10EN_11EN_STEAM void PedMarkRoadsBetweenLevelsInArea(float x1, float x2, float y1, float y2, float z1, float z2); SUPPORTED_10EN_11EN_STEAM void PreparePathData(); SUPPORTED_10EN_11EN_STEAM void PreparePathDataForType(unsigned char type, CTempNode *tempNodes, CPathInfoForObject *info, float maxDist, CTempDetachedNode *detachedNodes, int numDetached); SUPPORTED_10EN_11EN_STEAM void RegisterMapObject(CTreadable *mapObject); SUPPORTED_10EN_11EN_STEAM void RemoveBadStartNode(CVector pos, CPathNode **nodes, short *nodeCount); SUPPORTED_10EN_11EN_STEAM void RemoveNodeFromList(CPathNode *node); SUPPORTED_10EN_11EN_STEAM void Save(unsigned char *buf, unsigned int *size); SUPPORTED_10EN_11EN_STEAM void SetLinksBridgeLights(float x1, float y1, float x2, float y2, bool enable); SUPPORTED_10EN_11EN_STEAM void StoreNodeInfoCar(short id, short node, char type, char next, short x, short y, short z, short width, char numLeft, char numRight); SUPPORTED_10EN_11EN_STEAM void StoreNodeInfoPed(short id, short node, char type, char next, short x, short y, short z, short width, bool crossing); SUPPORTED_10EN_11EN_STEAM void SwitchOffNodeAndNeighbours(int nodeId, bool disable); SUPPORTED_10EN_11EN_STEAM void SwitchPedRoadsOffInArea(float x1, float x2, float y1, float y2, float z1, float z2, bool disable); SUPPORTED_10EN_11EN_STEAM void SwitchRoadsInAngledArea(float x1, float y1, float z1, float x2, float y2, float z2, float length, unsigned char type, unsigned char enable); SUPPORTED_10EN_11EN_STEAM void SwitchRoadsOffInArea(float x1, float x2, float y1, float y2, float z1, float z2, bool disable); SUPPORTED_10EN_11EN_STEAM bool TestCoorsCloseness(CVector target, unsigned char type, CVector start); SUPPORTED_10EN_11EN_STEAM bool TestCrossesRoad(CPathNode *node1, CPathNode *node2); SUPPORTED_10EN_11EN_STEAM bool TestForPedTrafficLight(CPathNode *node1, CPathNode *node2); SUPPORTED_10EN_11EN_STEAM static bool LoadPathFindData(); }; SUPPORTED_10EN_11EN_STEAM extern 
int &TempListLength; SUPPORTED_10EN_11EN_STEAM extern CPathNode *(&apNodesToBeCleared)[4995]; // CPathNode *apNodesToBeCleared[4995] SUPPORTED_10EN_11EN_STEAM extern int &gMaxEntries; SUPPORTED_10EN_11EN_STEAM extern CPathNode *(&pNodeList)[32]; // CPathNode *pNodeList[32] SUPPORTED_10EN_11EN_STEAM extern CTempDetachedNode *&DetachedNodesCars; SUPPORTED_10EN_11EN_STEAM extern CTempDetachedNode *&DetachedNodesPeds; SUPPORTED_10EN_11EN_STEAM extern CPathInfoForObject *&InfoForTileCars; SUPPORTED_10EN_11EN_STEAM extern CPathInfoForObject *&InfoForTilePeds; SUPPORTED_10EN_11EN_STEAM extern CPathFind &ThePaths; VALIDATE_SIZE(CTempDetachedNode, 0x14); VALIDATE_SIZE(CPathInfoNode, 0xC); VALIDATE_SIZE(CPathInfoForObject, 0x90); VALIDATE_SIZE(CCarPathLink, 0x18); VALIDATE_SIZE(CPathFind, 0x49BF4); #include "meta/meta.CPathFind.h"
2,718