max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
396
/* Copyright 2018 Google Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS-IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */
#include "geometrical_acoustics/collection_kernel.h"

#include <array>
#include <cmath>
#include <vector>

#include "Eigen/Core"
#include "base/constants_and_types.h"
#include "base/logging.h"

namespace vraudio {

using Eigen::Vector3f;

namespace {

// Adds a response to the output |energy_impulse_responses| array at an index
// computed based on |total_source_listener_distance| and
// |distance_to_impulse_response_index|. The values to be added are |energies|
// multiplied by |energy_factor|.
//
// The index mapping converts a propagation distance (meters) into a sample
// index via |distance_to_impulse_response_index| (samples per meter).
void AddResponse(float total_source_listener_distance,
                 float distance_to_impulse_response_index, float energy_factor,
                 const std::array<float, kNumReverbOctaveBands>& energies,
                 std::array<std::vector<float>, kNumReverbOctaveBands>*
                     energy_impulse_responses) {
  const size_t impulse_response_index = static_cast<size_t>(
      total_source_listener_distance * distance_to_impulse_response_index);
  // It is OK if |impulse_response_index| exceeds the size of the listener's
  // impulse response array (e.g. if the user only cares about the first second
  // of the response), in which case we simply discard the contribution.
  // All bands share the same length, so checking band 0 suffices.
  if (impulse_response_index >= (*energy_impulse_responses)[0].size()) {
    return;
  }
  // Accumulate the scaled energy into every octave band at the arrival index.
  for (size_t i = 0; i < energy_impulse_responses->size(); ++i) {
    (*energy_impulse_responses)[i][impulse_response_index] +=
        energy_factor * energies.at(i);
  }
}

}  // namespace

// In our implementation, |sphere_size_energy_factor_| is defined such that a
// listener 1.0 meter away from the source would give an attenuation of 1.0.
// Therefore the value is computed by:
//   4.0 * PI * 1.0^2 / (PI * R^2) = 4.0 / R^2.
CollectionKernel::CollectionKernel(float listener_sphere_radius,
                                   float sampling_rate)
    : sphere_size_energy_factor_(4.0f / listener_sphere_radius /
                                 listener_sphere_radius),
      // Samples per meter: converts a propagation distance to a sample index.
      distance_to_impulse_response_index_(sampling_rate / kSpeedOfSound) {}

void CollectionKernel::Collect(const AcousticRay& ray, float weight,
                               AcousticListener* listener) const {
  CHECK_EQ(ray.energies().size(), listener->energy_impulse_responses.size());
  // Collect the energy contribution to the listener's impulse response at the
  // arrival time.
  // The distance to listener on this ray is approximated by projecting
  // (listener.position - sub_ray's starting point) onto the ray direction.
  const Vector3f ray_direction(ray.direction());
  // The sub-ray starts at origin + t_near * direction (un-normalized
  // direction, consistent with how t_near parameterizes the ray).
  const Vector3f ray_starting_point =
      Vector3f(ray.origin()) + ray.t_near() * ray_direction;
  const float distance_to_listener_on_ray =
      (listener->position - ray_starting_point).dot(ray_direction.normalized());
  AddResponse(ray.prior_distance() + distance_to_listener_on_ray,
              distance_to_impulse_response_index_,
              weight * sphere_size_energy_factor_, ray.energies(),
              &listener->energy_impulse_responses);
}

// In a diffuse rain algorithm, instead of relying on the Monte Carlo process
// to estimate expected energy gathered by the sphere, we directly multiply
// the probability of a ray intersecting the sphere to the energy to be
// collected, thus ensuring the expected gathered energies are the same (see
// also [internal ref]
//
//   <Energy by Monte Carlo process> = <Energy by diffuse rain>
//   sum_i (Prob[ray_i intersects sphere] * Energy_i) =
//       sum_i (factor_i * Energy_i)
//
// So factor_i = Prob[ray_i intersects sphere]
//             ~ PDF(ray_i in the direction pointing to the listener) *
//               (projected solid angle of the listener sphere)
//             ~ PDF * PI * R^2 / (4.0 * PI * D^2)
//             = PDF * R^2 / (4.0 * D^2),
//
// where PDF is the probability density function, R the radius of the
// listener sphere, and D the distance between the listener and the
// reflection point.
//
// Combining |factor_i| with |sphere_size_energy_factor_| = 4.0 / R^2,
// the total energy factor that needs to be multiplied to the energies on a
// diffuse-rain ray is therefore (PDF / D^2).
void CollectionKernel::CollectDiffuseRain(const AcousticRay& diffuse_rain_ray,
                                          float weight, float direction_pdf,
                                          AcousticListener* listener) const {
  // Since a diffuse-rain ray already connects to the listener, and its
  // direction already normalized, the distance to listener is its
  // t_far - t_near.
  const float distance_to_listener_on_ray =
      diffuse_rain_ray.t_far() - diffuse_rain_ray.t_near();
  // (PDF / D^2); see the derivation above.
  const float diffuse_rain_energy_factor =
      direction_pdf / distance_to_listener_on_ray / distance_to_listener_on_ray;
  AddResponse(diffuse_rain_ray.prior_distance() + distance_to_listener_on_ray,
              distance_to_impulse_response_index_,
              weight * diffuse_rain_energy_factor, diffuse_rain_ray.energies(),
              &listener->energy_impulse_responses);
}

}  // namespace vraudio
2,050
839
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.apache.cxf.tools.java2ws;

import java.io.OutputStream;
import java.util.Arrays;
import java.util.List;

import org.apache.cxf.common.util.StringUtils;
import org.apache.cxf.tools.common.CommandInterfaceUtils;
import org.apache.cxf.tools.common.ToolContext;
import org.apache.cxf.tools.common.toolspec.ToolRunner;

/**
 * Command-line entry point for the CXF java2ws tool. Parses the raw
 * command-line arguments and delegates the actual work to
 * {@link ToolRunner} with the {@code java2ws.xml} tool specification.
 */
public class JavaToWS {

    // Raw command-line arguments; never null (defaults to an empty array).
    private String[] args;

    public JavaToWS() {
        args = new String[0];
    }

    public JavaToWS(String[] pargs) {
        args = pargs;
    }

    /**
     * Tool entry point. Errors are reported to stderr; the stack trace is
     * printed only in verbose mode, and the JVM exits with status 1 only
     * when exit-on-finish is enabled (see {@link #isExitOnFinish()}).
     *
     * @param args command-line arguments passed through to the tool
     */
    public static void main(String[] args) {
        System.setProperty("org.apache.cxf.JDKBugHacks.defaultUsesCaches", "true");
        CommandInterfaceUtils.commandCommonMain();
        JavaToWS j2w = new JavaToWS(args);
        try {
            j2w.run();
        } catch (Throwable ex) {
            System.err.println("JavaToWS Error: " + ex.toString());
            System.err.println();
            if (j2w.isVerbose()) {
                ex.printStackTrace();
            }
            if (j2w.isExitOnFinish()) {
                System.exit(1);
            }
        }
    }

    /**
     * @return true if verbose output was requested via {@code -V} or
     *         {@code -verbose}
     */
    public boolean isVerbose() {
        return isSet(new String[] {"-V", "-verbose"});
    }

    /**
     * Checks whether any of the given flags is present among the
     * command-line arguments.
     *
     * @param keys candidate flag spellings
     * @return true if at least one key occurs in {@code args}
     */
    private boolean isSet(String[] keys) {
        if (args == null) {
            return false;
        }
        List<String> pargs = Arrays.asList(args);
        for (String key : keys) {
            if (pargs.contains(key)) {
                return true;
            }
        }
        return false;
    }

    /** Runs the tool with default output handling. */
    public void run() throws Exception {
        ToolRunner.runTool(JavaToWSContainer.class, JavaToWSContainer.class
            .getResourceAsStream("java2ws.xml"), false, args);
    }

    /**
     * Runs the tool, writing output to the given stream.
     *
     * @param os destination for tool output
     */
    public void run(OutputStream os) throws Exception {
        ToolRunner.runTool(JavaToWSContainer.class, JavaToWSContainer.class
            .getResourceAsStream("java2ws.xml"), false, args, os);
    }

    /**
     * Pass user app's (compiler) information in the context.
     * @param context
     * @param os
     * @throws Exception
     */
    public void run(ToolContext context, OutputStream os) throws Exception {
        ToolRunner.runTool(JavaToWSContainer.class,
                           JavaToWSContainer.class.getResourceAsStream("java2ws.xml"),
                           false, args, isExitOnFinish(), context, os);
    }

    /**
     * @return true unless the {@code exitOnFinish} system property is set
     *         to something other than "yes"/"true" (default is "true")
     */
    private boolean isExitOnFinish() {
        String exit = System.getProperty("exitOnFinish", "true");
        if (StringUtils.isEmpty(exit)) {
            return false;
        }
        return "YES".equalsIgnoreCase(exit) || "TRUE".equalsIgnoreCase(exit);
    }
}
1,366
3,579
/*
 * Copyright 2015, The Querydsl Team (http://www.querydsl.com/team)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.querydsl.sql.mssql;

import static com.querydsl.sql.Constants.employee;
import static com.querydsl.sql.SQLExpressions.rowNumber;
import static org.junit.Assert.assertEquals;

import org.junit.Test;

import com.querydsl.core.types.Expression;
import com.querydsl.sql.Configuration;
import com.querydsl.sql.SQLSerializer;
import com.querydsl.sql.SQLTemplates;
import com.querydsl.sql.WindowFunction;

/**
 * Verifies the SQL rendered for ROW_NUMBER() window-function expressions
 * (ORDER BY, PARTITION BY, aliasing) using the default SQL templates.
 */
public class WindowFunctionTest {

    private static final Configuration configuration = new Configuration(SQLTemplates.DEFAULT);

    // Serializes an expression to its SQL string form for assertion.
    private static String toString(Expression<?> e) {
        return new SQLSerializer(configuration).handle(e).toString();
    }

    // ROW_NUMBER() OVER (ORDER BY OrderDate) AS 'RowNumber'
    // ROW_NUMBER() OVER (PARTITION BY PostalCode ORDER BY SalesYTD DESC)
    @Test
    public void mutable() {
        // WindowFunction builders are mutable: adding an orderBy mutates the
        // same instance, so the second assertion sees both order keys.
        WindowFunction<Long> rn = rowNumber().over().orderBy(employee.firstname);
        assertEquals("row_number() over (order by e.FIRSTNAME asc)", toString(rn));
        assertEquals("row_number() over (order by e.FIRSTNAME asc, e.LASTNAME asc)",
                toString(rn.orderBy(employee.lastname)));
    }

    @Test
    public void orderBy() {
        // Bare expression defaults to ascending order.
        assertEquals("row_number() over (order by e.FIRSTNAME asc)",
                toString(rowNumber().over().orderBy(employee.firstname.asc())));
        assertEquals("row_number() over (order by e.FIRSTNAME asc)",
                toString(rowNumber().over().orderBy(employee.firstname)));
        assertEquals("row_number() over (order by e.FIRSTNAME asc) as rn",
                toString(rowNumber().over().orderBy(employee.firstname.asc()).as("rn")));
        assertEquals("row_number() over (order by e.FIRSTNAME desc)",
                toString(rowNumber().over().orderBy(employee.firstname.desc())));
    }

    @Test
    public void partitionBy() {
        assertEquals("row_number() over (partition by e.LASTNAME order by e.FIRSTNAME asc)",
                toString(rowNumber().over().partitionBy(employee.lastname).orderBy(employee.firstname.asc())));
        assertEquals("row_number() over (partition by e.LASTNAME, e.FIRSTNAME order by e.FIRSTNAME asc)",
                toString(rowNumber().over().partitionBy(employee.lastname, employee.firstname).orderBy(employee.firstname.asc())));
    }
}
1,026
2,151
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef COMPONENTS_SAFE_BROWSING_DB_V4_FEATURE_LIST_H_
#define COMPONENTS_SAFE_BROWSING_DB_V4_FEATURE_LIST_H_

namespace safe_browsing {

// Exposes methods to check whether a particular feature has been enabled
// through Finch.
namespace V4FeatureList {

// Describes how far the PVer4 (V4) Safe Browsing database rollout has
// progressed relative to the legacy PVer3 database.
enum class V4UsageStatus {
  // The V4 database manager is not even instantiated i.e. is disabled. All
  // SafeBrowsing operations use PVer3 code.
  V4_DISABLED,

  // The V4 database manager is instantiated, and performs background updates,
  // but all SafeBrowsing verdicts are returned using the PVer3 database.
  V4_INSTANTIATED,

  // Only the V4 database manager is instantiated, PVer3 database manager is
  // not. All SafeBrowsing verdicts are returned using PVer4 database.
  V4_ONLY
};

// Returns the current rollout state as selected by feature flags.
V4UsageStatus GetV4UsageStatus();

}  // namespace V4FeatureList

}  // namespace safe_browsing

#endif  // COMPONENTS_SAFE_BROWSING_DB_V4_FEATURE_LIST_H_
368
676
// Use of this source code is governed by a BSD 3-Clause License // that can be found in the LICENSE file. // Author: caozhiyi (<EMAIL>) #ifndef CPPNET_EVENT_TIMER_EVENT #define CPPNET_EVENT_TIMER_EVENT #include <memory> #include <atomic> #include "event_interface.h" #include "include/cppnet_type.h" #include "common/timer/timer_solt.h" namespace cppnet { class TimerEvent: public Event, public TimerSolt { public: TimerEvent(): _timer_id(0) {} ~TimerEvent() {} void SetTimerCallBack(const user_timer_call_back& cb, void* param); void OnTimer(); private: uint64_t _timer_id; user_timer_call_back _timer_cb; }; } #endif
271
16,483
<gh_stars>1000+ /* * Copyright 1999-2019 Seata.io Group. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.seata.serializer.seata.protocol.transaction; import io.seata.serializer.seata.SeataSerializer; import io.seata.core.protocol.transaction.GlobalCommitRequest; import org.junit.jupiter.api.Test; import static org.assertj.core.api.Assertions.assertThat; /** * The type Global commit request codec test. * * @author zhangsen */ public class GlobalCommitRequestCodecTest { /** * The Seata codec. */ SeataSerializer seataSerializer = new SeataSerializer(); /** * Test codec. */ @Test public void test_codec(){ GlobalCommitRequest globalCommitRequest = new GlobalCommitRequest(); globalCommitRequest.setExtraData("aaaa"); globalCommitRequest.setXid("adf"); byte[] bytes = seataSerializer.serialize(globalCommitRequest); GlobalCommitRequest globalCommitRequest2 = seataSerializer.deserialize(bytes); assertThat(globalCommitRequest2.getExtraData()).isEqualTo(globalCommitRequest.getExtraData()); assertThat(globalCommitRequest2.getXid()).isEqualTo(globalCommitRequest.getXid()); } }
578
390
<reponame>pombredanne/flare-wmi #include "Helper.h" #include "EventConsumer.h" class LogFileEventConsumerClass : public EventConsumer { public: LogFileEventConsumerClass() : EventConsumer(), FileName(), Name(), Text(), MaximumFileSize(ALL_BITS_16), // default is 65535 IsUnicode(0) {} LogFileEventConsumerClass(const LogFileEventConsumerClass &copyin) : EventConsumer(copyin), FileName(copyin.FileName), Name(copyin.Name), Text(copyin.Text), MaximumFileSize(copyin.MaximumFileSize), IsUnicode(copyin.IsUnicode) { } static const uint32 ConsumerDataTypesSize = 8; static const ConsumerDataType ConsumerDataTypes[ConsumerDataTypesSize]; static const wchar_t CONSUMER_NAME[]; static const uint32 UNK_7_BYTES = 7; void SetName(std::vector<ExtentClass>& extents, int type); void SetFilename(std::vector<ExtentClass>& extents, int type); void SetText(std::vector<ExtentClass>& extents, int type); void SetIsUnicode(uint16 value); void SetMaximumFileSize(uint64 dataVal); virtual void Print(HANDLE hFile, FILE *out); static EventConsumer* Create(const void* recordBuf, std::vector<ExtentClass>& cRecordExtents, uint32 size, bool bXP); static bool IsConsumer(const void* recordBuf, uint32 size, bool bXP); private: StringValue FileName; BoolValue IsUnicode; Uint64Value MaximumFileSize; StringValue Name; StringValue Text; void SetName(uint64 s, uint64 c, int type); void SetFilename(uint64 s, uint64 c, int type); void SetText(uint64 s, uint64 c, int type); };
562
1,056
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.netbeans.modules.php.project.connections.spi;

import javax.swing.JComponent;
import javax.swing.event.ChangeListener;
import org.netbeans.modules.php.project.connections.ConfigManager;

/**
 * SPI for a UI panel that edits one remote-connection configuration.
 * Implementations provide the Swing component, read/store the configuration
 * values, and report validation state (error/warning messages).
 *
 * @author <NAME>
 */
public interface RemoteConfigurationPanel {

    /**
     * Attach a {@link ChangeListener change listener} that is to be notified of changes
     * in the configuration panel (e.g., the result of the {@link #isValidConfiguration} method
     * has changed).
     *
     * @param listener a listener.
     */
    void addChangeListener(ChangeListener listener);

    /**
     * Remove a {@link ChangeListener change listener}.
     *
     * @param listener a listener.
     */
    void removeChangeListener(ChangeListener listener);

    /**
     * Return a UI component used to allow the user to customize this {@link RemoteConfiguration remote configuration}.
     *
     * @return a component which provides a configuration UI, never <code>null</code>.
     *         This method might be called more than once and it is expected to always
     *         return the same instance.
     */
    JComponent getComponent();

    /**
     * Populate the panel's UI from the given configuration.
     *
     * @param configuration {@link org.netbeans.modules.php.project.connections.ConfigManager.Configuration} to read data from.
     */
    void read(ConfigManager.Configuration configuration);

    /**
     * Write the panel's current values into the given configuration.
     *
     * @param configuration {@link org.netbeans.modules.php.project.connections.ConfigManager.Configuration} to store data to.
     */
    void store(ConfigManager.Configuration configuration);

    /**
     * Check whether this {@link RemoteConfiguration configuration} is valid, it means it contains no errors.
     * @return <code>true</code> if this {@link RemoteConfiguration remote configuration} contains no errors, <code>false</code> otherwise.
     */
    boolean isValidConfiguration();

    /**
     * Get the error message if this {@link RemoteConfiguration remote configuration} is not valid.
     * @return error message if this {@link RemoteConfiguration remote configuration} is not valid.
     * @see #isValidConfiguration()
     * @see #getWarning()
     */
    String getError();

    /**
     * Get the warning message. Please notice that this warning message is not related
     * to panel {@link #isValidConfiguration() validity}.
     * @return warning message.
     * @see #isValidConfiguration()
     * @see #getError()
     */
    String getWarning();
}
975
1,144
/*
 * #%L
 * metasfresh-webui-api
 * %%
 * Copyright (C) 2020 metas GmbH
 * %%
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as
 * published by the Free Software Foundation, either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program. If not, see
 * <http://www.gnu.org/licenses/gpl-2.0.html>.
 * #L%
 */
package de.metas.ui.web.pporder;

import com.google.common.collect.ImmutableList;
import de.metas.cache.CCache;
import de.metas.handlingunits.reservation.HUReservationService;
import de.metas.process.AdProcessId;
import de.metas.process.IADProcessDAO;
import de.metas.process.RelatedProcessDescriptor;
import de.metas.process.RelatedProcessDescriptor.DisplayPlace;
import de.metas.ui.web.handlingunits.DefaultHUEditorViewFactory;
import de.metas.ui.web.pattribute.ASIRepository;
import de.metas.ui.web.view.ASIViewRowAttributesProvider;
import de.metas.ui.web.view.CreateViewRequest;
import de.metas.ui.web.view.IView;
import de.metas.ui.web.view.IViewFactory;
import de.metas.ui.web.view.IViewsRepository;
import de.metas.ui.web.view.ViewFactory;
import de.metas.ui.web.view.ViewId;
import de.metas.ui.web.view.ViewProfileId;
import de.metas.ui.web.view.descriptor.IncludedViewLayout;
import de.metas.ui.web.view.descriptor.ViewLayout;
import de.metas.ui.web.view.json.JSONFilterViewRequest;
import de.metas.ui.web.view.json.JSONViewDataType;
import de.metas.ui.web.window.datatypes.WindowId;
import de.metas.ui.web.window.descriptor.factory.standard.LayoutFactory;
import de.metas.util.Services;
import lombok.NonNull;
import org.adempiere.exceptions.AdempiereException;
import org.eevolution.api.IPPOrderBL;
import org.eevolution.api.PPOrderDocBaseType;
import org.eevolution.api.PPOrderId;
import org.eevolution.model.I_PP_Order;

import javax.annotation.Nullable;
import java.util.List;
import java.util.function.Supplier;

/**
 * View factory for the manufacturing-order Issue/Receipt window: builds
 * {@link PPOrderLinesView} instances, their layout, and the set of
 * quick-action processes shown in that view.
 */
@ViewFactory(windowId = PPOrderConstants.AD_WINDOW_ID_IssueReceipt_String)
public class PPOrderLinesViewFactory implements IViewFactory
{
	private final IADProcessDAO adProcessDAO = Services.get(IADProcessDAO.class);
	private final IPPOrderBL ppOrderBL = Services.get(IPPOrderBL.class);

	private final ASIRepository asiRepository;
	private final DefaultHUEditorViewFactory huEditorViewFactory;
	private final HUReservationService huReservationService;

	// Layouts are identical per window, so cache them (small LRU).
	private final transient CCache<WindowId, ViewLayout> layouts = CCache.newLRUCache("PPOrderLinesViewFactory#Layouts", 10, 0);

	public PPOrderLinesViewFactory(
			@NonNull final ASIRepository asiRepository,
			@NonNull final DefaultHUEditorViewFactory huEditorViewFactory,
			@NonNull final HUReservationService huReservationService)
	{
		this.asiRepository = asiRepository;
		this.huEditorViewFactory = huEditorViewFactory;
		this.huReservationService = huReservationService;
	}

	/**
	 * Creates the issue/receipt lines view for the single PP_Order selected
	 * by the request's filter.
	 */
	@Override
	public PPOrderLinesView createView(final @NonNull CreateViewRequest request)
	{
		final ViewId viewId = request.getViewId();
		// The request's single sticky filter carries the PP_Order ID.
		final PPOrderId ppOrderId = PPOrderId.ofRepoId(request.getSingleFilterOnlyId());

		final I_PP_Order ppOrder = ppOrderBL.getById(ppOrderId);
		final PPOrderDocBaseType ppOrderDocBaseType = PPOrderDocBaseType.ofCode(ppOrder.getDocBaseType());

		final PPOrderLinesViewDataSupplier dataSupplier = PPOrderLinesViewDataSupplier
				.builder()
				.viewWindowId(viewId.getWindowId())
				.ppOrderId(ppOrderId)
				.asiAttributesProvider(ASIViewRowAttributesProvider.newInstance(asiRepository))
				.huSQLViewBinding(huEditorViewFactory.getSqlViewBinding())
				.huReservationService(huReservationService)
				.build();

		return PPOrderLinesView.builder()
				.parentViewId(request.getParentViewId())
				.parentRowId(request.getParentRowId())
				.viewId(viewId)
				.viewType(request.getViewType())
				.referencingDocumentPaths(request.getReferencingDocumentPaths())
				.ppOrderId(ppOrderId)
				.docBaseType(ppOrderDocBaseType)
				.dataSupplier(dataSupplier)
				.additionalRelatedProcessDescriptors(createAdditionalRelatedProcessDescriptors())
				.build();
	}

	/** This view does not support user filters; always throws. */
	@Override
	public IView filterView(
			final IView view,
			final JSONFilterViewRequest filterViewRequest,
			final Supplier<IViewsRepository> viewsRepo)
	{
		throw new AdempiereException("View does not support filtering")
				.setParameter("view", view)
				.setParameter("filterViewRequest", filterViewRequest);
	}

	/** The PP_Order sticky filter is mandatory; removing it always throws. */
	@Override
	public IView deleteStickyFilter(final IView view, final String filterId)
	{
		throw new AdempiereException("View does not allow removing sticky/static filter")
				.setParameter("view", view)
				.setParameter("filterId", filterId);
	}

	@Override
	public ViewLayout getViewLayout(final WindowId windowId, final JSONViewDataType viewDataType_NOTUSED, @Nullable final ViewProfileId profileId_NOTUSED)
	{
		// Layout depends only on the window, so viewDataType/profile are ignored.
		return layouts.getOrLoad(windowId, () -> createViewLayout(windowId));
	}

	private ViewLayout createViewLayout(final WindowId windowId)
	{
		return ViewLayout.builder()
				.setWindowId(windowId)
				.setCaption("PP Order Issue/Receipt")
				.setEmptyResultText(LayoutFactory.HARDCODED_TAB_EMPTY_RESULT_TEXT)
				.setEmptyResultHint(LayoutFactory.HARDCODED_TAB_EMPTY_RESULT_HINT)
				//
				.setHasAttributesSupport(true)
				.setHasTreeSupport(true)
				.setIncludedViewLayout(IncludedViewLayout.DEFAULT)
				//
				.addElementsFromViewRowClass(PPOrderLineRow.class, JSONViewDataType.grid)
				//
				.build();
	}

	// Quick-action processes offered in the issue/receipt view.
	private List<RelatedProcessDescriptor> createAdditionalRelatedProcessDescriptors()
	{
		return ImmutableList.of(
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_Receipt.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.handlingunits.process.WEBUI_M_HU_Pick.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_IssueServiceProduct.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_ReverseCandidate.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_ChangePlanningStatus_Planning.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_ChangePlanningStatus_Review.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_ChangePlanningStatus_Complete.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_HUEditor_Launcher.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_M_Source_HU_Delete.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_M_Source_HU_IssueTuQty.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_M_Source_HU_IssueCUQty.class),
				createProcessDescriptorForIssueReceiptWindow(de.metas.ui.web.pporder.process.WEBUI_PP_Order_PrintLabel.class));
	}

	// Resolves the AD_Process for the given class and wires it as a
	// quick-action of the Issue/Receipt window, available for any table.
	private RelatedProcessDescriptor createProcessDescriptorForIssueReceiptWindow(@NonNull final Class<?> processClass)
	{
		final AdProcessId processId = adProcessDAO.retrieveProcessIdByClass(processClass);
		return RelatedProcessDescriptor.builder()
				.processId(processId)
				.windowId(PPOrderConstants.AD_WINDOW_ID_IssueReceipt.toAdWindowIdOrNull())
				.anyTable()
				.displayPlace(DisplayPlace.ViewQuickActions)
				.build();
	}
}
2,737
522
package algs.model.tree; import algs.model.IBinaryTreeNode; /** * Perform a pre traversal of the tree. * * Self - Left - Right * * @param <T> Type of value associated with each {@link IBinaryTreeNode} * @author <NAME> * @version 1.0, 6/15/08 * @since 1.0 */ public class PreorderTraversal<T extends IBinaryTreeNode<T>> extends AbstractBinaryTraversal<T> { /** * Start at the given node. * @param node starting node for the traversal. */ public PreorderTraversal(IBinaryTreeNode<T> node) { super(node); } /** * Initial phase for preorder traversal is SELF. * * @see AbstractBinaryTraversal#initialPhase() * @return initial phase for preorder traversal */ @Override public Phase initialPhase() { return self; } /** * Final phase for preorder traversal is RIGHT. * * @see AbstractBinaryTraversal#finalPhase() * @return final phase for preorder traversal */ @Override public Phase finalPhase() { return right; } /** * Advance phase to follow preorder traversal. * * @see AbstractBinaryTraversal#advancePhase(algs.model.tree.AbstractBinaryTraversal.Phase) * @param phase current phase * @return next phase in traversal following current phase. */ @Override public Phase advancePhase(Phase phase) { if (phase == left) { return right; } if (phase == right) { return done; } // must be SELF, so we go left. return left; } }
486
6,717
//****************************************************************************** // // Copyright (c) 2016 Microsoft Corporation. All rights reserved. // // This code is licensed under the MIT License (MIT). // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // //****************************************************************************** #pragma once #import <AVFoundation/AVFoundationExport.h> #import <Foundation/NSObject.h> @class NSURL; @class NSError; @class NSData; typedef void (^AVMIDIPlayerCompletionHandler)(void); AVFOUNDATION_EXPORT_CLASS @interface AVMIDIPlayer : NSObject - (instancetype)initWithContentsOfURL:(NSURL*)inURL soundBankURL:(NSURL*)bankURL error:(NSError* _Nullable*)outError STUB_METHOD; - (instancetype)initWithData:(NSData*)data soundBankURL:(NSURL*)bankURL error:(NSError* _Nullable*)outError STUB_METHOD; - (void)prepareToPlay STUB_METHOD; - (void)play:(AVMIDIPlayerCompletionHandler)completionHandler STUB_METHOD; @property (readonly, getter=isPlaying, nonatomic) BOOL playing STUB_PROPERTY; - (void)stop STUB_METHOD; @property (readonly, nonatomic) NSTimeInterval duration STUB_PROPERTY; @property (nonatomic) NSTimeInterval currentPosition STUB_PROPERTY; @property (nonatomic) float rate STUB_PROPERTY; @end
514
739
{ "name": "<NAME>", "img": "https://images.pexels.com/photos/577585/pexels-photo-577585.jpeg?auto=compress&cs=tinysrgb&dpr=1&w=500", "email": "<EMAIL>", "links": { "website": "", "linkedin": "https://www.linkedin.com/in/dev-agarwal-5a0696170", "github": "https://github.com/dev499" }, "jobTitle": "Front end Developer", "location": { "city": "Bareilly", "state": "Uttar Pradesh ", "country": "India" } }
227
6,904
<filename>FreeRTOS/Demo/CORTEX_M4F_STM32F407ZG-SK/Libraries/STM32F4xx_StdPeriph_Driver/inc/stm32f4xx_dbgmcu.h /** ****************************************************************************** * @file stm32f4xx_dbgmcu.h * @author MCD Application Team * @version V1.0.0 * @date 30-September-2011 * @brief This file contains all the functions prototypes for the DBGMCU firmware library. ****************************************************************************** * @attention * * THE PRESENT FIRMWARE WHICH IS FOR GUIDANCE ONLY AIMS AT PROVIDING CUSTOMERS * WITH CODING INFORMATION REGARDING THEIR PRODUCTS IN ORDER FOR THEM TO SAVE * TIME. AS A RESULT, STMICROELECTRONICS SHALL NOT BE HELD LIABLE FOR ANY * DIRECT, INDIRECT OR CONSEQUENTIAL DAMAGES WITH RESPECT TO ANY CLAIMS ARISING * FROM THE CONTENT OF SUCH FIRMWARE AND/OR THE USE MADE BY CUSTOMERS OF THE * CODING INFORMATION CONTAINED HEREIN IN CONNECTION WITH THEIR PRODUCTS. * * <h2><center>&copy; COPYRIGHT 2011 STMicroelectronics</center></h2> ****************************************************************************** */ /* Define to prevent recursive inclusion -------------------------------------*/ #ifndef __STM32F4xx_DBGMCU_H #define __STM32F4xx_DBGMCU_H #ifdef __cplusplus extern "C" { #endif /* Includes ------------------------------------------------------------------*/ #include "stm32f4xx.h" /** @addtogroup STM32F4xx_StdPeriph_Driver * @{ */ /** @addtogroup DBGMCU * @{ */ /* Exported types ------------------------------------------------------------*/ /* Exported constants --------------------------------------------------------*/ /** @defgroup DBGMCU_Exported_Constants * @{ */ #define DBGMCU_SLEEP ((uint32_t)0x00000001) #define DBGMCU_STOP ((uint32_t)0x00000002) #define DBGMCU_STANDBY ((uint32_t)0x00000004) #define IS_DBGMCU_PERIPH(PERIPH) ((((PERIPH) & 0xFFFFFFF8) == 0x00) && ((PERIPH) != 0x00)) #define DBGMCU_TIM2_STOP ((uint32_t)0x00000001) #define DBGMCU_TIM3_STOP ((uint32_t)0x00000002) #define 
DBGMCU_TIM4_STOP ((uint32_t)0x00000004) #define DBGMCU_TIM5_STOP ((uint32_t)0x00000008) #define DBGMCU_TIM6_STOP ((uint32_t)0x00000010) #define DBGMCU_TIM7_STOP ((uint32_t)0x00000020) #define DBGMCU_TIM12_STOP ((uint32_t)0x00000040) #define DBGMCU_TIM13_STOP ((uint32_t)0x00000080) #define DBGMCU_TIM14_STOP ((uint32_t)0x00000100) #define DBGMCU_RTC_STOP ((uint32_t)0x00000400) #define DBGMCU_WWDG_STOP ((uint32_t)0x00000800) #define DBGMCU_IWDG_STOP ((uint32_t)0x00001000) #define DBGMCU_I2C1_SMBUS_TIMEOUT ((uint32_t)0x00200000) #define DBGMCU_I2C2_SMBUS_TIMEOUT ((uint32_t)0x00400000) #define DBGMCU_I2C3_SMBUS_TIMEOUT ((uint32_t)0x00800000) #define DBGMCU_CAN1_STOP ((uint32_t)0x02000000) #define DBGMCU_CAN2_STOP ((uint32_t)0x04000000) #define IS_DBGMCU_APB1PERIPH(PERIPH) ((((PERIPH) & 0xF91FE200) == 0x00) && ((PERIPH) != 0x00)) #define DBGMCU_TIM1_STOP ((uint32_t)0x00000001) #define DBGMCU_TIM8_STOP ((uint32_t)0x00000002) #define DBGMCU_TIM9_STOP ((uint32_t)0x00010000) #define DBGMCU_TIM10_STOP ((uint32_t)0x00020000) #define DBGMCU_TIM11_STOP ((uint32_t)0x00040000) #define IS_DBGMCU_APB2PERIPH(PERIPH) ((((PERIPH) & 0xFFF8FFFC) == 0x00) && ((PERIPH) != 0x00)) /** * @} */ /* Exported macro ------------------------------------------------------------*/ /* Exported functions --------------------------------------------------------*/ uint32_t DBGMCU_GetREVID(void); uint32_t DBGMCU_GetDEVID(void); void DBGMCU_Config(uint32_t DBGMCU_Periph, FunctionalState NewState); void DBGMCU_APB1PeriphConfig(uint32_t DBGMCU_Periph, FunctionalState NewState); void DBGMCU_APB2PeriphConfig(uint32_t DBGMCU_Periph, FunctionalState NewState); #ifdef __cplusplus } #endif #endif /* __STM32F4xx_DBGMCU_H */ /** * @} */ /** * @} */ /******************* (C) COPYRIGHT 2011 STMicroelectronics *****END OF FILE****/
1,928
403
<gh_stars>100-1000 package com.xcompany.xproject.auth.server.model; import java.io.Serializable; import java.util.Collection; import javax.persistence.CascadeType; import javax.persistence.Column; import javax.persistence.Entity; import javax.persistence.FetchType; import javax.persistence.GeneratedValue; import javax.persistence.GenerationType; import javax.persistence.Id; import javax.persistence.JoinColumn; import javax.persistence.JoinTable; import javax.persistence.ManyToMany; import javax.validation.constraints.NotNull; import org.hibernate.validator.constraints.NotEmpty; @Entity public class User implements Serializable { @Id @GeneratedValue(strategy = GenerationType.AUTO) private Integer id; // @NotEmpty private String name; @NotEmpty @Column(unique = true, nullable = false) private String login; @NotEmpty private String password; private String email; @NotNull private int platform; @ManyToMany(fetch = FetchType.EAGER) // @JoinTable(name = "user_role", joinColumns = { @JoinColumn(name = "user_id") }, inverseJoinColumns = { @JoinColumn(name = "role_id") }) @JoinTable(name = "user_role", joinColumns = @JoinColumn(name = "user_id", referencedColumnName = "id"), inverseJoinColumns = @JoinColumn(name = "role_id", referencedColumnName = "id")) private Collection<Role> roles; // @ManyToMany(fetch = FetchType.EAGER) @ManyToMany(cascade = {CascadeType.PERSIST, CascadeType.MERGE}) @JoinTable(name = "user_device", joinColumns = { @JoinColumn(name = "user_id", referencedColumnName = "id") }, inverseJoinColumns = { @JoinColumn(name = "device_id", referencedColumnName = "id") }) private Collection<Device> devices; private int authorized = 1; public User() { } public User(User user) { super(); this.id = user.getId(); this.name = user.getLogin(); this.login = user.getLogin(); this.password = <PASSWORD>(); this.email = user.getEmail(); this.roles = user.getRoles(); this.platform = user.getPlatform(); } public Integer getId() { return id; } public void setId(Integer id) { 
this.id = id; } public String getName() { return name; } public void setName(String name) { this.name = name; } public String getLogin() { return login; } public void setLogin(String login) { this.login = login; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public Collection<Role> getRoles() { return roles; } public void setRoles(Collection<Role> roles) { this.roles = roles; } public void setEmail(String email) { this.email = email; } public String getEmail() { return email; } public int getPlatform() { return platform; } public void setPlatform(Integer platform) { this.platform = platform; } public Collection<Device> getDevices() { return devices; } public void setDevices(Collection<Device> devices) { this.devices = devices; } public int authorized() { return authorized; } public void setAuthorized(int authorized) { this.authorized = authorized; } }
1,060
335
{ "word": "Zaffre", "definitions": [ "Impure cobalt oxide, formerly used to make smalt and blue enamels." ], "parts-of-speech": "Noun" }
71
587
<gh_stars>100-1000 // // SPDX-License-Identifier: BSD-3-Clause // Copyright (c) Contributors to the OpenEXR Project. // #ifndef INCLUDED_IMF_IMAGE_LEVEL_H #define INCLUDED_IMF_IMAGE_LEVEL_H //---------------------------------------------------------------------------- // // class ImageLevel // // For an explanation of images, levels and channels, // see the comments in header file Image.h. // //---------------------------------------------------------------------------- #include "ImfImageChannel.h" #include "ImfImageChannelRenaming.h" #include "ImfUtilExport.h" #include <ImathBox.h> #include <string> OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_ENTER class Image; class IMFUTIL_EXPORT_TYPE ImageLevel { public: // // Access to the image to which the level belongs. // Image& image () { return _image; } const Image& image () const { return _image; } // // Access to the level number and the data window of this level. // int xLevelNumber () const { return _xLevelNumber; } int yLevelNumber () const { return _yLevelNumber; } const IMATH_NAMESPACE::Box2i& dataWindow () const { return _dataWindow; } protected: friend class Image; IMFUTIL_EXPORT ImageLevel (Image& image, int xLevelNumber, int yLevelNumber); IMFUTIL_EXPORT virtual ~ImageLevel (); IMFUTIL_EXPORT virtual void resize (const IMATH_NAMESPACE::Box2i& dataWindow); IMFUTIL_EXPORT virtual void shiftPixels (int dx, int dy); virtual void insertChannel ( const std::string& name, PixelType type, int xSampling, int ySampling, bool pLinear) = 0; virtual void eraseChannel (const std::string& name) = 0; virtual void clearChannels () = 0; virtual void renameChannel (const std::string& oldName, const std::string& newName) = 0; virtual void renameChannels (const RenamingMap& oldToNewNames) = 0; IMFUTIL_EXPORT void throwChannelExists (const std::string& name) const; IMFUTIL_EXPORT void throwBadChannelName (const std::string& name) const; IMFUTIL_EXPORT void throwBadChannelNameOrType (const std::string& name) const; private: ImageLevel 
(const ImageLevel&); // not implemented ImageLevel& operator= (const ImageLevel&); // not implemented Image& _image; int _xLevelNumber; int _yLevelNumber; IMATH_NAMESPACE::Box2i _dataWindow; }; OPENEXR_IMF_INTERNAL_NAMESPACE_HEADER_EXIT #endif
1,001
1,306
/* * Copyright (C) 2011 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef ART_RUNTIME_CLASS_LINKER_H_ #define ART_RUNTIME_CLASS_LINKER_H_ #include <string> #include <utility> #include <vector> #include "base/macros.h" #include "base/mutex.h" #include "dex_file.h" #include "gtest/gtest.h" #include "root_visitor.h" #include "oat_file.h" namespace art { namespace gc { namespace space { class ImageSpace; } // namespace space } // namespace gc namespace mirror { class ClassLoader; class DexCache; class DexCacheTest_Open_Test; class IfTable; template<class T> class ObjectArray; class StackTraceElement; } // namespace mirror class InternTable; class ObjectLock; template<class T> class SirtRef; typedef bool (ClassVisitor)(mirror::Class* c, void* arg); class ClassLinker { public: // Creates the class linker by bootstrapping from dex files. static ClassLinker* CreateFromCompiler(const std::vector<const DexFile*>& boot_class_path, InternTable* intern_table) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Creates the class linker from an image. static ClassLinker* CreateFromImage(InternTable* intern_table) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ~ClassLinker(); bool IsInBootClassPath(const char* descriptor); // Finds a class by its descriptor, loading it if necessary. // If class_loader is null, searches boot_class_path_. 
mirror::Class* FindClass(const char* descriptor, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* FindSystemClass(const char* descriptor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Define a new a class based on a ClassDef from a DexFile mirror::Class* DefineClass(const char* descriptor, mirror::ClassLoader* class_loader, const DexFile& dex_file, const DexFile::ClassDef& dex_class_def) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds a class by its descriptor, returning NULL if it isn't wasn't loaded // by the given 'class_loader'. mirror::Class* LookupClass(const char* descriptor, const mirror::ClassLoader* class_loader) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds all the classes with the given descriptor, regardless of ClassLoader. void LookupClasses(const char* descriptor, std::vector<mirror::Class*>& classes) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* FindPrimitiveClass(char type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // General class unloading is not supported, this is used to prune // unwanted classes during image writing. bool RemoveClass(const char* descriptor, const mirror::ClassLoader* class_loader) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpAllClasses(int flags) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void DumpForSigQuit(std::ostream& os) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t NumLoadedClasses() LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. 
mirror::String* ResolveString(uint32_t string_idx, const mirror::ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a String with the given index from the DexFile, storing the // result in the DexCache. mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx, mirror::DexCache* dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identity the // target DexCache and ClassLoader to use for resolution. mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, const mirror::Class* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { return ResolveType(dex_file, type_idx, referrer->GetDexCache(), referrer->GetClassLoader()); } // Resolve a Type with the given index from the DexFile, storing the // result in the DexCache. The referrer is used to identify the // target DexCache and ClassLoader to use for resolution. mirror::Class* ResolveType(uint16_t type_idx, const mirror::ArtMethod* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* ResolveType(uint16_t type_idx, const mirror::ArtField* referrer) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a type with the given ID from the DexFile, storing the // result in DexCache. The ClassLoader is used to search for the // type, since it may be referenced from but not contained within // the given DexFile. mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx, mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a method with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. What is unique is the method type argument which // is used to determine if this method is a direct, static, or // virtual method. 
mirror::ArtMethod* ResolveMethod(const DexFile& dex_file, uint32_t method_idx, mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader, const mirror::ArtMethod* referrer, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtMethod* ResolveMethod(uint32_t method_idx, const mirror::ArtMethod* referrer, InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtField* ResolveField(uint32_t field_idx, const mirror::ArtMethod* referrer, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. What is unique is the is_static argument which is // used to determine if we are resolving a static or non-static // field. mirror::ArtField* ResolveField(const DexFile& dex_file, uint32_t field_idx, mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Resolve a field with a given ID from the DexFile, storing the // result in DexCache. The ClassLinker and ClassLoader are used as // in ResolveType. No is_static argument is provided so that Java // field resolution semantics are followed. mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx, mirror::DexCache* dex_cache, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get shorty from method index without resolution. Used to do handlerization. const char* MethodShorty(uint32_t method_idx, mirror::ArtMethod* referrer, uint32_t* length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true on success, false if there's an exception pending. // can_run_clinit=false allows the compiler to attempt to init a class, // given the restriction that no <clinit> execution is possible. 
bool EnsureInitialized(mirror::Class* c, bool can_run_clinit, bool can_init_fields) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initializes classes that have instances in the image but that have // <clinit> methods so they could not be initialized by the compiler. void RunRootClinits() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFile(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterOatFile(const OatFile& oat_file) LOCKS_EXCLUDED(dex_lock_); const std::vector<const DexFile*>& GetBootClassPath() { return boot_class_path_; } void VisitClasses(ClassVisitor* visitor, void* arg) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Less efficient variant of VisitClasses that doesn't hold the classlinker_classes_lock_ // when calling the visitor. 
void VisitClassesWithoutClassesLock(ClassVisitor* visitor, void* arg) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void VisitRoots(RootVisitor* visitor, void* arg, bool only_dirty, bool clean_dirty) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_, dex_lock_); mirror::DexCache* FindDexCache(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegistered(const DexFile& dex_file) const LOCKS_EXCLUDED(dex_lock_); void FixupDexCaches(mirror::ArtMethod* resolution_method) const LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Generate an oat file from a dex file bool GenerateOatFile(const std::string& dex_filename, int oat_fd, const std::string& oat_cache_filename); const OatFile* FindOatFileFromOatLocation(const std::string& location) LOCKS_EXCLUDED(dex_lock_); const OatFile* FindOatFileFromOatLocationLocked(const std::string& location) SHARED_LOCKS_REQUIRED(dex_lock_); // Finds the oat file for a dex location, generating the oat file if // it is missing or out of date. Returns the DexFile from within the // created oat file. const DexFile* FindOrCreateOatFileForDexLocation(const std::string& dex_location, uint32_t dex_location_checksum, const std::string& oat_location) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const DexFile* FindOrCreateOatFileForDexLocationLocked(const std::string& dex_location, uint32_t dex_location_checksum, const std::string& oat_location) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Find a DexFile within an OatFile given a DexFile location. Note // that this returns null if the location checksum of the DexFile // does not match the OatFile. 
const DexFile* FindDexFileInOatFileFromDexLocation(const std::string& location, uint32_t location_checksum) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Returns true if oat file contains the dex file with the given location and checksum. static bool VerifyOatFileChecksums(const OatFile* oat_file, const std::string& dex_location, uint32_t dex_location_checksum) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // TODO: replace this with multiple methods that allocate the correct managed type. template <class T> mirror::ObjectArray<T>* AllocObjectArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::Class>* AllocClassArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::String>* AllocStringArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::ArtMethod>* AllocArtMethodArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::IfTable* AllocIfTable(Thread* self, size_t ifcount) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::ArtField>* AllocArtFieldArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::StackTraceElement>* AllocStackTraceElementArray(Thread* self, size_t length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void VerifyClass(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass, mirror::Class::Status& oat_file_class_status) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ResolveClassExceptionHandlerTypes(const DexFile& dex_file, mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* CreateProxyClass(mirror::String* 
name, mirror::ObjectArray<mirror::Class>* interfaces, mirror::ClassLoader* loader, mirror::ObjectArray<mirror::ArtMethod>* methods, mirror::ObjectArray<mirror::ObjectArray<mirror::Class> >* throws) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::string GetDescriptorForProxy(const mirror::Class* proxy_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtMethod* FindMethodForProxy(const mirror::Class* proxy_class, const mirror::ArtMethod* proxy_method) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method when its class isn't yet initialized const void* GetOatCodeFor(const mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Get the oat code for a method from a method index. const void* GetOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); pid_t GetClassesLockOwner(); // For SignalCatcher. pid_t GetDexLockOwner(); // For SignalCatcher. const void* GetPortableResolutionTrampoline() const { return portable_resolution_trampoline_; } const void* GetQuickResolutionTrampoline() const { return quick_resolution_trampoline_; } InternTable* GetInternTable() const { return intern_table_; } // Attempts to insert a class into a class table. Returns NULL if // the class was inserted, otherwise returns an existing class with // the same descriptor and ClassLoader. mirror::Class* InsertClass(const char* descriptor, mirror::Class* klass, size_t hash) LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); private: explicit ClassLinker(InternTable*); const OatFile::OatMethod GetOatMethodFor(const mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker by bootstraping from dex files void InitFromCompiler(const std::vector<const DexFile*>& boot_class_path) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Initialize class linker from one or more images. 
void InitFromImage() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); OatFile& GetImageOatFile(gc::space::ImageSpace* space) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FinishInit() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For early bootstrapping by Init mirror::Class* AllocClass(Thread* self, mirror::Class* java_lang_Class, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Alloc* convenience functions to avoid needing to pass in mirror::Class* // values that are known to the ClassLinker such as // kObjectArrayClass and kJavaLangString etc. mirror::Class* AllocClass(Thread* self, size_t class_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::DexCache* AllocDexCache(Thread* self, const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtField* AllocArtField(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtMethod* AllocArtMethod(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* CreatePrimitiveClass(Thread* self, Primitive::Type type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* InitializePrimitiveClass(mirror::Class* primitive_class, Primitive::Type type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* CreateArrayClass(const char* descriptor, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void AppendToBootClassPath(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, mirror::Class* c, SafeMap<uint32_t, mirror::ArtField*>& field_map) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); size_t SizeOfClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def); void LoadClass(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def, 
SirtRef<mirror::Class>& klass, mirror::ClassLoader* class_loader) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it, SirtRef<mirror::Class>& klass, SirtRef<mirror::ArtField>& dst) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file, const ClassDataItemIterator& dex_method, SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // Finds the associated oat class for a dex_file and descriptor const OatFile::OatClass* GetOatClass(const DexFile& dex_file, uint16_t class_def_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void RegisterDexFileLocked(const DexFile& dex_file, SirtRef<mirror::DexCache>& dex_cache) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsDexFileRegisteredLocked(const DexFile& dex_file) const SHARED_LOCKS_REQUIRED(dex_lock_); void RegisterOatFileLocked(const OatFile& oat_file) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_); bool InitializeClass(mirror::Class* klass, bool can_run_clinit, bool can_init_parents) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool WaitForInitializeClass(mirror::Class* klass, Thread* self, ObjectLock& lock); bool ValidateSuperClassDescriptors(const mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameDescriptorInDifferentClassContexts(const char* descriptor, const mirror::Class* klass1, const mirror::Class* klass2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool IsSameMethodSignatureInDifferentClassContexts(const mirror::ArtMethod* method, const mirror::Class* klass1, const mirror::Class* klass2) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkClass(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces, Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); 
bool LinkSuperClass(SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LoadSuperAndInterfaces(SirtRef<mirror::Class>& klass, const DexFile& dex_file) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkMethods(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkVirtualMethods(SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkInterfaceMethods(SirtRef<mirror::Class>& klass, mirror::ObjectArray<mirror::Class>* interfaces) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkStaticFields(SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkInstanceFields(SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); bool LinkFields(SirtRef<mirror::Class>& klass, bool is_static) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceInstanceOffsets(SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceStaticOffsets(SirtRef<mirror::Class>& klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void CreateReferenceOffsets(SirtRef<mirror::Class>& klass, bool is_static, uint32_t reference_offsets) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // For use by ImageWriter to find DexCaches for its roots const std::vector<mirror::DexCache*>& GetDexCaches() { return dex_caches_; } const OatFile* FindOpenedOatFileForDexFile(const DexFile& dex_file) LOCKS_EXCLUDED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const OatFile* FindOpenedOatFileFromDexLocation(const std::string& dex_location, uint32_t dex_location_checksum) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, dex_lock_); const OatFile* FindOpenedOatFileFromOatLocation(const std::string& oat_location) SHARED_LOCKS_REQUIRED(dex_lock_); const DexFile* FindDexFileInOatLocation(const std::string& dex_location, uint32_t dex_location_checksum, const std::string& oat_location) 
EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const DexFile* VerifyAndOpenDexFileFromOatFile(const OatFile* oat_file, const std::string& dex_location, uint32_t dex_location_checksum) EXCLUSIVE_LOCKS_REQUIRED(dex_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtMethod* CreateProxyConstructor(Thread* self, SirtRef<mirror::Class>& klass, mirror::Class* proxy_class) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ArtMethod* CreateProxyMethod(Thread* self, SirtRef<mirror::Class>& klass, SirtRef<mirror::ArtMethod>& prototype) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); std::vector<const DexFile*> boot_class_path_; mutable ReaderWriterMutex dex_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER; std::vector<mirror::DexCache*> dex_caches_ GUARDED_BY(dex_lock_); std::vector<const OatFile*> oat_files_ GUARDED_BY(dex_lock_); // multimap from a string hash code of a class descriptor to // mirror::Class* instances. Results should be compared for a matching // Class::descriptor_ and Class::class_loader_. typedef std::multimap<size_t, mirror::Class*> Table; Table class_table_ GUARDED_BY(Locks::classlinker_classes_lock_); // Do we need to search dex caches to find image classes? bool dex_cache_image_class_lookup_required_; // Number of times we've searched dex caches for a class. After a certain number of misses we move // the classes into the class_table_ to avoid dex cache based searches. AtomicInteger failed_dex_cache_class_lookups_; mirror::Class* LookupClassFromTableLocked(const char* descriptor, const mirror::ClassLoader* class_loader, size_t hash) SHARED_LOCKS_REQUIRED(Locks::classlinker_classes_lock_, Locks::mutator_lock_); void MoveImageClassesToClassTable() LOCKS_EXCLUDED(Locks::classlinker_classes_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::Class* LookupClassFromImage(const char* descriptor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); // indexes into class_roots_. 
// needs to be kept in sync with class_roots_descriptors_. enum ClassRoot { kJavaLangClass, kJavaLangObject, kClassArrayClass, kObjectArrayClass, kJavaLangString, kJavaLangDexCache, kJavaLangRefReference, kJavaLangReflectArtField, kJavaLangReflectArtMethod, kJavaLangReflectProxy, kJavaLangStringArrayClass, kJavaLangReflectArtFieldArrayClass, kJavaLangReflectArtMethodArrayClass, kJavaLangClassLoader, kJavaLangThrowable, kJavaLangClassNotFoundException, kJavaLangStackTraceElement, kPrimitiveBoolean, kPrimitiveByte, kPrimitiveChar, kPrimitiveDouble, kPrimitiveFloat, kPrimitiveInt, kPrimitiveLong, kPrimitiveShort, kPrimitiveVoid, kBooleanArrayClass, kByteArrayClass, kCharArrayClass, kDoubleArrayClass, kFloatArrayClass, kIntArrayClass, kLongArrayClass, kShortArrayClass, kJavaLangStackTraceElementArrayClass, kClassRootsMax, }; mirror::ObjectArray<mirror::Class>* class_roots_; mirror::Class* GetClassRoot(ClassRoot class_root) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetClassRoot(ClassRoot class_root, mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); mirror::ObjectArray<mirror::Class>* GetClassRoots() { DCHECK(class_roots_ != NULL); return class_roots_; } static const char* class_roots_descriptors_[]; const char* GetClassRootDescriptor(ClassRoot class_root) { const char* descriptor = class_roots_descriptors_[class_root]; CHECK(descriptor != NULL); return descriptor; } mirror::IfTable* array_iftable_; bool init_done_; bool dex_caches_dirty_ GUARDED_BY(dex_lock_); bool class_table_dirty_ GUARDED_BY(Locks::classlinker_classes_lock_); InternTable* intern_table_; const void* portable_resolution_trampoline_; const void* quick_resolution_trampoline_; friend class ImageWriter; // for GetClassRoots FRIEND_TEST(ClassLinkerTest, ClassRootDescriptors); FRIEND_TEST(mirror::DexCacheTest, Open); FRIEND_TEST(ExceptionTest, FindExceptionHandler); FRIEND_TEST(ObjectTest, AllocObjectArray); DISALLOW_COPY_AND_ASSIGN(ClassLinker); }; } // namespace art #endif // 
ART_RUNTIME_CLASS_LINKER_H_
11,538
594
<reponame>Polaris-Dust/cfg4j<gh_stars>100-1000 /* * Copyright 2015-2018 <NAME> (<EMAIL>) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.cfg4j.source.git; import org.cfg4j.source.context.environment.Environment; /** * Adapter for {@link Environment} to provide git branch resolution through {@link BranchResolver} interface. * The adaptation process works as follows: * <ul> * <li>the environment name is split into tokens divided by "/"</li> * <li>first token is treated as a branch name</li> * <li>if the branch name is empty ("", or contains only whitespaces) then the "master" branch is used</li> * </ul> */ public class FirstTokenBranchResolver implements BranchResolver { @Override public String getBranchNameFor(Environment environment) { String[] tokens = environment.getName().split("/"); String branchName = tokens[0].trim(); if (branchName.isEmpty()) { branchName = "master"; } return branchName; } }
428
1,305
<filename>WDL/win32_curses/test.cpp #include "curses.h" #ifdef _WIN32 win32CursesCtx g_curses_context; // we only need the one instance #endif #ifdef _WIN32 int WINAPI WinMain(HINSTANCE hInstance, HINSTANCE hPrevInstance, LPSTR lpCmdLine, int nCmdShow) { g_curses_context.want_getch_runmsgpump = 1; // non-block curses_registerChildClass(hInstance); curses_CreateWindow(hInstance,&g_curses_context,"Sample Test App"); #else int main() { #endif initscr(); cbreak(); noecho(); nonl(); intrflush(stdscr,FALSE); keypad(stdscr,TRUE); nodelay(stdscr,TRUE); raw(); #if !defined(_WIN32) && !defined(MAC_NATIVE) ESCDELAY=0; // dont wait--at least on the console this seems to work. #endif if (has_colors()) // we don't use color yet, but we could { start_color(); init_pair(1, COLOR_WHITE, COLOR_BLUE); // normal status lines init_pair(2, COLOR_BLACK, COLOR_CYAN); // value } erase(); refresh(); float xpos=0,ypos=0, xdir=0.7, ydir=1.5; for (;;) { int t=getch(); if (t==27) break; else if (t== KEY_LEFT) xdir *=0.9; else if (t== KEY_RIGHT) xdir *=1.1; else if (t== KEY_UP) ydir *=1.1; else if (t== KEY_DOWN) ydir *=0.9; xpos+=xdir; ypos+=ydir; if (xpos >= COLS-1||xpos<1) { if (xpos<1)xpos=1; else xpos=COLS-1; xdir=-xdir; } if (ypos >= LINES-1||ypos<1) { if (ypos<1)ypos=1; else ypos=LINES-1; ydir=-ydir; } erase(); mvaddstr(ypos,xpos,"X"); Sleep(10); #ifdef _WIN32 if (!g_curses_context.m_hwnd) break; #endif } erase(); refresh(); endwin(); #ifdef _WIN32 if (g_curses_context.m_hwnd) DestroyWindow(g_curses_context.m_hwnd); curses_unregisterChildClass(hInstance); #endif return 0; }
762
852
#include "JetMETCorrections/FFTJetObjects/interface/FFTJetCorrectorSequenceTypes.h" #include "FWCore/Utilities/interface/typelookup.h" TYPELOOKUP_DATA_REG(FFTBasicJetCorrectorSequence); TYPELOOKUP_DATA_REG(FFTCaloJetCorrectorSequence); TYPELOOKUP_DATA_REG(FFTGenJetCorrectorSequence); TYPELOOKUP_DATA_REG(FFTPFJetCorrectorSequence); TYPELOOKUP_DATA_REG(FFTTrackJetCorrectorSequence); TYPELOOKUP_DATA_REG(FFTJPTJetCorrectorSequence);
169
3,765
/** * BSD-style license; for more info see http://pmd.sourceforge.net/license.html */ package net.sourceforge.pmd; import static org.junit.Assert.assertEquals; import java.io.File; import org.junit.Test; import net.sourceforge.pmd.lang.LanguageRegistry; import net.sourceforge.pmd.lang.LanguageVersion; import net.sourceforge.pmd.lang.LanguageVersionDiscoverer; import net.sourceforge.pmd.lang.vf.VfLanguageModule; /** * @author sergey.gorbaty * */ public class LanguageVersionDiscovererTest { /** * Test on VF file. */ @Test public void testVFFile() { LanguageVersionDiscoverer discoverer = new LanguageVersionDiscoverer(); File vfFile = new File("/path/to/MyPage.page"); LanguageVersion languageVersion = discoverer.getDefaultLanguageVersionForFile(vfFile); assertEquals("LanguageVersion must be VF!", LanguageRegistry.getLanguage(VfLanguageModule.NAME).getDefaultVersion(), languageVersion); } @Test public void testComponentFile() { LanguageVersionDiscoverer discoverer = new LanguageVersionDiscoverer(); File vfFile = new File("/path/to/MyPage.component"); LanguageVersion languageVersion = discoverer.getDefaultLanguageVersionForFile(vfFile); assertEquals("LanguageVersion must be VF!", LanguageRegistry.getLanguage(VfLanguageModule.NAME).getDefaultVersion(), languageVersion); } }
510
446
<reponame>prakharShuklaOfficial/Mastering-Python-for-Finance-source-codes """ Trinomial Lattice COM server """ from TrinomialLattice import TrinomialLattice import pythoncom class TrinomialLatticeCOMServer: _public_methods_ = ['pricer'] _reg_progid_ = "TrinomialLatticeCOMServer.Pricer" _reg_clsid_ = pythoncom.CreateGuid() def pricer(self, S0, K, r, T, N, sigma, is_call=True, div=0., is_eu=False): model = TrinomialLattice(S0, K, r, T, N, {"sigma": sigma, "div": div, "is_call": is_call, "is_eu": is_eu}) return model.price() if __name__ == "__main__": print "Registering COM server..." import win32com.server.register win32com.server.register.UseCommandLine(TrinomialLatticeCOMServer)
443
920
// // StackViewController.h // StackViewController // // Created by <NAME> on 2016-04-16. // Copyright © 2016 Seed Platform, Inc. All rights reserved. // #import <UIKit/UIKit.h> //! Project version number for StackViewController. FOUNDATION_EXPORT double StackViewControllerVersionNumber; //! Project version string for StackViewController. FOUNDATION_EXPORT const unsigned char StackViewControllerVersionString[]; // In this header, you should import all the public headers of your framework using statements like #import <StackViewController/PublicHeader.h>
151
325
{ "issue": { "id": "53650436-8e35-49a3-a610-56b442ae7620", "type": "issue", "state": "OPEN", "start": 1460537793322, "severity": 5, "text": "Garbage Collection Activity High (11%)", "suggestion": "Tune your Garbage Collector, reduce allocation rate through code changes", "link": "https://xyz.com/#/?snapshotId=rjhkZXdNzegliVVEswMScGNn0YY", "zone": "prod", "fqdn": "host1.demo.com", "entity": "jvm", "entityLabel": "Test jvm", "tags": "production, documents, elasticsearch", "container": "test-container" } }
247
28,056
<gh_stars>1000+ package com.alibaba.json.bvt.mixins; import com.alibaba.fastjson.JSON; import com.alibaba.fastjson.annotation.JSONField; import junit.framework.TestCase; import org.junit.Assert; public class MixinAPITest extends TestCase { static class BaseClass { public int a; public int b; public BaseClass() { } public BaseClass(int a, int b) { this.a = a; this.b = b; } } class MixIn1 { @JSONField(name = "apple") public int a; @JSONField(name = "banana") public int b; } public void test_mixIn_get_methods() throws Exception { BaseClass base = new BaseClass(1, 2); JSON.addMixInAnnotations(BaseClass.class, MixIn1.class); Assert.assertEquals("{\"apple\":1,\"banana\":2}", JSON.toJSONString(base)); Assert.assertTrue(MixIn1.class == JSON.getMixInAnnotations(BaseClass.class)); JSON.clearMixInAnnotations(); Assert.assertTrue(null == JSON.getMixInAnnotations(BaseClass.class)); JSON.removeMixInAnnotations(BaseClass.class); } }
492
373
<filename>src/oic/utils/session_backend.py import json import time from abc import ABCMeta from abc import abstractmethod from typing import Any from typing import Dict from typing import List from typing import Optional from typing import Union from typing import cast from oic.utils.time_util import time_sans_frac class AuthnEvent(object): def __init__( self, uid, salt, valid=3600, authn_info=None, time_stamp=0, authn_time=None, valid_until=None, ): """ Create a representation of an authentication event. :param uid: The local user identifier :param salt: Salt to be used in creating a sub :param valid: How long the authentication is expected to be valid :param authn_info: Info about the authentication event :return: """ self.uid = uid self.salt = <PASSWORD> self.authn_time = authn_time or (int(time_stamp) or time_sans_frac()) self.valid_until = valid_until or (self.authn_time + int(valid)) self.authn_info = authn_info def valid(self): return self.valid_until > time.time() def valid_for(self): return self.valid_until - time.time() def to_json(self): """Serialize AuthnEvent to JSON.""" return json.dumps(self.__dict__) @classmethod def from_json(cls, json_struct): """Create AuthnEvent from JSON.""" dic = json.loads(json_struct) return cls(**dic) class SessionBackend(metaclass=ABCMeta): """Backend for storing sessionDB data.""" @abstractmethod def __setitem__(self, key: str, value: Dict[str, Union[str, bool]]) -> None: """Store the session information under the session_id.""" @abstractmethod def __getitem__(self, key: str) -> Dict[str, Union[str, bool]]: """ Retrieve the session information based os session_id. @raises KeyError when no key is found. 
""" @abstractmethod def __delitem__(self, key: str) -> None: """Remove the stored session from storage.""" @abstractmethod def __contains__(self, key: str) -> bool: """Test presence of key in storage.""" @abstractmethod def get_by_uid(self, uid: str) -> List[str]: """Return session ids (keys) based on `uid` (internal user identifier).""" @abstractmethod def get_by_sub(self, sub: str) -> List[str]: """Return session ids based on `sub` (external user identifier).""" @abstractmethod def get(self, attr: str, val: str) -> List[str]: """Return session ids based on attribute name and value.""" def get_client_ids_for_uid(self, uid: str) -> List[str]: """Return client ids that have a session for given uid.""" return [cast(str, self[sid]["client_id"]) for sid in self.get_by_uid(uid)] def get_verified_logout(self, uid: str) -> Optional[str]: """Return logout verification key for given uid.""" # Since all the sessions should be the same, we return the first one sids = self.get_by_uid(uid) if len(sids) == 0: return None _dict = self[sids[0]] if "verified_logout" not in _dict: return None return cast(str, _dict["verified_logout"]) def get_token_ids(self, uid: str) -> List[str]: """Return id_tokens for the given uid.""" return [cast(str, self[sid]["id_token"]) for sid in self.get_by_uid(uid)] def is_revoke_uid(self, uid: str) -> bool: """Return if the session is revoked.""" # We do not care which session it is - once revoked, al are revoked return any([self[sid]["revoked"] for sid in self.get_by_uid(uid)]) def update(self, key: str, attribute: str, value: Any): """ Update information stored. If the key is not know a new entry will be constructed. 
:param key: Key to the database :param attribute: Attribute name :param value: Attribute value """ if key not in self: self[key] = {attribute: value} else: item = self[key] item[attribute] = value self[key] = item def get_uid_by_sub(self, sub: str) -> Optional[str]: """Return User id based on sub.""" for sid in self.get_by_sub(sub): return AuthnEvent.from_json(self[sid]["authn_event"]).uid return None def get_uid_by_sid(self, sid: str) -> str: """Return User id based on session ID.""" return AuthnEvent.from_json(self[sid]["authn_event"]).uid class DictSessionBackend(SessionBackend): """ Simple implementation of `SessionBackend` based on dictionary. This should really not be used in production. """ def __init__(self): """Create the storage.""" self.storage: Dict[str, Dict[str, Union[str, bool]]] = {} def __setitem__(self, key: str, value: Dict[str, Union[str, bool]]) -> None: """Store the session info in the storage.""" self.storage[key] = value def __getitem__(self, key: str) -> Dict[str, Union[str, bool]]: """Retrieve session information based on session id.""" return self.storage[key] def __delitem__(self, key: str) -> None: """Delete the session info.""" del self.storage[key] def __contains__(self, key: str) -> bool: return key in self.storage def get_by_sub(self, sub: str) -> List[str]: """Return session ids based on sub.""" return [ sid for sid, session in self.storage.items() if session.get("sub") == sub ] def get_by_uid(self, uid: str) -> List[str]: """Return session ids based on uid.""" return [ sid for sid, session in self.storage.items() if AuthnEvent.from_json(session["authn_event"]).uid == uid ] def get(self, attr: str, val: str) -> List[str]: """Return session ids based on attribute name and value.""" return [ sid for sid, session in self.storage.items() if session.get(attr) == val ]
2,548
5,169
{ "name": "GCanvas", "version": "0.1.0", "summary": "GCanvas Source.", "description": "A cross-platform fast Canvas render engine.", "homepage": "https://github.com/alibaba/GPlatform", "license": { "type": "Copyright", "text": " Alibaba-INC copyright\n" }, "authors": { "jwxbond": "<EMAIL>" }, "platforms": { "ios": "7.0" }, "source": { "git": "https://github.com/alibaba/GPlatform.git", "tag": "0.1.0" }, "source_files": [ "GCanvas/ios/Classes/**/*.{h,m,mm}", "GCanvas/core/src/GCanvas.{hpp,cpp}", "GCanvas/core/src/GCanvasManager.{h,cpp}", "GCanvas/core/src/gcanvas/shaders/*.glsl", "GCanvas/core/src/gcanvas/GCanvas2dContext.{h,cpp}", "GCanvas/core/src/gcanvas/GConvert.{h,cpp}", "GCanvas/core/src/gcanvas/GFillStyle.h", "GCanvas/core/src/gcanvas/GPath.{h,cpp}", "GCanvas/core/src/gcanvas/GPoint.{h,cpp}", "GCanvas/core/src/gcanvas/GShader.{h,cpp}", "GCanvas/core/src/gcanvas/GShaderManager.{h,cpp}", "GCanvas/core/src/gcanvas/GTextDefine.h", "GCanvas/core/src/gcanvas/GTexture.{h,cpp}", "GCanvas/core/src/gcanvas/GTransform.h", "GCanvas/core/src/gcanvas/GTriangulate.{h,cpp}", "GCanvas/core/src/gcanvas/GWebglContext.{h,cpp}", "GCanvas/core/src/memory/*.h", "GCanvas/core/src/png/**/*.*", "GCanvas/core/src/support/DynArray.h", "GCanvas/core/src/support/Encode.{h,cpp}", "GCanvas/core/src/support/Lesser.h", "GCanvas/core/src/support/Log.{h,cpp}", "GCanvas/core/src/support/Util.{h,cpp}" ], "public_header_files": [ "GCanvas/ios/Classes/**/*.h", "GCanvas/core/src/gcanvas/GTextDefine.h" ], "user_target_xcconfig": { "FRAMEWORK_SEARCH_PATHS": "'$(PODS_ROOT)/GCanvas'" }, "requires_arc": true, "frameworks": [ "Foundation", "UIKit", "GLKit" ], "libraries": "stdc++", "pod_target_xcconfig": { "OTHER_CFLAGS": "-DIOS" } }
962
1,020
<gh_stars>1000+ /* * Copyright 2017 Amazon.com, Inc. or its affiliates. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"). You may * not use this file except in compliance with the License. A copy of the * License is located at * * http://aws.amazon.com/apache2.0/ * * or in the "LICENSE" file accompanying this file. This file is distributed * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either * express or implied. See the License for the specific language governing * permissions and limitations under the License. */ package com.amazonaws.blox.dataservice.integration; import static org.assertj.core.api.Assertions.assertThat; import static org.assertj.core.api.Assertions.assertThatThrownBy; import com.amazonaws.blox.dataservicemodel.v1.exception.ResourceExistsException; import com.amazonaws.blox.dataservicemodel.v1.model.EnvironmentId; import com.amazonaws.blox.dataservicemodel.v1.model.wrappers.CreateEnvironmentResponse; import org.junit.Test; public class CreateEnvironmentIntegrationTest extends DataServiceIntegrationTestBase { private static final String CLUSTER_ONE = "cluster1"; private static final String CLUSTER_TWO = "cluster2"; private static final DataServiceModelBuilder models = DataServiceModelBuilder.builder().build(); private static final EnvironmentId createdEnvironmentId1 = models.environmentId().cluster(CLUSTER_ONE).build(); private static final EnvironmentId createdEnvironmentId2 = models.environmentId().cluster(CLUSTER_TWO).build(); @Test public void testCreateEnvironmentSuccessful() throws Exception { final CreateEnvironmentResponse createEnvironmentResponse = dataService.createEnvironment( models.createEnvironmentRequest().environmentId(createdEnvironmentId1).build()); assertThat(createEnvironmentResponse.getEnvironment().getEnvironmentId()) .isEqualTo(createdEnvironmentId1); } @Test public void testCreateAnEnvironmentAlreadyExist() throws Exception { dataService.createEnvironment( 
models.createEnvironmentRequest().environmentId(createdEnvironmentId1).build()); assertThatThrownBy( () -> dataService.createEnvironment( models.createEnvironmentRequest().environmentId(createdEnvironmentId1).build())) .isInstanceOf(ResourceExistsException.class) .hasMessageContaining( String.format("environment with id %s already exists", createdEnvironmentId1)); } @Test public void testCreateTwoEnvironmentsWithTheSameNameButDifferentClusters() throws Exception { final CreateEnvironmentResponse createEnvironmentResponse1 = dataService.createEnvironment( models.createEnvironmentRequest().environmentId(createdEnvironmentId1).build()); assertThat(createEnvironmentResponse1.getEnvironment().getEnvironmentId()) .isEqualTo(createdEnvironmentId1); final CreateEnvironmentResponse createEnvironmentResponse2 = dataService.createEnvironment( models.createEnvironmentRequest().environmentId(createdEnvironmentId2).build()); assertThat(createEnvironmentResponse2.getEnvironment().getEnvironmentId()) .isEqualTo(createdEnvironmentId2); } }
988
1,705
<reponame>tomhaigh/aws-sdk-net<gh_stars>1000+ { "noArgOverloads": [ "DescribeLogGroups" ], "useNullableType" : { "LogGroup" : [ "retentionInDays" ] }, "dataTypeSwap" : { "InputLogEvent" : { "timestamp" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" } }, "OutputLogEvent" : { "timestamp" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" }, "ingestionTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" } }, "GetLogEventsRequest" : { "startTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" }, "endTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" } }, "LogGroup" : { "creationTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" } }, "LogStream" : { "creationTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" }, "firstEventTimestamp" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : 
"DateTimeEpochLongMillisecondsUnmarshaller" }, "lastEventTimestamp" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" }, "lastIngestionTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" } }, "MetricFilter" : { "creationTime" : { "Type" : "DateTime", "Marshaller" : "Amazon.Runtime.Internal.Transform.CustomMarshallTransformations.ConvertDateTimeToEpochMilliseconds", "Unmarshaller" : "DateTimeEpochLongMillisecondsUnmarshaller" }, } } }
1,150
360
<reponame>wotchin/openGauss-server /*------------------------------------------------------------------------- * pg_regress.h --- regression test driver * * Portions Copyright (c) 1996-2012, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/test/regress/pg_regress.h *------------------------------------------------------------------------- */ #include "postgres_fe.h" #include <unistd.h> #ifndef WIN32 #define PID_TYPE pid_t #define INVALID_PID (-1) #else #define PID_TYPE HANDLE #define INVALID_PID INVALID_HANDLE_VALUE #endif #define MAX_TUPLE_ONLY_STRLEN 5 #define MAX_COLUMN_SEP_STRLEN 5 typedef struct pgxc_node_info { int co_num; int* co_port; int* co_ctl_port; int* co_sctp_port; int* co_pool_port; int co_pid[10]; int dn_num; int* dn_port; int* dns_port; int dn_pid[20]; int* dn_pool_port; int* dn_ctl_port; int* dn_sctp_port; int* dns_ctl_port; int* dns_sctp_port; int* dn_primary_port; int* dn_standby_port; int* dn_secondary_port; int gtm_port; int gtm_pid; bool keep_data; bool run_check; int shell_pid[50]; int shell_count; } pgxc_node_info; extern pgxc_node_info myinfo; /* simple list of strings */ typedef struct _stringlist { char* str; struct _stringlist* next; } _stringlist; /* Structure corresponding to the regression * configuration file: regress.conf */ typedef struct tagREGR_CONF_ITEMS_STRU { char acFieldSepForAllText[MAX_COLUMN_SEP_STRLEN + 5]; /* Column seperator for * table query result * for aligned and * unaligned text * +2 for -C, +2 for * two " character and * +1 for \0 */ char acTuplesOnly[MAX_TUPLE_ONLY_STRLEN]; /* String indicating the value * of "column_name_present" * configuration item. 
-t * indicates to print tuples * only*/ } REGR_CONF_ITEMS_STRU; /* Structure for the storage of the details of the replacement pattern strings * that may be provided in the regress.conf */ typedef struct tagREGR_REPLACE_PATTERNS_STRU { int iNumOfPatterns; /* Total number of patterns already * loaded into Memry */ int iMaxNumOfPattern; /* Max num of pattern that can be loaded * as of now */ int iRemainingPattBuffSize; /* Remaining space in the storage * buffer */ unsigned int* puiPatternOffset; /* Points to the start of the memory * block allocated for the storage of * the replacement pattern strings and * their values including the index for * them*/ unsigned int* puiPattReplValOffset; /* Points to the index for the strings * that are to be replaced for the * replacement pattern strings */ char* pcBuf; /* Points to the memory where actual * replacement pattern strings and their * corresponding values are stored */ } REGR_REPLACE_PATTERNS_STRU; /* To store the values of the regress.conf values */ extern REGR_CONF_ITEMS_STRU g_stRegrConfItems; typedef PID_TYPE (*test_function)(const char*, _stringlist**, _stringlist**, _stringlist**); typedef void (*init_function)(void); typedef PID_TYPE (*diag_function)(char*); extern char* bindir; extern char* libdir; extern char* datadir; extern char* host_platform; extern _stringlist* dblist; extern bool debug; extern char* inputdir; extern char* outputdir; extern char* launcher; /* * This should not be global but every module should be able to read command * line parameters. */ extern char* psqldir; extern const char* basic_diff_opts; extern const char* pretty_diff_opts; int regression_main(int argc, char* argv[], init_function ifunc, test_function tfunc, diag_function dfunc); void add_stringlist_item(_stringlist** listhead, const char* str); PID_TYPE spawn_process(const char* cmdline); void exit_nicely(int code); void replace_string(char* string, char* replace, char* replacement); bool file_exists(const char* file);
2,439
435
package datawave.query.index.lookup; import com.google.common.base.Preconditions; import com.google.common.collect.Iterators; import datawave.query.jexl.visitors.JexlStringBuildingVisitor; import datawave.query.tables.RangeStreamScanner; import datawave.query.util.Tuple2; import org.apache.commons.jexl2.parser.JexlNode; import java.util.Iterator; /** * Provides a core set of variables for the ScannerStream, Union, and Intersection. * * A reference to the underlying {@link datawave.query.tables.RangeStreamScanner} is required for seeking. * * Note that the BaseIndexStream does not implement the {@link IndexStream#seek(String)} method. Inheriting classes are responsible for determining the correct * implementation. */ public abstract class BaseIndexStream implements IndexStream { protected RangeStreamScanner rangeStreamScanner; protected EntryParser entryParser; protected JexlNode node; protected StreamContext context; protected IndexStream debugDelegate; // variables to support the PeekingIterator interface protected Iterator<Tuple2<String,IndexInfo>> backingIter; protected Tuple2<String,IndexInfo> peekedElement; protected boolean hasPeeked = false; public BaseIndexStream(RangeStreamScanner rangeStreamScanner, EntryParser entryParser, JexlNode node, StreamContext context, IndexStream debugDelegate) { this.rangeStreamScanner = Preconditions.checkNotNull(rangeStreamScanner); this.entryParser = Preconditions.checkNotNull(entryParser); this.node = node; this.backingIter = Iterators.transform(this.rangeStreamScanner, this.entryParser); this.context = context; this.debugDelegate = debugDelegate; } public BaseIndexStream(Iterator<Tuple2<String,IndexInfo>> iterator, JexlNode node, StreamContext context, IndexStream debugDelegate) { this.rangeStreamScanner = null; this.entryParser = null; this.node = node; this.backingIter = Preconditions.checkNotNull(iterator); this.context = context; this.debugDelegate = debugDelegate; } // Empty constructor used by the Union and 
Intersection classes. public BaseIndexStream() { } /** * Reset the backing iterator after a seek. State must stay in sync with changes to the RangeStreamScanner. */ public void resetBackingIterator() { if (rangeStreamScanner != null && entryParser != null) { this.peekedElement = null; this.hasPeeked = false; this.backingIter = Iterators.transform(this.rangeStreamScanner, this.entryParser); } } @Override public boolean hasNext() { return (hasPeeked && peekedElement != null) || backingIter.hasNext(); } @Override public Tuple2<String,IndexInfo> peek() { if (!hasPeeked) { if (backingIter.hasNext()) { peekedElement = backingIter.next(); } else { peekedElement = null; } hasPeeked = true; } return peekedElement; } @Override public Tuple2<String,IndexInfo> next() { if (!hasPeeked) { return backingIter.next(); } Tuple2<String,IndexInfo> result = peekedElement; hasPeeked = false; peekedElement = null; return result; } @Override public void remove() {} @Override public StreamContext context() { return context; } @Override public String getContextDebug() { if (debugDelegate == null) { return context + ": ScannerStream for " + JexlStringBuildingVisitor.buildQuery(node) + " (next = " + (hasNext() ? peek() : null) + ")"; } else { return debugDelegate.getContextDebug(); } } @Override public JexlNode currentNode() { return node; } }
1,550
651
<gh_stars>100-1000 package Library; import java.math.BigInteger; public class Class extends _ExternBase_Class { private final BigInteger n; public Class(BigInteger n) { this.n = n; } public static void SayHi() { System.out.println("Hello!"); } public BigInteger Get() { return n; } }
137
977
<filename>src/external/IntelRDFPMathLib20U2/LIBRARY/float128/dpml_acosh_t.h /****************************************************************************** Copyright (c) 2007-2018, Intel Corp. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ #include "endian.h" static const TABLE_UNION max_direct_x[] = { DATA_1x2( 0x02ccc470, 0x400fe8cf ) }; static const TABLE_UNION max_asym_x[] = { DATA_1x2( 0xd1a81cd8, 0x41a46ac2 ) }; #define EVALUATE_ASYM_RANGE_POLYNOMIAL(x,c,y) \ POLY_9(x,c,y) static const TABLE_UNION asym_range_coef[] = { DATA_1x2( 0x00000000, 0x3fd00000 ), DATA_1x2( 0xffffff07, 0x3fb7ffff ), DATA_1x2( 0xaaaddfbf, 0x3faaaaaa ), DATA_1x2( 0xfdf6faba, 0x3fa17fff ), DATA_1x2( 0x7de0c4ff, 0x3f993334 ), DATA_1x2( 0x3680db02, 0x3f933fc5 ), DATA_1x2( 0xa311158b, 0x3f8eb0ca ), DATA_1x2( 0x0a8b1f0a, 0x3f88674b ), DATA_1x2( 0x0da7b094, 0x3f8b082b ), }; static const TABLE_UNION half_huge_x[] = { DATA_1x2( 0xffffffff, 0x7fdfffff ) }; static const TABLE_UNION log_2[] = { DATA_1x2( 0xfefa39ef, 0x3fe62e42 ) };
1,000
522
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ #ifndef TENSORFLOW_COMPILER_XLA_SERVICE_COMPILATION_CACHE_H_ #define TENSORFLOW_COMPILER_XLA_SERVICE_COMPILATION_CACHE_H_ #include <map> #include <memory> #include <string> #include "tensorflow/compiler/xla/service/executable.h" #include "tensorflow/compiler/xla/service/hlo_module_config.h" #include "tensorflow/compiler/xla/service/versioned_computation_handle.h" #include "tensorflow/compiler/xla/types.h" #include "tensorflow/core/platform/macros.h" #include "tensorflow/core/platform/mutex.h" #include "tensorflow/core/platform/thread_annotations.h" namespace xla { // A cache which stores Executables indexed by computation handle and version. class CompilationCache { public: CompilationCache() {} // Insert the given Executable into the cache. Return a bare Executable // pointer for the caller to use. Note: the returned pointer will *not* be the // same as the given unique pointer if the computation already exists in the // cache. See comments in the .cc implementation for details of this case. // // module_config is provided by the caller, instead of being taken from the // executable, so that we can insert keys into the compilation cache that are // devoid of layout (where XLA gets to choose what layout to compile). 
// // A shared_ptr is returned so the caller can keep the Executable from being // destructed in the event that the Executable is evicted from the // computation cache (and the cache's shared_ptr to the Executable is // destructed). std::shared_ptr<Executable> Insert(std::unique_ptr<Executable> executable, const HloModuleConfig& module_config); // Lookup the Executable for the specified versioned computation in the cache. // Return a shared_ptr to the Executable if it exists in the cache. Return // nullptr otherwise. std::shared_ptr<Executable> LookUp( const VersionedComputationHandle& versioned_handle, const HloModuleConfig& module_config) const; protected: mutable tensorflow::mutex mutex_; // Map from versioned handle with program layout to Executable built // for that computation version and program layout. using CacheKey = string; CacheKey BuildKey(const VersionedComputationHandle& versioned_handle, const HloModuleConfig& module_config) const; std::map<CacheKey, std::shared_ptr<Executable>> cache_ GUARDED_BY(mutex_); private: TF_DISALLOW_COPY_AND_ASSIGN(CompilationCache); }; } // namespace xla #endif // TENSORFLOW_COMPILER_XLA_SERVICE_COMPILATION_CACHE_H_
980
1,825
<filename>unidbg-ios/src/main/java/com/github/unidbg/ios/struct/kernel/NotifyServerRegisterMachPortRequest.java package com.github.unidbg.ios.struct.kernel; import com.github.unidbg.pointer.UnidbgStructure; import com.sun.jna.Pointer; import java.util.Arrays; import java.util.List; public class NotifyServerRegisterMachPortRequest extends UnidbgStructure { public NotifyServerRegisterMachPortRequest(Pointer p) { super(p); } public int pad; public int name; public int namelen; public int flags; public int port; public int[] values = new int[4]; public int token; @Override protected List<String> getFieldOrder() { return Arrays.asList("pad", "name", "namelen", "flags", "port", "values", "token"); } }
286
1,666
package org.grobid.core.engines.tagging; import org.junit.Test; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.*; public class GenericTaggerUtilsTest { @Test public void testGetPlainLabel_normalValue() throws Exception { assertThat(GenericTaggerUtils.getPlainLabel("<status>"), is("<status>")); } @Test public void testGetPlainLabel_startingValue() throws Exception { assertThat(GenericTaggerUtils.getPlainLabel("I-<status>"), is("<status>")); } @Test public void testGetPlainLabel_I_startingValue() throws Exception { assertThat(GenericTaggerUtils.getPlainLabel("I-<status>"), is("<status>")); } @Test public void testGetPlainLabel_nullValue() throws Exception { assertNull(GenericTaggerUtils.getPlainLabel(null)); } @Test public void testIsBeginningOfEntity_true() throws Exception { assertTrue(GenericTaggerUtils.isBeginningOfEntity("I-<status>")); } @Test public void testIsBeginningOfEntity_false() throws Exception { assertFalse(GenericTaggerUtils.isBeginningOfEntity("<status>")); } @Test public void testIsBeginningOfEntity_false2() throws Exception { assertFalse(GenericTaggerUtils.isBeginningOfEntity("<I-status>")); } @Test public void testIsBeginningOfIOBEntity_B_true() throws Exception { assertTrue(GenericTaggerUtils.isBeginningOfIOBEntity("B-<status>")); } @Test public void testIsBeginningOfEntity_B_false2() throws Exception { assertFalse(GenericTaggerUtils.isBeginningOfEntity("<B-status>")); } }
605
384
/* * This file is part of the GROMACS molecular simulation package. * * Copyright (c) 2009-2018, The GROMACS development team. * Copyright (c) 2019, by the GROMACS development team, led by * <NAME>, <NAME>, <NAME>, and <NAME>, * and including many others, as listed in the AUTHORS file in the * top-level source directory and at http://www.gromacs.org. * * GROMACS is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public License * as published by the Free Software Foundation; either version 2.1 * of the License, or (at your option) any later version. * * GROMACS is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with GROMACS; if not, see * http://www.gnu.org/licenses, or write to the Free Software Foundation, * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * If you want to redistribute modifications to GROMACS, please * consider that scientific software is very special. Version * control is crucial - bugs must be traceable. We will be happy to * consider code for inclusion in the official distribution, but * derived work must not be called official GROMACS. Details are found * in the README & COPYING files - if they are missing, get the * official version at http://www.gromacs.org. * * To help us fund GROMACS development, we humbly ask that you cite * the research papers on the package. Check out http://www.gromacs.org. */ /*! \internal \file * \brief * Implements functions in position.h. 
* * \author <NAME> <<EMAIL>> * \ingroup module_selection */ #include "gmxpre.h" #include "position.h" #include <cstring> #include "gromacs/math/vec.h" #include "gromacs/selection/indexutil.h" #include "gromacs/utility/gmxassert.h" #include "gromacs/utility/smalloc.h" gmx_ana_pos_t::gmx_ana_pos_t() { x = nullptr; v = nullptr; f = nullptr; gmx_ana_indexmap_clear(&m); nalloc_x = 0; } gmx_ana_pos_t::~gmx_ana_pos_t() { sfree(x); sfree(v); sfree(f); gmx_ana_indexmap_deinit(&m); } /*! * \param[in,out] pos Position data structure. * \param[in] n Maximum number of positions. * \param[in] isize Maximum number of atoms. * * Ensures that enough memory is allocated in \p pos to calculate \p n * positions from \p isize atoms. */ void gmx_ana_pos_reserve(gmx_ana_pos_t* pos, int n, int isize) { GMX_RELEASE_ASSERT(n >= 0, "Invalid position allocation count"); // Always reserve at least one entry to make NULL checks against pos->x // and gmx_ana_pos_reserve_velocities/forces() work as expected in the case // that there are actually no positions. if (n == 0) { n = 1; } if (pos->nalloc_x < n) { pos->nalloc_x = n; srenew(pos->x, n); if (pos->v) { srenew(pos->v, n); } if (pos->f) { srenew(pos->f, n); } } if (isize >= 0) { gmx_ana_indexmap_reserve(&pos->m, n, isize); } } /*! * \param[in,out] pos Position data structure. * * Currently, this function can only be called after gmx_ana_pos_reserve() * has been called at least once with a \p n >= 0. */ void gmx_ana_pos_reserve_velocities(gmx_ana_pos_t* pos) { GMX_RELEASE_ASSERT(pos->nalloc_x > 0, "No memory reserved yet for positions"); if (!pos->v) { snew(pos->v, pos->nalloc_x); } } /*! * \param[in,out] pos Position data structure. * * Currently, this function can only be called after gmx_ana_pos_reserve() * has been called at least once with a \p n >= 0. */ void gmx_ana_pos_reserve_forces(gmx_ana_pos_t* pos) { GMX_RELEASE_ASSERT(pos->nalloc_x > 0, "No memory reserved yet for positions"); if (!pos->f) { snew(pos->f, pos->nalloc_x); } } /*! 
* \param[in,out] pos Position data structure. * \param[in] n Maximum number of positions. * \param[in] isize Maximum number of atoms. * \param[in] bVelocities Whether to reserve space for velocities. * \param[in] bForces Whether to reserve space for forces. * * Ensures that enough memory is allocated in \p pos to calculate \p n * positions from \p isize atoms. * * This method needs to be called instead of gmx_ana_pos_reserve() if the * intent is to use gmx_ana_pos_append_init()/gmx_ana_pos_append(). */ void gmx_ana_pos_reserve_for_append(gmx_ana_pos_t* pos, int n, int isize, bool bVelocities, bool bForces) { gmx_ana_pos_reserve(pos, n, isize); snew(pos->m.mapb.a, isize); pos->m.mapb.nalloc_a = isize; if (bVelocities) { gmx_ana_pos_reserve_velocities(pos); } if (bForces) { gmx_ana_pos_reserve_forces(pos); } } /*! * \param[out] pos Position data structure to initialize. * \param[in] x Position vector to use. */ void gmx_ana_pos_init_const(gmx_ana_pos_t* pos, const rvec x) { snew(pos->x, 1); snew(pos->v, 1); snew(pos->f, 1); pos->nalloc_x = 1; copy_rvec(x, pos->x[0]); clear_rvec(pos->v[0]); clear_rvec(pos->f[0]); gmx_ana_indexmap_init(&pos->m, nullptr, nullptr, INDEX_UNKNOWN); } /*! * \param[in,out] dest Destination positions. * \param[in] src Source positions. * \param[in] bFirst If true, memory is allocated for \p dest and a full * copy is made; otherwise, only variable parts are copied, and no memory * is allocated. * * \p dest should have been initialized somehow (calloc() is enough). 
*/ void gmx_ana_pos_copy(gmx_ana_pos_t* dest, gmx_ana_pos_t* src, bool bFirst) { if (bFirst) { gmx_ana_pos_reserve(dest, src->count(), -1); if (src->v) { gmx_ana_pos_reserve_velocities(dest); } if (src->f) { gmx_ana_pos_reserve_forces(dest); } } memcpy(dest->x, src->x, src->count() * sizeof(*dest->x)); if (dest->v) { GMX_ASSERT(src->v, "src velocities should be non-null if dest velocities are allocated"); memcpy(dest->v, src->v, src->count() * sizeof(*dest->v)); } if (dest->f) { GMX_ASSERT(src->f, "src forces should be non-null if dest forces are allocated"); memcpy(dest->f, src->f, src->count() * sizeof(*dest->f)); } gmx_ana_indexmap_copy(&dest->m, &src->m, bFirst); } /*! * \param[in,out] pos Position data structure. * \param[in] nr Number of positions. */ void gmx_ana_pos_set_nr(gmx_ana_pos_t* pos, int nr) { // TODO: This puts the mapping in a somewhat inconsistent state. pos->m.mapb.nr = nr; } /*! * \param[in,out] pos Position data structure. * * Sets the number of positions to 0. */ void gmx_ana_pos_empty_init(gmx_ana_pos_t* pos) { pos->m.mapb.nr = 0; pos->m.mapb.nra = 0; pos->m.b.nr = 0; pos->m.b.nra = 0; /* Initializing these should not really be necessary, but do it for * safety... */ pos->m.mapb.index[0] = 0; pos->m.b.index[0] = 0; /* This function should only be used to construct all the possible * positions, so the result should always be static. */ pos->m.bStatic = true; } /*! * \param[in,out] pos Position data structure. * * Sets the number of positions to 0. */ void gmx_ana_pos_empty(gmx_ana_pos_t* pos) { pos->m.mapb.nr = 0; pos->m.mapb.nra = 0; /* This should not really be necessary, but do it for safety... */ pos->m.mapb.index[0] = 0; /* We set the flag to true, although really in the empty state it * should be false. This makes it possible to update the flag in * gmx_ana_pos_append(), and just make a simple check in * gmx_ana_pos_append_finish(). */ pos->m.bStatic = true; } /*! 
* \param[in,out] dest Data structure to which the new position is appended. * \param[in] src Data structure from which the position is copied. * \param[in] i Index in \p from to copy. */ void gmx_ana_pos_append_init(gmx_ana_pos_t* dest, gmx_ana_pos_t* src, int i) { int j, k; j = dest->count(); copy_rvec(src->x[i], dest->x[j]); if (dest->v) { if (src->v) { copy_rvec(src->v[i], dest->v[j]); } else { clear_rvec(dest->v[j]); } } if (dest->f) { if (src->f) { copy_rvec(src->f[i], dest->f[j]); } else { clear_rvec(dest->f[j]); } } dest->m.refid[j] = j; dest->m.mapid[j] = src->m.mapid[i]; dest->m.orgid[j] = src->m.orgid[i]; for (k = src->m.mapb.index[i]; k < src->m.mapb.index[i + 1]; ++k) { dest->m.mapb.a[dest->m.mapb.nra++] = src->m.mapb.a[k]; dest->m.b.a[dest->m.b.nra++] = src->m.b.a[k]; } dest->m.mapb.index[j + 1] = dest->m.mapb.nra; dest->m.b.index[j + 1] = dest->m.mapb.nra; dest->m.mapb.nr++; dest->m.b.nr++; } /*! * \param[in,out] dest Data structure to which the new position is appended. * \param[in] src Data structure from which the position is copied. * \param[in] i Index in \p src to copy. * \param[in] refid Reference ID in \p out * (all negative values are treated as -1). */ void gmx_ana_pos_append(gmx_ana_pos_t* dest, gmx_ana_pos_t* src, int i, int refid) { for (int k = src->m.mapb.index[i]; k < src->m.mapb.index[i + 1]; ++k) { dest->m.mapb.a[dest->m.mapb.nra++] = src->m.mapb.a[k]; } const int j = dest->count(); if (dest->v) { if (src->v) { copy_rvec(src->v[i], dest->v[j]); } else { clear_rvec(dest->v[j]); } } if (dest->f) { if (src->f) { copy_rvec(src->f[i], dest->f[j]); } else { clear_rvec(dest->f[j]); } } copy_rvec(src->x[i], dest->x[j]); if (refid < 0) { dest->m.refid[j] = -1; dest->m.bStatic = false; /* If we are using masks, there is no need to alter the * mapid field. */ } else { if (refid != j) { dest->m.bStatic = false; } dest->m.refid[j] = refid; /* Use the original IDs from the output structure to correctly * handle user customization. 
*/ dest->m.mapid[j] = dest->m.orgid[refid]; } dest->m.mapb.index[j + 1] = dest->m.mapb.nra; dest->m.mapb.nr++; } /*! * \param[in,out] pos Position data structure. * * After gmx_ana_pos_empty(), internal state of the position data structure * is not consistent before this function is called. This function should be * called after any gmx_ana_pos_append() calls have been made. */ void gmx_ana_pos_append_finish(gmx_ana_pos_t* pos) { if (pos->m.mapb.nr != pos->m.b.nr) { pos->m.bStatic = false; } } /*! * \param[in,out] g Data structure to which the new atoms are appended. * \param[in] src Data structure from which the position is copied. * \param[in] i Index in \p src to copy. */ void gmx_ana_pos_add_to_group(gmx_ana_index_t* g, gmx_ana_pos_t* src, int i) { for (int k = src->m.mapb.index[i]; k < src->m.mapb.index[i + 1]; ++k) { g->index[g->isize++] = src->m.mapb.a[k]; } }
5,105
25,151
<reponame>TamsilAmani/selenium // Licensed to the Software Freedom Conservancy (SFC) under one // or more contributor license agreements. See the NOTICE file // distributed with this work for additional information // regarding copyright ownership. The SFC licenses this file // to you under the Apache License, Version 2.0 (the // "License"); you may not use this file except in compliance // with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. package org.openqa.selenium.grid.router; import static org.openqa.selenium.json.Json.MAP_TYPE; import static org.openqa.selenium.remote.http.HttpMethod.GET; import org.openqa.selenium.Capabilities; import org.openqa.selenium.grid.commands.EventBusCommand; import org.openqa.selenium.grid.commands.Hub; import org.openqa.selenium.grid.commands.Standalone; import org.openqa.selenium.grid.config.CompoundConfig; import org.openqa.selenium.grid.config.Config; import org.openqa.selenium.grid.config.MapConfig; import org.openqa.selenium.grid.config.MemoizedConfig; import org.openqa.selenium.grid.config.TomlConfig; import org.openqa.selenium.grid.distributor.httpd.DistributorServer; import org.openqa.selenium.grid.node.httpd.NodeServer; import org.openqa.selenium.grid.router.httpd.RouterServer; import org.openqa.selenium.grid.server.Server; import org.openqa.selenium.grid.sessionmap.httpd.SessionMapServer; import org.openqa.selenium.grid.sessionqueue.httpd.NewSessionQueueServer; import org.openqa.selenium.grid.web.Values; import org.openqa.selenium.json.Json; import org.openqa.selenium.json.JsonOutput; import org.openqa.selenium.net.PortProber; import 
org.openqa.selenium.remote.http.HttpClient; import org.openqa.selenium.remote.http.HttpRequest; import org.openqa.selenium.remote.http.HttpResponse; import org.openqa.selenium.support.ui.FluentWait; import org.openqa.selenium.testing.Safely; import org.openqa.selenium.testing.TearDownFixture; import java.io.IOException; import java.io.StringReader; import java.io.UncheckedIOException; import java.net.ConnectException; import java.time.Duration; import java.util.Arrays; import java.util.List; import java.util.Map; public enum DeploymentTypes { STANDALONE { @Override public Deployment start(Capabilities capabilities, Config additionalConfig) { StringBuilder rawCaps = new StringBuilder(); try (JsonOutput out = new Json().newOutput(rawCaps)) { out.setPrettyPrint(false).write(capabilities); } String[] rawConfig = new String[]{ "[network]", "relax-checks = true", "", "[server]", "registration-secret = \"provolone\"", "", "[sessionqueue]", "session-request-timeout = 100", "session-retry-interval = 1" }; Config config = new MemoizedConfig( new CompoundConfig( additionalConfig, new TomlConfig(new StringReader(String.join("\n", rawConfig))))); Server<?> server = new Standalone().asServer(new CompoundConfig(setRandomPort(), config)).start(); waitUntilReady(server, Duration.ofSeconds(5)); return new Deployment(server, server::stop); } }, HUB_AND_NODE { @Override public Deployment start(Capabilities capabilities, Config additionalConfig) { StringBuilder rawCaps = new StringBuilder(); try (JsonOutput out = new Json().newOutput(rawCaps)) { out.setPrettyPrint(false).write(capabilities); } int publish = PortProber.findFreePort(); int subscribe = PortProber.findFreePort(); String[] rawConfig = new String[] { "[events]", "publish = \"tcp://localhost:" + publish + "\"", "subscribe = \"tcp://localhost:" + subscribe + "\"", "", "[network]", "relax-checks = true", "", "[server]", "registration-secret = \"feta\"", "", "[sessionqueue]", "session-request-timeout = 100", 
"session-retry-interval = 1" }; Config baseConfig = new MemoizedConfig( new CompoundConfig( additionalConfig, new TomlConfig(new StringReader(String.join("\n", rawConfig))))); Config hubConfig = new MemoizedConfig( new CompoundConfig( setRandomPort(), new MapConfig(Map.of("events", Map.of("bind", true))), baseConfig)); Server<?> hub = new Hub().asServer(hubConfig).start(); MapConfig additionalNodeConfig = new MapConfig(Map.of("node", Map.of("hub", hub.getUrl()))); Config nodeConfig = new MemoizedConfig( new CompoundConfig( additionalNodeConfig, setRandomPort(), baseConfig)); Server<?> node = new NodeServer().asServer(nodeConfig).start(); waitUntilReady(node, Duration.ofSeconds(5)); waitUntilReady(hub, Duration.ofSeconds(5)); return new Deployment(hub, hub::stop, node::stop); } }, DISTRIBUTED { @Override public Deployment start(Capabilities capabilities, Config additionalConfig) { StringBuilder rawCaps = new StringBuilder(); try (JsonOutput out = new Json().newOutput(rawCaps)) { out.setPrettyPrint(false).write(capabilities); } int publish = PortProber.findFreePort(); int subscribe = PortProber.findFreePort(); String[] rawConfig = new String[] { "[events]", "publish = \"tcp://localhost:" + publish + "\"", "subscribe = \"tcp://localhost:" + subscribe + "\"", "bind = false", "", "[network]", "relax-checks = true", "", "[server]", "", "registration-secret = \"colby\"", "", "[sessionqueue]", "session-request-timeout = 100", "session-retry-interval = 1" }; Config sharedConfig = new MemoizedConfig( new CompoundConfig( additionalConfig, new TomlConfig(new StringReader(String.join("\n", rawConfig))))); Server<?> eventServer = new EventBusCommand() .asServer(new MemoizedConfig(new CompoundConfig( new TomlConfig(new StringReader(String.join("\n", new String[] { "[events]", "publish = \"tcp://localhost:" + publish + "\"", "subscribe = \"tcp://localhost:" + subscribe + "\"", "bind = true"}))), setRandomPort(), sharedConfig))) .start(); waitUntilReady(eventServer, 
Duration.ofSeconds(5)); Server<?> newSessionQueueServer = new NewSessionQueueServer() .asServer(new MemoizedConfig(new CompoundConfig(setRandomPort(), sharedConfig))).start(); waitUntilReady(newSessionQueueServer, Duration.ofSeconds(5)); Config newSessionQueueServerConfig = new TomlConfig(new StringReader(String.join( "\n", new String[] { "[sessionqueue]", "hostname = \"localhost\"", "port = " + newSessionQueueServer.getUrl().getPort() } ))); Server<?> sessionMapServer = new SessionMapServer() .asServer(new MemoizedConfig(new CompoundConfig(setRandomPort(), sharedConfig))).start(); Config sessionMapConfig = new TomlConfig(new StringReader(String.join( "\n", new String[] { "[sessions]", "hostname = \"localhost\"", "port = " + sessionMapServer.getUrl().getPort() } ))); Server<?> distributorServer = new DistributorServer() .asServer(new MemoizedConfig(new CompoundConfig( setRandomPort(), sessionMapConfig, newSessionQueueServerConfig, sharedConfig))) .start(); Config distributorConfig = new TomlConfig(new StringReader(String.join( "\n", new String[] { "[distributor]", "hostname = \"localhost\"", "port = " + distributorServer.getUrl().getPort() } ))); Server<?> router = new RouterServer() .asServer(new MemoizedConfig(new CompoundConfig( setRandomPort(), sessionMapConfig, distributorConfig, newSessionQueueServerConfig, sharedConfig))) .start(); MapConfig nodeConfig = new MapConfig(Map.of("node", Map.of("hub", router.getUrl()))); Server<?> nodeServer = new NodeServer() .asServer(new MemoizedConfig(new CompoundConfig( nodeConfig, setRandomPort(), sharedConfig, sessionMapConfig, distributorConfig, newSessionQueueServerConfig))) .start(); waitUntilReady(nodeServer, Duration.ofSeconds(5)); waitUntilReady(router, Duration.ofSeconds(5)); return new Deployment( router, router::stop, nodeServer::stop, distributorServer::stop, sessionMapServer::stop, newSessionQueueServer::stop, eventServer::stop); } }; private static Config setRandomPort() { return new MapConfig(Map.of("server", 
Map.of("port", PortProber.findFreePort()))); } private static void waitUntilReady(Server<?> server, Duration duration) { HttpClient client = HttpClient.Factory.createDefault().createClient(server.getUrl()); try { new FluentWait<>(client) .withTimeout(duration) .pollingEvery(Duration.ofMillis(250)) .ignoring(IOException.class) .ignoring(UncheckedIOException.class) .ignoring(ConnectException.class) .until( c -> { HttpResponse response = c.execute(new HttpRequest(GET, "/status")); Map<String, Object> status = Values.get(response, MAP_TYPE); return Boolean.TRUE.equals( status != null && Boolean.parseBoolean(status.get("ready").toString())); }); } finally { Safely.safelyCall(client::close); } } public abstract Deployment start(Capabilities capabilities, Config additionalConfig); public static class Deployment implements TearDownFixture { private final Server<?> server; private final List<TearDownFixture> tearDowns; private Deployment(Server<?> server, TearDownFixture... tearDowns) { this.server = server; this.tearDowns = Arrays.asList(tearDowns); } public Server<?> getServer() { return server; } @Override public void tearDown() throws Exception { tearDowns.parallelStream().forEach(Safely::safelyCall); } } }
4,251
6,663
<gh_stars>1000+ # cython: auto_cpdef=True # mode:run # tag: directive,auto_cpdef,closures def closure_func(x): """ >>> c = closure_func(2) >>> c() 2 """ def c(): return x return c def generator_func(): """ >>> for i in generator_func(): print(i) 1 2 """ yield 1 yield 2
160
985
package com.taobao.metamorphosis.client.extension.spring; import com.taobao.metamorphosis.exception.MetaClientException; /** * Messge body object converter. * * @author dennis<<EMAIL>> * @since 1.4.5 * @param <T> */ public interface MessageBodyConverter<T> { /** * Convert a message object to byte array. * * @param body * @return * @throws MetaClientException */ public byte[] toByteArray(T body) throws MetaClientException; /** * Convert a byte array to message object. * * @param bs * @return * @throws MetaClientException */ public T fromByteArray(byte[] bs) throws MetaClientException; }
258
6,541
<reponame>albertobarri/idk #include <string.h> #include "pthread_impl.h" #include "libc.h" void __reset_tls() { pthread_t self = __pthread_self(); struct tls_module *p; size_t i, n = (size_t)self->dtv[0]; if (n) for (p=libc.tls_head, i=1; i<=n; i++, p=p->next) { if (!self->dtv[i]) continue; memcpy(self->dtv[i], p->image, p->len); memset((char *)self->dtv[i]+p->len, 0, p->size - p->len); } }
208
378
<filename>aio-pro/src/main/java/org/smartboot/socket/extension/plugins/Plugin.java /******************************************************************************* * Copyright (c) 2017-2019, org.smartboot. All rights reserved. * project name: smart-socket * file name: Plugin.java * Date: 2019-12-31 * Author: sandao (<EMAIL>) * ******************************************************************************/ package org.smartboot.socket.extension.plugins; import org.smartboot.socket.NetMonitor; import org.smartboot.socket.StateMachineEnum; import org.smartboot.socket.transport.AioSession; /** * @author 三刀 * @version V1.0 , 2018/8/19 */ public interface Plugin<T> extends NetMonitor { /** * 对请求消息进行预处理,并决策是否进行后续的MessageProcessor处理。 * 若返回false,则当前消息将被忽略。 * 若返回true,该消息会正常秩序MessageProcessor.process. * * @param session * @param t * @return */ boolean preProcess(AioSession session, T t); /** * 监听状态机事件 * * @param stateMachineEnum * @param session * @param throwable * @see org.smartboot.socket.MessageProcessor#stateEvent(AioSession, StateMachineEnum, Throwable) */ void stateEvent(StateMachineEnum stateMachineEnum, AioSession session, Throwable throwable); }
531
1,656
<reponame>HashZhang/spring-cloud-sleuth /* * Copyright 2013-2021 the original author or authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.springframework.cloud.sleuth.autoconfig.instrument.kafka; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.MockConsumer; import org.apache.kafka.clients.consumer.OffsetResetStrategy; import org.apache.kafka.clients.producer.MockProducer; import org.apache.kafka.clients.producer.Producer; import org.junit.jupiter.api.Test; import org.springframework.boot.autoconfigure.AutoConfigurations; import org.springframework.boot.test.context.runner.ApplicationContextRunner; import org.springframework.cloud.sleuth.autoconfig.TraceNoOpAutoConfiguration; import org.springframework.cloud.sleuth.instrument.kafka.TracingKafkaConsumer; import org.springframework.cloud.sleuth.instrument.kafka.TracingKafkaProducer; import org.springframework.cloud.sleuth.instrument.kafka.TracingKafkaPropagatorGetter; import org.springframework.cloud.sleuth.instrument.kafka.TracingKafkaPropagatorSetter; import static org.assertj.core.api.Assertions.assertThat; class TraceKafkaAutoConfigurationTests { private final ApplicationContextRunner contextRunner = new ApplicationContextRunner() .withPropertyValues("spring.sleuth.noop.enabled=true") .withConfiguration(AutoConfigurations.of(TraceNoOpAutoConfiguration.class, TracingKafkaAutoConfiguration.class, TracingReactorKafkaAutoConfiguration.class)); @Test void 
should_inject_beans_for_getter_setter_kafka_propagation() { this.contextRunner.run(context -> assertThat(context).hasSingleBean(TracingKafkaPropagatorGetter.class) .hasSingleBean(TracingKafkaPropagatorSetter.class)); } @Test void should_decorate_kafka_producer() { this.contextRunner.withBean(Producer.class, MockProducer::new) .run(context -> assertThat(context).hasSingleBean(TracingKafkaProducer.class)); } @Test void should_decorate_kafka_consumer() { this.contextRunner.withBean(Consumer.class, () -> new MockConsumer<>(OffsetResetStrategy.NONE)) .run(context -> assertThat(context).hasSingleBean(TracingKafkaConsumer.class)); } @Test void should_not_decorate_tracing_kafka_consumer() { TracingKafkaConsumer<String, String> kafkaConsumer = new TracingKafkaConsumer<>( new MockConsumer<>(OffsetResetStrategy.NONE), null); this.contextRunner.withBean(TracingKafkaConsumer.class, () -> kafkaConsumer) .run(context -> assertThat(context).getBean(Consumer.class).isEqualTo(kafkaConsumer)); } @Test void should_not_decorate_tracing_kafka_producer() { TracingKafkaProducer<String, String> kafkaProducer = new TracingKafkaProducer<>(new MockProducer<>(), null); this.contextRunner.withBean(TracingKafkaProducer.class, () -> kafkaProducer) .run(context -> assertThat(context).getBean(Producer.class).isEqualTo(kafkaProducer)); } }
1,129
527
<reponame>gkgoat1/lexy // INPUT:type Id = <error>;\nfunction foo(...) {\n ...\n}\n constexpr auto id = dsl::identifier(dsl::ascii::alpha); constexpr auto kw_function = LEXY_KEYWORD("function", id); constexpr auto kw_type = LEXY_KEYWORD("type", id); struct function_decl { static constexpr auto rule = [] { auto arguments = dsl::parenthesized(LEXY_LIT("...")); auto body = dsl::curly_bracketed(LEXY_LIT("...")); return kw_function >> id + arguments + body; }(); }; struct type_decl { static constexpr auto rule // = kw_type >> id + dsl::lit_c<'='> + id + dsl::semicolon; }; struct production { static constexpr auto whitespace = dsl::ascii::space; static constexpr auto rule = [] { auto decl = dsl::p<function_decl> | dsl::p<type_decl>; // We recover from any errors by skipping until the next decl. auto decl_recover = dsl::find(kw_function, kw_type); auto try_decl = dsl::try_(decl, decl_recover); return dsl::list(try_decl); }(); };
459
1,544
# -*- coding: utf-8 -*- from django.db import models, migrations class Migration(migrations.Migration): dependencies = [("scripts", "0006_auto_20150310_2249")] operations = [ migrations.DeleteModel(name="DefaultScript"), migrations.DeleteModel(name="DoNothing"), migrations.DeleteModel(name="ScriptBase"), migrations.DeleteModel(name="Store"), ]
149
849
""" Copyright 2020 The Magma Authors. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. This file contains modifications of the core spyne functionality. This is done using child classes and function override to avoid modifying spyne code itself. Each function below is a modified version of the parent function. These modifications are required because: 1) Spyne is not fully python3-compliant 2) Not all parts of the TR-069 spec are possible through spyne APIs (e.g RPC calls from server to client in HTTP responses) 3) Minor enhancements for debug-ability """ from lxml import etree from magma.enodebd.logger import EnodebdLogger as logger from spyne.application import Application from spyne.interface._base import Interface from spyne.protocol.soap import Soap11 from spyne.protocol.xml import XmlDocument class Tr069Interface(Interface): """ Modified base interface class. """ def reset_interface(self): super(Tr069Interface, self).reset_interface() # Replace default namespace prefix (may not strictly be # required, but makes it easier to debug) del self.nsmap['tns'] self.nsmap['cwmp'] = self.get_tns() self.prefmap[self.get_tns()] = 'cwmp' # To validate against the xsd:<types>, the namespace # prefix is expected to be the same del self.nsmap['xs'] self.nsmap['xsd'] = 'http://www.w3.org/2001/XMLSchema' self.prefmap['http://www.w3.org/2001/XMLSchema'] = 'xsd' class Tr069Application(Application): """ Modified spyne application. 
""" def __init__( self, services, tns, name=None, in_protocol=None, out_protocol=None, config=None, ): super(Tr069Application, self).__init__( services, tns, name, in_protocol, out_protocol, config, ) # Use modified interface class self.interface = Tr069Interface(self) class Tr069Soap11(Soap11): """ Modified SOAP protocol. """ def __init__(self, *args, **kwargs): super(Tr069Soap11, self).__init__(*args, **kwargs) # Disabling type resolution as a workaround for # https://github.com/arskom/spyne/issues/567 self.parse_xsi_type = False # Bug in spyne is cleaning up the default XSD namespace # and causes validation issues on TR-069 clients self.cleanup_namespaces = False def create_in_document(self, ctx, charset=None): """ In TR-069, the ACS (e.g Magma) is an HTTP server, but acts as a client for SOAP messages. This is done by the CPE (e.g ENodeB) sending an empty HTTP request, and the ACS responding with a SOAP request in the HTTP response. This code replaces an empty HTTP request with a string that gets decoded to a call to the 'EmptyHttp' RPC . """ # Try cp437 as default to ensure that we dont get any decoding errors, # since it uses 1-byte encoding and has a 'full' char map if not charset: charset = 'cp437' # Convert from generator to bytes before doing comparison # Re-encode to chosen charset to remove invalid characters in_string = b''.join(ctx.in_string).decode(charset, 'ignore') ctx.in_string = [in_string.encode(charset, 'ignore')] if ctx.in_string == [b'']: ctx.in_string = [ b'<soap11env:Envelope xmlns:cwmp="urn:dslforum-org:cwmp-1-0" xmlns:soap11env="http://schemas.xmlsoap.org/soap/envelope/">/n' b' <soap11env:Body>/n' b' <cwmp:EmptyHttp/>/n' b' </soap11env:Body>/n' b'</soap11env:Envelope>', ] super(Tr069Soap11, self).create_in_document(ctx, charset) def decompose_incoming_envelope(self, ctx, message=XmlDocument.REQUEST): """ For TR-069, the SOAP fault message (CPE->ACS) contains useful information, and should not result in another fault response (ACS->CPE). 
Strip the outer SOAP fault structure, so that the CWMP fault structure is treated as a normal RPC call (to the 'Fault' function). """ super(Tr069Soap11, self).decompose_incoming_envelope(ctx, message) if ctx.in_body_doc.tag == '{%s}Fault' % self.ns_soap_env: faultstring = ctx.in_body_doc.findtext('faultstring') if not faultstring or 'CWMP fault' not in faultstring: # Not a CWMP fault return # Strip SOAP fault structure, leaving inner CWMP fault structure detail_elem = ctx.in_body_doc.find('detail') if detail_elem is not None: detail_children = list(detail_elem) if len(detail_children): if len(detail_children) > 1: logger.warning( "Multiple detail elements found in SOAP" " fault - using first one", ) ctx.in_body_doc = detail_children[0] ctx.method_request_string = ctx.in_body_doc.tag self.validate_body(ctx, message) def get_call_handles(self, ctx): """ Modified function to fix bug in receiving SOAP fault. In this case, ctx.method_request_string is None, so 'startswith' errors out. """ if ctx.method_request_string is None: return [] return super(Tr069Soap11, self).get_call_handles(ctx) def serialize(self, ctx, message): # Workaround for issue https://github.com/magma/magma/issues/7869 # Updates to ctx.descriptor.out_message.Attributes.sub_name are taking # effect on the descriptor. But when puled from _attrcache dictionary, # it still has a stale value. # Force repopulation of dictionary by deleting entry # TODO Remove this code once we have a better fix if (ctx.descriptor and ctx.descriptor.out_message in self._attrcache): del self._attrcache[ctx.descriptor.out_message] # noqa: WPS529 super(Tr069Soap11, self).serialize(ctx, message) # Keep XSD namespace etree.cleanup_namespaces(ctx.out_document, keep_ns_prefixes=['xsd'])
2,730
403
#include <vkpp/query.hh>
#include <vkpp/device.hh>
#include <vkpp/exception.hh>

#include <utility>

namespace vkpp {
    // Wraps a VkQueryPool plus host-side bookkeeping for named begin/end
    // timestamp query pairs used to measure GPU execution times.
    //
    // Owns two resources: the Vulkan query pool handle and a heap buffer
    // ('timestamp_buffer') large enough to hold one 64-bit result per query.
    QueryPool::QueryPool(Device& device, VkQueryType query_type,
                         std::uint32_t query_count,
                         VkQueryPipelineStatisticFlags pipeline_stats)
                        : query_types { query_type },
                          pipeline_statistics { pipeline_stats },
                          query_count { query_count },
                          device { device.get_handle() } {
        VkQueryPoolCreateInfo create_info;
        create_info.sType = VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO;
        create_info.pNext = nullptr;
        create_info.flags = 0;

        create_info.queryType = query_type;
        create_info.queryCount = query_count;
        create_info.pipelineStatistics = pipeline_stats;

        // Nanoseconds represented by one timestamp tick on this device
        // (VkPhysicalDeviceLimits::timestampPeriod).
        ns_per_unit = device.get_physical_device().get_properties().limits.timestampPeriod;

        // Reused for every result fetch so request_timestamp_queries() does
        // not allocate per call; released in the destructor.
        timestamp_buffer = new std::uint64_t[query_count];

        if (VkResult error = vkCreateQueryPool(this->device, &create_info, nullptr, &handle))
            throw Exception { error, "couldn't create query pool!" };
    }

    QueryPool::~QueryPool() noexcept {
        if (handle != VK_NULL_HANDLE)
            vkDestroyQueryPool(device, handle, nullptr);
        if (timestamp_buffer != nullptr)
            delete[] timestamp_buffer;
    }

    // Move construction/assignment are implemented in terms of swap.
    // NOTE(review): this assumes the in-class member initializers (declared in
    // the header, not visible here) leave a fresh QueryPool with safe values
    // (VK_NULL_HANDLE handle / null timestamp_buffer), and that move-assign
    // intentionally hands our old resources to the source object so they are
    // destroyed with it — verify against the class definition.
    QueryPool::QueryPool(QueryPool&& command_pool) noexcept {
        swap(*this, command_pool);
    }

    QueryPool& QueryPool::operator=(QueryPool&& command_pool) noexcept {
        swap(*this, command_pool);
        return *this;
    }

    // Member-wise swap; keeps the two objects' handle/buffer ownership paired
    // with their bookkeeping maps.
    void swap(QueryPool& lhs, QueryPool& rhs) {
        using std::swap;
        swap(lhs.handle, rhs.handle);
        swap(lhs.device, rhs.device);
        swap(lhs.pipeline_statistics, rhs.pipeline_statistics);
        swap(lhs.query_count, rhs.query_count);
        swap(lhs.query_types, rhs.query_types);
        swap(lhs.ns_per_unit, rhs.ns_per_unit);
        swap(lhs.query, rhs.query);
        swap(lhs.timestamps, rhs.timestamps);
        swap(lhs.timestamp_ms_time, rhs.timestamp_ms_time);
        swap(lhs.timestamp_buffer, rhs.timestamp_buffer);
    }

    // Two query slots (begin + end) are consumed per named timestamp.
    // (Implicitly narrows size_t -> uint32_t; fine for realistic counts.)
    std::uint32_t QueryPool::get_timestamp_query_count() const {
        return 2 * timestamps.size();
    }

    // Throws std::out_of_range if 'query_name' was never registered.
    const QueryPool::TimestampPair& QueryPool::get_timestamp(const std::string& query_name) const {
        return timestamps.at(query_name);
    }

    // Records which query slot holds the begin timestamp for 'name',
    // creating the entry on first use.
    void QueryPool::set_begin_timestamp(const std::string& name, std::uint32_t query) {
        auto timestamp = timestamps.find(name);
        if (timestamp == timestamps.end()) {
            timestamps[name] = TimestampPair { 0, 0 };
            timestamp = timestamps.find(name);
        }

        timestamp->second.begin = query;
    }

    // Records which query slot holds the end timestamp for 'name',
    // creating the entry on first use.
    void QueryPool::set_end_timestamp(const std::string& name, std::uint32_t query) {
        auto timestamp = timestamps.find(name);
        if (timestamp == timestamps.end()) {
            timestamps[name] = TimestampPair { 0, 0 };
            timestamp = timestamps.find(name);
        }

        timestamp->second.end = query;
    }

    VkQueryPool& QueryPool::get_handle() {
        return handle;
    }

    VkQueryType QueryPool::get_query_type() const {
        return query_types;
    }

    VkQueryPipelineStatisticFlags QueryPool::get_pipeline_statistics_flag() const {
        return pipeline_statistics;
    }

    std::uint32_t QueryPool::get_query_count() const {
        return query_count;
    }

    float QueryPool::get_ns_per_unit() const {
        return ns_per_unit;
    }

    // Thin wrapper over vkGetQueryPoolResults: copies raw query results into
    // the caller-provided 'buffer'.
    VkResult QueryPool::get_results(std::uint32_t first_query, std::uint32_t query_count,
                                    VkDeviceSize size, void* buffer,
                                    VkQueryResultFlags result_flags, VkDeviceSize stride) {
        return vkGetQueryPoolResults(device, handle,
                                     first_query, query_count,
                                     size, buffer,
                                     stride, result_flags);
    }

    // Fetches all timestamp results from the GPU and converts each named
    // begin/end pair into a duration in milliseconds, keyed by name.
    // NOTE(review): no VK_QUERY_RESULT_WAIT_BIT is passed, so results are
    // presumably expected to be available by the time this is called —
    // confirm against the call sites.
    std::unordered_map<std::string, float>& QueryPool::request_timestamp_queries() {
        get_results(0, get_query_count(),
                    sizeof(std::uint64_t) * get_query_count(),
                    timestamp_buffer,
                    VK_QUERY_RESULT_64_BIT,
                    sizeof(std::uint64_t));

        for (const auto& timestamp : timestamps) {
            std::int64_t begin_timestamp = timestamp_buffer[timestamp.second.begin];
            std::int64_t end_timestamp = timestamp_buffer[timestamp.second.end];
            // ticks * timestampPeriod = nanoseconds; divide by 1e6 for ms.
            auto duration_in_ns = (end_timestamp - begin_timestamp) * get_ns_per_unit();
            timestamp_ms_time[timestamp.first] = duration_in_ns / 1e6;
        }

        return timestamp_ms_time;
    }

    // Creates 'count' independent query pools with identical configuration
    // (e.g. one per frame in flight).
    std::vector<QueryPool> QueryPool::create(std::size_t count, Device& device,
                                             VkQueryType query_type, std::uint32_t query_count,
                                             VkQueryPipelineStatisticFlags pipeline_stats) {
        std::vector<QueryPool> query_pools;
        query_pools.reserve(count);
        for (std::size_t i { 0 }; i < count; ++i) {
            query_pools.emplace_back(device, query_type, query_count, pipeline_stats);
        }

        return query_pools;
    }

    // Drops all named timestamp bookkeeping; the Vulkan pool itself is untouched.
    void QueryPool::clear_timestamps() {
        timestamp_ms_time.clear();
        timestamps.clear();
    }
}
2,820
1,104
{ "html": "W20.html", "css": "W20.css", "authors": "<NAME>.", "roll20userid": "492849", "preview": "W20.png", "instructions": "# Werewolf: The Apocalypse Character Sheet \r \r **For use in the classic World of Darkness, 20th Anniversary edition.** \r \r Inspired by and modified from <NAME>'s classic World of Darkness Werewolf sheet, with heavy influence from the brilliance of Rich Finder and his Savage World Tabbed sheet. \r\r## Special Areas\r\r* \r\r**Claw Marks**\r\r* Denote an area hidden to save space, and includes Appearance, Story, Possessions, Gifts, Rites, and Fetishes.\r\r**Buttons**\r\r*Rage, Gnosis, Willpower (Advantages tab) \r* All entries under the Brawling table \r* All entries under the Soak table\r* If there's something you want added, or you'd like to report a bug, please contact the author of this sheet. \r \r **You will need to load unsafe scripts to see the font.** ", "legacy": true }
297
421
<gh_stars>100-1000
// <Snippet1>
using namespace System;
using namespace System::IO;
using namespace System::Reflection;
using namespace System::Text;

// Writes a listing of a type's declared methods to 'myWriter', gated by the
// 'ShowMethods' flag.
public ref class Sample
{
protected:
   // When false, DumpMethods does nothing.
   bool ShowMethods;
   // Destination stream for the generated listing.
   StreamWriter^ myWriter;

private:
   // Dumps the signatures (with access/static modifiers) of all methods
   // declared directly on 'aType'. Inherited methods are skipped, as are
   // special-name members, because those cannot generally be called by the
   // user directly — their functionality is exposed through other constructs
   // (e.g. property get/set methods are exposed as properties).
   void DumpMethods( Type^ aType )
   {
      if ( !ShowMethods )
         return;

      array<MethodInfo^>^mInfo = aType->GetMethods();
      myWriter->WriteLine( "Methods" );
      bool found = false;
      if ( mInfo->Length != 0 )
      {
         for ( int i = 0; i < mInfo->Length; i++ )
         {
            // Only display methods declared in this type. Also
            // filter out any methods with special names, because these
            // cannot be generally called by the user. That is, their
            // functionality is usually exposed in other ways, for example,
            // property get/set methods are exposed as properties.
            if ( mInfo[ i ]->DeclaringType == aType && !mInfo[ i ]->IsSpecialName )
            {
               found = true;
               // Accumulate the applicable modifier keywords in order.
               StringBuilder^ modifiers = gcnew StringBuilder;
               if ( mInfo[ i ]->IsStatic )
               {
                  modifiers->Append( "static " );
               }
               if ( mInfo[ i ]->IsPublic )
               {
                  modifiers->Append( "public " );
               }
               if ( mInfo[ i ]->IsFamily )
               {
                  modifiers->Append( "protected " );
               }
               if ( mInfo[ i ]->IsAssembly )
               {
                  modifiers->Append( "internal " );
               }
               if ( mInfo[ i ]->IsPrivate )
               {
                  modifiers->Append( "private " );
               }
               myWriter->WriteLine( "{0} {1}", modifiers, mInfo[ i ] );
            }
         }
      }
      if ( !found )
      {
         myWriter->WriteLine( "(none)" );
      }
   }
};
// </Snippet1>
1,080
748
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package dev.fluttercommunity.plus.packageinfo;

import android.annotation.SuppressLint;
import android.content.Context;
import android.content.pm.PackageInfo;
import android.content.pm.PackageManager;
import android.os.Build;
import androidx.annotation.NonNull;
import io.flutter.embedding.engine.plugins.FlutterPlugin;
import io.flutter.plugin.common.MethodCall;
import io.flutter.plugin.common.MethodChannel;
import io.flutter.plugin.common.MethodChannel.MethodCallHandler;
import io.flutter.plugin.common.MethodChannel.Result;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.HashMap;
import java.util.Map;

/**
 * PackageInfoPlugin.
 *
 * <p>Answers the "getAll" method call from Dart with the application's name, package name,
 * version name, build number and (when resolvable) the SHA-1 of its signing certificate.
 */
public class PackageInfoPlugin implements MethodCallHandler, FlutterPlugin {

  // Hex digits used by bytesToHex(). Hoisted to a constant so the table is not
  // re-allocated on every call (it used to be a local array).
  private static final char[] HEX_ARRAY = {
    '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F'
  };

  private Context applicationContext;
  private MethodChannel methodChannel;

  /**
   * Returns the app's version code, using the long form on API 28+ (P) and falling back to
   * the deprecated int field on older releases.
   */
  @SuppressWarnings("deprecation")
  private static long getLongVersionCode(PackageInfo info) {
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
      return info.getLongVersionCode();
    }
    return info.versionCode;
  }

  /** Plugin registration. */
  @Override
  public void onAttachedToEngine(FlutterPluginBinding binding) {
    this.applicationContext = binding.getApplicationContext();
    methodChannel =
        new MethodChannel(binding.getBinaryMessenger(), "dev.fluttercommunity.plus/package_info");
    methodChannel.setMethodCallHandler(this);
  }

  @Override
  public void onDetachedFromEngine(@NonNull FlutterPluginBinding binding) {
    applicationContext = null;
    methodChannel.setMethodCallHandler(null);
    methodChannel = null;
  }

  @Override
  public void onMethodCall(MethodCall call, @NonNull Result result) {
    try {
      if (call.method.equals("getAll")) {
        PackageManager pm = applicationContext.getPackageManager();
        PackageInfo info = pm.getPackageInfo(applicationContext.getPackageName(), 0);
        String buildSignature = getBuildSignature(pm);

        Map<String, String> map = new HashMap<>();
        map.put("appName", info.applicationInfo.loadLabel(pm).toString());
        map.put("packageName", applicationContext.getPackageName());
        map.put("version", info.versionName);
        map.put("buildNumber", String.valueOf(getLongVersionCode(info)));
        // The signature is optional: omitted from the map when unavailable.
        if (buildSignature != null) map.put("buildSignature", buildSignature);

        result.success(map);
      } else {
        result.notImplemented();
      }
    } catch (PackageManager.NameNotFoundException ex) {
      result.error("Name not found", ex.getMessage(), null);
    }
  }

  /**
   * Returns the SHA-1 (hex) of the app's signing certificate, or null when it cannot be
   * determined. On API 28+ the SigningInfo APIs are used: with multiple signers the first
   * APK content signer is hashed, otherwise the first certificate in the signing history.
   * Pre-28 falls back to the deprecated GET_SIGNATURES path.
   */
  @SuppressWarnings("deprecation")
  private String getBuildSignature(PackageManager pm) {
    try {
      if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.P) {
        PackageInfo packageInfo =
            pm.getPackageInfo(
                applicationContext.getPackageName(), PackageManager.GET_SIGNING_CERTIFICATES);
        if (packageInfo == null || packageInfo.signingInfo == null) {
          return null;
        }
        if (packageInfo.signingInfo.hasMultipleSigners()) {
          return signatureToSha1(packageInfo.signingInfo.getApkContentsSigners()[0].toByteArray());
        } else {
          return signatureToSha1(
              packageInfo.signingInfo.getSigningCertificateHistory()[0].toByteArray());
        }
      } else {
        @SuppressLint("PackageManagerGetSignatures")
        PackageInfo packageInfo =
            pm.getPackageInfo(applicationContext.getPackageName(), PackageManager.GET_SIGNATURES);
        if (packageInfo == null
            || packageInfo.signatures == null
            || packageInfo.signatures.length == 0
            || packageInfo.signatures[0] == null) {
          return null;
        }
        return signatureToSha1(packageInfo.signatures[0].toByteArray());
      }
    } catch (PackageManager.NameNotFoundException | NoSuchAlgorithmException e) {
      // Best effort: treat any lookup/digest failure as "no signature".
      return null;
    }
  }

  // Credits https://gist.github.com/scottyab/b849701972d57cf9562e
  /** Converts a byte array to an upper-case hex string. */
  private String bytesToHex(byte[] bytes) {
    char[] hexChars = new char[bytes.length * 2];
    int v;
    for (int j = 0; j < bytes.length; j++) {
      v = bytes[j] & 0xFF;
      hexChars[j * 2] = HEX_ARRAY[v >>> 4];
      hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
    }
    return new String(hexChars);
  }

  // Credits https://gist.github.com/scottyab/b849701972d57cf9562e
  /** Hashes a signature blob with SHA-1 and returns the digest as hex. */
  private String signatureToSha1(byte[] sig) throws NoSuchAlgorithmException {
    MessageDigest digest = MessageDigest.getInstance("SHA1");
    digest.update(sig);
    byte[] hashtext = digest.digest();
    return bytesToHex(hashtext);
  }
}
1,790
4,973
<filename>tests/roots/test-ext-autodoc/target/canonical/__init__.py from target.canonical.original import Bar, Foo
38
783
#ifndef MIAOW_HELPER_H
#define MIAOW_HELPER_H

#include <stdio.h>
#include <stdlib.h>
#ifndef WIN32
#include <unistd.h>
#endif
#include <string.h>

/* Largest immediate value used when generating instructions. */
#define MAX_IMM_VAL 50

/*
 * Test-generation configuration, populated from command-line values by
 * parseArgs(). Only one variable of this type is expected to exist.
 *
 * NOTE(review): field meanings below are inferred from the names — verify
 * against the generator implementation.
 */
struct _configValues {
    int scalar_alu;      /* scalar ALU instruction count/weight (presumed) */
    int vector_alu;      /* vector ALU instruction count/weight (presumed) */
    int scalar_mem;      /* scalar memory instruction count/weight (presumed) */
    int vector_mem;      /* vector memory instruction count/weight (presumed) */
    int scalar_reg;      /* scalar register budget (presumed) */
    int vector_reg;      /* vector register budget (presumed) */
    int data_memory;     /* data memory size/usage (presumed) */
    int instr_count;
    int thrd_count;
    int w_thrd_cnt[40];  /* per-wavefront thread counts (presumed; max 40 wavefronts) */
    int wfrt_count;
    int wgrp_count;
    int test_count;
    int unit_tests;
} typedef configValues;  /* legal-but-unusual 'struct {...} typedef name;' spelling — kept as-is */

/* Printers for 32-bit and 64-bit encoded instructions. */
void printInstruction32(void* instr);
void printInstruction64(void* instr);

/* Shuffles the first 'size' elements of 'arr' in place. */
void shuffleArray(int *arr, int size);

/* Prints usage information for 'prog' (the argv[0] name). */
void usage(char *prog);

/* Parses command-line arguments into the global configuration. */
void parseArgs(int argc, char *argv[]);

/* Output-file lifecycle and writers for the generated test artifacts. */
void openOutputFiles();
void closeOutputFiles();
void writeConfigFile();
void writeDataMemFile();

#endif
320
2,690
<gh_stars>1000+ # Copyright ClusterHQ Inc. See LICENSE file for details. """ Test helpers for ``flocker.node.agents``. """ from ._cinder import ( make_icindervolumemanager_tests, make_inovavolumemanager_tests, mimic_for_test, ) from ._blockdevice import ( FakeCloudAPI, InvalidConfig, detach_destroy_volumes, filesystem_label_for_test, get_blockdevice_config, get_blockdeviceapi_with_cleanup, get_ec2_client_for_test, get_minimum_allocatable_size, get_openstack_region_for_test, make_iblockdeviceapi_tests, make_icloudapi_tests, make_iprofiledblockdeviceapi_tests, mountroot_for_test, require_backend, umount, umount_all, ) from ._loopback import ( fakeprofiledloopbackblockdeviceapi_for_test, loopbackblockdeviceapi_for_test, formatted_loopback_device_for_test, ) __all__ = [ 'FakeCloudAPI', 'InvalidConfig', 'detach_destroy_volumes', 'fakeprofiledloopbackblockdeviceapi_for_test', 'filesystem_label_for_test', 'formatted_loopback_device_for_test', 'get_blockdevice_config', 'get_blockdeviceapi_with_cleanup', 'get_ec2_client_for_test', 'get_minimum_allocatable_size', 'get_openstack_region_for_test', 'loopbackblockdeviceapi_for_test', 'make_iblockdeviceapi_tests', 'make_icindervolumemanager_tests', 'make_icloudapi_tests', 'make_inovavolumemanager_tests', 'make_iprofiledblockdeviceapi_tests', 'mimic_for_test', 'mountroot_for_test', 'require_backend', 'umount', 'umount_all', ]
650
379
<filename>datavec/datavec-api/src/test/java/org/datavec/api/split/parittion/PartitionerTests.java<gh_stars>100-1000
package org.datavec.api.split.parittion;

import com.google.common.io.Files;
import org.datavec.api.conf.Configuration;
import org.datavec.api.split.FileSplit;
import org.datavec.api.split.partition.NumberOfRecordsPartitioner;
import org.datavec.api.split.partition.PartitionMetaData;
import org.datavec.api.split.partition.Partitioner;
import org.junit.Test;

import java.io.File;
import java.io.OutputStream;

import static junit.framework.TestCase.assertTrue;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

/** Unit tests for {@link NumberOfRecordsPartitioner}. */
public class PartitionerTests {

    /** A fresh partitioner over an empty, bootstrapped FileSplit starts with one partition. */
    @Test
    public void testRecordsPerFilePartition() {
        Partitioner partitioner = new NumberOfRecordsPartitioner();
        File tmpDir = Files.createTempDir();
        FileSplit fileSplit = new FileSplit(tmpDir);
        assertTrue(fileSplit.needsBootstrapForWrite());
        fileSplit.bootStrapForWrite();
        partitioner.init(fileSplit);
        assertEquals(1, partitioner.numPartitions());
    }

    /**
     * After the configured number of records has been written, the partitioner must request a
     * new partition, and opening a new output stream must work repeatedly.
     */
    @Test
    public void testInputAddFile() throws Exception {
        Partitioner partitioner = new NumberOfRecordsPartitioner();
        File tmpDir = Files.createTempDir();
        FileSplit fileSplit = new FileSplit(tmpDir);
        assertTrue(fileSplit.needsBootstrapForWrite());
        fileSplit.bootStrapForWrite();
        Configuration configuration = new Configuration();
        configuration.set(NumberOfRecordsPartitioner.RECORDS_PER_FILE_CONFIG, String.valueOf(5));
        partitioner.init(configuration, fileSplit);
        partitioner.updatePartitionInfo(PartitionMetaData.builder().numRecordsUpdated(5).build());
        assertTrue(partitioner.needsNewPartition());

        // Fix: assert the stream exists *before* closing it (the original asserted after
        // close), and close in a finally block so the stream is not leaked if an
        // assertion fails.
        OutputStream os = partitioner.openNewStream();
        try {
            assertNotNull(os);
        } finally {
            os.close();
        }

        // Run more than once to ensure output stream creation works properly.
        partitioner.updatePartitionInfo(PartitionMetaData.builder().numRecordsUpdated(5).build());
        os = partitioner.openNewStream();
        try {
            assertNotNull(os);
        } finally {
            os.close();
        }
    }
}
803
591
import numpy as np
import pytest
import torch

from docarray.math.distance.torch import cosine, euclidean, sqeuclidean


# Each case is (x_mat, y_mat, expected pairwise distance matrix).
# Note: per the expected values, the implementation is expected to return a
# cosine distance of 1 for two all-zero vectors (rather than NaN from 0/0).
@pytest.mark.parametrize(
    'x_mat, y_mat, result',
    (
        (
            torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
            torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
            np.array([[1.192093e-07, 2.53681537e-02], [2.53681537e-02, 0.000000e00]]),
        ),
        (
            torch.tensor([[1.0, 2.0, 3.0]]),
            torch.tensor([[1.0, 2.0, 3.0]]),
            np.array([[1.192093e-07]], dtype=np.float32),
        ),
        (
            torch.tensor([[0.0, 0.0, 0.0]]),
            torch.tensor([[0.0, 0.0, 0.0]]),
            np.array([[1]]),
        ),
        (
            torch.tensor([[1.0, 2.0, 3.0]]),
            torch.tensor([[19.0, 53.0, 201.0]]),
            np.array([[0.06788693]]),
        ),
    ),
)
def test_cosine(x_mat, y_mat, result):
    # Cosine distance must match the precomputed values to 3 decimal places.
    np.testing.assert_almost_equal(cosine(x_mat, y_mat), result, decimal=3)


@pytest.mark.parametrize(
    'x_mat, y_mat, result',
    (
        (
            torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
            torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
            np.array([[0, 27], [27, 0]]),
        ),
        (
            torch.tensor([[1.0, 2.0, 3.0]]),
            torch.tensor([[1.0, 2.0, 3.0]]),
            np.array([[0]]),
        ),
        (
            torch.tensor([[0.0, 0.0, 0.0]]),
            torch.tensor([[0.0, 0.0, 0.0]]),
            np.array([[0]]),
        ),
        (
            torch.tensor([[1.0, 2.0, 3.0]]),
            torch.tensor([[19.0, 53.0, 201.0]]),
            np.array([[42128.996]]),
        ),
    ),
)
def test_sqeuclidean(x_mat, y_mat, result):
    # Squared Euclidean distance; identical rows must give exactly 0.
    np.testing.assert_almost_equal(sqeuclidean(x_mat, y_mat), result, decimal=3)


@pytest.mark.parametrize(
    'x_mat, y_mat, result',
    (
        (
            torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
            torch.tensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]),
            np.array([[0, 5.19615242], [5.19615242, 0]]),
        ),
        (
            torch.tensor([[1.0, 2.0, 3.0]]),
            torch.tensor([[1.0, 2.0, 3.0]]),
            np.array([[0]]),
        ),
        (
            torch.tensor([[0.0, 0.0, 0.0]]),
            torch.tensor([[0.0, 0.0, 0.0]]),
            np.array([[0]]),
        ),
        (
            torch.tensor([[1.0, 2.0, 3.0]]),
            torch.tensor([[19.0, 53.0, 201.0]]),
            np.array([[205.2535018]]),
        ),
    ),
)
def test_euclidean(x_mat, y_mat, result):
    # Euclidean distance: sqrt of the sqeuclidean expectations above.
    np.testing.assert_almost_equal(euclidean(x_mat, y_mat), result, decimal=3)
1,672
348
<filename>docs/data/leg-t2/048/04801173.json {"nom":"Saint-Michel-de-Dèze","circ":"1ère circonscription","dpt":"Lozère","inscrits":213,"abs":98,"votants":115,"blancs":11,"nuls":5,"exp":99,"res":[{"nuance":"LR","nom":"<NAME>","voix":58},{"nuance":"REM","nom":"<NAME>","voix":41}]}
115
2,381
/*
 * Created on 21.07.2015
 */
package com.github.dockerjava.core.command;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.github.dockerjava.api.model.Frame;
import com.github.dockerjava.core.async.ResultCallbackTemplate;

/**
 * Result callback for "attach container" that logs every received {@link Frame}
 * at DEBUG level. Subclass and override {@link #onNext(Frame)} for real
 * stream handling.
 *
 * @author <NAME>
 *
 * @deprecated use {@link com.github.dockerjava.api.async.ResultCallback.Adapter}
 */
@Deprecated
public class AttachContainerResultCallback extends ResultCallbackTemplate<AttachContainerResultCallback, Frame> {

    private static final Logger LOGGER = LoggerFactory.getLogger(AttachContainerResultCallback.class);

    /** Called for each frame of the attached container's output stream. */
    @Override
    public void onNext(Frame item) {
        LOGGER.debug(item.toString());
    }
}
224
478
<reponame>Illbatting/AdaptiveCards<filename>source/uwp/Renderer/lib/WholeItemsPanel.h<gh_stars>100-1000
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#pragma once

#include "WholeItemsPanel.g.h"

namespace winrt::AdaptiveCards::Rendering::Uwp::implementation
{
    // XAML panel used by the Adaptive Cards renderer. During the measure pass
    // it tracks how many children were measured vs. remained visible, so it
    // can report whether content was truncated or clipped entirely.
    struct DECLSPEC_UUID("32934D77-6248-4915-BD2A-8F52EF6C8322") WholeItemsPanel
        : public WholeItemsPanelT<WholeItemsPanel, ITypePeek>
    {
    public:
        WholeItemsPanel() = default;

        // Standard XAML layout-pass overrides.
        winrt::Size MeasureOverride(winrt::Size const& availableSize);
        winrt::Size ArrangeOverride(winrt::Size const& finalSize);
        virtual void OnApplyTemplate(void);

        // Accessibility text aggregated from the panel's children.
        virtual winrt::hstring GetAltText();

        // Method used inside the component to reduce the number of temporary allocations
        void AppendAltText(std::wstring& buffer);

        void SetMainPanel(bool value);
        void SetAdaptiveHeight(bool value);

        // Shared across all instances (see s_bleedMargin below).
        static void SetBleedMargin(uint32_t bleedMargin);

        // True when children were measured but none survived as visible.
        virtual bool IsAllContentClippedOut() { return ((m_measuredCount > 0) && (m_visibleCount == 0)); }
        virtual bool IsTruncated() { return m_isTruncated; }

        void AddElementToStretchablesList(winrt::UIElement const& element);
        bool IsUIElementInStretchableList(winrt::UIElement const& element);

        void SetVerticalContentAlignment(winrt::VerticalContentAlignment verticalContentAlignment)
        {
            m_verticalContentAlignment = verticalContentAlignment;
        }

        // ITypePeek method
        void* PeekAt(REFIID riid) override { return PeekHelper(riid, this); }

    private:
        static uint32_t s_bleedMargin;

        uint32_t m_visibleCount{};
        uint32_t m_measuredCount{};
        uint32_t m_stretchableItemCount{};
        float m_calculatedSize{};
        bool m_allElementsRendered{};
        winrt::VerticalContentAlignment m_verticalContentAlignment{};

        // true if this represents the mainPanel.
        // Some rules such as images vertical stretching only apply for this panel
        // This is set when generating the XAML Tree corresponding to the Tile's payload.
        bool m_isMainPanel = false;

        // true if the Panel has been truncated, i.e. if some items could not be displayed.
        // This is set by the panel during measure and read in case of nested panels
        bool m_isTruncated = false;

        // If true, avoid vertical whitespace before and after the render.
        bool m_adaptiveHeight = false;

        bool IsAnySubgroupTruncated(winrt::Panel const& panel);

        // Layout/alt-text helpers used during measure and GetAltText traversal.
        static void LayoutCroppedImage(winrt::Shape const& shape, double availableWidth, double availableHeight);
        static void AppendText(winrt::hstring const& text, std::wstring& buffer);
        static void AppendAltTextToUIElement(winrt::UIElement const& pUIElement, std::wstring& buffer);
        static winrt::hstring GetAltAsString(winrt::UIElement const& element);
        static bool HasExplicitSize(winrt::FrameworkElement const& element);
    };
}

namespace winrt::AdaptiveCards::Rendering::Uwp::factory_implementation
{
    struct WholeItemsPanel : WholeItemsPanelT<WholeItemsPanel, implementation::WholeItemsPanel>
    {
    };
}
1,192
348
<reponame>chamberone/Leaflet.PixiOverlay<gh_stars>100-1000 {"nom":"Surmont","circ":"3ème circonscription","dpt":"Doubs","inscrits":128,"abs":46,"votants":82,"blancs":1,"nuls":1,"exp":80,"res":[{"nuance":"REM","nom":"M. <NAME>","voix":45},{"nuance":"LR","nom":"M. <NAME>","voix":35}]}
118
1,024
package com.commafeed.frontend.session; import java.io.File; import javax.servlet.SessionTrackingMode; import org.eclipse.jetty.server.session.DefaultSessionCache; import org.eclipse.jetty.server.session.FileSessionDataStore; import org.eclipse.jetty.server.session.SessionCache; import org.eclipse.jetty.server.session.SessionHandler; import com.google.common.collect.ImmutableSet; import io.dropwizard.util.Duration; public class SessionHandlerFactory { private String path = "sessions"; private Duration cookieMaxAge = Duration.days(30); private Duration cookieRefreshAge = Duration.days(1); private Duration maxInactiveInterval = Duration.days(30); private Duration savePeriod = Duration.minutes(5); public SessionHandler build() { SessionHandler sessionHandler = new SessionHandler() { { // no setter available for maxCookieAge _maxCookieAge = (int) cookieMaxAge.toSeconds(); } }; SessionCache sessionCache = new DefaultSessionCache(sessionHandler); sessionHandler.setSessionCache(sessionCache); FileSessionDataStore dataStore = new FileSessionDataStore(); sessionCache.setSessionDataStore(dataStore); sessionHandler.setHttpOnly(true); sessionHandler.setSessionTrackingModes(ImmutableSet.of(SessionTrackingMode.COOKIE)); sessionHandler.setMaxInactiveInterval((int) maxInactiveInterval.toSeconds()); sessionHandler.setRefreshCookieAge((int) cookieRefreshAge.toSeconds()); dataStore.setDeleteUnrestorableFiles(true); dataStore.setStoreDir(new File(path)); dataStore.setSavePeriodSec((int) savePeriod.toSeconds()); return sessionHandler; } }
553
2,151
<filename>chrome/browser/metrics/desktop_session_duration/desktop_profile_session_durations_service.cc<gh_stars>1000+ // Copyright 2018 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "chrome/browser/metrics/desktop_session_duration/desktop_profile_session_durations_service.h" #include "base/metrics/histogram_macros.h" #include "components/signin/core/browser/profile_oauth2_token_service.h" #include "components/sync/driver/sync_service.h" #include "content/public/browser/browser_context.h" namespace metrics { DesktopProfileSessionDurationsService::DesktopProfileSessionDurationsService( browser_sync::ProfileSyncService* sync_service, OAuth2TokenService* oauth2_token_service, GaiaCookieManagerService* cookie_manager, DesktopSessionDurationTracker* tracker) : sync_service_(sync_service), oauth2_token_service_(oauth2_token_service), sync_observer_(this), oauth2_token_observer_(this), gaia_cookie_observer_(this), session_duration_observer_(this) { gaia_cookie_observer_.Add(cookie_manager); session_duration_observer_.Add(tracker); if (tracker->in_session()) { // The session was started before this service was created. Let's start // tracking now. OnSessionStarted(base::TimeTicks::Now()); } // sync_service can be null if sync is disabled by a command line flag. if (sync_service) { sync_observer_.Add(sync_service_); } oauth2_token_observer_.Add(oauth2_token_service_); // Since this is created after the profile itself is created, we need to // handle the initial state. HandleSyncAndAccountChange(); // Check if we already know the signed in cookies. This will trigger a fetch // if we don't have them yet. 
std::vector<gaia::ListedAccount> signed_in; std::vector<gaia::ListedAccount> signed_out; if (cookie_manager->ListAccounts(&signed_in, &signed_out, "durations_metrics")) { OnGaiaAccountsInCookieUpdated(signed_in, signed_out, GoogleServiceAuthError()); } DVLOG(1) << "Ready to track Session.TotalDuration metrics"; } DesktopProfileSessionDurationsService:: ~DesktopProfileSessionDurationsService() = default; void DesktopProfileSessionDurationsService::OnSessionStarted( base::TimeTicks session_start) { DVLOG(1) << "Session start"; total_session_timer_ = std::make_unique<base::ElapsedTimer>(); signin_session_timer_ = std::make_unique<base::ElapsedTimer>(); sync_account_session_timer_ = std::make_unique<base::ElapsedTimer>(); } namespace { base::TimeDelta SubtractInactiveTime(base::TimeDelta total_length, base::TimeDelta inactive_time) { // Substract any time the user was inactive from our session length. If this // ends up giving the session negative length, which can happen if the feature // state changed after the user became inactive, log the length as 0. base::TimeDelta session_length = total_length - inactive_time; if (session_length < base::TimeDelta()) { session_length = base::TimeDelta(); } return session_length; } } // namespace void DesktopProfileSessionDurationsService::OnSessionEnded( base::TimeDelta session_length) { DVLOG(1) << "Session end"; if (!total_session_timer_) { // If there was no active session, just ignore this call. 
return; } base::TimeDelta inactivity_at_session_end = total_session_timer_->Elapsed() - session_length; LogSigninDuration(SubtractInactiveTime(signin_session_timer_->Elapsed(), inactivity_at_session_end)); LogSyncAndAccountDuration(SubtractInactiveTime( sync_account_session_timer_->Elapsed(), inactivity_at_session_end)); total_session_timer_.reset(); signin_session_timer_.reset(); sync_account_session_timer_.reset(); } void DesktopProfileSessionDurationsService::OnGaiaAccountsInCookieUpdated( const std::vector<gaia::ListedAccount>& accounts, const std::vector<gaia::ListedAccount>& signed_out_accounts, const GoogleServiceAuthError& error) { DVLOG(1) << "Cookie state change. in: " << accounts.size() << " out: " << signed_out_accounts.size() << " err: " << error.ToString(); if (error.state() != GoogleServiceAuthError::NONE) { // Return early if there's an error. This should only happen if there's an // actual error getting the account list. If there are any auth errors with // the tokens, those accounts will be moved to signed_out_accounts instead. return; } if (accounts.empty()) { // No signed in account. if (signin_status_ == FeatureState::ON && signin_session_timer_) { LogSigninDuration(signin_session_timer_->Elapsed()); signin_session_timer_ = std::make_unique<base::ElapsedTimer>(); } signin_status_ = FeatureState::OFF; } else { // There is a signed in account. 
if (signin_status_ == FeatureState::OFF && signin_session_timer_) { LogSigninDuration(signin_session_timer_->Elapsed()); signin_session_timer_ = std::make_unique<base::ElapsedTimer>(); } signin_status_ = FeatureState::ON; } } void DesktopProfileSessionDurationsService::OnStateChanged( syncer::SyncService* sync) { DVLOG(1) << "Sync state change"; HandleSyncAndAccountChange(); } void DesktopProfileSessionDurationsService::OnRefreshTokensLoaded() { DVLOG(1) << "OnRefreshTokensLoaded"; HandleSyncAndAccountChange(); } void DesktopProfileSessionDurationsService::OnRefreshTokenAvailable( const std::string& account_id) { DVLOG(1) << "OnRefreshTokenAvailable"; HandleSyncAndAccountChange(); } void DesktopProfileSessionDurationsService::OnRefreshTokenRevoked( const std::string& account_id) { DVLOG(1) << "OnRefreshTokenRevoked"; HandleSyncAndAccountChange(); } bool DesktopProfileSessionDurationsService::ShouldLogUpdate( FeatureState new_sync_status, FeatureState new_account_status) { bool status_change = (new_sync_status != sync_status_ || new_account_status != account_status_); bool was_unknown = sync_status_ == FeatureState::UNKNOWN || account_status_ == FeatureState::UNKNOWN; return sync_account_session_timer_ && status_change && !was_unknown; } void DesktopProfileSessionDurationsService::UpdateSyncAndAccountStatus( FeatureState new_sync_status, FeatureState new_account_status) { if (ShouldLogUpdate(new_sync_status, new_account_status)) { LogSyncAndAccountDuration(sync_account_session_timer_->Elapsed()); sync_account_session_timer_ = std::make_unique<base::ElapsedTimer>(); } sync_status_ = new_sync_status; account_status_ = new_account_status; } void DesktopProfileSessionDurationsService::HandleSyncAndAccountChange() { // If sync is off, we can tell whether the user is signed in by just checking // if the token service has accounts, because the reconcilor will take care of // removing accounts in error state from that list. 
FeatureState non_sync_account_status = oauth2_token_service_->GetAccounts().empty() ? FeatureState::OFF : FeatureState::ON; if (sync_service_ && sync_service_->CanSyncStart()) { // Sync has potential to turn on, or get into account error state. if (sync_service_->GetAuthError().state() == GoogleServiceAuthError::INVALID_GAIA_CREDENTIALS) { // Sync is enabled, but we have an account issue. UpdateSyncAndAccountStatus(FeatureState::ON, FeatureState::OFF); } else if (sync_service_->IsSyncActive() && sync_service_->GetLastCycleSnapshot().is_initialized()) { // Sync is on and running, we must have an account too. UpdateSyncAndAccountStatus(FeatureState::ON, FeatureState::ON); } else { // We don't know yet if sync is going to work. // At least update the signin status, so that if we never learn // what the sync state is, we know the signin state. account_status_ = non_sync_account_status; } } else { // We know for sure that sync is off, so we just need to find out about the // account status. UpdateSyncAndAccountStatus(FeatureState::OFF, non_sync_account_status); } } void DesktopProfileSessionDurationsService::LogSigninDuration( base::TimeDelta session_length) { switch (signin_status_) { case FeatureState::ON: DVLOG(1) << "Logging Session.TotalDuration.WithAccount of " << session_length; UMA_HISTOGRAM_LONG_TIMES("Session.TotalDuration.WithAccount", session_length); break; case FeatureState::OFF: // Since the feature wasn't working for the user if we didn't know its // state, log the status as off. case FeatureState::UNKNOWN: DVLOG(1) << "Logging Session.TotalDuration.WithoutAccount of " << session_length; UMA_HISTOGRAM_LONG_TIMES("Session.TotalDuration.WithoutAccount", session_length); } } void DesktopProfileSessionDurationsService::LogSyncAndAccountDuration( base::TimeDelta session_length) { // TODO(feuunk): Distinguish between being NotOptedInToSync and // OptedInToSyncPaused. 
if (sync_status_ == FeatureState::ON) { if (account_status_ == FeatureState::ON) { DVLOG(1) << "Logging Session.TotalDuration.OptedInToSyncWithAccount of " << session_length; UMA_HISTOGRAM_LONG_TIMES("Session.TotalDuration.OptedInToSyncWithAccount", session_length); } else { DVLOG(1) << "Logging Session.TotalDuration.OptedInToSyncWithoutAccount of " << session_length; UMA_HISTOGRAM_LONG_TIMES( "Session.TotalDuration.OptedInToSyncWithoutAccount", session_length); } } else { if (account_status_ == FeatureState::ON) { DVLOG(1) << "Logging Session.TotalDuration.NotOptedInToSyncWithAccount of " << session_length; UMA_HISTOGRAM_LONG_TIMES( "Session.TotalDuration.NotOptedInToSyncWithAccount", session_length); } else { DVLOG(1) << "Logging Session.TotalDuration.NotOptedInToSyncWithoutAccount of " << session_length; UMA_HISTOGRAM_LONG_TIMES( "Session.TotalDuration.NotOptedInToSyncWithoutAccount", session_length); } } } void DesktopProfileSessionDurationsService::Shutdown() { session_duration_observer_.RemoveAll(); gaia_cookie_observer_.RemoveAll(); sync_observer_.RemoveAll(); oauth2_token_observer_.RemoveAll(); } } // namespace metrics
3,843
626
/* * Copyright 2018, OpenCensus Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.opencensus.contrib.resource.util; import static com.google.common.base.MoreObjects.firstNonNull; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; import java.nio.charset.Charset; /** * Retrieves Google Cloud project-id and a limited set of instance attributes from Metadata server. 
* * @see <a href="https://cloud.google.com/compute/docs/storing-retrieving-metadata"> * https://cloud.google.com/compute/docs/storing-retrieving-metadata</a> */ final class GcpMetadataConfig { private static final String METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/"; private GcpMetadataConfig() {} static boolean isRunningOnGcp() { return !getProjectId().isEmpty(); } static String getProjectId() { return getAttribute("project/project-id"); } static String getZone() { String zone = getAttribute("instance/zone"); if (zone.contains("/")) { return zone.substring(zone.lastIndexOf('/') + 1); } return zone; } static String getMachineType() { String machineType = getAttribute("instance/machine-type"); if (machineType.contains("/")) { return machineType.substring(machineType.lastIndexOf('/') + 1); } return machineType; } static String getInstanceId() { return getAttribute("instance/id"); } static String getClusterName() { return getAttribute("instance/attributes/cluster-name"); } static String getInstanceName() { return getAttribute("instance/hostname"); } static String getInstanceHostname() { return getAttribute("instance/name"); } private static String getAttribute(String attributeName) { try { URL url = new URL(METADATA_URL + attributeName); HttpURLConnection connection = (HttpURLConnection) url.openConnection(); connection.setRequestProperty("Metadata-Flavor", "Google"); InputStream input = connection.getInputStream(); if (connection.getResponseCode() == 200) { BufferedReader reader = null; try { reader = new BufferedReader(new InputStreamReader(input, Charset.forName("UTF-8"))); return firstNonNull(reader.readLine(), ""); } finally { if (reader != null) { reader.close(); } } } } catch (IOException ignore) { // ignore } return ""; } }
1,021
5,607
<gh_stars>1000+ package io.micronaut.docs.http.client.bind.type; public class Metadata { private final Double version; private final Long deploymentId; public Metadata(Double version, Long deploymentId) { this.version = version; this.deploymentId = deploymentId; } public Double getVersion() { return version; } public Long getDeploymentId() { return deploymentId; } }
162
389
<reponame>dmcreyno/gosu-lang package editor; import javax.swing.*; import javax.swing.tree.TreeCellRenderer; import java.awt.*; /** */ public abstract class AbstractTreeCellRenderer<T> extends JLabel implements TreeCellRenderer { private boolean _bSelected; private T _node; private JTree _tree; public AbstractTreeCellRenderer( JTree tree ) { _tree = tree; } public Component getTreeCellRendererComponent( JTree tree, Object value, boolean bSelected, boolean bExpanded, boolean bLeaf, int iRow, boolean bHasFocus ) { if( value != null ) { //noinspection unchecked _node = (T)value; _bSelected = bSelected; configure(); } return this; } protected T getNode() { return _node; } public void update() { _tree.repaint(); } public abstract void configure(); /** */ public void paint( Graphics g ) { // ((Graphics2D)g).setRenderingHint( RenderingHints.KEY_TEXT_ANTIALIASING, // RenderingHints.VALUE_TEXT_ANTIALIAS_ON ); // ((Graphics2D)g).setRenderingHint( RenderingHints.KEY_RENDERING, // RenderingHints.VALUE_RENDER_QUALITY ); Color bkColor; boolean bFocus = KeyboardFocusManager.getCurrentKeyboardFocusManager().getPermanentFocusOwner() == _tree; if( _bSelected ) { bkColor = _tree.isEnabled() && bFocus ? Scheme.active().getActiveCaption() : Scheme.active().getControl(); } else { bkColor = _tree.getBackground(); if( bkColor == null ) { bkColor = getBackground(); } } if( bkColor != null ) { g.setColor( bkColor ); g.fillRect( 0, 0, getWidth() - 1, getHeight() - 1 ); if( _bSelected ) { g.setColor( _tree.isEnabled() && bFocus ? Scheme.active().getXpBorderColor() : Scheme.active().getFieldBorderColor() ); g.drawRect( 0, 0, getWidth() - 1, getHeight() - 1 ); } g.setColor( bkColor ); } setForeground( Scheme.active().getWindowText() ); super.paint( g ); } public Dimension getPreferredSize() { Dimension dim = super.getPreferredSize(); if( dim != null ) { dim = new Dimension( dim.width + 3, dim.height ); } return dim; } }
1,087
435
{ "copyright_text": null, "description": "According to the always trustworthy Wikipedia, there are approximately\n360 million native English speakers in the world. We, as developers, are\nso used to write code and documentation in English that we may not\nrealize that this number only represents 4.67% of the world population.\nIt is very useful to have a common language for the communication\nbetween developers, but this doesn\u2019t mean that the user shouldn\u2019t feel a\nlittle bit more comfortable when using your product.\n\nTranslation of terms is only one step in the whole Internationalization\n(i18n) and Localization (l10n) process. It also entails number, date and\ntime formatting, currency conversion, sorting, legal requirements, among\nother issues. This talk will go through the definition of i18n and l10n\nas well as show the main tools available for developers to support\nmultiple languages and regional related preferences in their Python\nprogram. We will also see how one can enable local support for their\nwebsite in Django. Finally, this presentation will discuss how we can\nmanage Internationalization and Localization for a bigger product\nrunning in different platforms (front and back end) and how to\nincorporate i18n and l10n into our current development and deploy\nprocesses.\n\nOh, and by the way, \u201ceita!\u201d is a Brazilian interjection to show yourself\nsurprised with something. 
\ud83d\ude42\n", "duration": 2635, "language": "eng", "recorded": "2019-05-04T15:15:00", "related_urls": [ { "label": "Conference schedule", "url": "https://us.pycon.org/2019/schedule/talks/" }, { "label": "Conference slides (github)", "url": "https://github.com/PyCon/2019-slides" }, { "label": "Conference slides (speakerdeck)", "url": "https://speakerdeck.com/pycon2019" }, { "label": "Talk schedule", "url": "https://us.pycon.org/2019/schedule/presentation/192/" } ], "speakers": [ "<NAME>" ], "tags": [ "talk" ], "thumbnail_url": "https://i.ytimg.com/vi/2eF_MhgplQ8/maxresdefault.jpg", "title": "Eita! Why Internationalization and Localization matter", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=2eF_MhgplQ8" } ] }
756
919
<filename>src/memorymodel.cpp #include "memorymodel.h" #include <QBrush> #include <QFont> #include "fonts.h" #include "processorhandler.h" namespace Ripes { MemoryModel::MemoryModel(QObject* parent) : QAbstractTableModel(parent) {} int MemoryModel::columnCount(const QModelIndex&) const { return FIXED_COLUMNS_CNT + ProcessorHandler::currentISA()->bytes() /* byte columns */; } int MemoryModel::rowCount(const QModelIndex&) const { return m_rowsVisible; } void MemoryModel::processorWasClocked() { // Reload model beginResetModel(); endResetModel(); } AInt maxAddress() { return vsrtl::generateBitmask(ProcessorHandler::currentISA()->bits()); } void MemoryModel::setCentralAddress(AInt address) { address = address - (address % ProcessorHandler::currentISA()->bytes()); m_centralAddress = address; processorWasClocked(); } // Checks whether an overflow or underflow error occurred when calculating the new address, relative to the current // address bool validAddressChange(AInt currentAddress, AInt newAddress) { bool validAddress = true; const AIntS signed_center = static_cast<AIntS>(currentAddress); const AIntS signed_aligned = static_cast<AIntS>(newAddress); // Arithmetic underflow and overflow check (overflow, if ISA bytes == sizeof(AIntS)) validAddress &= (signed_center >= 0 && signed_aligned >= 0) || (signed_center < 0 && signed_aligned < 0); // Overflow check 2: if ISA bytes < sizeof(AIntS) validAddress &= newAddress < maxAddress(); return validAddress; } void MemoryModel::offsetCentralAddress(int rowOffset) { const int byteOffset = rowOffset * ProcessorHandler::currentISA()->bytes(); const AInt newCenterAddress = m_centralAddress + byteOffset; m_centralAddress = validAddressChange(m_centralAddress, newCenterAddress) ? 
newCenterAddress : m_centralAddress; processorWasClocked(); } QVariant MemoryModel::headerData(int section, Qt::Orientation orientation, int role) const { if (orientation == Qt::Horizontal && role == Qt::DisplayRole) { switch (section) { case Column::Address: return "Address"; case Column::WordValue: return "Word"; default: return "Byte " + QString::number(section - FIXED_COLUMNS_CNT); } } return QVariant(); } void MemoryModel::setRowsVisible(int rows) { m_rowsVisible = rows; processorWasClocked(); } QVariant MemoryModel::data(const QModelIndex& index, int role) const { if (!index.isValid()) { return QVariant(); } if (role == Qt::TextAlignmentRole) return Qt::AlignCenter; if (role == Qt::FontRole) { return QFont(Fonts::monospace, 11); } const auto bytes = ProcessorHandler::currentISA()->bytes(); // Calculate the word-aligned address corresponding to the row of the current index. // If the central address is at one of its two extrema, based on the address space of the processor, the aligned // address is invalid. 
/* AInt alignedAddress; bool validAddress = true; if (m_centralAddress == 0x0) { validAddress &= index.row() < m_rowsVisible / 2; } if (m_centralAddress >= maxAddress()) { validAddress &= index.row() > m_rowsVisible / 2; } if (validAddress) { alignedAddress = static_cast<AInt>(m_centralAddress) + ((((m_rowsVisible * bytes) / 2) / bytes) * bytes) - (index.row() * bytes); } */ const AInt alignedAddress = static_cast<AInt>(m_centralAddress) + ((((m_rowsVisible * bytes) / 2) / bytes) * bytes) - (index.row() * bytes); const bool validAddress = validAddressChange(m_centralAddress, alignedAddress); const unsigned byteOffset = index.column() - FIXED_COLUMNS_CNT; if (index.column() == Column::Address) { if (role == Qt::DisplayRole) { return addrData(alignedAddress, validAddress); } else if (role == Qt::ForegroundRole) { // Assign a brush if one of the byte-indexed address covered by the aligned address has been written to QVariant unusedAddressBrush; for (unsigned i = 0; i < ProcessorHandler::currentISA()->bytes(); ++i) { QVariant addressBrush = fgColorData(alignedAddress, i, validAddress); if (addressBrush.isNull()) { return addressBrush; } else { unusedAddressBrush = addressBrush; } } return unusedAddressBrush; } } else { switch (role) { case Qt::ForegroundRole: return fgColorData(alignedAddress, index.column() == Column::WordValue ? 
0 : byteOffset, validAddress); case Qt::DisplayRole: if (index.column() == Column::WordValue) { return wordData(alignedAddress, validAddress); } else { return byteData(alignedAddress, byteOffset, validAddress); } default: break; } } return QVariant(); } void MemoryModel::setRadix(Radix r) { m_radix = r; processorWasClocked(); } QVariant MemoryModel::addrData(AInt address, bool validAddress) const { if (!validAddress) { return "-"; } return encodeRadixValue(address, Radix::Hex, ProcessorHandler::currentISA()->bytes()); } QVariant MemoryModel::fgColorData(AInt address, AInt byteOffset, bool validAddress) const { if (!validAddress || !ProcessorHandler::getMemory().contains(address + byteOffset)) { return QBrush(Qt::lightGray); } else { return QVariant(); // default } } QVariant MemoryModel::byteData(AInt address, AInt byteOffset, bool validAddress) const { if (!validAddress) { return "-"; } else if (!ProcessorHandler::getMemory().contains(address + byteOffset)) { // Dont read the memory (this will create an entry in the memory if done so). Instead, create a "fake" entry in // the memory model, containing X's. return "X"; } else { VInt value = ProcessorHandler::getMemory().readMemConst(address + byteOffset, 1); return encodeRadixValue(value & 0xFF, m_radix, 1); } } QVariant MemoryModel::wordData(AInt address, bool validAddress) const { if (!validAddress) { return "-"; } else if (!ProcessorHandler::getMemory().contains(address)) { // Dont read the memory (this will create an entry in the memory if done so). Instead, create a "fake" entry in // the memory model, containing X's. return "X"; } else { unsigned bytes = ProcessorHandler::currentISA()->bytes(); return encodeRadixValue(ProcessorHandler::getMemory().readMemConst(address, bytes), m_radix, bytes); } } Qt::ItemFlags MemoryModel::flags(const QModelIndex&) const { return Qt::ItemIsEnabled | Qt::ItemIsSelectable; } } // namespace Ripes
2,666
12,278
//---------------------------------------------------------------------------// // Copyright (c) 2013 <NAME> <<EMAIL>> // // Distributed under the Boost Software License, Version 1.0 // See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt // // See http://boostorg.github.com/compute for more information. //---------------------------------------------------------------------------// #define BOOST_TEST_MODULE TestInplaceMerge #include <boost/test/unit_test.hpp> #include <boost/compute/system.hpp> #include <boost/compute/algorithm/inplace_merge.hpp> #include <boost/compute/container/vector.hpp> #include "check_macros.hpp" #include "context_setup.hpp" namespace compute = boost::compute; BOOST_AUTO_TEST_CASE(simple_merge_int) { int data[] = { 1, 3, 5, 7, 2, 4, 6, 8 }; compute::vector<int> vector(data, data + 8, queue); // merge each half in-place compute::inplace_merge( vector.begin(), vector.begin() + 4, vector.end(), queue ); CHECK_RANGE_EQUAL(int, 8, vector, (1, 2, 3, 4, 5, 6, 7, 8)); // run again on already sorted list compute::inplace_merge( vector.begin(), vector.begin() + 4, vector.end(), queue ); CHECK_RANGE_EQUAL(int, 8, vector, (1, 2, 3, 4, 5, 6, 7, 8)); } BOOST_AUTO_TEST_SUITE_END()
529
1,217
#include <stdlib.h> #include <stdio.h> #include <vector> #include <string> #include <sstream> #include <iostream> #include <stdexcept> #include <functional> #include <mutex> #include <Eigen/Geometry> #ifdef ENABLE_PARALLEL_HAUSDORFF_DISTANCE #include <omp.h> #endif #ifndef SIMPLE_HAUSDORFF_DISTANCE_HPP #define SIMPLE_HAUSDORFF_DISTANCE_HPP namespace simple_hausdorff_distance { class SimpleHausdorffDistance { private: SimpleHausdorffDistance() {} static inline size_t GetNumOMPThreads(void) { #ifdef ENABLE_PARALLEL_HAUSDORFF_DISTANCE #if defined(_OPENMP) size_t num_threads = 0; #pragma omp parallel { num_threads = (size_t)omp_get_num_threads(); } return num_threads; #else return 1; #endif #else return 1; #endif } public: template<typename FirstDatatype, typename SecondDatatype, typename FirstAllocator=std::allocator<FirstDatatype>, typename SecondAllocator=std::allocator<SecondDatatype>> static double ComputeDistance(const std::vector<FirstDatatype, FirstAllocator>& first_distribution, const std::vector<SecondDatatype, SecondAllocator>& second_distribution, const std::function<double(const FirstDatatype&, const SecondDatatype&)>& distance_fn) { // Compute the Hausdorff distance - the "maximum minimum" distance std::vector<double> per_thread_storage(GetNumOMPThreads(), 0.0); #ifdef ENABLE_PARALLEL_HAUSDORFF_DISTANCE #pragma omp parallel for #endif for (size_t idx = 0; idx < first_distribution.size(); idx++) { const FirstDatatype& first = first_distribution[idx]; double minimum_distance = INFINITY; for (size_t jdx = 0; jdx < second_distribution.size(); jdx++) { const SecondDatatype& second = second_distribution[jdx]; const double current_distance = distance_fn(first, second); if (current_distance < minimum_distance) { minimum_distance = current_distance; } } #ifdef ENABLE_PARALLEL_HAUSDORFF_DISTANCE #if defined(_OPENMP) const size_t current_thread_id = (size_t)omp_get_thread_num(); #else const size_t current_thread_id = 0; #endif #else const size_t current_thread_id = 
0; #endif if (minimum_distance > per_thread_storage[current_thread_id]) { per_thread_storage[current_thread_id] = minimum_distance; } } double maximum_minimum_distance = 0.0; for (size_t idx = 0; idx < per_thread_storage.size(); idx++) { const double temp_minimum_distance = per_thread_storage[idx]; if (temp_minimum_distance > maximum_minimum_distance) { maximum_minimum_distance = temp_minimum_distance; } } return maximum_minimum_distance; } }; } #endif // SIMPLE_HAUSDORFF_DISTANCE_HPP
1,635
1,738
<gh_stars>1000+ /* * All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or * its licensors. * * For complete copyright and license terms please see the LICENSE at the root of this * distribution (the "License"). All use of this software is governed by the License, * or, if provided, by the license below or the license accompanying this file. Do not * remove or modify any license notices. This file is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * */ #pragma once #include <AzCore/EBus/EBus.h> #include <AzCore/std/string/string.h> namespace AzFramework { namespace StreamingInstall { //////////////////////////////////////////////////////////////////////////////////////// class StreamingInstallChunkNotifications : public AZ::EBusTraits { public: //////////////////////////////////////////////////////////////////////////////////////////// //! EBus Trait: StreaimingInstall chunk notifications can be handled by multiple listeners static const AZ::EBusHandlerPolicy HandlerPolicy = AZ::EBusHandlerPolicy::Multiple; //////////////////////////////////////////////////////////////////////////////////////////// //! EBus Trait: StreamingInstall chunk notifications are addressed to multiple addresses //! Events that are addressed to an ID are received by handlers that are connected to that ID typedef AZStd::string BusIdType; static const AZ::EBusAddressPolicy AddressPolicy = AZ::EBusAddressPolicy::ById; //////////////////////////////////////////////////////////////////////////////////////////// //! Override to be notified when a chunk has completed downloading. This is a responce from //! the RegisterChunkInstalledCallbacks event emitted from the platform implementation //! \param[in] the ID for the chunk that has downloaded virtual void OnChunkDownloadComplete(const AZStd::string& chunkId) = 0; //////////////////////////////////////////////////////////////////////////////////////////// //! 
Override to be notified when a chunk's download progress has changed. This is a responce //! from the StreamingInstallRequests::BroadcastChunkProgress event //! \param[in] the ID for the monitored chunk, the current download progress as a 0.0 to 1.0 value virtual void OnChunkProgressChanged(const AZStd::string& chunkId, float progress) = 0; //////////////////////////////////////////////////////////////////////////////////////////// //! Override to be notified if a chunk has been installed. This is a responce //! from the StreamingInstallRequests::IsChunkInstalled event //! \param[in] the ID of the chunk to query, installed flag for the chunk (true if installed and false otherwise). virtual void OnQueryChunkInstalled(const AZStd::string& chunkId, bool installed) = 0; }; using StreamingInstallChunkNotificationBus = AZ::EBus<StreamingInstallChunkNotifications>; //////////////////////////////////////////////////////////////////////////////////////// class StreamingInstallPackageNotifications : public AZ::EBusTraits { public: //////////////////////////////////////////////////////////////////////////////////////////// //! EBus Trait: StreaimingInstall package notifications can be handled by multiple listeners static const AZ::EBusHandlerPolicy HandlerPolicy = AZ::EBusHandlerPolicy::Multiple; //////////////////////////////////////////////////////////////////////////////////////////// //! EBus Trait: StreamingInstall package notifications are addressed to a single address static const AZ::EBusAddressPolicy AddressPolicy = AZ::EBusAddressPolicy::Single; //////////////////////////////////////////////////////////////////////////////////////////// //! Override to be notified the packages download progress has changed. This is a response //! from the StreamingInstallRequests::BroadcastOverallProgress event //! 
\param[in] the current download progress for the entire package as a 0.0 to 1.0 value virtual void OnPackageProgressChanged(float progress) = 0; }; using StreamingInstallPackageNotificationBus = AZ::EBus<StreamingInstallPackageNotifications>; } //namespace StreamingInstall }
1,355
446
/* ======================================== * StudioTan - StudioTan.h * Copyright (c) 2016 airwindows, All rights reserved * ======================================== */ #ifndef __StudioTan_H #include "StudioTan.h" #endif void StudioTan::processReplacing(float **inputs, float **outputs, VstInt32 sampleFrames) { float* in1 = inputs[0]; float* in2 = inputs[1]; float* out1 = outputs[0]; float* out2 = outputs[1]; bool highres = true; //for 24 bit: false for 16 bit bool brightfloor = true; //for Studio Tan: false for Dither Me Timbers bool benford = true; //for Not Just Another Dither: false for newer two bool cutbins = false; //for NJAD: only attenuate bins if one gets very full switch ((VstInt32)( A * 5.999 )) { case 0: benford = false; break; //Studio Tan 24 case 1: benford = false; brightfloor = false; break; //Dither Me Timbers 24 case 2: break; //Not Just Another Dither 24 case 3: benford = false; highres = false; break; //Studio Tan 16 case 4: benford = false; brightfloor = false; highres = false; break; //Dither Me Timbers 16 case 5: highres = false; break; //Not Just Another Dither 16 } while (--sampleFrames >= 0) { long double inputSampleL; long double outputSampleL; long double drySampleL; long double inputSampleR; long double outputSampleR; long double drySampleR; if (highres) { inputSampleL = *in1 * 8388608.0; inputSampleR = *in2 * 8388608.0; } else { inputSampleL = *in1 * 32768.0; inputSampleR = *in2 * 32768.0; } //shared input stage if (benford) { //begin Not Just Another Dither drySampleL = inputSampleL; drySampleR = inputSampleR; inputSampleL -= noiseShapingL; inputSampleR -= noiseShapingR; cutbins = false; long double benfordize; //we get to re-use this for each channel //begin left channel NJAD benfordize = floor(inputSampleL); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize 
< 1.0) {benfordize *= 10;} int hotbinA = floor(benfordize); //hotbin becomes the Benford bin value for this number floored long double totalA = 0; if ((hotbinA > 0) && (hotbinA < 10)) { bynL[hotbinA] += 1; if (bynL[hotbinA] > 982) cutbins = true; totalA += (301-bynL[1]); totalA += (176-bynL[2]); totalA += (125-bynL[3]); totalA += (97-bynL[4]); totalA += (79-bynL[5]); totalA += (67-bynL[6]); totalA += (58-bynL[7]); totalA += (51-bynL[8]); totalA += (46-bynL[9]); bynL[hotbinA] -= 1; } else {hotbinA = 10;} //produce total number- smaller is closer to Benford real benfordize = ceil(inputSampleL); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} int hotbinB = floor(benfordize); //hotbin becomes the Benford bin value for this number ceiled long double totalB = 0; if ((hotbinB > 0) && (hotbinB < 10)) { bynL[hotbinB] += 1; if (bynL[hotbinB] > 982) cutbins = true; totalB += (301-bynL[1]); totalB += (176-bynL[2]); totalB += (125-bynL[3]); totalB += (97-bynL[4]); totalB += (79-bynL[5]); totalB += (67-bynL[6]); totalB += (58-bynL[7]); totalB += (51-bynL[8]); totalB += (46-bynL[9]); bynL[hotbinB] -= 1; } else {hotbinB = 10;} //produce total number- smaller is closer to Benford real if (totalA < totalB) { bynL[hotbinA] += 1; outputSampleL = floor(inputSampleL); } else { bynL[hotbinB] += 1; outputSampleL = floor(inputSampleL+1); } //assign the relevant one to the delay line //and floor/ceil signal accordingly if (cutbins) { bynL[1] *= 0.99; bynL[2] *= 0.99; bynL[3] *= 0.99; bynL[4] *= 0.99; bynL[5] *= 0.99; bynL[6] *= 0.99; bynL[7] *= 0.99; bynL[8] *= 0.99; bynL[9] *= 0.99; bynL[10] *= 0.99; //catchall for garbage data } noiseShapingL += outputSampleL - drySampleL; //end left channel NJAD //begin right channel NJAD cutbins = false; benfordize = floor(inputSampleR); while 
(benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} hotbinA = floor(benfordize); //hotbin becomes the Benford bin value for this number floored totalA = 0; if ((hotbinA > 0) && (hotbinA < 10)) { bynR[hotbinA] += 1; if (bynR[hotbinA] > 982) cutbins = true; totalA += (301-bynR[1]); totalA += (176-bynR[2]); totalA += (125-bynR[3]); totalA += (97-bynR[4]); totalA += (79-bynR[5]); totalA += (67-bynR[6]); totalA += (58-bynR[7]); totalA += (51-bynR[8]); totalA += (46-bynR[9]); bynR[hotbinA] -= 1; } else {hotbinA = 10;} //produce total number- smaller is closer to Benford real benfordize = ceil(inputSampleR); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} hotbinB = floor(benfordize); //hotbin becomes the Benford bin value for this number ceiled totalB = 0; if ((hotbinB > 0) && (hotbinB < 10)) { bynR[hotbinB] += 1; if (bynR[hotbinB] > 982) cutbins = true; totalB += (301-bynR[1]); totalB += (176-bynR[2]); totalB += (125-bynR[3]); totalB += (97-bynR[4]); totalB += (79-bynR[5]); totalB += (67-bynR[6]); totalB += (58-bynR[7]); totalB += (51-bynR[8]); totalB += (46-bynR[9]); bynR[hotbinB] -= 1; } else {hotbinB = 10;} //produce total number- smaller is closer to Benford real if (totalA < totalB) { bynR[hotbinA] += 1; outputSampleR = floor(inputSampleR); } else { bynR[hotbinB] += 1; outputSampleR = floor(inputSampleR+1); } //assign the relevant one to the delay line //and floor/ceil signal accordingly if (cutbins) { bynR[1] *= 0.99; bynR[2] *= 0.99; bynR[3] *= 0.99; bynR[4] *= 0.99; bynR[5] *= 0.99; bynR[6] *= 0.99; bynR[7] *= 0.99; bynR[8] *= 0.99; bynR[9] *= 0.99; bynR[10] *= 
0.99; //catchall for garbage data } noiseShapingR += outputSampleR - drySampleR; //end right channel NJAD //end Not Just Another Dither } else { //begin StudioTan or Dither Me Timbers if (brightfloor) { lastSampleL -= (noiseShapingL*0.8); lastSampleR -= (noiseShapingR*0.8); if ((lastSampleL+lastSampleL) <= (inputSampleL+lastSample2L)) outputSampleL = floor(lastSampleL); //StudioTan else outputSampleL = floor(lastSampleL+1.0); //round down or up based on whether it softens treble angles if ((lastSampleR+lastSampleR) <= (inputSampleR+lastSample2R)) outputSampleR = floor(lastSampleR); //StudioTan else outputSampleR = floor(lastSampleR+1.0); //round down or up based on whether it softens treble angles } else { lastSampleL -= (noiseShapingL*0.11); lastSampleR -= (noiseShapingR*0.11); if ((lastSampleL+lastSampleL) >= (inputSampleL+lastSample2L)) outputSampleL = floor(lastSampleL); //DitherMeTimbers else outputSampleL = floor(lastSampleL+1.0); //round down or up based on whether it softens treble angles if ((lastSampleR+lastSampleR) >= (inputSampleR+lastSample2R)) outputSampleR = floor(lastSampleR); //DitherMeTimbers else outputSampleR = floor(lastSampleR+1.0); //round down or up based on whether it softens treble angles } noiseShapingL += outputSampleL; noiseShapingL -= lastSampleL; //apply noise shaping lastSample2L = lastSampleL; lastSampleL = inputSampleL; //we retain three samples in a row noiseShapingR += outputSampleR; noiseShapingR -= lastSampleR; //apply noise shaping lastSample2R = lastSampleR; lastSampleR = inputSampleR; //we retain three samples in a row //end StudioTan or Dither Me Timbers } //shared output stage long double noiseSuppressL = fabs(inputSampleL); if (noiseShapingL > noiseSuppressL) noiseShapingL = noiseSuppressL; if (noiseShapingL < -noiseSuppressL) noiseShapingL = -noiseSuppressL; long double noiseSuppressR = fabs(inputSampleR); if (noiseShapingR > noiseSuppressR) noiseShapingR = noiseSuppressR; if (noiseShapingR < -noiseSuppressR) 
noiseShapingR = -noiseSuppressR; float ironBarL; float ironBarR; if (highres) { ironBarL = outputSampleL / 8388608.0; ironBarR = outputSampleR / 8388608.0; } else { ironBarL = outputSampleL / 32768.0; ironBarR = outputSampleR / 32768.0; } if (ironBarL > 1.0) ironBarL = 1.0; if (ironBarL < -1.0) ironBarL = -1.0; if (ironBarR > 1.0) ironBarR = 1.0; if (ironBarR < -1.0) ironBarR = -1.0; *out1 = ironBarL; *out2 = ironBarR; *in1++; *in2++; *out1++; *out2++; } } void StudioTan::processDoubleReplacing(double **inputs, double **outputs, VstInt32 sampleFrames) { double* in1 = inputs[0]; double* in2 = inputs[1]; double* out1 = outputs[0]; double* out2 = outputs[1]; bool highres = true; //for 24 bit: false for 16 bit bool brightfloor = true; //for Studio Tan: false for Dither Me Timbers bool benford = true; //for Not Just Another Dither: false for newer two bool cutbins = false; //for NJAD: only attenuate bins if one gets very full switch ((VstInt32)( A * 5.999 )) { case 0: benford = false; break; //Studio Tan 24 case 1: benford = false; brightfloor = false; break; //Dither Me Timbers 24 case 2: break; //Not Just Another Dither 24 case 3: benford = false; highres = false; break; //Studio Tan 16 case 4: benford = false; brightfloor = false; highres = false; break; //Dither Me Timbers 16 case 5: highres = false; break; //Not Just Another Dither 16 } while (--sampleFrames >= 0) { long double inputSampleL; long double outputSampleL; long double drySampleL; long double inputSampleR; long double outputSampleR; long double drySampleR; if (highres) { inputSampleL = *in1 * 8388608.0; inputSampleR = *in2 * 8388608.0; } else { inputSampleL = *in1 * 32768.0; inputSampleR = *in2 * 32768.0; } //shared input stage if (benford) { //begin Not Just Another Dither drySampleL = inputSampleL; drySampleR = inputSampleR; inputSampleL -= noiseShapingL; inputSampleR -= noiseShapingR; cutbins = false; long double benfordize; //we get to re-use this for each channel //begin left channel NJAD benfordize 
= floor(inputSampleL); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} int hotbinA = floor(benfordize); //hotbin becomes the Benford bin value for this number floored long double totalA = 0; if ((hotbinA > 0) && (hotbinA < 10)) { bynL[hotbinA] += 1; if (bynL[hotbinA] > 982) cutbins = true; totalA += (301-bynL[1]); totalA += (176-bynL[2]); totalA += (125-bynL[3]); totalA += (97-bynL[4]); totalA += (79-bynL[5]); totalA += (67-bynL[6]); totalA += (58-bynL[7]); totalA += (51-bynL[8]); totalA += (46-bynL[9]); bynL[hotbinA] -= 1; } else {hotbinA = 10;} //produce total number- smaller is closer to Benford real benfordize = ceil(inputSampleL); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} int hotbinB = floor(benfordize); //hotbin becomes the Benford bin value for this number ceiled long double totalB = 0; if ((hotbinB > 0) && (hotbinB < 10)) { bynL[hotbinB] += 1; if (bynL[hotbinB] > 982) cutbins = true; totalB += (301-bynL[1]); totalB += (176-bynL[2]); totalB += (125-bynL[3]); totalB += (97-bynL[4]); totalB += (79-bynL[5]); totalB += (67-bynL[6]); totalB += (58-bynL[7]); totalB += (51-bynL[8]); totalB += (46-bynL[9]); bynL[hotbinB] -= 1; } else {hotbinB = 10;} //produce total number- smaller is closer to Benford real if (totalA < totalB) { bynL[hotbinA] += 1; outputSampleL = floor(inputSampleL); } else { bynL[hotbinB] += 1; outputSampleL = floor(inputSampleL+1); } //assign the relevant one to the delay line //and floor/ceil signal accordingly if (cutbins) { bynL[1] *= 0.99; bynL[2] *= 0.99; bynL[3] *= 0.99; bynL[4] *= 0.99; bynL[5] *= 0.99; bynL[6] *= 0.99; 
bynL[7] *= 0.99; bynL[8] *= 0.99; bynL[9] *= 0.99; bynL[10] *= 0.99; //catchall for garbage data } noiseShapingL += outputSampleL - drySampleL; //end left channel NJAD //begin right channel NJAD cutbins = false; benfordize = floor(inputSampleR); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} hotbinA = floor(benfordize); //hotbin becomes the Benford bin value for this number floored totalA = 0; if ((hotbinA > 0) && (hotbinA < 10)) { bynR[hotbinA] += 1; if (bynR[hotbinA] > 982) cutbins = true; totalA += (301-bynR[1]); totalA += (176-bynR[2]); totalA += (125-bynR[3]); totalA += (97-bynR[4]); totalA += (79-bynR[5]); totalA += (67-bynR[6]); totalA += (58-bynR[7]); totalA += (51-bynR[8]); totalA += (46-bynR[9]); bynR[hotbinA] -= 1; } else {hotbinA = 10;} //produce total number- smaller is closer to Benford real benfordize = ceil(inputSampleR); while (benfordize >= 1.0) {benfordize /= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} if (benfordize < 1.0) {benfordize *= 10;} hotbinB = floor(benfordize); //hotbin becomes the Benford bin value for this number ceiled totalB = 0; if ((hotbinB > 0) && (hotbinB < 10)) { bynR[hotbinB] += 1; if (bynR[hotbinB] > 982) cutbins = true; totalB += (301-bynR[1]); totalB += (176-bynR[2]); totalB += (125-bynR[3]); totalB += (97-bynR[4]); totalB += (79-bynR[5]); totalB += (67-bynR[6]); totalB += (58-bynR[7]); totalB += (51-bynR[8]); totalB += (46-bynR[9]); bynR[hotbinB] -= 1; } else {hotbinB = 10;} //produce total number- smaller is closer to Benford real if (totalA < totalB) { bynR[hotbinA] += 1; outputSampleR = floor(inputSampleR); } else { bynR[hotbinB] += 1; outputSampleR = floor(inputSampleR+1); } //assign 
the relevant one to the delay line //and floor/ceil signal accordingly if (cutbins) { bynR[1] *= 0.99; bynR[2] *= 0.99; bynR[3] *= 0.99; bynR[4] *= 0.99; bynR[5] *= 0.99; bynR[6] *= 0.99; bynR[7] *= 0.99; bynR[8] *= 0.99; bynR[9] *= 0.99; bynR[10] *= 0.99; //catchall for garbage data } noiseShapingR += outputSampleR - drySampleR; //end right channel NJAD //end Not Just Another Dither } else { //begin StudioTan or Dither Me Timbers if (brightfloor) { lastSampleL -= (noiseShapingL*0.8); lastSampleR -= (noiseShapingR*0.8); if ((lastSampleL+lastSampleL) <= (inputSampleL+lastSample2L)) outputSampleL = floor(lastSampleL); //StudioTan else outputSampleL = floor(lastSampleL+1.0); //round down or up based on whether it softens treble angles if ((lastSampleR+lastSampleR) <= (inputSampleR+lastSample2R)) outputSampleR = floor(lastSampleR); //StudioTan else outputSampleR = floor(lastSampleR+1.0); //round down or up based on whether it softens treble angles } else { lastSampleL -= (noiseShapingL*0.11); lastSampleR -= (noiseShapingR*0.11); if ((lastSampleL+lastSampleL) >= (inputSampleL+lastSample2L)) outputSampleL = floor(lastSampleL); //DitherMeTimbers else outputSampleL = floor(lastSampleL+1.0); //round down or up based on whether it softens treble angles if ((lastSampleR+lastSampleR) >= (inputSampleR+lastSample2R)) outputSampleR = floor(lastSampleR); //DitherMeTimbers else outputSampleR = floor(lastSampleR+1.0); //round down or up based on whether it softens treble angles } noiseShapingL += outputSampleL; noiseShapingL -= lastSampleL; //apply noise shaping lastSample2L = lastSampleL; lastSampleL = inputSampleL; //we retain three samples in a row noiseShapingR += outputSampleR; noiseShapingR -= lastSampleR; //apply noise shaping lastSample2R = lastSampleR; lastSampleR = inputSampleR; //we retain three samples in a row //end StudioTan or Dither Me Timbers } //shared output stage long double noiseSuppressL = fabs(inputSampleL); if (noiseShapingL > noiseSuppressL) noiseShapingL = 
noiseSuppressL; if (noiseShapingL < -noiseSuppressL) noiseShapingL = -noiseSuppressL; long double noiseSuppressR = fabs(inputSampleR); if (noiseShapingR > noiseSuppressR) noiseShapingR = noiseSuppressR; if (noiseShapingR < -noiseSuppressR) noiseShapingR = -noiseSuppressR; double ironBarL; double ironBarR; if (highres) { ironBarL = outputSampleL / 8388608.0; ironBarR = outputSampleR / 8388608.0; } else { ironBarL = outputSampleL / 32768.0; ironBarR = outputSampleR / 32768.0; } if (ironBarL > 1.0) ironBarL = 1.0; if (ironBarL < -1.0) ironBarL = -1.0; if (ironBarR > 1.0) ironBarR = 1.0; if (ironBarR < -1.0) ironBarR = -1.0; *out1 = ironBarL; *out2 = ironBarR; *in1++; *in2++; *out1++; *out2++; } }
8,248
357
/** * This file is part of Eclipse Steady. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * SPDX-License-Identifier: Apache-2.0 * SPDX-FileCopyrightText: Copyright (c) 2018-2020 SAP SE or an SAP affiliate company and Eclipse Steady contributors */ package org.eclipse.steady.java.sign.gson; import java.lang.reflect.Type; import java.util.Set; import org.eclipse.steady.ConstructId; import org.eclipse.steady.java.JavaClassId; import org.eclipse.steady.java.JavaClassInit; import org.eclipse.steady.java.JavaConstructorId; import org.eclipse.steady.java.JavaId; import org.eclipse.steady.java.JavaMethodId; import org.eclipse.steady.java.JavaPackageId; import org.eclipse.steady.java.sign.ASTConstructBodySignature; import org.eclipse.steady.java.sign.ASTSignatureChange; import com.google.gson.GsonBuilder; import com.google.gson.JsonArray; import com.google.gson.JsonDeserializationContext; import com.google.gson.JsonDeserializer; import com.google.gson.JsonElement; import com.google.gson.JsonObject; import com.google.gson.JsonParseException; import com.google.gson.JsonPrimitive; import com.google.gson.JsonSerializationContext; import com.google.gson.JsonSerializer; /** * <p>GsonHelper class.</p> */ public class GsonHelper { /** * Returns a Vulas-specific GsonBuilder, i.e., with several custom serializers and deserializers registered. * * * @return a {@link com.google.gson.GsonBuilder} object. 
*/ public static GsonBuilder getCustomGsonBuilder() { final GsonBuilder gson = new GsonBuilder(); // Subclasses of ConstructId and JavaId gson.registerTypeAdapter(ConstructId.class, new ConstructIdSerializer()); gson.registerTypeAdapter(JavaPackageId.class, new ConstructIdSerializer()); gson.registerTypeAdapter(JavaClassId.class, new ConstructIdSerializer()); gson.registerTypeAdapter(JavaClassInit.class, new ConstructIdSerializer()); gson.registerTypeAdapter(JavaConstructorId.class, new ConstructIdSerializer()); gson.registerTypeAdapter(JavaMethodId.class, new ConstructIdSerializer()); // Signature-related classes gson.registerTypeAdapter(ASTSignatureChange.class, new ASTSignatureChangeDeserializer()); gson.registerTypeAdapter(ASTConstructBodySignature.class, new ASTSignatureDeserializer()); return gson; } static class ConstructIdSerializer implements JsonSerializer<ConstructId>, JsonDeserializer<ConstructId> { public JsonElement serialize( ConstructId src, Type typeOfSrc, JsonSerializationContext context) { final JsonObject c = new JsonObject(); c.addProperty("lang", src.getLanguage().toString()); c.addProperty("type", JavaId.typeToString(((JavaId) src).getType())); c.addProperty("qname", src.getQualifiedName()); final Set<String> annotations = ((JavaId) src).getAnnotations(); if (!annotations.isEmpty()) { final JsonArray anno = new JsonArray(); for (String a : annotations) { anno.add(new JsonPrimitive(a)); } c.add("a", anno); } return c; } public ConstructId deserialize( JsonElement json, Type typeOfT, JsonDeserializationContext context) throws JsonParseException { // To be returned ConstructId cid = null; final JsonObject c = (JsonObject) json; final String t = c.getAsJsonPrimitive("type").getAsString(); final String qn = c.getAsJsonPrimitive("qname").getAsString(); if (JavaId.typeToString(JavaId.Type.PACKAGE).equals(t)) cid = new JavaPackageId(qn); else if (JavaId.typeToString(JavaId.Type.CLASS).equals(t)) cid = JavaId.parseClassQName(qn); else if 
(JavaId.typeToString(JavaId.Type.CLASSINIT).equals(t)) cid = JavaId.parseClassInitQName(qn); else if (JavaId.typeToString(JavaId.Type.METHOD).equals(t)) cid = JavaId.parseMethodQName(qn); else if (JavaId.typeToString(JavaId.Type.CONSTRUCTOR).equals(t)) cid = JavaId.parseConstructorQName(qn); // TODO: Add annotations return cid; } } }
1,586
479
<gh_stars>100-1000 #pragma once /* * Error Handling * * All our API calls return integers to signal errors or success. Negative * error-codes are fatal errors that the caller should forward unchanged. * Positive error-codes are API errors that have documented behavior and must * be caught and handled by the caller. */ #include <c-stdaux.h> #include <stdlib.h> int error_slow_origin(int r, const char *function, const char *file, int line); int error_slow_trace(int r, const char *function, const char *file, int line); int error_slow_fold(int r, const char *function, const char *file, int line); /** * error_origin() - fast-path of error_slow_origin() * @r: error code * * This is the fast-path of error_slow_origin(). See its description for * details. * * Return: 0 or negative error code, depending on @r. */ #define error_origin(r) C_CC_MACRO1(ERROR_ORIGIN, (r)) #define ERROR_ORIGIN(r) (_c_likely_(!r) ? 0 : error_slow_origin(r, __func__, __FILE__, __LINE__)) /** * error_trace() - fast-path of error_slow_trace() * @r: error code * * This is the fast-path of error_slow_trace(). See its description for * details. * * Return: @r is returned. */ #define error_trace(r) C_CC_MACRO1(ERROR_TRACE, (r)) #define ERROR_TRACE(r) (_c_likely_(r >= 0) ? r : error_slow_trace(r, __func__, __FILE__, __LINE__)) /** * error_fold() - fast-path of error_slow_fold() * @r: error code * * This is the fast-path of error_slow_fold(). See its description for * details. * * Return: 0 or negative error code, depending on @r. */ #define error_fold(r) C_CC_MACRO1(ERROR_FOLD, (r)) #define ERROR_FOLD(r) (_c_likely_(!r) ? 0 : error_slow_fold(r, __func__, __FILE__, __LINE__))
610
3,897
<gh_stars>1000+ /*************************************************************************************************/ /*! * \file * * \brief Link layer controller ACAD definitions. * * Copyright (c) 2019 Packetcraft, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /*************************************************************************************************/ #ifndef LCTR_API_ADV_ACAD_H #define LCTR_API_ADV_ACAD_H #ifdef __cplusplus extern "C" { #endif /************************************************************************************************** Data Types **************************************************************************************************/ /*! \brief ACAD header */ typedef struct { uint8_t state; /*!< State of ACAD. */ uint8_t opcode; /*!< Opcode of ACAD. */ uint8_t len; /*!< Length of ACAD data field. */ } LctrAcadHdr_t; /*! \brief ACAD data field for channel map update */ typedef struct { LctrAcadHdr_t hdr; /*!< ACAD header. */ uint64_t chanMask; /*!< Channel mask for the update. */ uint16_t instant; /*!< Instant for the update. */ } LctrAcadChanMapUpd_t; /*! \brief ACAD data field for channel map update */ typedef struct { LctrAcadHdr_t hdr; /*!< ACAD header. */ /* Reference values. */ uint32_t bigAnchorPoint; /*!< BIG Anchor Point time. */ /* ACAD fields. */ uint16_t bigOffs; /*!< BIG offset. */ uint8_t bigOffsUnits; /*!< BIG offset units. */ uint16_t isoInter; /*!< ISO interval in units of 1.25ms. */ uint8_t numBis; /*!< Number of BISs. 
*/ uint8_t nse; /*!< Number of subevents. */ uint8_t bn; /*!< Burst number. */ uint32_t subEvtInterUsec; /*!< Subevent interval in microseconds. */ uint8_t pto; /*!< Pre-transmission offset. */ uint32_t bisSpaceUsec; /*!< BIS spacing in microseconds. */ uint8_t irc; /*!< Immediate repetition count. */ uint16_t maxPdu; /*!< Maximum PDU size. */ uint32_t seedAccAddr; /*!< Seed access address. */ uint32_t sduInterUsec; /*!< SDU interval in microseconds. */ uint16_t maxSdu; /*!< Maximum SDU size. */ uint16_t baseCrcInit; /*!< Base CRC init. */ uint64_t chanMap; /*!< Channel map. */ uint8_t phy; /*!< PHY used by BIG. */ uint64_t bisPldCtr; /*!< BIS payload counter. */ uint8_t framing; /*!< BIG carries framed or unframed data. */ uint8_t encrypt; /*!< Encryption mode of the BISes in the BIG. */ uint8_t giv[LL_GIV_LEN]; /*!< GIV. */ uint8_t gskd[LL_GSKD_LEN]; /*!< GSKD. */ } LctrAcadBigInfo_t; #ifdef __cplusplus }; #endif #endif /* LCTR_API_ADV_ACAD_H */
1,521
348
{"nom":"Beaumont","circ":"2ème circonscription","dpt":"Gers","inscrits":113,"abs":51,"votants":62,"blancs":12,"nuls":5,"exp":45,"res":[{"nuance":"SOC","nom":"<NAME>","voix":26},{"nuance":"REM","nom":"<NAME>","voix":19}]}
89
333
<reponame>pjfanning/jackson-dataformats-text<filename>csv/src/test/java/com/fasterxml/jackson/dataformat/csv/deser/TestParserStrictQuoting.java package com.fasterxml.jackson.dataformat.csv.deser; import com.fasterxml.jackson.annotation.JsonPropertyOrder; import com.fasterxml.jackson.dataformat.csv.*; // Tests for [dataformat-csv#26] public class TestParserStrictQuoting extends ModuleTestBase { @JsonPropertyOrder({"a", "b"}) protected static class AB { public String a, b; public AB() { } public AB(String a, String b) { this.a = a; this.b = b; } } /* /********************************************************************** /* Test methods /********************************************************************** */ public void testStrictQuoting() throws Exception { final String NUMS = "12345 6789"; final String LONG = NUMS + NUMS + NUMS + NUMS; // 40 chars should do it CsvMapper mapper = mapperForCsv(); assertFalse(mapper.getFactory().isEnabled(CsvGenerator.Feature.STRICT_CHECK_FOR_QUOTING)); CsvSchema schema = mapper.schemaFor(AB.class).withoutHeader(); final AB input = new AB("x", LONG); // with non-strict, should quote String csv = mapper.writer(schema).writeValueAsString(input); assertEquals(aposToQuotes("x,'"+LONG+"'"), csv.trim()); // should be possible to hot-swap // and with strict/optimal, no quoting mapper.configure(CsvGenerator.Feature.STRICT_CHECK_FOR_QUOTING, true); csv = mapper.writer(schema).writeValueAsString(input); assertEquals(aposToQuotes("x,"+LONG), csv.trim()); } }
699
988
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.netbeans.modules.db.dataview.util; import java.sql.Time; import java.text.DateFormat; import java.text.ParseException; import java.text.SimpleDateFormat; import java.util.Date; import java.util.TimeZone; import java.util.logging.Level; import java.util.logging.Logger; import org.netbeans.modules.db.dataview.meta.DBException; import org.openide.util.NbBundle; /** * Implements a date type which can generate instances of java.sql.Date and other JDBC * date-related types. * * @author <NAME> */ public class TimeType { public static final String DEFAULT_FOMAT_PATTERN = "HH:mm:ss"; // NOI18N private static final DateFormat[] TIME_PARSING_FORMATS = new DateFormat[]{ new SimpleDateFormat (DEFAULT_FOMAT_PATTERN), DateFormat.getTimeInstance(), DateFormat.getTimeInstance(DateFormat.SHORT), new SimpleDateFormat("HH:mm"), // NOI18N }; { for (int i = 0; i < TIME_PARSING_FORMATS.length; i++) { TIME_PARSING_FORMATS[i].setLenient(false); } } public static final TimeZone TIME_ZONE = TimeZone.getDefault(); /* Increment to use in computing a successor value. 
*/ // One day = 1 day x 24 hr/day x 60 min/hr x 60 sec/min x 1000 ms/sec private static final long INCREMENT_DAY = 1 * 24 * 60 * 60 * 1000; public static long normalizeTime(long rawTimeMillis) { int dstOffset = (TIME_ZONE.inDaylightTime(new java.util.Date(rawTimeMillis))) ? TIME_ZONE.getDSTSavings() : 0; return (rawTimeMillis < INCREMENT_DAY) ? rawTimeMillis : (rawTimeMillis % INCREMENT_DAY) + dstOffset; } private static Time getNormalizedTime(long time) { Time ret = null; ret = new Time(normalizeTime(time)); return ret; } public static Time convert(Object value) throws DBException { if (null == value) { return null; } else if (value instanceof java.sql.Time) { return (Time) value; } else if (value instanceof String) { Date dVal = doParse ((String) value); if (dVal == null) { throw new DBException(NbBundle.getMessage(TimeType.class,"LBL_invalid_time")); } return getNormalizedTime(dVal.getTime()); } else { throw new DBException(NbBundle.getMessage(TimeType.class,"LBL_invalid_time")); } } private static synchronized Date doParse (String sVal) { Date dVal = null; for (DateFormat format : TIME_PARSING_FORMATS) { try { dVal = format.parse (sVal); break; } catch (ParseException ex) { Logger.getLogger (TimeType.class.getName ()).log (Level.FINEST, ex.getLocalizedMessage () , ex); } } return dVal; } }
1,386
1,338
/* * Copyright 2006, Haiku. All rights reserved. * Distributed under the terms of the MIT License. * * Authors: * <NAME> <<EMAIL>> */ #include "RDefExporter.h" #include <stdio.h> #include <string.h> #include <DataIO.h> // constructor RDefExporter::RDefExporter() : FlatIconExporter() { } // destructor RDefExporter::~RDefExporter() { } // Export status_t RDefExporter::Export(const Icon* icon, BPositionIO* stream) { BMallocIO buffer; status_t ret = FlatIconExporter::Export(icon, &buffer); if (ret < B_OK) return ret; return _Export((const uint8*)buffer.Buffer(), buffer.BufferLength(), stream); } // MIMEType const char* RDefExporter::MIMEType() { return "text/x-vnd.Be.ResourceDef"; } // #pragma mark - // _Export status_t RDefExporter::_Export(const uint8* source, size_t sourceSize, BPositionIO* stream) const { char buffer[2048]; // write header sprintf(buffer, "\nresource(<your resource id here>) #'VICN' array {\n"); size_t size = strlen(buffer); ssize_t written = stream->Write(buffer, size); if (written < 0) return (status_t)written; if (written < (ssize_t)size) return B_ERROR; status_t ret = B_OK; const uint8* b = source; // print one line (32 values) while (sourceSize >= 32) { sprintf(buffer, " $\"%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X" "%.2X%.2X%.2X%.2X\"\n", b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7], b[8], b[9], b[10], b[11], b[12], b[13], b[14], b[15], b[16], b[17], b[18], b[19], b[20], b[21], b[22], b[23], b[24], b[25], b[26], b[27], b[28], b[29], b[30], b[31]); size = strlen(buffer); written = stream->Write(buffer, size); if (written != (ssize_t)size) { if (written >= 0) ret = B_ERROR; else ret = (status_t)written; break; } sourceSize -= 32; b += 32; } // beginning of last line if (ret >= B_OK && sourceSize > 0) { sprintf(buffer, " $\""); size = strlen(buffer); written = stream->Write(buffer, size); if (written != (ssize_t)size) { if (written >= 0) ret = B_ERROR; 
else ret = (status_t)written; } } // last line (up to 32 values) bool endQuotes = sourceSize > 0; if (ret >= B_OK && sourceSize > 0) { for (size_t i = 0; i < sourceSize; i++) { sprintf(buffer, "%.2X", b[i]); size = strlen(buffer); written = stream->Write(buffer, size); if (written != (ssize_t)size) { if (written >= 0) ret = B_ERROR; else ret = (status_t)written; break; } } } if (ret >= B_OK) { // finish sprintf(buffer, endQuotes ? "\"\n};\n" : "};\n"); size = strlen(buffer); written = stream->Write(buffer, size); if (written != (ssize_t)size) { if (written >= 0) ret = B_ERROR; else ret = (status_t)written; } } return ret; }
1,370
491
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software distributed under the License // is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express // or implied. See the License for the specific language governing permissions and limitations under // the License. // // ╔════════════════════════════════════════════════════════════════════════════════════════╗ // ║──█████████╗───███████╗───████████╗───██╗──────██╗───███████╗───████████╗───████████╗───║ // ║──██╔══════╝──██╔════██╗──██╔════██╗──██║──────██║──██╔════██╗──██╔════██╗──██╔════██╗──║ // ║──████████╗───██║────██║──████████╔╝──██║──█╗──██║──█████████║──████████╔╝──██║────██║──║ // ║──██╔═════╝───██║────██║──██╔════██╗──██║█████╗██║──██╔════██║──██╔════██╗──██║────██║──║ // ║──██║─────────╚███████╔╝──██║────██║──╚████╔████╔╝──██║────██║──██║────██║──████████╔╝──║ // ║──╚═╝──────────╚══════╝───╚═╝────╚═╝───╚═══╝╚═══╝───╚═╝────╚═╝──╚═╝────╚═╝──╚═══════╝───║ // ╚════════════════════════════════════════════════════════════════════════════════════════╝ // // Authors: <NAME> (<EMAIL>) // Yzx (<EMAIL>) // <NAME> (<EMAIL>) #pragma once #include <string> #include "unit_test/unit_test_onnx_helper.h" TEST(TestOnnxModels, DynamicMin) { const std::string model_path = std::string(models_dir) + "onnx_models/"; const std::string torch_file = model_path + "resnet50_dynamic.pth"; const std::string onnx_file = model_path + "resnet50_dynamic.onnx"; c10::IValue input = torch::randn({1, 3, 224, 224}, device); std::vector<c10::IValue> inputs{input}; TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float32"); // TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, 
"float16", 5e-2); } TEST(TestOnnxModels, DynamicOpt) { const std::string model_path = std::string(models_dir) + "onnx_models/"; const std::string torch_file = model_path + "resnet50_dynamic.pth"; const std::string onnx_file = model_path + "resnet50_dynamic.onnx"; c10::IValue input = torch::randn({16, 3, 224, 224}, device); std::vector<c10::IValue> inputs{input}; TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float32"); // TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float16", 5e-2); } TEST(TestOnnxModels, DynamicMax) { const std::string model_path = std::string(models_dir) + "onnx_models/"; const std::string torch_file = model_path + "resnet50_dynamic.pth"; const std::string onnx_file = model_path + "resnet50_dynamic.onnx"; c10::IValue input = torch::randn({32, 3, 224, 224}, device); std::vector<c10::IValue> inputs{input}; TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float32"); // TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float16", 5e-2); } TEST(TestOnnxModels, DynamicRandnA) { const std::string model_path = std::string(models_dir) + "onnx_models/"; const std::string torch_file = model_path + "resnet50_dynamic.pth"; const std::string onnx_file = model_path + "resnet50_dynamic.onnx"; c10::IValue input = torch::randn({10, 3, 224, 224}, device); std::vector<c10::IValue> inputs{input}; TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float32"); // TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float16", 5e-2); } TEST(TestOnnxModels, DynamicRandnB) { const std::string model_path = std::string(models_dir) + "onnx_models/"; const std::string torch_file = model_path + "resnet50_dynamic.pth"; const std::string onnx_file = model_path + "resnet50_dynamic.onnx"; c10::IValue input = torch::randn({20, 3, 224, 224}, device); std::vector<c10::IValue> inputs{input}; TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float32"); // TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float16", 5e-2); } 
TEST(TestOnnxModels, DynamicRandnC) { const std::string model_path = std::string(models_dir) + "onnx_models/"; const std::string torch_file = model_path + "resnet50_dynamic.pth"; const std::string onnx_file = model_path + "resnet50_dynamic.onnx"; c10::IValue input = torch::randn({30, 3, 224, 224}, device); std::vector<c10::IValue> inputs{input}; TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float32"); // TestOnnxInferenceDynamic(torch_file, onnx_file, inputs, "float16", 5e-2); }
1,954
2,728
# -------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # -------------------------------------------------------------------------------------------- from typing import cast from xml.etree.ElementTree import ElementTree import urllib.parse as urlparse from ...management import _constants as constants from ...management._api_version import DEFAULT_VERSION from ...management._handle_response_error import _handle_response_error # This module defines functions get_next_template and extract_data_template. # Application code uses functools.partial to substantialize their params and builds an # azure.core.async_paging.AsyncItemPaged instance with the two substantialized functions. # The following is an ATOM feed XML list of QueueDescription with page size = 2. # Tag <feed> has 2 (the page size) children <entry> tags. # Tag <link rel="next" .../> tells the link to the next page. # The whole XML will be deserialized into an XML ElementTree. # Then model class QueueDescriptionFeed deserializes the ElementTree into a QueueDescriptionFeed instance. # (QueueDescriptionFeed is defined in file ../../management/_generated/models/_models.py and _models_py3.py) # Function get_next_template gets the next page of XML data like this one and returns the ElementTree. # Function extract_data_template deserialize data from the ElementTree and provide link to the next page. # azure.core.async_paging.AsyncItemPaged orchestrates the data flow between them. # <feed xmlns="http://www.w3.org/2005/Atom"> # <title type="text">Queues</title> # <id>https://servicebusname.servicebus.windows.net/$Resources/queues?$skip=0&amp;$top=2&amp;api-version=2017-04</id> # <updated>2020-06-30T23:49:41Z</updated> # <link rel="self" href="https://servicebusname.servicebus.windows.net/$Resources/queues? 
# $skip=0&amp;$top=2&amp;api-version=2017-04"/> # <link rel="next" href="https://servicebusname.servicebus.windows.net/$Resources/queues? # %24skip=2&amp;%24top=2&amp;api-version=2017-04"/> # # <entry xml:base="https://servicebusname.servicebus.windows.net/$Resources/queues? # $skip=0&amp;$top=2&amp;api-version=2017-04"> # <id>https://servicebusname.servicebus.windows.net/5?api-version=2017-04</id> # <title type="text">5</title> # <published>2020-06-05T00:24:34Z</published> # <updated>2020-06-25T05:57:29Z</updated> # <author> # <name>servicebusname</name> # </author> # <link rel="self" href="../5?api-version=2017-04"/> # <content type="application/xml"> # <QueueDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect" # xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> # ... # </QueueDescription> # </content> # </entry> # <entry xml:base="https://servicebusname.servicebus.windows.net/$Resources/queues? # $skip=0&amp;$top=2&amp;api-version=2017-04"> # <id>https://servicebusname.servicebus.windows.net/6?api-version=2017-04</id> # <title type="text">6</title> # <published>2020-06-15T19:49:35Z</published> # <updated>2020-06-15T19:49:35Z</updated> # <author> # <name>servicebusname</name> # </author> # <link rel="self" href="../6?api-version=2017-04"/> # <content type="application/xml"> # <QueueDescription xmlns="http://schemas.microsoft.com/netservices/2010/10/servicebus/connect" # xmlns:i="http://www.w3.org/2001/XMLSchema-instance"> # ... # </QueueDescription> # </content> # </entry> # </feed> async def extract_data_template(feed_class, convert, feed_element): """A function that will be partialized to build a function used by AsyncItemPaged. It deserializes the ElementTree returned from function `get_next_template`, returns data in an iterator and the link to next page. azure.core.async_paging.AsyncItemPaged will use the returned next page to call a partial function created from `get_next_template` to fetch data of next page. 
""" deserialized = feed_class.deserialize(feed_element) list_of_qd = [convert(x) if convert else x for x in deserialized.entry] next_link = None # when the response xml has two <link> tags, the 2nd if the next-page link. if deserialized.link and len(deserialized.link) == 2: next_link = deserialized.link[1].href return next_link, iter( list_of_qd ) # when next_page is None, AsyncPagedItem will stop fetch next page data. async def extract_rule_data_template(feed_class, convert, feed_element): """Special version of function extrat_data_template for Rule. Pass both the XML entry element and the rule instance to function `convert`. Rule needs to extract KeyValue from XML Element and set to Rule model instance manually. The autorest/msrest serialization/deserialization doesn't work for this special part. After autorest is enhanced, this method can be removed. Refer to autorest issue https://github.com/Azure/autorest/issues/3535 """ deserialized = feed_class.deserialize(feed_element) next_link = None if deserialized.link and len(deserialized.link) == 2: next_link = deserialized.link[1].href if deserialized.entry: list_of_entities = [ convert(*x) if convert else x for x in zip( feed_element.findall(constants.ATOM_ENTRY_TAG), deserialized.entry ) ] else: list_of_entities = [] return next_link, iter(list_of_entities) async def get_next_template( list_func, *args, start_index=0, max_page_size=100, **kwargs ): """Call list_func to get the XML data and deserialize it to XML ElementTree. azure.core.async_paging.AsyncItemPaged will call `extract_data_template` and use the returned XML ElementTree to call a partial function created from `extrat_data_template`. """ api_version = kwargs.pop("api_version", DEFAULT_VERSION) if args[0]: # It's next link. It's None for the first page. 
queries = urlparse.parse_qs(urlparse.urlparse(args[0]).query) start_index = int(queries[constants.LIST_OP_SKIP][0]) max_page_size = int(queries[constants.LIST_OP_TOP][0]) api_version = queries[constants.API_VERSION_PARAM_NAME][0] with _handle_response_error(): feed_element = cast( ElementTree, await list_func( skip=start_index, top=max_page_size, api_version=api_version, **kwargs ), ) return feed_element
2,352
1,177
import unittest from pascals_triangle import rows # Tests adapted from `problem-specifications//canonical-data.json` @ v1.2.0 TRIANGLE = [ [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1], [1, 5, 10, 10, 5, 1], [1, 6, 15, 20, 15, 6, 1], [1, 7, 21, 35, 35, 21, 7, 1], [1, 8, 28, 56, 70, 56, 28, 8, 1], [1, 9, 36, 84, 126, 126, 84, 36, 9, 1] ] class PascalsTriangleTest(unittest.TestCase): def test_zero_rows(self): self.assertEqual(rows(0), []) def test_single_row(self): self.assertEqual(rows(1), TRIANGLE[:1]) def test_two_rows(self): self.assertEqual(rows(2), TRIANGLE[:2]) def test_three_rows(self): self.assertEqual(rows(3), TRIANGLE[:3]) def test_four_rows(self): self.assertEqual(rows(4), TRIANGLE[:4]) def test_five_rows(self): self.assertEqual(rows(5), TRIANGLE[:5]) def test_six_rows(self): self.assertEqual(rows(6), TRIANGLE[:6]) def test_ten_rows(self): self.assertEqual(rows(10), TRIANGLE[:10]) def test_negative_rows(self): self.assertEqual(rows(-1), None) if __name__ == '__main__': unittest.main()
583
1,822
""" Test color support """ _stash = globals()["_stash"] def get_all_bg_colors(): """ Return a list of all known bg colors """ return _stash.renderer.BG_COLORS.keys() def get_all_fg_colors(): """ Return a list of all known fg colors """ return _stash.renderer.FG_COLORS.keys() def main(): """ The main function """ print("============ COLOR TEST ===================") bg_colors = get_all_bg_colors() fg_colors = get_all_fg_colors() print("------------ available colors -------------") print("Known FG colors: " + ", ".join(fg_colors)) print("Known BG colors: " + ", ".join(bg_colors)) print("------- showing all combinations ----------") for fg in _stash.renderer.FG_COLORS: for bg in _stash.renderer.BG_COLORS: for bold in (False, True): for italics in (False, True): for underscore in (False, True): for strikethrough in (False, True): for reverse in (False, True): traits = [] if bold: traits.append("bold") if italics: traits.append("italic") if underscore: traits.append("underline") if strikethrough: traits.append("strikethrough") desc = "{}-{}{}{}".format(fg, bg, ("-" if len(traits) > 0 else ""), "-".join(traits)) s = _stash.text_style( desc, dict( color=fg, bgcolor=bg, traits=traits, ) ) print(s) print("================= Done =====================") if __name__ == "__main__": main()
1,276
1,988
<reponame>clayne/botan /* * KDF2 * (C) 1999-2007 <NAME> * * Botan is released under the Simplified BSD License (see license.txt) */ #include <botan/internal/kdf2.h> #include <botan/exceptn.h> namespace Botan { std::string KDF2::name() const { return "KDF2(" + m_hash->name() + ")"; } std::unique_ptr<KDF> KDF2::new_object() const { return std::make_unique<KDF2>(m_hash->new_object()); } void KDF2::kdf(uint8_t key[], size_t key_len, const uint8_t secret[], size_t secret_len, const uint8_t salt[], size_t salt_len, const uint8_t label[], size_t label_len) const { if(key_len == 0) return; const size_t blocks_required = key_len / m_hash->output_length(); if(blocks_required >= 0xFFFFFFFE) throw Invalid_Argument("KDF2 maximum output length exceeeded"); uint32_t counter = 1; secure_vector<uint8_t> h; size_t offset = 0; while(offset != key_len) { m_hash->update(secret, secret_len); m_hash->update_be(counter); m_hash->update(label, label_len); m_hash->update(salt, salt_len); m_hash->final(h); const size_t added = std::min(h.size(), key_len - offset); copy_mem(&key[offset], h.data(), added); offset += added; counter += 1; BOTAN_ASSERT_NOMSG(counter != 0); // no overflow } } }
617
524
# -*- coding:utf-8 -*- __author__ = 'Randolph' import os import sys import time import logging import numpy as np sys.path.append('../') logging.getLogger('tensorflow').disabled = True import tensorflow as tf from utils import checkmate as cm from utils import data_helpers as dh from utils import param_parser as parser from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, average_precision_score args = parser.parameter_parser() MODEL = dh.get_model_name() logger = dh.logger_fn("tflog", "logs/Test-{0}.log".format(time.asctime())) CPT_DIR = 'runs/' + MODEL + '/checkpoints/' BEST_CPT_DIR = 'runs/' + MODEL + '/bestcheckpoints/' SAVE_DIR = 'output/' + MODEL def create_input_data(data: dict): return zip(data['pad_seqs'], data['onehot_labels'], data['labels']) def test_sann(): """Test SANN model.""" # Print parameters used for the model dh.tab_printer(args, logger) # Load word2vec model word2idx, embedding_matrix = dh.load_word2vec_matrix(args.word2vec_file) # Load data logger.info("Loading data...") logger.info("Data processing...") test_data = dh.load_data_and_labels(args, args.test_file, word2idx) # Load sann model OPTION = dh._option(pattern=1) if OPTION == 'B': logger.info("Loading best model...") checkpoint_file = cm.get_best_checkpoint(BEST_CPT_DIR, select_maximum_value=True) else: logger.info("Loading latest model...") checkpoint_file = tf.train.latest_checkpoint(CPT_DIR) logger.info(checkpoint_file) graph = tf.Graph() with graph.as_default(): session_conf = tf.ConfigProto( allow_soft_placement=args.allow_soft_placement, log_device_placement=args.log_device_placement) session_conf.gpu_options.allow_growth = args.gpu_options_allow_growth sess = tf.Session(config=session_conf) with sess.as_default(): # Load the saved meta graph and restore variables saver = tf.train.import_meta_graph("{0}.meta".format(checkpoint_file)) saver.restore(sess, checkpoint_file) # Get the placeholders from the graph by name input_x = 
graph.get_operation_by_name("input_x").outputs[0] input_y = graph.get_operation_by_name("input_y").outputs[0] dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0] is_training = graph.get_operation_by_name("is_training").outputs[0] # Tensors we want to evaluate scores = graph.get_operation_by_name("output/scores").outputs[0] loss = graph.get_operation_by_name("loss/loss").outputs[0] # Split the output nodes name by '|' if you have several output nodes output_node_names = "output/scores" # Save the .pb model file output_graph_def = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, output_node_names.split("|")) tf.train.write_graph(output_graph_def, "graph", "graph-sann-{0}.pb".format(MODEL), as_text=False) # Generate batches for one epoch batches = dh.batch_iter(list(create_input_data(test_data)), args.batch_size, 1, shuffle=False) # Collect the predictions here test_counter, test_loss = 0, 0.0 test_pre_tk = [0.0] * args.topK test_rec_tk = [0.0] * args.topK test_F1_tk = [0.0] * args.topK # Collect the predictions here true_labels = [] predicted_labels = [] predicted_scores = [] # Collect for calculating metrics true_onehot_labels = [] predicted_onehot_scores = [] predicted_onehot_labels_ts = [] predicted_onehot_labels_tk = [[] for _ in range(args.topK)] for batch_test in batches: x, y_onehot, y = zip(*batch_test) feed_dict = { input_x: x, input_y: y_onehot, dropout_keep_prob: 1.0, is_training: False } batch_scores, cur_loss = sess.run([scores, loss], feed_dict) # Prepare for calculating metrics for i in y_onehot: true_onehot_labels.append(i) for j in batch_scores: predicted_onehot_scores.append(j) # Get the predicted labels by threshold batch_predicted_labels_ts, batch_predicted_scores_ts = \ dh.get_label_threshold(scores=batch_scores, threshold=args.threshold) # Add results to collection for i in y: true_labels.append(i) for j in batch_predicted_labels_ts: predicted_labels.append(j) for k in batch_predicted_scores_ts: 
predicted_scores.append(k) # Get onehot predictions by threshold batch_predicted_onehot_labels_ts = \ dh.get_onehot_label_threshold(scores=batch_scores, threshold=args.threshold) for i in batch_predicted_onehot_labels_ts: predicted_onehot_labels_ts.append(i) # Get onehot predictions by topK for top_num in range(args.topK): batch_predicted_onehot_labels_tk = dh.get_onehot_label_topk(scores=batch_scores, top_num=top_num+1) for i in batch_predicted_onehot_labels_tk: predicted_onehot_labels_tk[top_num].append(i) test_loss = test_loss + cur_loss test_counter = test_counter + 1 # Calculate Precision & Recall & F1 test_pre_ts = precision_score(y_true=np.array(true_onehot_labels), y_pred=np.array(predicted_onehot_labels_ts), average='micro') test_rec_ts = recall_score(y_true=np.array(true_onehot_labels), y_pred=np.array(predicted_onehot_labels_ts), average='micro') test_F1_ts = f1_score(y_true=np.array(true_onehot_labels), y_pred=np.array(predicted_onehot_labels_ts), average='micro') for top_num in range(args.topK): test_pre_tk[top_num] = precision_score(y_true=np.array(true_onehot_labels), y_pred=np.array(predicted_onehot_labels_tk[top_num]), average='micro') test_rec_tk[top_num] = recall_score(y_true=np.array(true_onehot_labels), y_pred=np.array(predicted_onehot_labels_tk[top_num]), average='micro') test_F1_tk[top_num] = f1_score(y_true=np.array(true_onehot_labels), y_pred=np.array(predicted_onehot_labels_tk[top_num]), average='micro') # Calculate the average AUC test_auc = roc_auc_score(y_true=np.array(true_onehot_labels), y_score=np.array(predicted_onehot_scores), average='micro') # Calculate the average PR test_prc = average_precision_score(y_true=np.array(true_onehot_labels), y_score=np.array(predicted_onehot_scores), average="micro") test_loss = float(test_loss / test_counter) logger.info("All Test Dataset: Loss {0:g} | AUC {1:g} | AUPRC {2:g}" .format(test_loss, test_auc, test_prc)) # Predict by threshold logger.info("Predict by threshold: Precision {0:g}, Recall 
{1:g}, F1 {2:g}" .format(test_pre_ts, test_rec_ts, test_F1_ts)) # Predict by topK logger.info("Predict by topK:") for top_num in range(args.topK): logger.info("Top{0}: Precision {1:g}, Recall {2:g}, F1 {3:g}" .format(top_num + 1, test_pre_tk[top_num], test_rec_tk[top_num], test_F1_tk[top_num])) # Save the prediction result if not os.path.exists(SAVE_DIR): os.makedirs(SAVE_DIR) dh.create_prediction_file(output_file=SAVE_DIR + "/predictions.json", data_id=test_data['id'], true_labels=true_labels, predict_labels=predicted_labels, predict_scores=predicted_scores) logger.info("All Done.") if __name__ == '__main__': test_sann()
4,567
2,151
<filename>src/compiler/glsl/glcpp/tests/086-reserved-macro-names.c #define __BAD reserved #define GL_ALSO_BAD() also reserved #define THIS__TOO__IS__BAD reserved
62
357
<filename>vmidentity/rest/idm/server/src/test/java/com/vmware/identity/rest/idm/server/test/resources/SolutionUserResourceTest.java<gh_stars>100-1000
/*
 * Copyright (c) 2012-2015 VMware, Inc. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not
 * use this file except in compliance with the License. You may obtain a copy
 * of the License at http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, without
 * warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 */
package com.vmware.identity.rest.idm.server.test.resources;

import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.expectLastCall;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;

import java.io.IOException;
import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.Locale;

import javax.ws.rs.container.ContainerRequestContext;

import org.easymock.EasyMock;
import org.easymock.IMocksControl;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;

import com.vmware.identity.idm.IDMException;
import com.vmware.identity.idm.InvalidArgumentException;
import com.vmware.identity.idm.NoSuchTenantException;
import com.vmware.identity.idm.PrincipalId;
import com.vmware.identity.idm.SolutionDetail;
import com.vmware.identity.idm.SolutionUser;
import com.vmware.identity.idm.client.CasIdmClient;
import com.vmware.identity.rest.core.server.authorization.Config;
import com.vmware.identity.rest.core.server.exception.client.BadRequestException;
import com.vmware.identity.rest.core.server.exception.client.NotFoundException;
import com.vmware.identity.rest.core.server.exception.server.InternalServerErrorException;
import com.vmware.identity.rest.core.server.exception.server.NotImplementedError;
import com.vmware.identity.rest.core.util.CertificateHelper;
import com.vmware.identity.rest.idm.data.SolutionUserDTO;
import com.vmware.identity.rest.idm.server.resources.SolutionUserResource;

/**
 * Unit tests for SolutionUser Resource.
 *
 * Each test drives {@link SolutionUserResource} against a mocked
 * {@link CasIdmClient} (EasyMock record/replay/verify) and checks that IDM
 * exceptions are translated into the expected REST exception types.
 *
 * @author <NAME>
 * @author <NAME>
 */
public class SolutionUserResourceTest {

    // Tenant and domain share the same value in this fixture.
    private static final String TEST_TENANT = "test.local";
    private static final String TEST_DOMAIN = "test.local";
    private static final int MAX_USERS_TO_FETCH = 10;
    private static final String TEST_SOLN_USER_NAME = "testSolnUser";
    private static final String TEST_SOLN_USER_ALIAS = "testSolnUserAlias";
    // PEM certificate checked into the test resources tree.
    private static final String TEST_CERT_LOC = "src/test/resources/test_cert.pem";
    private static final String SOLN_USER_DESC = "Test solution user";
    private static final boolean IS_DISABLED = false;

    private SolutionUserResource solnUserResource;
    private X509Certificate testCertificate;
    private IMocksControl mControl;
    private CasIdmClient mockCasIDMClient;
    private ContainerRequestContext request;

    /**
     * Builds the resource under test with a stubbed request context (locale +
     * correlation-id header) and a mock IDM client; the IDM mock is left in
     * record mode so each test can script its own expectations.
     */
    @Before
    public void setUp() throws CertificateException, IOException {
        mControl = EasyMock.createControl();
        request = EasyMock.createMock(ContainerRequestContext.class);
        EasyMock.expect(request.getLanguage()).andReturn(Locale.getDefault()).anyTimes();
        EasyMock.expect(request.getHeaderString(Config.CORRELATION_ID_HEADER)).andReturn("test").anyTimes();
        EasyMock.replay(request);

        mockCasIDMClient = mControl.createMock(CasIdmClient.class);
        testCertificate = CertificateHelper.convertToX509(getTestPEMCert());
        solnUserResource = new SolutionUserResource(TEST_TENANT, request, null);
        solnUserResource.setIDMClient(mockCasIDMClient);
    }

    /** Happy path: a found solution user is mapped to a populated DTO. */
    @Test
    public void testGet() throws Exception {
        expect(mockCasIDMClient.findSolutionUser(TEST_TENANT, TEST_SOLN_USER_NAME))
                .andReturn(getTestSolutionUser(TEST_SOLN_USER_NAME, TEST_SOLN_USER_ALIAS));
        mControl.replay();

        SolutionUserDTO user = solnUserResource.get(TEST_SOLN_USER_NAME);
        assertNotNull(user);
        assertEquals(TEST_SOLN_USER_NAME, user.getName());
        assertEquals(TEST_TENANT, user.getDomain());
        Assert.assertNotNull(user.getCertificate().getEncoded());
        mControl.verify();
    }

    /** NoSuchTenantException from IDM must surface as 404 NotFoundException. */
    @Test(expected = NotFoundException.class)
    public void testGetOnNoSuchTenant_ThrowsNotFoundEx() throws Exception {
        mockCasIDMClient.findSolutionUser(TEST_TENANT, TEST_SOLN_USER_NAME);
        expectLastCall().andThrow(new NoSuchTenantException("no such tenant"));
        mControl.replay();
        solnUserResource.get(TEST_SOLN_USER_NAME);
        mControl.verify();
    }

    /** InvalidArgumentException from IDM must surface as 400 BadRequestException. */
    @Test(expected = BadRequestException.class)
    public void testGetOnInvalidArgument_ThrowsBadRequestEx() throws Exception {
        mockCasIDMClient.findSolutionUser(TEST_TENANT, TEST_SOLN_USER_NAME);
        expectLastCall().andThrow(new InvalidArgumentException("invalid argument"));
        mControl.replay();
        solnUserResource.get(TEST_SOLN_USER_NAME);
        mControl.verify();
    }

    /** Generic IDMException must surface as 500 InternalServerErrorException. */
    @Test(expected = InternalServerErrorException.class)
    public void testGetOnIDMError_ThrowsInternalServerError() throws Exception {
        mockCasIDMClient.findSolutionUser(TEST_TENANT, TEST_SOLN_USER_NAME);
        expectLastCall().andThrow(new IDMException("IDM error"));
        mControl.replay();
        solnUserResource.get(TEST_SOLN_USER_NAME);
        mControl.verify();
    }

    /** getGroups is not implemented on this resource and must say so. */
    @Test(expected = NotImplementedError.class)
    public void testGetGroups() {
        mControl.replay();
        solnUserResource.getGroups(TEST_SOLN_USER_NAME, MAX_USERS_TO_FETCH);
    }

    // Builds a SolutionUser fixture backed by the test certificate.
    // NOTE(review): the solnUserAlias parameter is currently unused.
    private SolutionUser getTestSolutionUser(String solutionUserName, String solnUserAlias) {
        PrincipalId solutionUserId = new PrincipalId(solutionUserName, TEST_TENANT);
        SolutionDetail detail = new SolutionDetail(testCertificate);
        return new SolutionUser(solutionUserId, detail, IS_DISABLED);
    }

    // Reads the PEM-encoded test certificate from disk as text.
    private static String getTestPEMCert() throws IOException {
        byte[] encoded = Files.readAllBytes(Paths.get(TEST_CERT_LOC));
        return new String(encoded, Charset.defaultCharset());
    }
}
2,321
5,964
<reponame>wenfeifei/miniblink49
{#
  Macros used by the generated V8 bindings to emit the C++ statements that
  convert a v8::Value into a local C++ value.  This file is imported as a
  macro library, so top-level text like this comment is never rendered into
  the generated source.  Be careful with whitespace INSIDE the macro bodies:
  it is copied verbatim into the generated C++.
#}
{% macro v8_value_to_local_cpp_value(thing) %}
{# This indirection is just to avoid spurious white-space lines. #}
{{generate_v8_value_to_local_cpp_value(thing) | trim}}
{%- endmacro %}


{#
  Emits, for |thing| (or its v8_value_to_local_cpp_value payload):
  - an error comment when conversion is impossible,
  - otherwise an optional declaration/assignment, an optional side-effecting
    set expression, and an optional early-return failure check.
#}
{% macro generate_v8_value_to_local_cpp_value(thing) %}
{% set item = thing.v8_value_to_local_cpp_value or thing %}
{% if item.error_message %}
/* {{item.error_message}} */
{% else %}
{% if item.declare_variable %}
{% if item.assign_expression %}
{{item.cpp_type}} {{item.cpp_name}} = {{item.assign_expression}};
{% else %}
{{item.cpp_type}} {{item.cpp_name}};
{% endif %}
{% else %}{# item.declare_variable #}
{% if item.assign_expression %}
{{item.cpp_name}} = {{item.assign_expression}};
{% endif %}
{% endif %}{# item.declare_variable #}
{% if item.set_expression %}
{{item.set_expression}};
{% endif %}
{% if item.check_expression %}
if ({{item.check_expression}})
    return{% if item.return_expression %} {{item.return_expression}}{% endif %};
{% endif %}{# item.check_expression #}
{% endif %}{# item.error_message #}
{% endmacro %}


{#
  Emits a static validValues[] array listing the allowed string values of an
  IDL enumeration, for use by enum-validation code in the caller.
#}
{% macro declare_enum_validation_variable(enum_values) %}
static const char* validValues[] = {
{% for enum_value in enum_values %}
    "{{enum_value}}",
{% endfor %}
};
{%-endmacro %}
485
1,444
package mage.cards.c;

import java.util.UUID;
import mage.MageInt;
import mage.abilities.Ability;
import mage.abilities.common.AttacksTriggeredAbility;
import mage.abilities.common.SimpleActivatedAbility;
import mage.abilities.costs.Cost;
import mage.abilities.costs.common.RemoveAllCountersSourceCost;
import mage.abilities.costs.mana.ManaCostsImpl;
import mage.abilities.dynamicvalue.DynamicValue;
import mage.abilities.dynamicvalue.common.EquipmentAttachedCount;
import mage.abilities.effects.Effect;
import mage.abilities.effects.common.DamageTargetEffect;
import mage.abilities.effects.common.counter.AddCountersSourceEffect;
import mage.abilities.keyword.FirstStrikeAbility;
import mage.abilities.keyword.ReachAbility;
import mage.cards.CardImpl;
import mage.cards.CardSetInfo;
import mage.constants.CardType;
import mage.constants.SubType;
import mage.constants.SuperType;
import mage.constants.TargetController;
import mage.constants.Zone;
import mage.counters.CounterType;
import mage.filter.common.FilterAttackingOrBlockingCreature;
import mage.game.Game;
import mage.target.common.TargetCreaturePermanent;

/**
 * Card implementation for "Catti-brie of Mithral Hall" ({G}{W} Legendary
 * Creature - Human Archer, 2/2 with first strike and reach).
 *
 * @author zeffirojoe
 */
public final class CattiBrieOfMithralHall extends CardImpl {

    // Target restriction for the activated ability: attacking or blocking
    // creatures controlled by an opponent only.
    private static final FilterAttackingOrBlockingCreature filter = new FilterAttackingOrBlockingCreature(
            "attacking or blocking creature an opponent controls");

    static {
        filter.add(TargetController.OPPONENT.getControllerPredicate());
    }

    public CattiBrieOfMithralHall(UUID ownerId, CardSetInfo setInfo) {
        super(ownerId, setInfo, new CardType[] { CardType.CREATURE }, "{G}{W}");

        this.addSuperType(SuperType.LEGENDARY);
        this.subtype.add(SubType.HUMAN);
        this.subtype.add(SubType.ARCHER);
        this.power = new MageInt(2);
        this.toughness = new MageInt(2);

        // First strike
        this.addAbility(FirstStrikeAbility.getInstance());

        // Reach
        this.addAbility(ReachAbility.getInstance());

        // Whenever Catti-brie of Mithral Hall attacks, put a +1/+1 counter on it for
        // each Equipment attached to it.
        EquipmentAttachedCount amount = new EquipmentAttachedCount();
        this.addAbility(new AttacksTriggeredAbility(
                new AddCountersSourceEffect(CounterType.P1P1.createInstance(), amount, false).setText("put a +1/+1 counter on it for each Equipment attached to it")));

        // {1}, Remove all +1/+1 counters from Catti-brie: It deals X damage to target
        // attacking or blocking creature an opponent controls, where X is the number of
        // counters removed this way
        // (X is computed after payment by CattiBrieRemovedCounterValue below).
        Ability damageAbility = new SimpleActivatedAbility(Zone.BATTLEFIELD,
                new DamageTargetEffect(CattiBrieRemovedCounterValue.instance).setText("it deals X damage to target attacking or blocking creature an opponent controls, where X is the number of counters removed this way"),
                new ManaCostsImpl("{1}"));
        damageAbility.addTarget(new TargetCreaturePermanent(filter));
        damageAbility.addCost(new RemoveAllCountersSourceCost(CounterType.P1P1));
        this.addAbility(damageAbility);
    }

    private CattiBrieOfMithralHall(final CattiBrieOfMithralHall card) {
        super(card);
    }

    @Override
    public CattiBrieOfMithralHall copy() {
        return new CattiBrieOfMithralHall(this);
    }
}

/**
 * Dynamic value equal to the number of +1/+1 counters removed when paying the
 * {@link RemoveAllCountersSourceCost} of the ability that carries it.
 */
enum CattiBrieRemovedCounterValue implements DynamicValue {
    instance;

    @Override
    public int calculate(Game game, Ability sourceAbility, Effect effect) {
        int countersRemoved = 0;
        // Scan the ability's paid costs for the remove-all-counters cost and
        // read back how many counters it actually removed.
        for (Cost cost : sourceAbility.getCosts()) {
            if (cost instanceof RemoveAllCountersSourceCost) {
                countersRemoved = ((RemoveAllCountersSourceCost) cost).getRemovedCounters();
            }
        }
        return countersRemoved;
    }

    @Override
    public CattiBrieRemovedCounterValue copy() {
        // Enum singleton: no state to copy.
        return instance;
    }

    @Override
    public String getMessage() {
        return "";
    }
}
1,394
421
// System::Windows::Forms::DataGridColumnStyle.MappingNameChanged /* * The following example demonstrates the 'MappingNameChanged' event of 'DataGridColumnStyle' class. It adds a DataGrid and a button to a Form. When the user clicks on the 'Change Mapping Name' button, it changes mapping name and generates 'MappingNameChanged' event. */ #using <System.dll> #using <System.Drawing.dll> #using <System.Windows.Forms.dll> #using <System.Data.dll> #using <System.Xml.dll> using namespace System; using namespace System::Drawing; using namespace System::Collections; using namespace System::Windows::Forms; using namespace System::Data; public ref class MyForm: public Form { private: DataGrid^ myDataGrid; bool * flag; Button^ myButton; DataSet^ myDataSet; DataGridColumnStyle^ myColumnStyle; public: MyForm() { InitializeComponent(); SetUp(); } private: void InitializeComponent() { myDataGrid = gcnew DataGrid; myButton = gcnew Button; myDataGrid->Location = Point(24,24); myDataGrid->Name = "myDataGrid"; myDataGrid->CaptionText = "DataGridColumn"; myDataGrid->Height = 130; myDataGrid->Width = 150; myDataGrid->TabIndex = 0; myButton->Location = Point(60,208); myButton->Name = "myButton "; myButton->TabIndex = 3; myButton->Size = System::Drawing::Size( 140, 20 ); myButton->Text = "Change Mapping Name"; myButton->Click += gcnew EventHandler( this, &MyForm::button_Click ); ClientSize = System::Drawing::Size( 292, 273 ); array<Control^>^temp0 = {myButton,myDataGrid}; Controls->AddRange( temp0 ); Name = "Form1"; Text = "MappingNameChanged Event"; ResumeLayout( false ); } void SetUp() { MakeDataSet(); myDataGrid->SetDataBinding( myDataSet, "Orders" ); } void MakeDataSet() { myDataSet = gcnew DataSet( "myDataSet" ); DataTable^ myTable = gcnew DataTable( "Orders" ); DataColumn^ myColumn = gcnew DataColumn( "Amount",Decimal::typeid ); DataColumn^ myColumn1 = gcnew DataColumn( "Orders",Decimal::typeid ); myTable->Columns->Add( myColumn ); myTable->Columns->Add( myColumn1 ); 
myDataSet->Tables->Add( myTable ); DataRow^ newRow; for ( int j = 1; j < 15; j++ ) { newRow = myTable->NewRow(); newRow[ "Amount" ] = j * 10; newRow[ "Orders" ] = 10; myTable->Rows->Add( newRow ); } AddCustomColumnStyle(); } // <Snippet1> private: void AddCustomColumnStyle() { DataGridTableStyle^ myTableStyle = gcnew DataGridTableStyle; myTableStyle->MappingName = "Orders"; myColumnStyle = gcnew DataGridTextBoxColumn; myColumnStyle->MappingName = "Orders"; myColumnStyle->HeaderText = "Orders"; myTableStyle->GridColumnStyles->Add( myColumnStyle ); myDataGrid->TableStyles->Add( myTableStyle ); myColumnStyle->MappingNameChanged += gcnew EventHandler( this, &MyForm::columnStyle_MappingNameChanged ); flag = (bool *)true; } // MappingNameChanged event handler of DataGridColumnStyle. void columnStyle_MappingNameChanged( Object^ /*sender*/, EventArgs^ /*e*/ ) { MessageBox::Show( "Mapping Name changed" ); } // </Snippet1> void button_Click( Object^ /*sender*/, EventArgs^ /*e*/ ) { // Change the Mapping name. if ( flag ) { myColumnStyle = myDataGrid->TableStyles[ 0 ]->GridColumnStyles[ "Orders" ]; myColumnStyle->MappingName = "Amount"; myColumnStyle->HeaderText = "Amount"; this->Refresh(); flag = false; } else { myColumnStyle = myDataGrid->TableStyles[ 0 ]->GridColumnStyles[ "Amount" ]; myColumnStyle->MappingName = "Orders"; myColumnStyle->HeaderText = "Orders"; this->Refresh(); flag = (bool *)true; } } }; int main() { Application::Run( gcnew MyForm ); }
1,740
628
<gh_stars>100-1000 # -*- coding: utf-8 -*- # Generated by Django 1.11 on 2017-04-24 21:09 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('osf', '0015_auto_20170421_1244'), ] operations = [ migrations.RemoveField( model_name='abstractnode', name='guid_string', ), migrations.RemoveField( model_name='basefilenode', name='guid_string', ), migrations.RemoveField( model_name='comment', name='guid_string', ), migrations.RemoveField( model_name='osfuser', name='guid_string', ), migrations.RemoveField( model_name='preprintservice', name='guid_string', ), ]
432
435
<gh_stars>100-1000 { "copyright_text": null, "description": "When I discovered at a young age that I had a life threatening heart condition, the last thing I expected was to have to worry about software. Now, with a heart device implanted in my body, I have come to understand not only how vulnerable medical devices are but how we are making critical choices about software that will have huge societal impact.\n\nI will also touch on potential avenues for accountability, transparency, and access to remedies as we hurtle towards an Internet of Things built on proprietary source code that prevents us from knowing exactly how these vital devices work, what data they are collecting and to what ends, what their vulnerabilities might be, and the extent to which their closed, proprietary nature keeps us from developing societal mechanisms and review processes to keep us safe.", "duration": 2699, "language": "eng", "recorded": "2018-06-03", "related_urls": [ { "label": "Conference schedule", "url": "https://cz.pycon.org/2018/programme/schedule/" } ], "speakers": [ "<NAME>" ], "tags": [], "thumbnail_url": "https://i.ytimg.com/vi/Z5mknpGpLVY/hqdefault.jpg", "title": "Cyborgs Unite!", "videos": [ { "type": "youtube", "url": "https://www.youtube.com/watch?v=Z5mknpGpLVY" } ] }
409
653
<gh_stars>100-1000
//===-- LowerWGLocalMemory.h - SYCL kernel local memory allocation pass ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This pass does the following for each allocate call to
// __sycl_allocateLocalMemory(Size, Alignment) function at the kernel scope:
// - inserts a global (in scope of a program) byte array of Size bytes with
//   specified alignment in work group local address space.
// - replaces allocate call with access to this memory.
//
// For example, the following IR code in a kernel function:
//   define spir_kernel void @KernelA() {
//     %0 = call spir_func i8 addrspace(3)* @__sycl_allocateLocalMemory(
//         i64 128, i64 4)
//     %1 = bitcast i8 addrspace(3)* %0 to i32 addrspace(3)*
//   }
//
// is translated to the following:
//   @WGLocalMem = internal addrspace(3) global [128 x i8] undef, align 4
//   define spir_kernel void @KernelA() {
//     %0 = bitcast i8 addrspace(3)* getelementptr inbounds (
//         [128 x i8], [128 x i8] addrspace(3)* @WGLocalMem, i32 0, i32 0)
//         to i32 addrspace(3)*
//   }
//===----------------------------------------------------------------------===//

#ifndef LLVM_SYCLLOWERIR_LOWERWGLOCALMEMORY_H
#define LLVM_SYCLLOWERIR_LOWERWGLOCALMEMORY_H

#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"

namespace llvm {

// New pass-manager form of the lowering described in the header comment.
class SYCLLowerWGLocalMemoryPass
    : public PassInfoMixin<SYCLLowerWGLocalMemoryPass> {
public:
  // Applies the lowering to every kernel in \p M.
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &);
};

// Legacy pass-manager entry points: factory and pass registration hook.
ModulePass *createSYCLLowerWGLocalMemoryLegacyPass();
void initializeSYCLLowerWGLocalMemoryLegacyPass(PassRegistry &);

} // namespace llvm

#endif // LLVM_SYCLLOWERIR_LOWERWGLOCALMEMORY_H
654
956
/* Cisco Systems, Inc. */ /* Copyright (c) 2016-2016 Cisco Systems, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <string> #include <iostream> #include <pkt_gen.h> #include "utl_ip.h" void COneIPInfo::dump(FILE *fd, const char *offset) const { uint8_t mac[ETHER_ADDR_LEN]; m_mac.copyToArray(mac); char ip_str[100]; get_ip_str(ip_str); std::string mac_str; utl_macaddr_to_str(mac, mac_str); const char *mac_char = resolve_needed() ? "Unknown" : mac_str.c_str(); fprintf(fd, "%sip: %s ", offset, ip_str); if (m_vlan != 0) fprintf(fd, "vlan: %d ", m_vlan); if (m_port != UINT8_MAX) fprintf(fd, "port: %d ", m_port); fprintf(fd, "mac: %s", mac_char); fprintf(fd, "\n"); } bool COneIPInfo::resolve_needed() const { return m_mac.isDefaultAddress(); } /* * Fill buffer p with arp request. * port_id - port id we intend to send on * sip - source IP/MAC information */ void COneIPv4Info::fill_arp_req_buf(uint8_t *p, uint16_t port_id, COneIPInfo *sip) { uint8_t src_mac[ETHER_ADDR_LEN]; sip->get_mac(src_mac); CTestPktGen::create_arp_req(p, ((COneIPv4Info *)sip)->get_ip(), m_ip, src_mac, port_id, m_vlan); } void COneIPv4Info::fill_grat_arp_buf(uint8_t *p) { uint8_t src_mac[ETHER_ADDR_LEN]; get_mac(src_mac); CTestPktGen::create_arp_req(p, m_ip, m_ip, src_mac, 0, m_vlan); } bool COneIPv4Info::is_zero_ip() { return m_ip == 0; } void COneIPv6Info::fill_arp_req_buf(uint8_t *p, uint16_t port_id, COneIPInfo *sip) { //??? implement ipv6 } void COneIPv6Info::fill_grat_arp_buf(uint8_t *p) { //??? 
implement ipv6 } bool COneIPv6Info::is_zero_ip() { return m_ip[0] == 0 && m_ip[1] == 0 && m_ip[2] == 0 && m_ip[3] == 0 && m_ip[4] == 0 && m_ip[5] == 0 && m_ip[6] == 0 && m_ip[7] == 0; } const COneIPInfo *CManyIPInfo::get_next() { const COneIPInfo *ret; if (!m_iter_initiated) { m_ipv4_iter = m_ipv4_resolve.begin(); m_iter_initiated = true; } if (m_ipv4_iter == m_ipv4_resolve.end()) { m_ipv4_iter = m_ipv4_resolve.begin(); return NULL; } ret = &(m_ipv4_iter->second); m_ipv4_iter++; return ret; } void CManyIPInfo::dump(FILE *fd) { ip_vlan_to_many_ip_iter_t it; for (it = m_ipv4_resolve.begin(); it != m_ipv4_resolve.end(); it++) { fprintf(fd, "IPv4 resolved list:\n"); uint8_t mac[ETHER_ADDR_LEN]; it->second.get_mac(mac); fprintf(fd, "ip:%s vlan: %d resolved to mac %s\n", ip_to_str(it->first.get_ip()).c_str(), it->first.get_vlan() , utl_macaddr_to_str(mac).c_str()); } } void CManyIPInfo::insert(const COneIPv4Info &ip_info) { CIpVlan ip_vlan(ip_info.get_ip(), ip_info.get_vlan()); m_ipv4_resolve.insert(std::make_pair(ip_vlan, ip_info)); } bool CManyIPInfo::lookup(uint32_t ip, uint16_t vlan, MacAddress &ret_mac) const { ip_vlan_to_many_ip_iter_t it = m_ipv4_resolve.find(CIpVlan(ip, vlan)); if (it != m_ipv4_resolve.end()) { uint8_t mac[ETHER_ADDR_LEN]; (*it).second.get_mac(mac); ret_mac.set(mac); return true; } else { return false; } } bool CManyIPInfo::exists(uint32_t ip, uint16_t vlan) const { ip_vlan_to_many_ip_iter_t it = m_ipv4_resolve.find(CIpVlan(ip, vlan)); return (it != m_ipv4_resolve.end()); } void CManyIPInfo::clear() { m_ipv4_resolve.clear(); m_ipv6_resolve.clear(); m_iter_initiated = false; } const COneIPInfo *CManyIPInfo::get_first() const { if (m_ipv4_resolve.size() == 0) { return NULL; } else { return &((m_ipv4_resolve.begin())->second); } }
1,935
1,104
<filename>Barbaric/translation.json { "name":"name", "heritage":"heritage", "endurance":"endurance", "lifeblood":"lifeblood", "fatigued-":"Fatigued?", "hero-points":"hero points", "skills":"skills", "combat":"combat", "craft":"craft", "lore":"lore", "physical":"physical", "social":"social", "sorcery":"sorcery", "stealth":"stealth", "traits":"traits", "initative":"Initiative", "armor":"armor", "weapons":"weapons", "weapon":"Weapon", "range":"range", "damage":"damage", "notes":"notes", "equipment":"equipment", "sorcery-notes":"sorcery/notes", "description-short":"desc", "xp":"XP", "experienced":"experienced", "expert":"expert", "master":"master", "grandmaster":"grandmaster", "legend":"legend", "critical-fumble":"Critical Fumble!", "critical-success":"Critical Success!", "roll-i18n":"roll", "player":"Player", "human":"Human", "anunaki":"Anunaki", "apefolk":"Apefolk", "dwarf":"Dwarf", "elf":"Elf", "geckofolk":"Geckofolk", "lizardfolk":"Lizardfolk" }
472
1,223
<gh_stars>1000+
# Copyright 2020 Google LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file or at
# https://developers.google.com/open-source/licenses/bsd

# Micro-benchmarks for a few JAX workloads.  Running this module executes
# every benchmark at import time and prints one JSON record per benchmark
# ({"bench_name", "compile_time", "run_time"}) on stdout.

import json
from functools import partial
import time
import jax.numpy as np
import jax.random as random
from jax import jit
from jax.config import config
config.enable_omnistaging()

# warm up
# (first JAX op pays one-time backend initialization; do it before timing)
np.dot(1.0, 1.0)

def benchit(bench_name, x, f):
    """Times ``jit(f)(x)`` and prints a JSON record.

    The first call includes tracing + compilation; the second hits the
    compiled-executable cache, so its duration approximates pure run time
    and ``compile_time`` is recovered by subtraction.  ``block_until_ready``
    forces JAX's async dispatch to finish before each timestamp is taken.

    Used as a decorator via ``@partial(benchit, name, arg)``: the benchmark
    runs at definition time and the decorated name is rebound to the return
    value (None).
    """
    f_jitted = jit(f)
    t0 = time.time()
    f_jitted(x).block_until_ready()
    t1 = time.time()
    f_jitted(x).block_until_ready()
    t2 = time.time()
    run_time = t2 - t1
    compile_time = t1 - t0 - run_time
    print(json.dumps(
        {"bench_name" : bench_name,
         "compile_time" : compile_time,
         "run_time" : run_time}))

# Broadcasted pairwise sums of a 10k-element vector, reduced over one axis.
@partial(benchit, "sum", 0)
def sum_bench(key):
    xs = random.normal(random.PRNGKey(key), shape=(10000,))
    return np.sum(xs[:, None] + xs[None, :], axis=0)

# Raw PRNG throughput: 1e8 standard-normal samples.
@partial(benchit, "gaussian", 0)
def gaussian_bench(key):
    return random.normal(random.PRNGKey(key), shape=(100000000,))

# Dense 1000x1000 matrix product.
@partial(benchit, "matmul", 0)
def matmul_bench(key):
    mat = random.normal(random.PRNGKey(key), shape=(1000, 1000))
    return np.dot(mat, mat)
480
841
package org.jboss.resteasy.test.validation.resource;

import jakarta.validation.executable.ExecutableType;
import jakarta.validation.executable.ValidateOnExecution;
import jakarta.ws.rs.POST;
import jakarta.ws.rs.Path;

/**
 * Test fixture interface for {@code @ValidateOnExecution} handling.
 * The method disables executable validation ({@link ExecutableType#NONE})
 * at the interface level; the name suggests an implementing class overrides
 * this setting — confirm against the corresponding test resource class.
 */
public interface TestValidateOnExecutionInterface {

    /** POST endpoint at sub-path "overrideInterface1" with validation disabled here. */
    @POST
    @Path("overrideInterface1")
    @ValidateOnExecution(type = {ExecutableType.NONE})
    void overrideInterface1(String s);
}
137
772
<gh_stars>100-1000 { "addCSSButton": "Add CSS", "addImageButton": "Add Image", "addHTMLButton": "Add HTML", "confirmExitWithUnsavedChanges": "Do you want to exit with unsaved changes?", "refreshPreview": "Refresh and Save", "toggleInspectorOn": "Inspector: Off", "toggleInspectorOff": "Inspector: On" }
114
363
<filename>frostmourne-spi/src/main/java/com/autohome/frostmourne/spi/dao/IWeChatSender.java
package com.autohome.frostmourne.spi.dao;

import java.util.List;

/**
 * Abstraction for delivering a notification message over WeChat.
 */
public interface IWeChatSender {

    /**
     * Sends {@code title}/{@code content} to the given users via the robot
     * webhook URL {@code wechatRobotHook}.
     *
     * @param users           recipient identifiers (presumably WeChat user ids —
     *                        confirm against the implementation)
     * @param title           message title
     * @param content         message body
     * @param wechatRobotHook robot webhook URL to post through
     * @return whether the send succeeded, as reported by the implementation
     */
    boolean send(List<String> users, String title, String content, String wechatRobotHook);
}
112
852
#ifndef RecoParticleFlow_Benchmark_MatchCandidateBenchmark_h
#define RecoParticleFlow_Benchmark_MatchCandidateBenchmark_h

#include "DQMOffline/PFTau/interface/Benchmark.h"

#include "DataFormats/Candidate/interface/Candidate.h"
#include "DataFormats/Candidate/interface/CandidateFwd.h"

#include "FWCore/ParameterSet/interface/ParameterSet.h"

#include <vector>

/// To plot Candidate quantities
/// Histograms compare a candidate against its matched reference candidate
/// (delta et/eta/phi vs et); "BR"/"ER" prefixed members appear to be the
/// barrel / endcap eta-region variants (cf. inBarrelRange / inEndcapRange).
class MatchCandidateBenchmark : public Benchmark {
public:
  MatchCandidateBenchmark(Mode mode);
  ~MatchCandidateBenchmark() override;

  /// book histograms
  void setup(DQMStore::IBooker &b);
  /// book histograms, configured (binning / eta ranges) from a ParameterSet
  void setup(DQMStore::IBooker &b, const edm::ParameterSet &parameterSet);

  /// fill histograms with a given particle
  void fillOne(const reco::Candidate &candidate, const reco::Candidate &matchedCandidate);
  /// fill histograms with a given particle, using the ParameterSet configuration
  void fillOne(const reco::Candidate &candidate,
               const reco::Candidate &matchedCandidate,
               const edm::ParameterSet &parameterSet);

protected:
  // 2D response/resolution histograms, all versus et.
  TH2F *delta_et_Over_et_VS_et_;
  TH2F *delta_et_VS_et_;
  TH2F *delta_eta_VS_et_;
  TH2F *delta_phi_VS_et_;
  TH2F *BRdelta_et_Over_et_VS_et_;
  TH2F *ERdelta_et_Over_et_VS_et_;
  // Per-pt-bin 1D pT resolution histograms (overall / barrel / endcap).
  std::vector<TH1F *> pTRes_;
  std::vector<TH1F *> BRpTRes_;
  std::vector<TH1F *> ERpTRes_;
  // pt bin edges used to index the vectors above.
  std::vector<float> ptBins_;

  // Guards against double-booking / filling before setup.
  bool histogramBooked_;

  // Eta boundaries defining the barrel and endcap regions.
  double eta_min_barrel_;
  double eta_max_barrel_;
  double eta_min_endcap_;
  double eta_max_endcap_;

private:
  // Derives ptBins_ from the two ParameterSets.
  void computePtBins(const edm::ParameterSet &, const edm::ParameterSet &);
  // Region test; barrel==true selects the barrel eta window.
  bool inEtaRange(double, bool);
  inline bool inBarrelRange(double value) { return inEtaRange(value, true); }
  inline bool inEndcapRange(double value) { return inEtaRange(value, false); }
};

#endif
651
348
{"nom":"Mesnil-Follemprise","dpt":"Seine-Maritime","inscrits":102,"abs":21,"votants":81,"blancs":11,"nuls":2,"exp":68,"res":[{"panneau":"2","voix":39},{"panneau":"1","voix":29}]}
75
884
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. name = "commondatamodel_objectmodel" __version__ = '1.0.0'
63